Merge branch 'for-6.9/lenovo' into for-linus
author: Jiri Kosina <jkosina@suse.com>
Wed, 13 Mar 2024 20:18:44 +0000 (21:18 +0100)
committer: Jiri Kosina <jkosina@suse.com>
Wed, 13 Mar 2024 20:18:44 +0000 (21:18 +0100)
- 2nd version of the code that applies the proper quirk depending on the
  firmware version for lenovo/cptkbd (Mikhail Khvainitski)

3970 files changed:
.editorconfig [new file with mode: 0644]
.gitignore
.mailmap
CREDITS
Documentation/ABI/testing/debugfs-vfio [new file with mode: 0644]
Documentation/ABI/testing/sysfs-bus-cdx
Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm
Documentation/ABI/testing/sysfs-bus-cxl
Documentation/ABI/testing/sysfs-bus-i3c
Documentation/ABI/testing/sysfs-bus-iio
Documentation/ABI/testing/sysfs-class-led-trigger-netdev
Documentation/ABI/testing/sysfs-class-led-trigger-tty
Documentation/ABI/testing/sysfs-class-net-queues
Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
Documentation/ABI/testing/sysfs-firmware-initrd [new file with mode: 0644]
Documentation/ABI/testing/sysfs-nvmem-cells [new file with mode: 0644]
Documentation/ABI/testing/sysfs-platform-silicom
Documentation/PCI/boot-interrupts.rst
Documentation/PCI/msi-howto.rst
Documentation/RCU/checklist.rst
Documentation/RCU/rcu_dereference.rst
Documentation/RCU/torture.rst
Documentation/accel/introduction.rst
Documentation/admin-guide/acpi/cppc_sysfs.rst
Documentation/admin-guide/cifs/todo.rst
Documentation/admin-guide/cifs/usage.rst
Documentation/admin-guide/devices.txt
Documentation/admin-guide/features.rst
Documentation/admin-guide/hw_random.rst
Documentation/admin-guide/kernel-parameters.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/kernel-per-CPU-kthreads.rst
Documentation/admin-guide/pm/amd-pstate.rst
Documentation/admin-guide/sysrq.rst
Documentation/arch/arc/features.rst
Documentation/arch/arm/features.rst
Documentation/arch/arm64/features.rst
Documentation/arch/arm64/silicon-errata.rst
Documentation/arch/loongarch/features.rst
Documentation/arch/m68k/features.rst
Documentation/arch/mips/features.rst
Documentation/arch/nios2/features.rst
Documentation/arch/openrisc/features.rst
Documentation/arch/parisc/features.rst
Documentation/arch/powerpc/features.rst
Documentation/arch/riscv/features.rst
Documentation/arch/riscv/hwprobe.rst
Documentation/arch/s390/features.rst
Documentation/arch/sh/features.rst
Documentation/arch/sparc/features.rst
Documentation/arch/x86/features.rst
Documentation/arch/x86/tdx.rst
Documentation/arch/xtensa/features.rst
Documentation/block/ioprio.rst
Documentation/core-api/workqueue.rst
Documentation/dev-tools/checkuapi.rst [new file with mode: 0644]
Documentation/dev-tools/index.rst
Documentation/dev-tools/kunit/running_tips.rst
Documentation/dev-tools/kunit/usage.rst
Documentation/devicetree/bindings/Makefile
Documentation/devicetree/bindings/arm/calxeda/l2ecc.yaml
Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt [deleted file]
Documentation/devicetree/bindings/arm/qcom,coresight-remote-etm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml
Documentation/devicetree/bindings/auxdisplay/hit,hd44780.yaml
Documentation/devicetree/bindings/cache/qcom,llcc.yaml
Documentation/devicetree/bindings/clock/baikal,bt1-ccu-pll.yaml
Documentation/devicetree/bindings/connector/usb-connector.yaml
Documentation/devicetree/bindings/cpu/idle-states.yaml
Documentation/devicetree/bindings/display/panel/synaptics,r63353.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt [deleted file]
Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
Documentation/devicetree/bindings/dma/dma-controller.yaml
Documentation/devicetree/bindings/dma/dma-router.yaml
Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml
Documentation/devicetree/bindings/dma/qcom,gpi.yaml
Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
Documentation/devicetree/bindings/dts-coding-style.rst [new file with mode: 0644]
Documentation/devicetree/bindings/eeprom/at24.yaml
Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt [deleted file]
Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt [deleted file]
Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt [deleted file]
Documentation/devicetree/bindings/fpga/altr,freeze-bridge-controller.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/fpga/altr,socfpga-fpga2sdram-bridge.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/fpga/altr,socfpga-hps2fpga-bridge.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/fpga/fpga-bridge.txt [deleted file]
Documentation/devicetree/bindings/fpga/fpga-bridge.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/fpga/xlnx,pr-decoupler.yaml
Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
Documentation/devicetree/bindings/gpu/samsung-g2d.yaml
Documentation/devicetree/bindings/gpu/samsung-rotator.yaml
Documentation/devicetree/bindings/gpu/samsung-scaler.yaml
Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
Documentation/devicetree/bindings/iio/adc/maxim,max34408.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.yaml
Documentation/devicetree/bindings/iio/adc/qcom,spmi-rradc.yaml
Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml
Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml
Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml
Documentation/devicetree/bindings/iio/chemical/aosong,ags02ma.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/iio/dac/adi,ad5791.yaml
Documentation/devicetree/bindings/iio/dac/microchip,mcp4821.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/iio/humidity/ti,hdc3020.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml
Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml
Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/iio/light/liteon,ltr390.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/iio/pressure/honeywell,hsc030pa.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml
Documentation/devicetree/bindings/iio/temperature/melexis,mlx90632.yaml
Documentation/devicetree/bindings/iio/temperature/microchip,mcp9600.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/index.rst
Documentation/devicetree/bindings/input/adafruit,seesaw-gamepad.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/gpio-keys.yaml
Documentation/devicetree/bindings/input/gpio-mouse.txt [deleted file]
Documentation/devicetree/bindings/input/gpio-mouse.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/iqs269a.yaml
Documentation/devicetree/bindings/input/mediatek,pmic-keys.yaml
Documentation/devicetree/bindings/input/microchip,cap11xx.yaml
Documentation/devicetree/bindings/input/sprd,sc27xx-vibrator.yaml
Documentation/devicetree/bindings/input/ti,drv2665.txt [deleted file]
Documentation/devicetree/bindings/input/ti,drv2667.txt [deleted file]
Documentation/devicetree/bindings/input/ti,drv266x.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/neonode,zforce.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt [deleted file]
Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt [deleted file]
Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.yaml
Documentation/devicetree/bindings/interrupt-controller/st,stih407-irq-syscfg.yaml
Documentation/devicetree/bindings/iommu/apple,dart.yaml
Documentation/devicetree/bindings/iommu/arm,smmu.yaml
Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml
Documentation/devicetree/bindings/leds/allwinner,sun50i-a100-ledc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/leds/awinic,aw200xx.yaml
Documentation/devicetree/bindings/leds/backlight/mps,mp3309c.yaml
Documentation/devicetree/bindings/leds/common.yaml
Documentation/devicetree/bindings/leds/qcom,spmi-flash-led.yaml
Documentation/devicetree/bindings/loongarch/cpus.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/loongarch/loongson.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.yaml
Documentation/devicetree/bindings/media/cnm,wave521c.yaml
Documentation/devicetree/bindings/media/samsung,s5p-mfc.yaml
Documentation/devicetree/bindings/mfd/ams,as3711.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/as3711.txt [deleted file]
Documentation/devicetree/bindings/mfd/hisilicon,hi6421-spmi-pmic.yaml
Documentation/devicetree/bindings/mfd/qcom,pm8008.yaml
Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
Documentation/devicetree/bindings/mfd/qcom,tcsr.yaml
Documentation/devicetree/bindings/mfd/sprd,ums512-glbreg.yaml
Documentation/devicetree/bindings/mfd/ti,am3359-tscadc.yaml
Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
Documentation/devicetree/bindings/mmc/arm,pl18x.yaml
Documentation/devicetree/bindings/mmc/sdhci-pxa.yaml
Documentation/devicetree/bindings/net/sff,sfp.yaml
Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
Documentation/devicetree/bindings/pci/qcom,pcie.yaml
Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
Documentation/devicetree/bindings/pci/rockchip-dw-pcie.yaml
Documentation/devicetree/bindings/pci/ti,j721e-pci-ep.yaml
Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml
Documentation/devicetree/bindings/phy/amlogic,g12a-mipi-dphy-analog.yaml
Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
Documentation/devicetree/bindings/phy/mediatek,dsi-phy.yaml
Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
Documentation/devicetree/bindings/phy/qcom,snps-eusb2-phy.yaml
Documentation/devicetree/bindings/pinctrl/pinctrl-single.yaml
Documentation/devicetree/bindings/pinctrl/qcom,ipq5018-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,ipq5332-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,ipq9574-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-common.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,mdm9615-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8909-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8916-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8976-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8994-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,msm8998-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.yaml
Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,qcs404-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,qdu1000-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sc7180-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sc8180x-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sc8280xp-lpass-lpi-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sc8280xp-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sdm630-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sdm670-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sdx55-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sdx65-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sdx75-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/qcom,sm6115-lpass-lpi-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm6115-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm6125-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm6350-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm6375-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm7150-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8150-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8250-lpass-lpi-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8350-lpass-lpi-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8350-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8450-lpass-lpi-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8450-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8550-lpass-lpi-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8550-tlmm.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8650-lpass-lpi-pinctrl.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/qcom,sm8650-tlmm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/qcom,x1e80100-tlmm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/xlnx,zynq-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml
Documentation/devicetree/bindings/power/fsl,scu-pd.yaml
Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml
Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml
Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml
Documentation/devicetree/bindings/power/supply/bq24190.yaml
Documentation/devicetree/bindings/power/supply/richtek,rt9455.yaml
Documentation/devicetree/bindings/power/wakeup-source.txt
Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt [deleted file]
Documentation/devicetree/bindings/pwm/ti,omap-dmtimer-pwm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/mps,mp5416.yaml
Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml
Documentation/devicetree/bindings/riscv/cpus.yaml
Documentation/devicetree/bindings/riscv/extensions.yaml
Documentation/devicetree/bindings/rtc/adi,max31335.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/epson,rx8900.yaml
Documentation/devicetree/bindings/rtc/nuvoton,ma35d1-rtc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
Documentation/devicetree/bindings/security/tpm/google,cr50.txt [deleted file]
Documentation/devicetree/bindings/security/tpm/ibmvtpm.txt [deleted file]
Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt [deleted file]
Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt [deleted file]
Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt [deleted file]
Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt [deleted file]
Documentation/devicetree/bindings/security/tpm/tpm_tis_spi.txt [deleted file]
Documentation/devicetree/bindings/serial/arm,dcc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/serial/fsl,s32-linflexuart.yaml
Documentation/devicetree/bindings/serial/fsl-imx-uart.yaml
Documentation/devicetree/bindings/serial/qcom,msm-uartdm.yaml
Documentation/devicetree/bindings/serial/renesas,sci.yaml
Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
Documentation/devicetree/bindings/serial/sprd-uart.yaml
Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
Documentation/devicetree/bindings/sound/tas2562.yaml
Documentation/devicetree/bindings/sound/ti,tas2781.yaml
Documentation/devicetree/bindings/timer/sifive,clint.yaml
Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml
Documentation/devicetree/bindings/tpm/google,cr50.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/tpm/ibm,vtpm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/tpm/microsoft,ftpm.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/tpm/tcg,tpm-tis-i2c.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/tpm/tcg,tpm-tis-mmio.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/tpm/tcg,tpm_tis-spi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/tpm/tpm-common.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/trivial-devices.yaml
Documentation/devicetree/bindings/usb/generic-xhci.yaml
Documentation/devicetree/bindings/usb/genesys,gl850g.yaml
Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
Documentation/devicetree/bindings/usb/nxp,ptn5110.yaml
Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
Documentation/devicetree/bindings/usb/snps,dwc3.yaml
Documentation/devicetree/bindings/usb/ti,tps6598x.yaml
Documentation/devicetree/bindings/usb/usb-xhci.yaml
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/devicetree/bindings/w1/amd,axi-1wire-host.yaml [new file with mode: 0644]
Documentation/driver-api/pci/p2pdma.rst
Documentation/driver-api/pwm.rst
Documentation/features/vm/TLB/arch-support.txt
Documentation/filesystems/netfs_library.rst
Documentation/filesystems/overlayfs.rst
Documentation/filesystems/smb/ksmbd.rst
Documentation/memory-barriers.txt
Documentation/netlink/specs/rt_link.yaml
Documentation/process/4.Coding.rst
Documentation/process/coding-style.rst
Documentation/rust/arch-support.rst
Documentation/sphinx/kernel_feat.py
Documentation/sphinx/requirements.txt
Documentation/sphinx/templates/kernel-toc.html
Documentation/staging/rpmsg.rst
Documentation/trace/coresight/coresight.rst
Documentation/trace/ftrace.rst
Documentation/trace/kprobes.rst
Documentation/translations/zh_CN/arch/loongarch/features.rst
Documentation/translations/zh_CN/arch/mips/features.rst
Documentation/translations/zh_TW/arch/loongarch/features.rst
Documentation/translations/zh_TW/arch/mips/features.rst
Documentation/usb/gadget-testing.rst
Documentation/usb/raw-gadget.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/locking.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/kernel/rtc.c
arch/alpha/kernel/srmcons.c
arch/arc/include/asm/cacheflush.h
arch/arc/include/asm/jump_label.h
arch/arc/mm/dma.c
arch/arm/Kconfig
arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
arch/arm/boot/dts/broadcom/bcm2711.dtsi
arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
arch/arm/common/locomo.c
arch/arm/configs/mxs_defconfig
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/hardware/locomo.h
arch/arm/include/asm/jump_label.h
arch/arm/include/asm/pgtable.h
arch/arm/mach-davinci/Kconfig
arch/arm/mm/dma-mapping-nommu.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/fault.c
arch/arm/mm/kasan_init.c
arch/arm/vfp/vfpmodule.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/exynos/google/gs101.dtsi
arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
arch/arm64/boot/dts/freescale/imx8mp-evk.dts
arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
arch/arm64/boot/dts/qcom/msm8916.dtsi
arch/arm64/boot/dts/qcom/msm8939.dtsi
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/qcom/msm8998.dtsi
arch/arm64/boot/dts/qcom/qcs404.dtsi
arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
arch/arm64/boot/dts/qcom/sdm630.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
arch/arm64/include/asm/alternative-macros.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/esr.h
arch/arm64/include/asm/irq.h
arch/arm64/include/asm/jump_label.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_nested.h
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/include/asm/kvm_pkvm.h
arch/arm64/include/asm/vdso.h
arch/arm64/include/asm/vncr_mapping.h [new file with mode: 0644]
arch/arm64/kernel/Makefile
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/vdso32/Makefile
arch/arm64/kvm/Kconfig
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/hyp/include/hyp/fault.h
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
arch/arm64/kvm/hyp/nvhe/hyp-init.S
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/mm.c
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/nested.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/kvm/vgic/vgic-mmio-v3.c
arch/arm64/kvm/vgic/vgic-mmio.c
arch/arm64/mm/dma-mapping.c
arch/arm64/tools/cpucaps
arch/csky/abiv1/inc/abi/cacheflush.h
arch/csky/abiv2/inc/abi/cacheflush.h
arch/csky/configs/defconfig
arch/csky/include/asm/jump_label.h
arch/loongarch/Kbuild
arch/loongarch/Kconfig
arch/loongarch/Makefile
arch/loongarch/boot/dts/Makefile
arch/loongarch/boot/dts/loongson-2k0500-ref.dts [new file with mode: 0644]
arch/loongarch/boot/dts/loongson-2k0500.dtsi [new file with mode: 0644]
arch/loongarch/boot/dts/loongson-2k1000-ref.dts [new file with mode: 0644]
arch/loongarch/boot/dts/loongson-2k1000.dtsi [new file with mode: 0644]
arch/loongarch/boot/dts/loongson-2k2000-ref.dts [new file with mode: 0644]
arch/loongarch/boot/dts/loongson-2k2000.dtsi [new file with mode: 0644]
arch/loongarch/configs/loongson3_defconfig
arch/loongarch/include/asm/acpi.h
arch/loongarch/include/asm/bootinfo.h
arch/loongarch/include/asm/crash_core.h [new file with mode: 0644]
arch/loongarch/include/asm/elf.h
arch/loongarch/include/asm/ftrace.h
arch/loongarch/include/asm/jump_label.h
arch/loongarch/include/asm/kvm_host.h
arch/loongarch/include/asm/kvm_vcpu.h
arch/loongarch/include/asm/shmparam.h [deleted file]
arch/loongarch/include/uapi/asm/kvm.h
arch/loongarch/kernel/acpi.c
arch/loongarch/kernel/efi.c
arch/loongarch/kernel/elf.c
arch/loongarch/kernel/env.c
arch/loongarch/kernel/fpu.S
arch/loongarch/kernel/head.S
arch/loongarch/kernel/process.c
arch/loongarch/kernel/setup.c
arch/loongarch/kernel/smp.c
arch/loongarch/kernel/topology.c
arch/loongarch/kvm/Kconfig
arch/loongarch/kvm/exit.c
arch/loongarch/kvm/main.c
arch/loongarch/kvm/mmu.c
arch/loongarch/kvm/switch.S
arch/loongarch/kvm/timer.c
arch/loongarch/kvm/trace.h
arch/loongarch/kvm/vcpu.c
arch/loongarch/mm/kasan_init.c
arch/loongarch/mm/tlb.c
arch/loongarch/net/bpf_jit.c
arch/loongarch/vdso/Makefile
arch/m68k/Makefile
arch/m68k/emu/nfcon.c
arch/m68k/include/asm/cacheflush_mm.h
arch/microblaze/configs/mmu_defconfig
arch/mips/alchemy/common/prom.c
arch/mips/alchemy/common/setup.c
arch/mips/alchemy/devboards/db1200.c
arch/mips/alchemy/devboards/db1550.c
arch/mips/bcm47xx/buttons.c
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/bcm63xx/clk.c
arch/mips/bcm63xx/dev-rng.c
arch/mips/bcm63xx/dev-uart.c
arch/mips/bcm63xx/dev-wdt.c
arch/mips/bcm63xx/irq.c
arch/mips/bcm63xx/setup.c
arch/mips/bcm63xx/timer.c
arch/mips/boot/compressed/dbg.c
arch/mips/boot/compressed/head.S
arch/mips/boot/elf2ecoff.c
arch/mips/cavium-octeon/csrc-octeon.c
arch/mips/cavium-octeon/executive/cvmx-boot-vector.c
arch/mips/cavium-octeon/executive/cvmx-bootmem.c
arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
arch/mips/cavium-octeon/executive/cvmx-pko.c
arch/mips/cavium-octeon/octeon-platform.c
arch/mips/cobalt/setup.c
arch/mips/configs/ip27_defconfig
arch/mips/configs/lemote2f_defconfig
arch/mips/configs/loongson3_defconfig
arch/mips/configs/pic32mzda_defconfig
arch/mips/fw/arc/memory.c
arch/mips/fw/arc/promlib.c
arch/mips/include/asm/cacheflush.h
arch/mips/include/asm/checksum.h
arch/mips/include/asm/debug.h
arch/mips/include/asm/dmi.h
arch/mips/include/asm/io.h
arch/mips/include/asm/jump_label.h
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/mach-au1x00/au1000.h
arch/mips/include/asm/mach-au1x00/au1000_dma.h
arch/mips/include/asm/mach-au1x00/gpio-au1000.h
arch/mips/include/asm/mach-cobalt/cobalt.h
arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
arch/mips/include/asm/mach-loongson64/loongson_regs.h
arch/mips/include/asm/mach-malta/spaces.h
arch/mips/include/asm/mips-boards/bonito64.h
arch/mips/include/asm/mips-cpc.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/octeon/cvmx-bootinfo.h
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
arch/mips/include/asm/octeon/cvmx-pko.h
arch/mips/include/asm/octeon/cvmx-pow.h
arch/mips/include/asm/octeon/octeon-model.h
arch/mips/include/asm/page.h
arch/mips/include/asm/pci.h
arch/mips/include/asm/pgtable-bits.h
arch/mips/include/asm/ptrace.h
arch/mips/include/asm/sgi/mc.h
arch/mips/include/asm/sn/klconfig.h
arch/mips/include/asm/sync.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/timex.h
arch/mips/include/asm/vdso/vdso.h
arch/mips/include/uapi/asm/mman.h
arch/mips/include/uapi/asm/msgbuf.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/elf.c
arch/mips/kernel/genex.S
arch/mips/kernel/kprobes.c
arch/mips/kernel/prom.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/relocate.c
arch/mips/kernel/relocate_kernel.S
arch/mips/kernel/setup.c
arch/mips/kernel/signal.c
arch/mips/kernel/traps.c
arch/mips/kernel/vpe.c
arch/mips/kvm/Kconfig
arch/mips/kvm/emulate.c
arch/mips/lantiq/prom.c
arch/mips/loongson2ef/common/platform.c
arch/mips/loongson64/init.c
arch/mips/loongson64/numa.c
arch/mips/loongson64/smp.c
arch/mips/mm/c-r4k.c
arch/mips/mm/cex-gen.S
arch/mips/mm/dma-noncoherent.c
arch/mips/mm/init.c
arch/mips/mm/ioremap.c
arch/mips/mm/tlb-r3k.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/net/bpf_jit_comp32.c
arch/mips/pci/ops-loongson2.c
arch/mips/pci/pci-alchemy.c
arch/mips/pci/pci-ar2315.c
arch/mips/pci/pci-lantiq.c
arch/mips/pci/pci-octeon.c
arch/mips/pci/pci-xtalk-bridge.c
arch/mips/pci/pcie-octeon.c
arch/mips/ralink/mt7621.c
arch/mips/sgi-ip27/Makefile
arch/mips/sgi-ip27/ip27-berr.c
arch/mips/sgi-ip27/ip27-common.h
arch/mips/sgi-ip27/ip27-hubio.c [deleted file]
arch/mips/sgi-ip27/ip27-irq.c
arch/mips/sgi-ip27/ip27-memory.c
arch/mips/sgi-ip27/ip27-nmi.c
arch/mips/sgi-ip30/ip30-console.c
arch/mips/sgi-ip30/ip30-setup.c
arch/mips/sgi-ip32/crime.c
arch/mips/sgi-ip32/ip32-berr.c
arch/mips/sgi-ip32/ip32-common.h [new file with mode: 0644]
arch/mips/sgi-ip32/ip32-irq.c
arch/mips/sgi-ip32/ip32-memory.c
arch/mips/sgi-ip32/ip32-reset.c
arch/mips/sgi-ip32/ip32-setup.c
arch/mips/txx9/generic/pci.c
arch/nios2/include/asm/cacheflush.h
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/assembly.h
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/extable.h [new file with mode: 0644]
arch/parisc/include/asm/jump_label.h
arch/parisc/include/asm/special_insns.h
arch/parisc/include/asm/uaccess.h
arch/parisc/kernel/cache.c
arch/parisc/kernel/drivers.c
arch/parisc/kernel/firmware.c
arch/parisc/kernel/unaligned.c
arch/parisc/kernel/vmlinux.lds.S
arch/parisc/mm/fault.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/hvconsole.h
arch/powerpc/include/asm/hvsi.h
arch/powerpc/include/asm/jump_label.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/irq_64.c
arch/powerpc/kvm/Kconfig
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/pseries/hvconsole.c
arch/powerpc/sysdev/fsl_pci.c
arch/riscv/Kconfig
arch/riscv/Kconfig.errata
arch/riscv/Makefile
arch/riscv/boot/dts/sophgo/sg2042.dtsi
arch/riscv/configs/defconfig
arch/riscv/configs/rv32_defconfig [deleted file]
arch/riscv/errata/thead/errata.c
arch/riscv/include/asm/arch_hweight.h [new file with mode: 0644]
arch/riscv/include/asm/archrandom.h [new file with mode: 0644]
arch/riscv/include/asm/asm-extable.h
arch/riscv/include/asm/asm-prototypes.h
arch/riscv/include/asm/bitops.h
arch/riscv/include/asm/cacheflush.h
arch/riscv/include/asm/checksum.h [new file with mode: 0644]
arch/riscv/include/asm/cpu_ops.h
arch/riscv/include/asm/cpufeature.h
arch/riscv/include/asm/csr.h
arch/riscv/include/asm/entry-common.h
arch/riscv/include/asm/errata_list.h
arch/riscv/include/asm/ftrace.h
arch/riscv/include/asm/hugetlb.h
arch/riscv/include/asm/hwcap.h
arch/riscv/include/asm/hwprobe.h
arch/riscv/include/asm/jump_label.h
arch/riscv/include/asm/kfence.h
arch/riscv/include/asm/kvm_host.h
arch/riscv/include/asm/kvm_vcpu_sbi.h
arch/riscv/include/asm/paravirt.h [new file with mode: 0644]
arch/riscv/include/asm/paravirt_api_clock.h [new file with mode: 0644]
arch/riscv/include/asm/pgtable-64.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/processor.h
arch/riscv/include/asm/sbi.h
arch/riscv/include/asm/sections.h
arch/riscv/include/asm/simd.h [new file with mode: 0644]
arch/riscv/include/asm/stacktrace.h
arch/riscv/include/asm/switch_to.h
arch/riscv/include/asm/thread_info.h
arch/riscv/include/asm/tlb.h
arch/riscv/include/asm/tlbbatch.h [new file with mode: 0644]
arch/riscv/include/asm/tlbflush.h
arch/riscv/include/asm/vector.h
arch/riscv/include/asm/word-at-a-time.h
arch/riscv/include/asm/xip_fixup.h
arch/riscv/include/asm/xor.h [new file with mode: 0644]
arch/riscv/include/uapi/asm/hwprobe.h
arch/riscv/include/uapi/asm/kvm.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/cpu-hotplug.c
arch/riscv/kernel/cpu_ops.c
arch/riscv/kernel/cpu_ops_sbi.c
arch/riscv/kernel/cpu_ops_spinwait.c
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/efi.c
arch/riscv/kernel/entry.S
arch/riscv/kernel/ftrace.c
arch/riscv/kernel/head.S
arch/riscv/kernel/kernel_mode_vector.c [new file with mode: 0644]
arch/riscv/kernel/mcount-dyn.S
arch/riscv/kernel/mcount.S
arch/riscv/kernel/module.c
arch/riscv/kernel/paravirt.c [new file with mode: 0644]
arch/riscv/kernel/patch.c
arch/riscv/kernel/pi/cmdline_early.c
arch/riscv/kernel/process.c
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/sbi.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/signal.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/smpboot.c
arch/riscv/kernel/suspend.c
arch/riscv/kernel/sys_hwprobe.c [new file with mode: 0644]
arch/riscv/kernel/sys_riscv.c
arch/riscv/kernel/time.c
arch/riscv/kernel/traps_misaligned.c
arch/riscv/kernel/vdso/hwprobe.c
arch/riscv/kernel/vector.c
arch/riscv/kernel/vmlinux-xip.lds.S
arch/riscv/kernel/vmlinux.lds.S
arch/riscv/kvm/Kconfig
arch/riscv/kvm/Makefile
arch/riscv/kvm/mmu.c
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_onereg.c
arch/riscv/kvm/vcpu_sbi.c
arch/riscv/kvm/vcpu_sbi_replace.c
arch/riscv/kvm/vcpu_sbi_sta.c [new file with mode: 0644]
arch/riscv/kvm/vcpu_switch.S
arch/riscv/kvm/vcpu_vector.c
arch/riscv/kvm/vm.c
arch/riscv/lib/Makefile
arch/riscv/lib/clear_page.S
arch/riscv/lib/csum.c [new file with mode: 0644]
arch/riscv/lib/riscv_v_helpers.c [new file with mode: 0644]
arch/riscv/lib/tishift.S
arch/riscv/lib/uaccess.S
arch/riscv/lib/uaccess_vector.S [new file with mode: 0644]
arch/riscv/lib/xor.S [new file with mode: 0644]
arch/riscv/mm/Makefile
arch/riscv/mm/dma-noncoherent.c
arch/riscv/mm/extable.c
arch/riscv/mm/fault.c
arch/riscv/mm/hugetlbpage.c
arch/riscv/mm/init.c
arch/riscv/mm/kasan_init.c
arch/riscv/mm/pageattr.c
arch/riscv/mm/pgtable.c
arch/riscv/mm/tlbflush.c
arch/riscv/net/bpf_jit_comp64.c
arch/s390/Kconfig
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/facility.h
arch/s390/include/asm/jump_label.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/pci_io.h
arch/s390/kernel/Makefile
arch/s390/kernel/facility.c [new file with mode: 0644]
arch/s390/kernel/fpu.c
arch/s390/kernel/perf_pai_crypto.c
arch/s390/kernel/perf_pai_ext.c
arch/s390/kernel/ptrace.c
arch/s390/kvm/Kconfig
arch/s390/kvm/guestdbg.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/kvm/vsie.c
arch/s390/mm/fault.c
arch/s390/mm/gmap.c
arch/s390/pci/pci_mmio.c
arch/sh/boards/mach-ecovec24/setup.c
arch/sh/configs/sdk7786_defconfig
arch/sh/include/asm/cacheflush.h
arch/sh/kernel/vsyscall/Makefile
arch/sparc/include/asm/cacheflush_32.h
arch/sparc/include/asm/cacheflush_64.h
arch/sparc/include/asm/jump_label.h
arch/sparc/kernel/pci_sabre.c
arch/sparc/kernel/pci_schizo.c
arch/sparc/vdso/Makefile
arch/um/Makefile
arch/um/Makefile-skas
arch/um/drivers/chan.h
arch/um/drivers/chan_kern.c
arch/um/drivers/chan_user.c
arch/um/drivers/chan_user.h
arch/um/drivers/line.c
arch/um/drivers/line.h
arch/um/drivers/net_kern.c
arch/um/drivers/null.c
arch/um/drivers/virt-pci.c
arch/um/include/asm/cpufeature.h
arch/um/include/asm/mmu.h
arch/um/include/asm/processor-generic.h
arch/um/include/shared/kern_util.h
arch/um/include/shared/os.h
arch/um/include/shared/ptrace_user.h
arch/um/include/shared/registers.h
arch/um/kernel/process.c
arch/um/kernel/ptrace.c
arch/um/kernel/signal.c
arch/um/kernel/skas/uaccess.c
arch/um/kernel/time.c
arch/um/os-Linux/helper.c
arch/um/os-Linux/registers.c
arch/um/os-Linux/skas/process.c
arch/um/os-Linux/start_up.c
arch/um/os-Linux/util.c
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/Makefile
arch/x86/boot/header.S
arch/x86/boot/setup.ld
arch/x86/coco/tdx/tdx-shared.c
arch/x86/include/asm/coco.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/jump_label.h
arch/x86/include/asm/kmsan.h
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm-x86-pmu-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/rmwcc.h
arch/x86/include/asm/shared/tdx.h
arch/x86/include/asm/special_insns.h
arch/x86/include/asm/syscall_wrapper.h
arch/x86/include/asm/tdx.h
arch/x86/include/asm/uaccess.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/kernel/alternative.c
arch/x86/kernel/aperture_64.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/hpet.c
arch/x86/kernel/kvm.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/rtc.c
arch/x86/kernel/setup.c
arch/x86/kernel/topology.c
arch/x86/kernel/traps.c
arch/x86/kvm/Kconfig
arch/x86/kvm/Makefile
arch/x86/kvm/cpuid.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/debugfs.c
arch/x86/kvm/emulate.c
arch/x86/kvm/governed_features.h
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/irq.c
arch/x86/kvm/irq_comm.c
arch/x86/kvm/kvm_emulate.h
arch/x86/kvm/kvm_onhyperv.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/reverse_cpuid.h
arch/x86/kvm/svm/hyperv.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/svm/svm_onhyperv.c
arch/x86/kvm/svm/svm_ops.h
arch/x86/kvm/svm/vmenter.S
arch/x86/kvm/vmx/hyperv.c
arch/x86/kvm/vmx/hyperv.h
arch/x86/kvm/vmx/hyperv_evmcs.c [new file with mode: 0644]
arch/x86/kvm/vmx/hyperv_evmcs.h [new file with mode: 0644]
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/nested.h
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/sgx.c
arch/x86/kvm/vmx/vmenter.S
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/vmx/vmx_onhyperv.c [new file with mode: 0644]
arch/x86/kvm/vmx/vmx_onhyperv.h [new file with mode: 0644]
arch/x86/kvm/vmx/vmx_ops.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/kvm/xen.c
arch/x86/lib/getuser.S
arch/x86/lib/putuser.S
arch/x86/pci/acpi.c
arch/x86/pci/mmconfig-shared.c
arch/x86/pci/mmconfig_32.c
arch/x86/pci/mmconfig_64.c
arch/x86/pci/pcbios.c
arch/x86/um/asm/elf.h
arch/x86/um/asm/processor_64.h
arch/x86/um/os-Linux/Makefile
arch/x86/um/os-Linux/prctl.c [deleted file]
arch/x86/um/ptrace_32.c
arch/x86/um/ptrace_64.c
arch/x86/um/shared/sysdep/ptrace_32.h
arch/x86/um/shared/sysdep/ptrace_user.h
arch/x86/um/shared/sysdep/stub_32.h
arch/x86/um/shared/sysdep/stub_64.h
arch/x86/um/syscalls_64.c
arch/x86/um/tls_64.c
arch/x86/virt/vmx/tdx/Makefile
arch/x86/virt/vmx/tdx/tdx.c [new file with mode: 0644]
arch/x86/virt/vmx/tdx/tdx.h [new file with mode: 0644]
arch/xtensa/Kconfig
arch/xtensa/Makefile
arch/xtensa/include/asm/asmmacro.h
arch/xtensa/include/asm/cacheflush.h
arch/xtensa/include/asm/jump_label.h
arch/xtensa/lib/pci-auto.c
arch/xtensa/platforms/iss/console.c
block/bio-integrity.c
block/blk-cgroup.c
block/blk-core.c
block/blk-iocost.c
block/blk-map.c
block/blk-mq-debugfs.c
block/blk-mq-sched.c
block/blk-mq.c
block/blk-wbt.c
block/ioctl.c
block/ioprio.c
block/partitions/core.c
crypto/algif_hash.c
crypto/cbc.c
drivers/accel/ivpu/ivpu_debugfs.c
drivers/accel/ivpu/ivpu_drv.c
drivers/accel/ivpu/ivpu_drv.h
drivers/accel/ivpu/ivpu_fw.c
drivers/accel/ivpu/ivpu_gem.c
drivers/accel/ivpu/ivpu_gem.h
drivers/accel/ivpu/ivpu_hw_37xx.c
drivers/accel/ivpu/ivpu_hw_40xx.c
drivers/accel/ivpu/ivpu_ipc.c
drivers/accel/ivpu/ivpu_job.c
drivers/accel/ivpu/ivpu_job.h
drivers/accel/ivpu/ivpu_mmu.c
drivers/accel/ivpu/ivpu_mmu.h
drivers/accel/ivpu/ivpu_mmu_context.c
drivers/accel/ivpu/ivpu_pm.c
drivers/accel/ivpu/ivpu_pm.h
drivers/acpi/Kconfig
drivers/acpi/acpi_processor.c
drivers/acpi/apei/ghes.c
drivers/acpi/numa/hmat.c
drivers/acpi/property.c
drivers/acpi/resource.c
drivers/acpi/scan.c
drivers/acpi/tables.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h
drivers/android/binder_alloc_selftest.c
drivers/android/binder_trace.h
drivers/android/binderfs.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/libata-sata.c
drivers/atm/idt77252.c
drivers/base/arch_topology.c
drivers/base/auxiliary.c
drivers/base/bus.c
drivers/base/class.c
drivers/base/container.c
drivers/base/core.c
drivers/base/cpu.c
drivers/base/dd.c
drivers/base/init.c
drivers/base/isa.c
drivers/base/memory.c
drivers/base/node.c
drivers/base/power/clock_ops.c
drivers/base/power/main.c
drivers/base/power/qos.c
drivers/base/power/trace.c
drivers/base/property.c
drivers/base/soc.c
drivers/base/swnode.c
drivers/block/aoe/aoeblk.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/null_blk/main.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/bluetooth/btmtkuart.c
drivers/bluetooth/btnxpuart.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_serdev.c
drivers/bus/mhi/ep/internal.h
drivers/bus/mhi/ep/main.c
drivers/bus/mhi/ep/ring.c
drivers/bus/mhi/host/init.c
drivers/bus/mhi/host/internal.h
drivers/bus/mhi/host/main.c
drivers/bus/mhi/host/pci_generic.c
drivers/bus/mhi/host/pm.c
drivers/bus/moxtet.c
drivers/cdx/cdx.c
drivers/char/ppdev.c
drivers/char/ttyprintk.c
drivers/char/virtio_console.c
drivers/clk/qcom/gcc-x1e80100.c
drivers/clocksource/timer-cadence-ttc.c
drivers/clocksource/timer-ep93xx.c
drivers/clocksource/timer-riscv.c
drivers/clocksource/timer-ti-dm.c
drivers/comedi/comedi_fops.c
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/caam/caamalg_qi2.c
drivers/crypto/caam/caamhash.c
drivers/crypto/ccp/sev-dev.c
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
drivers/cxl/Kconfig
drivers/cxl/acpi.c
drivers/cxl/core/Makefile
drivers/cxl/core/cdat.c [new file with mode: 0644]
drivers/cxl/core/core.h
drivers/cxl/core/mbox.c
drivers/cxl/core/memdev.c
drivers/cxl/core/pci.c
drivers/cxl/core/pmem.c
drivers/cxl/core/port.c
drivers/cxl/core/region.c
drivers/cxl/core/trace.h
drivers/cxl/cxl.h
drivers/cxl/cxlmem.h
drivers/cxl/cxlpci.h
drivers/cxl/mem.c
drivers/cxl/pci.c
drivers/cxl/port.c
drivers/dma-buf/heaps/cma_heap.c
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/apple-admac.c
drivers/dma/at_hdmac.c
drivers/dma/dma-axi-dmac.c
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/dw-edma/dw-edma-v0-debugfs.c
drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
drivers/dma/fsl-edma-main.c
drivers/dma/fsl-qdma.c
drivers/dma/idxd/cdev.c
drivers/dma/idxd/device.c
drivers/dma/imx-sdma.c
drivers/dma/ls2x-apb-dma.c [new file with mode: 0644]
drivers/dma/milbeaut-hdmac.c
drivers/dma/milbeaut-xdmac.c
drivers/dma/pl330.c
drivers/dma/sf-pdma/sf-pdma.c
drivers/dma/sf-pdma/sf-pdma.h
drivers/dma/sh/rz-dmac.c
drivers/dma/sh/shdma.h
drivers/dma/sh/usb-dmac.c
drivers/dma/ste_dma40.c
drivers/dma/tegra186-gpc-dma.c
drivers/dma/tegra210-adma.c
drivers/dma/ti/Makefile
drivers/dma/ti/edma.c
drivers/dma/ti/k3-psil-am62p.c [new file with mode: 0644]
drivers/dma/ti/k3-psil-priv.h
drivers/dma/ti/k3-psil.c
drivers/dma/ti/k3-udma.c
drivers/dma/uniphier-mdmac.c
drivers/dma/uniphier-xdmac.c
drivers/dma/xilinx/xdma-regs.h
drivers/dma/xilinx/xdma.c
drivers/dma/xilinx/xilinx_dpdma.c
drivers/dpll/dpll_core.c
drivers/dpll/dpll_core.h
drivers/dpll/dpll_netlink.c
drivers/edac/edac_device.h
drivers/edac/edac_device_sysfs.c
drivers/edac/edac_module.c
drivers/edac/edac_pci_sysfs.c
drivers/edac/versal_edac.c
drivers/extcon/extcon-qcom-spmi-misc.c
drivers/extcon/extcon-usbc-tusb320.c
drivers/extcon/extcon.c
drivers/firewire/core-card.c
drivers/firewire/core-device.c
drivers/firmware/arm_ffa/driver.c
drivers/firmware/arm_scmi/clock.c
drivers/firmware/arm_scmi/common.h
drivers/firmware/arm_scmi/driver.c
drivers/firmware/arm_scmi/mailbox.c
drivers/firmware/arm_scmi/perf.c
drivers/firmware/arm_scmi/raw_mode.c
drivers/firmware/arm_scmi/shmem.c
drivers/firmware/arm_scpi.c
drivers/firmware/efi/arm-runtime.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/efi-init.c
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/alignedmem.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/kaslr.c
drivers/firmware/efi/libstub/randomalloc.c
drivers/firmware/efi/libstub/x86-stub.c
drivers/firmware/efi/libstub/x86-stub.h
drivers/firmware/efi/libstub/zboot.c
drivers/firmware/efi/riscv-runtime.c
drivers/firmware/imx/imx-dsp.c
drivers/firmware/mtk-adsp-ipc.c
drivers/firmware/qemu_fw_cfg.c
drivers/firmware/raspberrypi.c
drivers/firmware/stratix10-rsu.c
drivers/firmware/stratix10-svc.c
drivers/firmware/sysfb.c
drivers/firmware/turris-mox-rwtm.c
drivers/firmware/xilinx/zynqmp.c
drivers/fpga/altera-fpga2sdram.c
drivers/fpga/altera-freeze-bridge.c
drivers/fpga/altera-hps2fpga.c
drivers/fpga/dfl-afu-main.c
drivers/fpga/dfl-fme-br.c
drivers/fpga/dfl-fme-main.c
drivers/fpga/dfl-fme-region.c
drivers/fpga/dfl.c
drivers/fpga/intel-m10-bmc-sec-update.c
drivers/fpga/of-fpga-region.c
drivers/fpga/socfpga-a10.c
drivers/fpga/stratix10-soc.c
drivers/fpga/xilinx-pr-decoupler.c
drivers/fpga/zynq-fpga.c
drivers/gnss/serial.c
drivers/gnss/sirf.c
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpio-en7523.c
drivers/gpio/gpio-mlxbf3.c
drivers/gpio/gpio-rtd.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib-sysfs.c
drivers/gpio/gpiolib-sysfs.h
drivers/gpio/gpiolib.c
drivers/gpio/gpiolib.h
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
drivers/gpu/drm/amd/amdgpu/cik_ih.c
drivers/gpu/drm/amd/amdgpu/cz_ih.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/iceland_ih.c
drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
drivers/gpu/drm/amd/amdgpu/si_ih.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_state.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
drivers/gpu/drm/amd/display/dc/link/link_validation.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/include/audio_types.h
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.h
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/amdgpu_reg_state.h
drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/ast/ast_i2c.c
drivers/gpu/drm/bridge/analogix/anx7625.c
drivers/gpu/drm/bridge/analogix/anx7625.h
drivers/gpu/drm/bridge/parade-ps8640.c
drivers/gpu/drm/bridge/samsung-dsim.c
drivers/gpu/drm/bridge/sii902x.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/display/drm_dp_helper.c
drivers/gpu/drm/display/drm_dp_mst_topology.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/intel_gmbus.c
drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_backlight.c
drivers/gpu/drm/i915/display/intel_cx0_phy.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_gmbus.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/display/intel_sdvo.c
drivers/gpu/drm/i915/gem/i915_gem_context_types.h
drivers/gpu/drm/i915/gt/intel_gsc.h
drivers/gpu/drm/i915/gt/uc/intel_guc.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/i915_perf_types.h
drivers/gpu/drm/i915/intel_gvt.c
drivers/gpu/drm/loongson/lsdc_i2c.c
drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
drivers/gpu/drm/mgag200/mgag200_i2c.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
drivers/gpu/drm/msm/dp/dp_ctrl.c
drivers/gpu/drm/msm/dp/dp_link.c
drivers/gpu/drm/msm/dp/dp_reg.h
drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
drivers/gpu/drm/msm/msm_mdss.c
drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/nouveau/nouveau_vmm.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/rockchip/inno_hdmi.c
drivers/gpu/drm/rockchip/rk3066_hdmi.c
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/solomon/ssd130x.c
drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tests/drm_mm_test.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/v3d/v3d_debugfs.c
drivers/gpu/drm/v3d/v3d_submit.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/xe/Kconfig
drivers/gpu/drm/xe/Makefile
drivers/gpu/drm/xe/abi/guc_actions_abi.h
drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
drivers/gpu/drm/xe/abi/guc_klvs_abi.h
drivers/gpu/drm/xe/abi/guc_messages_abi.h
drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
drivers/gpu/drm/xe/tests/xe_bo.c
drivers/gpu/drm/xe/tests/xe_migrate.c
drivers/gpu/drm/xe/tests/xe_wa_test.c
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_device_types.h
drivers/gpu/drm/xe/xe_display.c
drivers/gpu/drm/xe/xe_dma_buf.c
drivers/gpu/drm/xe/xe_exec.c
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue_types.h
drivers/gpu/drm/xe/xe_gt.c
drivers/gpu/drm/xe/xe_gt_freq.c
drivers/gpu/drm/xe/xe_gt_mcr.c
drivers/gpu/drm/xe/xe_gt_pagefault.c
drivers/gpu/drm/xe/xe_guc.c
drivers/gpu/drm/xe/xe_guc_pc.c
drivers/gpu/drm/xe/xe_guc_submit.c
drivers/gpu/drm/xe/xe_hw_fence.c
drivers/gpu/drm/xe/xe_hwmon.c
drivers/gpu/drm/xe/xe_lrc.c
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_mmio.c
drivers/gpu/drm/xe/xe_pt.c
drivers/gpu/drm/xe/xe_query.c
drivers/gpu/drm/xe/xe_sched_job.c
drivers/gpu/drm/xe/xe_sync.c
drivers/gpu/drm/xe/xe_sync.h
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
drivers/gpu/drm/xe/xe_vm.c
drivers/gpu/drm/xe/xe_vm.h
drivers/gpu/drm/xe/xe_vm_types.h
drivers/greybus/gb-beagleplay.c
drivers/hid/amd-sfh-hid/Kconfig
drivers/hid/amd-sfh-hid/amd_sfh_common.h
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
drivers/hid/bpf/hid_bpf_dispatch.c
drivers/hid/bpf/hid_bpf_dispatch.h
drivers/hid/bpf/hid_bpf_jmp_table.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-nvidia-shield.c
drivers/hid/hid-steam.c
drivers/hid/hidraw.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/i2c-hid/i2c-hid-of.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/intel-ish-hid/ishtp/bus.c
drivers/hid/intel-ish-hid/ishtp/client.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hsi/controllers/omap_ssi_core.c
drivers/hv/hv_common.c
drivers/hwmon/aspeed-pwm-tacho.c
drivers/hwmon/coretemp.c
drivers/hwmon/gigabyte_waterforce.c
drivers/hwmon/npcm750-pwm-fan.c
drivers/hwmon/pmbus/mp2975.c
drivers/hwmon/pwm-fan.c
drivers/hwspinlock/hwspinlock_core.c
drivers/hwspinlock/qcom_hwspinlock.c
drivers/hwtracing/coresight/coresight-core.c
drivers/hwtracing/coresight/coresight-dummy.c
drivers/hwtracing/coresight/coresight-etm-perf.c
drivers/hwtracing/coresight/coresight-etm4x-core.c
drivers/hwtracing/coresight/coresight-etm4x.h
drivers/hwtracing/coresight/coresight-funnel.c
drivers/hwtracing/coresight/coresight-replicator.c
drivers/hwtracing/coresight/coresight-tmc-core.c
drivers/hwtracing/coresight/coresight-tmc-etr.c
drivers/hwtracing/coresight/coresight-tmc.h
drivers/hwtracing/coresight/coresight-tpda.c
drivers/hwtracing/coresight/coresight-tpda.h
drivers/hwtracing/coresight/coresight-tpdm.c
drivers/hwtracing/coresight/coresight-tpdm.h
drivers/hwtracing/coresight/coresight-trbe.c
drivers/hwtracing/coresight/coresight-trbe.h
drivers/hwtracing/coresight/ultrasoc-smb.c
drivers/hwtracing/ptt/hisi_ptt.c
drivers/hwtracing/ptt/hisi_ptt.h
drivers/i2c/busses/i2c-ali1535.c
drivers/i2c/busses/i2c-ali1563.c
drivers/i2c/busses/i2c-ali15x3.c
drivers/i2c/busses/i2c-amd756.c
drivers/i2c/busses/i2c-amd8111.c
drivers/i2c/busses/i2c-cpm.c
drivers/i2c/busses/i2c-elektor.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-ibm_iic.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-iop3xx.c
drivers/i2c/busses/i2c-isch.c
drivers/i2c/busses/i2c-kempld.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/i2c/busses/i2c-nforce2.c
drivers/i2c/busses/i2c-npcm7xx.c
drivers/i2c/busses/i2c-pasemi-pci.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-scmi.c
drivers/i2c/busses/i2c-sh7760.c
drivers/i2c/busses/i2c-sibyte.c
drivers/i2c/busses/i2c-sis5595.c
drivers/i2c/busses/i2c-sis630.c
drivers/i2c/busses/i2c-sis96x.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/i2c/busses/i2c-via.c
drivers/i2c/busses/i2c-viapro.c
drivers/i2c/busses/i2c-wmt.c
drivers/i2c/busses/scx200_acb.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-smbus.c
drivers/i2c/i2c-stub.c
drivers/i2c/muxes/i2c-mux-reg.c
drivers/i3c/master.c
drivers/i3c/master/i3c-master-cdns.c
drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
drivers/i3c/master/mipi-i3c-hci/core.c
drivers/i3c/master/mipi-i3c-hci/dma.c
drivers/i3c/master/mipi-i3c-hci/hci.h
drivers/i3c/master/svc-i3c-master.c
drivers/iio/accel/Kconfig
drivers/iio/accel/bmi088-accel-core.c
drivers/iio/accel/bmi088-accel-spi.c
drivers/iio/adc/Kconfig
drivers/iio/adc/Makefile
drivers/iio/adc/ad7091r-base.c
drivers/iio/adc/ad7091r-base.h
drivers/iio/adc/ad7091r5.c
drivers/iio/adc/ad7091r8.c [new file with mode: 0644]
drivers/iio/adc/ad9467.c
drivers/iio/adc/adi-axi-adc.c
drivers/iio/adc/max34408.c [new file with mode: 0644]
drivers/iio/adc/mcp3911.c
drivers/iio/amplifiers/hmc425a.c
drivers/iio/buffer/industrialio-buffer-dma.c
drivers/iio/chemical/Kconfig
drivers/iio/chemical/Makefile
drivers/iio/chemical/ags02ma.c [new file with mode: 0644]
drivers/iio/chemical/pms7003.c
drivers/iio/chemical/scd30_serial.c
drivers/iio/chemical/sps30_serial.c
drivers/iio/dac/Kconfig
drivers/iio/dac/Makefile
drivers/iio/dac/ad5791.c
drivers/iio/dac/mcp4821.c [new file with mode: 0644]
drivers/iio/frequency/adf4377.c
drivers/iio/frequency/admv1014.c
drivers/iio/humidity/hdc3020.c [new file with mode: 0644]
drivers/iio/imu/Kconfig
drivers/iio/imu/Makefile
drivers/iio/imu/adis.c
drivers/iio/imu/bmi323/Kconfig [new file with mode: 0644]
drivers/iio/imu/bmi323/Makefile [new file with mode: 0644]
drivers/iio/imu/bmi323/bmi323.h [new file with mode: 0644]
drivers/iio/imu/bmi323/bmi323_core.c [new file with mode: 0644]
drivers/iio/imu/bmi323/bmi323_i2c.c [new file with mode: 0644]
drivers/iio/imu/bmi323/bmi323_spi.c [new file with mode: 0644]
drivers/iio/imu/bno055/bno055_ser_core.c
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-core.c
drivers/iio/light/Kconfig
drivers/iio/light/Makefile
drivers/iio/light/isl76682.c [new file with mode: 0644]
drivers/iio/light/ltr390.c [new file with mode: 0644]
drivers/iio/light/ltrf216a.c
drivers/iio/light/pa12203001.c
drivers/iio/light/rohm-bu27008.c
drivers/iio/light/veml6075.c [new file with mode: 0644]
drivers/iio/magnetometer/tmag5273.c
drivers/iio/pressure/Kconfig
drivers/iio/pressure/Makefile
drivers/iio/pressure/bmp280-core.c
drivers/iio/pressure/bmp280-i2c.c
drivers/iio/pressure/bmp280-spi.c
drivers/iio/pressure/bmp280.h
drivers/iio/pressure/hsc030pa.c [new file with mode: 0644]
drivers/iio/pressure/hsc030pa.h [new file with mode: 0644]
drivers/iio/pressure/hsc030pa_i2c.c [new file with mode: 0644]
drivers/iio/pressure/hsc030pa_spi.c [new file with mode: 0644]
drivers/iio/proximity/irsd200.c
drivers/iio/proximity/sx9324.c
drivers/iio/resolver/ad2s1210.c
drivers/iio/temperature/Kconfig
drivers/iio/temperature/Makefile
drivers/iio/temperature/mcp9600.c [new file with mode: 0644]
drivers/iio/temperature/mlx90635.c [new file with mode: 0644]
drivers/input/input.c
drivers/input/joystick/Kconfig
drivers/input/joystick/Makefile
drivers/input/joystick/adafruit-seesaw.c [new file with mode: 0644]
drivers/input/joystick/as5011.c
drivers/input/joystick/pxrc.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/cap11xx.c
drivers/input/keyboard/gpio_keys.c
drivers/input/keyboard/omap-keypad.c
drivers/input/keyboard/omap4-keypad.c
drivers/input/keyboard/qt1050.c
drivers/input/keyboard/tca6416-keypad.c
drivers/input/misc/da7280.c
drivers/input/misc/da9063_onkey.c
drivers/input/misc/ims-pcu.c
drivers/input/misc/iqs269a.c
drivers/input/misc/max77693-haptic.c
drivers/input/misc/pwm-beeper.c
drivers/input/misc/pwm-vibra.c
drivers/input/mouse/bcm5974.c
drivers/input/mouse/cyapa.c
drivers/input/mouse/cyapa_gen3.c
drivers/input/mouse/cyapa_gen5.c
drivers/input/mouse/cyapa_gen6.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/navpoint.c
drivers/input/rmi4/rmi_f01.c
drivers/input/serio/i8042-acpipnpio.h
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/hideep.c
drivers/input/touchscreen/hycon-hy46xx.c
drivers/input/touchscreen/ilitek_ts_i2c.c
drivers/input/touchscreen/iqs5xx.c
drivers/input/touchscreen/iqs7211.c
drivers/input/touchscreen/melfas_mip4.c
drivers/input/touchscreen/usbtouchscreen.c
drivers/input/touchscreen/wdt87xx_i2c.c
drivers/input/touchscreen/zforce_ts.c
drivers/input/vivaldi-fmap.c
drivers/interconnect/imx/imx8mm.c
drivers/interconnect/imx/imx8mn.c
drivers/interconnect/imx/imx8mp.c
drivers/interconnect/imx/imx8mq.c
drivers/interconnect/qcom/Kconfig
drivers/interconnect/qcom/Makefile
drivers/interconnect/qcom/icc-rpm.c
drivers/interconnect/qcom/icc-rpm.h
drivers/interconnect/qcom/msm8916.c
drivers/interconnect/qcom/msm8939.c
drivers/interconnect/qcom/msm8974.c
drivers/interconnect/qcom/msm8996.c
drivers/interconnect/qcom/osm-l3.c
drivers/interconnect/qcom/qcm2290.c
drivers/interconnect/qcom/qcs404.c
drivers/interconnect/qcom/sdm660.c
drivers/interconnect/qcom/sm6115.c [new file with mode: 0644]
drivers/interconnect/qcom/sm8650.c [new file with mode: 0644]
drivers/interconnect/qcom/sm8650.h [new file with mode: 0644]
drivers/interconnect/qcom/smd-rpm.c
drivers/interconnect/qcom/x1e80100.c [new file with mode: 0644]
drivers/interconnect/qcom/x1e80100.h [new file with mode: 0644]
drivers/interconnect/samsung/exynos.c
drivers/iommu/Kconfig
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/amd_iommu_types.h
drivers/iommu/amd/init.c
drivers/iommu/amd/io_pgtable.c
drivers/iommu/amd/io_pgtable_v2.c
drivers/iommu/amd/iommu.c
drivers/iommu/apple-dart.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
drivers/iommu/arm/arm-smmu/arm-smmu.c
drivers/iommu/arm/arm-smmu/arm-smmu.h
drivers/iommu/arm/arm-smmu/qcom_iommu.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel/debugfs.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/iommu.h
drivers/iommu/intel/nested.c
drivers/iommu/intel/pasid.c
drivers/iommu/intel/pasid.h
drivers/iommu/intel/svm.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/io-pgtable.c
drivers/iommu/iommu-sva.c
drivers/iommu/iommu.c
drivers/iommu/iommufd/hw_pagetable.c
drivers/iommu/iommufd/iommufd_private.h
drivers/iommu/iommufd/iommufd_test.h
drivers/iommu/iommufd/main.c
drivers/iommu/iommufd/selftest.c
drivers/iommu/mtk_iommu.c
drivers/iommu/mtk_iommu_v1.c
drivers/iommu/of_iommu.c
drivers/iommu/omap-iommu.c
drivers/iommu/sprd-iommu.c
drivers/iommu/virtio-iommu.c
drivers/ipack/devices/ipoctal.c
drivers/ipack/ipack.c
drivers/isdn/capi/capi.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/led-triggers.c
drivers/leds/leds-aw200xx.c
drivers/leds/leds-gpio.c
drivers/leds/leds-max5970.c [new file with mode: 0644]
drivers/leds/leds-pwm.c
drivers/leds/leds-sun50i-a100.c [new file with mode: 0644]
drivers/leds/leds-syscon.c
drivers/leds/leds-tca6507.c
drivers/leds/rgb/Kconfig
drivers/leds/rgb/leds-pwm-multicolor.c
drivers/leds/rgb/leds-qcom-lpg.c
drivers/leds/trigger/ledtrig-gpio.c
drivers/leds/trigger/ledtrig-netdev.c
drivers/leds/trigger/ledtrig-panic.c
drivers/leds/trigger/ledtrig-tty.c
drivers/mailbox/arm_mhuv2.c
drivers/mailbox/bcm-flexrm-mailbox.c
drivers/mailbox/bcm-pdc-mailbox.c
drivers/mailbox/imx-mailbox.c
drivers/mailbox/mailbox-test.c
drivers/mailbox/mtk-cmdq-mailbox.c
drivers/mailbox/omap-mailbox.c
drivers/mailbox/qcom-apcs-ipc-mailbox.c
drivers/mailbox/qcom-ipcc.c
drivers/mailbox/stm32-ipcc.c
drivers/mailbox/sun6i-msgbox.c
drivers/mailbox/tegra-hsp.c
drivers/mailbox/zynqmp-ipi-mailbox.c
drivers/mcb/mcb-core.c
drivers/md/dm-core.h
drivers/md/dm-crypt.c
drivers/md/dm-ioctl.c
drivers/md/dm-stats.c
drivers/md/dm-table.c
drivers/md/dm-verity-target.c
drivers/md/dm-verity.h
drivers/md/dm-writecache.c
drivers/md/md.c
drivers/md/raid1.c
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/common/videobuf2/videobuf2-v4l2.c
drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
drivers/media/pci/solo6x10/solo6x10-offsets.h
drivers/media/platform/chips-media/wave5/wave5-vpu.c
drivers/media/rc/pwm-ir-tx.c
drivers/memory/tegra/tegra186.c
drivers/mfd/Kconfig
drivers/mfd/ab8500-sysctrl.c
drivers/mfd/cros_ec_dev.c
drivers/mfd/cs42l43-sdw.c
drivers/mfd/da9062-core.c
drivers/mfd/exynos-lpass.c
drivers/mfd/fsl-imx25-tsadc.c
drivers/mfd/hi655x-pmic.c
drivers/mfd/intel-lpss-acpi.c
drivers/mfd/intel-lpss-pci.c
drivers/mfd/intel-lpss.c
drivers/mfd/intel-lpss.h
drivers/mfd/kempld-core.c
drivers/mfd/mcp-sa11x0.c
drivers/mfd/mxs-lradc.c
drivers/mfd/omap-usb-host.c
drivers/mfd/omap-usb-tll.c
drivers/mfd/pcf50633-adc.c
drivers/mfd/qcom-pm8xxx.c
drivers/mfd/qcom-spmi-pmic.c
drivers/mfd/rave-sp.c
drivers/mfd/rk8xx-core.c
drivers/mfd/sm501.c
drivers/mfd/stm32-timers.c
drivers/mfd/syscon.c
drivers/mfd/ti_am335x_tscadc.c
drivers/mfd/tps65086.c
drivers/mfd/tps65911-comparator.c
drivers/mfd/tps6594-core.c
drivers/mfd/twl4030-audio.c
drivers/mfd/twl6030-irq.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/bcm-vk/bcm_vk_tty.c
drivers/misc/cardreader/Makefile
drivers/misc/cardreader/rts5264.c [new file with mode: 0644]
drivers/misc/cardreader/rts5264.h [new file with mode: 0644]
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/cardreader/rtsx_pcr.h
drivers/misc/dw-xdata-pcie.c
drivers/misc/eeprom/at24.c
drivers/misc/eeprom/ee1004.c
drivers/misc/fastrpc.c
drivers/misc/lis3lv02d/lis3lv02d_i2c.c
drivers/misc/mei/Kconfig
drivers/misc/mei/Makefile
drivers/misc/mei/gsc_proxy/Kconfig
drivers/misc/mei/hdcp/Kconfig
drivers/misc/mei/platform-vsc.c [new file with mode: 0644]
drivers/misc/mei/pxp/Kconfig
drivers/misc/mei/vsc-fw-loader.c [new file with mode: 0644]
drivers/misc/mei/vsc-tp.c [new file with mode: 0644]
drivers/misc/mei/vsc-tp.h [new file with mode: 0644]
drivers/misc/nsm.c [new file with mode: 0644]
drivers/misc/open-dice.c
drivers/misc/pci_endpoint_test.c
drivers/misc/pvpanic/pvpanic-mmio.c
drivers/misc/pvpanic/pvpanic-pci.c
drivers/misc/pvpanic/pvpanic.c
drivers/misc/pvpanic/pvpanic.h
drivers/misc/vmw_vmci/vmci_handle_array.c
drivers/misc/vmw_vmci/vmci_handle_array.h
drivers/mmc/core/sdio_uart.c
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/mmc/host/sdhci-pci-o2micro.c
drivers/mtd/mtdcore.c
drivers/mtd/ubi/Kconfig
drivers/mtd/ubi/block.c
drivers/mtd/ubi/debug.c
drivers/mtd/ubi/debug.h
drivers/mtd/ubi/io.c
drivers/mtd/ubi/ubi.h
drivers/mux/mmio.c
drivers/net/amt.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/flexcan/flexcan-core.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/qca/qca8k-8xxx.c
drivers/net/dsa/vitesse-vsc73xx-core.c
drivers/net/ethernet/8390/8390.c
drivers/net/ethernet/8390/8390p.c
drivers/net/ethernet/8390/apne.c
drivers/net/ethernet/8390/hydra.c
drivers/net/ethernet/8390/stnic.c
drivers/net/ethernet/8390/zorro8390.c
drivers/net/ethernet/amd/pds_core/adminq.c
drivers/net/ethernet/amd/pds_core/core.c
drivers/net/ethernet/amd/pds_core/core.h
drivers/net/ethernet/amd/pds_core/debugfs.c
drivers/net/ethernet/amd/pds_core/dev.c
drivers/net/ethernet/amd/pds_core/devlink.c
drivers/net/ethernet/amd/pds_core/fw.c
drivers/net/ethernet/amd/pds_core/main.c
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.h
drivers/net/ethernet/broadcom/bcm4908_enet.c
drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
drivers/net/ethernet/broadcom/bgmac-bcma.c
drivers/net/ethernet/broadcom/bgmac-platform.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
drivers/net/ethernet/cavium/liquidio/lio_core.c
drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
drivers/net/ethernet/cirrus/ep93xx_eth.c
drivers/net/ethernet/engleder/tsnep_main.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ptp.c
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_dcb.h
drivers/net/ethernet/intel/i40e/i40e_diag.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_osdep.h
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_txrx_lib.h
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/idpf/idpf_lib.c
drivers/net/ethernet/intel/idpf/virtchnl2.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/litex/litex_liteeth.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/af/mbox.c
drivers/net/ethernet/marvell/octeontx2/af/rpm.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
drivers/net/ethernet/qualcomm/qca_uart.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_new.c
drivers/net/ethernet/wangxun/Kconfig
drivers/net/ethernet/wangxun/libwx/wx_lib.c
drivers/net/fjes/fjes_hw.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macsec.c
drivers/net/netdevsim/dev.c
drivers/net/netdevsim/netdev.c
drivers/net/phy/mediatek-ge-soc.c
drivers/net/phy/micrel.c
drivers/net/phy/sfp-bus.c
drivers/net/ppp/ppp_async.c
drivers/net/slip/slhc.c
drivers/net/slip/slip.c
drivers/net/tun.c
drivers/net/usb/hso.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/wan/slic_ds26522.c
drivers/net/wireless/ath/ar5523/ar5523.c
drivers/net/wireless/ath/ath11k/core.h
drivers/net/wireless/ath/ath11k/debugfs.c
drivers/net/wireless/ath/ath11k/debugfs.h
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
drivers/net/wireless/intersil/p54/fwio.c
drivers/net/wireless/intersil/p54/p54spi.c
drivers/net/wireless/marvell/mwifiex/usb.c
drivers/net/wireless/mediatek/mt76/mt7603/main.c
drivers/net/wireless/mediatek/mt76/mt7615/main.c
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
drivers/net/wireless/mediatek/mt76/mt7615/usb.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
drivers/net/wireless/mediatek/mt76/mt7921/main.c
drivers/net/wireless/mediatek/mt76/mt7921/pci.c
drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
drivers/net/wireless/mediatek/mt76/mt7921/usb.c
drivers/net/wireless/mediatek/mt76/mt7925/main.c
drivers/net/wireless/mediatek/mt76/mt7925/pci.c
drivers/net/wireless/mediatek/mt76/mt7925/usb.c
drivers/net/wireless/mediatek/mt76/mt792x_core.c
drivers/net/wireless/mediatek/mt76/mt792x_usb.c
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
drivers/net/wireless/mediatek/mt76/sdio.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/mediatek/mt76/util.c
drivers/net/wireless/microchip/wilc1000/netdev.c
drivers/net/wireless/microchip/wilc1000/sdio.c
drivers/net/wireless/microchip/wilc1000/spi.c
drivers/net/wireless/ti/wl1251/sdio.c
drivers/net/wireless/ti/wl1251/spi.c
drivers/net/wireless/ti/wl12xx/main.c
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/sdio.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/xen-netback/netback.c
drivers/nfc/pn533/uart.c
drivers/nfc/s3fwrn5/uart.c
drivers/nvdimm/virtio_pmem.c
drivers/nvme/common/auth.c
drivers/nvme/common/keyring.c
drivers/nvme/host/apple.c
drivers/nvme/host/auth.c
drivers/nvme/host/constants.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/pr.c
drivers/nvme/host/rdma.c
drivers/nvme/host/sysfs.c
drivers/nvme/host/tcp.c
drivers/nvme/target/core.c
drivers/nvme/target/discovery.c
drivers/nvme/target/fc.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/loop.c
drivers/nvme/target/rdma.c
drivers/nvme/target/tcp.c
drivers/nvme/target/trace.c
drivers/nvme/target/trace.h
drivers/nvmem/Kconfig
drivers/nvmem/Makefile
drivers/nvmem/core.c
drivers/nvmem/imx-ocotp.c
drivers/nvmem/internals.h [new file with mode: 0644]
drivers/nvmem/layouts.c [new file with mode: 0644]
drivers/nvmem/layouts/Kconfig
drivers/nvmem/layouts/onie-tlv.c
drivers/nvmem/layouts/sl28vpd.c
drivers/nvmem/mtk-efuse.c
drivers/nvmem/stm32-romem.c
drivers/nvmem/u-boot-env.c
drivers/of/base.c
drivers/of/device.c
drivers/of/overlay.c
drivers/of/platform.c
drivers/of/property.c
drivers/of/unittest-data/tests-phandle.dtsi
drivers/of/unittest.c
drivers/parisc/power.c
drivers/parport/parport_serial.c
drivers/parport/share.c
drivers/pci/bus.c
drivers/pci/controller/cadence/Kconfig
drivers/pci/controller/cadence/pci-j721e.c
drivers/pci/controller/cadence/pcie-cadence-ep.c
drivers/pci/controller/cadence/pcie-cadence.h
drivers/pci/controller/dwc/Kconfig
drivers/pci/controller/dwc/pci-dra7xx.c
drivers/pci/controller/dwc/pci-exynos.c
drivers/pci/controller/dwc/pci-imx6.c
drivers/pci/controller/dwc/pci-keystone.c
drivers/pci/controller/dwc/pci-layerscape-ep.c
drivers/pci/controller/dwc/pci-layerscape.c
drivers/pci/controller/dwc/pci-meson.c
drivers/pci/controller/dwc/pcie-al.c
drivers/pci/controller/dwc/pcie-armada8k.c
drivers/pci/controller/dwc/pcie-artpec6.c
drivers/pci/controller/dwc/pcie-bt1.c
drivers/pci/controller/dwc/pcie-designware-ep.c
drivers/pci/controller/dwc/pcie-designware-host.c
drivers/pci/controller/dwc/pcie-designware-plat.c
drivers/pci/controller/dwc/pcie-designware.h
drivers/pci/controller/dwc/pcie-dw-rockchip.c
drivers/pci/controller/dwc/pcie-fu740.c
drivers/pci/controller/dwc/pcie-histb.c
drivers/pci/controller/dwc/pcie-intel-gw.c
drivers/pci/controller/dwc/pcie-keembay.c
drivers/pci/controller/dwc/pcie-kirin.c
drivers/pci/controller/dwc/pcie-qcom-ep.c
drivers/pci/controller/dwc/pcie-qcom.c
drivers/pci/controller/dwc/pcie-rcar-gen4.c
drivers/pci/controller/dwc/pcie-spear13xx.c
drivers/pci/controller/dwc/pcie-tegra194.c
drivers/pci/controller/dwc/pcie-uniphier-ep.c
drivers/pci/controller/dwc/pcie-uniphier.c
drivers/pci/controller/dwc/pcie-visconti.c
drivers/pci/controller/pci-host-common.c
drivers/pci/controller/pci-host-generic.c
drivers/pci/controller/pcie-brcmstb.c
drivers/pci/controller/pcie-iproc-platform.c
drivers/pci/controller/pcie-mediatek-gen3.c
drivers/pci/controller/pcie-mediatek.c
drivers/pci/controller/pcie-rcar-ep.c
drivers/pci/controller/pcie-rcar-host.c
drivers/pci/controller/pcie-rockchip-ep.c
drivers/pci/controller/pcie-rockchip-host.c
drivers/pci/controller/pcie-xilinx-dma-pl.c
drivers/pci/controller/pcie-xilinx-nwl.c
drivers/pci/controller/vmd.c
drivers/pci/endpoint/functions/pci-epf-mhi.c
drivers/pci/endpoint/functions/pci-epf-ntb.c
drivers/pci/endpoint/functions/pci-epf-test.c
drivers/pci/endpoint/functions/pci-epf-vntb.c
drivers/pci/endpoint/pci-epc-core.c
drivers/pci/iov.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/aer.c
drivers/pci/pcie/aspm.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/pci/setup-res.c
drivers/pci/switch/switchtec.c
drivers/pcmcia/bcm63xx_pcmcia.c
drivers/pcmcia/db1xxx_ss.c
drivers/pcmcia/electra_cf.c
drivers/pcmcia/omap_cf.c
drivers/pcmcia/pxa2xx_base.c
drivers/pcmcia/sa1100_generic.c
drivers/pcmcia/xxs1500_ss.c
drivers/phy/mediatek/phy-mtk-tphy.c
drivers/phy/microchip/lan966x_serdes.c
drivers/phy/phy-can-transceiver.c
drivers/phy/phy-core.c
drivers/phy/qualcomm/phy-qcom-qmp-combo.c
drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v7.h [new file with mode: 0644]
drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h [new file with mode: 0644]
drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v7.h [new file with mode: 0644]
drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h
drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h [new file with mode: 0644]
drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h [new file with mode: 0644]
drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
drivers/phy/qualcomm/phy-qcom-qmp-usb.c
drivers/phy/qualcomm/phy-qcom-qmp.h
drivers/phy/renesas/Kconfig
drivers/phy/renesas/phy-rcar-gen3-usb2.c
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
drivers/phy/ti/phy-gmii-sel.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/phy/ti/phy-omap-usb2.c
drivers/pinctrl/Kconfig
drivers/pinctrl/Makefile
drivers/pinctrl/bcm/pinctrl-ns.c
drivers/pinctrl/core.c
drivers/pinctrl/core.h
drivers/pinctrl/devicetree.c
drivers/pinctrl/freescale/pinctrl-imx.c
drivers/pinctrl/intel/Kconfig
drivers/pinctrl/intel/Makefile
drivers/pinctrl/intel/pinctrl-alderlake.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-broxton.c
drivers/pinctrl/intel/pinctrl-cannonlake.c
drivers/pinctrl/intel/pinctrl-cedarfork.c
drivers/pinctrl/intel/pinctrl-denverton.c
drivers/pinctrl/intel/pinctrl-elkhartlake.c
drivers/pinctrl/intel/pinctrl-emmitsburg.c
drivers/pinctrl/intel/pinctrl-geminilake.c
drivers/pinctrl/intel/pinctrl-icelake.c
drivers/pinctrl/intel/pinctrl-intel-platform.c [new file with mode: 0644]
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/intel/pinctrl-intel.h
drivers/pinctrl/intel/pinctrl-jasperlake.c
drivers/pinctrl/intel/pinctrl-lakefield.c
drivers/pinctrl/intel/pinctrl-lewisburg.c
drivers/pinctrl/intel/pinctrl-lynxpoint.c
drivers/pinctrl/intel/pinctrl-meteorlake.c
drivers/pinctrl/intel/pinctrl-meteorpoint.c [new file with mode: 0644]
drivers/pinctrl/intel/pinctrl-sunrisepoint.c
drivers/pinctrl/intel/pinctrl-tangier.c
drivers/pinctrl/intel/pinctrl-tigerlake.c
drivers/pinctrl/mediatek/pinctrl-moore.c
drivers/pinctrl/mediatek/pinctrl-moore.h
drivers/pinctrl/mediatek/pinctrl-mt2701.c
drivers/pinctrl/mediatek/pinctrl-mt2712.c
drivers/pinctrl/mediatek/pinctrl-mt6795.c
drivers/pinctrl/mediatek/pinctrl-mt8167.c
drivers/pinctrl/mediatek/pinctrl-mt8173.c
drivers/pinctrl/mediatek/pinctrl-mt8183.c
drivers/pinctrl/mediatek/pinctrl-mt8186.c
drivers/pinctrl/mediatek/pinctrl-mt8188.c
drivers/pinctrl/mediatek/pinctrl-mt8192.c
drivers/pinctrl/mediatek/pinctrl-mt8195.c
drivers/pinctrl/mediatek/pinctrl-mt8365.c
drivers/pinctrl/mediatek/pinctrl-mt8516.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/mediatek/pinctrl-paris.c
drivers/pinctrl/mediatek/pinctrl-paris.h
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
drivers/pinctrl/pinconf-generic.c
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinconf.h
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-as3722.c
drivers/pinctrl/pinctrl-cy8c95x0.c
drivers/pinctrl/pinctrl-equilibrium.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/pinctrl-keembay.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-tps6594.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-utils.c
drivers/pinctrl/pinctrl-utils.h
drivers/pinctrl/pinmux.c
drivers/pinctrl/pinmux.h
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/Kconfig.msm
drivers/pinctrl/qcom/Makefile
drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-msm.h
drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sm4450.c [new file with mode: 0644]
drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c [new file with mode: 0644]
drivers/pinctrl/qcom/pinctrl-sm8650.c [new file with mode: 0644]
drivers/pinctrl/qcom/pinctrl-x1e80100.c [new file with mode: 0644]
drivers/pinctrl/renesas/pinctrl-rza1.c
drivers/pinctrl/renesas/pinctrl-rza2.c
drivers/pinctrl/renesas/pinctrl-rzg2l.c
drivers/pinctrl/renesas/pinctrl-rzv2m.c
drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-exynos.h
drivers/pinctrl/samsung/pinctrl-samsung.c
drivers/pinctrl/samsung/pinctrl-samsung.h
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/pinctrl/tegra/pinctrl-tegra.c
drivers/platform/chrome/cros_ec_uart.c
drivers/platform/goldfish/goldfish_pipe.c
drivers/platform/mellanox/mlxbf-pmc.c
drivers/platform/mellanox/mlxbf-tmfifo.c
drivers/platform/surface/aggregator/bus.c
drivers/platform/surface/aggregator/controller.h
drivers/platform/surface/aggregator/core.c
drivers/platform/surface/aggregator/ssh_packet_layer.c
drivers/platform/surface/aggregator/ssh_packet_layer.h
drivers/platform/x86/amd/pmf/Kconfig
drivers/platform/x86/amd/pmf/spc.c
drivers/platform/x86/amd/pmf/tee-if.c
drivers/platform/x86/intel/chtwc_int33fe.c
drivers/platform/x86/intel/ifs/load.c
drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
drivers/platform/x86/intel/wmi/sbl-fw-update.c
drivers/platform/x86/lenovo-yogabook.c
drivers/platform/x86/p2sb.c
drivers/platform/x86/touchscreen_dmi.c
drivers/platform/x86/wmi.c
drivers/pmdomain/core.c
drivers/pmdomain/mediatek/mtk-pm-domains.c
drivers/pmdomain/renesas/r8a77980-sysc.c
drivers/pnp/driver.c
drivers/power/reset/as3722-poweroff.c
drivers/power/reset/at91-poweroff.c
drivers/power/reset/at91-reset.c
drivers/power/reset/at91-sama5d2_shdwc.c
drivers/power/reset/atc260x-poweroff.c
drivers/power/reset/gpio-restart.c
drivers/power/reset/ltc2952-poweroff.c
drivers/power/reset/mt6323-poweroff.c
drivers/power/reset/pwr-mlxbf.c
drivers/power/reset/qnap-poweroff.c
drivers/power/reset/regulator-poweroff.c
drivers/power/reset/restart-poweroff.c
drivers/power/reset/rmobile-reset.c
drivers/power/reset/syscon-poweroff.c
drivers/power/reset/tps65086-restart.c
drivers/power/supply/bq24190_charger.c
drivers/power/supply/bq256xx_charger.c
drivers/power/supply/bq27xxx_battery.c
drivers/power/supply/bq27xxx_battery_i2c.c
drivers/power/supply/cw2015_battery.c
drivers/power/supply/power_supply_core.c
drivers/power/supply/qcom_pmi8998_charger.c
drivers/pwm/core.c
drivers/pwm/pwm-atmel-hlcdc.c
drivers/pwm/pwm-atmel-tcb.c
drivers/pwm/pwm-bcm-kona.c
drivers/pwm/pwm-bcm2835.c
drivers/pwm/pwm-berlin.c
drivers/pwm/pwm-brcmstb.c
drivers/pwm/pwm-crc.c
drivers/pwm/pwm-cros-ec.c
drivers/pwm/pwm-dwc.c
drivers/pwm/pwm-img.c
drivers/pwm/pwm-imx-tpm.c
drivers/pwm/pwm-jz4740.c
drivers/pwm/pwm-lpc18xx-sct.c
drivers/pwm/pwm-lpc32xx.c
drivers/pwm/pwm-mediatek.c
drivers/pwm/pwm-meson.c
drivers/pwm/pwm-omap-dmtimer.c
drivers/pwm/pwm-renesas-tpu.c
drivers/pwm/pwm-rockchip.c
drivers/pwm/pwm-samsung.c
drivers/pwm/pwm-sti.c
drivers/pwm/pwm-stm32-lp.c
drivers/pwm/pwm-stm32.c
drivers/pwm/pwm-stmpe.c
drivers/pwm/pwm-tegra.c
drivers/pwm/pwm-tiecap.c
drivers/pwm/pwm-tiehrpwm.c
drivers/pwm/pwm-twl-led.c
drivers/pwm/pwm-twl.c
drivers/pwm/pwm-vt8500.c
drivers/pwm/sysfs.c
drivers/regulator/max5970-regulator.c
drivers/regulator/pwm-regulator.c
drivers/regulator/ti-abb-regulator.c
drivers/remoteproc/imx_dsp_rproc.c
drivers/remoteproc/qcom_q6v5_pas.c
drivers/remoteproc/ti_k3_dsp_remoteproc.c
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/class.c
drivers/rtc/rtc-ac100.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-da9063.c
drivers/rtc/rtc-ds3232.c
drivers/rtc/rtc-ma35d1.c [new file with mode: 0644]
drivers/rtc/rtc-max31335.c [new file with mode: 0644]
drivers/rtc/rtc-mc146818-lib.c
drivers/rtc/rtc-nct3018y.c
drivers/rtc/rtc-rv8803.c
drivers/rtc/rtc-tps6594.c [new file with mode: 0644]
drivers/s390/char/con3215.c
drivers/s390/char/con3270.c
drivers/s390/char/uvdevice.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/s390/crypto/vfio_ap_private.h
drivers/s390/net/qeth_l3_main.c
drivers/scsi/fcoe/fcoe_sysfs.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/initio.c
drivers/scsi/isci/request.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/mpi3mr/mpi3mr_fw.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_priv.h
drivers/scsi/smartpqi/smartpqi.h
drivers/scsi/smartpqi/smartpqi_init.c
drivers/scsi/storvsc_drv.c
drivers/scsi/virtio_scsi.c
drivers/sh/maple/maple.c
drivers/soc/apple/mailbox.c
drivers/soc/xilinx/xlnx_event_manager.c
drivers/soc/xilinx/zynqmp_power.c
drivers/soundwire/amd_manager.c
drivers/soundwire/bus.c
drivers/soundwire/debugfs.c
drivers/soundwire/generic_bandwidth_allocation.c
drivers/soundwire/intel_auxdevice.c
drivers/soundwire/master.c
drivers/soundwire/qcom.c
drivers/soundwire/slave.c
drivers/soundwire/stream.c
drivers/spi/spi-bcm-qspi.c
drivers/spi/spi-cadence.c
drivers/spi/spi-coldfire-qspi.c
drivers/spi/spi-cs42l43.c
drivers/spi/spi-hisi-sfc-v3xx.c
drivers/spi/spi-imx.c
drivers/spi/spi-intel-pci.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi.c
drivers/spmi/Makefile
drivers/spmi/hisi-spmi-controller.c
drivers/spmi/spmi-devres.c [new file with mode: 0644]
drivers/spmi/spmi-mtk-pmif.c
drivers/spmi/spmi-pmic-arb.c
drivers/spmi/spmi.c
drivers/staging/greybus/i2c.c
drivers/staging/rtl8192e/Makefile
drivers/staging/rtl8192e/dot11d.c [deleted file]
drivers/staging/rtl8192e/dot11d.h [deleted file]
drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
drivers/staging/rtl8192e/rtl8192e/rtl_core.c
drivers/staging/rtl8192e/rtl8192e/rtl_core.h
drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
drivers/staging/rtl8192e/rtl819x_BAProc.c
drivers/staging/rtl8192e/rtl819x_HT.h
drivers/staging/rtl8192e/rtl819x_HTProc.c
drivers/staging/rtl8192e/rtl819x_Qos.h
drivers/staging/rtl8192e/rtl819x_TS.h
drivers/staging/rtl8192e/rtl819x_TSProc.c
drivers/staging/rtl8192e/rtllib.h
drivers/staging/rtl8192e/rtllib_module.c
drivers/staging/rtl8192e/rtllib_rx.c
drivers/staging/rtl8192e/rtllib_softmac.c
drivers/staging/rtl8192e/rtllib_softmac_wx.c
drivers/staging/rtl8192e/rtllib_tx.c
drivers/staging/rtl8192e/rtllib_wx.c
drivers/staging/rtl8712/os_intfs.c
drivers/staging/rtl8712/rtl8712_efuse.c
drivers/staging/rtl8712/rtl8712_recv.c
drivers/staging/rtl8712/rtl8712_xmit.c
drivers/staging/rtl8712/rtl871x_cmd.c
drivers/staging/rtl8712/rtl871x_cmd.h
drivers/staging/rtl8712/rtl871x_ioctl_linux.c
drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
drivers/staging/vc04_services/interface/TODO
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
drivers/staging/vme_user/Kconfig
drivers/staging/vme_user/vme.c
drivers/staging/vt6655/card.c
drivers/staging/vt6655/card.h
drivers/staging/vt6655/device.h
drivers/staging/vt6655/device_main.c
drivers/staging/vt6655/rxtx.c
drivers/target/target_core_device.c
drivers/target/target_core_transport.c
drivers/thermal/Kconfig
drivers/thermal/Makefile
drivers/thermal/gov_power_allocator.c
drivers/thermal/intel/intel_hfi.c
drivers/thermal/intel/intel_powerclamp.c
drivers/thermal/loongson2_thermal.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_core.h
drivers/thermal/thermal_debugfs.c [new file with mode: 0644]
drivers/thermal/thermal_debugfs.h [new file with mode: 0644]
drivers/thermal/thermal_helpers.c
drivers/thermal/thermal_netlink.c
drivers/thermal/thermal_netlink.h
drivers/thermal/thermal_trip.c
drivers/thunderbolt/domain.c
drivers/thunderbolt/icm.c
drivers/thunderbolt/nhi.c
drivers/thunderbolt/nhi.h
drivers/thunderbolt/switch.c
drivers/thunderbolt/tb.c
drivers/thunderbolt/tb.h
drivers/thunderbolt/tmu.c
drivers/thunderbolt/tunnel.c
drivers/thunderbolt/xdomain.c
drivers/tty/amiserial.c
drivers/tty/ehv_bytechan.c
drivers/tty/goldfish.c
drivers/tty/hvc/Kconfig
drivers/tty/hvc/hvc_console.c
drivers/tty/hvc/hvc_console.h
drivers/tty/hvc/hvc_dcc.c
drivers/tty/hvc/hvc_iucv.c
drivers/tty/hvc/hvc_opal.c
drivers/tty/hvc/hvc_riscv_sbi.c
drivers/tty/hvc/hvc_rtas.c
drivers/tty/hvc/hvc_udbg.c
drivers/tty/hvc/hvc_vio.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/hvc/hvsi_lib.c
drivers/tty/ipwireless/main.h
drivers/tty/mips_ejtag_fdc.c
drivers/tty/moxa.c
drivers/tty/mxser.c
drivers/tty/n_gsm.c
drivers/tty/n_hdlc.c
drivers/tty/nozomi.c
drivers/tty/serdev/core.c
drivers/tty/serdev/serdev-ttyport.c
drivers/tty/serial/8250/8250_aspeed_vuart.c
drivers/tty/serial/8250/8250_bcm2835aux.c
drivers/tty/serial/8250/8250_bcm7271.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/8250_dwlib.c
drivers/tty/serial/8250/8250_em.c
drivers/tty/serial/8250/8250_exar.c
drivers/tty/serial/8250/8250_fsl.c
drivers/tty/serial/8250/8250_ingenic.c
drivers/tty/serial/8250/8250_ioc3.c
drivers/tty/serial/8250/8250_lpc18xx.c
drivers/tty/serial/8250/8250_lpss.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/8250/8250_of.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_pci1xxxx.c
drivers/tty/serial/8250/8250_pxa.c
drivers/tty/serial/8250/8250_tegra.c
drivers/tty/serial/8250/8250_uniphier.c
drivers/tty/serial/8250/serial_cs.c
drivers/tty/serial/Kconfig
drivers/tty/serial/altera_jtaguart.c
drivers/tty/serial/altera_uart.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/apbuart.c
drivers/tty/serial/ar933x_uart.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/clps711x.c
drivers/tty/serial/cpm_uart.c
drivers/tty/serial/digicolor-usart.c
drivers/tty/serial/earlycon-riscv-sbi.c
drivers/tty/serial/esp32_acm.c
drivers/tty/serial/esp32_uart.c
drivers/tty/serial/fsl_linflexuart.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/imx.c
drivers/tty/serial/jsm/jsm.h
drivers/tty/serial/jsm/jsm_cls.c
drivers/tty/serial/jsm/jsm_neo.c
drivers/tty/serial/lantiq.c
drivers/tty/serial/liteuart.c
drivers/tty/serial/lpc32xx_hs.c
drivers/tty/serial/ma35d1_serial.c
drivers/tty/serial/max310x.c
drivers/tty/serial/mcf.c
drivers/tty/serial/meson_uart.c
drivers/tty/serial/milbeaut_usio.c
drivers/tty/serial/mpc52xx_uart.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/omap-serial.c
drivers/tty/serial/owl-uart.c
drivers/tty/serial/pic32_uart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/rda-uart.c
drivers/tty/serial/rp2.c
drivers/tty/serial/sa1100.c
drivers/tty/serial/samsung_tty.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/sccnxp.c
drivers/tty/serial/serial-tegra.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/serial_txx9.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/sifive.c
drivers/tty/serial/sprd_serial.c
drivers/tty/serial/st-asc.c
drivers/tty/serial/stm32-usart.c
drivers/tty/serial/sunhv.c
drivers/tty/serial/sunplus-uart.c
drivers/tty/serial/sunsab.c
drivers/tty/serial/sunsu.c
drivers/tty/serial/sunzilog.c
drivers/tty/serial/tegra-tcu.c
drivers/tty/serial/timbuart.c
drivers/tty/serial/uartlite.c
drivers/tty/serial/ucc_uart.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/sysrq.c
drivers/tty/tty_io.c
drivers/tty/tty_ioctl.c
drivers/tty/tty_port.c
drivers/tty/vt/consolemap.c
drivers/tty/vt/keyboard.c
drivers/ufs/core/ufshcd.c
drivers/ufs/host/ufs-qcom.c
drivers/uio/uio.c
drivers/usb/atm/ueagle-atm.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/cdns3/cdns3-gadget.h
drivers/usb/cdns3/cdns3-plat.c
drivers/usb/cdns3/cdns3-starfive.c
drivers/usb/cdns3/cdnsp-debug.h
drivers/usb/chipidea/ci.h
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/udc.c
drivers/usb/class/cdc-acm.c
drivers/usb/common/ulpi.c
drivers/usb/core/driver.c
drivers/usb/core/generic.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/core/usb.c
drivers/usb/core/usb.h
drivers/usb/dwc2/params.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/dwc3-imx8mp.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/dwc3/dwc3-xilinx.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/gadget.h
drivers/usb/dwc3/host.c
drivers/usb/fotg210/fotg210-hcd.c
drivers/usb/fotg210/fotg210-udc.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_mass_storage.c
drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/gadget/function/f_tcm.c
drivers/usb/gadget/function/f_uac1.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/f_uvc.c
drivers/usb/gadget/function/f_uvc.h
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/function/u_ncm.h
drivers/usb/gadget/function/u_uvc.h
drivers/usb/gadget/function/uvc.h
drivers/usb/gadget/function/uvc_configfs.c
drivers/usb/gadget/function/uvc_v4l2.c
drivers/usb/gadget/function/uvc_video.c
drivers/usb/gadget/function/uvc_video.h
drivers/usb/gadget/legacy/webcam.c
drivers/usb/gadget/udc/at91_udc.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/cdns2/cdns2-debug.h
drivers/usb/gadget/udc/fsl_udc_core.c
drivers/usb/gadget/udc/gr_udc.c
drivers/usb/gadget/udc/lpc32xx_udc.c
drivers/usb/gadget/udc/mv_udc_core.c
drivers/usb/gadget/udc/pch_udc.c
drivers/usb/gadget/udc/pxa25x_udc.c
drivers/usb/host/max3421-hcd.c
drivers/usb/host/xhci-dbgcap.c
drivers/usb/host/xhci-dbgcap.h
drivers/usb/host/xhci-debugfs.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci-mtk.h
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/iowarrior.c
drivers/usb/misc/onboard_usb_hub.c
drivers/usb/misc/onboard_usb_hub.h
drivers/usb/misc/qcom_eud.c
drivers/usb/misc/yurex.c
drivers/usb/mon/mon_bin.c
drivers/usb/mon/mon_stat.c
drivers/usb/mon/mon_text.c
drivers/usb/phy/phy-generic.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/phy/phy-twl6030-usb.c
drivers/usb/serial/bus.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/usb-serial.c
drivers/usb/storage/sierra_ms.c
drivers/usb/storage/uas.c
drivers/usb/typec/class.c
drivers/usb/typec/mux/Kconfig
drivers/usb/typec/mux/Makefile
drivers/usb/typec/mux/wcd939x-usbss.c [new file with mode: 0644]
drivers/usb/typec/pd.c
drivers/usb/typec/tcpm/tcpci_maxim_core.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tipd/core.c
drivers/usb/typec/tipd/tps6598x.h
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi_acpi.c
drivers/usb/usbip/stub_main.c
drivers/usb/usbip/vudc.h
drivers/usb/usbip/vudc_dev.c
drivers/usb/usbip/vudc_main.c
drivers/vdpa/alibaba/eni_vdpa.c
drivers/vdpa/mlx5/core/mlx5_vdpa.h
drivers/vdpa/mlx5/core/mr.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa.c
drivers/vfio/Kconfig
drivers/vfio/Makefile
drivers/vfio/debugfs.c [new file with mode: 0644]
drivers/vfio/pci/Kconfig
drivers/vfio/pci/Makefile
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
drivers/vfio/pci/pds/dirty.c
drivers/vfio/pci/pds/dirty.h
drivers/vfio/pci/vfio_pci_rdwr.c
drivers/vfio/pci/virtio/Kconfig [new file with mode: 0644]
drivers/vfio/pci/virtio/Makefile [new file with mode: 0644]
drivers/vfio/pci/virtio/main.c [new file with mode: 0644]
drivers/vfio/vfio.h
drivers/vfio/vfio_iommu_type1.c
drivers/vfio/vfio_main.c
drivers/vhost/vdpa.c
drivers/video/backlight/Kconfig
drivers/video/backlight/Makefile
drivers/video/backlight/hx8357.c
drivers/video/backlight/ili922x.c
drivers/video/backlight/lm3630a_bl.c
drivers/video/backlight/lp855x_bl.c
drivers/video/backlight/mp3309c.c [new file with mode: 0644]
drivers/video/backlight/pwm_bl.c
drivers/video/console/Kconfig
drivers/video/fbdev/core/fb_ddc.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/cyber2000fb.c
drivers/video/fbdev/i740fb.c
drivers/video/fbdev/matrox/i2c-matroxfb.c
drivers/video/fbdev/s3fb.c
drivers/video/fbdev/savage/savagefb_driver.c
drivers/video/fbdev/sis/sis_main.c
drivers/video/fbdev/ssd1307fb.c
drivers/video/fbdev/stifb.c
drivers/video/fbdev/tdfxfb.c
drivers/video/fbdev/tridentfb.c
drivers/video/fbdev/via/via_i2c.c
drivers/video/fbdev/vt8500lcdfb.c
drivers/virt/vboxguest/vboxguest_core.c
drivers/virt/vboxguest/vboxguest_linux.c
drivers/virt/vboxguest/vboxguest_utils.c
drivers/virt/vmgenid.c
drivers/virtio/Kconfig
drivers/virtio/Makefile
drivers/virtio/virtio.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_pci_admin_legacy_io.c [new file with mode: 0644]
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_pci_modern_dev.c
drivers/w1/masters/Kconfig
drivers/w1/masters/Makefile
drivers/w1/masters/amd_axi_w1.c [new file with mode: 0644]
drivers/w1/masters/ds2490.c
drivers/w1/masters/w1-gpio.c
drivers/w1/slaves/w1_ds2433.c
drivers/xen/gntdev-dmabuf.c
drivers/xen/xenbus/xenbus_client.c
fs/9p/v9fs_vfs.h
fs/9p/vfs_addr.c
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/Kconfig
fs/Makefile
fs/afs/dir.c
fs/afs/dynroot.c
fs/afs/file.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/proc.c
fs/afs/super.c
fs/afs/write.c
fs/anon_inodes.c
fs/bcachefs/Makefile
fs/bcachefs/alloc_background.c
fs/bcachefs/alloc_background_format.h [new file with mode: 0644]
fs/bcachefs/alloc_foreground.c
fs/bcachefs/backpointers.c
fs/bcachefs/backpointers.h
fs/bcachefs/bcachefs.h
fs/bcachefs/bcachefs_format.h
fs/bcachefs/bkey.c
fs/bcachefs/bkey_methods.c
fs/bcachefs/bkey_methods.h
fs/bcachefs/bset.c
fs/bcachefs/bset.h
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_cache.h
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_locking.c
fs/bcachefs/btree_locking.h
fs/bcachefs/btree_trans_commit.c
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update_interior.c
fs/bcachefs/btree_update_interior.h
fs/bcachefs/btree_write_buffer.c
fs/bcachefs/buckets.c
fs/bcachefs/buckets.h
fs/bcachefs/buckets_types.h
fs/bcachefs/clock.c
fs/bcachefs/compress.h
fs/bcachefs/data_update.c
fs/bcachefs/debug.c
fs/bcachefs/dirent_format.h [new file with mode: 0644]
fs/bcachefs/ec.c
fs/bcachefs/ec_format.h [new file with mode: 0644]
fs/bcachefs/extents.c
fs/bcachefs/extents.h
fs/bcachefs/extents_format.h [new file with mode: 0644]
fs/bcachefs/eytzinger.h
fs/bcachefs/fs-io-direct.c
fs/bcachefs/fs-io-pagecache.c
fs/bcachefs/fs-io-pagecache.h
fs/bcachefs/fs-io.c
fs/bcachefs/fs-ioctl.c
fs/bcachefs/fsck.c
fs/bcachefs/inode.c
fs/bcachefs/inode_format.h [new file with mode: 0644]
fs/bcachefs/io_misc.c
fs/bcachefs/io_write.c
fs/bcachefs/journal.c
fs/bcachefs/journal_io.c
fs/bcachefs/logged_ops_format.h [new file with mode: 0644]
fs/bcachefs/mean_and_variance.h
fs/bcachefs/move.c
fs/bcachefs/opts.c
fs/bcachefs/opts.h
fs/bcachefs/quota_format.h [new file with mode: 0644]
fs/bcachefs/rebalance.c
fs/bcachefs/recovery.c
fs/bcachefs/reflink.c
fs/bcachefs/reflink.h
fs/bcachefs/reflink_format.h [new file with mode: 0644]
fs/bcachefs/replicas.c
fs/bcachefs/sb-clean.c
fs/bcachefs/sb-counters.c [moved from fs/bcachefs/counters.c with 99% similarity]
fs/bcachefs/sb-counters.h [moved from fs/bcachefs/counters.h with 77% similarity]
fs/bcachefs/sb-counters_format.h [new file with mode: 0644]
fs/bcachefs/sb-members.c
fs/bcachefs/snapshot.c
fs/bcachefs/snapshot_format.h [new file with mode: 0644]
fs/bcachefs/str_hash.h
fs/bcachefs/subvolume_format.h [new file with mode: 0644]
fs/bcachefs/super-io.c
fs/bcachefs/super.c
fs/bcachefs/sysfs.c
fs/bcachefs/thread_with_file.c
fs/bcachefs/trace.h
fs/bcachefs/util.c
fs/bcachefs/util.h
fs/bcachefs/xattr.c
fs/bcachefs/xattr_format.h [new file with mode: 0644]
fs/btrfs/block-group.c
fs/btrfs/block-group.h
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/delalloc-space.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/lzo.c
fs/btrfs/qgroup.c
fs/btrfs/ref-verify.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/subpage.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-checker.c
fs/btrfs/volumes.c
fs/btrfs/zlib.c
fs/btrfs/zoned.c
fs/cachefiles/Kconfig
fs/cachefiles/internal.h
fs/cachefiles/io.c
fs/cachefiles/ondemand.c
fs/ceph/Kconfig
fs/ceph/addr.c
fs/ceph/cache.h
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/export.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/quota.c
fs/ceph/super.h
fs/erofs/Kconfig
fs/erofs/compress.h
fs/erofs/decompressor.c
fs/erofs/decompressor_deflate.c
fs/erofs/decompressor_lzma.c
fs/erofs/fscache.c
fs/erofs/inode.c
fs/erofs/utils.c
fs/erofs/zdata.c
fs/erofs/zmap.c
fs/exec.c
fs/exfat/balloc.c
fs/exfat/exfat_fs.h
fs/exfat/file.c
fs/exfat/inode.c
fs/exfat/namei.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/indirect.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/mballoc.h
fs/ext4/move_extent.c
fs/ext4/super.c
fs/f2fs/super.c
fs/fs-writeback.c
fs/fscache/Kconfig [deleted file]
fs/fscache/Makefile [deleted file]
fs/fscache/internal.h [deleted file]
fs/gfs2/dentry.c
fs/gfs2/inode.c
fs/hugetlbfs/inode.c
fs/jfs/jfs_dmap.c
fs/kernfs/dir.c
fs/kernfs/file.c
fs/kernfs/mount.c
fs/namei.c
fs/namespace.c
fs/netfs/Kconfig
fs/netfs/Makefile
fs/netfs/buffered_read.c
fs/netfs/buffered_write.c [new file with mode: 0644]
fs/netfs/direct_read.c [new file with mode: 0644]
fs/netfs/direct_write.c [new file with mode: 0644]
fs/netfs/fscache_cache.c [moved from fs/fscache/cache.c with 99% similarity]
fs/netfs/fscache_cookie.c [moved from fs/fscache/cookie.c with 100% similarity]
fs/netfs/fscache_internal.h [new file with mode: 0644]
fs/netfs/fscache_io.c [moved from fs/fscache/io.c with 86% similarity]
fs/netfs/fscache_main.c [moved from fs/fscache/main.c with 84% similarity]
fs/netfs/fscache_proc.c [moved from fs/fscache/proc.c with 58% similarity]
fs/netfs/fscache_stats.c [moved from fs/fscache/stats.c with 90% similarity]
fs/netfs/fscache_volume.c [moved from fs/fscache/volume.c with 100% similarity]
fs/netfs/internal.h
fs/netfs/io.c
fs/netfs/iterator.c
fs/netfs/locking.c [new file with mode: 0644]
fs/netfs/main.c
fs/netfs/misc.c [new file with mode: 0644]
fs/netfs/objects.c
fs/netfs/output.c [new file with mode: 0644]
fs/netfs/stats.c
fs/nfs/Kconfig
fs/nfs/fscache.c
fs/nfs/fscache.h
fs/nfsd/nfs4state.c
fs/nilfs2/file.c
fs/nilfs2/recovery.c
fs/nilfs2/segment.c
fs/ntfs3/attrib.c
fs/ntfs3/attrlist.c
fs/ntfs3/bitmap.c
fs/ntfs3/dir.c
fs/ntfs3/file.c
fs/ntfs3/frecord.c
fs/ntfs3/fslog.c
fs/ntfs3/fsntfs.c
fs/ntfs3/index.c
fs/ntfs3/inode.c
fs/ntfs3/namei.c
fs/ntfs3/ntfs.h
fs/ntfs3/ntfs_fs.h
fs/ntfs3/record.c
fs/ntfs3/super.c
fs/ntfs3/xattr.c
fs/overlayfs/copy_up.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/proc/array.c
fs/proc/task_mmu.c
fs/remap_range.c
fs/smb/client/cached_dir.c
fs/smb/client/cifs_debug.c
fs/smb/client/cifsencrypt.c
fs/smb/client/cifsfs.c
fs/smb/client/cifsglob.h
fs/smb/client/connect.c
fs/smb/client/dfs.c
fs/smb/client/file.c
fs/smb/client/fs_context.c
fs/smb/client/fs_context.h
fs/smb/client/fscache.c
fs/smb/client/inode.c
fs/smb/client/misc.c
fs/smb/client/readdir.c
fs/smb/client/sess.c
fs/smb/client/smb2inode.c
fs/smb/client/smb2maperror.c
fs/smb/client/smb2ops.c
fs/smb/client/smb2pdu.c
fs/smb/client/smb2proto.h
fs/smb/client/smb2status.h
fs/smb/client/smbencrypt.c
fs/smb/client/transport.c
fs/smb/server/asn1.c
fs/smb/server/connection.c
fs/smb/server/connection.h
fs/smb/server/ksmbd_netlink.h
fs/smb/server/misc.c
fs/smb/server/oplock.c
fs/smb/server/smb2pdu.c
fs/smb/server/transport_ipc.c
fs/smb/server/transport_rdma.c
fs/smb/server/transport_tcp.c
fs/sysfs/dir.c
fs/tracefs/event_inode.c
fs/tracefs/inode.c
fs/tracefs/internal.h
fs/ubifs/auth.c
fs/ubifs/commit.c
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/replay.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_rtbitmap.c
fs/xfs/libxfs/xfs_rtbitmap.h
fs/xfs/libxfs/xfs_sb.c
fs/xfs/libxfs/xfs_sb.h
fs/xfs/libxfs/xfs_types.h
fs/xfs/scrub/rtbitmap.c
fs/xfs/scrub/rtsummary.c
fs/xfs/xfs_super.c
include/acpi/acpi_bus.h
include/acpi/actbl2.h
include/asm-generic/cacheflush.h
include/asm-generic/checksum.h
include/dt-bindings/dma/fsl-edma.h [new file with mode: 0644]
include/linux/acpi.h
include/linux/aer.h
include/linux/amba/serial.h
include/linux/anon_inodes.h
include/linux/backing-dev-defs.h
include/linux/bio.h
include/linux/blk-mq.h
include/linux/btf.h
include/linux/cdx/cdx_bus.h
include/linux/ceph/messenger.h
include/linux/ceph/osd_client.h
include/linux/compiler-gcc.h
include/linux/compiler_types.h
include/linux/container.h
include/linux/coresight.h
include/linux/cper.h
include/linux/cpu.h
include/linux/cxl-event.h [new file with mode: 0644]
include/linux/device.h
include/linux/device/bus.h
include/linux/device/class.h
include/linux/dma-map-ops.h
include/linux/dmaengine.h
include/linux/edac.h
include/linux/export.h
include/linux/firmware/xlnx-zynqmp.h
include/linux/fortify-string.h
include/linux/fs.h
include/linux/fscache-cache.h
include/linux/fscache.h
include/linux/fsnotify.h
include/linux/fw_table.h
include/linux/gpio/driver.h
include/linux/gpio_keys.h
include/linux/hid_bpf.h
include/linux/hrtimer.h
include/linux/i2c.h
include/linux/i3c/device.h
include/linux/i3c/master.h
include/linux/iio/adc/adi-axi-adc.h
include/linux/iio/buffer-dma.h
include/linux/iio/iio.h
include/linux/iio/types.h
include/linux/init.h
include/linux/input/as5011.h
include/linux/input/navpoint.h
include/linux/io-pgtable.h
include/linux/iommu.h
include/linux/ioprio.h
include/linux/kvm_host.h
include/linux/kvm_types.h
include/linux/leds.h
include/linux/libata.h
include/linux/lsm_hook_defs.h
include/linux/maple.h
include/linux/mc146818rtc.h
include/linux/memory-tiers.h
include/linux/mfd/max77693-private.h
include/linux/mfd/max77843-private.h
include/linux/mfd/si476x-platform.h
include/linux/mfd/tps65910.h
include/linux/mhi.h
include/linux/mhi_ep.h
include/linux/mlx5/driver.h
include/linux/mlx5/fs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/mlx5_ifc_vdpa.h
include/linux/mlx5/vport.h
include/linux/mm_types.h
include/linux/mman.h
include/linux/mmzone.h
include/linux/moxtet.h
include/linux/namei.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter_bridge.h
include/linux/netfs.h
include/linux/node.h
include/linux/nvme.h
include/linux/nvmem-consumer.h
include/linux/nvmem-provider.h
include/linux/of_device.h
include/linux/of_iommu.h
include/linux/of_platform.h
include/linux/pagemap.h
include/linux/pci-ecam.h
include/linux/pci-epc.h
include/linux/pci-epf.h
include/linux/pci.h
include/linux/pgtable.h
include/linux/pinctrl/machine.h
include/linux/pinctrl/pinconf-generic.h
include/linux/pinctrl/pinconf.h
include/linux/pinctrl/pinctrl.h
include/linux/pinctrl/pinmux.h
include/linux/platform_data/i2c-mux-reg.h
include/linux/platform_data/keypad-omap.h
include/linux/pm.h
include/linux/pm_clock.h
include/linux/pnp.h
include/linux/power/bq27xxx_battery.h
include/linux/property.h
include/linux/ptrace.h
include/linux/pwm.h
include/linux/rcu_notifier.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/ring_buffer.h
include/linux/rtsx_pci.h
include/linux/sched.h
include/linux/seq_buf.h
include/linux/serdev.h
include/linux/serial_core.h
include/linux/skbuff.h
include/linux/skmsg.h
include/linux/soundwire/sdw.h
include/linux/spi/spi.h
include/linux/spinlock.h
include/linux/spmi.h
include/linux/srcu.h
include/linux/string.h
include/linux/surface_aggregator/device.h
include/linux/syscalls.h
include/linux/thermal.h
include/linux/thunderbolt.h
include/linux/trace.h
include/linux/trace_seq.h
include/linux/tty.h
include/linux/tty_driver.h
include/linux/tty_port.h
include/linux/usb.h
include/linux/usb/gadget.h
include/linux/usb/hcd.h
include/linux/usb/quirks.h
include/linux/usb/tcpci.h
include/linux/usb/tcpm.h
include/linux/vfio.h
include/linux/vfio_pci_core.h
include/linux/virtio.h
include/linux/virtio_config.h
include/linux/virtio_console.h [deleted file]
include/linux/virtio_net.h
include/linux/virtio_pci_admin.h [new file with mode: 0644]
include/linux/virtio_pci_modern.h
include/linux/w1-gpio.h [deleted file]
include/linux/writeback.h
include/net/af_unix.h
include/net/cfg80211.h
include/net/inet_connection_sock.h
include/net/inet_sock.h
include/net/ip.h
include/net/llc_pdu.h
include/net/netdev_queues.h
include/net/netfilter/nf_tables.h
include/net/sch_generic.h
include/net/sock.h
include/net/xdp_sock_drv.h
include/soc/qcom/qcom-spmi-pmic.h
include/sound/cs35l56.h
include/sound/tas2781.h
include/trace/events/afs.h
include/trace/events/ext4.h
include/trace/events/kvm.h
include/trace/events/netfs.h
include/trace/events/rxrpc.h
include/uapi/drm/ivpu_accel.h
include/uapi/linux/android/binder.h
include/uapi/linux/btrfs.h
include/uapi/linux/cxl_mem.h
include/uapi/linux/iio/types.h
include/uapi/linux/iommufd.h
include/uapi/linux/kvm.h
include/uapi/linux/mei.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/nsm.h [new file with mode: 0644]
include/uapi/linux/pcitest.h
include/uapi/linux/serial.h
include/uapi/linux/usb/functionfs.h
include/uapi/linux/vfio.h
include/uapi/linux/virtio_config.h
include/uapi/linux/virtio_pci.h
include/uapi/linux/virtio_pmem.h
include/xen/interface/io/displif.h
include/xen/interface/io/ring.h
include/xen/interface/io/sndif.h
init/Kconfig
init/do_mounts.c
init/initramfs.c
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/net.c
io_uring/opdef.c
io_uring/openclose.c
io_uring/poll.c
io_uring/poll.h
io_uring/register.c
io_uring/rsrc.h
io_uring/rw.c
kernel/bpf/btf.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/crash_core.c
kernel/debug/kdb/kdb_main.c
kernel/dma/debug.c
kernel/dma/swiotlb.c
kernel/events/uprobes.c
kernel/exit.c
kernel/fork.c
kernel/futex/core.c
kernel/futex/pi.c
kernel/irq/irqdesc.c
kernel/kexec_core.c
kernel/kprobes.c
kernel/locking/locktorture.c
kernel/rcu/Kconfig.debug
kernel/rcu/rcu.h
kernel/rcu/rcutorture.c
kernel/rcu/srcutree.c
kernel/rcu/tasks.h
kernel/rcu/tree.c
kernel/rcu/tree_exp.h
kernel/rcu/tree_stall.h
kernel/rcu/update.c
kernel/sched/cpufreq_schedutil.c
kernel/sys.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/tick-sched.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/ring_buffer_benchmark.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_boot.c
kernel/trace/trace_events.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_osnoise.c
kernel/trace/trace_probe.c
kernel/trace/trace_probe.h
kernel/trace/trace_seq.c
kernel/trace/tracing_map.c
lib/Kconfig.debug
lib/checksum_kunit.c
lib/fw_table.c
lib/kunit/device-impl.h
lib/kunit/device.c
lib/kunit/executor.c
lib/kunit/kunit-test.c
lib/kunit/test.c
lib/nlattr.c
lib/sbitmap.c
lib/stackdepot.c
lib/string.c
lib/test_fortify/write_overflow-strlcpy-src.c [deleted file]
lib/test_fortify/write_overflow-strlcpy.c [deleted file]
mm/Kconfig
mm/backing-dev.c
mm/compaction.c
mm/damon/sysfs-schemes.c
mm/filemap.c
mm/huge_memory.c
mm/init-mm.c
mm/kasan/generic.c
mm/madvise.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory-tiers.c
mm/memory.c
mm/memory_hotplug.c
mm/migrate.c
mm/mm_init.c
mm/mmap.c
mm/page-writeback.c
mm/percpu.c
mm/readahead.c
mm/userfaultfd.c
mm/zswap.c
net/8021q/vlan_netlink.c
net/batman-adv/multicast.c
net/bluetooth/rfcomm/tty.c
net/bridge/br_multicast.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netfilter_ipv6.c
net/bridge/br_private.h
net/ceph/messenger_v1.c
net/ceph/messenger_v2.c
net/ceph/osd_client.c
net/core/datagram.c
net/core/dev.c
net/core/dev.h
net/core/filter.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/sock.c
net/devlink/core.c
net/devlink/port.c
net/dsa/user.c
net/ethtool/features.c
net/hsr/hsr_device.c
net/hsr/hsr_main.c
net/ipv4/af_inet.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nf_reject_ipv4.c
net/ipv4/raw.c
net/ipv4/tcp.c
net/ipv4/udp.c
net/ipv6/addrconf_core.c
net/ipv6/af_inet6.c
net/ipv6/ip6_tunnel.c
net/ipv6/mcast.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/udp.c
net/llc/af_llc.c
net/llc/llc_core.c
net/mac80211/Kconfig
net/mac80211/cfg.c
net/mac80211/debugfs_netdev.c
net/mac80211/debugfs_netdev.h
net/mac80211/iface.c
net/mac80211/mlme.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/tx.c
net/mac80211/wbrf.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/subflow.c
net/netfilter/ipset/ip_set_bitmap_gen.h
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_log.c
net/netfilter/nf_log_syslog.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_chain_filter.c
net/netfilter/nft_compat.c
net/netfilter/nft_ct.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_limit.c
net/netfilter/nft_nat.c
net/netfilter/nft_rt.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_pipapo.c
net/netfilter/nft_set_pipapo.h
net/netfilter/nft_set_pipapo_avx2.c
net/netfilter/nft_set_rbtree.c
net/netfilter/nft_socket.c
net/netfilter/nft_synproxy.c
net/netfilter/nft_tproxy.c
net/netfilter/nft_tunnel.c
net/netfilter/nft_xfrm.c
net/netfilter/xt_physdev.c
net/netlink/af_netlink.c
net/nfc/digital_core.c
net/nfc/nci/core.c
net/nfc/nci/spi.c
net/rds/af_rds.c
net/rxrpc/ar-internal.h
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/conn_event.c
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/rxrpc/proc.c
net/rxrpc/rxkad.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/smc/smc_core.c
net/smc/smc_diag.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/sunrpc_syms.c
net/sunrpc/svc.c
net/sunrpc/svcsock.c
net/tipc/bearer.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/unix/diag.c
net/unix/garbage.c
net/wireless/Kconfig
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/scan.c
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c
samples/bpf/asm_goto_workaround.h
samples/cgroup/.gitignore [new file with mode: 0644]
samples/ftrace/ftrace-direct-modify.c
samples/ftrace/ftrace-direct-multi-modify.c
samples/ftrace/ftrace-direct-multi.c
samples/ftrace/ftrace-direct-too.c
samples/ftrace/ftrace-direct.c
samples/ftrace/sample-trace-array.c
scripts/Makefile.defconf
scripts/Makefile.extrawarn
scripts/Makefile.lib
scripts/Makefile.package
scripts/check-uapi.sh [new file with mode: 0755]
scripts/checkpatch.pl
scripts/coccinelle/api/device_attr_show.cocci
scripts/decode_stacktrace.sh
scripts/gdb/linux/tasks.py
scripts/generate_rust_target.rs
scripts/genksyms/genksyms.c
scripts/git.orderFile [new file with mode: 0644]
scripts/head-object-list.txt
scripts/kconfig/Makefile
scripts/kconfig/conf.c
scripts/kconfig/confdata.c
scripts/kconfig/expr.c
scripts/kconfig/lkc.h
scripts/kconfig/lkc_proto.h
scripts/kconfig/mconf.c
scripts/kconfig/menu.c
scripts/kconfig/mnconf-common.c [new file with mode: 0644]
scripts/kconfig/mnconf-common.h [new file with mode: 0644]
scripts/kconfig/nconf.c
scripts/kconfig/symbol.c
scripts/kconfig/util.c
scripts/min-tool-version.sh
scripts/mod/modpost.c
scripts/mod/modpost.h
scripts/package/builddeb
scripts/package/buildtar
scripts/package/deb-build-option [deleted file]
scripts/package/debian/copyright [new file with mode: 0644]
scripts/package/debian/rules
scripts/package/install-extmod-build
scripts/package/kernel.spec
scripts/package/mkdebian
scripts/package/snapcraft.template
scripts/recordmcount.c
scripts/recordmcount.pl
scripts/tags.sh
scripts/xz_wrap.sh
security/apparmor/Kconfig
security/apparmor/apparmorfs.c
security/apparmor/crypto.c
security/apparmor/domain.c
security/apparmor/lib.c
security/apparmor/lsm.c
security/apparmor/policy.c
security/apparmor/policy_unpack.c
security/apparmor/task.c
security/keys/encrypted-keys/encrypted.c
security/security.c
security/tomoyo/tomoyo.c
sound/core/pcm.c
sound/drivers/aloop.c
sound/drivers/serial-generic.c
sound/pci/hda/cs35l41_hda_property.c
sound/pci/hda/cs35l56_hda.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cs8409.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/oxygen/oxygen_mixer.c
sound/soc/amd/acp/acp-mach-common.c
sound/soc/amd/acp/acp-sof-mach.c
sound/soc/amd/acp/acp3x-es83xx/acp3x-es83xx.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/cs35l56-shared.c
sound/soc/codecs/cs35l56.c
sound/soc/codecs/cs35l56.h
sound/soc/codecs/es8326.c [changed mode: 0755->0644]
sound/soc/codecs/es8326.h
sound/soc/codecs/lpass-wsa-macro.c
sound/soc/codecs/rtq9128.c
sound/soc/codecs/tas2562.c
sound/soc/codecs/tas2781-i2c.c
sound/soc/codecs/wcd9335.c
sound/soc/codecs/wcd934x.c
sound/soc/codecs/wcd938x.c
sound/soc/codecs/wm_adsp.c
sound/soc/codecs/wsa883x.c
sound/soc/generic/audio-graph-card2.c
sound/soc/intel/boards/bxt_da7219_max98357a.c
sound/soc/intel/boards/bxt_rt298.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/mediatek/common/mtk-dsp-sof-common.c
sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
sound/soc/mediatek/mt8195/mt8195-mt6359.c
sound/soc/qcom/sc8280xp.c
sound/soc/rockchip/rk3399_gru_sound.c
sound/soc/soc-core.c
sound/soc/sof/ipc3-dtrace.c
sound/soc/sof/ipc4-loader.c
sound/soc/sof/ipc4-pcm.c
sound/soc/sunxi/sun4i-spdif.c
sound/usb/clock.c
sound/usb/format.c
sound/usb/midi2.c
sound/usb/mixer_scarlett2.c
sound/usb/quirks.c
sound/virtio/virtio_card.c
sound/virtio/virtio_ctl_msg.c
sound/virtio/virtio_pcm_msg.c
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/msr-index.h
tools/arch/x86/include/asm/rmwcc.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/lib/memcpy_64.S
tools/arch/x86/lib/memset_64.S
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-dwarf_getcfi.c [new file with mode: 0644]
tools/build/feature/test-libopencsd.c
tools/counter/Build
tools/counter/Makefile
tools/counter/counter_watch_events.c [new file with mode: 0644]
tools/iio/iio_event_monitor.c
tools/include/asm-generic/unaligned.h
tools/include/linux/compiler_types.h
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/fcntl.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/mount.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/stat.h
tools/lib/api/fs/fs.c
tools/lib/api/io.h
tools/lib/bpf/libbpf.c
tools/lib/perf/Documentation/examples/sampling.c
tools/lib/perf/Documentation/libperf-sampling.txt
tools/lib/perf/Documentation/libperf.txt
tools/lib/perf/cpumap.c
tools/lib/perf/evlist.c
tools/lib/perf/evsel.c
tools/lib/perf/include/internal/mmap.h
tools/lib/perf/include/perf/cpumap.h
tools/lib/perf/libperf.map
tools/lib/perf/mmap.c
tools/lib/perf/tests/test-cpumap.c
tools/lib/perf/tests/test-evlist.c
tools/lib/perf/tests/test-evsel.c
tools/lib/subcmd/help.c
tools/perf/.gitignore
tools/perf/Documentation/itrace.txt
tools/perf/Documentation/perf-annotate.txt
tools/perf/Documentation/perf-config.txt
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-lock.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf.txt
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm64/util/arm-spe.c
tools/perf/arch/arm64/util/header.c
tools/perf/arch/loongarch/annotate/instructions.c
tools/perf/arch/x86/tests/hybrid.c
tools/perf/arch/x86/util/dwarf-regs.c
tools/perf/arch/x86/util/event.c
tools/perf/arch/x86/util/intel-bts.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/bench/epoll-ctl.c
tools/perf/bench/epoll-wait.c
tools/perf/bench/futex-hash.c
tools/perf/bench/futex-lock-pi.c
tools/perf/bench/futex-requeue.c
tools/perf/bench/futex-wake-parallel.c
tools/perf/bench/futex-wake.c
tools/perf/bench/sched-seccomp-notify.c
tools/perf/builtin-annotate.c
tools/perf/builtin-c2c.c
tools/perf/builtin-ftrace.c
tools/perf/builtin-inject.c
tools/perf/builtin-list.c
tools/perf/builtin-lock.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/perf-archive.sh [changed mode: 0644->0755]
tools/perf/perf.c
tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json
tools/perf/pmu-events/arch/arm64/mapfile.csv
tools/perf/pmu-events/arch/powerpc/mapfile.csv
tools/perf/pmu-events/arch/powerpc/power10/datasource.json
tools/perf/pmu-events/arch/riscv/mapfile.csv
tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json [new file with mode: 0644]
tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json [new file with mode: 0644]
tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json [new file with mode: 0644]
tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json [new file with mode: 0644]
tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdzen4/recommended.json
tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json
tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json
tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
tools/perf/pmu-events/arch/x86/icelakex/other.json
tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
tools/perf/pmu-events/arch/x86/mapfile.csv
tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
tools/perf/pmu-events/jevents.py
tools/perf/scripts/python/arm-cs-trace-disasm.py
tools/perf/scripts/python/compaction-times.py
tools/perf/scripts/python/exported-sql-viewer.py
tools/perf/tests/Build
tools/perf/tests/attr.c
tools/perf/tests/attr/base-record
tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64
tools/perf/tests/attr/test-record-user-regs-sve-aarch64
tools/perf/tests/builtin-test.c
tools/perf/tests/code-reading.c
tools/perf/tests/cpumap.c
tools/perf/tests/dso-data.c
tools/perf/tests/keep-tracking.c
tools/perf/tests/make
tools/perf/tests/maps.c
tools/perf/tests/mmap-basic.c
tools/perf/tests/openat-syscall-all-cpus.c
tools/perf/tests/parse-events.c
tools/perf/tests/perf-time-to-tsc.c
tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
tools/perf/tests/shell/daemon.sh
tools/perf/tests/shell/diff.sh [new file with mode: 0755]
tools/perf/tests/shell/lib/perf_has_symbol.sh [new file with mode: 0644]
tools/perf/tests/shell/lib/setup_python.sh [new file with mode: 0644]
tools/perf/tests/shell/list.sh [new file with mode: 0755]
tools/perf/tests/shell/pipe_test.sh
tools/perf/tests/shell/record+probe_libc_inet_pton.sh
tools/perf/tests/shell/record.sh
tools/perf/tests/shell/record_offcpu.sh
tools/perf/tests/shell/script.sh [new file with mode: 0755]
tools/perf/tests/shell/stat+json_output.sh
tools/perf/tests/shell/stat_all_pmu.sh
tools/perf/tests/shell/stat_metrics_values.sh
tools/perf/tests/shell/test_arm_callgraph_fp.sh
tools/perf/tests/shell/test_brstack.sh
tools/perf/tests/shell/test_data_symbol.sh
tools/perf/tests/shell/test_perf_data_converter_json.sh
tools/perf/tests/sigtrap.c
tools/perf/tests/sw-clock.c
tools/perf/tests/switch-tracking.c
tools/perf/tests/task-exit.c
tools/perf/tests/tests.h
tools/perf/tests/topology.c
tools/perf/tests/vmlinux-kallsyms.c
tools/perf/tests/workloads/thloop.c
tools/perf/trace/beauty/arch_errno_names.sh
tools/perf/trace/beauty/beauty.h
tools/perf/trace/beauty/prctl_option.sh
tools/perf/trace/beauty/socket.sh
tools/perf/trace/beauty/statx.c
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/browsers/hists.h
tools/perf/ui/browsers/scripts.c
tools/perf/ui/gtk/annotate.c
tools/perf/ui/gtk/gtk.h
tools/perf/ui/tui/setup.c
tools/perf/util/Build
tools/perf/util/annotate-data.c [new file with mode: 0644]
tools/perf/util/annotate-data.h [new file with mode: 0644]
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/auxtrace.c
tools/perf/util/auxtrace.h
tools/perf/util/block-info.c
tools/perf/util/block-info.h
tools/perf/util/block-range.c
tools/perf/util/bpf-event.c
tools/perf/util/bpf-event.h
tools/perf/util/bpf_counter.c
tools/perf/util/bpf_lock_contention.c
tools/perf/util/compress.h
tools/perf/util/cpumap.c
tools/perf/util/cputopo.c
tools/perf/util/cs-etm.c
tools/perf/util/db-export.c
tools/perf/util/debug.c
tools/perf/util/debug.h
tools/perf/util/debuginfo.c [new file with mode: 0644]
tools/perf/util/debuginfo.h [new file with mode: 0644]
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/dwarf-aux.c
tools/perf/util/dwarf-aux.h
tools/perf/util/dwarf-regs.c
tools/perf/util/env.c
tools/perf/util/env.h
tools/perf/util/event.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/genelf.c
tools/perf/util/header.c
tools/perf/util/hisi-ptt.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/dwarf-regs.h
tools/perf/util/include/linux/linkage.h
tools/perf/util/machine.c
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/maps.c
tools/perf/util/maps.h
tools/perf/util/mem-events.c
tools/perf/util/metricgroup.c
tools/perf/util/mmap.c
tools/perf/util/mmap.h
tools/perf/util/parse-branch-options.c
tools/perf/util/parse-events.c
tools/perf/util/perf_api_probe.c
tools/perf/util/perf_event_attr_fprintf.c
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/print-events.c
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/perf/util/record.c
tools/perf/util/s390-cpumcf-kernel.h
tools/perf/util/s390-sample-raw.c
tools/perf/util/sample.h
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/stat-display.c
tools/perf/util/stat-shadow.c
tools/perf/util/stat.c
tools/perf/util/stat.h
tools/perf/util/symbol-elf.c
tools/perf/util/symbol-minimal.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/symbol_conf.h
tools/perf/util/synthetic-events.c
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/top.c
tools/perf/util/top.h
tools/perf/util/unwind-libdw.c
tools/perf/util/unwind-libunwind-local.c
tools/perf/util/vdso.c
tools/perf/util/zstd.c
tools/power/cpupower/bench/Makefile
tools/testing/cxl/Kbuild
tools/testing/cxl/test/Kbuild
tools/testing/cxl/test/cxl.c
tools/testing/cxl/test/mem.c
tools/testing/nvdimm/Kbuild
tools/testing/nvdimm/test/Kbuild
tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
tools/testing/selftests/bpf/progs/bpf_tracing_net.h
tools/testing/selftests/bpf/progs/sock_iter_batch.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_jhash.h
tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c
tools/testing/selftests/core/close_range_test.c
tools/testing/selftests/drivers/net/bonding/bond_options.sh
tools/testing/selftests/drivers/net/bonding/config
tools/testing/selftests/drivers/net/bonding/lag_lib.sh
tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
tools/testing/selftests/drivers/net/bonding/settings
tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
tools/testing/selftests/drivers/net/netdevsim/config [new file with mode: 0644]
tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh
tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh
tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
tools/testing/selftests/drivers/net/team/config
tools/testing/selftests/ftrace/test.d/00basic/ringbuffer_subbuf_size.tc [new file with mode: 0644]
tools/testing/selftests/ftrace/test.d/00basic/trace_marker.tc [new file with mode: 0644]
tools/testing/selftests/hid/tests/test_wacom_generic.py
tools/testing/selftests/iommu/iommufd.c
tools/testing/selftests/iommu/iommufd_utils.h
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/page_fault_test.c
tools/testing/selftests/kvm/dirty_log_test.c
tools/testing/selftests/kvm/guest_memfd_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/include/aarch64/processor.h
tools/testing/selftests/kvm/include/guest_modes.h
tools/testing/selftests/kvm/include/kvm_util_base.h
tools/testing/selftests/kvm/include/riscv/processor.h
tools/testing/selftests/kvm/include/test_util.h
tools/testing/selftests/kvm/include/ucall_common.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/aarch64/processor.c
tools/testing/selftests/kvm/lib/guest_modes.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/memstress.c
tools/testing/selftests/kvm/lib/riscv/processor.c
tools/testing/selftests/kvm/lib/riscv/ucall.c
tools/testing/selftests/kvm/riscv/get-reg-list.c
tools/testing/selftests/kvm/s390x/cmma_test.c
tools/testing/selftests/kvm/set_memory_region_test.c
tools/testing/selftests/kvm/steal_time.c
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c
tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
tools/testing/selftests/kvm/x86_64/hyperv_features.c
tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c
tools/testing/selftests/kvm/x86_64/mmio_warning_test.c [deleted file]
tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c
tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/private_mem_kvm_exits_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
tools/testing/selftests/kvm/x86_64/ucna_injection_test.c
tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
tools/testing/selftests/landlock/common.h
tools/testing/selftests/landlock/fs_test.c
tools/testing/selftests/landlock/net_test.c
tools/testing/selftests/livepatch/functions.sh
tools/testing/selftests/mm/charge_reserved_hugetlb.sh
tools/testing/selftests/mm/hugepage-vmemmap.c
tools/testing/selftests/mm/ksm_tests.c
tools/testing/selftests/mm/map_hugetlb.c
tools/testing/selftests/mm/mremap_test.c
tools/testing/selftests/mm/va_high_addr_switch.sh
tools/testing/selftests/mm/write_hugetlb_memory.sh
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/big_tcp.sh
tools/testing/selftests/net/cmsg_ipv6.sh
tools/testing/selftests/net/config
tools/testing/selftests/net/forwarding/Makefile
tools/testing/selftests/net/forwarding/lib.sh [changed mode: 0755->0644]
tools/testing/selftests/net/lib.sh
tools/testing/selftests/net/mptcp/config
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/mptcp/mptcp_lib.sh
tools/testing/selftests/net/mptcp/settings
tools/testing/selftests/net/mptcp/simult_flows.sh
tools/testing/selftests/net/net_helper.sh [changed mode: 0755->0644]
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/rps_default_mask.sh
tools/testing/selftests/net/rtnetlink.sh
tools/testing/selftests/net/setup_loopback.sh [changed mode: 0755->0644]
tools/testing/selftests/net/setup_veth.sh
tools/testing/selftests/net/so_incoming_cpu.c
tools/testing/selftests/net/tcp_ao/Makefile
tools/testing/selftests/net/tcp_ao/config [new file with mode: 0644]
tools/testing/selftests/net/tcp_ao/key-management.c
tools/testing/selftests/net/tcp_ao/lib/sock.c
tools/testing/selftests/net/tcp_ao/rst.c
tools/testing/selftests/net/tcp_ao/settings [new file with mode: 0644]
tools/testing/selftests/net/tcp_ao/unsigned-md5.c
tools/testing/selftests/net/tls.c
tools/testing/selftests/net/udpgro.sh
tools/testing/selftests/net/udpgro_bench.sh
tools/testing/selftests/net/udpgro_frglist.sh
tools/testing/selftests/net/udpgro_fwd.sh
tools/testing/selftests/net/udpgso_bench_rx.c
tools/testing/selftests/net/veth.sh
tools/testing/selftests/net/xdp_dummy.c [new file with mode: 0644]
tools/testing/selftests/netfilter/conntrack_dump_flush.c
tools/testing/selftests/rcutorture/bin/mkinitrd.sh
tools/testing/selftests/rcutorture/configs/rcu/TREE07.boot
tools/testing/selftests/riscv/hwprobe/Makefile
tools/testing/selftests/riscv/hwprobe/cbo.c
tools/testing/selftests/riscv/hwprobe/hwprobe.c
tools/testing/selftests/riscv/hwprobe/hwprobe.h
tools/testing/selftests/riscv/hwprobe/which-cpus.c [new file with mode: 0644]
tools/testing/selftests/riscv/mm/mmap_test.h
tools/testing/selftests/riscv/vector/v_initval_nolibc.c
tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c
tools/testing/selftests/riscv/vector/vstate_prctl.c
tools/testing/selftests/rseq/basic_percpu_ops_test.c
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/seccomp/seccomp_benchmark.c
tools/testing/selftests/sgx/Makefile
tools/testing/selftests/sgx/defines.h
tools/testing/selftests/sgx/load.c
tools/testing/selftests/sgx/sigstruct.c
tools/testing/selftests/sgx/test_encl.c
tools/testing/selftests/sgx/test_encl.lds
tools/testing/selftests/sgx/test_encl_bootstrap.S
tools/tracing/rtla/Makefile
tools/tracing/rtla/src/osnoise_hist.c
tools/tracing/rtla/src/osnoise_top.c
tools/tracing/rtla/src/timerlat_hist.c
tools/tracing/rtla/src/timerlat_top.c
tools/tracing/rtla/src/utils.c
tools/tracing/rtla/src/utils.h
tools/verification/rv/Makefile
tools/verification/rv/src/in_kernel.c
usr/gen_init_cpio.c
virt/kvm/Kconfig
virt/kvm/Makefile.kvm
virt/kvm/dirty_ring.c
virt/kvm/eventfd.c
virt/kvm/guest_memfd.c [new file with mode: 0644]
virt/kvm/kvm_main.c
virt/kvm/kvm_mm.h

diff --git a/.editorconfig b/.editorconfig
new file mode 100644 (file)
index 0000000..8547733
--- /dev/null
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+root = true
+
+[{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}]
+charset = utf-8
+end_of_line = lf
+trim_trailing_whitespace = true
+insert_final_newline = true
+indent_style = tab
+indent_size = 8
+
+[*.{json,py,rs}]
+charset = utf-8
+end_of_line = lf
+trim_trailing_whitespace = true
+insert_final_newline = true
+indent_style = space
+indent_size = 4
+
+# this must be below the general *.py to overwrite it
+[tools/{perf,power,rcu,testing/kunit}/**.py,]
+indent_style = tab
+indent_size = 8
+
+[*.yaml]
+charset = utf-8
+end_of_line = lf
+trim_trailing_whitespace = unset
+insert_final_newline = true
+indent_style = space
+indent_size = 2
index 98274e1160d7b11729f307df26f3e93427705f8d..689a4fa3f5477aa0fd46997eca5ee7a29cd78f8a 100644 (file)
@@ -96,6 +96,7 @@ modules.order
 #
 !.clang-format
 !.cocciconfig
+!.editorconfig
 !.get_maintainer.ignore
 !.gitattributes
 !.gitignore
index 1f6ad79b45e4a9463a5f47ab6f969e8ae48c5a50..ee8f03cc7f726fa22b518a45c1044e4e05498ec5 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -289,6 +289,7 @@ Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Crispin <john@phrozen.org> <blogic@openwrt.org>
 John Fastabend <john.fastabend@gmail.com> <john.r.fastabend@intel.com>
 John Keeping <john@keeping.me.uk> <john@metanate.com>
+John Moon <john@jmoon.dev> <quic_johmoo@quicinc.com>
 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
 <jon.toppins+linux@gmail.com> <jtoppins@cumulusnetworks.com>
@@ -344,6 +345,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
 Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
 Leon Romanovsky <leon@kernel.org> <leonro@nvidia.com>
+Leo Yan <leo.yan@linux.dev> <leo.yan@linaro.org>
 Liam Mark <quic_lmark@quicinc.com> <lmark@codeaurora.org>
 Linas Vepstas <linas@austin.ibm.com>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
@@ -363,7 +365,6 @@ Maheshwar Ajja <quic_majja@quicinc.com> <majja@codeaurora.org>
 Malathi Gottam <quic_mgottam@quicinc.com> <mgottam@codeaurora.org>
 Manikanta Pubbisetty <quic_mpubbise@quicinc.com> <mpubbise@codeaurora.org>
 Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
-Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
 Manoj Basapathi <quic_manojbm@quicinc.com> <manojbm@codeaurora.org>
 Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
 Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
@@ -391,9 +392,10 @@ Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
 Matthieu Baerts <matttbe@kernel.org> <matthieu.baerts@tessares.net>
 Matthieu CASTET <castet.matthieu@free.fr>
 Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
-Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
-Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
-Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
+Matt Ranostay <matt@ranostay.sg> <matt.ranostay@konsulko.com>
+Matt Ranostay <matt@ranostay.sg> <matt@ranostay.consulting>
+Matt Ranostay <matt@ranostay.sg> Matthew Ranostay <mranostay@embeddedalley.com>
+Matt Ranostay <matt@ranostay.sg> <matt.ranostay@intel.com>
 Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
 Maulik Shah <quic_mkshah@quicinc.com> <mkshah@codeaurora.org>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
@@ -504,6 +506,9 @@ Ralf Baechle <ralf@linux-mips.org>
 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
 Ram Chandra Jangir <quic_rjangir@quicinc.com> <rjangir@codeaurora.org>
 Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net>
+Randy Dunlap <rdunlap@infradead.org> <randy.dunlap@oracle.com>
+Randy Dunlap <rdunlap@infradead.org> <rddunlap@osdl.org>
+Randy Dunlap <rdunlap@infradead.org> <randy.dunlap@intel.com>
 Ravi Kumar Bokka <quic_rbokka@quicinc.com> <rbokka@codeaurora.org>
 Ravi Kumar Siddojigari <quic_rsiddoji@quicinc.com> <rsiddoji@codeaurora.org>
 Rémi Denis-Courmont <rdenis@simphalempin.com>
@@ -582,6 +587,7 @@ Surabhi Vishnoi <quic_svishnoi@quicinc.com> <svishnoi@codeaurora.org>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Tamizh Chelvam Raja <quic_tamizhr@quicinc.com> <tamizhr@codeaurora.org>
 Taniya Das <quic_tdas@quicinc.com> <tdas@codeaurora.org>
+Tanzir Hasan <tanzhasanwork@gmail.com> <tanzirh@google.com>
 Tejun Heo <htejun@gmail.com>
 Tomeu Vizoso <tomeu@tomeuvizoso.net> <tomeu.vizoso@collabora.com>
 Thomas Graf <tgraf@suug.ch>
diff --git a/CREDITS b/CREDITS
index 06b177c6af6aa46d88d5737714a04df0d2db9157..df8d6946739f68655a8b077f0ebcc4bf4612944b 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -179,6 +179,7 @@ E: ralf@gnu.org
 P: 1024/AF7B30C1 CF 97 C2 CC 6D AE A7 FE  C8 BA 9C FC 88 DE 32 C3
 D: Linux/MIPS port
 D: Linux/68k hacker
+D: AX25 maintainer
 S: Hauptstrasse 19
 S: 79837 St. Blasien
 S: Germany
@@ -677,6 +678,10 @@ D: Media subsystem (V4L/DVB) drivers and core
 D: EDAC drivers and EDAC 3.0 core rework
 S: Brazil
 
+N: Landen Chao
+E: Landen.Chao@mediatek.com
+D: MT7531 Ethernet switch support
+
 N: Raymond Chen
 E: raymondc@microsoft.com
 D: Author of Configure script
@@ -814,6 +819,10 @@ D: Support for Xircom PGSDB9 (firmware and host driver)
 S: Bucharest
 S: Romania
 
+N: John Crispin
+E: john@phrozen.org
+D: MediaTek MT7623 Gigabit ethernet support
+
 N: Laurence Culhane
 E: loz@holmes.demon.co.uk
 D: Wrote the initial alpha SLIP code
@@ -1538,6 +1547,10 @@ N: Andrew Haylett
 E: ajh@primag.co.uk
 D: Selection mechanism
 
+N: Johan Hedberg
+E: johan.hedberg@gmail.com
+D: Bluetooth subsystem maintainer
+
 N: Andre Hedrick
 E: andre@linux-ide.org
 E: andre@linuxdiskcert.org
@@ -2148,6 +2161,19 @@ N: Mike Kravetz
 E: mike.kravetz@oracle.com
 D: Maintenance and development of the hugetlb subsystem
 
+N: Seth Jennings
+E: sjenning@redhat.com
+D: Creation and maintenance of zswap
+
+N: Dan Streetman
+E: ddstreet@ieee.org
+D: Maintenance and development of zswap
+D: Creation and maintenance of the zpool API
+
+N: Vitaly Wool
+E: vitaly.wool@konsulko.com
+D: Maintenance and development of zswap
+
 N: Andreas S. Krebs
 E: akrebs@altavista.net
 D: CYPRESS CY82C693 chipset IDE, Digital's PC-Alpha 164SX boards
@@ -3052,6 +3078,10 @@ S: Demonstratsii 8-382
 S: Tula 300000
 S: Russia
 
+N: Thomas Petazzoni
+E: thomas.petazzoni@bootlin.com
+D: Driver for the Marvell Armada 370/XP network unit.
+
 N: Gordon Peters
 E: GordPeters@smarttech.com
 D: Isochronous receive for IEEE 1394 driver (OHCI module).
@@ -3950,6 +3980,10 @@ S: 21513 Conradia Ct
 S: Cupertino, CA 95014
 S: USA
 
+N: Manohar Vanga
+E: manohar.vanga@gmail.com
+D: VME subsystem maintainer
+
 N: Thibaut Varène
 E: hacks+kernel@slashdirt.org
 W: http://hacks.slashdirt.org/
@@ -4050,6 +4084,10 @@ D: Fixes for the NE/2-driver
 D: Miscellaneous MCA-support
 D: Cleanup of the Config-files
 
+N: Martyn Welch
+E: martyn@welchs.me.uk
+D: VME subsystem maintainer
+
 N: Matt Welsh
 E: mdw@metalab.unc.edu
 W: http://www.cs.berkeley.edu/~mdw
diff --git a/Documentation/ABI/testing/debugfs-vfio b/Documentation/ABI/testing/debugfs-vfio
new file mode 100644 (file)
index 0000000..90f7c26
--- /dev/null
@@ -0,0 +1,25 @@
+What:          /sys/kernel/debug/vfio
+Date:          December 2023
+KernelVersion: 6.8
+Contact:       Longfang Liu <liulongfang@huawei.com>
+Description:   This debugfs file directory is used for debugging
+               of vfio devices, it's a common directory for all vfio devices.
+               Vfio core will create a device subdirectory under this
+               directory.
+
+What:          /sys/kernel/debug/vfio/<device>/migration
+Date:          December 2023
+KernelVersion: 6.8
+Contact:       Longfang Liu <liulongfang@huawei.com>
+Description:   This debugfs file directory is used for debugging
+               of vfio devices that support live migration.
+               The debugfs of each vfio device that supports live migration
+               could be created under this directory.
+
+What:          /sys/kernel/debug/vfio/<device>/migration/state
+Date:          December 2023
+KernelVersion: 6.8
+Contact:       Longfang Liu <liulongfang@huawei.com>
+Description:   Read the live migration status of the vfio device.
+               The contents of the state file reflects the migration state
+               relative to those defined in the vfio_device_mig_state enum
index 8c067ff99e54742bcf3c5d033ecda7d39b213a8f..e84277531414b12977374755fd225840bf8936d1 100644 (file)
@@ -98,6 +98,13 @@ Description:
 
                  # echo 1 > /sys/bus/cdx/devices/.../remove
 
+What:          /sys/bus/cdx/devices/.../resource<N>
+Date:          July 2023
+Contact:       puneet.gupta@amd.com
+Description:
+               The resource binary file contains the content of the memory
+               regions. These files can be m'maped from userspace.
+
 What:          /sys/bus/cdx/devices/.../modalias
 Date:          July 2023
 Contact:       nipun.gupta@amd.com
index 6aa527296c71080cbbf8ecf3ce53e375a2e70022..96aafa66b4a5806c47f085100a15d16a8842639c 100644 (file)
@@ -91,3 +91,19 @@ Contact:     Mathieu Poirier <mathieu.poirier@linaro.org>
 Description:   (RW) Size of the trace buffer for TMC-ETR when used in SYSFS
                mode. Writable only for TMC-ETR configurations. The value
                should be aligned to the kernel pagesize.
+
+What:          /sys/bus/coresight/devices/<memory_map>.tmc/buf_modes_available
+Date:          August 2023
+KernelVersion: 6.7
+Contact:       Anshuman Khandual <anshuman.khandual@arm.com>
+Description:   (Read) Shows all supported Coresight TMC-ETR buffer modes available
+               for the users to configure explicitly. This file is available only
+               for TMC ETR devices.
+
+What:          /sys/bus/coresight/devices/<memory_map>.tmc/buf_mode_preferred
+Date:          August 2023
+KernelVersion: 6.7
+Contact:       Anshuman Khandual <anshuman.khandual@arm.com>
+Description:   (RW) Current Coresight TMC-ETR buffer mode selected. But user could
+               only provide a mode which is supported for a given ETR device. This
+               file is available only for TMC ETR devices.
index 4a58e649550d580980e6a35985110f12508399dd..4dd49b159543b6a60a0e76170eb9a592c3410b1f 100644 (file)
@@ -11,3 +11,162 @@ Description:
                Accepts only one of the 2 values -  1 or 2.
                1 : Generate 64 bits data
                2 : Generate 32 bits data
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/reset_dataset
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (Write) Reset the dataset of the tpdm.
+
+               Accepts only one value -  1.
+               1 : Reset the dataset of the tpdm
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_type
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (RW) Set/Get the trigger type of the DSB for tpdm.
+
+               Accepts only one of the 2 values -  0 or 1.
+               0 : Set the DSB trigger type to false
+               1 : Set the DSB trigger type to true
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_ts
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (RW) Set/Get the trigger timestamp of the DSB for tpdm.
+
+               Accepts only one of the 2 values -  0 or 1.
+               0 : Set the DSB trigger type to false
+               1 : Set the DSB trigger type to true
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_mode
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (RW) Set/Get the programming mode of the DSB for tpdm.
+
+               Accepts the value needs to be greater than 0. What data
+               bits do is listed below.
+               Bit[0:1] : Test mode control bit for choosing the inputs.
+               Bit[3] : Set to 0 for low performance mode. Set to 1 for high
+               performance mode.
+               Bit[4:8] : Select byte lane for high performance mode.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_idx
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (RW) Set/Get the index number of the edge detection for the DSB
+               subunit TPDM. Since there are at most 256 edge detections, this
+               value ranges from 0 to 255.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_val
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               Write a data to control the edge detection corresponding to
+               the index number. Before writing data to this sysfs file,
+               "ctrl_idx" should be written first to configure the index
+               number of the edge detection which needs to be controlled.
+
+               Accepts only one of the following values.
+               0 - Rising edge detection
+               1 - Falling edge detection
+               2 - Rising and falling edge detection (toggle detection)
+
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_mask
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               Write a data to mask the edge detection corresponding to the index
+               number. Before writing data to this sysfs file, "ctrl_idx" should
+               be written first to configure the index number of the edge detection
+               which needs to be masked.
+
+               Accepts only one of the 2 values -  0 or 1.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/edcr[0:15]
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               Read a set of the edge control value of the DSB in TPDM.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/edcmr[0:7]
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               Read a set of the edge control mask of the DSB in TPDM.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_patt/xpr[0:7]
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (RW) Set/Get the value of the trigger pattern for the DSB
+               subunit TPDM.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_patt/xpmr[0:7]
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (RW) Set/Get the mask of the trigger pattern for the DSB
+               subunit TPDM.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpr[0:7]
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (RW) Set/Get the value of the pattern for the DSB subunit TPDM.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpmr[0:7]
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (RW) Set/Get the mask of the pattern for the DSB subunit TPDM.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/enable_ts
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (Write) Set the pattern timestamp of DSB tpdm. Read
+               the pattern timestamp of DSB tpdm.
+
+               Accepts only one of the 2 values -  0 or 1.
+               0 : Disable DSB pattern timestamp.
+               1 : Enable DSB pattern timestamp.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/set_type
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (Write) Set the pattern type of DSB tpdm. Read
+               the pattern type of DSB tpdm.
+
+               Accepts only one of the 2 values -  0 or 1.
+               0 : Set the DSB pattern type to value.
+               1 : Set the DSB pattern type to toggle.
+
+What:          /sys/bus/coresight/devices/<tpdm-name>/dsb_msr/msr[0:31]
+Date:          March 2023
+KernelVersion  6.7
+Contact:       Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
+Description:
+               (RW) Set/Get the MSR(mux select register) for the DSB subunit
+               TPDM.
index e76c3600607f8cc697e0ec2eb6753fffa6eb1647..fff2581b80335891247c5fe1a86cb410ea8a559b 100644 (file)
@@ -28,6 +28,23 @@ Description:
                Payload in the CXL-2.0 specification.
 
 
+What:          /sys/bus/cxl/devices/memX/ram/qos_class
+Date:          May, 2023
+KernelVersion: v6.8
+Contact:       linux-cxl@vger.kernel.org
+Description:
+               (RO) For CXL host platforms that support "QoS Telemetry"
+               this attribute conveys a comma delimited list of platform
+               specific cookies that identifies a QoS performance class
+               for the volatile partition of the CXL mem device. These
+               class-ids can be compared against a similar "qos_class"
+               published for a root decoder. While it is not required
+               that the endpoints map their local memory-class to a
+               matching platform class, mismatches are not recommended
+               and there are platform specific performance related
+               side-effects that may result. First class-id is displayed.
+
+
 What:          /sys/bus/cxl/devices/memX/pmem/size
 Date:          December, 2020
 KernelVersion: v5.12
@@ -38,6 +55,23 @@ Description:
                Payload in the CXL-2.0 specification.
 
 
+What:          /sys/bus/cxl/devices/memX/pmem/qos_class
+Date:          May, 2023
+KernelVersion: v6.8
+Contact:       linux-cxl@vger.kernel.org
+Description:
+               (RO) For CXL host platforms that support "QoS Telemetry"
+               this attribute conveys a comma delimited list of platform
+               specific cookies that identifies a QoS performance class
+               for the persistent partition of the CXL mem device. These
+               class-ids can be compared against a similar "qos_class"
+               published for a root decoder. While it is not required
+               that the endpoints map their local memory-class to a
+               matching platform class, mismatches are not recommended
+               and there are platform specific performance related
+               side-effects that may result. First class-id is displayed.
+
+
 What:          /sys/bus/cxl/devices/memX/serial
 Date:          January, 2022
 KernelVersion: v5.18
index e5248fd67a563b489d8c4a65a7b32ddd2605d952..c812ab180ff40cc7f3b3453d266d7425fcdfbc19 100644 (file)
@@ -88,6 +88,21 @@ Description:
                This entry describes the HDRCAP of the master controller
                driving the bus.
 
+What:          /sys/bus/i3c/devices/i3c-<bus-id>/hotjoin
+KernelVersion:  6.8
+Contact:       linux-i3c@vger.kernel.org
+Description:
+               I3C’s Hot-Join mechanism allows an I3C Device to inform the
+               Active Controller that a newly-joined Target is present on the
+               I3C Bus and is ready to receive a Dynamic Address, in order to
+               become fully functional on the Bus. Hot-Join is used when the
+               Target is mounted on the same I3C bus and remains depowered
+               until needed or until the Target is physically inserted into the
+               I3C bus.
+
+               This entry allows one to enable or disable Hot-Join of the
+               Current Controller driving the bus.
+
 What:          /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>
 KernelVersion:  5.0
 Contact:       linux-i3c@vger.kernel.org
index 19cde14f38692945a83d3c2699b3c0b43ca895a1..2e6d5ebfd3c73d3c9e394ffb1039b54e73e7544d 100644 (file)
@@ -362,10 +362,21 @@ Description:
 What:          /sys/bus/iio/devices/iio:deviceX/in_accel_x_peak_raw
 What:          /sys/bus/iio/devices/iio:deviceX/in_accel_y_peak_raw
 What:          /sys/bus/iio/devices/iio:deviceX/in_accel_z_peak_raw
+What:          /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_peak_raw
+What:          /sys/bus/iio/devices/iio:deviceX/in_temp_peak_raw
 KernelVersion: 2.6.36
 Contact:       linux-iio@vger.kernel.org
 Description:
-               Highest value since some reset condition.  These
+               Highest value since some reset condition. These
+               attributes allow access to this and are otherwise
+               the direct equivalent of the <type>Y[_name]_raw attributes.
+
+What:          /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_trough_raw
+What:          /sys/bus/iio/devices/iio:deviceX/in_temp_trough_raw
+KernelVersion: 6.7
+Contact:       linux-iio@vger.kernel.org
+Description:
+               Lowest value since some reset condition. These
                attributes allow access to this and are otherwise
                the direct equivalent of the <type>Y[_name]_raw attributes.
 
@@ -618,7 +629,9 @@ KernelVersion:      2.6.35
 Contact:       linux-iio@vger.kernel.org
 Description:
                If a discrete set of scale values is available, they
-               are listed in this attribute.
+               are listed in this attribute. Unlike illumination,
+               multiplying intensity by intensity_scale does not
+               yield value with any standardized unit.
 
 What:          /sys/bus/iio/devices/iio:deviceX/out_voltageY_hardwaregain
 What:          /sys/bus/iio/devices/iio:deviceX/in_intensity_hardwaregain
@@ -1574,6 +1587,8 @@ What:             /sys/.../iio:deviceX/in_intensityY_raw
 What:          /sys/.../iio:deviceX/in_intensityY_ir_raw
 What:          /sys/.../iio:deviceX/in_intensityY_both_raw
 What:          /sys/.../iio:deviceX/in_intensityY_uv_raw
+What:          /sys/.../iio:deviceX/in_intensityY_uva_raw
+What:          /sys/.../iio:deviceX/in_intensityY_uvb_raw
 What:          /sys/.../iio:deviceX/in_intensityY_duv_raw
 KernelVersion: 3.4
 Contact:       linux-iio@vger.kernel.org
@@ -1582,8 +1597,9 @@ Description:
                that measurements contain visible and infrared light
                components or just infrared light, respectively. Modifier
                uv indicates that measurements contain ultraviolet light
-               components. Modifier duv indicates that measurements
-               contain deep ultraviolet light components.
+               components. Modifiers uva, uvb and duv indicate that
+               measurements contain A, B or deep (C) ultraviolet light
+               components respectively.
 
 What:          /sys/.../iio:deviceX/in_uvindex_input
 KernelVersion: 4.6
@@ -2254,3 +2270,21 @@ Description:
                If a label is defined for this event add that to the event
                specific attributes. This is useful for userspace to be able to
                better identify an individual event.
+
+What:          /sys/.../events/in_accel_gesture_tap_wait_timeout
+KernelVersion: 6.7
+Contact:       linux-iio@vger.kernel.org
+Description:
+               Enable tap gesture confirmation with timeout.
+
+What:          /sys/.../events/in_accel_gesture_tap_wait_dur
+KernelVersion: 6.7
+Contact:       linux-iio@vger.kernel.org
+Description:
+               Timeout value in seconds for tap gesture confirmation.
+
+What:          /sys/.../events/in_accel_gesture_tap_wait_dur_available
+KernelVersion: 6.7
+Contact:       linux-iio@vger.kernel.org
+Description:
+               List of available timeout values for tap gesture confirmation.
index f6d9d72ce77b7048177a5ca2123a09a3a2412ed1..a6c307c4befa099e5578e6bc8526255fcf55ca97 100644 (file)
@@ -114,6 +114,45 @@ Description:
                speed of 1000Mbps of the named network device.
                Setting this value also immediately changes the LED state.
 
+What:          /sys/class/leds/<led>/link_2500
+Date:          Nov 2023
+KernelVersion: 6.8
+Contact:       linux-leds@vger.kernel.org
+Description:
+               Signal the link speed state of 2500Mbps of the named network device.
+
+               If set to 0 (default), the LED's normal state is off.
+
+               If set to 1, the LED's normal state reflects the link state
+               speed of 2500Mbps of the named network device.
+               Setting this value also immediately changes the LED state.
+
+What:          /sys/class/leds/<led>/link_5000
+Date:          Nov 2023
+KernelVersion: 6.8
+Contact:       linux-leds@vger.kernel.org
+Description:
+               Signal the link speed state of 5000Mbps of the named network device.
+
+               If set to 0 (default), the LED's normal state is off.
+
+               If set to 1, the LED's normal state reflects the link state
+               speed of 5000Mbps of the named network device.
+               Setting this value also immediately changes the LED state.
+
+What:          /sys/class/leds/<led>/link_10000
+Date:          Nov 2023
+KernelVersion: 6.8
+Contact:       linux-leds@vger.kernel.org
+Description:
+               Signal the link speed state of 10000Mbps of the named network device.
+
+               If set to 0 (default), the LED's normal state is off.
+
+               If set to 1, the LED's normal state reflects the link state
+               speed of 10000Mbps of the named network device.
+               Setting this value also immediately changes the LED state.
+
 What:          /sys/class/leds/<led>/half_duplex
 Date:          Jun 2023
 KernelVersion: 6.5
index 2bf6b24e781b061602b43ef5257fb56f096644d5..30cef9ac0f493a31b16ee03a57d14b0bd86a237f 100644 (file)
@@ -4,3 +4,59 @@ KernelVersion: 5.10
 Contact:       linux-leds@vger.kernel.org
 Description:
                Specifies the tty device name of the triggering tty
+
+What:          /sys/class/leds/<led>/rx
+Date:          February 2024
+KernelVersion: 6.8
+Description:
+               Signal reception (rx) of data on the named tty device.
+               If set to 0, the LED will not blink on reception.
+               If set to 1 (default), the LED will blink on reception.
+
+What:          /sys/class/leds/<led>/tx
+Date:          February 2024
+KernelVersion: 6.8
+Description:
+               Signal transmission (tx) of data on the named tty device.
+               If set to 0, the LED will not blink on transmission.
+               If set to 1 (default), the LED will blink on transmission.
+
+What:          /sys/class/leds/<led>/cts
+Date:          February 2024
+KernelVersion: 6.8
+Description:
+               CTS = Clear To Send
+               DCE is ready to accept data from the DTE.
+               If the line state is detected, the LED is switched on.
+               If set to 0 (default), the LED will not evaluate CTS.
+               If set to 1, the LED will evaluate CTS.
+
+What:          /sys/class/leds/<led>/dsr
+Date:          February 2024
+KernelVersion: 6.8
+Description:
+               DSR = Data Set Ready
+               DCE is ready to receive and send data.
+               If the line state is detected, the LED is switched on.
+               If set to 0 (default), the LED will not evaluate DSR.
+               If set to 1, the LED will evaluate DSR.
+
+What:          /sys/class/leds/<led>/dcd
+Date:          February 2024
+KernelVersion: 6.8
+Description:
+               DCD = Data Carrier Detect
+               DTE is receiving a carrier from the DCE.
+               If the line state is detected, the LED is switched on.
+               If set to 0 (default), the LED will not evaluate CAR (DCD).
+               If set to 1, the LED will evaluate CAR (DCD).
+
+What:          /sys/class/leds/<led>/rng
+Date:          February 2024
+KernelVersion: 6.8
+Description:
+               RNG = Ring Indicator
+               DCE has detected an incoming ring signal on the telephone
+               line. If the line state is detected, the LED is switched on.
+               If set to 0 (default), the LED will not evaluate RNG.
+               If set to 1, the LED will evaluate RNG.
index 906ff3ca928ac1389567a5f02bdc4e06c3980b38..5bff64d256c207c8a7d2c915e0e8affac191913c 100644 (file)
@@ -1,4 +1,4 @@
-What:          /sys/class/<iface>/queues/rx-<queue>/rps_cpus
+What:          /sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
 Date:          March 2010
 KernelVersion: 2.6.35
 Contact:       netdev@vger.kernel.org
@@ -8,7 +8,7 @@ Description:
                network device queue. Possible values depend on the number
                of available CPU(s) in the system.
 
-What:          /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
+What:          /sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
 Date:          April 2010
 KernelVersion: 2.6.35
 Contact:       netdev@vger.kernel.org
@@ -16,7 +16,7 @@ Description:
                Number of Receive Packet Steering flows being currently
                processed by this particular network device receive queue.
 
-What:          /sys/class/<iface>/queues/tx-<queue>/tx_timeout
+What:          /sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
 Date:          November 2011
 KernelVersion: 3.3
 Contact:       netdev@vger.kernel.org
@@ -24,7 +24,7 @@ Description:
                Indicates the number of transmit timeout events seen by this
                network interface transmit queue.
 
-What:          /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
+What:          /sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate
 Date:          March 2015
 KernelVersion: 4.1
 Contact:       netdev@vger.kernel.org
@@ -32,7 +32,7 @@ Description:
                A Mbps max-rate set for the queue, a value of zero means disabled,
                default is disabled.
 
-What:          /sys/class/<iface>/queues/tx-<queue>/xps_cpus
+What:          /sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
 Date:          November 2010
 KernelVersion: 2.6.38
 Contact:       netdev@vger.kernel.org
@@ -42,7 +42,7 @@ Description:
                network device transmit queue. Possible values depend on the
                number of available CPU(s) in the system.
 
-What:          /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
+What:          /sys/class/net/<iface>/queues/tx-<queue>/xps_rxqs
 Date:          June 2018
 KernelVersion: 4.18.0
 Contact:       netdev@vger.kernel.org
@@ -53,7 +53,7 @@ Description:
                number of available receive queue(s) in the network device.
                Default is disabled.
 
-What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
+What:          /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
 Date:          November 2011
 KernelVersion: 3.3
 Contact:       netdev@vger.kernel.org
@@ -62,7 +62,7 @@ Description:
                of this particular network device transmit queue.
                Default value is 1000.
 
-What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
+What:          /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
 Date:          November 2011
 KernelVersion: 3.3
 Contact:       netdev@vger.kernel.org
@@ -70,7 +70,7 @@ Description:
                Indicates the number of bytes (objects) in flight on this
                network device transmit queue.
 
-What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
+What:          /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
 Date:          November 2011
 KernelVersion: 3.3
 Contact:       netdev@vger.kernel.org
@@ -79,7 +79,7 @@ Description:
                on this network device transmit queue. This value is clamped
                to be within the bounds defined by limit_max and limit_min.
 
-What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
+What:          /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
 Date:          November 2011
 KernelVersion: 3.3
 Contact:       netdev@vger.kernel.org
@@ -88,7 +88,7 @@ Description:
                queued on this network device transmit queue. See
                include/linux/dynamic_queue_limits.h for the default value.
 
-What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
+What:          /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
 Date:          November 2011
 KernelVersion: 3.3
 Contact:       netdev@vger.kernel.org
index 8d7d8f05f6cd0a3d7bb9fa06f0a40730f5bae99b..92fe7c5c5ac1d1d981562d1b441f32a6bafdc1aa 100644 (file)
@@ -1,4 +1,4 @@
-What:          /sys/devices/.../hwmon/hwmon<i>/in0_input
+What:          /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/in0_input
 Date:          February 2023
 KernelVersion: 6.2
 Contact:       intel-gfx@lists.freedesktop.org
@@ -6,7 +6,7 @@ Description:    RO. Current Voltage in millivolt.
 
                Only supported for particular Intel i915 graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/power1_max
+What:          /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_max
 Date:          February 2023
 KernelVersion: 6.2
 Contact:       intel-gfx@lists.freedesktop.org
@@ -20,7 +20,7 @@ Description:  RW. Card reactive sustained  (PL1/Tau) power limit in microwatts.
 
                Only supported for particular Intel i915 graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/power1_rated_max
+What:          /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_rated_max
 Date:          February 2023
 KernelVersion: 6.2
 Contact:       intel-gfx@lists.freedesktop.org
@@ -28,7 +28,7 @@ Description:  RO. Card default power limit (default TDP setting).
 
                Only supported for particular Intel i915 graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
+What:          /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_max_interval
 Date:          February 2023
 KernelVersion: 6.2
 Contact:       intel-gfx@lists.freedesktop.org
@@ -37,7 +37,7 @@ Description:  RW. Sustained power limit interval (Tau in PL1/Tau) in
 
                Only supported for particular Intel i915 graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/power1_crit
+What:          /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_crit
 Date:          February 2023
 KernelVersion: 6.2
 Contact:       intel-gfx@lists.freedesktop.org
@@ -50,7 +50,7 @@ Description:  RW. Card reactive critical (I1) power limit in microwatts.
 
                Only supported for particular Intel i915 graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/curr1_crit
+What:          /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/curr1_crit
 Date:          February 2023
 KernelVersion: 6.2
 Contact:       intel-gfx@lists.freedesktop.org
@@ -63,7 +63,7 @@ Description:  RW. Card reactive critical (I1) power limit in milliamperes.
 
                Only supported for particular Intel i915 graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/energy1_input
+What:          /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/energy1_input
 Date:          February 2023
 KernelVersion: 6.2
 Contact:       intel-gfx@lists.freedesktop.org
index 8c321bc9dc04401e5b25fb2e4c2e509f0d2eba14..023fd82de3f70a61fb9c58c973690bc0fff38e12 100644 (file)
@@ -1,4 +1,4 @@
-What:          /sys/devices/.../hwmon/hwmon<i>/power1_max
+What:          /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max
 Date:          September 2023
 KernelVersion: 6.5
 Contact:       intel-xe@lists.freedesktop.org
@@ -12,7 +12,7 @@ Description:  RW. Card reactive sustained  (PL1) power limit in microwatts.
 
                Only supported for particular Intel xe graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/power1_rated_max
+What:          /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
 Date:          September 2023
 KernelVersion: 6.5
 Contact:       intel-xe@lists.freedesktop.org
@@ -20,7 +20,7 @@ Description:  RO. Card default power limit (default TDP setting).
 
                Only supported for particular Intel xe graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/power1_crit
+What:          /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_crit
 Date:          September 2023
 KernelVersion: 6.5
 Contact:       intel-xe@lists.freedesktop.org
@@ -33,7 +33,7 @@ Description:  RW. Card reactive critical (I1) power limit in microwatts.
 
                Only supported for particular Intel xe graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/curr1_crit
+What:          /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr1_crit
 Date:          September 2023
 KernelVersion: 6.5
 Contact:       intel-xe@lists.freedesktop.org
@@ -44,7 +44,7 @@ Description:  RW. Card reactive critical (I1) power limit in milliamperes.
                the operating frequency if the power averaged over a window
                exceeds this limit.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/in0_input
+What:          /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/in0_input
 Date:          September 2023
 KernelVersion: 6.5
 Contact:       intel-xe@lists.freedesktop.org
@@ -52,7 +52,7 @@ Description:  RO. Current Voltage in millivolt.
 
                Only supported for particular Intel xe graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/energy1_input
+What:          /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
 Date:          September 2023
 KernelVersion: 6.5
 Contact:       intel-xe@lists.freedesktop.org
@@ -60,7 +60,7 @@ Description:  RO. Energy input of device in microjoules.
 
                Only supported for particular Intel xe graphics platforms.
 
-What:          /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
+What:          /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max_interval
 Date:          October 2023
 KernelVersion: 6.6
 Contact:       intel-xe@lists.freedesktop.org
diff --git a/Documentation/ABI/testing/sysfs-firmware-initrd b/Documentation/ABI/testing/sysfs-firmware-initrd
new file mode 100644 (file)
index 0000000..20bf7cf
--- /dev/null
@@ -0,0 +1,8 @@
+What:          /sys/firmware/initrd
+Date:          December 2023
+Contact:       Alexander Graf <graf@amazon.com>
+Description:
+               When the kernel was booted with an initrd and the
+               "retain_initrd" option is set on the kernel command
+               line, /sys/firmware/initrd contains the contents of the
+               initrd that the kernel was booted with.
diff --git a/Documentation/ABI/testing/sysfs-nvmem-cells b/Documentation/ABI/testing/sysfs-nvmem-cells
new file mode 100644 (file)
index 0000000..7af70ad
--- /dev/null
@@ -0,0 +1,21 @@
+What:          /sys/bus/nvmem/devices/.../cells/<cell-name>
+Date:          May 2023
+KernelVersion: 6.5
+Contact:       Miquel Raynal <miquel.raynal@bootlin.com>
+Description:
+               The "cells" folder contains one file per cell exposed by the
+               NVMEM device. The name of the file is: <name>@<where>, with
+               <name> being the cell name and <where> its location in the NVMEM
+               device, in hexadecimal (without the '0x' prefix, to mimic device
+               tree node names). The length of the file is the size of the cell
+               (when known). The content of the file is the binary content of
+               the cell (may sometimes be ASCII, likely without trailing
+               character).
+               Note: This file is only present if CONFIG_NVMEM_SYSFS
+               is enabled.
+
+               Example::
+
+                 hexdump -C /sys/bus/nvmem/devices/1-00563/cells/product-name@d
+                 00000000  54 4e 34 38 4d 2d 50 2d  44 4e         |TN48M-P-DN|
+                 0000000a
index 2288b3665d160a87b352def3e122461256a21761..4d1cc5bdbcc5f9f945dd825aed99cad89982fdd4 100644 (file)
@@ -10,6 +10,7 @@ What:         /sys/devices/platform/silicom-platform/power_cycle
 Date:          November 2023
 KernelVersion: 6.7
 Contact:       Henry Shi <henrys@silicom-usa.com>
+Description:
                This file allows the user to power cycle the platform.
                Default value is 0; when set to 1, it powers down
                the platform, waits 5 seconds, then powers on the
index 2ec70121bfca5f265c0377cb0284350913dfa251..931077bb09535d7e992854e92671b3c5fcb39f31 100644 (file)
@@ -61,7 +61,7 @@ Conditions
 ==========
 
 The use of threaded interrupts is the most likely condition to trigger
-this problem today. Threaded interrupts may not be reenabled after the IRQ
+this problem today. Threaded interrupts may not be re-enabled after the IRQ
 handler wakes. These "one shot" conditions mean that the threaded interrupt
 needs to keep the interrupt line masked until the threaded handler has run.
 Especially when dealing with high data rate interrupts, the thread needs to
index c9400f02333bf1be27675eb82a20f7bbb272ca92..783d30b7bb428ac52ea10de79610fe28dfe6bab0 100644 (file)
@@ -236,7 +236,7 @@ including a full 'lspci -v' so we can add the quirks to the kernel.
 Disabling MSIs below a bridge
 -----------------------------
 
-Some PCI bridges are not able to route MSIs between busses properly.
+Some PCI bridges are not able to route MSIs between buses properly.
 In this case, MSIs must be disabled on all devices behind the bridge.
 
 Some bridges allow you to enable MSIs by changing some bits in their
index bd3c58c44befdd3a04811c81e4887bad6f321255..2d42998a89a6378a94521d49785c4f1632b25a34 100644 (file)
@@ -241,15 +241,22 @@ over a rather long period of time, but improvements are always welcome!
        srcu_struct.  The rules for the expedited RCU grace-period-wait
        primitives are the same as for their non-expedited counterparts.
 
-       If the updater uses call_rcu_tasks() or synchronize_rcu_tasks(),
-       then the readers must refrain from executing voluntary
-       context switches, that is, from blocking.  If the updater uses
-       call_rcu_tasks_trace() or synchronize_rcu_tasks_trace(), then
-       the corresponding readers must use rcu_read_lock_trace() and
-       rcu_read_unlock_trace().  If an updater uses call_rcu_tasks_rude()
-       or synchronize_rcu_tasks_rude(), then the corresponding readers
-       must use anything that disables preemption, for example,
-       preempt_disable() and preempt_enable().
+       Similarly, it is necessary to correctly use the RCU Tasks flavors:
+
+       a.      If the updater uses synchronize_rcu_tasks() or
+               call_rcu_tasks(), then the readers must refrain from
+               executing voluntary context switches, that is, from
+               blocking.
+
+       b.      If the updater uses call_rcu_tasks_trace()
+               or synchronize_rcu_tasks_trace(), then the
+               corresponding readers must use rcu_read_lock_trace()
+               and rcu_read_unlock_trace().
+
+       c.      If an updater uses call_rcu_tasks_rude() or
+               synchronize_rcu_tasks_rude(), then the corresponding
+               readers must use anything that disables preemption,
+               for example, preempt_disable() and preempt_enable().
 
        Mixing things up will result in confusion and broken kernels, and
        has even resulted in an exploitable security issue.  Therefore,
index 3b739f6243c85e83a5b5c066ff84a4d577339577..659d5913784d0d9e2d196a04bdfb7fadb57d5eed 100644 (file)
@@ -3,13 +3,26 @@
 PROPER CARE AND FEEDING OF RETURN VALUES FROM rcu_dereference()
 ===============================================================
 
-Most of the time, you can use values from rcu_dereference() or one of
-the similar primitives without worries.  Dereferencing (prefix "*"),
-field selection ("->"), assignment ("="), address-of ("&"), addition and
-subtraction of constants, and casts all work quite naturally and safely.
-
-It is nevertheless possible to get into trouble with other operations.
-Follow these rules to keep your RCU code working properly:
+Proper care and feeding of address and data dependencies is critically
+important to correct use of things like RCU.  To this end, the pointers
+returned from the rcu_dereference() family of primitives carry address and
+data dependencies.  These dependencies extend from the rcu_dereference()
+macro's load of the pointer to the later use of that pointer to compute
+either the address of a later memory access (representing an address
+dependency) or the value written by a later memory access (representing
+a data dependency).
+
+Most of the time, these dependencies are preserved, permitting you to
+freely use values from rcu_dereference().  For example, dereferencing
+(prefix "*"), field selection ("->"), assignment ("="), address-of
+("&"), casts, and addition or subtraction of constants all work quite
+naturally and safely.  However, because current compilers do not take
+either address or data dependencies into account it is still possible
+to get into trouble.
+
+Follow these rules to preserve the address and data dependencies emanating
+from your calls to rcu_dereference() and friends, thus keeping your RCU
+readers working properly:
 
 -      You must use one of the rcu_dereference() family of primitives
        to load an RCU-protected pointer, otherwise CONFIG_PROVE_RCU
index b3b6dfa85248ea2e9619c259a87b16108441d875..49e7beea6ae151aa85c44604a25c0d9ffec6dd92 100644 (file)
@@ -185,7 +185,7 @@ argument.
 Not all changes require that all scenarios be run.  For example, a change
 to Tree SRCU might run only the SRCU-N and SRCU-P scenarios using the
 --configs argument to kvm.sh as follows:  "--configs 'SRCU-N SRCU-P'".
-Large systems can run multiple copies of of the full set of scenarios,
+Large systems can run multiple copies of the full set of scenarios,
 for example, a system with 448 hardware threads can run five instances
 of the full set concurrently.  To make this happen::
 
index 89984dfececf0b0b07a937179808d27b8268cf4b..ae30301366379d067e5cb71b4fcb534bc53c4d40 100644 (file)
@@ -101,8 +101,8 @@ External References
 email threads
 -------------
 
-* `Initial discussion on the New subsystem for acceleration devices <https://lkml.org/lkml/2022/7/31/83>`_ - Oded Gabbay (2022)
-* `patch-set to add the new subsystem <https://lkml.org/lkml/2022/10/22/544>`_ - Oded Gabbay (2022)
+* `Initial discussion on the New subsystem for acceleration devices <https://lore.kernel.org/lkml/CAFCwf11=9qpNAepL7NL+YAV_QO=Wv6pnWPhKHKAepK3fNn+2Dg@mail.gmail.com/>`_ - Oded Gabbay (2022)
+* `patch-set to add the new subsystem <https://lore.kernel.org/lkml/20221022214622.18042-1-ogabbay@kernel.org/>`_ - Oded Gabbay (2022)
 
 Conference talks
 ----------------
index e53d76365aa7076ba3acb9131fb4123ec8de9446..36981c66782320a66bd2bd41b32e87c4521aeb45 100644 (file)
@@ -75,4 +75,4 @@ taking two different snapshots of feedback counters at time T1 and T2.
   delivered_counter_delta = fbc_t2[del] - fbc_t1[del]
   reference_counter_delta = fbc_t2[ref] - fbc_t1[ref]
 
-  delivered_perf = (refernce_perf x delivered_counter_delta) / reference_counter_delta
+  delivered_perf = (reference_perf x delivered_counter_delta) / reference_counter_delta
index 2646ed2e2d3e32751d3eacfc89a87b73bd2d79e4..9a65c670774ee822135c6206edd7a7c1f59c723d 100644 (file)
@@ -2,7 +2,8 @@
 TODO
 ====
 
-Version 2.14 December 21, 2018
+As of 6.7 kernel. See https://wiki.samba.org/index.php/LinuxCIFSKernel
+for list of features added by release
 
 A Partial List of Missing Features
 ==================================
@@ -12,22 +13,22 @@ for visible, important contributions to this module.  Here
 is a partial list of the known problems and missing features:
 
 a) SMB3 (and SMB3.1.1) missing optional features:
+   multichannel performance optimizations, algorithmic channel selection,
+   directory leases optimizations,
+   support for faster packet signing (GMAC),
+   support for compression over the network,
+   T10 copy offload i.e. "ODX" (copy chunk, and "Duplicate Extents" ioctl
+   are currently the only two server-side copy mechanisms supported)
 
-   - multichannel (partially integrated), integration of multichannel with RDMA
-   - directory leases (improved metadata caching). Currently only implemented for root dir
-   - T10 copy offload ie "ODX" (copy chunk, and "Duplicate Extents" ioctl
-     currently the only two server side copy mechanisms supported)
+b) Better optimized compounding and error handling for sparse file support,
+   perhaps addition of new optional SMB3.1.1 fsctls to make collapse range
+   and insert range more atomic
 
-b) improved sparse file support (fiemap and SEEK_HOLE are implemented
-   but additional features would be supportable by the protocol such
-   as FALLOC_FL_COLLAPSE_RANGE and FALLOC_FL_INSERT_RANGE)
-
-c) Directory entry caching relies on a 1 second timer, rather than
-   using Directory Leases, currently only the root file handle is cached longer
-   by leveraging Directory Leases
+c) Support for SMB3.1.1 over QUIC (and perhaps other socket based protocols
+   like SCTP)
 
 d) quota support (needs minor kernel change since quota calls otherwise
-    won't make it to network filesystems or deviceless filesystems).
+   won't make it to network filesystems or deviceless filesystems).
 
 e) Additional use cases can be optimized to use "compounding" (e.g.
    open/query/close and open/setinfo/close) to reduce the number of
@@ -92,23 +93,20 @@ t) split cifs and smb3 support into separate modules so legacy (and less
 
 v) Additional testing of POSIX Extensions for SMB3.1.1
 
-w) Add support for additional strong encryption types, and additional spnego
-   authentication mechanisms (see MS-SMB2).  GCM-256 is now partially implemented.
+w) Support for the Mac SMB3.1.1 extensions to improve interop with Apple servers
+
+x) Support for additional authentication options (e.g. IAKERB, peer-to-peer
+   Kerberos, SCRAM and others supported by existing servers)
 
-x) Finish support for SMB3.1.1 compression
+y) Improved tracing, more eBPF trace points, better scripts for performance
+   analysis
 
 Known Bugs
 ==========
 
 See https://bugzilla.samba.org - search on product "CifsVFS" for
 current bug list.  Also check http://bugzilla.kernel.org (Product = File System, Component = CIFS)
-
-1) existing symbolic links (Windows reparse points) are recognized but
-   can not be created remotely. They are implemented for Samba and those that
-   support the CIFS Unix extensions, although earlier versions of Samba
-   overly restrict the pathnames.
-2) follow_link and readdir code does not follow dfs junctions
-   but recognizes them
+and xfstest results e.g. https://wiki.samba.org/index.php/Xfstest-results-smb3
 
 Misc testing to do
 ==================
index 5f936b4b601881d3e031b71a1cc258c5ddb9585e..aa8290a29dc88b0efdf56500ea40b1a97ddccc9c 100644 (file)
@@ -81,7 +81,7 @@ much older and less secure than the default dialect SMB3 which includes
 many advanced security features such as downgrade attack detection
 and encrypted shares and stronger signing and authentication algorithms.
 There are additional mount options that may be helpful for SMB3 to get
-improved POSIX behavior (NB: can use vers=3.0 to force only SMB3, never 2.1):
+improved POSIX behavior (NB: can use vers=3 to force SMB3 or later, never 2.1):
 
    ``mfsymlinks`` and either ``cifsacl`` or ``modefromsid`` (usually with ``idsfromsid``)
 
@@ -715,6 +715,7 @@ DebugData           Displays information about active CIFS sessions and
 Stats                  Lists summary resource usage information as well as per
                        share statistics.
 open_files             List all the open file handles on all active SMB sessions.
+mount_params            List of all mount parameters available for the module
 ======================= =======================================================
 
 Configuration pseudo-files:
@@ -864,6 +865,11 @@ i.e.::
 
     echo "value" > /sys/module/cifs/parameters/<param>
 
+More detailed descriptions of the available module parameters and their values
+can be seen by doing:
+
+    modinfo cifs (or modinfo smb3)
+
 ================= ==========================================================
 1. enable_oplocks Enable or disable oplocks. Oplocks are enabled by default.
                  [Y/y/1]. To disable use any of [N/n/0].
index 8390549235304f07ff29a283fa1f89a73d574727..94c98be1329a42def5a81307df77774f48645faf 100644 (file)
                    ...
                 185 = /dev/ttyNX15             Hilscher netX serial port 15
                 186 = /dev/ttyJ0               JTAG1 DCC protocol based serial port emulation
+
+                If the maximum number of uartlite serial ports is more than 4, then the
+                driver uses dynamic allocation instead of static allocation for the major number.
                 187 = /dev/ttyUL0              Xilinx uartlite - port 0
                    ...
                 190 = /dev/ttyUL3              Xilinx uartlite - port 3
index 8c167082a84f9e889724f66d2d74e22cb6e40f83..7651eca38227d0915fa77673877530eb25eb23f5 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features
+.. kernel-feat:: features
index d494601717f1f6fe1b8dba6261927a26247febfe..bfc39f1cf470e646785092bd17a6821ca3091d98 100644 (file)
@@ -14,10 +14,9 @@ into that core.
 
 To make the most effective use of these mechanisms, you
 should download the support software as well.  Download the
-latest version of the "rng-tools" package from the
-hw_random driver's official Web site:
+latest version of the "rng-tools" package from:
 
-       http://sourceforge.net/projects/gkernel/
+       https://github.com/nhorman/rng-tools
 
 Those tools use /dev/hwrng to fill the kernel entropy pool,
 which is used internally and exported by the /dev/urandom and
index 102937bc8443a23d88b952b4d7278e5e6cd25c21..4410384596a90b0ab26b4cf43bac54aaf78193fe 100644 (file)
@@ -218,8 +218,3 @@ bytes respectively. Such letter suffixes can also be entirely omitted:
 
 .. include:: kernel-parameters.txt
    :literal:
-
-Todo
-----
-
-       Add more DRM drivers.
index 505af40e97bc07ab0d6b7ed4d40989f10823a5ec..31b3a25680d08cfac3603d58b3d3783bbf1e34bb 100644 (file)
                        memory region [offset, offset + size] for that kernel
                        image. If '@offset' is omitted, then a suitable offset
                        is selected automatically.
-                       [KNL, X86-64, ARM64, RISCV] Select a region under 4G first, and
-                       fall back to reserve region above 4G when '@offset'
-                       hasn't been specified.
+                       [KNL, X86-64, ARM64, RISCV, LoongArch] Select a region
+                       under 4G first, and fall back to reserve region above
+                       4G when '@offset' hasn't been specified.
                        See Documentation/admin-guide/kdump/kdump.rst for further details.
 
        crashkernel=range1:size1[,range2:size2,...][@offset]
                        Documentation/admin-guide/kdump/kdump.rst for an example.
 
        crashkernel=size[KMG],high
-                       [KNL, X86-64, ARM64, RISCV] range could be above 4G.
+                       [KNL, X86-64, ARM64, RISCV, LoongArch] range could be
+                       above 4G.
                        Allow kernel to allocate physical memory region from top,
                        so could be above 4G if system have more than 4G ram
                        installed. Otherwise memory region will be allocated
                        below 4G, if available.
                        It will be ignored if crashkernel=X is specified.
        crashkernel=size[KMG],low
-                       [KNL, X86-64, ARM64, RISCV] range under 4G. When crashkernel=X,high
-                       is passed, kernel could allocate physical memory region
-                       above 4G, that cause second kernel crash on system
-                       that require some amount of low memory, e.g. swiotlb
-                       requires at least 64M+32K low memory, also enough extra
-                       low memory is needed to make sure DMA buffers for 32-bit
-                       devices won't run out. Kernel would try to allocate
+                       [KNL, X86-64, ARM64, RISCV, LoongArch] range under 4G.
+                       When crashkernel=X,high is passed, kernel could allocate
+                       physical memory region above 4G, that cause second kernel
+                       crash on system that require some amount of low memory,
+                       e.g. swiotlb requires at least 64M+32K low memory, also
+                       enough extra low memory is needed to make sure DMA buffers
+                       for 32-bit devices won't run out. Kernel would try to allocate
                        default size of memory below 4G automatically. The default
                        size is platform dependent.
                          --> x86: max(swiotlb_size_or_default() + 8MiB, 256MiB)
                          --> arm64: 128MiB
                          --> riscv: 128MiB
+                         --> loongarch: 128MiB
                        This one lets the user specify own low range under 4G
                        for second kernel instead.
                        0: to disable low allocation.
                        between unregistering the boot console and initializing
                        the real console.
 
-       keepinitrd      [HW,ARM]
+       keepinitrd      [HW,ARM] See retain_initrd.
 
        kernelcore=     [KNL,X86,IA-64,PPC]
                        Format: nn[KMGTPE] | nn% | "mirror"
                        vulnerability. System may allow data leaks with this
                        option.
 
-       no-steal-acc    [X86,PV_OPS,ARM64,PPC/PSERIES] Disable paravirtualized
-                       steal time accounting. steal time is computed, but
-                       won't influence scheduler behaviour
+       no-steal-acc    [X86,PV_OPS,ARM64,PPC/PSERIES,RISCV] Disable
+                       paravirtualized steal time accounting. steal time is
+                       computed, but won't influence scheduler behaviour
 
        nosync          [HW,M68K] Disables sync negotiation for all devices.
 
                        Dump ftrace buffer after reporting RCU CPU
                        stall warning.
 
+       rcupdate.rcu_cpu_stall_notifiers= [KNL]
+                       Provide RCU CPU stall notifiers, but see the
+                       warnings in the RCU_CPU_STALL_NOTIFIER Kconfig
+                       option's help text.  TL;DR:  You almost certainly
+                       do not want rcupdate.rcu_cpu_stall_notifiers.
+
        rcupdate.rcu_cpu_stall_suppress= [KNL]
                        Suppress RCU CPU stall warning messages.
 
                        Useful for devices that are detected asynchronously
                        (e.g. USB and MMC devices).
 
-       retain_initrd   [RAM] Keep initrd memory after extraction
+       retain_initrd   [RAM] Keep initrd memory after extraction. After boot, it will
+                       be accessible via /sys/firmware/initrd.
 
        retbleed=       [X86] Control mitigation of RETBleed (Arbitrary
                        Speculative Code Execution with Return Instructions)
                                        pause after every control message);
                                o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
                                        delay after resetting its port);
+                               p = USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT
+                                       (Reduce timeout of the SET_ADDRESS
+                                       request from 5000 ms to 500 ms);
                        Example: quirks=0781:5580:bk,0a5c:5834:gij
 
        usbhid.mousepoll=
index 993c2a05f5eeab65f9e3d3a5464ac26513452472..b6aeae3327ceb537b78fdbd86961ae670614395b 100644 (file)
@@ -243,13 +243,9 @@ To reduce its OS jitter, do any of the following:
 3.     Do any of the following needed to avoid jitter that your
        application cannot tolerate:
 
-       a.      Build your kernel with CONFIG_SLUB=y rather than
-               CONFIG_SLAB=y, thus avoiding the slab allocator's periodic
-               use of each CPU's workqueues to run its cache_reap()
-               function.
-       b.      Avoid using oprofile, thus avoiding OS jitter from
+       a.      Avoid using oprofile, thus avoiding OS jitter from
                wq_sync_buffer().
-       c.      Limit your CPU frequency so that a CPU-frequency
+       b.      Limit your CPU frequency so that a CPU-frequency
                governor is not required, possibly enlisting the aid of
                special heatsinks or other cooling technologies.  If done
                correctly, and if you CPU architecture permits, you should
@@ -259,7 +255,7 @@ To reduce its OS jitter, do any of the following:
 
                WARNING:  Please check your CPU specifications to
                make sure that this is safe on your particular system.
-       d.      As of v3.18, Christoph Lameter's on-demand vmstat workers
+       c.      As of v3.18, Christoph Lameter's on-demand vmstat workers
                commit prevents OS jitter due to vmstat_update() on
                CONFIG_SMP=y systems.  Before v3.18, is not possible
                to entirely get rid of the OS jitter, but you can
@@ -274,7 +270,7 @@ To reduce its OS jitter, do any of the following:
                (based on an earlier one from Gilad Ben-Yossef) that
                reduces or even eliminates vmstat overhead for some
                workloads at https://lore.kernel.org/r/00000140e9dfd6bd-40db3d4f-c1be-434f-8132-7820f81bb586-000000@email.amazonses.com.
-       e.      If running on high-end powerpc servers, build with
+       d.      If running on high-end powerpc servers, build with
                CONFIG_PPC_RTAS_DAEMON=n.  This prevents the RTAS
                daemon from running on each CPU every second or so.
                (This will require editing Kconfig files and will defeat
@@ -282,12 +278,12 @@ To reduce its OS jitter, do any of the following:
                due to the rtas_event_scan() function.
                WARNING:  Please check your CPU specifications to
                make sure that this is safe on your particular system.
-       f.      If running on Cell Processor, build your kernel with
+       e.      If running on Cell Processor, build your kernel with
                CBE_CPUFREQ_SPU_GOVERNOR=n to avoid OS jitter from
                spu_gov_work().
                WARNING:  Please check your CPU specifications to
                make sure that this is safe on your particular system.
-       g.      If running on PowerMAC, build your kernel with
+       f.      If running on PowerMAC, build your kernel with
                CONFIG_PMAC_RACKMETER=n to disable the CPU-meter,
                avoiding OS jitter from rackmeter_do_timer().
 
index 1cf40f69278cd2a1444878803207e16a574ac774..9eb26014d34b6cda600b557102e25a99447ac073 100644 (file)
@@ -361,7 +361,7 @@ Global Attributes
 
 ``amd-pstate`` exposes several global attributes (files) in ``sysfs`` to
 control its functionality at the system level.  They are located in the
-``/sys/devices/system/cpu/amd-pstate/`` directory and affect all CPUs.
+``/sys/devices/system/cpu/amd_pstate/`` directory and affect all CPUs.
 
 ``status``
        Operation mode of the driver: "active", "passive" or "disable".
index 51906e47327b62e1f7e1eabfb69ddce055d82ee7..2f2e5bd440f9b340f1ffb910996b592ef79f645e 100644 (file)
@@ -75,10 +75,19 @@ On other
        submit a patch to be included in this section.
 
 On all
-       Write a character to /proc/sysrq-trigger.  e.g.::
+       Write a single character to /proc/sysrq-trigger.
+       Only the first character is processed, the rest of the string is
+       ignored. However, it is not recommended to write any extra characters
+       as the behavior is undefined and might change in future versions.
+       E.g.::
 
                echo t > /proc/sysrq-trigger
 
+       Alternatively, write multiple characters prepended by underscore.
+       This way, all characters will be processed. E.g.::
+
+               echo _reisub > /proc/sysrq-trigger
+
 The :kbd:`<command key>` is case sensitive.
 
 What are the 'command' keys?
index b793583d688a460d82967faab3336ed962cc44d9..49ff446ff744ccddf8f4c1fb6717b7878b673ca3 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features arc
+.. kernel-feat:: features arc
index 7414ec03dd157c7ab6fc677f5a0af6fb93a88ec3..0e76aaf68ecab2233df316602f0da9e47608c73c 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features arm
+.. kernel-feat:: features arm
index dfa4cb3cd3efa570755ffd428e89e413d09b8b84..03321f4309d0be8acc1c8062626ec0d520ad5398 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features arm64
+.. kernel-feat:: features arm64
index f47f63bcf67c91b5b401ff25019aeb170e12a95f..e8c2ce1f9df68df5976b7cc536d3f48c0501ba4b 100644 (file)
@@ -71,6 +71,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2658417        | ARM64_ERRATUM_2658417       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #3117295        | ARM64_ERRATUM_3117295       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A520     | #2966298        | ARM64_ERRATUM_2966298       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
@@ -117,6 +119,10 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A76      | #1490853        | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A77      | #1491015        | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2119858        | ARM64_ERRATUM_2119858       |
@@ -127,6 +133,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A715     | #2645198        | ARM64_ERRATUM_2645198       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-X1       | #1502854        | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
@@ -135,6 +143,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-N1     | #1490853        | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1542419        | ARM64_ERRATUM_1542419       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N2     | #2139208        | ARM64_ERRATUM_2139208       |
@@ -143,6 +153,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N2     | #2253138        | ARM64_ERRATUM_2253138       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-V1     | #1619801        | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-600         | #1076982,1209401| N/A                         |
@@ -225,11 +237,9 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Rockchip       | RK3588          | #3588001        | ROCKCHIP_ERRATUM_3588001    |
 +----------------+-----------------+-----------------+-----------------------------+
-
 +----------------+-----------------+-----------------+-----------------------------+
 | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
 +----------------+-----------------+-----------------+-----------------------------+
-
 +----------------+-----------------+-----------------+-----------------------------+
 | ASR            | ASR8601         | #8601001        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
index ebacade3ea454e2daf54ef0957ba1b49b4501412..009f44c7951f8a7fad377e3aa43e59f3c9a3a240 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features loongarch
+.. kernel-feat:: features loongarch
index 5107a21194724ec3c756d5f68689013b80ed6b33..de7f0ccf7fc8edf4be22f4192832dd82c8b7069b 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features m68k
+.. kernel-feat:: features m68k
index 1973d729b29a98756de8c0192224af913f2ff5c6..6e0ffe3e73540041f670d4b8f8f724036b63936e 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features mips
+.. kernel-feat:: features mips
index 8449e63f69b2b4f0380b08891a113f6695c644cf..89913810ccb5a0c4978c38141275be35ec40a08d 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features nios2
+.. kernel-feat:: features nios2
index 3f7c40d219f2cc3e278eb9b77322a29fa82e4aea..bae2e25adfd642b6b98dbad6a6b1d0ba550c6ba7 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features openrisc
+.. kernel-feat:: features openrisc
index 501d7c450037904bcd328274ec306287037e4964..b3aa4d243b9362584c60a9d9e23727c4df4a4cf8 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features parisc
+.. kernel-feat:: features parisc
index aeae73df86b0c58e02458c2f6f98cc47dbc73fb7..ee4b95e04202d31feaf0a81619682ee64c6dcc0f 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features powerpc
+.. kernel-feat:: features powerpc
index c70ef6ac2368c9a49ae22012145bdfd0f443288d..36e90144adabd18b2dce8f25ae9e5887dd66bd37 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features riscv
+.. kernel-feat:: features riscv
index 7b2384de471f8fa5813271a010f8f5f011e7d8c3..b2bcc9eed9aa9d5e004b562a1741e6b387a82050 100644 (file)
@@ -12,7 +12,7 @@ is defined in <asm/hwprobe.h>::
     };
 
     long sys_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
-                           size_t cpu_count, cpu_set_t *cpus,
+                           size_t cpusetsize, cpu_set_t *cpus,
                            unsigned int flags);
 
 The arguments are split into three groups: an array of key-value pairs, a CPU
@@ -20,12 +20,26 @@ set, and some flags. The key-value pairs are supplied with a count. Userspace
 must prepopulate the key field for each element, and the kernel will fill in the
 value if the key is recognized. If a key is unknown to the kernel, its key field
 will be cleared to -1, and its value set to 0. The CPU set is defined by
-CPU_SET(3). For value-like keys (eg. vendor/arch/impl), the returned value will
-be only be valid if all CPUs in the given set have the same value. Otherwise -1
-will be returned. For boolean-like keys, the value returned will be a logical
-AND of the values for the specified CPUs. Usermode can supply NULL for cpus and
-0 for cpu_count as a shortcut for all online CPUs. There are currently no flags,
-this value must be zero for future compatibility.
+CPU_SET(3) with size ``cpusetsize`` bytes. For value-like keys (eg. vendor,
+arch, impl), the returned value will only be valid if all CPUs in the given set
+have the same value. Otherwise -1 will be returned. For boolean-like keys, the
+value returned will be a logical AND of the values for the specified CPUs.
+Usermode can supply NULL for ``cpus`` and 0 for ``cpusetsize`` as a shortcut for
+all online CPUs. The currently supported flags are:
+
+* :c:macro:`RISCV_HWPROBE_WHICH_CPUS`: This flag basically reverses the behavior
+  of sys_riscv_hwprobe().  Instead of populating the values of keys for a given
+  set of CPUs, the values of each key are given and the set of CPUs is reduced
+  by sys_riscv_hwprobe() to only those which match each of the key-value pairs.
+  How matching is done depends on the key type.  For value-like keys, matching
+  means to be the exact same as the value.  For boolean-like keys, matching
+  means the result of a logical AND of the pair's value with the CPU's value is
+  exactly the same as the pair's value.  Additionally, when ``cpus`` is an empty
+  set, then it is initialized to all online CPUs which fit within it, i.e. the
+  CPU set returned is the reduction of all the online CPUs which can be
+  represented with a CPU set of size ``cpusetsize``.
+
+All other flags are reserved for future compatibility and must be zero.
 
 On success 0 is returned, on failure a negative error code is returned.
 
@@ -80,6 +94,100 @@ The following keys are defined:
   * :c:macro:`RISCV_HWPROBE_EXT_ZICBOZ`: The Zicboz extension is supported, as
        ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs.
 
+  * :c:macro:`RISCV_HWPROBE_EXT_ZBC` The Zbc extension is supported, as defined
+       in version 1.0 of the Bit-Manipulation ISA extensions.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZBKB` The Zbkb extension is supported, as
+       defined in version 1.0 of the Scalar Crypto ISA extensions.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZBKC` The Zbkc extension is supported, as
+       defined in version 1.0 of the Scalar Crypto ISA extensions.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZBKX` The Zbkx extension is supported, as
+       defined in version 1.0 of the Scalar Crypto ISA extensions.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZKND` The Zknd extension is supported, as
+       defined in version 1.0 of the Scalar Crypto ISA extensions.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZKNE` The Zkne extension is supported, as
+       defined in version 1.0 of the Scalar Crypto ISA extensions.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZKNH` The Zknh extension is supported, as
+       defined in version 1.0 of the Scalar Crypto ISA extensions.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZKSED` The Zksed extension is supported, as
+       defined in version 1.0 of the Scalar Crypto ISA extensions.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZKSH` The Zksh extension is supported, as
+       defined in version 1.0 of the Scalar Crypto ISA extensions.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZKT` The Zkt extension is supported, as defined
+       in version 1.0 of the Scalar Crypto ISA extensions.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVBB`: The Zvbb extension is supported as
+       defined in version 1.0 of the RISC-V Cryptography Extensions Volume II.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVBC`: The Zvbc extension is supported as
+       defined in version 1.0 of the RISC-V Cryptography Extensions Volume II.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVKB`: The Zvkb extension is supported as
+       defined in version 1.0 of the RISC-V Cryptography Extensions Volume II.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVKG`: The Zvkg extension is supported as
+       defined in version 1.0 of the RISC-V Cryptography Extensions Volume II.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVKNED`: The Zvkned extension is supported as
+       defined in version 1.0 of the RISC-V Cryptography Extensions Volume II.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVKNHA`: The Zvknha extension is supported as
+       defined in version 1.0 of the RISC-V Cryptography Extensions Volume II.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVKNHB`: The Zvknhb extension is supported as
+       defined in version 1.0 of the RISC-V Cryptography Extensions Volume II.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVKSED`: The Zvksed extension is supported as
+       defined in version 1.0 of the RISC-V Cryptography Extensions Volume II.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVKSH`: The Zvksh extension is supported as
+       defined in version 1.0 of the RISC-V Cryptography Extensions Volume II.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVKT`: The Zvkt extension is supported as
+       defined in version 1.0 of the RISC-V Cryptography Extensions Volume II.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZFH`: The Zfh extension version 1.0 is supported
+       as defined in the RISC-V ISA manual.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZFHMIN`: The Zfhmin extension version 1.0 is
+       supported as defined in the RISC-V ISA manual.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZIHINTNTL`: The Zihintntl extension version 1.0
+       is supported as defined in the RISC-V ISA manual.
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVFH`: The Zvfh extension is supported as
+       defined in the RISC-V Vector manual starting from commit e2ccd0548d6c
+       ("Remove draft warnings from Zvfh[min]").
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZVFHMIN`: The Zvfhmin extension is supported as
+       defined in the RISC-V Vector manual starting from commit e2ccd0548d6c
+       ("Remove draft warnings from Zvfh[min]").
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZFA`: The Zfa extension is supported as
+       defined in the RISC-V ISA manual starting from commit 056b6ff467c7
+       ("Zfa is ratified").
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZTSO`: The Ztso extension is supported as
+       defined in the RISC-V ISA manual starting from commit 5618fb5a216b
+       ("Ztso is now ratified.")
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZACAS`: The Zacas extension is supported as
+       defined in the Atomic Compare-and-Swap (CAS) instructions manual starting
+       from commit 5059e0ca641c ("update to ratified").
+
+  * :c:macro:`RISCV_HWPROBE_EXT_ZICOND`: The Zicond extension is supported as
+       defined in the RISC-V Integer Conditional (Zicond) operations extension
+       manual starting from commit 95cf1f9 ("Add changes requested by Ved
+       during signoff")
+
 * :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance
   information about the selected set of processors.
 
index 57c296a9d8f30d53d89be42c0f156852659bafea..2883dc95068173b19be3e3bbbe65f4da428255a0 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features s390
+.. kernel-feat:: features s390
index f722af3b6c9934b8fce94f306f083947892088b8..fae48fe81e9bd013bc63e92ac586481a3ab936c6 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features sh
+.. kernel-feat:: features sh
index c0c92468b0fe909ee1bb01c841531c06c0fead88..96835b6d598a1a8610d9518542d03c0314922f6c 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features sparc
+.. kernel-feat:: features sparc
index b663f15053ce85b2f3dcb31967e6a67aa5089265..a33616346a388cd7ea602f70f788506913c805e4 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features x86
+.. kernel-feat:: features x86
index dc8d9fd2c3f76cbb12b85229cba2101671d56cf4..719043cd8b46999301d32e77060b8340923a476d 100644 (file)
@@ -10,6 +10,191 @@ encrypting the guest memory. In TDX, a special module running in a special
 mode sits between the host and the guest and manages the guest/host
 separation.
 
+TDX Host Kernel Support
+=======================
+
+TDX introduces a new CPU mode called Secure Arbitration Mode (SEAM) and
+a new isolated range pointed to by the SEAM Range Register (SEAMRR).  A
+CPU-attested software module called 'the TDX module' runs inside the new
+isolated range to provide the functionalities to manage and run protected
+VMs.
+
+TDX also leverages Intel Multi-Key Total Memory Encryption (MKTME) to
+provide crypto-protection to the VMs.  TDX reserves part of MKTME KeyIDs
+as TDX private KeyIDs, which are only accessible within the SEAM mode.
+BIOS is responsible for partitioning legacy MKTME KeyIDs and TDX KeyIDs.
+
+Before the TDX module can be used to create and run protected VMs, it
+must be loaded into the isolated range and properly initialized.  The TDX
+architecture doesn't require the BIOS to load the TDX module, but the
+kernel assumes it is loaded by the BIOS.
+
+TDX boot-time detection
+-----------------------
+
+The kernel detects TDX by detecting TDX private KeyIDs during kernel
+boot.  Below dmesg shows when TDX is enabled by BIOS::
+
+  [..] virt/tdx: BIOS enabled: private KeyID range: [16, 64)
+
+TDX module initialization
+-------------------------
+
+The kernel talks to the TDX module via the new SEAMCALL instruction.  The
+TDX module implements SEAMCALL leaf functions to allow the kernel to
+initialize it.
+
+If the TDX module isn't loaded, the SEAMCALL instruction fails with a
+special error.  In this case the kernel fails the module initialization
+and reports the module isn't loaded::
+
+  [..] virt/tdx: module not loaded
+
+Initializing the TDX module consumes roughly 1/256th of the system RAM
+size to use it as 'metadata' for the TDX memory.  It also takes additional
+CPU time to initialize that metadata along with the TDX module itself.
+Both are not trivial.  The kernel initializes the TDX module at runtime
+on demand.
+
+Besides initializing the TDX module, a per-cpu initialization SEAMCALL
+must be done on one cpu before any other SEAMCALLs can be made on that
+cpu.
+
+The kernel provides two functions, tdx_enable() and tdx_cpu_enable() to
+allow the user of TDX to enable the TDX module and enable TDX on local
+cpu respectively.
+
+Making SEAMCALL requires VMXON has been done on that CPU.  Currently only
+KVM implements VMXON.  For now both tdx_enable() and tdx_cpu_enable()
+don't do VMXON internally (not trivial), but depend on the caller to
+guarantee that.
+
+To enable TDX, the caller of TDX should: 1) temporarily disable CPU
+hotplug; 2) do VMXON and tdx_cpu_enable() on all online cpus; 3) call
+tdx_enable().  For example::
+
+        cpus_read_lock();
+        on_each_cpu(vmxon_and_tdx_cpu_enable);
+        ret = tdx_enable();
+        cpus_read_unlock();
+        if (ret)
+                goto no_tdx;
+        // TDX is ready to use
+
+And the caller of TDX must guarantee that tdx_cpu_enable() has been
+successfully done on a cpu before it wants to run any other SEAMCALL.
+A typical usage is to do both VMXON and tdx_cpu_enable() in the CPU
+hotplug online callback, and refuse to online if tdx_cpu_enable() fails.
+
+User can consult dmesg to see whether the TDX module has been initialized.
+
+If the TDX module is initialized successfully, dmesg shows something
+like below::
+
+  [..] virt/tdx: 262668 KBs allocated for PAMT
+  [..] virt/tdx: module initialized
+
+If the TDX module failed to initialize, dmesg also shows it failed to
+initialize::
+
+  [..] virt/tdx: module initialization failed ...
+
+TDX Interaction with Other Kernel Components
+--------------------------------------------
+
+TDX Memory Policy
+~~~~~~~~~~~~~~~~~
+
+TDX reports a list of "Convertible Memory Region" (CMR) to tell the
+kernel which memory is TDX compatible.  The kernel needs to build a list
+of memory regions (out of CMRs) as "TDX-usable" memory and pass those
+regions to the TDX module.  Once this is done, those "TDX-usable" memory
+regions are fixed during module's lifetime.
+
+To keep things simple, currently the kernel simply guarantees all pages
+in the page allocator are TDX memory.  Specifically, the kernel uses all
+system memory in the core-mm "at the time of TDX module initialization"
+as TDX memory, and in the meantime, refuses to online any non-TDX-memory
+in the memory hotplug.
+
+Physical Memory Hotplug
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Note TDX assumes convertible memory is always physically present during
+machine's runtime.  A non-buggy BIOS should never support hot-removal of
+any convertible memory.  This implementation doesn't handle ACPI memory
+removal but depends on the BIOS to behave correctly.
+
+CPU Hotplug
+~~~~~~~~~~~
+
+The TDX module requires that the per-cpu initialization SEAMCALL be done
+on a cpu before any other SEAMCALLs can be made on that cpu.  The kernel
+provides tdx_cpu_enable() to let the user of TDX do it when the user
+wants to use a new cpu for a TDX task.
+
+TDX doesn't support physical (ACPI) CPU hotplug.  During machine boot,
+TDX verifies all boot-time present logical CPUs are TDX compatible before
+enabling TDX.  A non-buggy BIOS should never support hot-add/removal of
+physical CPU.  Currently the kernel doesn't handle physical CPU hotplug,
+but depends on the BIOS to behave correctly.
+
+Note TDX works with CPU logical online/offline, thus the kernel still
+allows offlining a logical CPU and onlining it again.
+
+Kexec()
+~~~~~~~
+
+TDX host support currently lacks the ability to handle kexec.  For
+simplicity, only one of the two (TDX host support or kexec) can be
+enabled in the Kconfig.  This will be fixed in the future.
+
+Erratum
+~~~~~~~
+
+The first few generations of TDX hardware have an erratum.  A partial
+write to a TDX private memory cacheline will silently "poison" the
+line.  Subsequent reads will consume the poison and generate a machine
+check.
+
+A partial write is a memory write where a write transaction of less than
+cacheline lands at the memory controller.  The CPU does these via
+non-temporal write instructions (like MOVNTI), or through UC/WC memory
+mappings.  Devices can also do partial writes via DMA.
+
+Theoretically, a kernel bug could do a partial write to TDX private memory
+and trigger an unexpected machine check.  What's more, the machine check
+code will present these as "Hardware error" when they are, in fact,
+software-triggered issues.  But in the end, this issue is hard to trigger.
+
+If the platform has such an erratum, the kernel prints an additional message
+in the machine check handler to tell the user the machine check may be
+caused by a kernel bug on TDX private memory.
+
+Interaction vs S3 and deeper states
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+TDX cannot survive from S3 and deeper states.  The hardware resets and
+disables TDX completely when platform goes to S3 and deeper.  Both TDX
+guests and the TDX module get destroyed permanently.
+
+The kernel uses S3 for suspend-to-ram, and uses S4 and deeper states for
+hibernation.  Currently, for simplicity, the kernel chooses to make TDX
+mutually exclusive with S3 and hibernation.
+
+The kernel disables TDX during early boot when hibernation support is
+available::
+
+  [..] virt/tdx: initialization failed: Hibernation support is enabled
+
+Add 'nohibernate' to the kernel command line to disable hibernation in
+order to use TDX.
+
+ACPI S3 is disabled during kernel early boot if TDX is enabled.  The user
+needs to turn off TDX in the BIOS in order to use S3.
+
+TDX Guest Support
+=================
 Since the host cannot directly access guest registers or memory, much
 normal functionality of a hypervisor must be moved into the guest. This is
 implemented using a Virtualization Exception (#VE) that is handled by the
@@ -20,7 +205,7 @@ TDX includes new hypercall-like mechanisms for communicating from the
 guest to the hypervisor or the TDX module.
 
 New TDX Exceptions
-==================
+------------------
 
 TDX guests behave differently from bare-metal and traditional VMX guests.
 In TDX guests, otherwise normal instructions or memory accesses can cause
@@ -30,7 +215,7 @@ Instructions marked with an '*' conditionally cause exceptions.  The
 details for these instructions are discussed below.
 
 Instruction-based #VE
----------------------
+~~~~~~~~~~~~~~~~~~~~~
 
 - Port I/O (INS, OUTS, IN, OUT)
 - HLT
@@ -41,7 +226,7 @@ Instruction-based #VE
 - CPUID*
 
 Instruction-based #GP
----------------------
+~~~~~~~~~~~~~~~~~~~~~
 
 - All VMX instructions: INVEPT, INVVPID, VMCLEAR, VMFUNC, VMLAUNCH,
   VMPTRLD, VMPTRST, VMREAD, VMRESUME, VMWRITE, VMXOFF, VMXON
@@ -52,7 +237,7 @@ Instruction-based #GP
 - RDMSR*,WRMSR*
 
 RDMSR/WRMSR Behavior
---------------------
+~~~~~~~~~~~~~~~~~~~~
 
 MSR access behavior falls into three categories:
 
@@ -73,7 +258,7 @@ trapping and handling in the TDX module.  Other than possibly being slow,
 these MSRs appear to function just as they would on bare metal.
 
 CPUID Behavior
---------------
+~~~~~~~~~~~~~~
 
 For some CPUID leaves and sub-leaves, the virtualized bit fields of CPUID
 return values (in guest EAX/EBX/ECX/EDX) are configurable by the
@@ -93,7 +278,7 @@ not know how to handle. The guest kernel may ask the hypervisor for the
 value with a hypercall.
 
 #VE on Memory Accesses
-======================
+----------------------
 
 There are essentially two classes of TDX memory: private and shared.
 Private memory receives full TDX protections.  Its content is protected
@@ -107,7 +292,7 @@ entries.  This helps ensure that a guest does not place sensitive
 information in shared memory, exposing it to the untrusted hypervisor.
 
 #VE on Shared Memory
---------------------
+~~~~~~~~~~~~~~~~~~~~
 
 Access to shared mappings can cause a #VE.  The hypervisor ultimately
 controls whether a shared memory access causes a #VE, so the guest must be
@@ -127,7 +312,7 @@ be careful not to access device MMIO regions unless it is also prepared to
 handle a #VE.
 
 #VE on Private Pages
---------------------
+~~~~~~~~~~~~~~~~~~~~
 
 An access to private mappings can also cause a #VE.  Since all kernel
 memory is also private memory, the kernel might theoretically need to
@@ -145,7 +330,7 @@ The hypervisor is permitted to unilaterally move accepted pages to a
 to handle the exception.
 
 Linux #VE handler
-=================
+-----------------
 
 Just like page faults or #GP's, #VE exceptions can be either handled or be
 fatal.  Typically, an unhandled userspace #VE results in a SIGSEGV.
@@ -167,7 +352,7 @@ While the block is in place, any #VE is elevated to a double fault (#DF)
 which is not recoverable.
 
 MMIO handling
-=============
+-------------
 
 In non-TDX VMs, MMIO is usually implemented by giving a guest access to a
 mapping which will cause a VMEXIT on access, and then the hypervisor
@@ -189,7 +374,7 @@ MMIO access via other means (like structure overlays) may result in an
 oops.
 
 Shared Memory Conversions
-=========================
+-------------------------
 
 All TDX guest memory starts out as private at boot.  This memory can not
 be accessed by the hypervisor.  However, some kernel users like device
index 6b92c7bfa19daab87c01fd9e55a384076aee3887..28dcce1759be4bf212fc3e14a7b6691e34fbfedb 100644 (file)
@@ -1,3 +1,3 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-.. kernel-feat:: $srctree/Documentation/features xtensa
+.. kernel-feat:: features xtensa
index a25c6d5df87b20ff2149353adcf854b1b7965f80..4662e1ff3d81f28f57417ff70ac81f84220df88f 100644 (file)
@@ -6,17 +6,16 @@ Block io priorities
 Intro
 -----
 
-With the introduction of cfq v3 (aka cfq-ts or time sliced cfq), basic io
-priorities are supported for reads on files.  This enables users to io nice
-processes or process groups, similar to what has been possible with cpu
-scheduling for ages.  This document mainly details the current possibilities
-with cfq; other io schedulers do not support io priorities thus far.
+The io priority feature enables users to io nice processes or process groups,
+similar to what has been possible with cpu scheduling for ages. Support for io
+priorities is io scheduler dependent and currently supported by bfq and
+mq-deadline.
 
 Scheduling classes
 ------------------
 
-CFQ implements three generic scheduling classes that determine how io is
-served for a process.
+Three generic scheduling classes are implemented for io priorities that
+determine how io is served for a process.
 
 IOPRIO_CLASS_RT: This is the realtime io class. This scheduling class is given
 higher priority than any other in the system, processes from this class are
index 33c4539155d94b544b3e5b43ddcf07cb96cd0c22..3599cf9267b4766c4628dde7d27d98fb95e325a9 100644 (file)
@@ -446,7 +446,7 @@ The command used: ::
 
 There are 24 issuers, each issuing 64 IOs concurrently. ``--verify=sha512``
 makes ``fio`` generate and read back the content each time which makes
-execution locality matter between the issuer and ``kcryptd``. The followings
+execution locality matter between the issuer and ``kcryptd``. The following
 are the read bandwidths and CPU utilizations depending on different affinity
 scope settings on ``kcryptd`` measured over five runs. Bandwidths are in
 MiBps, and CPU util in percents.
diff --git a/Documentation/dev-tools/checkuapi.rst b/Documentation/dev-tools/checkuapi.rst
new file mode 100644 (file)
index 0000000..9072f21
--- /dev/null
@@ -0,0 +1,477 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+============
+UAPI Checker
+============
+
+The UAPI checker (``scripts/check-uapi.sh``) is a shell script which
+checks UAPI header files for userspace backwards-compatibility across
+the git tree.
+
+Options
+=======
+
+This section will describe the options with which ``check-uapi.sh``
+can be run.
+
+Usage::
+
+    check-uapi.sh [-b BASE_REF] [-p PAST_REF] [-j N] [-l ERROR_LOG] [-i] [-q] [-v]
+
+Available options::
+
+    -b BASE_REF    Base git reference to use for comparison. If unspecified or empty,
+                   will use any dirty changes in tree to UAPI files. If there are no
+                   dirty changes, HEAD will be used.
+    -p PAST_REF    Compare BASE_REF to PAST_REF (e.g. -p v6.1). If unspecified or empty,
+                   will use BASE_REF^1. Must be an ancestor of BASE_REF. Only headers
+                   that exist on PAST_REF will be checked for compatibility.
+    -j JOBS        Number of checks to run in parallel (default: number of CPU cores).
+    -l ERROR_LOG   Write error log to file (default: no error log is generated).
+    -i             Ignore ambiguous changes that may or may not break UAPI compatibility.
+    -q             Quiet operation.
+    -v             Verbose operation (print more information about each header being checked).
+
+Environmental args::
+
+    ABIDIFF  Custom path to abidiff binary
+    CC       C compiler (default is "gcc")
+    ARCH     Target architecture of C compiler (default is host arch)
+
+Exit codes::
+
+    0) Success
+    1) ABI difference detected
+    2) Prerequisite not met
+
+Examples
+========
+
+Basic Usage
+-----------
+
+First, let's try making a change to a UAPI header file that obviously
+won't break userspace::
+
+    cat << 'EOF' | patch -l -p1
+    --- a/include/uapi/linux/acct.h
+    +++ b/include/uapi/linux/acct.h
+    @@ -21,7 +21,9 @@
+     #include <asm/param.h>
+     #include <asm/byteorder.h>
+
+    -/*
+    +#define FOO
+    +
+    +/*
+      *  comp_t is a 16-bit "floating" point number with a 3-bit base 8
+      *  exponent and a 13-bit fraction.
+      *  comp2_t is 24-bit with 5-bit base 2 exponent and 20 bit fraction
+    diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+    EOF
+
+Now, let's use the script to validate::
+
+    % ./scripts/check-uapi.sh
+    Installing user-facing UAPI headers from dirty tree... OK
+    Installing user-facing UAPI headers from HEAD... OK
+    Checking changes to UAPI headers between HEAD and dirty tree...
+    All 912 UAPI headers compatible with x86 appear to be backwards compatible
+
+Let's add another change that *might* break userspace::
+
+    cat << 'EOF' | patch -l -p1
+    --- a/include/uapi/linux/bpf.h
+    +++ b/include/uapi/linux/bpf.h
+    @@ -74,7 +74,7 @@ struct bpf_insn {
+            __u8    dst_reg:4;      /* dest register */
+            __u8    src_reg:4;      /* source register */
+            __s16   off;            /* signed offset */
+    -       __s32   imm;            /* signed immediate constant */
+    +       __u32   imm;            /* unsigned immediate constant */
+     };
+
+     /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
+    EOF
+
+The script will catch this::
+
+    % ./scripts/check-uapi.sh
+    Installing user-facing UAPI headers from dirty tree... OK
+    Installing user-facing UAPI headers from HEAD... OK
+    Checking changes to UAPI headers between HEAD and dirty tree...
+    ==== ABI differences detected in include/linux/bpf.h from HEAD -> dirty tree ====
+        [C] 'struct bpf_insn' changed:
+          type size hasn't changed
+          1 data member change:
+            type of '__s32 imm' changed:
+              typedef name changed from __s32 to __u32 at int-ll64.h:27:1
+              underlying type 'int' changed:
+                type name changed from 'int' to 'unsigned int'
+                type size hasn't changed
+    ==================================================================================
+
+    error - 1/912 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+In this case, the script is reporting the type change because it could
+break a userspace program that passes in a negative number. Now, let's
+say you know that no userspace program could possibly be using a negative
+value in ``imm``, so changing to an unsigned type there shouldn't hurt
+anything. You can pass the ``-i`` flag to the script to ignore changes
+in which the userspace backwards compatibility is ambiguous::
+
+    % ./scripts/check-uapi.sh -i
+    Installing user-facing UAPI headers from dirty tree... OK
+    Installing user-facing UAPI headers from HEAD... OK
+    Checking changes to UAPI headers between HEAD and dirty tree...
+    All 912 UAPI headers compatible with x86 appear to be backwards compatible
+
+Now, let's make a similar change that *will* break userspace::
+
+    cat << 'EOF' | patch -l -p1
+    --- a/include/uapi/linux/bpf.h
+    +++ b/include/uapi/linux/bpf.h
+    @@ -71,8 +71,8 @@ enum {
+
+     struct bpf_insn {
+            __u8    code;           /* opcode */
+    -       __u8    dst_reg:4;      /* dest register */
+            __u8    src_reg:4;      /* source register */
+    +       __u8    dst_reg:4;      /* dest register */
+            __s16   off;            /* signed offset */
+            __s32   imm;            /* signed immediate constant */
+     };
+    EOF
+
+Since we're re-ordering an existing struct member, there's no ambiguity,
+and the script will report the breakage even if you pass ``-i``::
+
+    % ./scripts/check-uapi.sh -i
+    Installing user-facing UAPI headers from dirty tree... OK
+    Installing user-facing UAPI headers from HEAD... OK
+    Checking changes to UAPI headers between HEAD and dirty tree...
+    ==== ABI differences detected in include/linux/bpf.h from HEAD -> dirty tree ====
+        [C] 'struct bpf_insn' changed:
+          type size hasn't changed
+          2 data member changes:
+            '__u8 dst_reg' offset changed from 8 to 12 (in bits) (by +4 bits)
+            '__u8 src_reg' offset changed from 12 to 8 (in bits) (by -4 bits)
+    ==================================================================================
+
+    error - 1/912 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+Let's commit the breaking change, then commit the innocuous change::
+
+    % git commit -m 'Breaking UAPI change' include/uapi/linux/bpf.h
+    [detached HEAD f758e574663a] Breaking UAPI change
+     1 file changed, 1 insertion(+), 1 deletion(-)
+    % git commit -m 'Innocuous UAPI change' include/uapi/linux/acct.h
+    [detached HEAD 2e87df769081] Innocuous UAPI change
+     1 file changed, 3 insertions(+), 1 deletion(-)
+
+Now, let's run the script again with no arguments::
+
+    % ./scripts/check-uapi.sh
+    Installing user-facing UAPI headers from HEAD... OK
+    Installing user-facing UAPI headers from HEAD^1... OK
+    Checking changes to UAPI headers between HEAD^1 and HEAD...
+    All 912 UAPI headers compatible with x86 appear to be backwards compatible
+
+It doesn't catch any breaking change because, by default, it only
+compares ``HEAD`` to ``HEAD^1``. The breaking change was committed on
+``HEAD~2``. If we wanted the search scope to go back further, we'd have to
+use the ``-p`` option to pass a different past reference. In this case,
+let's pass ``-p HEAD~2`` to the script so it checks UAPI changes between
+``HEAD~2`` and ``HEAD``::
+
+    % ./scripts/check-uapi.sh -p HEAD~2
+    Installing user-facing UAPI headers from HEAD... OK
+    Installing user-facing UAPI headers from HEAD~2... OK
+    Checking changes to UAPI headers between HEAD~2 and HEAD...
+    ==== ABI differences detected in include/linux/bpf.h from HEAD~2 -> HEAD ====
+        [C] 'struct bpf_insn' changed:
+          type size hasn't changed
+          2 data member changes:
+            '__u8 dst_reg' offset changed from 8 to 12 (in bits) (by +4 bits)
+            '__u8 src_reg' offset changed from 12 to 8 (in bits) (by -4 bits)
+    ==============================================================================
+
+    error - 1/912 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+Alternatively, we could have also run with ``-b HEAD~``. This would set the
+base reference to ``HEAD~`` so then the script would compare it to ``HEAD~^1``.
+
+Architecture-specific Headers
+-----------------------------
+
+Consider this change::
+
+    cat << 'EOF' | patch -l -p1
+    --- a/arch/arm64/include/uapi/asm/sigcontext.h
+    +++ b/arch/arm64/include/uapi/asm/sigcontext.h
+    @@ -70,6 +70,7 @@ struct sigcontext {
+     struct _aarch64_ctx {
+            __u32 magic;
+            __u32 size;
+    +       __u32 new_var;
+     };
+
+     #define FPSIMD_MAGIC   0x46508001
+    EOF
+
+This is a change to an arm64-specific UAPI header file. In this example, I'm
+running the script from an x86 machine with an x86 compiler, so, by default,
+the script only checks x86-compatible UAPI header files::
+
+    % ./scripts/check-uapi.sh
+    Installing user-facing UAPI headers from dirty tree... OK
+    Installing user-facing UAPI headers from HEAD... OK
+    No changes to UAPI headers were applied between HEAD and dirty tree
+
+With an x86 compiler, we can't check header files in ``arch/arm64``, so the
+script doesn't even try.
+
+If we want to check the header file, we'll have to use an arm64 compiler and
+set ``ARCH`` accordingly::
+
+    % CC=aarch64-linux-gnu-gcc ARCH=arm64 ./scripts/check-uapi.sh
+    Installing user-facing UAPI headers from dirty tree... OK
+    Installing user-facing UAPI headers from HEAD... OK
+    Checking changes to UAPI headers between HEAD and dirty tree...
+    ==== ABI differences detected in include/asm/sigcontext.h from HEAD -> dirty tree ====
+        [C] 'struct _aarch64_ctx' changed:
+          type size changed from 64 to 96 (in bits)
+          1 data member insertion:
+            '__u32 new_var', at offset 64 (in bits) at sigcontext.h:73:1
+        -- snip --
+        [C] 'struct zt_context' changed:
+          type size changed from 128 to 160 (in bits)
+          2 data member changes (1 filtered):
+            '__u16 nregs' offset changed from 64 to 96 (in bits) (by +32 bits)
+            '__u16 __reserved[3]' offset changed from 80 to 112 (in bits) (by +32 bits)
+    =======================================================================================
+
+    error - 1/884 UAPI headers compatible with arm64 appear _not_ to be backwards compatible
+
+We can see with ``ARCH`` and ``CC`` set properly for the file, the ABI
+change is reported properly. Also notice that the total number of UAPI
+header files checked by the script changes. This is because the number
+of headers installed for arm64 platforms is different than x86.
+
+Cross-Dependency Breakages
+--------------------------
+
+Consider this change::
+
+    cat << 'EOF' | patch -l -p1
+    --- a/include/uapi/linux/types.h
+    +++ b/include/uapi/linux/types.h
+    @@ -52,7 +52,7 @@ typedef __u32 __bitwise __wsum;
+     #define __aligned_be64 __be64 __attribute__((aligned(8)))
+     #define __aligned_le64 __le64 __attribute__((aligned(8)))
+
+    -typedef unsigned __bitwise __poll_t;
+    +typedef unsigned short __bitwise __poll_t;
+
+     #endif /*  __ASSEMBLY__ */
+     #endif /* _UAPI_LINUX_TYPES_H */
+    EOF
+
+Here, we're changing a ``typedef`` in ``types.h``. This doesn't break
+a UAPI in ``types.h``, but other UAPIs in the tree may break due to
+this change::
+
+    % ./scripts/check-uapi.sh
+    Installing user-facing UAPI headers from dirty tree... OK
+    Installing user-facing UAPI headers from HEAD... OK
+    Checking changes to UAPI headers between HEAD and dirty tree...
+    ==== ABI differences detected in include/linux/eventpoll.h from HEAD -> dirty tree ====
+        [C] 'struct epoll_event' changed:
+          type size changed from 96 to 80 (in bits)
+          2 data member changes:
+            type of '__poll_t events' changed:
+              underlying type 'unsigned int' changed:
+                type name changed from 'unsigned int' to 'unsigned short int'
+                type size changed from 32 to 16 (in bits)
+            '__u64 data' offset changed from 32 to 16 (in bits) (by -16 bits)
+    ========================================================================================
+    include/linux/eventpoll.h did not change between HEAD and dirty tree...
+    It's possible a change to one of the headers it includes caused this error:
+    #include <linux/fcntl.h>
+    #include <linux/types.h>
+
+Note that the script noticed the failing header file did not change,
+so it assumes one of its includes must have caused the breakage. Indeed,
+we can see ``linux/types.h`` is used from ``eventpoll.h``.
+
+UAPI Header Removals
+--------------------
+
+Consider this change::
+
+    cat << 'EOF' | patch -l -p1
+    diff --git a/include/uapi/asm-generic/Kbuild b/include/uapi/asm-generic/Kbuild
+    index ebb180aac74e..a9c88b0a8b3b 100644
+    --- a/include/uapi/asm-generic/Kbuild
+    +++ b/include/uapi/asm-generic/Kbuild
+    @@ -31,6 +31,6 @@ mandatory-y += stat.h
+     mandatory-y += statfs.h
+     mandatory-y += swab.h
+     mandatory-y += termbits.h
+    -mandatory-y += termios.h
+    +#mandatory-y += termios.h
+     mandatory-y += types.h
+     mandatory-y += unistd.h
+    EOF
+
+This change removes a UAPI header file from the install list. Let's run
+the script::
+
+    % ./scripts/check-uapi.sh
+    Installing user-facing UAPI headers from dirty tree... OK
+    Installing user-facing UAPI headers from HEAD... OK
+    Checking changes to UAPI headers between HEAD and dirty tree...
+    ==== UAPI header include/asm/termios.h was removed between HEAD and dirty tree ====
+
+    error - 1/912 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+Removing a UAPI header is considered a breaking change, and the script
+will flag it as such.
+
+Checking Historic UAPI Compatibility
+------------------------------------
+
+You can use the ``-b`` and ``-p`` options to examine different chunks of your
+git tree. For example, to check all changed UAPI header files between tags
+v6.0 and v6.1, you'd run::
+
+    % ./scripts/check-uapi.sh -b v6.1 -p v6.0
+    Installing user-facing UAPI headers from v6.1... OK
+    Installing user-facing UAPI headers from v6.0... OK
+    Checking changes to UAPI headers between v6.0 and v6.1...
+
+    --- snip ---
+    error - 37/907 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+Note: Before v5.3, a header file needed by the script is not present,
+so the script is unable to check changes before then.
+
+You'll notice that the script detected many UAPI changes that are not
+backwards compatible. Knowing that kernel UAPIs are supposed to be stable
+forever, this is an alarming result. This brings us to the next section:
+caveats.
+
+Caveats
+=======
+
+The UAPI checker makes no assumptions about the author's intention, so some
+types of changes may be flagged even though they intentionally break UAPI.
+
+Removals For Refactoring or Deprecation
+---------------------------------------
+
+Sometimes drivers for very old hardware are removed, such as in this example::
+
+    % ./scripts/check-uapi.sh -b ba47652ba655
+    Installing user-facing UAPI headers from ba47652ba655... OK
+    Installing user-facing UAPI headers from ba47652ba655^1... OK
+    Checking changes to UAPI headers between ba47652ba655^1 and ba47652ba655...
+    ==== UAPI header include/linux/meye.h was removed between ba47652ba655^1 and ba47652ba655 ====
+
+    error - 1/910 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+The script will always flag removals (even if they're intentional).
+
+Struct Expansions
+-----------------
+
+Depending on how a structure is handled in kernelspace, a change which
+expands a struct could be non-breaking.
+
+If a struct is used as the argument to an ioctl, then the kernel driver
+must be able to handle ioctl commands of any size. Beyond that, you need
+to be careful when copying data from the user. Say, for example, that
+``struct foo`` is changed like this::
+
+    struct foo {
+        __u64 a; /* added in version 1 */
+    +   __u32 b; /* added in version 2 */
+    +   __u32 c; /* added in version 2 */
+    }
+
+By default, the script will flag this kind of change for further review::
+
+    [C] 'struct foo' changed:
+      type size changed from 64 to 128 (in bits)
+      2 data member insertions:
+        '__u32 b', at offset 64 (in bits)
+        '__u32 c', at offset 96 (in bits)
+
+However, it is possible that this change was made safely.
+
+If a userspace program was built with version 1, it will think
+``sizeof(struct foo)`` is 8. That size will be encoded in the
+ioctl value that gets sent to the kernel. If the kernel is built
+with version 2, it will think the ``sizeof(struct foo)`` is 16.
+
+The kernel can use the ``_IOC_SIZE`` macro to get the size encoded
+in the ioctl code that the user passed in and then use
+``copy_struct_from_user()`` to safely copy the value::
+
+    int handle_ioctl(unsigned long cmd, unsigned long arg)
+    {
+        switch _IOC_NR(cmd) {
+        0x01: {
+            struct foo my_cmd;  /* size 16 in the kernel */
+
+            ret = copy_struct_from_user(&my_cmd, arg, sizeof(struct foo), _IOC_SIZE(cmd));
+            ...
+
+``copy_struct_from_user`` will zero the struct in the kernel and then copy
+only the bytes passed in from the user (leaving new members zeroized).
+If the user passed in a larger struct, the extra members are ignored.
+
+If you know this situation is accounted for in the kernel code, you can
+pass ``-i`` to the script, and struct expansions like this will be ignored.
+
+Flex Array Migration
+--------------------
+
+While the script handles expansion into an existing flex array, it does
+still flag initial migration to flex arrays from 1-element fake flex
+arrays. For example::
+
+    struct foo {
+          __u32 x;
+    -     __u32 flex[1]; /* fake flex */
+    +     __u32 flex[];  /* real flex */
+    };
+
+This change would be flagged by the script::
+
+    [C] 'struct foo' changed:
+      type size changed from 64 to 32 (in bits)
+      1 data member change:
+        type of '__u32 flex[1]' changed:
+          type name changed from '__u32[1]' to '__u32[]'
+          array type size changed from 32 to 'unknown'
+          array type subrange 1 changed length from 1 to 'unknown'
+
+At this time, there's no way to filter these types of changes, so be
+aware of this possible false positive.
+
+Summary
+-------
+
+While many types of false positives are filtered out by the script,
+it's possible there are some cases where the script flags a change
+which does not break UAPI. It's also possible a change which *does*
+break userspace would not be flagged by this script. While the script
+has been run on much of the kernel history, there could still be corner
+cases that are not accounted for.
+
+The intention is for this script to be used as a quick check for
+maintainers or automated tooling, not as the end-all authority on
+patch compatibility. It's best to remember: use your best judgment
+(and ideally a unit test in userspace) to make sure your UAPI changes
+are backwards-compatible!
index 3d2286c683bc99575b18746ee5529d5d64dceff5..efa49cdc8e2eb3cd7f104fc90c5c0e4ff4e9ad3b 100644 (file)
@@ -31,6 +31,7 @@ Documentation/dev-tools/testing-overview.rst
    kselftest
    kunit/index
    ktap
+   checkuapi
 
 
 .. only::  subproject and html
index 024e9ad1d1e9e879b5bb69592052812f782bf2f4..bd689db6fdd2013d0fb0cf0e5ff156264e096f93 100644 (file)
@@ -139,6 +139,17 @@ If your installed version of gcc doesn't work, you can tweak the steps:
        $ ./tools/testing/kunit/kunit.py run --make_options=CC=/usr/bin/gcc-6
        $ lcov -t "my_kunit_tests" -o coverage.info -c -d .kunit/ --gcov-tool=/usr/bin/gcov-6
 
+Alternatively, LLVM-based toolchains can also be used:
+
+.. code-block:: bash
+
+       # Build with LLVM and append coverage options to the current config
+       $ ./tools/testing/kunit/kunit.py run --make_options LLVM=1 --kunitconfig=.kunit/ --kunitconfig=tools/testing/kunit/configs/coverage_uml.config
+       $ llvm-profdata merge -sparse default.profraw -o default.profdata
+       $ llvm-cov export --format=lcov .kunit/vmlinux -instr-profile default.profdata > coverage.info
+       # The coverage.info file is in lcov-compatible format and it can be used to e.g. generate HTML report
+       $ genhtml -o /tmp/coverage_html coverage.info
+
 
 Running tests manually
 ======================
index a9efab50eed83e06a89549aeb1fb4da1b2eba1d9..22955d56b3799bfc3f3b92874b638aa24c1edaa6 100644 (file)
@@ -671,8 +671,23 @@ Testing Static Functions
 ------------------------
 
 If we do not want to expose functions or variables for testing, one option is to
-conditionally ``#include`` the test file at the end of your .c file. For
-example:
+conditionally export the used symbol. For example:
+
+.. code-block:: c
+
+       /* In my_file.c */
+
+       VISIBLE_IF_KUNIT int do_interesting_thing();
+       EXPORT_SYMBOL_IF_KUNIT(do_interesting_thing);
+
+       /* In my_file.h */
+
+       #if IS_ENABLED(CONFIG_KUNIT)
+               int do_interesting_thing(void);
+       #endif
+
+Alternatively, you could conditionally ``#include`` the test file at the end of
+your .c file. For example:
 
 .. code-block:: c
 
index 3e886194b043bbb99cc32e5c4b314b483e0be665..2323fd5b7cdae1ebe440275d8f67649354a6f448 100644 (file)
@@ -28,7 +28,7 @@ $(obj)/%.example.dts: $(src)/%.yaml check_dtschema_version FORCE
 find_all_cmd = find $(srctree)/$(src) \( -name '*.yaml' ! \
                -name 'processed-schema*' \)
 
-find_cmd = $(find_all_cmd) | grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))"
+find_cmd = $(find_all_cmd) | sed 's|^$(srctree)/$(src)/||' | grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))" | sed 's|^|$(srctree)/$(src)/|'
 CHK_DT_DOCS := $(shell $(find_cmd))
 
 quiet_cmd_yamllint = LINT    $(src)
index a9fe01238a885d584950cabd282b1dd7a0685640..76b65ea149b65e39e8dbcc234b4f7638af69460e 100644 (file)
@@ -16,7 +16,7 @@ maintainers:
 
 properties:
   compatible:
-    const: "calxeda,hb-sregs-l2-ecc"
+    const: calxeda,hb-sregs-l2-ecc
 
   reg:
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
deleted file mode 100644 (file)
index 606b4b1..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-QCOM Idle States for cpuidle driver
-
-ARM provides idle-state node to define the cpuidle states, as defined in [1].
-cpuidle-qcom is the cpuidle driver for Qualcomm SoCs and uses these idle
-states. Idle states have different enter/exit latency and residency values.
-The idle states supported by the QCOM SoC are defined as -
-
-    * Standby
-    * Retention
-    * Standalone Power Collapse (Standalone PC or SPC)
-    * Power Collapse (PC)
-
-Standby: Standby does a little more in addition to architectural clock gating.
-When the WFI instruction is executed the ARM core would gate its internal
-clocks. In addition to gating the clocks, QCOM cpus use this instruction as a
-trigger to execute the SPM state machine. The SPM state machine waits for the
-interrupt to trigger the core back in to active. This triggers the cache
-hierarchy to enter standby states, when all cpus are idle. An interrupt brings
-the SPM state machine out of its wait, the next step is to ensure that the
-cache hierarchy is also out of standby, and then the cpu is allowed to resume
-execution. This state is defined as a generic ARM WFI state by the ARM cpuidle
-driver and is not defined in the DT. The SPM state machine should be
-configured to execute this state by default and after executing every other
-state below.
-
-Retention: Retention is a low power state where the core is clock gated and
-the memory and the registers associated with the core are retained. The
-voltage may be reduced to the minimum value needed to keep the processor
-registers active. The SPM should be configured to execute the retention
-sequence and would wait for interrupt, before restoring the cpu to execution
-state. Retention may have a slightly higher latency than Standby.
-
-Standalone PC: A cpu can power down and warmboot if there is a sufficient time
-between the time it enters idle and the next known wake up. SPC mode is used
-to indicate a core entering a power down state without consulting any other
-cpu or the system resources. This helps save power only on that core.  The SPM
-sequence for this idle state is programmed to power down the supply to the
-core, wait for the interrupt, restore power to the core, and ensure the
-system state including cache hierarchy is ready before allowing core to
-resume. Applying power and resetting the core causes the core to warmboot
-back into Elevation Level (EL) which trampolines the control back to the
-kernel. Entering a power down state for the cpu, needs to be done by trapping
-into a EL. Failing to do so, would result in a crash enforced by the warm boot
-code in the EL for the SoC. On SoCs with write-back L1 cache, the cache has to
-be flushed in s/w, before powering down the core.
-
-Power Collapse: This state is similar to the SPC mode, but distinguishes
-itself in that the cpu acknowledges and permits the SoC to enter deeper sleep
-modes. In a hierarchical power domain SoC, this means L2 and other caches can
-be flushed, system bus, clocks - lowered, and SoC main XO clock gated and
-voltages reduced, provided all cpus enter this state.  Since the span of low
-power modes possible at this state is vast, the exit latency and the residency
-of this low power mode would be considered high even though at a cpu level,
-this essentially is cpu power down. The SPM in this state also may handshake
-with the Resource power manager (RPM) processor in the SoC to indicate a
-complete application processor subsystem shut down.
-
-The idle-state for QCOM SoCs are distinguished by the compatible property of
-the idle-states device node.
-
-The devicetree representation of the idle state should be -
-
-Required properties:
-
-- compatible: Must be one of -
-                       "qcom,idle-state-ret",
-                       "qcom,idle-state-spc",
-                       "qcom,idle-state-pc",
-               and "arm,idle-state".
-
-Other required and optional properties are specified in [1].
-
-Example:
-
-       idle-states {
-               CPU_SPC: spc {
-                       compatible = "qcom,idle-state-spc", "arm,idle-state";
-                       entry-latency-us = <150>;
-                       exit-latency-us = <200>;
-                       min-residency-us = <2000>;
-               };
-       };
-
-[1]. Documentation/devicetree/bindings/cpu/idle-states.yaml
diff --git a/Documentation/devicetree/bindings/arm/qcom,coresight-remote-etm.yaml b/Documentation/devicetree/bindings/arm/qcom,coresight-remote-etm.yaml
new file mode 100644 (file)
index 0000000..4fd5752
--- /dev/null
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/qcom,coresight-remote-etm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Coresight Remote ETM (Embedded Trace Macrocell)
+
+maintainers:
+  - Jinlong Mao <quic_jinlmao@quicinc.com>
+  - Tao Zhang <quic_taozha@quicinc.com>
+
+description:
+  Support for ETM trace collection on a remote processor using the
+  coresight framework. Enabling this allows turning on ETM tracing on a
+  remote processor, such as the modem processor, via sysfs and collecting
+  the trace via coresight TMC sinks.
+
+properties:
+  compatible:
+    const: qcom,coresight-remote-etm
+
+  out-ports:
+    $ref: /schemas/graph.yaml#/properties/ports
+    additionalProperties: false
+
+    properties:
+      port:
+        description: Output connection to the CoreSight Trace bus.
+        $ref: /schemas/graph.yaml#/properties/port
+
+required:
+  - compatible
+  - out-ports
+
+additionalProperties: false
+
+examples:
+  - |
+    etm {
+        compatible = "qcom,coresight-remote-etm";
+
+        out-ports {
+            port {
+                modem_etm0_out_funnel_modem: endpoint {
+                    remote-endpoint = <&funnel_modem_in_modem_etm0>;
+                };
+            };
+        };
+    };
+...
index 3bad47b7b02bb9921fe23c7d57353fea06389f63..61ddc3b5b247b0fcde7fdb320819d1a4da5d6ce9 100644 (file)
@@ -44,6 +44,23 @@ properties:
     minItems: 1
     maxItems: 2
 
+  qcom,dsb-element-size:
+    description:
+      Specifies the DSB (Discrete Single Bit) element size supported by
+      the monitor. The associated aggregator will read this size before it
+      is enabled. DSB element size currently only supports 32-bit and 64-bit.
+    $ref: /schemas/types.yaml#/definitions/uint8
+    enum: [32, 64]
+
+  qcom,dsb-msrs-num:
+    description:
+      Specifies the number of DSB (Discrete Single Bit) MSR (mux select register)
+      registers supported by the monitor. If this property is not configured
+      or set to 0, it means this DSB TPDM doesn't support MSR.
+    $ref: /schemas/types.yaml#/definitions/uint32
+    minimum: 0
+    maximum: 32
+
   clocks:
     maxItems: 1
 
@@ -77,6 +94,9 @@ examples:
       compatible = "qcom,coresight-tpdm", "arm,primecell";
       reg = <0x0684c000 0x1000>;
 
+      qcom,dsb-element-size = /bits/ 8 <32>;
+      qcom,dsb-msrs-num = <16>;
+
       clocks = <&aoss_qmp>;
       clock-names = "apb_pclk";
 
index fde07e4b119dfb8a356c0362f000b0fce3125f18..406a922a714e8fc7d07adbbc312052e1cff8e1c2 100644 (file)
@@ -113,7 +113,7 @@ examples:
     hd44780 {
             compatible = "hit,hd44780";
             display-height-chars = <2>;
-            display-width-chars  = <16>;
+            display-width-chars = <16>;
             data-gpios = <&pcf8574 4 0>,
                          <&pcf8574 5 0>,
                          <&pcf8574 6 0>,
index b9a9f2cf32a1b698d6e26304f2d6bcea2315d8ca..07ccbda4a0ab5405f9fcd944a06a4bbd2f545d26 100644 (file)
@@ -66,6 +66,7 @@ allOf:
         compatible:
           contains:
             enum:
+              - qcom,qdu1000-llcc
               - qcom,sc7180-llcc
               - qcom,sm6350-llcc
     then:
@@ -103,7 +104,6 @@ allOf:
         compatible:
           contains:
             enum:
-              - qcom,qdu1000-llcc
               - qcom,sc8180x-llcc
               - qcom,sc8280xp-llcc
               - qcom,x1e80100-llcc
index 624984d51c10649738b0305363c7bf17648c7d35..7f8d98226437e480151fdd86e7cd3b5a7f5542ae 100644 (file)
@@ -125,7 +125,7 @@ examples:
     clk25m: clock-oscillator-25m {
       compatible = "fixed-clock";
       #clock-cells = <0>;
-      clock-frequency  = <25000000>;
+      clock-frequency = <25000000>;
       clock-output-names = "clk25m";
     };
 ...
index 7c8a3e8430d306e15dfdd4c1c7c65de7232f83a4..fb216ce68bb3579f5646b5eff39ebf524cd26a7e 100644 (file)
@@ -66,7 +66,6 @@ properties:
       Particularly, if use an output GPIO to control a VBUS regulator, should
       model it as a regulator. See bindings/regulator/fixed-regulator.yaml
 
-  # The following are optional properties for "usb-c-connector".
   power-role:
     description: Determines the power role that the Type C connector will
       support. "dual" refers to Dual Role Port (DRP).
@@ -119,30 +118,6 @@ properties:
 
   # The following are optional properties for "usb-c-connector" with power
   # delivery support.
-  source-pdos:
-    description: An array of u32 with each entry providing supported power
-      source data object(PDO), the detailed bit definitions of PDO can be found
-      in "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.2
-      Source_Capabilities Message, the order of each entry(PDO) should follow
-      the PD spec chapter 6.4.1. Required for power source and power dual role.
-      User can specify the source PDO array via PDO_FIXED/BATT/VAR/PPS_APDO()
-      defined in dt-bindings/usb/pd.h.
-    minItems: 1
-    maxItems: 7
-    $ref: /schemas/types.yaml#/definitions/uint32-array
-
-  sink-pdos:
-    description: An array of u32 with each entry providing supported power sink
-      data object(PDO), the detailed bit definitions of PDO can be found in
-      "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.3
-      Sink Capabilities Message, the order of each entry(PDO) should follow the
-      PD spec chapter 6.4.1. Required for power sink and power dual role. User
-      can specify the sink PDO array via PDO_FIXED/BATT/VAR/PPS_APDO() defined
-      in dt-bindings/usb/pd.h.
-    minItems: 1
-    maxItems: 7
-    $ref: /schemas/types.yaml#/definitions/uint32-array
-
   sink-vdos:
     description: An array of u32 with each entry, a Vendor Defined Message Object (VDO),
       providing additional information corresponding to the product, the detailed bit
@@ -166,10 +141,43 @@ properties:
     maxItems: 6
     $ref: /schemas/types.yaml#/definitions/uint32-array
 
-  op-sink-microwatt:
-    description: Sink required operating power in microwatt, if source can't
-      offer the power, Capability Mismatch is set. Required for power sink and
-      power dual role.
+  accessory-mode-audio:
+    type: boolean
+    description: Whether the device supports Audio Adapter Accessory Mode. This
+      is only necessary if there are no other means to discover supported
+      alternative modes (e.g. through the UCSI firmware interface).
+
+  accessory-mode-debug:
+    type: boolean
+    description: Whether the device supports Debug Accessory Mode. This
+      is only necessary if there are no other means to discover supported
+      alternative modes (e.g. through the UCSI firmware interface).
+
+  altmodes:
+    type: object
+    description: List of Alternative Modes supported by the schematics on the
+      particular device. This is only necessary if there are no other means to
+      discover supported alternative modes (e.g. through the UCSI firmware
+      interface).
+
+    additionalProperties: false
+
+    patternProperties:
+      "^(displayport)$":
+        type: object
+        description:
+          A single USB-C Alternative Mode as supported by the USB-C connector logic.
+
+        additionalProperties: false
+
+        properties:
+          svid:
+            $ref: /schemas/types.yaml#/definitions/uint16
+            description: Unique value assigned by USB-IF to the Vendor / AltMode.
+            enum: [ 0xff01 ]
+          vdo:
+            $ref: /schemas/types.yaml#/definitions/uint32
+            description: VDO returned by Discover Modes USB PD command.
 
   port:
     $ref: /schemas/graph.yaml#/properties/port
@@ -231,6 +239,20 @@ properties:
       SNK_READY for non-pd link.
     type: boolean
 
+  capabilities:
+    description: A child node to contain all the selectable USB Power Delivery capabilities.
+    type: object
+
+    patternProperties:
+      "^caps-[0-9]+$":
+        description: Child nodes under "capabilities" node. Each node contains a selectable USB
+          Power Delivery capability.
+        type: object
+        $ref: "#/$defs/capabilities"
+        unevaluatedProperties: false
+
+    additionalProperties: false
+
 dependencies:
   sink-vdos-v1: [ sink-vdos ]
   sink-vdos: [ sink-vdos-v1 ]
@@ -238,7 +260,42 @@ dependencies:
 required:
   - compatible
 
+$defs:
+  capabilities:
+    type: object
+
+    properties:
+      source-pdos:
+        description: An array of u32 with each entry providing supported power
+          source data object(PDO), the detailed bit definitions of PDO can be found
+          in "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.2
+          Source_Capabilities Message, the order of each entry(PDO) should follow
+          the PD spec chapter 6.4.1. Required for power source and power dual role.
+          User can specify the source PDO array via PDO_FIXED/BATT/VAR/PPS_APDO()
+          defined in dt-bindings/usb/pd.h.
+        minItems: 1
+        maxItems: 7
+        $ref: /schemas/types.yaml#/definitions/uint32-array
+
+      sink-pdos:
+        description: An array of u32 with each entry providing supported power sink
+          data object(PDO), the detailed bit definitions of PDO can be found in
+          "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.3
+          Sink Capabilities Message, the order of each entry(PDO) should follow the
+          PD spec chapter 6.4.1. Required for power sink and power dual role. User
+          can specify the sink PDO array via PDO_FIXED/BATT/VAR/PPS_APDO() defined
+          in dt-bindings/usb/pd.h.
+        minItems: 1
+        maxItems: 7
+        $ref: /schemas/types.yaml#/definitions/uint32-array
+
+      op-sink-microwatt:
+        description: Sink required operating power in microwatt, if source can't
+          offer the power, Capability Mismatch is set. Required for power sink and
+          power dual role.
+
 allOf:
+  - $ref: "#/$defs/capabilities"
   - if:
       properties:
         compatible:
@@ -267,7 +324,7 @@ anyOf:
         - typec-power-opmode
         - new-source-frs-typec-current
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   # Micro-USB connector with HS lines routed via controller (MUIC).
@@ -289,6 +346,13 @@ examples:
             compatible = "usb-c-connector";
             label = "USB-C";
 
+            altmodes {
+                displayport {
+                    svid = /bits/ 16 <0xff01>;
+                    vdo = <0x00001c46>;
+                };
+            };
+
             ports {
                 #address-cells = <1>;
                 #size-cells = <0>;
index b3a5356f9916e92bb6a04f10c94fb136699c7239..239480ef7c30d3b50445612b0c56be728c40512c 100644 (file)
@@ -243,7 +243,64 @@ description: |+
   just supports idle_standby, an idle-states node is not required.
 
   ===========================================
-  6 - References
+  6 - Qualcomm specific STATES
+  ===========================================
+
+  Idle states have different enter/exit latency and residency values.
+  The idle states supported by the QCOM SoC are defined as -
+
+    * Standby
+    * Retention
+    * Standalone Power Collapse (Standalone PC or SPC)
+    * Power Collapse (PC)
+
+  Standby: Standby does a little more in addition to architectural clock gating.
+  When the WFI instruction is executed the ARM core would gate its internal
+  clocks. In addition to gating the clocks, QCOM cpus use this instruction as a
+  trigger to execute the SPM state machine. The SPM state machine waits for the
+  interrupt to trigger the core back into the active state. This triggers the cache
+  hierarchy to enter standby states, when all cpus are idle. An interrupt brings
+  the SPM state machine out of its wait, the next step is to ensure that the
+  cache hierarchy is also out of standby, and then the cpu is allowed to resume
+  execution. This state is defined as a generic ARM WFI state by the ARM cpuidle
+  driver and is not defined in the DT. The SPM state machine should be
+  configured to execute this state by default and after executing every other
+  state below.
+
+  Retention: Retention is a low power state where the core is clock gated and
+  the memory and the registers associated with the core are retained. The
+  voltage may be reduced to the minimum value needed to keep the processor
+  registers active. The SPM should be configured to execute the retention
+  sequence and would wait for interrupt, before restoring the cpu to execution
+  state. Retention may have a slightly higher latency than Standby.
+
+  Standalone PC: A cpu can power down and warmboot if there is a sufficient time
+  between the time it enters idle and the next known wake up. SPC mode is used
+  to indicate a core entering a power down state without consulting any other
+  cpu or the system resources. This helps save power only on that core.  The SPM
+  sequence for this idle state is programmed to power down the supply to the
+  core, wait for the interrupt, restore power to the core, and ensure the
+  system state including cache hierarchy is ready before allowing core to
+  resume. Applying power and resetting the core causes the core to warmboot
+  back into Elevation Level (EL) which trampolines the control back to the
+  kernel. Entering a power down state for the cpu needs to be done by trapping
+  into an EL. Failing to do so would result in a crash enforced by the warm boot
+  code in the EL for the SoC. On SoCs with write-back L1 cache, the cache has to
+  be flushed in s/w, before powering down the core.
+
+  Power Collapse: This state is similar to the SPC mode, but distinguishes
+  itself in that the cpu acknowledges and permits the SoC to enter deeper sleep
+  modes. In a hierarchical power domain SoC, this means L2 and other caches can
+  be flushed, system bus, clocks - lowered, and SoC main XO clock gated and
+  voltages reduced, provided all cpus enter this state.  Since the span of low
+  power modes possible at this state is vast, the exit latency and the residency
+  of this low power mode would be considered high even though at a cpu level,
+  this essentially is cpu power down. The SPM in this state also may handshake
+  with the Resource power manager (RPM) processor in the SoC to indicate a
+  complete application processor subsystem shut down.
+
+  ===========================================
+  7 - References
   ===========================================
 
   [1] ARM Linux Kernel documentation - CPUs bindings
@@ -301,9 +358,16 @@ patternProperties:
 
     properties:
       compatible:
-        enum:
-          - arm,idle-state
-          - riscv,idle-state
+        oneOf:
+          - items:
+              - enum:
+                  - qcom,idle-state-ret
+                  - qcom,idle-state-spc
+                  - qcom,idle-state-pc
+              - const: arm,idle-state
+          - enum:
+              - arm,idle-state
+              - riscv,idle-state
 
       arm,psci-suspend-param:
         $ref: /schemas/types.yaml#/definitions/uint32
@@ -852,4 +916,13 @@ examples:
         };
     };
 
+    // Example 4 - Qualcomm SPC
+    idle-states {
+      cpu_spc: cpu-spc {
+        compatible = "qcom,idle-state-spc", "arm,idle-state";
+        entry-latency-us = <150>;
+        exit-latency-us = <200>;
+        min-residency-us = <2000>;
+      };
+    };
 ...
diff --git a/Documentation/devicetree/bindings/display/panel/synaptics,r63353.yaml b/Documentation/devicetree/bindings/display/panel/synaptics,r63353.yaml
new file mode 100644 (file)
index 0000000..e5617d1
--- /dev/null
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/synaptics,r63353.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synaptics R63353 based MIPI-DSI panels
+
+maintainers:
+  - Michael Trimarchi <michael@amarulasolutions.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - sharp,ls068b3sx02
+      - const: syna,r63353
+
+  avdd-supply: true
+  dvdd-supply: true
+  reg: true
+
+required:
+  - compatible
+  - avdd-supply
+  - dvdd-supply
+  - reg
+  - reset-gpios
+  - port
+  - backlight
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "sharp,ls068b3sx02", "syna,r63353";
+            reg = <0>;
+            avdd-supply = <&avdd_display>;
+            dvdd-supply = <&dvdd_display>;
+            reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
+            backlight = <&backlight>;
+
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&mipi_dsi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/inno_hdmi-rockchip.txt
deleted file mode 100644 (file)
index cec2171..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-Rockchip specific extensions to the Innosilicon HDMI
-================================
-
-Required properties:
-- compatible:
-       "rockchip,rk3036-inno-hdmi";
-- reg:
-       Physical base address and length of the controller's registers.
-- clocks, clock-names:
-       Phandle to hdmi controller clock, name should be "pclk"
-- interrupts:
-       HDMI interrupt number
-- ports:
-       Contain one port node with endpoint definitions as defined in
-       Documentation/devicetree/bindings/graph.txt.
-- pinctrl-0, pinctrl-name:
-       Switch the iomux of HPD/CEC pins to HDMI function.
-
-Example:
-hdmi: hdmi@20034000 {
-       compatible = "rockchip,rk3036-inno-hdmi";
-       reg = <0x20034000 0x4000>;
-       interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
-       clocks = <&cru  PCLK_HDMI>;
-       clock-names = "pclk";
-       pinctrl-names = "default";
-       pinctrl-0 = <&hdmi_ctl>;
-
-       hdmi_in: port {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               hdmi_in_lcdc: endpoint@0 {
-                       reg = <0>;
-                       remote-endpoint = <&lcdc_out_hdmi>;
-               };
-       };
-};
-
-&pinctrl {
-       hdmi {
-               hdmi_ctl: hdmi-ctl {
-                       rockchip,pins = <1 8  RK_FUNC_1 &pcfg_pull_none>,
-                                       <1 9  RK_FUNC_1 &pcfg_pull_none>,
-                                       <1 10 RK_FUNC_1 &pcfg_pull_none>,
-                                       <1 11 RK_FUNC_1 &pcfg_pull_none>;
-               };
-       };
-
-};
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,inno-hdmi.yaml
new file mode 100644 (file)
index 0000000..be78dcf
--- /dev/null
@@ -0,0 +1,139 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip,inno-hdmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip Innosilicon HDMI controller
+
+maintainers:
+  - Sandy Huang <hjc@rock-chips.com>
+  - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+  compatible:
+    enum:
+      - rockchip,rk3036-inno-hdmi
+      - rockchip,rk3128-inno-hdmi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    items:
+      - description: The HDMI controller main clock
+      - description: The HDMI PHY reference clock
+
+  clock-names:
+    minItems: 1
+    items:
+      - const: pclk
+      - const: ref
+
+  power-domains:
+    maxItems: 1
+
+  ports:
+    $ref: /schemas/graph.yaml#/properties/ports
+
+    properties:
+      port@0:
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
+          Port node with one endpoint connected to a vop node.
+
+      port@1:
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
+          Port node with one endpoint connected to a hdmi-connector node.
+
+    required:
+      - port@0
+      - port@1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - pinctrl-0
+  - pinctrl-names
+  - ports
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: rockchip,rk3036-inno-hdmi
+
+    then:
+      properties:
+        power-domains: false
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: rockchip,rk3128-inno-hdmi
+
+    then:
+      properties:
+        clocks:
+          minItems: 2
+        clock-names:
+          minItems: 2
+      required:
+        - power-domains
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/rk3036-cru.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/pinctrl/rockchip.h>
+    hdmi: hdmi@20034000 {
+      compatible = "rockchip,rk3036-inno-hdmi";
+      reg = <0x20034000 0x4000>;
+      interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&cru  PCLK_HDMI>;
+      clock-names = "pclk";
+      pinctrl-names = "default";
+      pinctrl-0 = <&hdmi_ctl>;
+
+      ports {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        hdmi_in: port@0 {
+          reg = <0>;
+          hdmi_in_vop: endpoint {
+            remote-endpoint = <&vop_out_hdmi>;
+          };
+        };
+
+        hdmi_out: port@1 {
+          reg = <1>;
+          hdmi_out_con: endpoint {
+            remote-endpoint = <&hdmi_con_in>;
+          };
+        };
+      };
+    };
+
+    pinctrl {
+      hdmi {
+        hdmi_ctl: hdmi-ctl {
+          rockchip,pins = <1 RK_PB0 1 &pcfg_pull_none>,
+                          <1 RK_PB1 1 &pcfg_pull_none>,
+                          <1 RK_PB2 1 &pcfg_pull_none>,
+                          <1 RK_PB3 1 &pcfg_pull_none>;
+        };
+      };
+    };
index 25d53fde92e1104490e3f8e604184b9449150be3..597c9cc6a312acb66b0355f84f9dd8977dbb2197 100644 (file)
@@ -85,7 +85,7 @@ allOf:
         clocks:
           minItems: 6
           maxItems: 6
-        regs:
+        reg:
           minItems: 2
           maxItems: 2
 
@@ -99,7 +99,7 @@ allOf:
         clocks:
           minItems: 4
           maxItems: 4
-        regs:
+        reg:
           minItems: 2
           maxItems: 2
 
@@ -116,7 +116,7 @@ allOf:
         clocks:
           minItems: 3
           maxItems: 3
-        regs:
+        reg:
           minItems: 1
           maxItems: 1
 
index 04d150d4d15d3cc74958c562ceaf921dc4bb24b0..e6afca558c2dfa4d84f5e821f519c1ad9dfa7d39 100644 (file)
@@ -19,19 +19,4 @@ properties:
 
 additionalProperties: true
 
-examples:
-  - |
-    dma: dma-controller@48000000 {
-        compatible = "ti,omap-sdma";
-        reg = <0x48000000 0x1000>;
-        interrupts = <0 12 0x4>,
-                     <0 13 0x4>,
-                     <0 14 0x4>,
-                     <0 15 0x4>;
-        #dma-cells = <1>;
-        dma-channels = <32>;
-        dma-requests = <127>;
-        dma-channel-mask = <0xfffe>;
-    };
-
 ...
index 346fe0fa4460e316223d80ed0ffbd890dfd65450..5ad2febc581e23a72d862b6e22c379bbe13bbaec 100644 (file)
@@ -40,15 +40,4 @@ required:
 
 additionalProperties: true
 
-examples:
-  - |
-    sdma_xbar: dma-router@4a002b78 {
-        compatible = "ti,dra7-dma-crossbar";
-        reg = <0x4a002b78 0xfc>;
-        #dma-cells = <1>;
-        dma-requests = <205>;
-        ti,dma-safe-map = <0>;
-        dma-masters = <&sdma>;
-    };
-
 ...
diff --git a/Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml b/Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml
new file mode 100644 (file)
index 0000000..6a1b49a
--- /dev/null
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/loongson,ls2x-apbdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson LS2X APB DMA controller
+
+description:
+  The Loongson LS2X APB DMA controller is used for transferring data
+  between system memory and the peripherals on the APB bus.
+
+maintainers:
+  - Binbin Zhou <zhoubinbin@loongson.cn>
+
+allOf:
+  - $ref: dma-controller.yaml#
+
+properties:
+  compatible:
+    oneOf:
+      - const: loongson,ls2k1000-apbdma
+      - items:
+          - const: loongson,ls2k0500-apbdma
+          - const: loongson,ls2k1000-apbdma
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  '#dma-cells':
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - '#dma-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/clock/loongson,ls2k-clk.h>
+
+    dma-controller@1fe00c00 {
+        compatible = "loongson,ls2k1000-apbdma";
+        reg = <0x1fe00c00 0x8>;
+        interrupt-parent = <&liointc1>;
+        interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&clk LOONGSON2_APB_CLK>;
+        #dma-cells = <1>;
+    };
+
+...
index 4003dbe94940c2150fc6105f9c18d5bd914aa39b..877147e95ecc5df1a34893ac88e0d83d70418347 100644 (file)
@@ -53,6 +53,9 @@ properties:
       ADMA_CHn_CTRL register.
     const: 1
 
+  dma-channel-mask:
+    maxItems: 1
+
 required:
   - compatible
   - reg
index 88d0de3d1b46b8a9ff56613bd66dd3bf86034e0e..deb64cb9ca3eacf092b1f92a14407092689212d3 100644 (file)
@@ -32,6 +32,8 @@ properties:
               - qcom,sm8350-gpi-dma
               - qcom,sm8450-gpi-dma
               - qcom,sm8550-gpi-dma
+              - qcom,sm8650-gpi-dma
+              - qcom,x1e80100-gpi-dma
           - const: qcom,sm6350-gpi-dma
       - items:
           - enum:
index c284abc6784aec5439ba43f3fb3a8eab66fcbc16..a42b6a26a6d3f25874186faad8ce91995857f1a2 100644 (file)
@@ -16,7 +16,7 @@ properties:
   compatible:
     items:
       - enum:
-          - renesas,r9a07g043-dmac # RZ/G2UL
+          - renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five
           - renesas,r9a07g044-dmac # RZ/G2{L,LC}
           - renesas,r9a07g054-dmac # RZ/V2L
       - const: renesas,rz-dmac
index a1af0b9063653741f4bd6b6501339aea34cba13f..3b22183a1a379258f3c8c826dbc6597d1dc3b3a9 100644 (file)
@@ -29,6 +29,7 @@ properties:
   compatible:
     items:
       - enum:
+          - microchip,mpfs-pdma
           - sifive,fu540-c000-pdma
       - const: sifive,pdma0
     description:
index 4ca300a42a99c2f60184318d9b7e8d5906872e44..27b8e163656006b311264c242ff2aaa790c30ebb 100644 (file)
@@ -37,11 +37,11 @@ properties:
 
   reg:
     minItems: 3
-    maxItems: 5
+    maxItems: 9
 
   reg-names:
     minItems: 3
-    maxItems: 5
+    maxItems: 9
 
   "#dma-cells":
     const: 3
@@ -141,7 +141,10 @@ allOf:
         ti,sci-rm-range-tchan: false
 
         reg:
-          maxItems: 3
+          items:
+            - description: BCDMA Control /Status Registers region
+            - description: RX Channel Realtime Registers region
+            - description: Ring Realtime Registers region
 
         reg-names:
           items:
@@ -161,14 +164,29 @@ allOf:
       properties:
         reg:
           minItems: 5
+          items:
+            - description: BCDMA Control /Status Registers region
+            - description: Block Copy Channel Realtime Registers region
+            - description: RX Channel Realtime Registers region
+            - description: TX Channel Realtime Registers region
+            - description: Ring Realtime Registers region
+            - description: Ring Configuration Registers region
+            - description: TX Channel Configuration Registers region
+            - description: RX Channel Configuration Registers region
+            - description: Block Copy Channel Configuration Registers region
 
         reg-names:
+          minItems: 5
           items:
             - const: gcfg
             - const: bchanrt
             - const: rchanrt
             - const: tchanrt
             - const: ringrt
+            - const: ring
+            - const: tchan
+            - const: rchan
+            - const: bchan
 
       required:
         - ti,sci-rm-range-bchan
@@ -184,7 +202,11 @@ allOf:
         ti,sci-rm-range-bchan: false
 
         reg:
-          maxItems: 4
+          items:
+            - description: BCDMA Control /Status Registers region
+            - description: RX Channel Realtime Registers region
+            - description: TX Channel Realtime Registers region
+            - description: Ring Realtime Registers region
 
         reg-names:
           items:
@@ -220,8 +242,13 @@ examples:
                       <0x0 0x4c000000 0x0 0x20000>,
                       <0x0 0x4a820000 0x0 0x20000>,
                       <0x0 0x4aa40000 0x0 0x20000>,
-                      <0x0 0x4bc00000 0x0 0x100000>;
-                reg-names = "gcfg", "bchanrt", "rchanrt", "tchanrt", "ringrt";
+                      <0x0 0x4bc00000 0x0 0x100000>,
+                      <0x0 0x48600000 0x0 0x8000>,
+                      <0x0 0x484a4000 0x0 0x2000>,
+                      <0x0 0x484c2000 0x0 0x2000>,
+                      <0x0 0x48420000 0x0 0x2000>;
+                reg-names = "gcfg", "bchanrt", "rchanrt", "tchanrt", "ringrt",
+                            "ring", "tchan", "rchan", "bchan";
                 msi-parent = <&inta_main_dmss>;
                 #dma-cells = <3>;
 
index a69f62f854d8c3e8d4084c4aa2c6e0f6cccd5819..11e064c029946641c8aacf5e49eb61b41477658d 100644 (file)
@@ -45,14 +45,28 @@ properties:
       The second cell is the ASEL value for the channel
 
   reg:
-    maxItems: 4
+    minItems: 4
+    items:
+      - description: Packet DMA Control /Status Registers region
+      - description: RX Channel Realtime Registers region
+      - description: TX Channel Realtime Registers region
+      - description: Ring Realtime Registers region
+      - description: Ring Configuration Registers region
+      - description: TX Configuration Registers region
+      - description: RX Configuration Registers region
+      - description: RX Flow Configuration Registers region
 
   reg-names:
+    minItems: 4
     items:
       - const: gcfg
       - const: rchanrt
       - const: tchanrt
       - const: ringrt
+      - const: ring
+      - const: tchan
+      - const: rchan
+      - const: rflow
 
   msi-parent: true
 
@@ -136,8 +150,14 @@ examples:
                 reg = <0x0 0x485c0000 0x0 0x100>,
                       <0x0 0x4a800000 0x0 0x20000>,
                       <0x0 0x4aa00000 0x0 0x40000>,
-                      <0x0 0x4b800000 0x0 0x400000>;
-                reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt";
+                      <0x0 0x4b800000 0x0 0x400000>,
+                      <0x0 0x485e0000 0x0 0x20000>,
+                      <0x0 0x484a0000 0x0 0x4000>,
+                      <0x0 0x484c0000 0x0 0x2000>,
+                      <0x0 0x48430000 0x0 0x4000>;
+                reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt",
+                            "ring", "tchan", "rchan", "rflow";
+
                 msi-parent = <&inta_main_dmss>;
                 #dma-cells = <2>;
 
index 22f6c5e2f7f4b94fe92c477e8a4336b111896e50..b18cf2bfdb5b14789b0a9ff405d57cb717802ad3 100644 (file)
@@ -69,13 +69,24 @@ properties:
       - ti,j721e-navss-mcu-udmap
 
   reg:
-    maxItems: 3
+    minItems: 3
+    items:
+      - description: UDMA-P Control /Status Registers region
+      - description: RX Channel Realtime Registers region
+      - description: TX Channel Realtime Registers region
+      - description: TX Configuration Registers region
+      - description: RX Configuration Registers region
+      - description: RX Flow Configuration Registers region
 
   reg-names:
+    minItems: 3
     items:
       - const: gcfg
       - const: rchanrt
       - const: tchanrt
+      - const: tchan
+      - const: rchan
+      - const: rflow
 
   msi-parent: true
 
@@ -158,8 +169,11 @@ examples:
                 compatible = "ti,am654-navss-main-udmap";
                 reg = <0x0 0x31150000 0x0 0x100>,
                       <0x0 0x34000000 0x0 0x100000>,
-                      <0x0 0x35000000 0x0 0x100000>;
-                reg-names = "gcfg", "rchanrt", "tchanrt";
+                      <0x0 0x35000000 0x0 0x100000>,
+                      <0x0 0x30b00000 0x0 0x20000>,
+                      <0x0 0x30c00000 0x0 0x8000>,
+                      <0x0 0x30d00000 0x0 0x4000>;
+                reg-names = "gcfg", "rchanrt", "tchanrt", "tchan", "rchan", "rflow";
                 #dma-cells = <1>;
 
                 ti,ringacc = <&ringacc>;
diff --git a/Documentation/devicetree/bindings/dts-coding-style.rst b/Documentation/devicetree/bindings/dts-coding-style.rst
new file mode 100644 (file)
index 0000000..a9bdd2b
--- /dev/null
@@ -0,0 +1,196 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
+Devicetree Sources (DTS) Coding Style
+=====================================
+
+When writing Devicetree Sources (DTS), please observe the guidelines below.
+They should be considered complementary to any rules already expressed in
+the Devicetree Specification and the dtc compiler (including W=1 and W=2
+builds).
+
+Individual architectures and subarchitectures can define additional rules,
+making the coding style stricter.
+
+Naming and Valid Characters
+---------------------------
+
+The Devicetree Specification allows a broad range of characters in node
+and property names, but this coding style narrows the range down to achieve
+better code readability.
+
+1. Node and property names can use only the following characters:
+
+   * Lowercase characters: [a-z]
+   * Digits: [0-9]
+   * Dash: -
+
+2. Labels can use only the following characters:
+
+   * Lowercase characters: [a-z]
+   * Digits: [0-9]
+   * Underscore: _
+
+3. Unless a bus defines differently, unit addresses shall use lowercase
+   hexadecimal digits, without leading zeros (padding).
+
+4. Hex values in properties, e.g. "reg", shall use lowercase hex.  The address
+   part can be padded with leading zeros.
+
+Example::
+
+       gpi_dma2: dma-controller@a00000 {
+               compatible = "qcom,sm8550-gpi-dma", "qcom,sm6350-gpi-dma";
+               reg = <0x0 0x00a00000 0x0 0x60000>;
+       };
+
+Order of Nodes
+--------------
+
+1. Nodes on any bus, thus using unit addresses for children, shall be
+   ordered by unit address in ascending order.
+   Alternatively for some subarchitectures, nodes of the same type can be
+   grouped together, e.g. all I2C controllers one after another even if this
+   breaks unit address ordering.
+
+2. Nodes without unit addresses shall be ordered alpha-numerically by the node
+   name.  For a few node types, they can be ordered by the main property, e.g.
+   pin configuration states ordered by value of "pins" property.
+
+3. When extending nodes in the board DTS via &label, the entries shall be
+   ordered either alpha-numerically or by keeping the order from DTSI, where
+   the choice depends on the subarchitecture.
+
+The above-described ordering rules are easy to enforce during review, reduce
+chances of conflicts for simultaneous additions of new nodes to a file and help
+in navigating through the DTS source.
+
+Example::
+
+       /* SoC DTSI */
+
+       / {
+               cpus {
+                       /* ... */
+               };
+
+               psci {
+                       /* ... */
+               };
+
+               soc@0 {
+                       dma: dma-controller@10000 {
+                               /* ... */
+                       };
+
+                       clk: clock-controller@80000 {
+                               /* ... */
+                       };
+               };
+       };
+
+       /* Board DTS - alphabetical order */
+
+       &clk {
+               /* ... */
+       };
+
+       &dma {
+               /* ... */
+       };
+
+       /* Board DTS - alternative order, keep as DTSI */
+
+       &dma {
+               /* ... */
+       };
+
+       &clk {
+               /* ... */
+       };
+
+Order of Properties in Device Node
+----------------------------------
+
+The following order of properties in device nodes is preferred:
+
+1. "compatible"
+2. "reg"
+3. "ranges"
+4. Standard/common properties (defined by common bindings, e.g. without
+   vendor-prefixes)
+5. Vendor-specific properties
+6. "status" (if applicable)
+7. Child nodes, where each node is preceded with a blank line
+
+The "status" property is by default "okay", thus it can be omitted.
+
+The above-described ordering follows this approach:
+
+1. Most important properties start the node: compatible then bus addressing to
+   match unit address.
+2. Each node will have common properties in similar place.
+3. Status is the last information to annotate that device node is or is not
+   finished (board resources are needed).
+
+Example::
+
+       /* SoC DTSI */
+
+       device_node: device-class@6789abc {
+               compatible = "vendor,device";
+               reg = <0x0 0x06789abc 0x0 0xa123>;
+               ranges = <0x0 0x0 0x06789abc 0x1000>;
+               #dma-cells = <1>;
+               clocks = <&clock_controller 0>, <&clock_controller 1>;
+               clock-names = "bus", "host";
+               vendor,custom-property = <2>;
+               status = "disabled";
+
+               child_node: child-class@100 {
+                       reg = <0x100 0x200>;
+                       /* ... */
+               };
+       };
+
+       /* Board DTS */
+
+       &device_node {
+               vdd-supply = <&board_vreg1>;
+               status = "okay";
+       };
+
+Indentation
+-----------
+
+1. Use indentation according to Documentation/process/coding-style.rst.
+2. Each entry in arrays with multiple cells, e.g. "reg" with two IO addresses,
+   shall be enclosed in <>.
+3. For arrays spanning across lines, it is preferred to align the continued
+   entries with opening < from the first line.
+
+Example::
+
+       thermal-sensor@c271000 {
+               compatible = "qcom,sm8550-tsens", "qcom,tsens-v2";
+               reg = <0x0 0x0c271000 0x0 0x1000>,
+                     <0x0 0x0c222000 0x0 0x1000>;
+       };
+
+Organizing DTSI and DTS
+-----------------------
+
+The DTSI and DTS files shall be organized in a way representing the common,
+reusable parts of hardware.  Typically, this means organizing DTSI and DTS files
+into several files:
+
+1. DTSI with contents of the entire SoC, without nodes for hardware not present
+   on the SoC.
+2. If applicable: DTSI with common or re-usable parts of the hardware, e.g.
+   entire System-on-Module.
+3. DTS representing the board.
+
+Hardware components that are present on the board shall be placed in the
+board DTS, not in the SoC or SoM DTSI.  A partial exception is a common
+external reference SoC input clock, which could be coded as a fixed-clock in
+the SoC DTSI with its frequency provided by each board DTS.
index b6864d0ee81e4bbf89fa75d9ee25771c234ddab6..1812ef31d5f1e941d4ae0e5a53e06f278cd55aca 100644 (file)
@@ -123,6 +123,7 @@ properties:
           - enum:
               - onnn,cat24c04
               - onnn,cat24c05
+              - rohm,br24g04
           - const: atmel,24c04
       - items:
           - const: renesas,r1ex24016
diff --git a/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt
deleted file mode 100644 (file)
index 5dd0ff0..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-Altera FPGA To SDRAM Bridge Driver
-
-Required properties:
-- compatible           : Should contain "altr,socfpga-fpga2sdram-bridge"
-
-See Documentation/devicetree/bindings/fpga/fpga-bridge.txt for generic bindings.
-
-Example:
-       fpga_bridge3: fpga-bridge@ffc25080 {
-               compatible = "altr,socfpga-fpga2sdram-bridge";
-               reg = <0xffc25080 0x4>;
-               bridge-enable = <0>;
-       };
diff --git a/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt
deleted file mode 100644 (file)
index 8b26fbc..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-Altera Freeze Bridge Controller Driver
-
-The Altera Freeze Bridge Controller manages one or more freeze bridges.
-The controller can freeze/disable the bridges which prevents signal
-changes from passing through the bridge.  The controller can also
-unfreeze/enable the bridges which allows traffic to pass through the
-bridge normally.
-
-Required properties:
-- compatible           : Should contain "altr,freeze-bridge-controller"
-- regs                 : base address and size for freeze bridge module
-
-See Documentation/devicetree/bindings/fpga/fpga-bridge.txt for generic bindings.
-
-Example:
-       freeze-controller@100000450 {
-               compatible = "altr,freeze-bridge-controller";
-               regs = <0x1000 0x10>;
-               bridge-enable = <0>;
-       };
diff --git a/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt
deleted file mode 100644 (file)
index 68cce39..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-Altera FPGA/HPS Bridge Driver
-
-Required properties:
-- regs         : base address and size for AXI bridge module
-- compatible   : Should contain one of:
-                 "altr,socfpga-lwhps2fpga-bridge",
-                 "altr,socfpga-hps2fpga-bridge", or
-                 "altr,socfpga-fpga2hps-bridge"
-- resets       : Phandle and reset specifier for this bridge's reset
-- clocks       : Clocks used by this module.
-
-See Documentation/devicetree/bindings/fpga/fpga-bridge.txt for generic bindings.
-
-Example:
-       fpga_bridge0: fpga-bridge@ff400000 {
-               compatible = "altr,socfpga-lwhps2fpga-bridge";
-               reg = <0xff400000 0x100000>;
-               resets = <&rst LWHPS2FPGA_RESET>;
-               clocks = <&l4_main_clk>;
-               bridge-enable = <0>;
-       };
-
-       fpga_bridge1: fpga-bridge@ff500000 {
-               compatible = "altr,socfpga-hps2fpga-bridge";
-               reg = <0xff500000 0x10000>;
-               resets = <&rst HPS2FPGA_RESET>;
-               clocks = <&l4_main_clk>;
-               bridge-enable = <1>;
-       };
-
-       fpga_bridge2: fpga-bridge@ff600000 {
-               compatible = "altr,socfpga-fpga2hps-bridge";
-               reg = <0xff600000 0x100000>;
-               resets = <&rst FPGA2HPS_RESET>;
-               clocks = <&l4_main_clk>;
-       };
diff --git a/Documentation/devicetree/bindings/fpga/altr,freeze-bridge-controller.yaml b/Documentation/devicetree/bindings/fpga/altr,freeze-bridge-controller.yaml
new file mode 100644 (file)
index 0000000..fccffee
--- /dev/null
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/fpga/altr,freeze-bridge-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera Freeze Bridge Controller
+
+description:
+  The Altera Freeze Bridge Controller manages one or more freeze bridges.
+  The controller can freeze/disable the bridges which prevents signal
+  changes from passing through the bridge. The controller can also
+  unfreeze/enable the bridges which allows traffic to pass through the bridge
+  normally.
+
+maintainers:
+  - Xu Yilun <yilun.xu@intel.com>
+
+allOf:
+  - $ref: fpga-bridge.yaml#
+
+properties:
+  compatible:
+    const: altr,freeze-bridge-controller
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    fpga-bridge@100000450 {
+        compatible = "altr,freeze-bridge-controller";
+        reg = <0x1000 0x10>;
+        bridge-enable = <0>;
+    };
diff --git a/Documentation/devicetree/bindings/fpga/altr,socfpga-fpga2sdram-bridge.yaml b/Documentation/devicetree/bindings/fpga/altr,socfpga-fpga2sdram-bridge.yaml
new file mode 100644 (file)
index 0000000..22b5845
--- /dev/null
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/fpga/altr,socfpga-fpga2sdram-bridge.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera FPGA To SDRAM Bridge
+
+maintainers:
+  - Xu Yilun <yilun.xu@intel.com>
+
+allOf:
+  - $ref: fpga-bridge.yaml#
+
+properties:
+  compatible:
+    const: altr,socfpga-fpga2sdram-bridge
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    fpga-bridge@ffc25080 {
+        compatible = "altr,socfpga-fpga2sdram-bridge";
+        reg = <0xffc25080 0x4>;
+        bridge-enable = <0>;
+    };
diff --git a/Documentation/devicetree/bindings/fpga/altr,socfpga-hps2fpga-bridge.yaml b/Documentation/devicetree/bindings/fpga/altr,socfpga-hps2fpga-bridge.yaml
new file mode 100644 (file)
index 0000000..d19c666
--- /dev/null
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/fpga/altr,socfpga-hps2fpga-bridge.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera FPGA/HPS Bridge
+
+maintainers:
+  - Xu Yilun <yilun.xu@intel.com>
+
+allOf:
+  - $ref: fpga-bridge.yaml#
+
+properties:
+  compatible:
+    enum:
+      - altr,socfpga-lwhps2fpga-bridge
+      - altr,socfpga-hps2fpga-bridge
+      - altr,socfpga-fpga2hps-bridge
+
+  reg:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - resets
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/reset/altr,rst-mgr.h>
+
+    fpga-bridge@ff400000 {
+      compatible = "altr,socfpga-lwhps2fpga-bridge";
+      reg = <0xff400000 0x100000>;
+      bridge-enable = <0>;
+      clocks = <&l4_main_clk>;
+      resets = <&rst LWHPS2FPGA_RESET>;
+    };
diff --git a/Documentation/devicetree/bindings/fpga/fpga-bridge.txt b/Documentation/devicetree/bindings/fpga/fpga-bridge.txt
deleted file mode 100644 (file)
index 72e0691..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-FPGA Bridge Device Tree Binding
-
-Optional properties:
-- bridge-enable                : 0 if driver should disable bridge at startup
-                         1 if driver should enable bridge at startup
-                         Default is to leave bridge in current state.
-
-Example:
-       fpga_bridge3: fpga-bridge@ffc25080 {
-               compatible = "altr,socfpga-fpga2sdram-bridge";
-               reg = <0xffc25080 0x4>;
-               bridge-enable = <0>;
-       };
diff --git a/Documentation/devicetree/bindings/fpga/fpga-bridge.yaml b/Documentation/devicetree/bindings/fpga/fpga-bridge.yaml
new file mode 100644 (file)
index 0000000..1ccb2aa
--- /dev/null
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/fpga/fpga-bridge.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: FPGA Bridge
+
+maintainers:
+  - Michal Simek <michal.simek@amd.com>
+
+properties:
+  $nodename:
+    pattern: "^fpga-bridge(@.*|-([0-9]|[1-9][0-9]+))?$"
+
+  bridge-enable:
+    description: |
+      0 if driver should disable bridge at startup
+      1 if driver should enable bridge at startup
+      Default is to leave bridge in current state.
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [ 0, 1 ]
+
+additionalProperties: true
+
+examples:
+  - |
+    fpga-bridge {
+        bridge-enable = <0>;
+    };
index a7d4b8e59e1930829859279531ae53d8cf4dc8c0..5bf731f9d99a35f3b307f58497640e9c3f4f88f3 100644 (file)
@@ -9,6 +9,9 @@ title: Xilinx LogiCORE Partial Reconfig Decoupler/AXI shutdown manager Softcore
 maintainers:
   - Nava kishore Manne <nava.kishore.manne@amd.com>
 
+allOf:
+  - $ref: fpga-bridge.yaml#
+
 description: |
   The Xilinx LogiCORE Partial Reconfig(PR) Decoupler manages one or more
   decouplers/fpga bridges. The controller can decouple/disable the bridges
@@ -51,7 +54,7 @@ required:
   - clocks
   - clock-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index c1060e5fcef3a95c4bf2ef55e897c6c09b790d03..d3d8a2e143ed25dee5634ae9539c413c4f51f865 100644 (file)
@@ -126,7 +126,7 @@ examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
 
-        gpio@e000a000 {
+        gpio@a0020000 {
             compatible = "xlnx,xps-gpio-1.00.a";
             reg = <0xa0020000 0x10000>;
             #gpio-cells = <2>;
index e7daae86257890cae6d970cf09f19fdd98fbfa4b..132aaa49597b6b62e8cf72a0c73426edc4f63cf7 100644 (file)
@@ -22,36 +22,20 @@ properties:
   interrupts:
     maxItems: 1
 
-  clocks: {}
-  clock-names: {}
-  iommus: {}
-  power-domains: {}
-
-if:
-  properties:
-    compatible:
-      contains:
-        const: samsung,exynos5250-g2d
-
-then:
-  properties:
-    clocks:
-      items:
-        - description: fimg2d clock
-    clock-names:
-      items:
-        - const: fimg2d
-
-else:
-  properties:
-    clocks:
-      items:
-        - description: sclk_fimg2d clock
-        - description: fimg2d clock
-    clock-names:
-      items:
-        - const: sclk_fimg2d
-        - const: fimg2d
+  clocks:
+    minItems: 1
+    maxItems: 2
+
+  clock-names:
+    minItems: 1
+    maxItems: 2
+
+  iommus:
+    minItems: 1
+    maxItems: 2
+
+  power-domains:
+    maxItems: 1
 
 required:
   - compatible
@@ -60,6 +44,33 @@ required:
   - clocks
   - clock-names
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: samsung,exynos5250-g2d
+
+    then:
+      properties:
+        clocks:
+          items:
+            - description: fimg2d clock
+        clock-names:
+          items:
+            - const: fimg2d
+
+    else:
+      properties:
+        clocks:
+          items:
+            - description: sclk_fimg2d clock
+            - description: fimg2d clock
+        clock-names:
+          items:
+            - const: sclk_fimg2d
+            - const: fimg2d
+
 additionalProperties: false
 
 examples:
index d60626ffb28e228415953f931ca01941b1dd39ab..18bf44e06e8f304ec9534475391d8d4e97830cf2 100644 (file)
@@ -12,10 +12,11 @@ maintainers:
 properties:
   compatible:
     enum:
-      - "samsung,s5pv210-rotator"
-      - "samsung,exynos4210-rotator"
-      - "samsung,exynos4212-rotator"
-      - "samsung,exynos5250-rotator"
+      - samsung,s5pv210-rotator
+      - samsung,exynos4210-rotator
+      - samsung,exynos4212-rotator
+      - samsung,exynos5250-rotator
+
   reg:
     maxItems: 1
 
index 5317ac64426af7b976e4db4cd7a2c0771980fd7a..9fb530e65d0e8bb2993dd0bdfb5e9e4afeb80035 100644 (file)
@@ -21,40 +21,20 @@ properties:
   interrupts:
     maxItems: 1
 
-  clocks: {}
-  clock-names: {}
-  iommus: {}
-  power-domains: {}
-
-if:
-  properties:
-    compatible:
-      contains:
-        const: samsung,exynos5420-scaler
-
-then:
-  properties:
-    clocks:
-      items:
-        - description: mscl clock
-
-    clock-names:
-      items:
-        - const: mscl
-
-else:
-  properties:
-    clocks:
-      items:
-        - description: pclk clock
-        - description: aclk clock
-        - description: aclk_xiu clock
-
-    clock-names:
-      items:
-        - const: pclk
-        - const: aclk
-        - const: aclk_xiu
+  clocks:
+    minItems: 1
+    maxItems: 3
+
+  clock-names:
+    minItems: 1
+    maxItems: 3
+
+  iommus:
+    minItems: 1
+    maxItems: 2
+
+  power-domains:
+    maxItems: 1
 
 required:
   - compatible
@@ -63,6 +43,39 @@ required:
   - clocks
   - clock-names
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: samsung,exynos5420-scaler
+
+    then:
+      properties:
+        clocks:
+          items:
+            - description: mscl clock
+        clock-names:
+          items:
+            - const: mscl
+        iommus:
+          minItems: 2
+
+    else:
+      properties:
+        clocks:
+          items:
+            - description: pclk clock
+            - description: aclk clock
+            - description: aclk_xiu clock
+        clock-names:
+          items:
+            - const: pclk
+            - const: aclk
+            - const: aclk_xiu
+        iommus:
+          maxItems: 1
+
 additionalProperties: false
 
 examples:
index 94b75d9f66cdb7b1e2227ae5dde9475449fed07d..1b31b87c1800a00d8935d261432117ea5d601191 100644 (file)
@@ -19,6 +19,7 @@ allOf:
               - st,stm32f7-i2c
               - st,stm32mp13-i2c
               - st,stm32mp15-i2c
+              - st,stm32mp25-i2c
     then:
       properties:
         i2c-scl-rising-time-ns:
@@ -41,6 +42,30 @@ allOf:
         clock-frequency:
           enum: [100000, 400000]
 
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - st,stm32f4-i2c
+              - st,stm32f7-i2c
+              - st,stm32mp13-i2c
+              - st,stm32mp15-i2c
+    then:
+      properties:
+        interrupts:
+          minItems: 2
+
+        interrupt-names:
+          minItems: 2
+    else:
+      properties:
+        interrupts:
+          maxItems: 1
+
+        interrupt-names:
+          maxItems: 1
+
 properties:
   compatible:
     enum:
@@ -48,6 +73,7 @@ properties:
       - st,stm32f7-i2c
       - st,stm32mp13-i2c
       - st,stm32mp15-i2c
+      - st,stm32mp25-i2c
 
   reg:
     maxItems: 1
@@ -56,11 +82,13 @@ properties:
     items:
       - description: interrupt ID for I2C event
       - description: interrupt ID for I2C error
+    minItems: 1
 
   interrupt-names:
     items:
       - const: event
       - const: error
+    minItems: 1
 
   resets:
     maxItems: 1
index ce7ba634643c6939cb6abb6a29b0ee6a789feabf..ddec9747436c2954b1dc87bd90d4d84fea8268d3 100644 (file)
@@ -4,36 +4,92 @@
 $id: http://devicetree.org/schemas/iio/adc/adi,ad7091r5.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: Analog Devices AD7091R5 4-Channel 12-Bit ADC
+title: Analog Devices AD7091R-2/-4/-5/-8 Multi-Channel 12-Bit ADCs
 
 maintainers:
   - Michael Hennerich <michael.hennerich@analog.com>
+  - Marcelo Schmitt <marcelo.schmitt@analog.com>
 
 description: |
-  Analog Devices AD7091R5 4-Channel 12-Bit ADC
+  Analog Devices AD7091R5 4-Channel 12-Bit ADC supporting I2C interface
   https://www.analog.com/media/en/technical-documentation/data-sheets/ad7091r-5.pdf
+  Analog Devices AD7091R-2/AD7091R-4/AD7091R-8 2-/4-/8-Channel 12-Bit ADCs
+  supporting SPI interface
+  https://www.analog.com/media/en/technical-documentation/data-sheets/AD7091R-2_7091R-4_7091R-8.pdf
 
 properties:
   compatible:
     enum:
+      - adi,ad7091r2
+      - adi,ad7091r4
       - adi,ad7091r5
+      - adi,ad7091r8
 
   reg:
     maxItems: 1
 
+  vdd-supply:
+    description:
+      Provide VDD power to the sensor (VDD range is from 2.7V to 5.25V).
+
+  vdrive-supply:
+    description:
+      Determines the voltage level at which the interface logic will operate.
+      The V_drive voltage range is from 1.8V to 5.25V and must not exceed VDD by
+      more than 0.3V.
+
   vref-supply:
     description:
       Phandle to the vref power supply
 
-  interrupts:
+  convst-gpios:
+    description:
+      GPIO connected to the CONVST pin.
+      This logic input is used to initiate conversions on the analog
+      input channels.
     maxItems: 1
 
+  reset-gpios:
+    maxItems: 1
+
+  interrupts:
+    description:
+      Interrupt for signaling when conversion results exceed the high limit for
+      ADC readings or fall below the low limit for them. Interrupt source must
+      be attached to ALERT/BUSY/GPO0 pin.
+    maxItems: 1
 
 required:
   - compatible
   - reg
 
-additionalProperties: false
+allOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+  # AD7091R-2 does not have ALERT/BUSY/GPO pin
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - adi,ad7091r2
+    then:
+      properties:
+        interrupts: false
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - adi,ad7091r2
+              - adi,ad7091r4
+              - adi,ad7091r8
+    then:
+      required:
+        - convst-gpios
+
+unevaluatedProperties: false
 
 examples:
   - |
@@ -51,4 +107,22 @@ examples:
             interrupt-parent = <&gpio>;
         };
     };
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        adc@0 {
+            compatible = "adi,ad7091r8";
+            reg = <0x0>;
+            spi-max-frequency = <1000000>;
+            vref-supply = <&adc_vref>;
+            convst-gpios = <&gpio 25 GPIO_ACTIVE_LOW>;
+            reset-gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+            interrupts = <22 IRQ_TYPE_EDGE_FALLING>;
+            interrupt-parent = <&gpio>;
+        };
+    };
 ...
index 5fcc8dd012f17c0780c701a33d0d5d675875004b..be2616ff9af68574444db483373c306d3ce38016 100644 (file)
@@ -80,9 +80,9 @@ examples:
             compatible = "adi,ad7780";
             reg = <0>;
 
-            avdd-supply      = <&vdd_supply>;
-            powerdown-gpios  = <&gpio0 12 GPIO_ACTIVE_HIGH>;
-            adi,gain-gpios   = <&gpio1  5 GPIO_ACTIVE_LOW>;
+            avdd-supply = <&vdd_supply>;
+            powerdown-gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+            adi,gain-gpios = <&gpio1  5 GPIO_ACTIVE_LOW>;
             adi,filter-gpios = <&gpio2 15 GPIO_ACTIVE_LOW>;
         };
     };
diff --git a/Documentation/devicetree/bindings/iio/adc/maxim,max34408.yaml b/Documentation/devicetree/bindings/iio/adc/maxim,max34408.yaml
new file mode 100644 (file)
index 0000000..4cba856
--- /dev/null
@@ -0,0 +1,139 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/maxim,max34408.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim MAX34408/MAX34409 current monitors with overcurrent control
+
+maintainers:
+  - Ivan Mikhaylov <fr0st61te@gmail.com>
+
+description: |
+  The MAX34408/MAX34409 are two- and four-channel current monitors that are
+  configured and monitored with a standard I2C/SMBus serial interface. Each
+  unidirectional current sensor offers precision high-side operation with a
+  low full-scale sense voltage. The devices automatically sequence through
+  two or four channels and collect the current-sense samples and average them
+  to reduce the effect of impulse noise. The raw ADC samples are compared to
+  user-programmable digital thresholds to indicate overcurrent conditions.
+  Overcurrent conditions trigger a hardware output to provide an immediate
+  indication to shut down any necessary external circuitry.
+
+  Specifications about the devices can be found at:
+  https://www.analog.com/media/en/technical-documentation/data-sheets/MAX34408-MAX34409.pdf
+
+properties:
+  compatible:
+    enum:
+      - maxim,max34408
+      - maxim,max34409
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  powerdown-gpios:
+    description:
+      Shutdown Output. Open-drain output. This output transitions to high impedance
+      when any of the digital comparator thresholds are exceeded as long as the ENA
+      pin is high.
+    maxItems: 1
+
+  powerdown-status-gpios:
+    description:
+      SHTDN Enable Input. CMOS digital input. Connect to GND to clear the latch and
+      unconditionally deassert (force low) the SHTDN output and reset the shutdown
+      delay. Connect to VDD to enable normal latch operation of the SHTDN output.
+    maxItems: 1
+
+  vdd-supply: true
+
+patternProperties:
+  "^channel@[0-3]$":
+    $ref: adc.yaml
+    type: object
+    description:
+      Represents the internal channels of the ADC.
+
+    properties:
+      reg:
+        items:
+          - minimum: 0
+            maximum: 3
+
+      maxim,rsense-val-micro-ohms:
+        description:
+          Adjust the Rsense value to monitor higher or lower current levels for
+          input.
+        enum: [250, 500, 1000, 5000, 10000, 50000, 100000, 200000, 500000]
+        default: 1000
+
+    required:
+      - reg
+      - maxim,rsense-val-micro-ohms
+
+    unevaluatedProperties: false
+
+required:
+  - compatible
+  - reg
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: maxim,max34408
+    then:
+      patternProperties:
+        "^channel@[2-3]$": false
+        "^channel@[0-1]$":
+          properties:
+            reg:
+              maximum: 1
+    else:
+      patternProperties:
+        "^channel@[0-3]$":
+          properties:
+            reg:
+              maximum: 3
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        adc@1e {
+              compatible = "maxim,max34409";
+              reg = <0x1e>;
+              powerdown-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
+              powerdown-status-gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>;
+
+              #address-cells = <1>;
+              #size-cells = <0>;
+
+              channel@0 {
+                  reg = <0x0>;
+                  maxim,rsense-val-micro-ohms = <5000>;
+              };
+
+              channel@1 {
+                  reg = <0x1>;
+                  maxim,rsense-val-micro-ohms = <10000>;
+              };
+        };
+    };
index 73def67fbe015b05b5e45ab3afae07aa0d1b05d3..5ed893ef5c189ddfe9f70d0c2a188ad4c47f9c1c 100644 (file)
@@ -25,7 +25,7 @@ properties:
       - const: qcom,spmi-iadc
 
   reg:
-    description: IADC base address and length in the SPMI PMIC register map
+    description: IADC base address in the SPMI PMIC register map
     maxItems: 1
 
   qcom,external-resistor-micro-ohms:
@@ -50,15 +50,17 @@ additionalProperties: false
 examples:
   - |
     #include <dt-bindings/interrupt-controller/irq.h>
-    spmi {
+
+    pmic {
         #address-cells = <1>;
         #size-cells = <0>;
-        pmic_iadc: adc@3600 {
+
+        adc@3600 {
             compatible = "qcom,pm8941-iadc", "qcom,spmi-iadc";
             reg = <0x3600>;
             interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>;
             qcom,external-resistor-micro-ohms = <10000>;
-            #io-channel-cells  = <1>;
+            #io-channel-cells = <1>;
         };
     };
 ...
index b3a626389870f19d6a2a6aea792c604019c48c1b..f39bc92c2b99bb368326b4a7bb4c95f8f8b102be 100644 (file)
@@ -43,9 +43,9 @@ examples:
         #address-cells = <1>;
         #size-cells = <0>;
 
-        pmic_rradc: adc@4500 {
+        adc@4500 {
             compatible = "qcom,pmi8998-rradc";
             reg = <0x4500>;
-            #io-channel-cells  = <1>;
+            #io-channel-cells = <1>;
         };
     };
index ad7d6fc49de58ea1ed6ed9f55ba11d97ae6c721b..40fa0710f1f0f8f4c9e501e56055f1c33a3b0aed 100644 (file)
@@ -236,11 +236,11 @@ additionalProperties: false
 
 examples:
   - |
-    spmi {
+    pmic {
         #address-cells = <1>;
         #size-cells = <0>;
-        /* VADC node */
-        pmic_vadc: adc@3100 {
+
+        adc@3100 {
             compatible = "qcom,spmi-vadc";
             reg = <0x3100>;
             interrupts = <0x0 0x31 0x0 0x1>;
@@ -281,9 +281,10 @@ examples:
     #include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>
     #include <dt-bindings/interrupt-controller/irq.h>
 
-    spmi {
+    pmic {
         #address-cells = <1>;
         #size-cells = <0>;
+
         adc@3100 {
             reg = <0x3100>;
             compatible = "qcom,spmi-adc7";
index 720c16a108d4e2034e4e8a4ec182e4a1ab3f60b0..f94057d8f60586934799735c0de01b6211d85dab 100644 (file)
@@ -67,19 +67,4 @@ required:
   - compatible
   - "#io-channel-cells"
 
-examples:
-  - |
-    #include <dt-bindings/clock/mt8183-clk.h>
-    pmic {
-        compatible = "ti,twl6035-pmic", "ti,palmas-pmic";
-        adc {
-            compatible = "ti,palmas-gpadc";
-            interrupts = <18 0>,
-                         <16 0>,
-                         <17 0>;
-            #io-channel-cells = <1>;
-            ti,channel0-current-microamp = <5>;
-            ti,channel3-current-microamp = <10>;
-        };
-    };
 ...
index 2ee6080deac7c6c09a09299adf42298b82b85719..67de9d4e3a1df6ca9bf90773db3c82d50a67a302 100644 (file)
@@ -12,6 +12,9 @@ maintainers:
 description: |
   Digital Step Attenuator IIO devices with gpio interface.
   Offer various frequency and attenuation ranges.
+  ADRF5740 2 dB LSB, 4-Bit, Silicon Digital Attenuator, 10 MHz to 60 GHz
+    https://www.analog.com/media/en/technical-documentation/data-sheets/adrf5740.pdf
+
   HMC425A 0.5 dB LSB GaAs MMIC 6-BIT DIGITAL POSITIVE CONTROL ATTENUATOR, 2.2 - 8.0 GHz
     https://www.analog.com/media/en/technical-documentation/data-sheets/hmc425A.pdf
 
@@ -22,6 +25,7 @@ description: |
 properties:
   compatible:
     enum:
+      - adi,adrf5740
       - adi,hmc425a
       - adi,hmc540s
 
diff --git a/Documentation/devicetree/bindings/iio/chemical/aosong,ags02ma.yaml b/Documentation/devicetree/bindings/iio/chemical/aosong,ags02ma.yaml
new file mode 100644 (file)
index 0000000..35e7b09
--- /dev/null
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/chemical/aosong,ags02ma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aosong AGS02MA VOC Sensor
+
+description: |
+  AGS02MA is a TVOC (Total Volatile Organic Compounds) i2c sensor with default
+  address of 0x1a.
+
+  Datasheet:
+    https://asairsensors.com/wp-content/uploads/2021/09/AGS02MA.pdf
+
+maintainers:
+  - Anshul Dalal <anshulusr@gmail.com>
+
+properties:
+  compatible:
+    enum:
+      - aosong,ags02ma
+
+  reg:
+    maxItems: 1
+
+  vdd-supply: true
+
+required:
+  - compatible
+  - reg
+  - vdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        voc-sensor@1a {
+            compatible = "aosong,ags02ma";
+            reg = <0x1a>;
+            vdd-supply = <&vdd_regulator>;
+        };
+    };
index 3a84739736f620cf62a399e21bc50b7d0c03bf69..c81285d84db7a206d0633995072053ea208c0f55 100644 (file)
@@ -26,6 +26,11 @@ properties:
   vdd-supply: true
   vss-supply: true
 
+  adi,rbuf-gain2-en:
+    description: Specify to allow an external amplifier to be connected in a
+      gain of two configuration.
+    type: boolean
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/iio/dac/microchip,mcp4821.yaml b/Documentation/devicetree/bindings/iio/dac/microchip,mcp4821.yaml
new file mode 100644 (file)
index 0000000..0dc577c
--- /dev/null
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/microchip,mcp4821.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip MCP4821 and similar DACs
+
+description: |
+  Supports MCP48x1 (single channel) and MCP48x2 (dual channel) series of DACs.
+  Device supports simplex communication over SPI in Mode 0 and Mode 3.
+
+  +---------+--------------+-------------+
+  | Device  |  Resolution  |   Channels  |
+  |---------|--------------|-------------|
+  | MCP4801 |     8-bit    |      1      |
+  | MCP4802 |     8-bit    |      2      |
+  | MCP4811 |    10-bit    |      1      |
+  | MCP4812 |    10-bit    |      2      |
+  | MCP4821 |    12-bit    |      1      |
+  | MCP4822 |    12-bit    |      2      |
+  +---------+--------------+-------------+
+
+  Datasheet:
+    MCP48x1: https://ww1.microchip.com/downloads/en/DeviceDoc/22244B.pdf
+    MCP48x2: https://ww1.microchip.com/downloads/en/DeviceDoc/20002249B.pdf
+
+maintainers:
+  - Anshul Dalal <anshulusr@gmail.com>
+
+allOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+  compatible:
+    enum:
+      - microchip,mcp4801
+      - microchip,mcp4802
+      - microchip,mcp4811
+      - microchip,mcp4812
+      - microchip,mcp4821
+      - microchip,mcp4822
+
+  reg:
+    maxItems: 1
+
+  vdd-supply: true
+
+  ldac-gpios:
+    description: |
+      Active Low LDAC (Latch DAC Input) pin used to update the DAC output.
+    maxItems: 1
+
+  powerdown-gpios:
+    description: |
+      Active Low SHDN pin used to enter the shutdown mode.
+    maxItems: 1
+
+  spi-cpha: true
+  spi-cpol: true
+
+required:
+  - compatible
+  - reg
+  - vdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        dac@0 {
+            compatible = "microchip,mcp4821";
+            reg = <0>;
+            vdd-supply = <&vdd_regulator>;
+            ldac-gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+            powerdown-gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>;
+            spi-cpha;
+            spi-cpol;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/iio/humidity/ti,hdc3020.yaml b/Documentation/devicetree/bindings/iio/humidity/ti,hdc3020.yaml
new file mode 100644 (file)
index 0000000..7f6d0f9
--- /dev/null
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/humidity/ti,hdc3020.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HDC3020/HDC3021/HDC3022 humidity and temperature iio sensors
+
+maintainers:
+  - Li peiyu <579lpy@gmail.com>
+  - Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+description:
+  https://www.ti.com/lit/ds/symlink/hdc3020.pdf
+
+  The HDC302x is an integrated capacitive based relative humidity (RH)
+  and temperature sensor.
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - ti,hdc3021
+              - ti,hdc3022
+          - const: ti,hdc3020
+      - const: ti,hdc3020
+
+  interrupts:
+    maxItems: 1
+
+  vdd-supply: true
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - vdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        humidity-sensor@47 {
+            compatible = "ti,hdc3021", "ti,hdc3020";
+            reg = <0x47>;
+            vdd-supply = <&vcc_3v3>;
+        };
+    };
index 4e43c80e5119f021883bc374e9a6a3c53875113c..4cacc9948726f0b66f9d4c4e9691015e9fada808 100644 (file)
@@ -25,6 +25,10 @@ properties:
 
   spi-cpol: true
 
+  spi-cs-inactive-delay-ns:
+    minimum: 16000
+    default: 16000
+
   interrupts:
     maxItems: 1
 
index c73533c54588b17275282bec02a6ed36486a9686..9b7ad609f7dbe13ce6430bf859e955bc390c076d 100644 (file)
@@ -47,6 +47,10 @@ properties:
   spi-max-frequency:
     maximum: 2000000
 
+  spi-cs-inactive-delay-ns:
+    minimum: 16000
+    default: 16000
+
   interrupts:
     maxItems: 1
 
diff --git a/Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml b/Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml
new file mode 100644 (file)
index 0000000..64ef26e
--- /dev/null
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/imu/bosch,bmi323.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bosch BMI323 6-Axis IMU
+
+maintainers:
+  - Jagath Jog J <jagathjog1996@gmail.com>
+
+description:
+  BMI323 is a 6-axis inertial measurement unit that supports acceleration and
+  gyroscopic measurements with hardware fifo buffering. Sensor also provides
+  events information such as motion, steps, orientation, single and double
+  tap detection.
+
+properties:
+  compatible:
+    const: bosch,bmi323
+
+  reg:
+    maxItems: 1
+
+  vdd-supply: true
+  vddio-supply: true
+
+  interrupts:
+    minItems: 1
+    maxItems: 2
+
+  interrupt-names:
+    minItems: 1
+    maxItems: 2
+    items:
+      enum:
+        - INT1
+        - INT2
+
+  drive-open-drain:
+    description:
+      set if the specified interrupt pin should be configured as
+      open drain. If not set, defaults to push-pull.
+
+  mount-matrix:
+    description:
+      an optional 3x3 mounting rotation matrix.
+
+required:
+  - compatible
+  - reg
+  - vdd-supply
+  - vddio-supply
+
+allOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    // Example for I2C
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        imu@68 {
+            compatible = "bosch,bmi323";
+            reg = <0x68>;
+            vddio-supply = <&vddio>;
+            vdd-supply = <&vdd>;
+            interrupt-parent = <&gpio1>;
+            interrupts = <29 IRQ_TYPE_EDGE_RISING>;
+            interrupt-names = "INT1";
+        };
+    };
diff --git a/Documentation/devicetree/bindings/iio/light/liteon,ltr390.yaml b/Documentation/devicetree/bindings/iio/light/liteon,ltr390.yaml
new file mode 100644 (file)
index 0000000..5d98ef2
--- /dev/null
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/liteon,ltr390.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lite-On LTR390 ALS and UV Sensor
+
+description: |
+  The Lite-On LTR390 is an ALS (Ambient Light Sensor) and a UV sensor in a
+  single package with i2c address of 0x53.
+
+  Datasheet:
+    https://optoelectronics.liteon.com/upload/download/DS86-2015-0004/LTR-390UV_Final_%20DS_V1%201.pdf
+
+maintainers:
+  - Anshul Dalal <anshulusr@gmail.com>
+
+properties:
+  compatible:
+    enum:
+      - liteon,ltr390
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+    description: |
+      Level interrupt pin with open drain output.
+      The sensor pulls this pin low when the measured reading is greater than
+      some configured threshold.
+
+  vdd-supply: true
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        light-sensor@53 {
+            compatible = "liteon,ltr390";
+            reg = <0x53>;
+            interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+            vdd-supply = <&vdd_regulator>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml b/Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml
new file mode 100644 (file)
index 0000000..abee04c
--- /dev/null
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/vishay,veml6075.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Vishay VEML6075 UVA and UVB sensor
+
+maintainers:
+  - Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+properties:
+  compatible:
+    const: vishay,veml6075
+
+  reg:
+    maxItems: 1
+
+  vdd-supply: true
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        uv-sensor@10 {
+            compatible = "vishay,veml6075";
+            reg = <0x10>;
+            vdd-supply = <&vdd_reg>;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/iio/pressure/honeywell,hsc030pa.yaml b/Documentation/devicetree/bindings/iio/pressure/honeywell,hsc030pa.yaml
new file mode 100644 (file)
index 0000000..65a24ed
--- /dev/null
@@ -0,0 +1,142 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/pressure/honeywell,hsc030pa.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Honeywell TruStability HSC and SSC pressure sensor series
+
+description: |
+  Support for Honeywell TruStability HSC and SSC digital pressure sensor
+  series.
+
+  These sensors have either an I2C, an SPI or an analog interface. Only the
+  digital versions are supported by this driver.
+
+  There are 118 models with different pressure ranges available in each family.
+  The vendor calls them "HSC series" and "SSC series". All of them have an
+  identical programming model but differ in pressure range, unit and transfer
+  function.
+
+  To support different models one needs to specify the pressure range as well
+  as the transfer function. Pressure range can either be provided via
+  pressure-triplet (directly extracted from the part number) or in case it's
+  a custom chip via numerical range limits converted to pascals.
+
+  The transfer function defines the ranges of raw conversion values delivered
+  by the sensor. pmin-pascal and pmax-pascal correspond to the minimum and
+  maximum pressure that can be measured.
+
+  Please note that in case of an SPI-based sensor, the clock signal should not
+  exceed 800kHz and the MOSI signal is not required.
+
+  Specifications about the devices can be found at:
+  https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf
+  https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-ssc-series/documents/sps-siot-trustability-ssc-series-standard-accuracy-board-mount-pressure-sensors-50099533-a-en-ciid-151134.pdf
+
+maintainers:
+  - Petre Rodan <petre.rodan@subdimension.ro>
+
+properties:
+  compatible:
+    const: honeywell,hsc030pa
+
+  reg:
+    maxItems: 1
+
+  honeywell,transfer-function:
+    description: |
+      Transfer function which defines the range of valid values delivered by
+      the sensor.
+      0 - A, 10% to 90% of 2^14
+      1 - B, 5% to 95% of 2^14
+      2 - C, 5% to 85% of 2^14
+      3 - F, 4% to 94% of 2^14
+    enum: [0, 1, 2, 3]
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  honeywell,pressure-triplet:
+    description: |
+      Case-sensitive five character string that defines pressure range, unit
+      and type as part of the device nomenclature. In the unlikely case of a
+      custom chip, set to "NA" and provide pmin-pascal and pmax-pascal.
+    enum: [001BA, 1.6BA, 2.5BA, 004BA, 006BA, 010BA, 1.6MD, 2.5MD, 004MD,
+           006MD, 010MD, 016MD, 025MD, 040MD, 060MD, 100MD, 160MD, 250MD,
+           400MD, 600MD, 001BD, 1.6BD, 2.5BD, 004BD, 2.5MG, 004MG, 006MG,
+           010MG, 016MG, 025MG, 040MG, 060MG, 100MG, 160MG, 250MG, 400MG,
+           600MG, 001BG, 1.6BG, 2.5BG, 004BG, 006BG, 010BG, 100KA, 160KA,
+           250KA, 400KA, 600KA, 001GA, 160LD, 250LD, 400LD, 600LD, 001KD,
+           1.6KD, 2.5KD, 004KD, 006KD, 010KD, 016KD, 025KD, 040KD, 060KD,
+           100KD, 160KD, 250KD, 400KD, 250LG, 400LG, 600LG, 001KG, 1.6KG,
+           2.5KG, 004KG, 006KG, 010KG, 016KG, 025KG, 040KG, 060KG, 100KG,
+           160KG, 250KG, 400KG, 600KG, 001GG, 015PA, 030PA, 060PA, 100PA,
+           150PA, 0.5ND, 001ND, 002ND, 004ND, 005ND, 010ND, 020ND, 030ND,
+           001PD, 005PD, 015PD, 030PD, 060PD, 001NG, 002NG, 004NG, 005NG,
+           010NG, 020NG, 030NG, 001PG, 005PG, 015PG, 030PG, 060PG, 100PG,
+           150PG, NA]
+    $ref: /schemas/types.yaml#/definitions/string
+
+  honeywell,pmin-pascal:
+    description: |
+      Minimum pressure value the sensor can measure in pascal.
+      To be specified only if honeywell,pressure-triplet is set to "NA".
+
+  honeywell,pmax-pascal:
+    description: |
+      Maximum pressure value the sensor can measure in pascal.
+      To be specified only if honeywell,pressure-triplet is set to "NA".
+
+  vdd-supply:
+    description:
+      Provide VDD power to the sensor (either 3.3V or 5V depending on the chip)
+
+  spi-max-frequency:
+    maximum: 800000
+
+required:
+  - compatible
+  - reg
+  - honeywell,transfer-function
+  - honeywell,pressure-triplet
+
+additionalProperties: false
+
+dependentSchemas:
+  honeywell,pmin-pascal:
+    properties:
+      honeywell,pressure-triplet:
+        const: NA
+  honeywell,pmax-pascal:
+    properties:
+      honeywell,pressure-triplet:
+        const: NA
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        pressure@28 {
+            compatible = "honeywell,hsc030pa";
+            reg = <0x28>;
+            honeywell,transfer-function = <0>;
+            honeywell,pressure-triplet = "030PA";
+        };
+    };
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        pressure@0 {
+            compatible = "honeywell,hsc030pa";
+            reg = <0>;
+            spi-max-frequency = <800000>;
+            honeywell,transfer-function = <0>;
+            honeywell,pressure-triplet = "NA";
+            honeywell,pmin-pascal = <0>;
+            honeywell,pmax-pascal = <200000>;
+        };
+    };
+...
index b31f8120f14ed837e20b68517bbc93a4c4ad7438..d9e903fbfd99ea3c7666f22b3d87851a04cc1d89 100644 (file)
@@ -53,12 +53,10 @@ properties:
   honeywell,pmin-pascal:
     description:
       Minimum pressure value the sensor can measure in pascal.
-    $ref: /schemas/types.yaml#/definitions/uint32
 
   honeywell,pmax-pascal:
     description:
       Maximum pressure value the sensor can measure in pascal.
-    $ref: /schemas/types.yaml#/definitions/uint32
 
   honeywell,transfer-function:
     description: |
index 4a55e7f25ae7cb5bdf163ee7720be100d043be88..03bb5d4fa8b5798be5b65e13b5ee5c8624a4b832 100644 (file)
@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/iio/temperature/melexis,mlx90632.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: Melexis MLX90632 contactless Infra Red temperature sensor
+title: Melexis MLX90632 and MLX90635 contactless Infra Red temperature sensor
 
 maintainers:
   - Crt Mori <cmo@melexis.com>
@@ -27,9 +27,24 @@ description: |
   Since measured object emissivity effects Infra Red energy emitted,
   emissivity should be set before requesting the object temperature.
 
+  https://www.melexis.com/en/documents/documentation/datasheets/datasheet-mlx90635
+
+  MLX90635 is most suitable for consumer applications where
+  measured object temperature is in range between -20 to 100 degrees
+  Celsius with relative error of measurement 2 degree Celsius in
+  object temperature range for industrial applications, while just 0.2
+  degree Celsius for human body measurement applications. Since it can
+  operate and measure ambient temperature in range of -20 to 85 degrees
+  Celsius it is suitable also for outdoor use.
+
+  Since measured object emissivity effects Infra Red energy emitted,
+  emissivity should be set before requesting the object temperature.
+
 properties:
   compatible:
-    const: melexis,mlx90632
+    enum:
+      - melexis,mlx90632
+      - melexis,mlx90635
 
   reg:
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/temperature/microchip,mcp9600.yaml b/Documentation/devicetree/bindings/iio/temperature/microchip,mcp9600.yaml
new file mode 100644 (file)
index 0000000..d2cafa3
--- /dev/null
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/temperature/microchip,mcp9600.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip MCP9600 thermocouple EMF converter
+
+maintainers:
+  - Andrew Hepp <andrew.hepp@ahepp.dev>
+
+description:
+  https://ww1.microchip.com/downloads/en/DeviceDoc/MCP960X-Data-Sheet-20005426.pdf
+
+properties:
+  compatible:
+    const: microchip,mcp9600
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    minItems: 1
+    maxItems: 6
+
+  interrupt-names:
+    minItems: 1
+    maxItems: 6
+    items:
+      enum:
+        - open-circuit
+        - short-circuit
+        - alert1
+        - alert2
+        - alert3
+        - alert4
+
+  thermocouple-type:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      Type of thermocouple (THERMOCOUPLE_TYPE_K if omitted).
+      Use defines in dt-bindings/iio/temperature/thermocouple.h.
+      Supported types are B, E, J, K, N, R, S, T.
+
+  vdd-supply: true
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/iio/temperature/thermocouple.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        temperature-sensor@60 {
+            compatible = "microchip,mcp9600";
+            reg = <0x60>;
+            interrupt-parent = <&gpio>;
+            interrupts = <25 IRQ_TYPE_EDGE_RISING>;
+            interrupt-names = "open-circuit";
+            thermocouple-type = <THERMOCOUPLE_TYPE_K>;
+            vdd-supply = <&vdd>;
+        };
+    };
index d9002a3a0abb369dbaad2da74f09250d9e3b618d..cc1fbdc056572693b1171b92cddb1397f9e0f7bb 100644 (file)
@@ -4,6 +4,7 @@
    :maxdepth: 1
 
    ABI
+   dts-coding-style
    writing-bindings
    writing-schema
    submitting-patches
diff --git a/Documentation/devicetree/bindings/input/adafruit,seesaw-gamepad.yaml b/Documentation/devicetree/bindings/input/adafruit,seesaw-gamepad.yaml
new file mode 100644 (file)
index 0000000..5e86f6d
--- /dev/null
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/adafruit,seesaw-gamepad.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Adafruit Mini I2C Gamepad with seesaw
+
+maintainers:
+  - Anshul Dalal <anshulusr@gmail.com>
+
+description: |
+  Adafruit Mini I2C Gamepad
+
+    +-----------------------------+
+    |   ___                       |
+    |  /   \               (X)    |
+    | |  S  |  __   __  (Y)   (A) |
+    |  \___/  |ST| |SE|    (B)    |
+    |                             |
+    +-----------------------------+
+
+  S -> 10-bit precision bidirectional analog joystick
+  ST -> Start
+  SE -> Select
+  X, A, B, Y -> Digital action buttons
+
+  Datasheet: https://cdn-learn.adafruit.com/downloads/pdf/gamepad-qt.pdf
+  Product page: https://www.adafruit.com/product/5743
+  Arduino Driver: https://github.com/adafruit/Adafruit_Seesaw
+
+properties:
+  compatible:
+    const: adafruit,seesaw-gamepad
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+    description:
+      The gamepad's IRQ pin triggers a rising edge if interrupts are enabled.
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        joystick@50 {
+            compatible = "adafruit,seesaw-gamepad";
+            interrupts = <18 IRQ_TYPE_EDGE_RISING>;
+            reg = <0x50>;
+        };
+    };
index 159cd9d9fe573c7315cc5066409dae6c8c478e94..cc78c2152921308fe0cad3e29ca78a5fad08f066 100644 (file)
@@ -31,7 +31,23 @@ patternProperties:
         maxItems: 1
 
       interrupts:
-        maxItems: 1
+        oneOf:
+          - items:
+              - description: Optional key interrupt or wakeup interrupt
+          - items:
+              - description: Key interrupt
+              - description: Wakeup interrupt
+
+      interrupt-names:
+        description:
+          Optional interrupt names, can be used to specify a separate dedicated
+          wake-up interrupt in addition to the gpio irq
+        oneOf:
+          - items:
+              - enum: [ irq, wakeup ]
+          - items:
+              - const: irq
+              - const: wakeup
 
       label:
         description: Descriptive name of the key.
@@ -97,6 +113,20 @@ patternProperties:
       - required:
           - gpios
 
+    allOf:
+      - if:
+          properties:
+            interrupts:
+              minItems: 2
+          required:
+            - interrupts
+        then:
+          properties:
+            interrupt-names:
+              minItems: 2
+          required:
+            - interrupt-names
+
     dependencies:
       wakeup-event-action: [ wakeup-source ]
       linux,input-value: [ gpios ]
@@ -137,6 +167,15 @@ examples:
             linux,code = <108>;
             interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
         };
+
+        key-wakeup {
+            label = "GPIO Key WAKEUP";
+            linux,code = <143>;
+            interrupts-extended = <&intc 2 IRQ_TYPE_EDGE_FALLING>,
+                                  <&intc_wakeup 0 IRQ_TYPE_LEVEL_HIGH>;
+            interrupt-names = "irq", "wakeup";
+            wakeup-source;
+        };
     };
 
 ...
diff --git a/Documentation/devicetree/bindings/input/gpio-mouse.txt b/Documentation/devicetree/bindings/input/gpio-mouse.txt
deleted file mode 100644 (file)
index 519510a..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-Device-Tree bindings for GPIO attached mice
-
-This simply uses standard GPIO handles to define a simple mouse connected
-to 5-7 GPIO lines.
-
-Required properties:
-       - compatible: must be "gpio-mouse"
-       - scan-interval-ms: The scanning interval in milliseconds
-       - up-gpios: GPIO line phandle to the line indicating "up"
-       - down-gpios: GPIO line phandle to the line indicating "down"
-       - left-gpios: GPIO line phandle to the line indicating "left"
-       - right-gpios: GPIO line phandle to the line indicating "right"
-
-Optional properties:
-       - button-left-gpios: GPIO line handle to the left mouse button
-       - button-middle-gpios: GPIO line handle to the middle mouse button
-       - button-right-gpios: GPIO line handle to the right mouse button
-Example:
-
-#include <dt-bindings/gpio/gpio.h>
-
-gpio-mouse {
-       compatible = "gpio-mouse";
-       scan-interval-ms = <50>;
-       up-gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
-       down-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
-       left-gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
-       right-gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
-       button-left-gpios = <&gpio0 4 GPIO_ACTIVE_LOW>;
-       button-middle-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
-       button-right-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
-};
diff --git a/Documentation/devicetree/bindings/input/gpio-mouse.yaml b/Documentation/devicetree/bindings/input/gpio-mouse.yaml
new file mode 100644 (file)
index 0000000..3928ec6
--- /dev/null
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/gpio-mouse.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GPIO attached mouse
+
+description: |
+  This simply uses standard GPIO handles to define a simple mouse connected
+  to 5-7 GPIO lines.
+
+maintainers:
+  - Anshul Dalal <anshulusr@gmail.com>
+
+properties:
+  compatible:
+    const: gpio-mouse
+
+  scan-interval-ms:
+    maxItems: 1
+
+  up-gpios:
+    maxItems: 1
+
+  down-gpios:
+    maxItems: 1
+
+  left-gpios:
+    maxItems: 1
+
+  right-gpios:
+    maxItems: 1
+
+  button-left-gpios:
+    maxItems: 1
+
+  button-middle-gpios:
+    maxItems: 1
+
+  button-right-gpios:
+    maxItems: 1
+
+required:
+  - compatible
+  - scan-interval-ms
+  - up-gpios
+  - down-gpios
+  - left-gpios
+  - right-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    gpio-mouse {
+        compatible = "gpio-mouse";
+        scan-interval-ms = <50>;
+        up-gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+        down-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
+        left-gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+        right-gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+        button-left-gpios = <&gpio0 4 GPIO_ACTIVE_LOW>;
+        button-middle-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+        button-right-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+    };
index 3c430d38594f111126268f54774aaa4469fe1754..2c3f693b8982c4947da6f2ce6d76dd2b28add81e 100644 (file)
@@ -9,6 +9,9 @@ title: Azoteq IQS269A Capacitive Touch Controller
 maintainers:
   - Jeff LaBundy <jeff@labundy.com>
 
+allOf:
+  - $ref: input.yaml#
+
 description: |
   The Azoteq IQS269A is an 8-channel capacitive touch controller that features
   additional Hall-effect and inductive sensing capabilities.
@@ -17,7 +20,10 @@ description: |
 
 properties:
   compatible:
-    const: azoteq,iqs269a
+    enum:
+      - azoteq,iqs269a
+      - azoteq,iqs269a-00
+      - azoteq,iqs269a-d0
 
   reg:
     maxItems: 1
@@ -204,6 +210,73 @@ properties:
     default: 1
     description: Specifies the slider coordinate filter strength.
 
+  azoteq,touch-hold-ms:
+    multipleOf: 256
+    minimum: 256
+    maximum: 65280
+    default: 5120
+    description:
+      Specifies the length of time (in ms) for which the channel selected by
+      'azoteq,gpio3-select' must be held in a state of touch in order for an
+      approximately 60-ms pulse to be asserted on the GPIO4 pin.
+
+  linux,keycodes:
+    minItems: 1
+    maxItems: 8
+    description: |
+      Specifies the numeric keycodes associated with each available gesture in
+      the following order (enter 0 for unused gestures):
+      0: Slider 0 tap
+      1: Slider 0 hold
+      2: Slider 0 positive flick or swipe
+      3: Slider 0 negative flick or swipe
+      4: Slider 1 tap
+      5: Slider 1 hold
+      6: Slider 1 positive flick or swipe
+      7: Slider 1 negative flick or swipe
+
+  azoteq,gesture-swipe:
+    type: boolean
+    description:
+      Directs the device to interpret axial gestures as a swipe (finger remains
+      on slider) instead of a flick (finger leaves slider).
+
+  azoteq,timeout-tap-ms:
+    multipleOf: 16
+    minimum: 0
+    maximum: 4080
+    default: 400
+    description:
+      Specifies the length of time (in ms) within which a slider touch must be
+      released in order to be interpreted as a tap. Default and maximum values
+      as well as step size are reduced by a factor of 4 with device version 2.
+
+  azoteq,timeout-swipe-ms:
+    multipleOf: 16
+    minimum: 0
+    maximum: 4080
+    default: 2000
+    description:
+      Specifies the length of time (in ms) within which an axial gesture must be
+      completed in order to be interpreted as a flick or swipe. Default and
+      maximum values as well as step size are reduced by a factor of 4 with
+      device version 2.
+
+  azoteq,thresh-swipe:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    minimum: 0
+    maximum: 255
+    default: 128
+    description:
+      Specifies the number of points across which an axial gesture must travel
+      in order to be interpreted as a flick or swipe.
+
+dependencies:
+  azoteq,gesture-swipe: ["linux,keycodes"]
+  azoteq,timeout-tap-ms: ["linux,keycodes"]
+  azoteq,timeout-swipe-ms: ["linux,keycodes"]
+  azoteq,thresh-swipe: ["linux,keycodes"]
+
 patternProperties:
   "^channel@[0-7]$":
     type: object
@@ -454,6 +527,21 @@ patternProperties:
 
     additionalProperties: false
 
+if:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - azoteq,iqs269a-d0
+then:
+  patternProperties:
+    "^channel@[0-7]$":
+      properties:
+        azoteq,slider1-select: false
+else:
+  properties:
+    azoteq,touch-hold-ms: false
+
 required:
   - compatible
   - reg
@@ -484,6 +572,14 @@ examples:
                     azoteq,hall-enable;
                     azoteq,suspend-mode = <2>;
 
+                    linux,keycodes = <KEY_PLAYPAUSE>,
+                                     <KEY_STOPCD>,
+                                     <KEY_NEXTSONG>,
+                                     <KEY_PREVIOUSSONG>;
+
+                    azoteq,timeout-tap-ms = <400>;
+                    azoteq,timeout-swipe-ms = <800>;
+
                     channel@0 {
                             reg = <0x0>;
 
index e34c9e78d38d8c0d65043ec24438681fe59119ef..70567d92c746ef8bc54eca9652b4f69fb579e74c 100644 (file)
@@ -90,26 +90,4 @@ required:
 
 unevaluatedProperties: false
 
-examples:
-  - |
-    #include <dt-bindings/input/input.h>
-    #include <dt-bindings/interrupt-controller/arm-gic.h>
-
-    pmic {
-        compatible = "mediatek,mt6397";
-
-        keys {
-          compatible = "mediatek,mt6397-keys";
-          mediatek,long-press-mode = <1>;
-          power-off-time-sec = <0>;
-
-          key-power {
-            linux,keycodes = <KEY_POWER>;
-            wakeup-source;
-          };
-
-          key-home {
-            linux,keycodes = <KEY_VOLUMEDOWN>;
-          };
-        };
-    };
+...
index 5b5d4f7d34827a12550df3d1480474b69caece2e..7ade03f1b32b8108f053523ce2a170fb55a54b3b 100644 (file)
@@ -45,13 +45,13 @@ properties:
       Enables the Linux input system's autorepeat feature on the input device.
 
   linux,keycodes:
-    minItems: 6
-    maxItems: 6
+    minItems: 3
+    maxItems: 8
     description: |
       Specifies an array of numeric keycode values to
       be used for the channels. If this property is
       omitted, KEY_A, KEY_B, etc are used as defaults.
-      The array must have exactly six entries.
+      The number of entries must correspond to the number of channels.
 
   microchip,sensor-gain:
     $ref: /schemas/types.yaml#/definitions/uint32
@@ -70,6 +70,59 @@ properties:
       open drain. This property allows using the active
       high push-pull output.
 
+  microchip,sensitivity-delta-sense:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    default: 32
+    enum: [1, 2, 4, 8, 16, 32, 64, 128]
+    description:
+      Controls the sensitivity multiplier of a touch detection.
+      Higher value means more sensitive settings.
+      At the more sensitive settings, touches are detected for a smaller delta
+      capacitance corresponding to a "lighter" touch.
+
+  microchip,signal-guard:
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 3
+    maxItems: 8
+    items:
+      enum: [0, 1]
+    description: |
+      0 - off
+      1 - on
+      The signal guard isolates the signal from virtual grounds.
+      If enabled then the behavior of the channel is changed to signal guard.
+      The number of entries must correspond to the number of channels.
+
+  microchip,input-threshold:
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 3
+    maxItems: 8
+    items:
+      minimum: 0
+      maximum: 127
+    description:
+      Specifies the delta threshold that is used to determine if a touch has
+      been detected. A higher value means a larger difference in capacitance
+      is required for a touch to be registered, making the touch sensor less
+      sensitive.
+      The number of entries must correspond to the number of channels.
+
+  microchip,calib-sensitivity:
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 3
+    maxItems: 8
+    items:
+      enum: [1, 2, 4]
+    description: |
+      Specifies an array of numeric values that controls the gain
+      used by the calibration routine to enable sensor inputs
+      to be more sensitive for proximity detection.
+      Gain is based on touch pad capacitance range
+      1 - 5-50pF
+      2 - 0-25pF
+      4 - 0-12.5pF
+      The number of entries must correspond to the number of channels.
+
 patternProperties:
   "^led@[0-7]$":
     type: object
@@ -99,10 +152,29 @@ allOf:
           contains:
             enum:
               - microchip,cap1106
+              - microchip,cap1203
+              - microchip,cap1206
+              - microchip,cap1293
+              - microchip,cap1298
     then:
       patternProperties:
         "^led@[0-7]$": false
 
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - microchip,cap1106
+              - microchip,cap1126
+              - microchip,cap1188
+              - microchip,cap1203
+              - microchip,cap1206
+    then:
+      properties:
+        microchip,signal-guard: false
+        microchip,calib-sensitivity: false
+
 required:
   - compatible
   - interrupts
@@ -122,6 +194,8 @@ examples:
         reg = <0x28>;
         autorepeat;
         microchip,sensor-gain = <2>;
+        microchip,sensitivity-delta-sense = <16>;
+        microchip,input-threshold = <21>, <18>, <46>, <46>, <46>, <21>;
 
         linux,keycodes = <103>,        /* KEY_UP */
                          <106>,        /* KEY_RIGHT */
index a401a0bfcbec21e098e4fe7bf9b85410d6ea83a8..4c8d303ff93c949f63926e544b2023e6bb8d492f 100644 (file)
@@ -28,21 +28,4 @@ required:
 
 additionalProperties: false
 
-examples:
-  - |
-    #include <dt-bindings/interrupt-controller/arm-gic.h>
-    sc2731_pmic: pmic@0 {
-      compatible = "sprd,sc2731";
-      reg = <0 0>;
-      spi-max-frequency = <26000000>;
-      interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
-      interrupt-controller;
-      #interrupt-cells = <2>;
-      #address-cells = <1>;
-      #size-cells = <0>;
-
-      vibrator@eb4 {
-        compatible = "sprd,sc2731-vibrator";
-        reg = <0xeb4>;
-      };
-    };
+...
diff --git a/Documentation/devicetree/bindings/input/ti,drv2665.txt b/Documentation/devicetree/bindings/input/ti,drv2665.txt
deleted file mode 100644 (file)
index 1ba97ac..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-* Texas Instruments - drv2665 Haptics driver
-
-Required properties:
-       - compatible - "ti,drv2665" - DRV2665
-       - reg -  I2C slave address
-       - vbat-supply - Required supply regulator
-
-Example:
-
-haptics: haptics@59 {
-       compatible = "ti,drv2665";
-       reg = <0x59>;
-       vbat-supply = <&vbat>;
-};
-
-For more product information please see the link below:
-http://www.ti.com/product/drv2665
diff --git a/Documentation/devicetree/bindings/input/ti,drv2667.txt b/Documentation/devicetree/bindings/input/ti,drv2667.txt
deleted file mode 100644 (file)
index 996382c..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-* Texas Instruments - drv2667 Haptics driver
-
-Required properties:
-       - compatible - "ti,drv2667" - DRV2667
-       - reg -  I2C slave address
-       - vbat-supply - Required supply regulator
-
-Example:
-
-haptics: haptics@59 {
-       compatible = "ti,drv2667";
-       reg = <0x59>;
-       vbat-supply = <&vbat>;
-};
-
-For more product information please see the link below:
-http://www.ti.com/product/drv2667
diff --git a/Documentation/devicetree/bindings/input/ti,drv266x.yaml b/Documentation/devicetree/bindings/input/ti,drv266x.yaml
new file mode 100644 (file)
index 0000000..da18188
--- /dev/null
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/ti,drv266x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments - drv266x Haptics driver
+
+description: |
+  Product Page:
+    http://www.ti.com/product/drv2665
+    http://www.ti.com/product/drv2667
+
+maintainers:
+  - Anshul Dalal <anshulusr@gmail.com>
+
+properties:
+  compatible:
+    enum:
+      - ti,drv2665
+      - ti,drv2667
+
+  reg:
+    maxItems: 1
+
+  vbat-supply:
+    description: Required supply regulator
+
+required:
+  - compatible
+  - reg
+  - vbat-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        haptics@59 {
+            compatible = "ti,drv2667";
+            reg = <0x59>;
+            vbat-supply = <&vbat>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/neonode,zforce.yaml b/Documentation/devicetree/bindings/input/touchscreen/neonode,zforce.yaml
new file mode 100644 (file)
index 0000000..c2ee89b
--- /dev/null
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/neonode,zforce.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Neonode infrared touchscreen controller
+
+maintainers:
+  - Heiko Stuebner <heiko@sntech.de>
+
+allOf:
+  - $ref: touchscreen.yaml#
+
+properties:
+  compatible:
+    const: neonode,zforce
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  reset-gpios:
+    maxItems: 1
+
+  irq-gpios:
+    maxItems: 1
+
+  x-size:
+    deprecated: true
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  y-size:
+    deprecated: true
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  vdd-supply: true
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - reset-gpios
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        touchscreen@50 {
+            compatible = "neonode,zforce";
+            reg = <0x50>;
+            interrupts = <2 0>;
+            vdd-supply = <&reg_zforce_vdd>;
+
+            reset-gpios = <&gpio5 9 0>; /* RST */
+            irq-gpios = <&gpio5 6 0>; /* IRQ, optional */
+
+            touchscreen-min-x = <0>;
+            touchscreen-size-x = <800>;
+            touchscreen-min-y = <0>;
+            touchscreen-size-y = <600>;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt b/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt
deleted file mode 100644 (file)
index 6805d10..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-* Samsung S6SY761 touchscreen controller
-
-Required properties:
-- compatible           : must be "samsung,s6sy761"
-- reg                  : I2C slave address, (e.g. 0x48)
-- interrupts           : interrupt specification
-- avdd-supply          : analogic power supply
-- vdd-supply           : power supply
-
-Optional properties:
-- touchscreen-size-x   : see touchscreen.txt. This property is embedded in the
-                         device. If defined it forces a different x resolution.
-- touchscreen-size-y   : see touchscreen.txt. This property is embedded in the
-                         device. If defined it forces a different y resolution.
-
-Example:
-
-i2c@00000000 {
-
-       /* ... */
-
-       touchscreen@48 {
-               compatible = "samsung,s6sy761";
-               reg = <0x48>;
-               interrupt-parent = <&gpa1>;
-               interrupts = <1 IRQ_TYPE_NONE>;
-               avdd-supply = <&ldo30_reg>;
-               vdd-supply = <&ldo31_reg>;
-               touchscreen-size-x = <4096>;
-               touchscreen-size-y = <4096>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.yaml b/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.yaml
new file mode 100644 (file)
index 0000000..1ffd17a
--- /dev/null
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/samsung,s6sy761.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S6SY761 touchscreen controller
+
+maintainers:
+  - Andi Shyti <andi.shyti@kernel.org>
+
+allOf:
+  - $ref: touchscreen.yaml#
+
+properties:
+  compatible:
+    const: samsung,s6sy761
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  avdd-supply: true
+  vdd-supply: true
+
+unevaluatedProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - avdd-supply
+  - vdd-supply
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        touchscreen@48 {
+            compatible = "samsung,s6sy761";
+            reg = <0x48>;
+            interrupt-parent = <&gpa1>;
+            interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+            avdd-supply = <&ldo30_reg>;
+            vdd-supply = <&ldo31_reg>;
+            touchscreen-size-x = <4096>;
+            touchscreen-size-y = <4096>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt b/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt
deleted file mode 100644 (file)
index e3c27c4..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-* Neonode infrared touchscreen controller
-
-Required properties:
-- compatible: must be "neonode,zforce"
-- reg: I2C address of the chip
-- interrupts: interrupt to which the chip is connected
-- reset-gpios: reset gpio the chip is connected to
-- x-size: horizontal resolution of touchscreen
-- y-size: vertical resolution of touchscreen
-
-Optional properties:
-- irq-gpios : interrupt gpio the chip is connected to
-- vdd-supply: Regulator controlling the controller supply
-
-Example:
-
-       i2c@00000000 {
-               /* ... */
-
-               zforce_ts@50 {
-                       compatible = "neonode,zforce";
-                       reg = <0x50>;
-                       interrupts = <2 0>;
-                       vdd-supply = <&reg_zforce_vdd>;
-
-                       reset-gpios = <&gpio5 9 0>; /* RST */
-                       irq-gpios = <&gpio5 6 0>; /* IRQ, optional */
-
-                       x-size = <800>;
-                       y-size = <600>;
-               };
-
-               /* ... */
-       };
index 73f809cdb783cf6026c59e39cb128136f36b7d49..05067e197abe810a8dd2457f6056b6eff3e5387c 100644 (file)
@@ -25,13 +25,16 @@ properties:
       - const: qcom,msm8998-bwmon       # BWMON v4
       - items:
           - enum:
+              - qcom,qcm2290-cpu-bwmon
               - qcom,sc7180-cpu-bwmon
               - qcom,sc7280-cpu-bwmon
               - qcom,sc8280xp-cpu-bwmon
               - qcom,sdm845-cpu-bwmon
+              - qcom,sm6115-cpu-bwmon
               - qcom,sm6350-llcc-bwmon
               - qcom,sm8250-cpu-bwmon
               - qcom,sm8550-cpu-bwmon
+              - qcom,sm8650-cpu-bwmon
           - const: qcom,sdm845-bwmon    # BWMON v4, unified register space
       - items:
           - enum:
@@ -40,6 +43,7 @@ properties:
               - qcom,sm6350-cpu-bwmon
               - qcom,sm8250-llcc-bwmon
               - qcom,sm8550-llcc-bwmon
+              - qcom,sm8650-llcc-bwmon
           - const: qcom,sc7280-llcc-bwmon
       - const: qcom,sc7280-llcc-bwmon   # BWMON v5
       - const: qcom,sdm845-llcc-bwmon   # BWMON v5
index 00b570c82903974cbaeaba09406ff0581c61d8e0..60441f0c5d7211f24b5fa536425b351767040a2d 100644 (file)
@@ -11,8 +11,13 @@ maintainers:
 
 description: |
   This interrupt controller is found in the Loongson-3 family of chips and
-  Loongson-2K1000 chip, as the primary package interrupt controller which
+  Loongson-2K series chips, as the primary package interrupt controller which
   can route local I/O interrupt to interrupt lines of cores.
+  Be aware of the following points:
+  1. The Loongson-2K0500 is a single-core CPU;
+  2. The Loongson-2K0500/2K1000 has 64 device interrupt sources as inputs, so two
+     nodes need to be defined in the dts(i) to describe the "0-31" and "32-61"
+     interrupt sources respectively.
 
 allOf:
   - $ref: /schemas/interrupt-controller.yaml#
@@ -33,6 +38,7 @@ properties:
       - const: main
       - const: isr0
       - const: isr1
+    minItems: 2
 
   interrupt-controller: true
 
@@ -45,11 +51,9 @@ properties:
   interrupt-names:
     description: List of names for the parent interrupts.
     items:
-      - const: int0
-      - const: int1
-      - const: int2
-      - const: int3
+      pattern: int[0-3]
     minItems: 1
+    maxItems: 4
 
   '#interrupt-cells':
     const: 2
@@ -69,6 +73,7 @@ required:
   - compatible
   - reg
   - interrupts
+  - interrupt-names
   - interrupt-controller
   - '#interrupt-cells'
   - loongson,parent_int_map
@@ -86,7 +91,8 @@ if:
 then:
   properties:
     reg:
-      minItems: 3
+      minItems: 2
+      maxItems: 3
 
   required:
     - reg-names
index 86d61896f59135e963dbd2b70796dab8ac90ecc1..4bdc8321904bd043a62a09acf97f52bc224fdc75 100644 (file)
@@ -35,12 +35,16 @@ properties:
           - qcom,sdm845-pdc
           - qcom,sdx55-pdc
           - qcom,sdx65-pdc
+          - qcom,sdx75-pdc
           - qcom,sm4450-pdc
           - qcom,sm6350-pdc
           - qcom,sm8150-pdc
           - qcom,sm8250-pdc
           - qcom,sm8350-pdc
           - qcom,sm8450-pdc
+          - qcom,sm8550-pdc
+          - qcom,sm8650-pdc
+          - qcom,x1e80100-pdc
       - const: qcom,pdc
 
   reg:
index 2b153d7c5421639b1a2d563460e64c0bb0055f83..e44e4e5708a722902f8caca7814f634fdc1617af 100644 (file)
@@ -55,8 +55,8 @@ examples:
   - |
     #include <dt-bindings/interrupt-controller/irq-st.h>
     irq-syscfg {
-        compatible    = "st,stih407-irq-syscfg";
-        st,syscfg     = <&syscfg_cpu>;
+        compatible = "st,stih407-irq-syscfg";
+        st,syscfg = <&syscfg_cpu>;
         st,irq-device = <ST_IRQ_SYSCFG_PMU_0>,
                         <ST_IRQ_SYSCFG_PMU_1>;
         st,fiq-device = <ST_IRQ_SYSCFG_DISABLED>,
index 903edf85d72e4057d76a00e22b9ee824a4089c0c..7adb1de455a5b38dfb6c76303f11c8f95395e0dd 100644 (file)
@@ -24,6 +24,7 @@ properties:
   compatible:
     enum:
       - apple,t8103-dart
+      - apple,t8103-usb4-dart
       - apple,t8110-dart
       - apple,t6000-dart
 
index aa9e1c0895a508a2e6eed5831a73e92de06df9d4..a4042ae2477024b0230d7db843c74f6b1da7d732 100644 (file)
@@ -56,6 +56,8 @@ properties:
               - qcom,sm8350-smmu-500
               - qcom,sm8450-smmu-500
               - qcom,sm8550-smmu-500
+              - qcom,sm8650-smmu-500
+              - qcom,x1e80100-smmu-500
           - const: qcom,smmu-500
           - const: arm,mmu-500
 
@@ -89,6 +91,8 @@ properties:
               - qcom,sm8150-smmu-500
               - qcom,sm8250-smmu-500
               - qcom,sm8350-smmu-500
+              - qcom,sm8450-smmu-500
+              - qcom,sm8550-smmu-500
           - const: qcom,adreno-smmu
           - const: qcom,smmu-500
           - const: arm,mmu-500
@@ -429,6 +433,30 @@ allOf:
             - description: interface clock required to access smmu's registers
                 through the TCU's programming interface.
 
+  - if:
+      properties:
+        compatible:
+          items:
+            - enum:
+                - qcom,sm8350-smmu-500
+            - const: qcom,adreno-smmu
+            - const: qcom,smmu-500
+            - const: arm,mmu-500
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: bus
+            - const: iface
+            - const: ahb
+            - const: hlos1_vote_gpu_smmu
+            - const: cx_gmu
+            - const: hub_cx_int
+            - const: hub_aon
+        clocks:
+          minItems: 7
+          maxItems: 7
+
   - if:
       properties:
         compatible:
@@ -453,6 +481,50 @@ allOf:
             - description: Voter clock required for HLOS SMMU access
             - description: Interface clock required for register access
 
+  - if:
+      properties:
+        compatible:
+          const: qcom,sm8450-smmu-500
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: gmu
+            - const: hub
+            - const: hlos
+            - const: bus
+            - const: iface
+            - const: ahb
+
+        clocks:
+          items:
+            - description: GMU clock
+            - description: GPU HUB clock
+            - description: HLOS vote clock
+            - description: GPU memory bus clock
+            - description: GPU SNoC bus clock
+            - description: GPU AHB clock
+
+  - if:
+      properties:
+        compatible:
+          const: qcom,sm8550-smmu-500
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: hlos
+            - const: bus
+            - const: iface
+            - const: ahb
+
+        clocks:
+          items:
+            - description: HLOS vote clock
+            - description: GPU memory bus clock
+            - description: GPU SNoC bus clock
+            - description: GPU AHB clock
+
   # Disallow clocks for all other platforms with specific compatibles
   - if:
       properties:
@@ -472,9 +544,8 @@ allOf:
               - qcom,sdx65-smmu-500
               - qcom,sm6350-smmu-500
               - qcom,sm6375-smmu-500
-              - qcom,sm8350-smmu-500
-              - qcom,sm8450-smmu-500
-              - qcom,sm8550-smmu-500
+              - qcom,sm8650-smmu-500
+              - qcom,x1e80100-smmu-500
     then:
       properties:
         clock-names: false
index ba9124f721f1514759d5a63c2036283a04e1d0aa..621dde0e45d8514cfcfe5a0cdd17f87a08ada5bc 100644 (file)
@@ -19,9 +19,14 @@ description: |+
 
 properties:
   compatible:
-    enum:
-      - rockchip,iommu
-      - rockchip,rk3568-iommu
+    oneOf:
+      - enum:
+          - rockchip,iommu
+          - rockchip,rk3568-iommu
+      - items:
+          - enum:
+              - rockchip,rk3588-iommu
+          - const: rockchip,rk3568-iommu
 
   reg:
     items:
diff --git a/Documentation/devicetree/bindings/leds/allwinner,sun50i-a100-ledc.yaml b/Documentation/devicetree/bindings/leds/allwinner,sun50i-a100-ledc.yaml
new file mode 100644 (file)
index 0000000..760cb33
--- /dev/null
@@ -0,0 +1,137 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/allwinner,sun50i-a100-ledc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A100 LED Controller
+
+maintainers:
+  - Samuel Holland <samuel@sholland.org>
+
+description:
+  The LED controller found in Allwinner sunxi SoCs uses a one-wire serial
+  interface to drive up to 1024 RGB LEDs.
+
+properties:
+  compatible:
+    oneOf:
+      - const: allwinner,sun50i-a100-ledc
+      - items:
+          - enum:
+              - allwinner,sun20i-d1-ledc
+              - allwinner,sun50i-r329-ledc
+          - const: allwinner,sun50i-a100-ledc
+
+  reg:
+    maxItems: 1
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: Bus clock
+      - description: Module clock
+
+  clock-names:
+    items:
+      - const: bus
+      - const: mod
+
+  resets:
+    maxItems: 1
+
+  dmas:
+    maxItems: 1
+    description: TX DMA channel
+
+  dma-names:
+    const: tx
+
+  allwinner,pixel-format:
+    description: Pixel format (subpixel transmission order), default is "grb"
+    enum:
+      - bgr
+      - brg
+      - gbr
+      - grb
+      - rbg
+      - rgb
+
+  allwinner,t0h-ns:
+    default: 336
+    description: Length of high pulse when transmitting a "0" bit
+
+  allwinner,t0l-ns:
+    default: 840
+    description: Length of low pulse when transmitting a "0" bit
+
+  allwinner,t1h-ns:
+    default: 882
+    description: Length of high pulse when transmitting a "1" bit
+
+  allwinner,t1l-ns:
+    default: 294
+    description: Length of low pulse when transmitting a "1" bit
+
+  allwinner,treset-ns:
+    default: 300000
+    description: Minimum delay between transmission frames
+
+patternProperties:
+  "^multi-led@[0-9a-f]+$":
+    type: object
+    $ref: leds-class-multicolor.yaml#
+    unevaluatedProperties: false
+    properties:
+      reg:
+        minimum: 0
+        maximum: 1023
+        description: Index of the LED in the series (must be contiguous)
+
+    required:
+      - reg
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - resets
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/leds/common.h>
+
+    ledc: led-controller@2008000 {
+      compatible = "allwinner,sun20i-d1-ledc",
+                   "allwinner,sun50i-a100-ledc";
+      reg = <0x2008000 0x400>;
+      interrupts = <36 IRQ_TYPE_LEVEL_HIGH>;
+      clocks = <&ccu 12>, <&ccu 34>;
+      clock-names = "bus", "mod";
+      resets = <&ccu 12>;
+      dmas = <&dma 42>;
+      dma-names = "tx";
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      multi-led@0 {
+        reg = <0x0>;
+        color = <LED_COLOR_ID_RGB>;
+        function = LED_FUNCTION_INDICATOR;
+      };
+    };
+
+...
index feb5febaf361aef71a2b419d0fe50adc21c67c62..54d6d1f08e2489d91d104a89df31cbc31e823f2c 100644 (file)
@@ -10,15 +10,19 @@ maintainers:
   - Martin Kurbanov <mmkurbanov@sberdevices.ru>
 
 description: |
-  This controller is present on AW20036/AW20054/AW20072.
-  It is a 3x12/6x9/6x12 matrix LED programmed via
-  an I2C interface, up to 36/54/72 LEDs or 12/18/24 RGBs,
-  3 pattern controllers for auto breathing or group dimming control.
+  It is a matrix LED driver programmed via an I2C interface. Devices have
+  a set of individually controlled leds and support 3 pattern controllers
+  for auto breathing or group dimming control. Supported devices:
+    - AW20036 (3x12) 36 LEDs
+    - AW20054 (6x9)  54 LEDs
+    - AW20072 (6x12) 72 LEDs
+    - AW20108 (9x12) 108 LEDs
 
   For more product information please see the link below:
   aw20036 - https://www.awinic.com/en/productDetail/AW20036QNR#tech-docs
   aw20054 - https://www.awinic.com/en/productDetail/AW20054QNR#tech-docs
   aw20072 - https://www.awinic.com/en/productDetail/AW20072QNR#tech-docs
+  aw20108 - https://www.awinic.com/en/productDetail/AW20108QNR#tech-docs
 
 properties:
   compatible:
@@ -26,6 +30,7 @@ properties:
       - awinic,aw20036
       - awinic,aw20054
       - awinic,aw20072
+      - awinic,aw20108
 
   reg:
     maxItems: 1
@@ -36,13 +41,11 @@ properties:
   "#size-cells":
     const: 0
 
-  awinic,display-rows:
-    $ref: /schemas/types.yaml#/definitions/uint32
-    description:
-      Leds matrix size
+  enable-gpios:
+    maxItems: 1
 
 patternProperties:
-  "^led@[0-9a-f]$":
+  "^led@[0-9a-f]+$":
     type: object
     $ref: common.yaml#
     unevaluatedProperties: false
@@ -60,16 +63,11 @@ patternProperties:
           since the chip has a single global setting.
           The maximum output current of each LED is calculated by the
           following formula:
-            IMAXled = 160000 * (592 / 600.5) * (1 / display-rows)
+            IMAXled = 160000 * (592 / 600.5) * (1 / max-current-switch-number)
           And the minimum output current formula:
-            IMINled = 3300 * (592 / 600.5) * (1 / display-rows)
-
-required:
-  - compatible
-  - reg
-  - "#address-cells"
-  - "#size-cells"
-  - awinic,display-rows
+            IMINled = 3300 * (592 / 600.5) * (1 / max-current-switch-number)
+          where max-current-switch-number is determinated by led configuration
+          and depends on how leds are physically connected to the led driver.
 
 allOf:
   - if:
@@ -78,18 +76,67 @@ allOf:
           contains:
             const: awinic,aw20036
     then:
+      patternProperties:
+        "^led@[0-9a-f]+$":
+          properties:
+            reg:
+              items:
+                minimum: 0
+                maximum: 36
+
+  - if:
       properties:
-        awinic,display-rows:
-          enum: [1, 2, 3]
-    else:
+        compatible:
+          contains:
+            const: awinic,aw20054
+    then:
+      patternProperties:
+        "^led@[0-9a-f]+$":
+          properties:
+            reg:
+              items:
+                minimum: 0
+                maximum: 54
+
+  - if:
       properties:
-        awinic,display-rows:
-          enum: [1, 2, 3, 4, 5, 6, 7]
+        compatible:
+          contains:
+            const: awinic,aw20072
+    then:
+      patternProperties:
+        "^led@[0-9a-f]+$":
+          properties:
+            reg:
+              items:
+                minimum: 0
+                maximum: 72
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: awinic,aw20108
+    then:
+      patternProperties:
+        "^led@[0-9a-f]+$":
+          properties:
+            reg:
+              items:
+                minimum: 0
+                maximum: 108
+
+required:
+  - compatible
+  - reg
+  - "#address-cells"
+  - "#size-cells"
 
 additionalProperties: false
 
 examples:
   - |
+    #include <dt-bindings/gpio/gpio.h>
     #include <dt-bindings/leds/common.h>
 
     i2c {
@@ -101,7 +148,7 @@ examples:
             reg = <0x3a>;
             #address-cells = <1>;
             #size-cells = <0>;
-            awinic,display-rows = <3>;
+            enable-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>;
 
             led@0 {
                 reg = <0x0>;
index 4191e33626f51ad3ccb5843e61caeb83eafea193..527a37368ed7422ae605d693e95b2765fa15246d 100644 (file)
@@ -14,8 +14,8 @@ description: |
   programmable switching frequency to optimize efficiency.
   It supports two different dimming modes:
 
-  - analog mode, via I2C commands (default)
-  - PWM controlled mode.
+  - analog mode, via I2C commands, as default mode (32 dimming levels)
+  - PWM controlled mode (optional)
 
   The datasheet is available at:
   https://www.monolithicpower.com/en/mp3309c.html
@@ -50,8 +50,6 @@ properties:
 required:
   - compatible
   - reg
-  - max-brightness
-  - default-brightness
 
 unevaluatedProperties: false
 
@@ -66,8 +64,8 @@ examples:
             compatible = "mps,mp3309c";
             reg = <0x17>;
             pwms = <&pwm1 0 3333333 0>; /* 300 Hz --> (1/f) * 1*10^9 */
-            max-brightness = <100>;
-            default-brightness = <80>;
+            brightness-levels = <0 4 8 16 32 64 128 255>;
+            default-brightness = <6>;
             mps,overvoltage-protection-microvolt = <24000000>;
         };
     };
index c8d0ba5f2327647d847603119cb38ceedcc3e0a5..55a8d1385e21049bd9922d0a3e112aea8e646f88 100644 (file)
@@ -167,7 +167,7 @@ properties:
       Note that this flag is mainly used for PWM-LEDs, where it is not possible
       to map brightness to current. Drivers for other controllers should use
       led-max-microamp.
-    $ref: /schemas/types.yaml#definitions/uint32
+    $ref: /schemas/types.yaml#/definitions/uint32
 
   panic-indicator:
     description:
index a8736fd5a5390e5bd45f64d944dfd5e291a9b85e..1ba607685f5f9b31cfabc0b1c4e7b619e3c19a71 100644 (file)
@@ -89,9 +89,11 @@ additionalProperties: false
 examples:
   - |
     #include <dt-bindings/leds/common.h>
-    spmi {
+
+    pmic {
         #address-cells = <1>;
         #size-cells = <0>;
+
         led-controller@ee00 {
             compatible = "qcom,pm8350c-flash-led", "qcom,spmi-flash-led";
             reg = <0xee00>;
diff --git a/Documentation/devicetree/bindings/loongarch/cpus.yaml b/Documentation/devicetree/bindings/loongarch/cpus.yaml
new file mode 100644 (file)
index 0000000..f175872
--- /dev/null
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/loongarch/cpus.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LoongArch CPUs
+
+maintainers:
+  - Binbin Zhou <zhoubinbin@loongson.cn>
+
+description:
+  This document describes the list of LoongArch CPU cores that support FDT,
+  it describe the layout of CPUs in a system through the "cpus" node.
+
+allOf:
+  - $ref: /schemas/cpu.yaml#
+
+properties:
+  compatible:
+    enum:
+      - loongson,la264
+      - loongson,la364
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/loongson,ls2k-clk.h>
+
+    cpus {
+        #size-cells = <0>;
+        #address-cells = <1>;
+
+        cpu@0 {
+            compatible = "loongson,la264";
+            device_type = "cpu";
+            reg = <0>;
+            clocks = <&clk LOONGSON2_NODE_CLK>;
+        };
+
+        cpu@1 {
+            compatible = "loongson,la264";
+            device_type = "cpu";
+            reg = <1>;
+            clocks = <&clk LOONGSON2_NODE_CLK>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/loongarch/loongson.yaml b/Documentation/devicetree/bindings/loongarch/loongson.yaml
new file mode 100644 (file)
index 0000000..e1a4a97
--- /dev/null
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/loongarch/loongson.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson SoC-based boards
+
+maintainers:
+  - Binbin Zhou <zhoubinbin@loongson.cn>
+
+properties:
+  $nodename:
+    const: '/'
+  compatible:
+    oneOf:
+      - description: Loongson-2K0500 processor based boards
+        items:
+          - const: loongson,ls2k0500-ref
+          - const: loongson,ls2k0500
+
+      - description: Loongson-2K1000 processor based boards
+        items:
+          - const: loongson,ls2k1000-ref
+          - const: loongson,ls2k1000
+
+      - description: Loongson-2K2000 processor based boards
+        items:
+          - const: loongson,ls2k2000-ref
+          - const: loongson,ls2k2000
+
+additionalProperties: true
+
+...
index a38413f8d1321b0e1a8f4cade8341d011048a003..79eb523b843644d49b74843a3ae6bb9e832bf308 100644 (file)
@@ -23,6 +23,24 @@ properties:
               - qcom,ipq8074-apcs-apps-global
               - qcom,ipq9574-apcs-apps-global
           - const: qcom,ipq6018-apcs-apps-global
+      - items:
+          - enum:
+              - qcom,qcs404-apcs-apps-global
+          - const: qcom,msm8916-apcs-kpss-global
+          - const: syscon
+      - items:
+          - enum:
+              - qcom,msm8976-apcs-kpss-global
+          - const: qcom,msm8994-apcs-kpss-global
+          - const: syscon
+      - items:
+          - enum:
+              - qcom,msm8998-apcs-hmss-global
+              - qcom,sdm660-apcs-hmss-global
+              - qcom,sm4250-apcs-hmss-global
+              - qcom,sm6115-apcs-hmss-global
+              - qcom,sm6125-apcs-hmss-global
+          - const: qcom,msm8994-apcs-kpss-global
       - items:
           - enum:
               - qcom,sc7180-apss-shared
@@ -34,22 +52,14 @@ properties:
               - qcom,msm8916-apcs-kpss-global
               - qcom,msm8939-apcs-kpss-global
               - qcom,msm8953-apcs-kpss-global
-              - qcom,msm8976-apcs-kpss-global
               - qcom,msm8994-apcs-kpss-global
-              - qcom,qcs404-apcs-apps-global
               - qcom,sdx55-apcs-gcc
           - const: syscon
       - enum:
           - qcom,ipq6018-apcs-apps-global
-          - qcom,ipq8074-apcs-apps-global
           - qcom,msm8996-apcs-hmss-global
-          - qcom,msm8998-apcs-hmss-global
           - qcom,qcm2290-apcs-hmss-global
-          - qcom,sdm660-apcs-hmss-global
           - qcom,sdm845-apss-shared
-          - qcom,sm4250-apcs-hmss-global
-          - qcom,sm6115-apcs-hmss-global
-          - qcom,sm6125-apcs-hmss-global
 
   reg:
     maxItems: 1
@@ -80,20 +90,38 @@ allOf:
   - if:
       properties:
         compatible:
-          enum:
-            - qcom,msm8916-apcs-kpss-global
-            - qcom,msm8939-apcs-kpss-global
-            - qcom,qcs404-apcs-apps-global
+          contains:
+            enum:
+              - qcom,msm8916-apcs-kpss-global
+    then:
+      properties:
+        clocks:
+          items:
+            - description: primary pll parent of the clock driver
+            - description: auxiliary parent
+        clock-names:
+          items:
+            - const: pll
+            - const: aux
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,msm8939-apcs-kpss-global
     then:
       properties:
         clocks:
           items:
             - description: primary pll parent of the clock driver
             - description: auxiliary parent
+            - description: reference clock
         clock-names:
           items:
             - const: pll
             - const: aux
+            - const: ref
 
   - if:
       properties:
@@ -113,6 +141,7 @@ allOf:
             - const: ref
             - const: pll
             - const: aux
+
   - if:
       properties:
         compatible:
@@ -137,16 +166,10 @@ allOf:
         compatible:
           enum:
             - qcom,msm8953-apcs-kpss-global
-            - qcom,msm8976-apcs-kpss-global
             - qcom,msm8994-apcs-kpss-global
             - qcom,msm8996-apcs-hmss-global
-            - qcom,msm8998-apcs-hmss-global
             - qcom,qcm2290-apcs-hmss-global
-            - qcom,sdm660-apcs-hmss-global
             - qcom,sdm845-apss-shared
-            - qcom,sm4250-apcs-hmss-global
-            - qcom,sm6115-apcs-hmss-global
-            - qcom,sm6125-apcs-hmss-global
     then:
       properties:
         clocks: false
@@ -192,7 +215,8 @@ examples:
     #define GCC_APSS_AHB_CLK_SRC  1
     #define GCC_GPLL0_AO_OUT_MAIN 123
     apcs: mailbox@b011000 {
-        compatible = "qcom,qcs404-apcs-apps-global", "syscon";
+        compatible = "qcom,qcs404-apcs-apps-global",
+                     "qcom,msm8916-apcs-kpss-global", "syscon";
         reg = <0x0b011000 0x1000>;
         #mbox-cells = <1>;
         clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>;
index a35f9483dc716ee4fe45f2f96b59103c974387aa..8f004868aad988fa1d0be7bf10dc8b06f4ab82ce 100644 (file)
@@ -35,6 +35,7 @@ properties:
           - qcom,sm8450-ipcc
           - qcom,sm8550-ipcc
           - qcom,sm8650-ipcc
+          - qcom,x1e80100-ipcc
       - const: qcom,ipcc
 
   reg:
index 8b15a0532120f7bbd9610df84397d7d9fc14059e..fe83b5cb1278d5853a00ffa16f5cde82b456b788 100644 (file)
@@ -37,7 +37,9 @@ maintainers:
 
 properties:
   compatible:
-    const: xlnx,zynqmp-ipi-mailbox
+    enum:
+      - xlnx,zynqmp-ipi-mailbox
+      - xlnx,versal-ipi-mailbox
 
   method:
     description: |
@@ -58,6 +60,12 @@ properties:
   '#size-cells':
     const: 2
 
+  reg:
+    maxItems: 2
+
+  reg-names:
+    maxItems: 2
+
   xlnx,ipi-id:
     description: |
       Remote Xilinx IPI agent ID of which the mailbox is connected to.
@@ -76,7 +84,17 @@ patternProperties:
     properties:
 
       compatible:
-        const: xlnx,zynqmp-ipi-dest-mailbox
+        enum:
+          - xlnx,zynqmp-ipi-dest-mailbox
+          - xlnx,versal-ipi-dest-mailbox
+
+      reg:
+        minItems: 1
+        maxItems: 4
+
+      reg-names:
+        minItems: 1
+        maxItems: 4
 
       xlnx,ipi-id:
         description:
@@ -88,23 +106,44 @@ patternProperties:
         description:
           It contains tx(0) or rx(1) channel IPI id number.
 
-      reg:
-        maxItems: 4
-
-      reg-names:
-        items:
-          - const: local_request_region
-          - const: local_response_region
-          - const: remote_request_region
-          - const: remote_response_region
+    allOf:
+      - if:
+          properties:
+            compatible:
+              contains:
+                enum:
+                  - xlnx,zynqmp-ipi-dest-mailbox
+        then:
+          properties:
+            reg:
+              maxItems: 4
+
+            reg-names:
+              items:
+                - const: local_request_region
+                - const: local_response_region
+                - const: remote_request_region
+                - const: remote_response_region
+        else:
+          properties:
+            reg:
+              minItems: 1
+              items:
+                - description: Remote IPI agent control register region
+                - description: Remote IPI agent optional message buffers
+
+            reg-names:
+              minItems: 1
+              items:
+                - const: ctrl
+                - const: msg
 
     required:
       - compatible
       - reg
       - reg-names
       - "#mbox-cells"
-
-additionalProperties: false
+      - xlnx,ipi-id
 
 required:
   - compatible
@@ -113,6 +152,36 @@ required:
   - '#size-cells'
   - xlnx,ipi-id
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - xlnx,zynqmp-ipi-mailbox
+    then:
+      properties:
+        reg: false
+        reg-names: false
+
+    else:
+      properties:
+        reg:
+          items:
+            - description: Host IPI agent control register region
+            - description: Host IPI agent optional message buffers
+
+        reg-names:
+          items:
+            - const: ctrl
+            - const: msg
+
+      required:
+        - reg
+        - reg-names
+
+additionalProperties: false
+
 examples:
   - |
     #include<dt-bindings/interrupt-controller/arm-gic.h>
@@ -144,4 +213,41 @@ examples:
       };
     };
 
+  - |
+    #include<dt-bindings/interrupt-controller/arm-gic.h>
+
+    bus {
+      #address-cells = <2>;
+      #size-cells = <2>;
+      mailbox@ff300000 {
+        compatible = "xlnx,versal-ipi-mailbox";
+        interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+        #address-cells = <2>;
+        #size-cells = <2>;
+        reg = <0x0 0xff300000 0x0 0x1000>,
+              <0x0 0xff990000 0x0 0x1ff>;
+        reg-names = "ctrl", "msg";
+        xlnx,ipi-id = <0>;
+        ranges;
+
+        /* buffered IPI */
+        mailbox@ff340000 {
+          compatible = "xlnx,versal-ipi-dest-mailbox";
+          reg = <0x0 0xff340000 0x0 0x1000>,
+                <0x0 0xff990400 0x0 0x1ff>;
+          reg-names = "ctrl", "msg";
+          #mbox-cells = <1>;
+          xlnx,ipi-id = <4>;
+        };
+
+        /* bufferless IPI */
+        mailbox@ff370000 {
+          compatible = "xlnx,versal-ipi-dest-mailbox";
+          reg = <0x0 0xff370000 0x0 0x1000>;
+          reg-names = "ctrl";
+          #mbox-cells = <1>;
+          xlnx,ipi-id = <7>;
+        };
+      };
+    };
 ...
index 6d5569e77b7a1239219c13ef2a163849ce5bfd86..6a11c1d11fb5f9a9ccd343c2cb461bd7f3411121 100644 (file)
@@ -17,7 +17,7 @@ properties:
   compatible:
     items:
       - enum:
-          - ti,k3-j721s2-wave521c
+          - ti,j721s2-wave521c
       - const: cnm,wave521c
 
   reg:
@@ -53,7 +53,7 @@ additionalProperties: false
 examples:
   - |
     vpu: video-codec@12345678 {
-        compatible = "ti,k3-j721s2-wave521c", "cnm,wave521c";
+        compatible = "ti,j721s2-wave521c", "cnm,wave521c";
         reg = <0x12345678 0x1000>;
         clocks = <&clks 42>;
         interrupts = <42>;
index d66e5154748274ecd5453aafb4cad18fbfe52782..b46cc780703c6cdecbf1deafdfe16dfd70c512d4 100644 (file)
@@ -50,7 +50,9 @@ properties:
 
   iommu-names:
     minItems: 1
-    maxItems: 2
+    items:
+      - const: left
+      - const: right
 
   power-domains:
     maxItems: 1
@@ -85,7 +87,7 @@ allOf:
             - const: sclk_mfc
         iommus:
           maxItems: 1
-        iommus-names: false
+        iommu-names: false
 
   - if:
       properties:
@@ -103,11 +105,9 @@ allOf:
             - const: aclk
             - const: aclk_xiu
         iommus:
-          maxItems: 2
-        iommus-names:
-          items:
-            - const: left
-            - const: right
+          minItems: 2
+        iommu-names:
+          minItems: 2
 
   - if:
       properties:
@@ -124,11 +124,9 @@ allOf:
             - const: mfc
             - const: sclk_mfc
         iommus:
-          maxItems: 2
-        iommus-names:
-          items:
-            - const: left
-            - const: right
+          minItems: 2
+        iommu-names:
+          minItems: 2
 
   - if:
       properties:
@@ -145,11 +143,9 @@ allOf:
           items:
             - const: mfc
         iommus:
-          maxItems: 2
-        iommus-names:
-          items:
-            - const: left
-            - const: right
+          minItems: 2
+        iommu-names:
+          minItems: 2
 
   - if:
       properties:
@@ -162,9 +158,6 @@ allOf:
         clocks:
           minItems: 1
           maxItems: 2
-        iommus:
-          minItems: 1
-          maxItems: 2
 
   - if:
       properties:
diff --git a/Documentation/devicetree/bindings/mfd/ams,as3711.yaml b/Documentation/devicetree/bindings/mfd/ams,as3711.yaml
new file mode 100644 (file)
index 0000000..ad8649c
--- /dev/null
@@ -0,0 +1,223 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/ams,as3711.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Austria MicroSystems AS3711 Quad Buck High Current PMIC with Charger
+
+maintainers:
+  - Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
+
+description:
+  AS3711 is an I2C PMIC from Austria MicroSystems with multiple DC/DC and LDO
+  power supplies, a battery charger and an RTC.  So far only bindings for the
+  two step-up DC/DC converters are defined.
+
+properties:
+  compatible:
+    const: ams,as3711
+
+  reg:
+    maxItems: 1
+
+  backlight:
+    description:
+      Step-up converter configuration, to be used as a backlight source
+    type: object
+    additionalProperties: false
+    properties:
+      compatible:
+        const: ams,as3711-bl
+
+      su1-dev:
+        description: Framebuffer phandle for the first step-up converter
+        $ref: /schemas/types.yaml#/definitions/phandle
+
+      su1-max-uA:
+        description: Maximum current for the first step-up converter
+        $ref: /schemas/types.yaml#/definitions/uint32
+
+      su2-dev:
+        description: Framebuffer phandle for the second step-up converter
+        $ref: /schemas/types.yaml#/definitions/phandle
+
+      su2-max-uA:
+        description: Maximum current for the second step-up converter
+        $ref: /schemas/types.yaml#/definitions/uint32
+
+      su2-feedback-voltage:
+        description: Second step-up converter uses voltage feedback
+        type: boolean
+
+      su2-feedback-curr1:
+        description:
+          Second step-up converter uses CURR1 input for current feedback
+        type: boolean
+
+      su2-feedback-curr2:
+        description:
+          Second step-up converter uses CURR2 input for current feedback
+        type: boolean
+
+      su2-feedback-curr3:
+        description:
+          Second step-up converter uses CURR3 input for current feedback
+        type: boolean
+
+      su2-feedback-curr-auto:
+        description:
+          Second step-up converter uses automatic current feedback selection
+        type: boolean
+
+      su2-fbprot-lx-sd4:
+        description:
+          Second step-up converter uses LX_SD4 for over-voltage protection
+        type: boolean
+
+      su2-fbprot-gpio2:
+        description:
+          Second step-up converter uses GPIO2 for over-voltage protection
+        type: boolean
+
+      su2-fbprot-gpio3:
+        description:
+          Second step-up converter uses GPIO3 for over-voltage protection
+        type: boolean
+
+      su2-fbprot-gpio4:
+        description:
+          Second step-up converter uses GPIO4 for over-voltage protection
+        type: boolean
+
+      su2-auto-curr1:
+        description:
+          Second step-up converter uses CURR1 input for automatic current
+          feedback
+        type: boolean
+
+      su2-auto-curr2:
+        description:
+          Second step-up converter uses CURR2 input for automatic current
+          feedback
+        type: boolean
+
+      su2-auto-curr3:
+        description:
+          Second step-up converter uses CURR3 input for automatic current
+          feedback
+        type: boolean
+
+    required:
+      - compatible
+
+    dependentRequired:
+      # To use the SU1 converter as a backlight source the following two
+      # properties must be provided:
+      su1-dev: [ su1-max-uA ]
+      su1-max-uA: [ su1-dev ]
+
+      # To use the SU2 converter as a backlight source the following two
+      # properties must be provided:
+      su2-dev: [ su2-max-uA ]
+      su2-max-uA: [ su2-dev ]
+
+      su2-feedback-voltage: [ su2-dev ]
+      su2-feedback-curr1: [ su2-dev ]
+      su2-feedback-curr2: [ su2-dev ]
+      su2-feedback-curr3: [ su2-dev ]
+      su2-feedback-curr-auto: [ su2-dev ]
+      su2-fbprot-lx-sd4: [ su2-dev ]
+      su2-fbprot-gpio2: [ su2-dev ]
+      su2-fbprot-gpio3: [ su2-dev ]
+      su2-fbprot-gpio4: [ su2-dev ]
+      su2-auto-curr1: [ su2-feedback-curr-auto ]
+      su2-auto-curr2: [ su2-feedback-curr-auto ]
+      su2-auto-curr3: [ su2-feedback-curr-auto ]
+
+    dependentSchemas:
+      su2-dev:
+        allOf:
+          - oneOf:
+              - required:
+                  - su2-feedback-voltage
+              - required:
+                  - su2-feedback-curr1
+              - required:
+                  - su2-feedback-curr2
+              - required:
+                  - su2-feedback-curr3
+              - required:
+                  - su2-feedback-curr-auto
+          - oneOf:
+              - required:
+                  - su2-fbprot-lx-sd4
+              - required:
+                  - su2-fbprot-gpio2
+              - required:
+                  - su2-fbprot-gpio3
+              - required:
+                  - su2-fbprot-gpio4
+
+      su2-feedback-curr-auto:
+        anyOf:
+          - required:
+              - su2-auto-curr1
+          - required:
+              - su2-auto-curr2
+          - required:
+              - su2-auto-curr3
+
+  regulators:
+    description: Other DC/DC and LDO supplies
+    type: object
+    unevaluatedProperties: false
+    patternProperties:
+      "^(sd[1-4]|ldo[1-8])$":
+        type: object
+        $ref: /schemas/regulator/regulator.yaml#
+        unevaluatedProperties: false
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        pmic@40 {
+            compatible = "ams,as3711";
+            reg = <0x40>;
+
+            regulators {
+                sd4 {
+                    regulator-name = "1.215V";
+                    regulator-min-microvolt = <1215000>;
+                    regulator-max-microvolt = <1235000>;
+                };
+                ldo2 {
+                    regulator-name = "2.8V CPU";
+                    regulator-min-microvolt = <2800000>;
+                    regulator-max-microvolt = <2800000>;
+                    regulator-always-on;
+                    regulator-boot-on;
+                };
+            };
+
+            backlight {
+                compatible = "ams,as3711-bl";
+                su2-dev = <&lcdc>;
+                su2-max-uA = <36000>;
+                su2-feedback-curr-auto;
+                su2-fbprot-gpio4;
+                su2-auto-curr1;
+                su2-auto-curr2;
+                su2-auto-curr3;
+            };
+        };
+    };
diff --git a/Documentation/devicetree/bindings/mfd/as3711.txt b/Documentation/devicetree/bindings/mfd/as3711.txt
deleted file mode 100644 (file)
index d98cf18..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-AS3711 is an I2C PMIC from Austria MicroSystems with multiple DCDC and LDO power
-supplies, a battery charger and an RTC. So far only bindings for the two stepup
-DCDC converters are defined. Other DCDC and LDO supplies are configured, using
-standard regulator properties, they must belong to a sub-node, called
-"regulators" and be called "sd1" to "sd4" and "ldo1" to "ldo8." Stepup converter
-configuration should be placed in a subnode, called "backlight."
-
-Compulsory properties:
-- compatible           : must be "ams,as3711"
-- reg                  : specifies the I2C address
-
-To use the SU1 converter as a backlight source the following two properties must
-be provided:
-- su1-dev              : framebuffer phandle
-- su1-max-uA           : maximum current
-
-To use the SU2 converter as a backlight source the following two properties must
-be provided:
-- su2-dev              : framebuffer phandle
-- su1-max-uA           : maximum current
-
-Additionally one of these properties must be provided to select the type of
-feedback used:
-- su2-feedback-voltage : voltage feedback is used
-- su2-feedback-curr1   : CURR1 input used for current feedback
-- su2-feedback-curr2   : CURR2 input used for current feedback
-- su2-feedback-curr3   : CURR3 input used for current feedback
-- su2-feedback-curr-auto: automatic current feedback selection
-
-and one of these to select the over-voltage protection pin
-- su2-fbprot-lx-sd4    : LX_SD4 is used for over-voltage protection
-- su2-fbprot-gpio2     : GPIO2 is used for over-voltage protection
-- su2-fbprot-gpio3     : GPIO3 is used for over-voltage protection
-- su2-fbprot-gpio4     : GPIO4 is used for over-voltage protection
-
-If "su2-feedback-curr-auto" is selected, one or more of the following properties
-have to be specified:
-- su2-auto-curr1       : use CURR1 input for current feedback
-- su2-auto-curr2       : use CURR2 input for current feedback
-- su2-auto-curr3       : use CURR3 input for current feedback
-
-Example:
-
-as3711@40 {
-       compatible = "ams,as3711";
-       reg = <0x40>;
-
-       regulators {
-               sd4 {
-                       regulator-name = "1.215V";
-                       regulator-min-microvolt = <1215000>;
-                       regulator-max-microvolt = <1235000>;
-               };
-               ldo2 {
-                       regulator-name = "2.8V CPU";
-                       regulator-min-microvolt = <2800000>;
-                       regulator-max-microvolt = <2800000>;
-                       regulator-always-on;
-                       regulator-boot-on;
-               };
-       };
-
-       backlight {
-               compatible = "ams,as3711-bl";
-               su2-dev = <&lcdc>;
-               su2-max-uA = <36000>;
-               su2-feedback-curr-auto;
-               su2-fbprot-gpio4;
-               su2-auto-curr1;
-               su2-auto-curr2;
-               su2-auto-curr3;
-       };
-};
index bdff5b6534538d1262f3459ee72e2c78dd8f250d..6a824351834ee0250e465af5e141b91dbe8a50d7 100644 (file)
@@ -17,7 +17,7 @@ description: |
   node.
 
   The SPMI controller part is provided by
-  Documentation/devicetree/bindings/mfd/hisilicon,hi6421-spmi-pmic.yaml
+  Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml
 
 properties:
   $nodename:
@@ -42,13 +42,6 @@ properties:
 
     additionalProperties: false
 
-    properties:
-      '#address-cells':
-        const: 1
-
-      '#size-cells':
-        const: 0
-
     patternProperties:
       '^ldo[0-9]+$':
         type: object
@@ -66,72 +59,75 @@ additionalProperties: false
 
 examples:
   - |
+    #include <dt-bindings/spmi/spmi.h>
 
-    pmic: pmic@0 {
-      compatible = "hisilicon,hi6421v600-spmi";
-      reg = <0 0>;
-
-      #interrupt-cells = <2>;
-      interrupt-controller;
-      interrupt-parent = <&gpio28>;
-      interrupts = <0 0>;
-
-      regulators {
-        #address-cells = <1>;
+    spmi {
+        #address-cells = <2>;
         #size-cells = <0>;
 
-        ldo3: ldo3 {
-          regulator-name = "ldo3";
-          regulator-min-microvolt = <1500000>;
-          regulator-max-microvolt = <2000000>;
-          regulator-boot-on;
-        };
-
-        ldo4: ldo4 {
-          regulator-name = "ldo4";
-          regulator-min-microvolt = <1725000>;
-          regulator-max-microvolt = <1900000>;
-          regulator-boot-on;
-        };
-
-        ldo9: ldo9 {
-          regulator-name = "ldo9";
-          regulator-min-microvolt = <1750000>;
-          regulator-max-microvolt = <3300000>;
-          regulator-boot-on;
-        };
-
-        ldo15: ldo15 {
-          regulator-name = "ldo15";
-          regulator-min-microvolt = <1800000>;
-          regulator-max-microvolt = <3000000>;
-          regulator-always-on;
-        };
-
-        ldo16: ldo16 {
-          regulator-name = "ldo16";
-          regulator-min-microvolt = <1800000>;
-          regulator-max-microvolt = <3000000>;
-          regulator-boot-on;
-        };
-
-        ldo17: ldo17 {
-          regulator-name = "ldo17";
-          regulator-min-microvolt = <2500000>;
-          regulator-max-microvolt = <3300000>;
-        };
-
-        ldo33: ldo33 {
-          regulator-name = "ldo33";
-          regulator-min-microvolt = <2500000>;
-          regulator-max-microvolt = <3300000>;
-          regulator-boot-on;
-        };
-
-        ldo34: ldo34 {
-          regulator-name = "ldo34";
-          regulator-min-microvolt = <2600000>;
-          regulator-max-microvolt = <3300000>;
+        pmic@0 {
+            compatible = "hisilicon,hi6421v600-spmi";
+            reg = <0 SPMI_USID>;
+
+            #interrupt-cells = <2>;
+            interrupt-controller;
+            interrupt-parent = <&gpio28>;
+            interrupts = <0 0>;
+
+            regulators {
+                ldo3 {
+                    regulator-name = "ldo3";
+                    regulator-min-microvolt = <1500000>;
+                    regulator-max-microvolt = <2000000>;
+                    regulator-boot-on;
+                };
+
+                ldo4 {
+                    regulator-name = "ldo4";
+                    regulator-min-microvolt = <1725000>;
+                    regulator-max-microvolt = <1900000>;
+                    regulator-boot-on;
+                };
+
+                ldo9 {
+                    regulator-name = "ldo9";
+                    regulator-min-microvolt = <1750000>;
+                    regulator-max-microvolt = <3300000>;
+                    regulator-boot-on;
+                };
+
+                ldo15 {
+                    regulator-name = "ldo15";
+                    regulator-min-microvolt = <1800000>;
+                    regulator-max-microvolt = <3000000>;
+                    regulator-always-on;
+                };
+
+                ldo16 {
+                    regulator-name = "ldo16";
+                    regulator-min-microvolt = <1800000>;
+                    regulator-max-microvolt = <3000000>;
+                    regulator-boot-on;
+                };
+
+                ldo17 {
+                    regulator-name = "ldo17";
+                    regulator-min-microvolt = <2500000>;
+                    regulator-max-microvolt = <3300000>;
+                };
+
+                ldo33 {
+                    regulator-name = "ldo33";
+                    regulator-min-microvolt = <2500000>;
+                    regulator-max-microvolt = <3300000>;
+                    regulator-boot-on;
+                };
+
+                ldo34 {
+                    regulator-name = "ldo34";
+                    regulator-min-microvolt = <2600000>;
+                    regulator-max-microvolt = <3300000>;
+                };
+            };
         };
-      };
     };
index 9e4eed34dae8afb7fba708783c776a87edbc54f5..0c75d8bde5688217bdd6d2483e23a56ff77b9b6c 100644 (file)
@@ -99,10 +99,12 @@ examples:
   - |
     #include <dt-bindings/mfd/qcom-pm8008.h>
     #include <dt-bindings/interrupt-controller/irq.h>
-    qupv3_se13_i2c {
+
+    i2c {
       #address-cells = <1>;
       #size-cells = <0>;
-      pm8008i@8 {
+
+      pmic@8 {
         compatible = "qcom,pm8008";
         reg = <0x8>;
         #address-cells = <1>;
index 9fa56860393069b14f8b7f4547e3bc84a4304fab..8103fb61a16cc907ab46aeb728aab4a1139523e2 100644 (file)
@@ -66,6 +66,7 @@ properties:
           - qcom,pm8841
           - qcom,pm8909
           - qcom,pm8916
+          - qcom,pm8937
           - qcom,pm8941
           - qcom,pm8950
           - qcom,pm8953
@@ -134,9 +135,15 @@ patternProperties:
     type: object
     $ref: /schemas/sound/qcom,pm8916-wcd-analog-codec.yaml#
 
+  "^battery@[0-9a-f]+$":
+    type: object
+    oneOf:
+      - $ref: /schemas/power/supply/qcom,pm8916-bms-vm.yaml#
+
   "^charger@[0-9a-f]+$":
     type: object
     oneOf:
+      - $ref: /schemas/power/supply/qcom,pm8916-lbc.yaml#
       - $ref: /schemas/power/supply/qcom,pm8941-charger.yaml#
       - $ref: /schemas/power/supply/qcom,pm8941-coincell.yaml#
       - $ref: /schemas/power/supply/qcom,pmi8998-charger.yaml#
index 33c3d023a10681630f22facc5cd69dae0dad960b..798705ab6a46016ee84234dcde0da46f4cb8b2e0 100644 (file)
@@ -29,6 +29,8 @@ properties:
           - qcom,sdx65-tcsr
           - qcom,sm4450-tcsr
           - qcom,sm8150-tcsr
+          - qcom,sm8250-tcsr
+          - qcom,sm8350-tcsr
           - qcom,sm8450-tcsr
           - qcom,tcsr-apq8064
           - qcom,tcsr-apq8084
index 996bd4a17ca357ee5b6b8f8e740bf89eb955c9eb..a750fa23d7e7c27f00c271dec8d8e9f1ad3010c5 100644 (file)
@@ -19,7 +19,9 @@ description:
 properties:
   compatible:
     items:
-      - const: sprd,ums512-glbregs
+      - enum:
+          - sprd,ums512-glbregs
+          - sprd,ums9620-glbregs
       - const: syscon
       - const: simple-mfd
 
index 23a63265be3c8cf09e7eb52bc17cc6823909c03f..70b5dfce07d29b10080613c1be3cd768ecea24be 100644 (file)
@@ -61,8 +61,6 @@ required:
   - interrupts
   - clocks
   - clock-names
-  - dmas
-  - dma-names
 
 additionalProperties: false
 
index 8cc951feb7df4bd785c0b282b07abf1c3f27b4bd..59b83ea5e05eefd0c5bfa84d6e302e909452e1c6 100644 (file)
@@ -12,7 +12,7 @@ maintainers:
 
 properties:
   compatible:
-    const: "fsl,dpaa2-console"
+    const: fsl,dpaa2-console
 
   reg:
     maxItems: 1
index 2459a55ed540b85a1920e06456a7faac938c6836..940b126881674629eb00679e223bbd4900b4c7cc 100644 (file)
@@ -203,7 +203,7 @@ examples:
       bus-width = <4>;
       cap-sd-highspeed;
       cap-mmc-highspeed;
-      cd-gpios  = <&gpio2 31 0x4>;
+      cd-gpios = <&gpio2 31 0x4>;
       st,sig-dir-dat0;
       st,sig-dir-dat2;
       st,sig-dir-cmd;
index 09455f9fa8deb2fbfb9df8fde684cb3188ef31b8..4869ddef36fd89265a1bfe96bb9663b553ac5084 100644 (file)
@@ -18,7 +18,7 @@ allOf:
             const: marvell,armada-380-sdhci
     then:
       properties:
-        regs:
+        reg:
           minItems: 3
         reg-names:
           minItems: 3
@@ -26,7 +26,7 @@ allOf:
         - reg-names
     else:
       properties:
-        regs:
+        reg:
           maxItems: 1
         reg-names:
           maxItems: 1
index 973e478a399d37f76738671a96bdcb0f25b8d6a4..bf6cbc7c2ba3b5cb1ffc26d8d456d0073633a6fe 100644 (file)
@@ -120,7 +120,7 @@ examples:
       pinctrl-names = "default";
       pinctrl-0 = <&cps_sfpp0_pins>;
       tx-disable-gpios = <&cps_gpio1 29 GPIO_ACTIVE_HIGH>;
-      tx-fault-gpios  = <&cps_gpio1 26 GPIO_ACTIVE_HIGH>;
+      tx-fault-gpios = <&cps_gpio1 26 GPIO_ACTIVE_HIGH>;
     };
 
     mdio {
index a69de3e9228283d03b120a1a77eec2daaee8ef75..92bfe25f0571ebdc3d1df383be0a90ef7258eebb 100644 (file)
@@ -24,6 +24,7 @@ properties:
       - st,stm32f4-otp
       - st,stm32mp13-bsec
       - st,stm32mp15-bsec
+      - st,stm32mp25-bsec
 
   reg:
     maxItems: 1
index 7e15aae7d69e9780dc2fc7adfa28b0ebec0acaaa..22491f7f88521c853c1ed8e91c5f9d5db2d095ef 100644 (file)
@@ -64,6 +64,24 @@ properties:
 
   aspm-no-l0s: true
 
+  brcm,clkreq-mode:
+    description: A string that determines the operating
+      clkreq mode of the PCIe RC HW with respect to controlling the refclk
+      signal.  There are three different modes -- "safe", which drives the
+      refclk signal unconditionally and will work for all devices but does
+      not provide any power savings; "no-l1ss" -- which provides Clock
+      Power Management, L0s, and L1, but cannot provide L1 substate (L1SS)
+      power savings. If the downstream device connected to the RC is L1SS
+      capable AND the OS enables L1SS, all PCIe traffic may abruptly halt,
+      potentially hanging the system; "default" -- which provides L0s, L1,
+      and L1SS, but not compliant to provide Clock Power Management;
+      specifically, may not be able to meet the T_CLRon max timing of 400ns
+      as specified in "Dynamic Clock Control", section 3.2.5.2.2 PCI
+      Express Mini CEM 2.1 specification.  This situation is atypical and
+      should happen only with older devices.
+    $ref: /schemas/types.yaml#/definitions/string
+    enum: [ safe, no-l1ss, default ]
+
   brcm,scb-sizes:
     description: u64 giving the 64bit PCIe memory
       viewport size of a memory controller.  There may be up to
index eadba38171e135a00b513739aadbc492cc0be976..a93ab3b540666427adb21a2f1631f3a315a0a0c6 100644 (file)
@@ -41,6 +41,10 @@ properties:
           - qcom,pcie-sm8450-pcie0
           - qcom,pcie-sm8450-pcie1
           - qcom,pcie-sm8550
+      - items:
+          - enum:
+              - qcom,pcie-sm8650
+          - const: qcom,pcie-sm8550
       - items:
           - const: qcom,pcie-msm8998
           - const: qcom,pcie-msm8996
@@ -62,7 +66,8 @@ properties:
     maxItems: 8
 
   iommu-map:
-    maxItems: 2
+    minItems: 1
+    maxItems: 16
 
   # Common definitions for clocks, clock-names and reset.
   # Platform constraints are described later.
@@ -88,7 +93,7 @@ properties:
     minItems: 1
     maxItems: 12
 
-  resets-names:
+  reset-names:
     minItems: 1
     maxItems: 12
 
@@ -478,6 +483,33 @@ allOf:
           items:
             - const: pci # PCIe core reset
 
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,pcie-sc8180x
+    then:
+      properties:
+        clocks:
+          minItems: 8
+          maxItems: 8
+        clock-names:
+          items:
+            - const: pipe # PIPE clock
+            - const: aux # Auxiliary clock
+            - const: cfg # Configuration clock
+            - const: bus_master # Master AXI clock
+            - const: bus_slave # Slave AXI clock
+            - const: slave_q2a # Slave Q2A clock
+            - const: ref # REFERENCE clock
+            - const: tbu # PCIe TBU clock
+        resets:
+          maxItems: 1
+        reset-names:
+          items:
+            - const: pci # PCIe core reset
+
   - if:
       properties:
         compatible:
@@ -526,8 +558,33 @@ allOf:
         compatible:
           contains:
             enum:
-              - qcom,pcie-sc8180x
               - qcom,pcie-sm8150
+    then:
+      properties:
+        clocks:
+          minItems: 8
+          maxItems: 8
+        clock-names:
+          items:
+            - const: pipe # PIPE clock
+            - const: aux # Auxiliary clock
+            - const: cfg # Configuration clock
+            - const: bus_master # Master AXI clock
+            - const: bus_slave # Slave AXI clock
+            - const: slave_q2a # Slave Q2A clock
+            - const: tbu # PCIe TBU clock
+            - const: ref # REFERENCE clock
+        resets:
+          maxItems: 1
+        reset-names:
+          items:
+            - const: pci # PCIe core reset
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
               - qcom,pcie-sm8250
     then:
       oneOf:
index 8fdfbc763d704532d06c6628c341b8e0f61e7011..b6a7cb32f61e5d4f1d0cff979bf3e7ecd46116fb 100644 (file)
@@ -68,6 +68,15 @@ properties:
   phy-names:
     const: pcie
 
+  vpcie1v5-supply:
+    description: The 1.5v regulator to use for PCIe.
+
+  vpcie3v3-supply:
+    description: The 3.3v regulator to use for PCIe.
+
+  vpcie12v-supply:
+    description: The 12v regulator to use for PCIe.
+
 required:
   - compatible
   - reg
@@ -121,5 +130,7 @@ examples:
              clock-names = "pcie", "pcie_bus";
              power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
              resets = <&cpg 319>;
+             vpcie3v3-supply = <&pcie_3v3>;
+             vpcie12v-supply = <&pcie_12v>;
          };
     };
index 1ae8dcfa072cea19c1b897ffaa91fff9b7dcf703..5f719218c472c8476ae8fbfb6b65b898fe49ff6b 100644 (file)
@@ -49,6 +49,7 @@ properties:
       - description: APB clock for PCIe
       - description: Auxiliary clock for PCIe
       - description: PIPE clock
+      - description: Reference clock for PCIe
 
   clock-names:
     minItems: 5
@@ -59,6 +60,7 @@ properties:
       - const: pclk
       - const: aux
       - const: pipe
+      - const: ref
 
   interrupts:
     items:
index 62292185fe2e460d167a629669bc78496fc6be46..97f2579ea9082229c03a094d74d25da4e54eb8e4 100644 (file)
@@ -10,13 +10,11 @@ title: TI J721E PCI EP (PCIe Wrapper)
 maintainers:
   - Kishon Vijay Abraham I <kishon@ti.com>
 
-allOf:
-  - $ref: cdns-pcie-ep.yaml#
-
 properties:
   compatible:
     oneOf:
       - const: ti,j721e-pcie-ep
+      - const: ti,j784s4-pcie-ep
       - description: PCIe EP controller in AM64
         items:
           - const: ti,am64-pcie-ep
@@ -65,6 +63,41 @@ properties:
     items:
       - const: link_state
 
+allOf:
+  - $ref: cdns-pcie-ep.yaml#
+  - if:
+      properties:
+        compatible:
+          enum:
+            - ti,am64-pcie-ep
+    then:
+      properties:
+        num-lanes:
+          const: 1
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - ti,j7200-pcie-ep
+            - ti,j721e-pcie-ep
+    then:
+      properties:
+        num-lanes:
+          minimum: 1
+          maximum: 2
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - ti,j784s4-pcie-ep
+    then:
+      properties:
+        num-lanes:
+          minimum: 1
+          maximum: 4
+
 required:
   - compatible
   - reg
index a2c5eaea57f503f27392b7424bda35fb43d27950..b7a534cef24d314d20d9e725575301dcb499a782 100644 (file)
@@ -10,13 +10,11 @@ title: TI J721E PCI Host (PCIe Wrapper)
 maintainers:
   - Kishon Vijay Abraham I <kishon@ti.com>
 
-allOf:
-  - $ref: cdns-pcie-host.yaml#
-
 properties:
   compatible:
     oneOf:
       - const: ti,j721e-pcie-host
+      - const: ti,j784s4-pcie-host
       - description: PCIe controller in AM64
         items:
           - const: ti,am64-pcie-host
@@ -94,6 +92,41 @@ properties:
       interrupts:
         maxItems: 1
 
+allOf:
+  - $ref: cdns-pcie-host.yaml#
+  - if:
+      properties:
+        compatible:
+          enum:
+            - ti,am64-pcie-host
+    then:
+      properties:
+        num-lanes:
+          const: 1
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - ti,j7200-pcie-host
+            - ti,j721e-pcie-host
+    then:
+      properties:
+        num-lanes:
+          minimum: 1
+          maximum: 2
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - ti,j784s4-pcie-host
+    then:
+      properties:
+        num-lanes:
+          minimum: 1
+          maximum: 4
+
 required:
   - compatible
   - reg
index 53da2edd7c9abef7d13c588af2fbb63a17d509eb..120e3bb1e5454a4e2d6a68d846ded1f8d656719b 100644 (file)
@@ -83,7 +83,7 @@ examples:
                   <0x0 0x28050000 0x0 0x00010000>,
                   <0x0 0x24200000 0x0 0x00002000>,
                   <0x0 0x24162000 0x0 0x00001000>;
-            reg-names  = "dbi", "config", "ulreg", "smu", "mpu";
+            reg-names = "dbi", "config", "ulreg", "smu", "mpu";
             device_type = "pci";
             bus-range = <0x00 0xff>;
             num-lanes = <2>;
index c8c83acfb871d0f45a871bcac7167ca4fbc91997..81c2654b7e57e98ce89e44c46b6135b7fce49b59 100644 (file)
@@ -16,20 +16,8 @@ properties:
   "#phy-cells":
     const: 0
 
-  reg:
-    maxItems: 1
-
 required:
   - compatible
-  - reg
   - "#phy-cells"
 
 additionalProperties: false
-
-examples:
-  - |
-    phy@0 {
-          compatible = "amlogic,g12a-mipi-dphy-analog";
-          reg = <0x0 0xc>;
-          #phy-cells = <0>;
-    };
index 009a3980831856b9db5d99d90c8b8f670dcf5333..70def36e5688d0153b92166e5e35de67378c6698 100644 (file)
@@ -9,16 +9,6 @@ title: Amlogic AXG shared MIPI/PCIE analog PHY
 maintainers:
   - Remi Pommarel <repk@triplefau.lt>
 
-description: |+
-  The Everything-Else Power Domains node should be the child of a syscon
-  node with the required property:
-
-  - compatible: Should be the following:
-                "amlogic,meson-gx-hhi-sysctrl", "simple-mfd", "syscon"
-
-  Refer to the bindings described in
-  Documentation/devicetree/bindings/mfd/syscon.yaml
-
 properties:
   compatible:
     const: amlogic,axg-mipi-pcie-analog-phy
@@ -31,10 +21,3 @@ required:
   - "#phy-cells"
 
 additionalProperties: false
-
-examples:
-  - |
-    mpphy: phy {
-          compatible = "amlogic,axg-mipi-pcie-analog-phy";
-          #phy-cells = <0>;
-    };
index 6703689fcdbe103f791a189e0bb8f8ab095975d9..f6e494d0d89b82dabed0265782dbd9cd83de5737 100644 (file)
@@ -31,6 +31,7 @@ properties:
       - items:
           - enum:
               - mediatek,mt8188-mipi-tx
+              - mediatek,mt8195-mipi-tx
               - mediatek,mt8365-mipi-tx
           - const: mediatek,mt8183-mipi-tx
       - const: mediatek,mt2701-mipi-tx
index 2bb91542e984e788b20db49204cd97880ba6b224..acba0720125ddd3d327df154a3f30c6e2e8608aa 100644 (file)
@@ -235,6 +235,15 @@ patternProperties:
           Specify the flag to enable BC1.2 if support it
         type: boolean
 
+      mediatek,force-mode:
+        description:
+          The force mode is used to manually switch the shared phy mode between
+          USB3 and PCIe, when USB3 phy type is selected by the consumer, and
+          force-mode is set, will cause phy's power and pipe toggled and force
+          phy as USB3 mode which switched from default PCIe mode. But perfer to
+          use the property "mediatek,syscon-type" for newer SoCs that support it.
+        type: boolean
+
       mediatek,syscon-type:
         $ref: /schemas/types.yaml#/definitions/phandle-array
         maxItems: 1
index 2c3d6553a7bac692f688adaf9ac5ff568d862e46..6c03f2d5fca3cca6ad0cccc4ae3f8679e4c59026 100644 (file)
@@ -36,6 +36,8 @@ properties:
       - qcom,sm8450-qmp-gen4x2-pcie-phy
       - qcom,sm8550-qmp-gen3x2-pcie-phy
       - qcom,sm8550-qmp-gen4x2-pcie-phy
+      - qcom,sm8650-qmp-gen3x2-pcie-phy
+      - qcom,sm8650-qmp-gen4x2-pcie-phy
 
   reg:
     minItems: 1
@@ -147,6 +149,8 @@ allOf:
               - qcom,sm8450-qmp-gen3x2-pcie-phy
               - qcom,sm8550-qmp-gen3x2-pcie-phy
               - qcom,sm8550-qmp-gen4x2-pcie-phy
+              - qcom,sm8650-qmp-gen3x2-pcie-phy
+              - qcom,sm8650-qmp-gen4x2-pcie-phy
     then:
       properties:
         clocks:
@@ -189,6 +193,7 @@ allOf:
           contains:
             enum:
               - qcom,sm8550-qmp-gen4x2-pcie-phy
+              - qcom,sm8650-qmp-gen4x2-pcie-phy
     then:
       properties:
         resets:
index f3a3296c811cb3403201642703c77060b7ed1c36..8474eef8d0ff5233a075bf5c17ca0abaa14fbd41 100644 (file)
@@ -32,6 +32,7 @@ properties:
       - qcom,sm8350-qmp-ufs-phy
       - qcom,sm8450-qmp-ufs-phy
       - qcom,sm8550-qmp-ufs-phy
+      - qcom,sm8650-qmp-ufs-phy
 
   reg:
     maxItems: 1
@@ -112,6 +113,7 @@ allOf:
               - qcom,sm8250-qmp-ufs-phy
               - qcom,sm8350-qmp-ufs-phy
               - qcom,sm8550-qmp-ufs-phy
+              - qcom,sm8650-qmp-ufs-phy
     then:
       properties:
         clocks:
index 57702f7f2a46cf54bcf6f660704144780faee8f7..15d82c67f157b6ceadc366540fca0c200201d920 100644 (file)
@@ -32,6 +32,7 @@ properties:
       - qcom,sm8150-qmp-usb3-uni-phy
       - qcom,sm8250-qmp-usb3-uni-phy
       - qcom,sm8350-qmp-usb3-uni-phy
+      - qcom,x1e80100-qmp-usb3-uni-phy
 
 
   reg:
@@ -135,6 +136,7 @@ allOf:
               - qcom,sm8150-qmp-usb3-uni-phy
               - qcom,sm8250-qmp-usb3-uni-phy
               - qcom,sm8350-qmp-usb3-uni-phy
+              - qcom,x1e80100-qmp-usb3-uni-phy
     then:
       properties:
         clocks:
@@ -171,6 +173,7 @@ allOf:
             enum:
               - qcom,sa8775p-qmp-usb3-uni-phy
               - qcom,sc8280xp-qmp-usb3-uni-phy
+              - qcom,x1e80100-qmp-usb3-uni-phy
     then:
       required:
         - power-domains
index 9af203dc8793f34ee6adf58813c5838fc71200e8..2d0d7e9e643117f5ec625e49270ac94c70603e7e 100644 (file)
@@ -27,6 +27,8 @@ properties:
       - qcom,sm8350-qmp-usb3-dp-phy
       - qcom,sm8450-qmp-usb3-dp-phy
       - qcom,sm8550-qmp-usb3-dp-phy
+      - qcom,sm8650-qmp-usb3-dp-phy
+      - qcom,x1e80100-qmp-usb3-dp-phy
 
   reg:
     maxItems: 1
@@ -62,12 +64,12 @@ properties:
   "#clock-cells":
     const: 1
     description:
-      See include/dt-bindings/dt-bindings/phy/phy-qcom-qmp.h
+      See include/dt-bindings/phy/phy-qcom-qmp.h
 
   "#phy-cells":
     const: 1
     description:
-      See include/dt-bindings/dt-bindings/phy/phy-qcom-qmp.h
+      See include/dt-bindings/phy/phy-qcom-qmp.h
 
   orientation-switch:
     description:
@@ -128,6 +130,8 @@ allOf:
             - qcom,sc8280xp-qmp-usb43dp-phy
             - qcom,sm6350-qmp-usb3-dp-phy
             - qcom,sm8550-qmp-usb3-dp-phy
+            - qcom,sm8650-qmp-usb3-dp-phy
+            - qcom,x1e80100-qmp-usb3-dp-phy
     then:
       required:
         - power-domains
index c95828607ab6bc6b23217ebe5e380c60f2d224a1..b82f7f5731ed4a6879ffbdc1b970fdfe0557f944 100644 (file)
@@ -18,6 +18,8 @@ properties:
       - items:
           - enum:
               - qcom,sdx75-snps-eusb2-phy
+              - qcom,sm8650-snps-eusb2-phy
+              - qcom,x1e80100-snps-eusb2-phy
           - const: qcom,sm8550-snps-eusb2-phy
       - const: qcom,sm8550-snps-eusb2-phy
 
index 45a307d3ce1672b01530371192655a9692ba3593..c11495524dd2672d2d5c3595726f4026e153ead6 100644 (file)
@@ -31,6 +31,7 @@ properties:
               - ti,omap3-padconf
               - ti,omap4-padconf
               - ti,omap5-padconf
+              - ti,j7200-padconf
           - const: pinctrl-single
 
   reg:
index fad0118fd5219c86d8a1342379dc8a9960a746bb..23300606547c5585f5e4384148af15633e785f40 100644 (file)
@@ -23,13 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 24
@@ -95,7 +88,10 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+allOf:
+  - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+unevaluatedProperties: false
 
 examples:
   - |
index 3d3086ae1ba6946a6d0e2934d6ef8d1ff500cb3a..e571cd64418f279868dc724b2ec41661a6056f15 100644 (file)
@@ -26,13 +26,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 27
@@ -100,7 +93,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 7c3e5e043f078df68f774a06f40b4157666e0f73..ed00fbaec11b05760dbe4885884066de78c85d07 100644 (file)
@@ -22,12 +22,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-
 patternProperties:
   "-state$":
     oneOf:
@@ -100,7 +94,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index e053fbd588b51efabee91573c8f7bddecc6da01e..6f90dbbdbdcce2063913d9fcead5c16ce2f277b2 100644 (file)
@@ -23,13 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 35
@@ -103,7 +96,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index e5e9962b2174fd68b9adc55a01698ee4fcdb53b3..bca903b5da6d01e4aeb64a7a225220b01d32f714 100644 (file)
@@ -23,13 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 33
@@ -97,7 +90,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-common.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-common.yaml
new file mode 100644 (file)
index 0000000..3b50457
--- /dev/null
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,lpass-lpi-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SoC LPASS LPI TLMM Common Properties
+
+maintainers:
+  - Bjorn Andersson <andersson@kernel.org>
+  - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+  - Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+
+description:
+  Common properties for the Top Level Mode Multiplexer pin controllers in the
+  Low Power Audio SubSystem (LPASS) Low Power Island (LPI) of Qualcomm SoCs.
+
+properties:
+  gpio-controller: true
+
+  "#gpio-cells":
+    description:
+      Specifying the pin number and flags, as defined in
+      include/dt-bindings/gpio/gpio.h
+    const: 2
+
+  gpio-ranges:
+    maxItems: 1
+
+required:
+  - gpio-controller
+  - "#gpio-cells"
+  - gpio-ranges
+
+allOf:
+  - $ref: pinctrl.yaml#
+
+additionalProperties: true
+
+$defs:
+  qcom-tlmm-state:
+    properties:
+      drive-strength:
+        enum: [2, 4, 6, 8, 10, 12, 14, 16]
+        default: 2
+        description:
+          Selects the drive strength for the specified pins, in mA.
+
+      slew-rate:
+        enum: [0, 1, 2, 3]
+        default: 0
+        description: |
+          0: No adjustments
+          1: Higher Slew rate (faster edges)
+          2: Lower Slew rate (slower edges)
+          3: Reserved (No adjustments)
+
+      bias-bus-hold: true
+      bias-pull-down: true
+      bias-pull-up: true
+      bias-disable: true
+      input-enable: true
+      output-high: true
+      output-low: true
+
+    required:
+      - pins
+      - function
+
+    allOf:
+      - $ref: pincfg-node.yaml#
+      - $ref: pinmux-node.yaml#
+
+    additionalProperties: true
+
index 5ece3b9d676b3d49d0efe1b132dc3679766c48d7..bd3cbb44c99a4638e9b9aa57c95c8b3def7171e6 100644 (file)
@@ -25,19 +25,7 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
   gpio-reserved-ranges: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
 
 patternProperties:
   "-state$":
@@ -110,6 +98,12 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index 5885aee95c98e247f0f1af760f456892a2358b8f..299e0b4b0ab42500f2ba41937a901c3d90e2b713 100644 (file)
@@ -23,18 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  '#interrupt-cells': true
-  gpio-controller: true
-  '#gpio-cells': true
-  gpio-ranges: true
-
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
-
 patternProperties:
   "-state$":
     oneOf:
@@ -74,6 +62,12 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index a602bf0d27fbc65f64fb8131a972dfc626cf8f70..68d3fa2105b857cedf4a88923d7ba801a9185227 100644 (file)
@@ -23,12 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-
   gpio-reserved-ranges:
     maxItems: 1
 
@@ -82,7 +76,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index a05971611780947e2c156605a92abeac01e6dbe4..61f5be21f30cc0a550e85199794c228e22e2f38a 100644 (file)
@@ -23,13 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 86
@@ -92,7 +85,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 5095e86fe9a2136a204126bb0050cb5371f79c6a..295dd5fcf4c3d261b3f6139b65caafdc5c093b5e 100644 (file)
@@ -25,19 +25,7 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
   gpio-reserved-ranges: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
 
 patternProperties:
   "-state$":
@@ -108,6 +96,12 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index 063d004967bbeb3355b4d9153cadac8d18030f10..904af87f9eaff69e29fff946b170cdce56f119ea 100644 (file)
@@ -23,13 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 61
@@ -114,7 +107,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 798aac9e6e31b207a89deaa5ac4e72e7e145d074..8a3a962f6c007379259c0363c4fa5aa2ba4277a1 100644 (file)
@@ -22,12 +22,7 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
   gpio-reserved-ranges: true
-  "#gpio-cells": true
-  gpio-ranges: true
 
 patternProperties:
   "-state$":
@@ -117,7 +112,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 9172b50f7a9825680e7359385bf42f785892b2e8..46618740bd312b975321427e5ffa34811c68e652 100644 (file)
@@ -23,13 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 76
@@ -108,7 +101,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 8a3be65c51edee8ae16f1cd9cf21d828f9190716..840fdaabde12772e830d49e9c1056eb81e94dbe9 100644 (file)
@@ -23,13 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 73
@@ -124,7 +117,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index ca95de0b87a6a19f49a57a308092a29dcd7bc207..d4391c194ff7c66b446cb154c858edf5d82dde41 100644 (file)
@@ -23,13 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 73
@@ -104,7 +97,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 41525ecfa8e3a4d4691e8462163c93b2af70a789..fa90981db40b38d378e42bed0a949a6776acbbcd 100644 (file)
@@ -25,13 +25,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 73
@@ -114,7 +107,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 59d406b609574f4a2c19f20c6e7c2859fddad89b..c5010c175b2386d91f614b6b32fe533cfbf9e1eb 100644 (file)
@@ -23,13 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 75
@@ -133,7 +126,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index bd6d7caf499afb6e1c3026d81c3e69f22285bc99..bcaa231adaf784c8e620cfa1521192710005906d 100644 (file)
@@ -23,13 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 75
@@ -118,7 +111,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 80f960671857042ca4018b6a495822dbb3056c6e..fe717d8d47982408001aad49d714b8e3faa8c501 100644 (file)
@@ -158,34 +158,40 @@ examples:
   - |
     #include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
 
-    pm8841_mpp: mpps@a000 {
-      compatible = "qcom,pm8841-mpp", "qcom,spmi-mpp";
-      reg = <0xa000 0>;
-      gpio-controller;
-      #gpio-cells = <2>;
-      gpio-ranges = <&pm8841_mpp 0 0 4>;
-      gpio-line-names = "VDD_PX_BIAS", "WLAN_LED_CTRL",
-              "BT_LED_CTRL", "GPIO-F";
-      interrupt-controller;
-      #interrupt-cells = <2>;
-
-      pinctrl-names = "default";
-      pinctrl-0 = <&pm8841_default>;
-
-      mpp1-state {
-        pins = "mpp1";
-        function = "digital";
-        input-enable;
-        power-source = <PM8841_MPP_S3>;
-      };
-
-      default-state {
-        gpio-pins {
-          pins = "mpp1", "mpp2", "mpp3", "mpp4";
-          function = "digital";
-          input-enable;
-          power-source = <PM8841_MPP_S3>;
+    pmic {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        pm8841_mpp: mpps@a000 {
+            compatible = "qcom,pm8841-mpp", "qcom,spmi-mpp";
+            reg = <0xa000>;
+
+            gpio-controller;
+            #gpio-cells = <2>;
+            gpio-ranges = <&pm8841_mpp 0 0 4>;
+            gpio-line-names = "VDD_PX_BIAS", "WLAN_LED_CTRL",
+                              "BT_LED_CTRL", "GPIO-F";
+            interrupt-controller;
+            #interrupt-cells = <2>;
+
+            pinctrl-names = "default";
+            pinctrl-0 = <&pm8841_default>;
+
+            mpp1-state {
+                pins = "mpp1";
+                function = "digital";
+                input-enable;
+                power-source = <PM8841_MPP_S3>;
+            };
+
+            default-state {
+                gpio-pins {
+                    pins = "mpp1", "mpp2", "mpp3", "mpp4";
+                    function = "digital";
+                    input-enable;
+                    power-source = <PM8841_MPP_S3>;
+                };
+            };
         };
-      };
     };
 ...
index c323f6d495a4495635ad0de9b7353315f28fa86c..e123beb33aef5ed7cafb2be83246f8b83f95f302 100644 (file)
@@ -22,13 +22,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
 patternProperties:
   "-state$":
     oneOf:
@@ -92,7 +85,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index b1b9cd319e50a3212f38386e39b8275a267ef02a..4009501b3414f4a066482b34897a221d7fdb3f17 100644 (file)
@@ -29,13 +29,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 60
@@ -130,7 +123,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 237cac4f6ce1dfcf1d28c9d9a7dea0669a0c423d..88afeae530c68234506c4e15294c0a3a617e5902 100644 (file)
@@ -23,10 +23,8 @@ properties:
   reg:
     maxItems: 1
 
-  interrupts: true
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
+  interrupts:
+    maxItems: 1
 
   gpio-reserved-ranges:
     minItems: 1
@@ -35,10 +33,6 @@ properties:
   gpio-line-names:
     maxItems: 151
 
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
 patternProperties:
   "-state$":
     oneOf:
@@ -101,7 +95,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 2173c5255638bdff31fffcbd8b90dbad8ebf37d5..e9abbf2c0689bc0cd02eda53f109ca3dce398141 100644 (file)
@@ -22,13 +22,8 @@ properties:
   reg:
     maxItems: 1
 
-  interrupts: true
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
+  interrupts:
+    maxItems: 1
 
   gpio-reserved-ranges:
     minItems: 1
@@ -37,12 +32,6 @@ properties:
   gpio-line-names:
     maxItems: 148
 
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
-
 patternProperties:
   "-state$":
     oneOf:
@@ -108,6 +97,12 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index 573e459b1c44a1391278facdcf3b655601e0aa65..5606f2136ad10141af7b69a5fb942e484ff93781 100644 (file)
@@ -29,13 +29,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 60
@@ -112,7 +105,7 @@ required:
   - reg
   - reg-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 00c5a00e35fce2e041afd978813b006d8d8d8d88..08801cc4e476ff8088a043a87c84503efa506012 100644 (file)
@@ -20,16 +20,6 @@ properties:
   reg:
     maxItems: 2
 
-  gpio-controller: true
-
-  "#gpio-cells":
-    description: Specifying the pin number and flags, as defined in
-      include/dt-bindings/gpio/gpio.h
-    const: 2
-
-  gpio-ranges:
-    maxItems: 1
-
 patternProperties:
   "-state$":
     oneOf:
@@ -45,7 +35,8 @@ $defs:
     description:
       Pinctrl node's client devices use subnodes for desired pin configuration.
       Client device subnodes use below standard properties.
-    $ref: /schemas/pinctrl/pincfg-node.yaml
+    $ref: qcom,lpass-lpi-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
 
     properties:
       pins:
@@ -68,42 +59,14 @@ $defs:
           Specify the alternative function to be configured for the specified
           pins.
 
-      drive-strength:
-        enum: [2, 4, 6, 8, 10, 12, 14, 16]
-        default: 2
-        description:
-          Selects the drive strength for the specified pins, in mA.
-
-      slew-rate:
-        enum: [0, 1, 2, 3]
-        default: 0
-        description: |
-            0: No adjustments
-            1: Higher Slew rate (faster edges)
-            2: Lower Slew rate (slower edges)
-            3: Reserved (No adjustments)
-
-      bias-pull-down: true
-      bias-pull-up: true
-      bias-bus-hold: true
-      bias-disable: true
-      output-high: true
-      output-low: true
-
-    required:
-      - pins
-      - function
-
-    additionalProperties: false
-
 required:
   - compatible
   - reg
-  - gpio-controller
-  - "#gpio-cells"
-  - gpio-ranges
 
-additionalProperties: false
+allOf:
+  - $ref: qcom,lpass-lpi-common.yaml#
+
+unevaluatedProperties: false
 
 examples:
   - |
index c8735ab97e407dddb414118d2f282169f82fbed8..5329fe2a439789ca1466577db26f56bc4c779d65 100644 (file)
@@ -23,24 +23,6 @@ properties:
     description: Specifies the TLMM summary IRQ
     maxItems: 1
 
-  interrupt-controller: true
-
-  '#interrupt-cells':
-    description:
-      Specifies the PIN numbers and Flags, as defined in defined in
-      include/dt-bindings/interrupt-controller/irq.h
-    const: 2
-
-  gpio-controller: true
-
-  '#gpio-cells':
-    description: Specifying the pin number and flags, as defined in
-      include/dt-bindings/gpio/gpio.h
-    const: 2
-
-  gpio-ranges:
-    maxItems: 1
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 88
@@ -48,8 +30,6 @@ properties:
   gpio-line-names:
     maxItems: 175
 
-  wakeup-parent: true
-
 patternProperties:
   "-state$":
     oneOf:
@@ -124,14 +104,8 @@ allOf:
 required:
   - compatible
   - reg
-  - interrupts
-  - interrupt-controller
-  - '#interrupt-cells'
-  - gpio-controller
-  - '#gpio-cells'
-  - gpio-ranges
-
-additionalProperties: false
+
+unevaluatedProperties: false
 
 examples:
   - |
index b086a5184235a67899d5b05e2ebea499e2768626..c122bb849f0f72b31d24cd0c23fdb0963c625247 100644 (file)
@@ -31,20 +31,7 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  '#interrupt-cells': true
-  gpio-controller: true
   gpio-reserved-ranges: true
-  '#gpio-cells': true
-  gpio-ranges: true
-  wakeup-parent: true
-
-required:
-  - compatible
-  - reg
-  - reg-names
-
-additionalProperties: false
 
 patternProperties:
   "-state$":
@@ -106,6 +93,13 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+  - reg-names
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index a9167dac9ab58b239c226df894238bbbbb9fa737..240e6d45cc95e989d9b57b4c36319e5c872ea8f1 100644 (file)
@@ -32,16 +32,6 @@ properties:
       - const: core
       - const: audio
 
-  gpio-controller: true
-
-  "#gpio-cells":
-    description: Specifying the pin number and flags, as defined in
-      include/dt-bindings/gpio/gpio.h
-    const: 2
-
-  gpio-ranges:
-    maxItems: 1
-
 patternProperties:
   "-state$":
     oneOf:
@@ -57,7 +47,8 @@ $defs:
     description:
       Pinctrl node's client devices use subnodes for desired pin configuration.
       Client device subnodes use below standard properties.
-    $ref: /schemas/pinctrl/pincfg-node.yaml
+    $ref: qcom,lpass-lpi-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
 
     properties:
       pins:
@@ -79,48 +70,16 @@ $defs:
           Specify the alternative function to be configured for the specified
           pins.
 
-      drive-strength:
-        enum: [2, 4, 6, 8, 10, 12, 14, 16]
-        default: 2
-        description:
-          Selects the drive strength for the specified pins, in mA.
-
-      slew-rate:
-        enum: [0, 1, 2, 3]
-        default: 0
-        description: |
-          0: No adjustments
-          1: Higher Slew rate (faster edges)
-          2: Lower Slew rate (slower edges)
-          3: Reserved (No adjustments)
-
-      bias-bus-hold: true
-      bias-pull-down: true
-      bias-pull-up: true
-      bias-disable: true
-      input-enable: true
-      output-high: true
-      output-low: true
-
-    required:
-      - pins
-      - function
-
-    additionalProperties: false
-
 allOf:
-  - $ref: pinctrl.yaml#
+  - $ref: qcom,lpass-lpi-common.yaml#
 
 required:
   - compatible
   - reg
   - clocks
   - clock-names
-  - gpio-controller
-  - "#gpio-cells"
-  - gpio-ranges
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 4bd6d7977d3e873a866e72ac6a388f4a3f2ae9b7..ed344deaf8b9e422d3b210638109948aafd90026 100644 (file)
@@ -25,19 +25,7 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
   gpio-reserved-ranges: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
 
 patternProperties:
   "-state$":
@@ -108,6 +96,12 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index 508e0633b253e4a8ae12e54334cf7fb5f421f1b6..a00cb43df144b53641fc19f5b7c3669574d9c08e 100644 (file)
@@ -34,10 +34,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 57
@@ -45,10 +41,6 @@ properties:
   gpio-line-names:
     maxItems: 114
 
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
 patternProperties:
   "-state$":
     oneOf:
@@ -130,7 +122,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 84a15f77e710a5335f789d0d7fdcb05ecdbfc8ed..b56e717aa28e77449bd19cf5f821129c29f64e31 100644 (file)
@@ -25,23 +25,10 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 75
 
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
-
 patternProperties:
   "-state$":
     oneOf:
@@ -98,6 +85,12 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index d301881ddfa8e02031ef0cfb329bdfcdea8bda6b..dfe5616b9b858f8031a902f41c74a63c44e93174 100644 (file)
@@ -26,10 +26,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 75
@@ -37,10 +33,6 @@ properties:
   gpio-line-names:
     maxItems: 150
 
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
 patternProperties:
   "-state$":
     oneOf:
@@ -110,7 +102,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 67af99dd8f147bfb122ce6b20304bbfbcb0e862f..edbcff92bbf91f8bcfb88cbbd9c613f822169bf9 100644 (file)
@@ -23,12 +23,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-
   gpio-reserved-ranges:
     maxItems: 1
 
@@ -102,7 +96,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 27319782d94be59fae12e8dfb49ec16ef73a271f..a31b638c456d11db7e71809e160640c3f75c8614 100644 (file)
@@ -22,12 +22,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-
   gpio-reserved-ranges:
     maxItems: 1
 
@@ -122,7 +116,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 7cb96aa75b08bd92c335974d82e9586baf4b2d49..cb1d978d02c9a54e91583579553fb265fc94c35a 100644 (file)
@@ -22,10 +22,8 @@ properties:
   reg:
     maxItems: 1
 
-  interrupts: true
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
+  interrupts:
+    maxItems: 1
 
   gpio-reserved-ranges:
     minItems: 1
@@ -34,10 +32,6 @@ properties:
   gpio-line-names:
     maxItems: 133
 
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
 patternProperties:
   "-state$":
     oneOf:
@@ -100,7 +94,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml
new file mode 100644 (file)
index 0000000..bb08ca5
--- /dev/null
@@ -0,0 +1,151 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,sm4450-tlmm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. SM4450 TLMM block
+
+maintainers:
+  - Tengfei Fan <quic_tengfan@quicinc.com>
+
+description:
+  Top Level Mode Multiplexer pin controller in Qualcomm SM4450 SoC.
+
+allOf:
+  - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+  compatible:
+    const: qcom,sm4450-pinctrl
+
+  reg:
+    maxItems: 1
+
+  interrupts: true
+  interrupt-controller: true
+  "#interrupt-cells": true
+  gpio-controller: true
+
+  gpio-reserved-ranges:
+    minItems: 1
+    maxItems: 68
+
+  gpio-line-names:
+    maxItems: 136
+
+  "#gpio-cells": true
+  gpio-ranges: true
+  wakeup-parent: true
+
+patternProperties:
+  "-state$":
+    oneOf:
+      - $ref: "#/$defs/qcom-sm4450-tlmm-state"
+      - patternProperties:
+          "-pins$":
+            $ref: "#/$defs/qcom-sm4450-tlmm-state"
+        additionalProperties: false
+
+$defs:
+  qcom-sm4450-tlmm-state:
+    type: object
+    description:
+      Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use below standard properties.
+    $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
+
+    properties:
+      pins:
+        description:
+          List of gpio pins affected by the properties specified in this
+          subnode.
+        items:
+          oneOf:
+            - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-2][0-9]|13[0-5])$"
+            - enum: [ sdc2_clk, sdc2_cmd, sdc2_data, ufs_reset ]
+          minItems: 1
+          maxItems: 36
+
+      function:
+        description:
+          Specify the alternative function to be configured for the specified
+          pins.
+        enum: [ gpio, atest_char, atest_char0, atest_char1, atest_char2,
+                atest_char3, atest_usb0, atest_usb00, atest_usb01, atest_usb02,
+                atest_usb03, audio_ref, cam_mclk, cci_async, cci_i2c,
+                cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4,
+                cmu_rng0, cmu_rng1, cmu_rng2, cmu_rng3, coex_uart1, cri_trng,
+                cri_trng0, cri_trng1, dbg_out, ddr_bist, ddr_pxi0, ddr_pxi1,
+                dp0_hot, gcc_gp1, gcc_gp2, gcc_gp3, host2wlan_sol, ibi_i3c,
+                jitter_bist, mdp_vsync, mdp_vsync0, mdp_vsync1, mdp_vsync2,
+                mdp_vsync3, mi2s0_data0, mi2s0_data1, mi2s0_sck, mi2s0_ws,
+                mi2s2_data0, mi2s2_data1, mi2s2_sck, mi2s2_ws, mi2s_mclk0,
+                mi2s_mclk1, nav_gpio0, nav_gpio1, nav_gpio2, pcie0_clk,
+                phase_flag0, phase_flag1, phase_flag10, phase_flag11,
+                phase_flag12, phase_flag13, phase_flag14, phase_flag15,
+                phase_flag16, phase_flag17, phase_flag18, phase_flag19,
+                phase_flag2, phase_flag20, phase_flag21, phase_flag22,
+                phase_flag23, phase_flag24, phase_flag25, phase_flag26,
+                phase_flag27, phase_flag28, phase_flag29, phase_flag3,
+                phase_flag30, phase_flag31, phase_flag4, phase_flag5,
+                phase_flag6, phase_flag7, phase_flag8, phase_flag9,
+                pll_bist, pll_clk, prng_rosc0, prng_rosc1, prng_rosc2,
+                prng_rosc3, qdss_cti, qdss_gpio, qdss_gpio0, qdss_gpio1,
+                qdss_gpio10, qdss_gpio11, qdss_gpio12, qdss_gpio13, qdss_gpio14,
+                qdss_gpio15, qdss_gpio2, qdss_gpio3, qdss_gpio4, qdss_gpio5,
+                qdss_gpio6, qdss_gpio7, qdss_gpio8, qdss_gpio9, qlink0_enable,
+                qlink0_request, qlink0_wmss, qlink1_enable, qlink1_request,
+                qlink1_wmss, qlink2_enable, qlink2_request, qlink2_wmss,
+                qup0_se0, qup0_se1, qup0_se2, qup0_se3, qup0_se4, qup0_se5,
+                qup0_se6, qup0_se7, qup1_se0, qup1_se1, qup1_se2, qup1_se3,
+                qup1_se4, qup1_se5, qup1_se6, sd_write, tb_trig, tgu_ch0,
+                tgu_ch1, tgu_ch2, tgu_ch3, tmess_prng0, tmess_prng1,
+                tmess_prng2, tmess_prng3, tsense_pwm1, tsense_pwm2, uim0_clk,
+                uim0_data, uim0_present, uim0_reset, uim1_clk, uim1_data,
+                uim1_present, uim1_reset, usb0_hs, usb0_phy, vfr_0, vfr_1,
+                vsense_trigger ]
+
+        required:
+          - pins
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    tlmm: pinctrl@f100000 {
+        compatible = "qcom,sm4450-tlmm";
+        reg = <0x0f100000 0x300000>;
+        gpio-controller;
+        #gpio-cells = <2>;
+        gpio-ranges = <&tlmm 0 0 137>;
+        interrupt-controller;
+        #interrupt-cells = <2>;
+        interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+
+        gpio-wo-state {
+            pins = "gpio1";
+            function = "gpio";
+        };
+
+        uart-w-state {
+            rx-pins {
+                pins = "gpio23";
+                function = "qup1_se2";
+                bias-pull-up;
+            };
+
+            tx-pins {
+                pins = "gpio22";
+                function = "qup1_se2";
+                bias-disable;
+            };
+        };
+    };
+...
index abac3311fc550725cf28780ad4876526578f6627..f4cf2ce86fcd425b7522918442de25c836bf24f0 100644 (file)
@@ -31,16 +31,6 @@ properties:
     items:
       - const: audio
 
-  gpio-controller: true
-
-  "#gpio-cells":
-    description: Specifying the pin number and flags, as defined in
-      include/dt-bindings/gpio/gpio.h
-    const: 2
-
-  gpio-ranges:
-    maxItems: 1
-
 patternProperties:
   "-state$":
     oneOf:
@@ -56,7 +46,8 @@ $defs:
     description:
       Pinctrl node's client devices use subnodes for desired pin configuration.
       Client device subnodes use below standard properties.
-    $ref: /schemas/pinctrl/pincfg-node.yaml
+    $ref: qcom,lpass-lpi-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
 
     properties:
       pins:
@@ -75,48 +66,17 @@ $defs:
           Specify the alternative function to be configured for the specified
           pins.
 
-      drive-strength:
-        enum: [2, 4, 6, 8, 10, 12, 14, 16]
-        default: 2
-        description:
-          Selects the drive strength for the specified pins, in mA.
-
-      slew-rate:
-        enum: [0, 1, 2, 3]
-        default: 0
-        description: |
-          0: No adjustments
-          1: Higher Slew rate (faster edges)
-          2: Lower Slew rate (slower edges)
-          3: Reserved (No adjustments)
-
-      bias-bus-hold: true
-      bias-pull-down: true
-      bias-pull-up: true
-      bias-disable: true
-      input-enable: true
-      output-high: true
-      output-low: true
-
-    required:
-      - pins
-      - function
-
-    additionalProperties: false
 
 allOf:
-  - $ref: pinctrl.yaml#
+  - $ref: qcom,lpass-lpi-common.yaml#
 
 required:
   - compatible
   - reg
   - clocks
   - clock-names
-  - gpio-controller
-  - "#gpio-cells"
-  - gpio-ranges
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 871df54f69a2eb192fd43a8c19d36e2fe54b4f8e..7f36f9b933330937ef9d7b334b041174f9617ec6 100644 (file)
@@ -29,13 +29,7 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
   gpio-reserved-ranges: true
-  wakeup-parent: true
 
 patternProperties:
   "-state$":
@@ -97,7 +91,7 @@ required:
   - reg
   - reg-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 8d77707b02b9fef344290b757f3308b5b51bb10e..ddeaeaa9a450abe2ac922a8f81d3ee03ab2b9e7e 100644 (file)
@@ -30,20 +30,7 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
   gpio-reserved-ranges: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
-required:
-  - compatible
-  - reg
-  - reg-names
-
-additionalProperties: false
 
 patternProperties:
   "-state$":
@@ -105,6 +92,13 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+  - reg-names
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index 27af379cf791aa0b9fab6921098a3e5ee76850ea..a4771f87d93645549d9cfbb8b39315065d0b8cf6 100644 (file)
@@ -26,10 +26,6 @@ properties:
     minItems: 9
     maxItems: 9
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 78
@@ -37,16 +33,6 @@ properties:
   gpio-line-names:
     maxItems: 156
 
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
-
 patternProperties:
   "-state$":
     oneOf:
@@ -112,6 +98,12 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index 6e02ba24825f111bdee19a16123d1207921462b2..047f82863f9bbfdfcd870a35656d0b56e6c018ba 100644 (file)
@@ -25,19 +25,7 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
   gpio-reserved-ranges: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
 
 patternProperties:
   "-state$":
@@ -113,6 +101,12 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index ede0f3acad9c4f046728b179a2ce5fda63233a78..7f23f939ad3257bca34ef6d6c7d599fef2cbc1f8 100644 (file)
@@ -32,13 +32,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 60
@@ -111,7 +104,7 @@ required:
   - reg
   - reg-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index c6439626464ef9a5bb11a1c106f6e0098a4d0c87..bdb7ed4be02675e85561e35423adb6a1ec16dbc9 100644 (file)
@@ -30,13 +30,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 88
@@ -113,7 +106,7 @@ required:
   - reg
   - reg-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 4b4be7efc150ac90bcb8a8f55fdb222f5085498b..750c996c10a713d9a78819f331d74cd24712ae11 100644 (file)
@@ -30,16 +30,6 @@ properties:
       - const: core
       - const: audio
 
-  gpio-controller: true
-
-  "#gpio-cells":
-    description: Specifying the pin number and flags, as defined in
-      include/dt-bindings/gpio/gpio.h
-    const: 2
-
-  gpio-ranges:
-    maxItems: 1
-
 patternProperties:
   "-state$":
     oneOf:
@@ -55,7 +45,8 @@ $defs:
     description:
       Pinctrl node's client devices use subnodes for desired pin configuration.
       Client device subnodes use below standard properties.
-    $ref: /schemas/pinctrl/pincfg-node.yaml
+    $ref: qcom,lpass-lpi-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
 
     properties:
       pins:
@@ -78,48 +69,16 @@ $defs:
           Specify the alternative function to be configured for the specified
           pins.
 
-      drive-strength:
-        enum: [2, 4, 6, 8, 10, 12, 14, 16]
-        default: 2
-        description:
-          Selects the drive strength for the specified pins, in mA.
-
-      slew-rate:
-        enum: [0, 1, 2, 3]
-        default: 0
-        description: |
-            0: No adjustments
-            1: Higher Slew rate (faster edges)
-            2: Lower Slew rate (slower edges)
-            3: Reserved (No adjustments)
-
-      bias-pull-down: true
-      bias-pull-up: true
-      bias-bus-hold: true
-      bias-disable: true
-      input-enable: true
-      output-high: true
-      output-low: true
-
-    required:
-      - pins
-      - function
-
-    additionalProperties: false
-
 allOf:
-  - $ref: pinctrl.yaml#
+  - $ref: qcom,lpass-lpi-common.yaml#
 
 required:
   - compatible
   - reg
   - clocks
   - clock-names
-  - gpio-controller
-  - "#gpio-cells"
-  - gpio-ranges
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 021c5470852469f606bae49dcb31beb42927d3e7..b5d04347c064d598d8600333f415fa412255cd2b 100644 (file)
@@ -28,13 +28,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 90
@@ -106,7 +99,7 @@ required:
   - reg
   - reg-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 2e65ae08dd211222997eaca8bcdbc89c2c045384..9d782f910b318c6e739c082e6cdf9d5176497560 100644 (file)
@@ -33,16 +33,6 @@ properties:
       - const: core
       - const: audio
 
-  gpio-controller: true
-
-  "#gpio-cells":
-    description: Specifying the pin number and flags, as defined in
-      include/dt-bindings/gpio/gpio.h
-    const: 2
-
-  gpio-ranges:
-    maxItems: 1
-
 patternProperties:
   "-state$":
     oneOf:
@@ -58,7 +48,8 @@ $defs:
     description:
       Pinctrl node's client devices use subnodes for desired pin configuration.
       Client device subnodes use below standard properties.
-    $ref: /schemas/pinctrl/pincfg-node.yaml
+    $ref: qcom,lpass-lpi-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
 
     properties:
       pins:
@@ -81,48 +72,16 @@ $defs:
           Specify the alternative function to be configured for the specified
           pins.
 
-      drive-strength:
-        enum: [2, 4, 6, 8, 10, 12, 14, 16]
-        default: 2
-        description:
-          Selects the drive strength for the specified pins, in mA.
-
-      slew-rate:
-        enum: [0, 1, 2, 3]
-        default: 0
-        description: |
-          0: No adjustments
-          1: Higher Slew rate (faster edges)
-          2: Lower Slew rate (slower edges)
-          3: Reserved (No adjustments)
-
-      bias-bus-hold: true
-      bias-pull-down: true
-      bias-pull-up: true
-      bias-disable: true
-      input-enable: true
-      output-high: true
-      output-low: true
-
-    required:
-      - pins
-      - function
-
-    additionalProperties: false
-
 allOf:
-  - $ref: pinctrl.yaml#
+  - $ref: qcom,lpass-lpi-common.yaml#
 
 required:
   - compatible
   - reg
   - clocks
   - clock-names
-  - gpio-controller
-  - "#gpio-cells"
-  - gpio-ranges
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 6e8f41ff0a76bebdeb4eac05ddd0ab3be5d7b703..ec5e09611d810ca13b8cb446d97f5f4072467c8e 100644 (file)
@@ -25,10 +25,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 102
@@ -36,16 +32,6 @@ properties:
   gpio-line-names:
     maxItems: 203
 
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
-
 patternProperties:
   "-state$":
     oneOf:
@@ -108,6 +94,12 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index 1eefa9aa6a86cc1598951e4d63a4005a4522940d..e7565592da86245521be0994179781c0d250cd16 100644 (file)
@@ -32,16 +32,6 @@ properties:
       - const: core
       - const: audio
 
-  gpio-controller: true
-
-  "#gpio-cells":
-    description: Specifying the pin number and flags, as defined in
-      include/dt-bindings/gpio/gpio.h
-    const: 2
-
-  gpio-ranges:
-    maxItems: 1
-
 patternProperties:
   "-state$":
     oneOf:
@@ -57,7 +47,8 @@ $defs:
     description:
       Pinctrl node's client devices use subnodes for desired pin configuration.
       Client device subnodes use below standard properties.
-    $ref: /schemas/pinctrl/pincfg-node.yaml
+    $ref: qcom,lpass-lpi-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
 
     properties:
       pins:
@@ -81,48 +72,16 @@ $defs:
           Specify the alternative function to be configured for the specified
           pins.
 
-      drive-strength:
-        enum: [2, 4, 6, 8, 10, 12, 14, 16]
-        default: 2
-        description:
-          Selects the drive strength for the specified pins, in mA.
-
-      slew-rate:
-        enum: [0, 1, 2, 3]
-        default: 0
-        description: |
-          0: No adjustments
-          1: Higher Slew rate (faster edges)
-          2: Lower Slew rate (slower edges)
-          3: Reserved (No adjustments)
-
-      bias-bus-hold: true
-      bias-pull-down: true
-      bias-pull-up: true
-      bias-disable: true
-      input-enable: true
-      output-high: true
-      output-low: true
-
-    required:
-      - pins
-      - function
-
-    additionalProperties: false
-
 allOf:
-  - $ref: pinctrl.yaml#
+  - $ref: qcom,lpass-lpi-common.yaml#
 
 required:
   - compatible
   - reg
   - clocks
   - clock-names
-  - gpio-controller
-  - "#gpio-cells"
-  - gpio-ranges
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 5163fe3f536520890530d9bbbfa2c8858e4e3b92..16fd2c5e233931565c40c905f120dfd6d94f69d6 100644 (file)
@@ -25,10 +25,6 @@ properties:
   interrupts:
     maxItems: 1
 
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
-
   gpio-reserved-ranges:
     minItems: 1
     maxItems: 105
@@ -36,16 +32,6 @@ properties:
   gpio-line-names:
     maxItems: 210
 
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
-required:
-  - compatible
-  - reg
-
-additionalProperties: false
-
 patternProperties:
   "-state$":
     oneOf:
@@ -107,6 +93,12 @@ $defs:
     required:
       - pins
 
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
 examples:
   - |
     #include <dt-bindings/interrupt-controller/arm-gic.h>
index ef974324684987480ec819da232d083f64b7a46f..bf4a72facae1d5568f642c6204d7a0a5ec6001e5 100644 (file)
@@ -16,7 +16,11 @@ description:
 
 properties:
   compatible:
-    const: qcom,sm8550-lpass-lpi-pinctrl
+    oneOf:
+      - const: qcom,sm8550-lpass-lpi-pinctrl
+      - items:
+          - const: qcom,x1e80100-lpass-lpi-pinctrl
+          - const: qcom,sm8550-lpass-lpi-pinctrl
 
   reg:
     items:
@@ -33,16 +37,6 @@ properties:
       - const: core
       - const: audio
 
-  gpio-controller: true
-
-  "#gpio-cells":
-    description: Specifying the pin number and flags, as defined in
-      include/dt-bindings/gpio/gpio.h
-    const: 2
-
-  gpio-ranges:
-    maxItems: 1
-
 patternProperties:
   "-state$":
     oneOf:
@@ -58,7 +52,8 @@ $defs:
     description:
       Pinctrl node's client devices use subnodes for desired pin configuration.
       Client device subnodes use below standard properties.
-    $ref: /schemas/pinctrl/pincfg-node.yaml
+    $ref: qcom,lpass-lpi-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
 
     properties:
       pins:
@@ -81,48 +76,16 @@ $defs:
           Specify the alternative function to be configured for the specified
           pins.
 
-      drive-strength:
-        enum: [2, 4, 6, 8, 10, 12, 14, 16]
-        default: 2
-        description:
-          Selects the drive strength for the specified pins, in mA.
-
-      slew-rate:
-        enum: [0, 1, 2, 3]
-        default: 0
-        description: |
-          0: No adjustments
-          1: Higher Slew rate (faster edges)
-          2: Lower Slew rate (slower edges)
-          3: Reserved (No adjustments)
-
-      bias-bus-hold: true
-      bias-pull-down: true
-      bias-pull-up: true
-      bias-disable: true
-      input-enable: true
-      output-high: true
-      output-low: true
-
-    required:
-      - pins
-      - function
-
-    additionalProperties: false
-
 allOf:
-  - $ref: pinctrl.yaml#
+  - $ref: qcom,lpass-lpi-common.yaml#
 
 required:
   - compatible
   - reg
   - clocks
   - clock-names
-  - gpio-controller
-  - "#gpio-cells"
-  - gpio-ranges
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index f789c7753a92cede9652116e455935f2c346f951..c2ae79df424fda1be7f8be2b2c3d304df9040c60 100644 (file)
@@ -22,10 +22,8 @@ properties:
   reg:
     maxItems: 1
 
-  interrupts: true
-  interrupt-controller: true
-  "#interrupt-cells": true
-  gpio-controller: true
+  interrupts:
+    maxItems: 1
 
   gpio-reserved-ranges:
     minItems: 1
@@ -34,10 +32,6 @@ properties:
   gpio-line-names:
     maxItems: 210
 
-  "#gpio-cells": true
-  gpio-ranges: true
-  wakeup-parent: true
-
 patternProperties:
   "-state$":
     oneOf:
@@ -117,7 +111,7 @@ required:
   - compatible
   - reg
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm8650-lpass-lpi-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm8650-lpass-lpi-pinctrl.yaml
new file mode 100644 (file)
index 0000000..db72143
--- /dev/null
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,sm8650-lpass-lpi-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SM8650 SoC LPASS LPI TLMM
+
+maintainers:
+  - Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+  - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+description:
+  Top Level Mode Multiplexer pin controller in the Low Power Audio SubSystem
+  (LPASS) Low Power Island (LPI) of Qualcomm SM8650 SoC.
+
+properties:
+  compatible:
+    const: qcom,sm8650-lpass-lpi-pinctrl
+
+  reg:
+    items:
+      - description: LPASS LPI TLMM Control and Status registers
+
+  clocks:
+    items:
+      - description: LPASS Core voting clock
+      - description: LPASS Audio voting clock
+
+  clock-names:
+    items:
+      - const: core
+      - const: audio
+
+patternProperties:
+  "-state$":
+    oneOf:
+      - $ref: "#/$defs/qcom-sm8650-lpass-state"
+      - patternProperties:
+          "-pins$":
+            $ref: "#/$defs/qcom-sm8650-lpass-state"
+        additionalProperties: false
+
+$defs:
+  qcom-sm8650-lpass-state:
+    type: object
+    description:
+      Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use below standard properties.
+    $ref: qcom,lpass-lpi-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
+
+    properties:
+      pins:
+        description:
+          List of gpio pins affected by the properties specified in this
+          subnode.
+        items:
+          pattern: "^gpio([0-9]|1[0-9]|2[0-2])$"
+
+      function:
+        enum: [ dmic1_clk, dmic1_data, dmic2_clk, dmic2_data, dmic3_clk,
+                dmic3_data, dmic4_clk, dmic4_data, ext_mclk1_a, ext_mclk1_b,
+                ext_mclk1_c, ext_mclk1_d, ext_mclk1_e, gpio, i2s0_clk,
+                i2s0_data, i2s0_ws, i2s1_clk, i2s1_data, i2s1_ws, i2s2_clk,
+                i2s2_data, i2s2_ws, i2s3_clk, i2s3_data, i2s3_ws, i2s4_clk,
+                i2s4_data, i2s4_ws, qca_swr_clk, qca_swr_data, slimbus_clk,
+                slimbus_data, swr_rx_clk, swr_rx_data, swr_tx_clk, swr_tx_data,
+                wsa_swr_clk, wsa_swr_data, wsa2_swr_clk, wsa2_swr_data ]
+        description:
+          Specify the alternative function to be configured for the specified
+          pins.
+
+allOf:
+  - $ref: qcom,lpass-lpi-common.yaml#
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/sound/qcom,q6dsp-lpass-ports.h>
+
+    lpass_tlmm: pinctrl@6e80000 {
+        compatible = "qcom,sm8650-lpass-lpi-pinctrl";
+        reg = <0x06e80000 0x20000>;
+
+        clocks = <&q6prmcc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+                 <&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+        clock-names = "core", "audio";
+
+        gpio-controller;
+        #gpio-cells = <2>;
+        gpio-ranges = <&lpass_tlmm 0 0 23>;
+
+        tx-swr-sleep-clk-state {
+            pins = "gpio0";
+            function = "swr_tx_clk";
+            drive-strength = <2>;
+            bias-pull-down;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm8650-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm8650-tlmm.yaml
new file mode 100644 (file)
index 0000000..c0a06ab
--- /dev/null
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,sm8650-tlmm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. SM8650 TLMM block
+
+maintainers:
+  - Bjorn Andersson <andersson@kernel.org>
+
+description:
+  Top Level Mode Multiplexer pin controller in Qualcomm SM8650 SoC.
+
+allOf:
+  - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+  compatible:
+    const: qcom,sm8650-tlmm
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  gpio-reserved-ranges:
+    minItems: 1
+    maxItems: 105
+
+  gpio-line-names:
+    maxItems: 210
+
+patternProperties:
+  "-state$":
+    oneOf:
+      - $ref: "#/$defs/qcom-sm8650-tlmm-state"
+      - patternProperties:
+          "-pins$":
+            $ref: "#/$defs/qcom-sm8650-tlmm-state"
+        additionalProperties: false
+
+$defs:
+  qcom-sm8650-tlmm-state:
+    type: object
+    description:
+      Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use below standard properties.
+    $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
+
+    properties:
+      pins:
+        description:
+          List of gpio pins affected by the properties specified in this
+          subnode.
+        items:
+          oneOf:
+            - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-9][0-9]|20[0-9])$"
+            - enum: [ ufs_reset, sdc2_clk, sdc2_cmd, sdc2_data ]
+        minItems: 1
+        maxItems: 36
+
+      function:
+        description:
+          Specify the alternative function to be configured for the specified
+          pins.
+        enum: [ gpio, aoss_cti, atest_char, atest_usb, audio_ext_mclk0,
+                audio_ext_mclk1, audio_ref_clk, cam_aon_mclk2, cam_aon_mclk4,
+                cam_mclk, cci_async_in, cci_i2c_scl, cci_i2c_sda, cci_timer,
+                cmu_rng, coex_uart1_rx, coex_uart1_tx, coex_uart2_rx,
+                coex_uart2_tx, cri_trng, dbg_out_clk, ddr_bist_complete,
+                ddr_bist_fail, ddr_bist_start, ddr_bist_stop, ddr_pxi0,
+                ddr_pxi1, ddr_pxi2, ddr_pxi3, do_not, dp_hot, gcc_gp1,
+                gcc_gp2, gcc_gp3, gnss_adc0, gnss_adc1, i2chub0_se0,
+                i2chub0_se1, i2chub0_se2, i2chub0_se3, i2chub0_se4,
+                i2chub0_se5, i2chub0_se6, i2chub0_se7, i2chub0_se8,
+                i2chub0_se9, i2s0_data0, i2s0_data1, i2s0_sck, i2s0_ws,
+                i2s1_data0, i2s1_data1, i2s1_sck, i2s1_ws, ibi_i3c,
+                jitter_bist, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out,
+                mdp_vsync2_out, mdp_vsync3_out, mdp_vsync_e, nav_gpio0,
+                nav_gpio1, nav_gpio2, nav_gpio3, pcie0_clk_req_n,
+                pcie1_clk_req_n, phase_flag, pll_bist_sync, pll_clk_aux,
+                prng_rosc0, prng_rosc1, prng_rosc2, prng_rosc3, qdss_cti,
+                qdss_gpio, qlink_big_enable, qlink_big_request,
+                qlink_little_enable, qlink_little_request, qlink_wmss,
+                qspi0, qspi1, qspi2, qspi3, qspi_clk, qspi_cs, qup1_se0,
+                qup1_se1, qup1_se2, qup1_se3, qup1_se4, qup1_se5, qup1_se6,
+                qup1_se7, qup2_se0, qup2_se1, qup2_se2, qup2_se3, qup2_se4,
+                qup2_se5, qup2_se6, qup2_se7, sd_write_protect, sdc40, sdc41,
+                sdc42, sdc43, sdc4_clk, sdc4_cmd, tb_trig_sdc2, tb_trig_sdc4,
+                tgu_ch0_trigout, tgu_ch1_trigout, tgu_ch2_trigout,
+                tgu_ch3_trigout, tmess_prng0, tmess_prng1, tmess_prng2,
+                tmess_prng3, tsense_pwm1, tsense_pwm2, tsense_pwm3, uim0_clk,
+                uim0_data, uim0_present, uim0_reset, uim1_clk, uim1_data,
+                uim1_present, uim1_reset, usb1_hs, usb_phy, vfr_0, vfr_1,
+                vsense_trigger_mirnat ]
+
+    required:
+      - pins
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    tlmm: pinctrl@f100000 {
+        compatible = "qcom,sm8650-tlmm";
+        reg = <0x0f100000 0x300000>;
+        gpio-controller;
+        #gpio-cells = <2>;
+        gpio-ranges = <&tlmm 0 0 211>;
+        interrupt-controller;
+        #interrupt-cells = <2>;
+        interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+
+        gpio-wo-state {
+            pins = "gpio1";
+            function = "gpio";
+        };
+
+        uart-w-state {
+            rx-pins {
+                pins = "gpio60";
+                function = "qup1_se7";
+                bias-pull-up;
+            };
+
+            tx-pins {
+                pins = "gpio61";
+                function = "qup1_se7";
+                bias-disable;
+            };
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,x1e80100-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,x1e80100-tlmm.yaml
new file mode 100644 (file)
index 0000000..a1333e0
--- /dev/null
@@ -0,0 +1,137 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,x1e80100-tlmm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. X1E80100 TLMM block
+
+maintainers:
+  - Rajendra Nayak <quic_rjendra@quicinc.com>
+
+description:
+  Top Level Mode Multiplexer pin controller in Qualcomm X1E80100 SoC.
+
+allOf:
+  - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+  compatible:
+    const: qcom,x1e80100-tlmm
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  gpio-reserved-ranges:
+    minItems: 1
+    maxItems: 119
+
+  gpio-line-names:
+    maxItems: 238
+
+patternProperties:
+  "-state$":
+    oneOf:
+      - $ref: "#/$defs/qcom-x1e80100-tlmm-state"
+      - patternProperties:
+          "-pins$":
+            $ref: "#/$defs/qcom-x1e80100-tlmm-state"
+        additionalProperties: false
+
+$defs:
+  qcom-x1e80100-tlmm-state:
+    type: object
+    description:
+      Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use below standard properties.
+    $ref: qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state
+    unevaluatedProperties: false
+
+    properties:
+      pins:
+        description:
+          List of gpio pins affected by the properties specified in this
+          subnode.
+        items:
+          oneOf:
+            - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-2][0-9]|23[0-7])$"
+            - enum: [ ufs_reset, sdc2_clk, sdc2_cmd, sdc2_data ]
+        minItems: 1
+        maxItems: 36
+
+      function:
+        description:
+          Specify the alternative function to be configured for the specified
+          pins.
+        enum: [ aon_cci, aoss_cti, atest_char, atest_char0,
+                atest_char1, atest_char2, atest_char3, atest_usb,
+                audio_ext, audio_ref, cam_aon, cam_mclk, cci_async,
+                cci_i2c, cci_timer0, cci_timer1, cci_timer2, cci_timer3,
+                cci_timer4, cmu_rng0, cmu_rng1, cmu_rng2, cmu_rng3,
+                cri_trng, dbg_out, ddr_bist, ddr_pxi0, ddr_pxi1,
+                ddr_pxi2, ddr_pxi3, ddr_pxi4, ddr_pxi5, ddr_pxi6, ddr_pxi7,
+                edp0_hot, edp0_lcd, edp1_hot, edp1_lcd, eusb0_ac, eusb1_ac,
+                eusb2_ac, eusb3_ac, eusb5_ac, eusb6_ac, gcc_gp1, gcc_gp2,
+                gcc_gp3, gpio, i2s0_data0, i2s0_data1, i2s0_sck, i2s0_ws, i2s1_data0,
+                i2s1_data1, i2s1_sck, i2s1_ws, ibi_i3c, jitter_bist, mdp_vsync0,
+                mdp_vsync1, mdp_vsync2, mdp_vsync3, mdp_vsync4, mdp_vsync5,
+                mdp_vsync6, mdp_vsync7, mdp_vsync8, pcie3_clk, pcie4_clk,
+                pcie5_clk, pcie6a_clk, pcie6b_clk, phase_flag, pll_bist, pll_clk,
+                prng_rosc0, prng_rosc1, prng_rosc2, prng_rosc3, qdss_cti,
+                qdss_gpio, qspi00, qspi01, qspi02, qspi03, qspi0_clk, qspi0_cs0,
+                qspi0_cs1, qup0_se0, qup0_se1, qup0_se2, qup0_se3, qup0_se4,
+                qup0_se5, qup0_se6, qup0_se7, qup1_se0, qup1_se1, qup1_se2, qup1_se3,
+                qup1_se4, qup1_se5, qup1_se6, qup1_se7, qup2_se0, qup2_se1, qup2_se2,
+                qup2_se3, qup2_se4, qup2_se5, qup2_se6, qup2_se7, sd_write, sdc4_clk,
+                sdc4_cmd, sdc4_data0, sdc4_data1, sdc4_data2, sdc4_data3, sys_throttle,
+                tb_trig, tgu_ch0, tgu_ch1, tgu_ch2, tgu_ch3, tgu_ch4, tgu_ch5,
+                tgu_ch6, tgu_ch7, tmess_prng0, tmess_prng1, tmess_prng2, tmess_prng3,
+                tsense_pwm1, tsense_pwm2, tsense_pwm3, tsense_pwm4, usb0_dp, usb0_phy,
+                usb0_sbrx, usb0_sbtx, usb1_dp, usb1_phy, usb1_sbrx, usb1_sbtx,
+                usb2_dp, usb2_phy, usb2_sbrx, usb2_sbtx, vsense_trigger ]
+
+    required:
+      - pins
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    tlmm: pinctrl@f100000 {
+        compatible = "qcom,x1e80100-tlmm";
+        reg = <0x0f100000 0xf00000>;
+        gpio-controller;
+        #gpio-cells = <2>;
+        gpio-ranges = <&tlmm 0 0 239>;
+        interrupt-controller;
+        #interrupt-cells = <2>;
+        interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+
+        gpio-wo-state {
+            pins = "gpio1";
+            function = "gpio";
+        };
+
+        uart-w-state {
+            rx-pins {
+                pins = "gpio26";
+                function = "qup2_se7";
+                bias-pull-up;
+            };
+
+            tx-pins {
+                pins = "gpio27";
+                function = "qup2_se7";
+                bias-disable;
+            };
+        };
+    };
+...
index 8271e7b2c162a33e8941d9c41ad8ce8c3b9007c2..8b8e4e1a000f62f826c152509a25b65623d0ea07 100644 (file)
@@ -20,7 +20,7 @@ description:
 
 properties:
   compatible:
-    const: "renesas,r7s9210-pinctrl" # RZ/A2M
+    const: renesas,r7s9210-pinctrl # RZ/A2M
 
   reg:
     maxItems: 1
index b5ca40d0e251dc17ac2ea0f6f392b80db736ea22..d476de82e5c3f487b0275d1f2d8d8b5a79a6a116 100644 (file)
@@ -185,17 +185,17 @@ examples:
                     sd1_mux {
                             pinmux = <RZG2L_PORT_PINMUX(19, 0, 1)>, /* CD */
                                      <RZG2L_PORT_PINMUX(19, 1, 1)>; /* WP */
-                            power-source  = <3300>;
+                            power-source = <3300>;
                     };
 
                     sd1_data {
                             pins = "SD1_DATA0", "SD1_DATA1", "SD1_DATA2", "SD1_DATA3";
-                            power-source  = <3300>;
+                            power-source = <3300>;
                     };
 
                     sd1_ctrl {
                             pins = "SD1_CLK", "SD1_CMD";
-                            power-source  = <3300>;
+                            power-source = <3300>;
                     };
             };
     };
index 1de91a51234df4908625264615788d0d316f76fc..4dfb49b0e07f733c16a647ed6defebec75cf1ac2 100644 (file)
@@ -28,15 +28,27 @@ description: |
 
 properties:
   compatible:
-    enum:
-      - samsung,s3c2410-wakeup-eint
-      - samsung,s3c2412-wakeup-eint
-      - samsung,s3c64xx-wakeup-eint
-      - samsung,s5pv210-wakeup-eint
-      - samsung,exynos4210-wakeup-eint
-      - samsung,exynos7-wakeup-eint
-      - samsung,exynos850-wakeup-eint
-      - samsung,exynosautov9-wakeup-eint
+    oneOf:
+      - enum:
+          - samsung,s3c2410-wakeup-eint
+          - samsung,s3c2412-wakeup-eint
+          - samsung,s3c64xx-wakeup-eint
+          - samsung,s5pv210-wakeup-eint
+          - samsung,exynos4210-wakeup-eint
+          - samsung,exynos7-wakeup-eint
+          - samsung,exynosautov920-wakeup-eint
+      - items:
+          - enum:
+              - samsung,exynos5433-wakeup-eint
+              - samsung,exynos7885-wakeup-eint
+              - samsung,exynos850-wakeup-eint
+          - const: samsung,exynos7-wakeup-eint
+      - items:
+          - enum:
+              - google,gs101-wakeup-eint
+              - samsung,exynosautov9-wakeup-eint
+          - const: samsung,exynos850-wakeup-eint
+          - const: samsung,exynos7-wakeup-eint
 
   interrupts:
     description:
@@ -79,11 +91,14 @@ allOf:
   - if:
       properties:
         compatible:
-          contains:
-            enum:
-              - samsung,s5pv210-wakeup-eint
-              - samsung,exynos4210-wakeup-eint
-              - samsung,exynos7-wakeup-eint
+          # Match without "contains", to skip newer variants which are still
+          # compatible with samsung,exynos7-wakeup-eint
+          enum:
+            - samsung,s5pv210-wakeup-eint
+            - samsung,exynos4210-wakeup-eint
+            - samsung,exynos5433-wakeup-eint
+            - samsung,exynos7-wakeup-eint
+            - samsung,exynos7885-wakeup-eint
     then:
       properties:
         interrupts:
@@ -98,7 +113,7 @@ allOf:
           contains:
             enum:
               - samsung,exynos850-wakeup-eint
-              - samsung,exynosautov9-wakeup-eint
+              - samsung,exynosautov920-wakeup-eint
     then:
       properties:
         interrupts: false
index 26614621774a5d822ce21d977aaa2876f3a2a983..118549c25976570c760c9d5059735e9667ff5135 100644 (file)
@@ -35,6 +35,7 @@ properties:
 
   compatible:
     enum:
+      - google,gs101-pinctrl
       - samsung,s3c2412-pinctrl
       - samsung,s3c2416-pinctrl
       - samsung,s3c2440-pinctrl
@@ -53,6 +54,7 @@ properties:
       - samsung,exynos7885-pinctrl
       - samsung,exynos850-pinctrl
       - samsung,exynosautov9-pinctrl
+      - samsung,exynosautov920-pinctrl
       - tesla,fsd-pinctrl
 
   interrupts:
@@ -313,7 +315,8 @@ examples:
         pinctrl-0 = <&initial_alive>;
 
         wakeup-interrupt-controller {
-            compatible = "samsung,exynos7-wakeup-eint";
+            compatible = "samsung,exynos5433-wakeup-eint",
+                         "samsung,exynos7-wakeup-eint";
             interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
         };
 
index b85f9e36ce4b77fb084d648e56d96dac67bece9e..d2676f92ef5bb8999d5932271c5f764d3ae3bfba 100644 (file)
@@ -39,7 +39,7 @@ properties:
       phandle to the SLCR.
 
 patternProperties:
-  '^(.*-)?(default|gpio)$':
+  '^(.*-)?(default|gpio-grp)$':
     type: object
     patternProperties:
       '^mux':
index 01b6f2b578437b93118a8d06bc107e958d3b012a..f13d315b5d5e8ed98513fd85bef314b7b66b80d2 100644 (file)
@@ -31,7 +31,7 @@ properties:
     const: xlnx,zynqmp-pinctrl
 
 patternProperties:
-  '^(.*-)?(default|gpio)$':
+  '^(.*-)?(default|gpio-grp)$':
     type: object
     patternProperties:
       '^mux':
index 407b7cfec783cdc5b7c27d33d027e4bef6db3a44..7a0f1a4008681508bce24769fde1ece1489a816e 100644 (file)
@@ -20,6 +20,7 @@ properties:
   compatible:
     items:
       - enum:
+          - fsl,imx8dl-scu-pd
           - fsl,imx8qm-scu-pd
           - fsl,imx8qxp-scu-pd
       - const: fsl,scu-pd
index 14a262bcbf7cd21ebf62ee02f738bf82cbde80e1..627f8a6078c299e32e4bc7597509a6ef52d119d7 100644 (file)
@@ -28,17 +28,15 @@ properties:
     items:
       - const: reboot-mode
 
-patternProperties:
-  "^mode-.+":
-    $ref: /schemas/types.yaml#/definitions/uint32
-    description: Vendor-specific mode value written to the mode register
+allOf:
+  - $ref: reboot-mode.yaml#
 
 required:
   - compatible
   - nvmem-cells
   - nvmem-cell-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 5e460128b0d10911a541c959a9581efcb8b4d393..fc8105a7b9b268df5cb08ad32cde26c50ea955ce 100644 (file)
@@ -111,21 +111,24 @@ examples:
    #include <dt-bindings/interrupt-controller/irq.h>
    #include <dt-bindings/input/linux-event-codes.h>
    #include <dt-bindings/spmi/spmi.h>
-   spmi_bus: spmi@c440000 {
+
+   spmi@c440000 {
      reg = <0x0c440000 0x1100>;
      #address-cells = <2>;
      #size-cells = <0>;
-     pmk8350: pmic@0 {
+
+     pmic@0 {
        reg = <0x0 SPMI_USID>;
        #address-cells = <1>;
        #size-cells = <0>;
-       pmk8350_pon: pon_hlos@1300 {
-         reg = <0x1300>;
+
+       pon@800 {
          compatible = "qcom,pm8998-pon";
+         reg = <0x800>;
 
          pwrkey {
             compatible = "qcom,pm8941-pwrkey";
-            interrupts = < 0x0 0x8 0 IRQ_TYPE_EDGE_BOTH >;
+            interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
             debounce = <15625>;
             bias-pull-up;
             linux,code = <KEY_POWER>;
index 9b1ffceefe3dec86250dc235f92545bb123ea3cf..b6acff199cdecea08c1243ed5e8ad71240d65e9a 100644 (file)
@@ -29,12 +29,10 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32
     description: Offset in the register map for the mode register (in bytes)
 
-patternProperties:
-  "^mode-.+":
-    $ref: /schemas/types.yaml#/definitions/uint32
-    description: Vendor-specific mode value written to the mode register
+allOf:
+  - $ref: reboot-mode.yaml#
 
-additionalProperties: false
+unevaluatedProperties: false
 
 required:
   - compatible
index 45792e216981a99de457cc99ea2d8f2dfd130136..799831636194f50ffdb139bd146d8905802bd474 100644 (file)
@@ -57,7 +57,7 @@ examples:
 
     firmware {
       zynqmp-firmware {
-        zynqmp-power {
+        power-management {
           compatible = "xlnx,zynqmp-power";
           interrupts = <0 35 4>;
         };
@@ -70,7 +70,7 @@ examples:
 
     firmware {
       zynqmp-firmware {
-        zynqmp-power {
+        power-management {
           compatible = "xlnx,zynqmp-power";
           interrupt-parent = <&gic>;
           interrupts = <0 35 4>;
index d3ebc9de8c0b49734cb5ce2b138a2d4b67bed7f9..131b7e57d22f46d28a9501c5641fd3c03b7e6ca0 100644 (file)
@@ -20,6 +20,7 @@ properties:
       - ti,bq24192
       - ti,bq24192i
       - ti,bq24196
+      - ti,bq24296
 
   reg:
     maxItems: 1
index 07e38be39f1bc3608135206a44ca1491f33dc221..89f9603499b46024bb6301917a347d2dd8f1062e 100644 (file)
@@ -79,10 +79,10 @@ examples:
         interrupt-parent = <&gpio1>;
         interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
 
-        richtek,output-charge-current      = <500000>;
-        richtek,end-of-charge-percentage    = <10>;
-        richtek,battery-regulation-voltage  = <4200000>;
-        richtek,boost-output-voltage       = <5050000>;
+        richtek,output-charge-current = <500000>;
+        richtek,end-of-charge-percentage = <10>;
+        richtek,battery-regulation-voltage = <4200000>;
+        richtek,boost-output-voltage = <5050000>;
 
         richtek,min-input-voltage-regulation = <4500000>;
         richtek,avg-input-current-regulation = <500000>;
index 697333a56d5e228615f581ef74dbfc66535d03d0..75bc20b95688f6b7ab74579b8a859b8cdbc2a00c 100644 (file)
@@ -3,16 +3,20 @@ Specifying wakeup capability for devices
 
 Any device nodes
 ----------------
-Nodes that describe devices which has wakeup capability must contain an
+Nodes that describe devices which have wakeup capability may contain a
 "wakeup-source" boolean property.
 
-Also, if device is marked as a wakeup source, then all the primary
-interrupt(s) can be used as wakeup interrupt(s).
+If the device is marked as a wakeup-source, interrupt wake capability depends
+on the device specific "interrupt-names" property. If no interrupts are labeled
+as wake capable, then it is up to the device to determine which interrupts can
+wake the system.
 
-However if the devices have dedicated interrupt as the wakeup source
-then they need to specify/identify the same using device specific
-interrupt name. In such cases only that interrupt can be used as wakeup
-interrupt.
+However if a device has a dedicated interrupt as the wakeup source, then it
+needs to specify/identify it using a device specific interrupt name. In such
+cases only that interrupt can be used as a wakeup interrupt.
+
+While various legacy interrupt names exist, new devices should use "wakeup" as
+the canonical interrupt name.
 
 List of legacy properties and respective binding document
 ---------------------------------------------------------
index 153e146df7d4b24af4de004369f372585b9d93ff..afcdeed4e88af625ea4f0f371cc11ffdbe824859 100644 (file)
@@ -8,7 +8,6 @@ title: MediaTek DISP_PWM Controller
 
 maintainers:
   - Jitao Shi <jitao.shi@mediatek.com>
-  - Xinlei Lee <xinlei.lee@mediatek.com>
 
 allOf:
   - $ref: pwm.yaml#
diff --git a/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt b/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
deleted file mode 100644 (file)
index 25ecfe1..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-* OMAP PWM for dual-mode timers
-
-Required properties:
-- compatible: Shall contain "ti,omap-dmtimer-pwm".
-- ti,timers: phandle to PWM capable OMAP timer. See timer/ti,timer-dm.yaml for info
-  about these timers.
-- #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of
-  the cells format.
-
-Optional properties:
-- ti,prescaler: Should be a value between 0 and 7, see the timers datasheet
-- ti,clock-source: Set dmtimer parent clock, values between 0 and 2:
-  - 0x00 - high-frequency system clock (timer_sys_ck)
-  - 0x01 - 32-kHz always-on clock (timer_32k_ck)
-  - 0x02 - external clock (timer_ext_ck, OMAP2 only)
-
-Example:
-       pwm9: dmtimer-pwm@9 {
-               compatible = "ti,omap-dmtimer-pwm";
-               ti,timers = <&timer9>;
-               #pwm-cells = <3>;
-       };
diff --git a/Documentation/devicetree/bindings/pwm/ti,omap-dmtimer-pwm.yaml b/Documentation/devicetree/bindings/pwm/ti,omap-dmtimer-pwm.yaml
new file mode 100644 (file)
index 0000000..1e8e094
--- /dev/null
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/ti,omap-dmtimer-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI dual mode timer PWM controller
+
+maintainers:
+  - Tony Lindgren <tony@atomide.com>
+
+description:
+  TI dual mode timer instances have an IO pin for PWM capability
+
+allOf:
+  - $ref: pwm.yaml#
+
+properties:
+  compatible:
+    const: ti,omap-dmtimer-pwm
+
+  "#pwm-cells":
+    const: 3
+
+  ti,timers:
+    description: Timer instance phandle for the PWM
+    $ref: /schemas/types.yaml#/definitions/phandle
+
+  ti,prescaler:
+    description: |
+      Legacy clock prescaler for timer. The timer counter is prescaled
+      with 2^n where n is the prescaler.
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [ 0, 1, 2, 3, 4, 5, 6, 7 ]
+    deprecated: true
+
+  ti,clock-source:
+    description: |
+      Legacy clock for timer, please use assigned-clocks instead.
+      0x00 - high-frequency system clock (timer_sys_ck)
+      0x01 - 32-kHz always-on clock (timer_32k_ck)
+      0x02 - external clock (timer_ext_ck, OMAP2 only)
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [ 0, 1, 2 ]
+    deprecated: true
+
+required:
+  - compatible
+  - ti,timers
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    pwm9: pwm {
+      compatible = "ti,omap-dmtimer-pwm";
+      ti,timers = <&timer9>;
+      #pwm-cells = <3>;
+    };
index 0221397eb51ec55d1dfeb7ac3c73d09bf008f314..f825ee9efd810515ff904937b33130b9e2d077b8 100644 (file)
@@ -62,8 +62,8 @@ examples:
              regulator-name = "buck1";
              regulator-min-microvolt = <600000>;
              regulator-max-microvolt = <2187500>;
-             regulator-min-microamp  = <3800000>;
-             regulator-max-microamp  = <6800000>;
+             regulator-min-microamp = <3800000>;
+             regulator-max-microamp = <6800000>;
              regulator-boot-on;
             };
 
index 6de5b027f990389b9cf734d67245e5efdf02487b..0d34af98403f84f9625175da62f8e34e6b75e4af 100644 (file)
@@ -98,8 +98,8 @@ examples:
              regulator-name = "buck1";
              regulator-min-microvolt = <400000>;
              regulator-max-microvolt = <3587500>;
-             regulator-min-microamp  = <460000>;
-             regulator-max-microamp  = <7600000>;
+             regulator-min-microamp = <460000>;
+             regulator-max-microamp = <7600000>;
              regulator-boot-on;
              mps,buck-ovp-disable;
              mps,buck-phase-delay = /bits/ 8 <2>;
index 30632efdad8bb2ce6605eb4ab1d8bb881a805f34..df36e29d974ca08850df5b0dc864f33d52d93470 100644 (file)
@@ -113,10 +113,10 @@ examples:
     };
 
     imx7d-cm4 {
-      compatible       = "fsl,imx7d-cm4";
-      memory-region    = <&m4_reserved_sysmem1>, <&m4_reserved_sysmem2>;
-      syscon           = <&src>;
-      clocks           = <&clks IMX7D_ARM_M4_ROOT_CLK>;
+      compatible = "fsl,imx7d-cm4";
+      memory-region = <&m4_reserved_sysmem1>, <&m4_reserved_sysmem2>;
+      syscon = <&src>;
+      clocks = <&clks IMX7D_ARM_M4_ROOT_CLK>;
     };
 
   - |
index f10f329677d84d06baa5d7fc6f0f68b559b7bcf0..c054b84fdcd5c51a31a628327bf2e99b851ff4d6 100644 (file)
@@ -18,7 +18,10 @@ properties:
     enum:
       - qcom,sc7180-adsp-pas
       - qcom,sc7180-mpss-pas
+      - qcom,sc7280-adsp-pas
+      - qcom,sc7280-cdsp-pas
       - qcom,sc7280-mpss-pas
+      - qcom,sc7280-wpss-pas
 
   reg:
     maxItems: 1
@@ -75,6 +78,7 @@ allOf:
         compatible:
           enum:
             - qcom,sc7180-adsp-pas
+            - qcom,sc7280-adsp-pas
     then:
       properties:
         power-domains:
@@ -109,6 +113,23 @@ allOf:
         compatible:
           enum:
             - qcom,sc7280-mpss-pas
+    then:
+      properties:
+        power-domains:
+          items:
+            - description: CX power domain
+            - description: MSS power domain
+        power-domain-names:
+          items:
+            - const: cx
+            - const: mss
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sc7280-cdsp-pas
+            - qcom,sc7280-wpss-pas
     then:
       properties:
         power-domains:
index f392e367d673f5c2c8853379c1955b906211c41e..9d8670c00e3b3bdea5d2196b98538d97465abf0d 100644 (file)
@@ -32,6 +32,7 @@ properties:
     oneOf:
       - items:
           - enum:
+              - amd,mbv32
               - andestech,ax45mp
               - canaan,k210
               - sifive,bullet0
@@ -62,8 +63,8 @@ properties:
 
   mmu-type:
     description:
-      Identifies the MMU address translation mode used on this
-      hart.  These values originate from the RISC-V Privileged
+      Identifies the largest MMU address translation mode supported by
+      this hart.  These values originate from the RISC-V Privileged
       Specification document, available from
       https://riscv.org/specifications/
     $ref: /schemas/types.yaml#/definitions/string
@@ -79,6 +80,11 @@ properties:
     description:
       The blocksize in bytes for the Zicbom cache operations.
 
+  riscv,cbop-block-size:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      The blocksize in bytes for the Zicbop cache operations.
+
   riscv,cboz-block-size:
     $ref: /schemas/types.yaml#/definitions/uint32
     description:
index c91ab0e46648204e906982dedf999f90cc72a58f..63d81dc895e5ce4c08715ce1d6bf0958a757ca86 100644 (file)
@@ -48,7 +48,7 @@ properties:
       insensitive, letters in the riscv,isa string must be all
       lowercase.
     $ref: /schemas/types.yaml#/definitions/string
-    pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[a-z])+)?(?:_[hsxz](?:[a-z])+)*$
+    pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[0-9a-z])+)?(?:_[hsxz](?:[0-9a-z])+)*$
     deprecated: true
 
   riscv,isa-base:
@@ -171,6 +171,12 @@ properties:
             memory types as ratified in the 20191213 version of the privileged
             ISA specification.
 
+        - const: zacas
+          description: |
+            The Zacas extension for Atomic Compare-and-Swap (CAS) instructions
+            is supported as ratified at commit 5059e0ca641c ("update to
+            ratified") of the riscv-zacas.
+
         - const: zba
           description: |
             The standard Zba bit-manipulation extension for address generation
@@ -190,12 +196,111 @@ properties:
             multiplication as ratified at commit 6d33919 ("Merge pull request
             #158 from hirooih/clmul-fix-loop-end-condition") of riscv-bitmanip.
 
+        - const: zbkb
+          description:
+            The standard Zbkb bitmanip instructions for cryptography as ratified
+            in version 1.0 of RISC-V Cryptography Extensions Volume I
+            specification.
+
+        - const: zbkc
+          description:
+            The standard Zbkc carry-less multiply instructions as ratified
+            in version 1.0 of RISC-V Cryptography Extensions Volume I
+            specification.
+
+        - const: zbkx
+          description:
+            The standard Zbkx crossbar permutation instructions as ratified
+            in version 1.0 of RISC-V Cryptography Extensions Volume I
+            specification.
+
         - const: zbs
           description: |
             The standard Zbs bit-manipulation extension for single-bit
             instructions as ratified at commit 6d33919 ("Merge pull request #158
             from hirooih/clmul-fix-loop-end-condition") of riscv-bitmanip.
 
+        - const: zfa
+          description:
+            The standard Zfa extension for additional floating point
+            instructions, as ratified in commit 056b6ff ("Zfa is ratified") of
+            riscv-isa-manual.
+
+        - const: zfh
+          description:
+            The standard Zfh extension for 16-bit half-precision binary
+            floating-point instructions, as ratified in commit 64074bc ("Update
+            version numbers for Zfh/Zfinx") of riscv-isa-manual.
+
+        - const: zfhmin
+          description:
+            The standard Zfhmin extension which provides minimal support for
+            16-bit half-precision binary floating-point instructions, as ratified
+            in commit 64074bc ("Update version numbers for Zfh/Zfinx") of
+            riscv-isa-manual.
+
+        - const: zk
+          description:
+            The standard Zk Standard Scalar cryptography extension as ratified
+            in version 1.0 of RISC-V Cryptography Extensions Volume I
+            specification.
+
+        - const: zkn
+          description:
+            The standard Zkn NIST algorithm suite extensions as ratified in
+            version 1.0 of RISC-V Cryptography Extensions Volume I
+            specification.
+
+        - const: zknd
+          description: |
+            The standard Zknd for NIST suite: AES decryption instructions as
+            ratified in version 1.0 of RISC-V Cryptography Extensions Volume I
+            specification.
+
+        - const: zkne
+          description: |
+            The standard Zkne for NIST suite: AES encryption instructions as
+            ratified in version 1.0 of RISC-V Cryptography Extensions Volume I
+            specification.
+
+        - const: zknh
+          description: |
+            The standard Zknh for NIST suite: hash function instructions as
+            ratified in version 1.0 of RISC-V Cryptography Extensions Volume I
+            specification.
+
+        - const: zkr
+          description:
+            The standard Zkr entropy source extension as ratified in version
+            1.0 of RISC-V Cryptography Extensions Volume I specification.
+            This string being present means that the CSR associated to this
+            extension is accessible at the privilege level to which that
+            device-tree has been provided.
+
+        - const: zks
+          description:
+            The standard Zks ShangMi algorithm suite extensions as ratified in
+            version 1.0 of RISC-V Cryptography Extensions Volume I
+            specification.
+
+        - const: zksed
+          description: |
+            The standard Zksed for ShangMi suite: SM4 block cipher instructions
+            as ratified in version 1.0 of RISC-V Cryptography Extensions
+            Volume I specification.
+
+        - const: zksh
+          description: |
+            The standard Zksh for ShangMi suite: SM3 hash function instructions
+            as ratified in version 1.0 of RISC-V Cryptography Extensions
+            Volume I specification.
+
+        - const: zkt
+          description:
+            The standard Zkt for data independent execution latency as ratified
+            in version 1.0 of RISC-V Cryptography Extensions Volume I
+            specification.
+
         - const: zicbom
           description:
             The standard Zicbom extension for base cache management operations as
@@ -246,6 +351,12 @@ properties:
             The standard Zihintpause extension for pause hints, as ratified in
             commit d8ab5c7 ("Zihintpause is ratified") of the riscv-isa-manual.
 
+        - const: zihintntl
+          description:
+            The standard Zihintntl extension for non-temporal locality hints, as
+            ratified in commit 0dc91f5 ("Zihintntl is ratified") of the
+            riscv-isa-manual.
+
         - const: zihpm
           description:
             The standard Zihpm extension for hardware performance counters, as
@@ -258,5 +369,113 @@ properties:
             in commit 2e5236 ("Ztso is now ratified.") of the
             riscv-isa-manual.
 
+        - const: zvbb
+          description:
+            The standard Zvbb extension for vectored basic bit-manipulation
+            instructions, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvbc
+          description:
+            The standard Zvbc extension for vectored carryless multiplication
+            instructions, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvfh
+          description:
+            The standard Zvfh extension for vectored half-precision
+            floating-point instructions, as ratified in commit e2ccd05
+            ("Remove draft warnings from Zvfh[min]") of riscv-v-spec.
+
+        - const: zvfhmin
+          description:
+            The standard Zvfhmin extension for vectored minimal half-precision
+            floating-point instructions, as ratified in commit e2ccd05
+            ("Remove draft warnings from Zvfh[min]") of riscv-v-spec.
+
+        - const: zvkb
+          description:
+            The standard Zvkb extension for vector cryptography bit-manipulation
+            instructions, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvkg
+          description:
+            The standard Zvkg extension for vector GCM/GMAC instructions, as
+            ratified in commit 56ed795 ("Update riscv-crypto-spec-vector.adoc")
+            of riscv-crypto.
+
+        - const: zvkn
+          description:
+            The standard Zvkn extension for NIST algorithm suite instructions, as
+            ratified in commit 56ed795 ("Update riscv-crypto-spec-vector.adoc")
+            of riscv-crypto.
+
+        - const: zvknc
+          description:
+            The standard Zvknc extension for NIST algorithm suite with carryless
+            multiply instructions, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvkned
+          description:
+            The standard Zvkned extension for Vector AES block cipher
+            instructions, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvkng
+          description:
+            The standard Zvkng extension for NIST algorithm suite with GCM
+            instructions, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvknha
+          description: |
+            The standard Zvknha extension for NIST suite: vector SHA-2 secure,
+            hash (SHA-256 only) instructions, as ratified in commit
+            56ed795 ("Update riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvknhb
+          description: |
+            The standard Zvknhb extension for NIST suite: vector SHA-2 secure,
+            hash (SHA-256 and SHA-512) instructions, as ratified in commit
+            56ed795 ("Update riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvks
+          description:
+            The standard Zvks extension for ShangMi algorithm suite
+            instructions, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvksc
+          description:
+            The standard Zvksc extension for ShangMi algorithm suite with
+            carryless multiplication instructions, as ratified in commit 56ed795
+            ("Update riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvksed
+          description: |
+            The standard Zvksed extension for ShangMi suite: SM4 block cipher
+            instructions, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvksh
+          description: |
+            The standard Zvksh extension for ShangMi suite: SM3 secure hash
+            instructions, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvksg
+          description:
+            The standard Zvksg extension for ShangMi algorithm suite with GCM
+            instructions, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
+        - const: zvkt
+          description:
+            The standard Zvkt extension for vector data-independent execution
+            latency, as ratified in commit 56ed795 ("Update
+            riscv-crypto-spec-vector.adoc") of riscv-crypto.
+
 additionalProperties: true
 ...
diff --git a/Documentation/devicetree/bindings/rtc/adi,max31335.yaml b/Documentation/devicetree/bindings/rtc/adi,max31335.yaml
new file mode 100644 (file)
index 0000000..0125cf6
--- /dev/null
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/adi,max31335.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices MAX31335 RTC
+
+maintainers:
+  - Antoniu Miclaus <antoniu.miclaus@analog.com>
+
+description:
+  Analog Devices MAX31335 I2C RTC ±2ppm Automotive Real-Time Clock with
+  Integrated MEMS Resonator.
+
+allOf:
+  - $ref: rtc.yaml#
+
+properties:
+  compatible:
+    const: adi,max31335
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  "#clock-cells":
+    description:
+      RTC can be used as a clock source through its clock output pin.
+    const: 0
+
+  adi,tc-diode:
+    description:
+      Select the diode configuration for the trickle charger.
+      schottky - Schottky diode in series.
+      standard+schottky - standard diode + Schottky diode in series.
+    enum: [schottky, standard+schottky]
+
+  trickle-resistor-ohms:
+    description:
+      Selected resistor for trickle charger. Should be specified if trickle
+      charger should be enabled.
+    enum: [3000, 6000, 11000]
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        rtc@68 {
+            compatible = "adi,max31335";
+            reg = <0x68>;
+            pinctrl-0 = <&rtc_nint_pins>;
+            interrupts-extended = <&gpio1 16 IRQ_TYPE_LEVEL_HIGH>;
+            aux-voltage-chargeable = <1>;
+            trickle-resistor-ohms = <6000>;
+            adi,tc-diode = "schottky";
+        };
+    };
+...
index 1df7c45d95c18ef90c8e996be5b83bc243099155..b770149c5fd677137bbeee87178d4188e5a0b59b 100644 (file)
@@ -29,6 +29,8 @@ properties:
 
   trickle-diode-disable: true
 
+  wakeup-source: true
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/rtc/nuvoton,ma35d1-rtc.yaml b/Documentation/devicetree/bindings/rtc/nuvoton,ma35d1-rtc.yaml
new file mode 100644 (file)
index 0000000..5e4ade8
--- /dev/null
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/nuvoton,ma35d1-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton MA35D1 Real Time Clock
+
+maintainers:
+  - Min-Jen Chen <mjchen@nuvoton.com>
+
+allOf:
+  - $ref: rtc.yaml#
+
+properties:
+  compatible:
+    enum:
+      - nuvoton,ma35d1-rtc
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/nuvoton,ma35d1-clk.h>
+    rtc@40410000 {
+        compatible = "nuvoton,ma35d1-rtc";
+        reg = <0x40410000 0x200>;
+        interrupts = <GIC_SPI 5 IRQ_TYPE_EDGE_RISING>;
+        clocks = <&clk RTC_GATE>;
+    };
+
+...
index b95a69cc9ae0fef4e4111bade35faa4a7ddb339f..d274bb7a534b55ef83a05da2c2b8446f38342c88 100644 (file)
@@ -61,27 +61,27 @@ additionalProperties: false
 
 examples:
   - |
+    #include <dt-bindings/interrupt-controller/irq.h>
     #include <dt-bindings/spmi/spmi.h>
-    spmi_bus: spmi@c440000 {
-      reg = <0x0c440000 0x1100>;
-      #address-cells = <2>;
-      #size-cells = <0>;
-      pmicintc: pmic@0 {
-        reg = <0x0 SPMI_USID>;
-        compatible = "qcom,pm8921";
-        interrupts = <104 8>;
-        #interrupt-cells = <2>;
-        interrupt-controller;
-        #address-cells = <1>;
+
+    spmi {
+        #address-cells = <2>;
         #size-cells = <0>;
 
-        pm8921_rtc: rtc@11d {
-          compatible = "qcom,pm8921-rtc";
-          reg = <0x11d>;
-          interrupts = <0x27 0>;
-          nvmem-cells = <&rtc_offset>;
-          nvmem-cell-names = "offset";
+        pmic@0 {
+            compatible = "qcom,pm8941", "qcom,spmi-pmic";
+            reg = <0x0 SPMI_USID>;
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            rtc@6000 {
+                compatible = "qcom,pm8941-rtc";
+                reg = <0x6000>, <0x6100>;
+                reg-names = "rtc", "alarm";
+                interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
+                nvmem-cells = <&rtc_offset>;
+                nvmem-cell-names = "offset";
+            };
         };
-      };
     };
 ...
diff --git a/Documentation/devicetree/bindings/security/tpm/google,cr50.txt b/Documentation/devicetree/bindings/security/tpm/google,cr50.txt
deleted file mode 100644 (file)
index cd69c2e..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-* H1 Secure Microcontroller with Cr50 Firmware on SPI Bus.
-
-H1 Secure Microcontroller running Cr50 firmware provides several
-functions, including TPM-like functionality. It communicates over
-SPI using the FIFO protocol described in the PTP Spec, section 6.
-
-Required properties:
-- compatible: Should be "google,cr50".
-- spi-max-frequency: Maximum SPI frequency.
-
-Example:
-
-&spi0 {
-       tpm@0 {
-               compatible = "google,cr50";
-               reg = <0>;
-               spi-max-frequency = <800000>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/security/tpm/ibmvtpm.txt b/Documentation/devicetree/bindings/security/tpm/ibmvtpm.txt
deleted file mode 100644 (file)
index d89f999..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-* Device Tree Bindings for IBM Virtual Trusted Platform Module(vtpm)
-
-Required properties:
-
-- compatible            : property name that conveys the platform architecture
-                          identifiers, as 'IBM,vtpm'
-- device_type           : specifies type of virtual device
-- interrupts            : property specifying the interrupt source number and
-                          sense code associated with this virtual I/O Adapters
-- ibm,my-drc-index      : integer index for the connector between the device
-                          and its parent - present only if Dynamic
-                          Reconfiguration(DR) Connector is enabled
-- ibm,#dma-address-cells: specifies the number of cells that are used to
-                          encode the physical address field of dma-window
-                          properties
-- ibm,#dma-size-cells   : specifies the number of cells that are used to
-                          encode the size field of dma-window properties
-- ibm,my-dma-window     : specifies DMA window associated with this virtual
-                          IOA
-- ibm,loc-code          : specifies the unique and persistent location code
-                          associated with this virtual I/O Adapters
-- linux,sml-base        : 64-bit base address of the reserved memory allocated
-                          for the firmware event log
-- linux,sml-size        : size of the memory allocated for the firmware event log
-
-Example (IBM Virtual Trusted Platform Module)
----------------------------------------------
-
-                vtpm@30000003 {
-                        ibm,#dma-size-cells = <0x2>;
-                        compatible = "IBM,vtpm";
-                        device_type = "IBM,vtpm";
-                        ibm,my-drc-index = <0x30000003>;
-                        ibm,#dma-address-cells = <0x2>;
-                        linux,sml-base = <0xc60e 0x0>;
-                        interrupts = <0xa0003 0x0>;
-                        ibm,my-dma-window = <0x10000003 0x0 0x0 0x0 0x10000000>;
-                        ibm,loc-code = "U8286.41A.10082DV-V3-C3";
-                        reg = <0x30000003>;
-                        linux,sml-size = <0xbce10200>;
-                };
diff --git a/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt b/Documentation/devicetree/bindings/security/tpm/st33zp24-i2c.txt
deleted file mode 100644 (file)
index 0dc121b..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-* STMicroelectronics SAS. ST33ZP24 TPM SoC
-
-Required properties:
-- compatible: Should be "st,st33zp24-i2c".
-- clock-frequency: I²C work frequency.
-- reg: address on the bus
-
-Optional ST33ZP24 Properties:
-- interrupts: GPIO interrupt to which the chip is connected
-- lpcpd-gpios: Output GPIO pin used for ST33ZP24 power management D1/D2 state.
-If set, power must be present when the platform is going into sleep/hibernate mode.
-
-Optional SoC Specific Properties:
-- pinctrl-names: Contains only one value - "default".
-- pintctrl-0: Specifies the pin control groups used for this controller.
-
-Example (for ARM-based BeagleBoard xM with ST33ZP24 on I2C2):
-
-&i2c2 {
-
-
-        st33zp24: st33zp24@13 {
-
-                compatible = "st,st33zp24-i2c";
-
-                reg = <0x13>;
-                clock-frequency = <400000>;
-
-                interrupt-parent = <&gpio5>;
-                interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
-
-                lpcpd-gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
-        };
-};
diff --git a/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt b/Documentation/devicetree/bindings/security/tpm/st33zp24-spi.txt
deleted file mode 100644 (file)
index 3719897..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-* STMicroelectronics SAS. ST33ZP24 TPM SoC
-
-Required properties:
-- compatible: Should be "st,st33zp24-spi".
-- spi-max-frequency: Maximum SPI frequency (<= 10000000).
-
-Optional ST33ZP24 Properties:
-- interrupts: GPIO interrupt to which the chip is connected
-- lpcpd-gpios: Output GPIO pin used for ST33ZP24 power management D1/D2 state.
-If set, power must be present when the platform is going into sleep/hibernate mode.
-
-Optional SoC Specific Properties:
-- pinctrl-names: Contains only one value - "default".
-- pintctrl-0: Specifies the pin control groups used for this controller.
-
-Example (for ARM-based BeagleBoard xM with ST33ZP24 on SPI4):
-
-&mcspi4 {
-
-
-        st33zp24@0 {
-
-                compatible = "st,st33zp24-spi";
-
-                spi-max-frequency = <10000000>;
-
-                interrupt-parent = <&gpio5>;
-                interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
-
-                lpcpd-gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
-        };
-};
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt
deleted file mode 100644 (file)
index a65d7b7..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-* Device Tree Bindings for I2C based Trusted Platform Module(TPM)
-
-Required properties:
-
-- compatible     : 'manufacturer,model', eg. nuvoton,npct650
-- label          : human readable string describing the device, eg. "tpm"
-- linux,sml-base : 64-bit base address of the reserved memory allocated for
-                   the firmware event log
-- linux,sml-size : size of the memory allocated for the firmware event log
-
-Optional properties:
-
-- powered-while-suspended: present when the TPM is left powered on between
-                           suspend and resume (makes the suspend/resume
-                           callbacks do nothing).
-
-Example (for OpenPower Systems with Nuvoton TPM 2.0 on I2C)
-----------------------------------------------------------
-
-tpm@57 {
-       reg = <0x57>;
-       label = "tpm";
-       compatible = "nuvoton,npct650", "nuvoton,npct601";
-       linux,sml-base = <0x7f 0xfd450000>;
-       linux,sml-size = <0x10000>;
-};
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt b/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
deleted file mode 100644 (file)
index 7c63044..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-Trusted Computing Group MMIO Trusted Platform Module
-
-The TCG defines multi vendor standard for accessing a TPM chip, this
-is the standard protocol defined to access the TPM via MMIO. Typically
-this interface will be implemented over Intel's LPC bus.
-
-Refer to the 'TCG PC Client Specific TPM Interface Specification (TIS)' TCG
-publication for the specification.
-
-Required properties:
-
-- compatible: should contain a string below for the chip, followed by
-              "tcg,tpm-tis-mmio". Valid chip strings are:
-                 * "atmel,at97sc3204"
-- reg: The location of the MMIO registers, should be at least 0x5000 bytes
-- interrupts: An optional interrupt indicating command completion.
-
-Example:
-
-       tpm_tis@90000 {
-                               compatible = "atmel,at97sc3204", "tcg,tpm-tis-mmio";
-                               reg = <0x90000 0x5000>;
-                               interrupt-parent = <&EIC0>;
-                               interrupts = <1 2>;
-       };
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm_tis_spi.txt b/Documentation/devicetree/bindings/security/tpm/tpm_tis_spi.txt
deleted file mode 100644 (file)
index b800667..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-Required properties:
-- compatible: should be one of the following
-    "st,st33htpm-spi"
-    "infineon,slb9670"
-    "tcg,tpm_tis-spi"
-- spi-max-frequency: Maximum SPI frequency (depends on TPMs).
-
-Optional SoC Specific Properties:
-- pinctrl-names: Contains only one value - "default".
-- pintctrl-0: Specifies the pin control groups used for this controller.
-
-Example (for ARM-based BeagleBoard xM with TPM_TIS on SPI4):
-
-&mcspi4 {
-
-
-        tpm_tis@0 {
-
-                compatible = "tcg,tpm_tis-spi";
-
-                spi-max-frequency = <10000000>;
-        };
-};
diff --git a/Documentation/devicetree/bindings/serial/arm,dcc.yaml b/Documentation/devicetree/bindings/serial/arm,dcc.yaml
new file mode 100644 (file)
index 0000000..fd05893
--- /dev/null
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/arm,dcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM DCC (Data communication channel) serial emulation
+
+maintainers:
+  - Michal Simek <michal.simek@amd.com>
+
+description: |
+  ARM DCC (Data communication channel) serial emulation interface available
+  via JTAG can be also used as one of serial line tightly coupled with every
+  ARM CPU available in the system.
+
+properties:
+  compatible:
+    const: arm,dcc
+
+required:
+  - compatible
+
+additionalProperties: false
+
+examples:
+  - |
+    serial {
+      compatible = "arm,dcc";
+    };
index 920539926d7e237d8b84694ec174f62552842e25..7a105551fa6a89a3e2434a5410d315e409bccab5 100644 (file)
@@ -13,7 +13,7 @@ description: |
   https://www.nxp.com/webapp/Download?colCode=S32V234RM.
 
 maintainers:
-  - Chester Lin <clin@suse.com>
+  - Chester Lin <chester62515@gmail.com>
 
 allOf:
   - $ref: serial.yaml#
index 83035553044a23206940a7f9662cff1b183d8b9a..9c6dc16f88a6f0fd4a8c8d9d57bdd077019552bc 100644 (file)
@@ -9,10 +9,6 @@ title: Freescale i.MX Universal Asynchronous Receiver/Transmitter (UART)
 maintainers:
   - Fabio Estevam <festevam@gmail.com>
 
-allOf:
-  - $ref: serial.yaml#
-  - $ref: rs485.yaml#
-
 properties:
   compatible:
     oneOf:
@@ -68,7 +64,11 @@ properties:
       - const: tx
 
   interrupts:
-    maxItems: 1
+    items:
+      - description: UART RX Interrupt
+      - description: UART TX Interrupt
+      - description: UART RTS Interrupt
+    minItems: 1
 
   wakeup-source: true
 
@@ -110,6 +110,25 @@ required:
   - clock-names
   - interrupts
 
+allOf:
+  - $ref: serial.yaml#
+  - $ref: rs485.yaml#
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: fsl,imx1-uart
+    then:
+      properties:
+        interrupts:
+          minItems: 3
+          maxItems: 3
+    else:
+      properties:
+        interrupts:
+          maxItems: 1
+
 unevaluatedProperties: false
 
 examples:
index ee52bf8e8917d482253edefe0ea2260a022bc461..e0fa363ad7e2e5cf17126f9bf38fd3e1f183b6d8 100644 (file)
@@ -48,9 +48,17 @@ properties:
       - const: tx
       - const: rx
 
+  interconnects:
+    maxItems: 1
+
   interrupts:
     maxItems: 1
 
+  operating-points-v2: true
+
+  power-domains:
+    maxItems: 1
+
   qcom,rx-crci:
     $ref: /schemas/types.yaml#/definitions/uint32
     description:
@@ -99,7 +107,9 @@ unevaluatedProperties: false
 
 examples:
   - |
+    #include <dt-bindings/interconnect/qcom,msm8996.h>
     #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/power/qcom-rpmpd.h>
 
     serial@f991e000 {
         compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
@@ -109,4 +119,7 @@ examples:
         clock-names = "core", "iface";
         dmas = <&dma0 0>, <&dma0 1>;
         dma-names = "tx", "rx";
+        power-domains = <&rpmpd MSM8996_VDDCX>;
+        operating-points-v2 = <&uart_opp_table>;
+        interconnects = <&pnoc MASTER_BLSP_1 &bimc SLAVE_EBI_CH0>;
     };
index 9f7305200c47e96fc7f9e4a938555557202ffc08..64d3db6e54e5c3fdb758feb0773074d877d4492a 100644 (file)
@@ -17,7 +17,7 @@ properties:
     oneOf:
       - items:
           - enum:
-              - renesas,r9a07g043-sci     # RZ/G2UL
+              - renesas,r9a07g043-sci     # RZ/G2UL and RZ/Five
               - renesas,r9a07g044-sci     # RZ/G2{L,LC}
               - renesas,r9a07g054-sci     # RZ/V2L
           - const: renesas,sci            # generic SCI compatible UART
index 17c553123f96ed92db1eae340b89edbdec1e016f..1001d2a6ace85fbc37304304049b1e2cdaad781d 100644 (file)
@@ -11,6 +11,7 @@ maintainers:
 
 allOf:
   - $ref: serial.yaml#
+  - $ref: rs485.yaml#
 
 properties:
   compatible:
index 28ff77aa86c8543ae60bae5e30b1e3842a4b57ee..f4dbb6dc2b6ef2e1954f9268eca32ab49014c037 100644 (file)
@@ -20,6 +20,7 @@ properties:
               - sprd,sc9860-uart
               - sprd,sc9863a-uart
               - sprd,ums512-uart
+              - sprd,ums9620-uart
           - const: sprd,sc9836-uart
       - const: sprd,sc9836-uart
 
index 8108c564dd78a84a1d869a60b975dcb51e6480ff..aa32dc950e72ccdaf7fb1ac7f759d57a855fc9b6 100644 (file)
@@ -22,6 +22,7 @@ properties:
       - const: allwinner,sun6i-a31-spdif
       - const: allwinner,sun8i-h3-spdif
       - const: allwinner,sun50i-h6-spdif
+      - const: allwinner,sun50i-h616-spdif
       - items:
           - const: allwinner,sun8i-a83t-spdif
           - const: allwinner,sun8i-h3-spdif
@@ -62,6 +63,8 @@ allOf:
             enum:
               - allwinner,sun6i-a31-spdif
               - allwinner,sun8i-h3-spdif
+              - allwinner,sun50i-h6-spdif
+              - allwinner,sun50i-h616-spdif
 
     then:
       required:
@@ -73,7 +76,7 @@ allOf:
           contains:
             enum:
               - allwinner,sun8i-h3-spdif
-              - allwinner,sun50i-h6-spdif
+              - allwinner,sun50i-h616-spdif
 
     then:
       properties:
index f01c0dde0cf740e6ff9d500ddedb1e27d320f919..d28c102c0ce7f0fe94577e45b54daa7496331e7f 100644 (file)
@@ -18,7 +18,6 @@ description: |
 
   Specifications about the audio amplifier can be found at:
     https://www.ti.com/lit/gpn/tas2562
-    https://www.ti.com/lit/gpn/tas2563
     https://www.ti.com/lit/gpn/tas2564
     https://www.ti.com/lit/gpn/tas2110
 
@@ -29,7 +28,6 @@ properties:
   compatible:
     enum:
       - ti,tas2562
-      - ti,tas2563
       - ti,tas2564
       - ti,tas2110
 
index a69e6c223308e637de51d512cb18f210441dcc9e..9762386892495149c00259c59be3e04a3a09d2c6 100644 (file)
@@ -5,36 +5,46 @@
 $id: http://devicetree.org/schemas/sound/ti,tas2781.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: Texas Instruments TAS2781 SmartAMP
+title: Texas Instruments TAS2563/TAS2781 SmartAMP
 
 maintainers:
   - Shenghao Ding <shenghao-ding@ti.com>
 
-description:
-  The TAS2781 is a mono, digital input Class-D audio amplifier
-  optimized for efficiently driving high peak power into small
-  loudspeakers. An integrated on-chip DSP supports Texas Instruments
-  Smart Amp speaker protection algorithm. The integrated speaker
-  voltage and current sense provides for real time
+description: |
+  The TAS2563/TAS2781 is a mono, digital input Class-D audio
+  amplifier optimized for efficiently driving high peak power into
+  small loudspeakers. An integrated on-chip DSP supports Texas
+  Instruments Smart Amp speaker protection algorithm. The
+  integrated speaker voltage and current sense provides for real time
   monitoring of loudspeaker behavior.
 
-allOf:
-  - $ref: dai-common.yaml#
+  Specifications about the audio amplifier can be found at:
+    https://www.ti.com/lit/gpn/tas2563
+    https://www.ti.com/lit/gpn/tas2781
 
 properties:
   compatible:
-    enum:
-      - ti,tas2781
+    description: |
+      ti,tas2563: 6.1-W Boosted Class-D Audio Amplifier With Integrated
+      DSP and IV Sense, 16/20/24/32bit stereo I2S or multichannel TDM.
+
+      ti,tas2781: 24-V Class-D Amplifier with Real Time Integrated Speaker
+      Protection and Audio Processing, 16/20/24/32bit stereo I2S or
+      multichannel TDM.
+    oneOf:
+      - items:
+          - enum:
+              - ti,tas2563
+          - const: ti,tas2781
+      - enum:
+          - ti,tas2781
 
   reg:
     description:
-      I2C address, in multiple tas2781s case, all the i2c address
+      I2C address, in multiple-AMP case, all the i2c address
       aggregate as one Audio Device to support multiple audio slots.
     maxItems: 8
     minItems: 1
-    items:
-      minimum: 0x38
-      maximum: 0x3f
 
   reset-gpios:
     maxItems: 1
@@ -49,6 +59,44 @@ required:
   - compatible
   - reg
 
+allOf:
+  - $ref: dai-common.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - ti,tas2563
+    then:
+      properties:
+        reg:
+          description:
+            I2C address, in multiple-AMP case, all the i2c address
+            aggregate as one Audio Device to support multiple audio slots.
+          maxItems: 4
+          minItems: 1
+          items:
+            minimum: 0x4c
+            maximum: 0x4f
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - ti,tas2781
+    then:
+      properties:
+        reg:
+          description:
+            I2C address, in multiple-AMP case, all the i2c address
+            aggregate as one Audio Device to support multiple audio slots.
+          maxItems: 8
+          minItems: 1
+          items:
+            minimum: 0x38
+            maximum: 0x3f
+
 additionalProperties: false
 
 examples:
index 4b6c20fc819434883fc68ba79c153f67ac807344..fced6f2d8ecbb35955e3800f19d58980b792a764 100644 (file)
@@ -33,6 +33,7 @@ properties:
               - sifive,fu540-c000-clint # SiFive FU540
               - starfive,jh7100-clint   # StarFive JH7100
               - starfive,jh7110-clint   # StarFive JH7110
+              - starfive,jh8100-clint   # StarFive JH8100
           - const: sifive,clint0        # SiFive CLINT v0 IP block
       - items:
           - enum:
index fbd235650e52cca3dc3e43792e0c730aab65699c..2e92bcdeb423abeca98da6868d5a116615c13bbc 100644 (file)
@@ -17,7 +17,12 @@ properties:
       - const: thead,c900-aclint-mtimer
 
   reg:
-    maxItems: 1
+    items:
+      - description: MTIMECMP Registers
+
+  reg-names:
+    items:
+      - const: mtimecmp
 
   interrupts-extended:
     minItems: 1
@@ -28,6 +33,7 @@ additionalProperties: false
 required:
   - compatible
   - reg
+  - reg-names
   - interrupts-extended
 
 examples:
@@ -39,5 +45,6 @@ examples:
                             <&cpu3intc 7>,
                             <&cpu4intc 7>;
       reg = <0xac000000 0x00010000>;
+      reg-names = "mtimecmp";
     };
 ...
diff --git a/Documentation/devicetree/bindings/tpm/google,cr50.yaml b/Documentation/devicetree/bindings/tpm/google,cr50.yaml
new file mode 100644 (file)
index 0000000..9302e12
--- /dev/null
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/tpm/google,cr50.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Google Security Chip H1 (running Cr50 firmware)
+
+maintainers:
+  - Andrey Pronin <apronin@chromium.org>
+
+description: |
+  Google has designed a family of security chips called "Titan".
+  One member is the H1 built into Chromebooks and running Cr50 firmware:
+  https://www.osfc.io/2018/talks/google-secure-microcontroller-and-ccd-closed-case-debugging/
+
+  The chip provides several functions, including TPM 2.0 like functionality.
+  It communicates over SPI or I²C using the FIFO protocol described in the
+  TCG PC Client Platform TPM Profile Specification for TPM 2.0 (PTP), sec 6:
+  https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+
+properties:
+  compatible:
+    const: google,cr50
+
+allOf:
+  - $ref: tpm-common.yaml#
+
+anyOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+  - $ref: tcg,tpm-tis-i2c.yaml#/properties/reg
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        tpm@0 {
+            reg = <0>;
+            compatible = "google,cr50";
+            spi-max-frequency = <800000>;
+        };
+    };
+
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        tpm@50 {
+            compatible = "google,cr50";
+            reg = <0x50>;
+            interrupts-extended = <&pio 88 IRQ_TYPE_EDGE_FALLING>;
+            pinctrl-names = "default";
+            pinctrl-0 = <&cr50_int>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/tpm/ibm,vtpm.yaml b/Documentation/devicetree/bindings/tpm/ibm,vtpm.yaml
new file mode 100644 (file)
index 0000000..50a3fd3
--- /dev/null
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/tpm/ibm,vtpm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IBM Virtual Trusted Platform Module (vTPM)
+
+maintainers:
+  - Nayna Jain <nayna@linux.ibm.com>
+
+description: |
+  Virtual TPM is used on IBM POWER7+ and POWER8 systems running POWERVM.
+  It is supported through the adjunct partition with firmware release 740
+  or higher.  With vTPM support, each lpar is able to have its own vTPM
+  without the physical TPM hardware.  The TPM functionality is provided by
+  communicating with the vTPM adjunct partition through Hypervisor calls
+  (Hcalls) and Command/Response Queue (CRQ) commands.
+
+properties:
+  compatible:
+    enum:
+      - IBM,vtpm
+      - IBM,vtpm20
+
+  device_type:
+    description:
+      type of virtual device
+    enum:
+      - IBM,vtpm
+      - IBM,vtpm20
+
+  reg:
+    maxItems: 1
+
+  'ibm,#dma-address-cells':
+    description:
+      number of cells that are used to encode the physical address field of
+      dma-window properties
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+
+  'ibm,#dma-size-cells':
+    description:
+      number of cells that are used to encode the size field of
+      dma-window properties
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+
+  ibm,my-dma-window:
+    description:
+      DMA window associated with this virtual I/O Adapter
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 5
+    maxItems: 5
+
+  ibm,my-drc-index:
+    description:
+      integer index for the connector between the device and its parent;
+      present only if Dynamic Reconfiguration (DR) Connector is enabled
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  ibm,loc-code:
+    description:
+      unique and persistent location code associated with this virtual
+      I/O Adapter
+    $ref: /schemas/types.yaml#/definitions/string
+
+required:
+  - compatible
+  - device_type
+  - reg
+  - interrupts
+  - ibm,#dma-address-cells
+  - ibm,#dma-size-cells
+  - ibm,my-dma-window
+  - ibm,my-drc-index
+  - ibm,loc-code
+  - linux,sml-base
+  - linux,sml-size
+
+allOf:
+  - $ref: tpm-common.yaml#
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    soc {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        tpm@30000003 {
+            compatible = "IBM,vtpm";
+            device_type = "IBM,vtpm";
+            reg = <0x30000003>;
+            interrupts = <0xa0003 0x0>;
+            ibm,#dma-address-cells = <0x2>;
+            ibm,#dma-size-cells = <0x2>;
+            ibm,my-dma-window = <0x10000003 0x0 0x0 0x0 0x10000000>;
+            ibm,my-drc-index = <0x30000003>;
+            ibm,loc-code = "U8286.41A.10082DV-V3-C3";
+            linux,sml-base = <0xc60e 0x0>;
+            linux,sml-size = <0xbce10200>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/tpm/microsoft,ftpm.yaml b/Documentation/devicetree/bindings/tpm/microsoft,ftpm.yaml
new file mode 100644 (file)
index 0000000..fdb8196
--- /dev/null
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/tpm/microsoft,ftpm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microsoft firmware-based Trusted Platform Module (fTPM)
+
+maintainers:
+  - Thirupathaiah Annapureddy <thiruan@microsoft.com>
+  - Sasha Levin <sashal@kernel.org>
+
+description: |
+  Commodity CPU architectures, such as ARM and Intel CPUs, have started to
+  offer trusted computing features in their CPUs aimed at displacing dedicated
+  trusted hardware.  Unfortunately, these CPU architectures raise serious
+  challenges to building trusted systems because they omit providing secure
+  resources outside the CPU perimeter.
+
+  Microsoft's firmware-based TPM 2.0 (fTPM) leverages ARM TrustZone to overcome
+  these challenges and provide software with security guarantees similar to
+  those of dedicated trusted hardware.
+
+  https://www.microsoft.com/en-us/research/publication/ftpm-software-implementation-tpm-chip/
+  https://github.com/Microsoft/ms-tpm-20-ref/tree/main/Samples/ARM32-FirmwareTPM
+
+properties:
+  compatible:
+    const: microsoft,ftpm
+
+required:
+  - compatible
+  - linux,sml-base
+  - linux,sml-size
+
+allOf:
+  - $ref: tpm-common.yaml#
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    tpm {
+        compatible = "microsoft,ftpm";
+        linux,sml-base = <0x0 0xc0000000>;
+        linux,sml-size = <0x10000>;
+    };
diff --git a/Documentation/devicetree/bindings/tpm/tcg,tpm-tis-i2c.yaml b/Documentation/devicetree/bindings/tpm/tcg,tpm-tis-i2c.yaml
new file mode 100644 (file)
index 0000000..3ab4434
--- /dev/null
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/tpm/tcg,tpm-tis-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: I²C-attached Trusted Platform Module conforming to TCG TIS specification
+
+maintainers:
+  - Lukas Wunner <lukas@wunner.de>
+
+description: |
+  The Trusted Computing Group (TCG) has defined a multi-vendor standard
+  for accessing a TPM chip.  It can be transported over various buses,
+  one of them being I²C.  The standard is named:
+  TCG PC Client Specific TPM Interface Specification (TIS)
+  https://trustedcomputinggroup.org/resource/pc-client-work-group-pc-client-specific-tpm-interface-specification-tis/
+
+  The I²C interface was not originally part of the standard, but added
+  in 2017 with a separate document:
+  TCG PC Client Platform TPM Profile Specification for TPM 2.0 (PTP)
+  https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+
+  Recent TPM 2.0 chips conform to this generic interface, others use a
+  vendor-specific I²C interface.
+
+properties:
+  compatible:
+    oneOf:
+      - description: Generic TPM 2.0 chips conforming to TCG PTP interface
+        items:
+          - enum:
+              - infineon,slb9673
+              - nuvoton,npct75x
+          - const: tcg,tpm-tis-i2c
+
+      - description: TPM 1.2 and 2.0 chips with vendor-specific I²C interface
+        items:
+          - enum:
+              - atmel,at97sc3204t # TPM 1.2
+              - infineon,slb9635tt # TPM 1.2 (maximum 100 kHz)
+              - infineon,slb9645tt # TPM 1.2 (maximum 400 kHz)
+              - infineon,tpm_i2c_infineon # TPM 1.2
+              - nuvoton,npct501 # TPM 1.2
+              - nuvoton,npct601 # TPM 2.0
+              - st,st33zp24-i2c # TPM 2.0
+              - winbond,wpct301 # TPM 1.2
+
+  reg:
+    description: address of TPM on the I²C bus
+
+allOf:
+  - $ref: tpm-common.yaml#
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        tpm@57 {
+            label = "tpm";
+            compatible = "nuvoton,npct601";
+            reg = <0x57>;
+            linux,sml-base = <0x7f 0xfd450000>;
+            linux,sml-size = <0x10000>;
+        };
+    };
+
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        tpm@13 {
+            reg = <0x13>;
+            compatible = "st,st33zp24-i2c";
+            interrupt-parent = <&gpio5>;
+            interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+            lpcpd-gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/tpm/tcg,tpm-tis-mmio.yaml b/Documentation/devicetree/bindings/tpm/tcg,tpm-tis-mmio.yaml
new file mode 100644 (file)
index 0000000..87bce06
--- /dev/null
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/tpm/tcg,tpm-tis-mmio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MMIO-accessed Trusted Platform Module conforming to TCG TIS specification
+
+maintainers:
+  - Lukas Wunner <lukas@wunner.de>
+
+description: |
+  The Trusted Computing Group (TCG) has defined a multi-vendor standard
+  for accessing a TPM chip.  It can be transported over various buses,
+  one of them being LPC (via MMIO).  The standard is named:
+  TCG PC Client Specific TPM Interface Specification (TIS)
+  https://trustedcomputinggroup.org/resource/pc-client-work-group-pc-client-specific-tpm-interface-specification-tis/
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - at97sc3201
+          - atmel,at97sc3204
+          - socionext,synquacer-tpm-mmio
+      - const: tcg,tpm-tis-mmio
+
+  reg:
+    description:
+      location and length of the MMIO registers, length should be
+      at least 0x5000 bytes
+
+allOf:
+  - $ref: tpm-common.yaml#
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    tpm@90000 {
+        compatible = "atmel,at97sc3204", "tcg,tpm-tis-mmio";
+        reg = <0x90000 0x5000>;
+        interrupt-parent = <&EIC0>;
+        interrupts = <1 2>;
+    };
diff --git a/Documentation/devicetree/bindings/tpm/tcg,tpm_tis-spi.yaml b/Documentation/devicetree/bindings/tpm/tcg,tpm_tis-spi.yaml
new file mode 100644 (file)
index 0000000..c3413b4
--- /dev/null
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/tpm/tcg,tpm_tis-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SPI-attached Trusted Platform Module conforming to TCG TIS specification
+
+maintainers:
+  - Lukas Wunner <lukas@wunner.de>
+
+description: |
+  The Trusted Computing Group (TCG) has defined a multi-vendor standard
+  for accessing a TPM chip.  It can be transported over various buses,
+  one of them being SPI.  The standard is named:
+  TCG PC Client Specific TPM Interface Specification (TIS)
+  https://trustedcomputinggroup.org/resource/pc-client-work-group-pc-client-specific-tpm-interface-specification-tis/
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - infineon,slb9670
+          - st,st33htpm-spi
+          - st,st33zp24-spi
+      - const: tcg,tpm_tis-spi
+
+allOf:
+  - $ref: tpm-common.yaml#
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: st,st33zp24-spi
+    then:
+      properties:
+        spi-max-frequency:
+          maximum: 10000000
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        tpm@0 {
+            reg = <0>;
+            compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+            spi-max-frequency = <10000000>;
+        };
+    };
+
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        tpm@0 {
+            reg = <0>;
+            compatible = "st,st33zp24-spi", "tcg,tpm_tis-spi";
+            spi-max-frequency = <10000000>;
+            interrupt-parent = <&gpio5>;
+            interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+            lpcpd-gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/tpm/tpm-common.yaml b/Documentation/devicetree/bindings/tpm/tpm-common.yaml
new file mode 100644 (file)
index 0000000..9039062
--- /dev/null
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/tpm/tpm-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Trusted Platform Module common properties
+
+maintainers:
+  - Lukas Wunner <lukas@wunner.de>
+
+properties:
+  $nodename:
+    pattern: '^tpm(@[0-9a-f]+)?$'
+
+  interrupts:
+    description: indicates command completion
+    maxItems: 1
+
+  label:
+    description: human readable string describing the device, e.g. "tpm"
+
+  linux,sml-base:
+    description:
+      base address of reserved memory allocated for firmware event log
+    $ref: /schemas/types.yaml#/definitions/uint64
+
+  linux,sml-size:
+    description:
+      size of reserved memory allocated for firmware event log
+    $ref: /schemas/types.yaml#/definitions/uint32
+
+  memory-region:
+    description: reserved memory allocated for firmware event log
+    maxItems: 1
+
+  powered-while-suspended:
+    description:
+      present when the TPM is left powered on between suspend and resume
+      (makes the suspend/resume callbacks do nothing)
+    type: boolean
+
+  resets:
+    description: Reset controller to reset the TPM
+    $ref: /schemas/types.yaml#/definitions/phandle
+
+  reset-gpios:
+    description: Output GPIO pin to reset the TPM
+    maxItems: 1
+
+# must always have both linux,sml-base and linux,sml-size
+dependentRequired:
+  linux,sml-base: ['linux,sml-size']
+  linux,sml-size: ['linux,sml-base']
+
+# must only have either memory-region or linux,sml-base
+# as well as either resets or reset-gpios
+dependentSchemas:
+  memory-region:
+    properties:
+      linux,sml-base: false
+  linux,sml-base:
+    properties:
+      memory-region: false
+  resets:
+    properties:
+      reset-gpios: false
+  reset-gpios:
+    properties:
+      resets: false
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            pattern: '^st,st33zp24'
+    then:
+      properties:
+        lpcpd-gpios:
+          description:
+            Output GPIO pin used for ST33ZP24 power management of D1/D2 state.
+            If set, power must be present when the platform is going into
+            sleep/hibernate mode.
+          maxItems: 1
+
+additionalProperties: true
index a5fb2fa22026b85f8e0d90ef25c6d3055e6927ba..79dcd92c4a43452e4e92f943add7638b45a45f21 100644 (file)
@@ -49,8 +49,6 @@ properties:
           - ams,iaq-core
             # i2c serial eeprom (24cxx)
           - at,24c08
-            # i2c trusted platform module (TPM)
-          - atmel,at97sc3204t
             # ATSHA204 - i2c h/w symmetric crypto module
           - atmel,atsha204
             # ATSHA204A - i2c h/w symmetric crypto module
@@ -151,12 +149,6 @@ properties:
           - infineon,ir38263
             # Infineon IRPS5401 Voltage Regulator (PMIC)
           - infineon,irps5401
-            # Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
-          - infineon,slb9635tt
-            # Infineon SLB9645 I2C TPM (new protocol, max 400khz)
-          - infineon,slb9645tt
-            # Infineon SLB9673 I2C TPM 2.0
-          - infineon,slb9673
             # Infineon TLV493D-A1B6 I2C 3D Magnetic Sensor
           - infineon,tlv493d-a1b6
             # Infineon Multi-phase Digital VR Controller xdpe11280
@@ -185,6 +177,8 @@ properties:
           - isil,isl29030
             # Intersil ISL68137 Digital Output Configurable PWM Controller
           - isil,isl68137
+            # Intersil ISL76682 Ambient Light Sensor
+          - isil,isl76682
             # Linear Technology LTC2488
           - lineartechnology,ltc2488
             # 5 Bit Programmable, Pulse-Width Modulator
@@ -307,10 +301,6 @@ properties:
           - national,lm85
             # I2C ±0.33°C Accurate, 12-Bit + Sign Temperature Sensor and Thermal Window Comparator
           - national,lm92
-            # i2c trusted platform module (TPM)
-          - nuvoton,npct501
-            # i2c trusted platform module (TPM2)
-          - nuvoton,npct601
             # Nuvoton Temperature Sensor
           - nuvoton,w83773g
             # OKI ML86V7667 video decoder
@@ -355,8 +345,6 @@ properties:
           - silabs,si7020
             # Skyworks SKY81452: Six-Channel White LED Driver with Touch Panel Bias Supply
           - skyworks,sky81452
-            # Socionext SynQuacer TPM MMIO module
-          - socionext,synquacer-tpm-mmio
             # SparkFun Qwiic Joystick (COM-15168) with i2c interface
           - sparkfun,qwiic-joystick
             # i2c serial eeprom (24cxx)
@@ -411,8 +399,6 @@ properties:
           - winbond,w83793
             # Vicor Corporation Digital Supervisor
           - vicor,pli1209bc
-            # i2c trusted platform module (TPM)
-          - winbond,wpct301
 
 required:
   - compatible
index 594ebb3ee432037f98adaa3e7cfcdaa4fae81ff2..6ceafa4af29221efd6edad65befa6449658f8e71 100644 (file)
@@ -9,9 +9,6 @@ title: USB xHCI Controller
 maintainers:
   - Mathias Nyman <mathias.nyman@intel.com>
 
-allOf:
-  - $ref: usb-xhci.yaml#
-
 properties:
   compatible:
     oneOf:
@@ -25,6 +22,11 @@ properties:
               - marvell,armada-380-xhci
               - marvell,armada-8k-xhci
           - const: generic-xhci
+      - description: Broadcom SoCs with power domains
+        items:
+          - enum:
+              - brcm,bcm2711-xhci
+          - const: brcm,xhci-brcm-v2
       - description: Broadcom STB SoCs with xHCI
         enum:
           - brcm,xhci-brcm-v2
@@ -49,6 +51,9 @@ properties:
       - const: core
       - const: reg
 
+  power-domains:
+    maxItems: 1
+
 unevaluatedProperties: false
 
 required:
@@ -56,6 +61,20 @@ required:
   - reg
   - interrupts
 
+allOf:
+  - $ref: usb-xhci.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: brcm,bcm2711-xhci
+    then:
+      required:
+        - power-domains
+    else:
+      properties:
+        power-domains: false
+
 examples:
   - |
     usb@f0931000 {
index ee08b9c3721f8998439258a10ef944ad11a8fccc..37cf5249e526bb8ebce78d12ba228b3f460e218c 100644 (file)
@@ -29,6 +29,11 @@ properties:
     description:
       the regulator that provides 3.3V core power to the hub.
 
+  peer-hub:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      phandle to the peer hub on the controller.
+
 required:
   - compatible
   - reg
index e9644e333d78135d83c3bc88cf903960140315f5..924fd3d748a8817517c01172680728ab7b0d87d4 100644 (file)
@@ -124,6 +124,17 @@ properties:
       defined in the xHCI spec on MTK's controller.
     default: 5000
 
+  rx-fifo-depth:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      It is a quirk used to work around Gen1 isoc-in endpoint transfer issue
+      that still send out unexpected ACK after device finishes the burst
+      transfer with a short packet and cause an exception, specially on a 4K
+      camera device, it happens on controller before about IPM v1.6.0;
+      the side-effect is that it may cause performance drop about 10%,
+      including bulk transfer, prefer to use 3k here. The size is in bytes.
+    enum: [1024, 2048, 3072, 4096]
+
   # the following properties are only used for case 1
   wakeup-source:
     description: enable USB remote wakeup, see power/wakeup-source.txt
index 28eb25ecba74ee1633c24ad726d1834ba60a2d9b..eaedb4cc6b6cceae8af44c7507086c4b36fc402c 100644 (file)
@@ -4,7 +4,7 @@
 $id: http://devicetree.org/schemas/usb/nxp,ptn5110.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
-title: NXP PTN5110 Typec Port Cotroller
+title: NXP PTN5110 Type-C Port Controller
 
 maintainers:
   - Li Jun <jun.li@nxp.com>
index 915c8205623b3abf98a43c9ab851e2bdb32c36f7..63d150b216c52873248ac478d840a713135dd576 100644 (file)
@@ -46,6 +46,8 @@ properties:
           - qcom,sm8350-dwc3
           - qcom,sm8450-dwc3
           - qcom,sm8550-dwc3
+          - qcom,sm8650-dwc3
+          - qcom,x1e80100-dwc3
       - const: qcom,dwc3
 
   reg:
@@ -97,12 +99,29 @@ properties:
       - const: apps-usb
 
   interrupts:
-    minItems: 1
-    maxItems: 4
+    description: |
+      Different types of interrupts are used based on HS PHY used on target:
+        - pwr_event: Used for wakeup based on other power events.
+        - hs_phy_irq: Apart from DP/DM/QUSB2 PHY interrupts, there is
+                       hs_phy_irq which is not triggered by default and its
+                       functionality is mutually exclusive to that of
+                       {dp/dm}_hs_phy_irq and qusb2_phy_irq.
+        - qusb2_phy: SoCs with QUSB2 PHY do not have separate DP/DM IRQs and
+                      expose only a single IRQ whose behavior can be modified
+                      by the QUSB2PHY_INTR_CTRL register. The required DPSE/
+                      DMSE configuration is done in QUSB2PHY_INTR_CTRL register
+                      of PHY address space.
+        - {dp/dm}_hs_phy_irq: These IRQs directly reflect changes on the DP/
+                               DM pads of the SoC. These are used for wakeup
+                               only on SoCs with non-QUSB2 targets with
+                               exception of SDM670/SDM845/SM6350.
+        - ss_phy_irq: Used for remote wakeup in Super Speed mode of operation.
+    minItems: 2
+    maxItems: 5
 
   interrupt-names:
-    minItems: 1
-    maxItems: 4
+    minItems: 2
+    maxItems: 5
 
   qcom,select-utmi-as-pipe-clk:
     description:
@@ -263,6 +282,7 @@ allOf:
           contains:
             enum:
               - qcom,sc8280xp-dwc3
+              - qcom,x1e80100-dwc3
     then:
       properties:
         clocks:
@@ -288,8 +308,8 @@ allOf:
     then:
       properties:
         clocks:
-          minItems: 5
-          maxItems: 6
+          minItems: 4
+          maxItems: 5
         clock-names:
           oneOf:
             - items:
@@ -298,13 +318,11 @@ allOf:
                 - const: iface
                 - const: sleep
                 - const: mock_utmi
-                - const: bus
             - items:
                 - const: cfg_noc
                 - const: core
                 - const: sleep
                 - const: mock_utmi
-                - const: bus
 
   - if:
       properties:
@@ -318,6 +336,7 @@ allOf:
               - qcom,sm8250-dwc3
               - qcom,sm8450-dwc3
               - qcom,sm8550-dwc3
+              - qcom,sm8650-dwc3
     then:
       properties:
         clocks:
@@ -357,59 +376,20 @@ allOf:
         compatible:
           contains:
             enum:
-              - qcom,ipq4019-dwc3
+              - qcom,ipq5018-dwc3
               - qcom,ipq6018-dwc3
-              - qcom,ipq8064-dwc3
               - qcom,ipq8074-dwc3
-              - qcom,msm8994-dwc3
-              - qcom,qcs404-dwc3
-              - qcom,sc7180-dwc3
-              - qcom,sdm670-dwc3
-              - qcom,sdm845-dwc3
-              - qcom,sdx55-dwc3
-              - qcom,sdx65-dwc3
-              - qcom,sdx75-dwc3
-              - qcom,sm4250-dwc3
-              - qcom,sm6125-dwc3
-              - qcom,sm6350-dwc3
-              - qcom,sm8150-dwc3
-              - qcom,sm8250-dwc3
-              - qcom,sm8350-dwc3
-              - qcom,sm8450-dwc3
-              - qcom,sm8550-dwc3
-    then:
-      properties:
-        interrupts:
-          items:
-            - description: The interrupt that is asserted
-                when a wakeup event is received on USB2 bus.
-            - description: The interrupt that is asserted
-                when a wakeup event is received on USB3 bus.
-            - description: Wakeup event on DM line.
-            - description: Wakeup event on DP line.
-        interrupt-names:
-          items:
-            - const: hs_phy_irq
-            - const: ss_phy_irq
-            - const: dm_hs_phy_irq
-            - const: dp_hs_phy_irq
-
-  - if:
-      properties:
-        compatible:
-          contains:
-            enum:
               - qcom,msm8953-dwc3
-              - qcom,msm8996-dwc3
               - qcom,msm8998-dwc3
-              - qcom,sm6115-dwc3
     then:
       properties:
         interrupts:
-          maxItems: 2
+          minItems: 2
+          maxItems: 3
         interrupt-names:
           items:
-            - const: hs_phy_irq
+            - const: pwr_event
+            - const: qusb2_phy
             - const: ss_phy_irq
 
   - if:
@@ -417,37 +397,21 @@ allOf:
         compatible:
           contains:
             enum:
-              - qcom,ipq5018-dwc3
-              - qcom,ipq5332-dwc3
+              - qcom,msm8996-dwc3
+              - qcom,qcs404-dwc3
               - qcom,sdm660-dwc3
-    then:
-      properties:
-        interrupts:
-          minItems: 1
-          maxItems: 2
-        interrupt-names:
-          minItems: 1
-          items:
-            - const: hs_phy_irq
-            - const: ss_phy_irq
-
-  - if:
-      properties:
-        compatible:
-          contains:
-            enum:
-              - qcom,sc7280-dwc3
+              - qcom,sm6115-dwc3
+              - qcom,sm6125-dwc3
     then:
       properties:
         interrupts:
           minItems: 3
           maxItems: 4
         interrupt-names:
-          minItems: 3
           items:
+            - const: pwr_event
+            - const: qusb2_phy
             - const: hs_phy_irq
-            - const: dp_hs_phy_irq
-            - const: dm_hs_phy_irq
             - const: ss_phy_irq
 
   - if:
@@ -455,7 +419,8 @@ allOf:
         compatible:
           contains:
             enum:
-              - qcom,sc8280xp-dwc3
+              - qcom,ipq5332-dwc3
+              - qcom,x1e80100-dwc3
     then:
       properties:
         interrupts:
@@ -472,16 +437,35 @@ allOf:
         compatible:
           contains:
             enum:
+              - qcom,ipq4019-dwc3
+              - qcom,ipq8064-dwc3
+              - qcom,msm8994-dwc3
               - qcom,sa8775p-dwc3
+              - qcom,sc7180-dwc3
+              - qcom,sc7280-dwc3
+              - qcom,sc8280xp-dwc3
+              - qcom,sdm670-dwc3
+              - qcom,sdm845-dwc3
+              - qcom,sdx55-dwc3
+              - qcom,sdx65-dwc3
+              - qcom,sdx75-dwc3
+              - qcom,sm4250-dwc3
+              - qcom,sm6350-dwc3
+              - qcom,sm8150-dwc3
+              - qcom,sm8250-dwc3
+              - qcom,sm8350-dwc3
+              - qcom,sm8450-dwc3
+              - qcom,sm8550-dwc3
+              - qcom,sm8650-dwc3
     then:
       properties:
         interrupts:
-          minItems: 3
-          maxItems: 4
+          minItems: 4
+          maxItems: 5
         interrupt-names:
-          minItems: 3
           items:
             - const: pwr_event
+            - const: hs_phy_irq
             - const: dp_hs_phy_irq
             - const: dm_hs_phy_irq
             - const: ss_phy_irq
@@ -519,12 +503,13 @@ examples:
                           <&gcc GCC_USB30_PRIM_MASTER_CLK>;
             assigned-clock-rates = <19200000>, <150000000>;
 
-            interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+            interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+                         <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+                         <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>,
                          <GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>,
-                         <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>;
-            interrupt-names = "hs_phy_irq", "ss_phy_irq",
-                          "dm_hs_phy_irq", "dp_hs_phy_irq";
+                         <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>;
+            interrupt-names = "pwr_event", "hs_phy_irq",
+                          "dp_hs_phy_irq", "dm_hs_phy_irq", "ss_phy_irq";
 
             power-domains = <&gcc USB30_PRIM_GDSC>;
 
diff --git a/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml b/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
new file mode 100644 (file)
index 0000000..7ddfd33
--- /dev/null
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/qcom,wcd939x-usbss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm WCD9390/WCD9395 USB SubSystem Altmode/Analog Audio Switch
+
+maintainers:
+  - Neil Armstrong <neil.armstrong@linaro.org>
+
+description:
+  Qualcomm WCD9390/WCD9395 is a standalone Hi-Fi audio codec IC with a
+  functionally separate USB SubSystem for Altmode/Analog Audio Switch
+  accessible over an I2C interface.
+  The Audio Headphone and Microphone data path between the Codec and the
+  USB-C Mux subsystems are external to the IC, thus requiring DT port-endpoint
+  graph description to handle USB-C altmode & orientation switching for Audio
+  Accessory Mode.
+
+properties:
+  compatible:
+    oneOf:
+      - const: qcom,wcd9390-usbss
+      - items:
+          - const: qcom,wcd9395-usbss
+          - const: qcom,wcd9390-usbss
+
+  reg:
+    maxItems: 1
+
+  reset-gpios:
+    maxItems: 1
+
+  vdd-supply:
+    description: USBSS VDD power supply
+
+  mode-switch:
+    description: Flag the port as possible handler of altmode switching
+    type: boolean
+
+  orientation-switch:
+    description: Flag the port as possible handler of orientation switching
+    type: boolean
+
+  ports:
+    $ref: /schemas/graph.yaml#/properties/ports
+    properties:
+      port@0:
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
+          A port node to link the WCD939x USB SubSystem to a TypeC controller for the
+          purpose of handling altmode muxing and orientation switching.
+
+      port@1:
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
+          A port node to link the WCD939x USB SubSystem to the Codec SubSystem for the
+          purpose of handling USB-C Audio Accessory Mode muxing and orientation switching.
+
+required:
+  - compatible
+  - reg
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        typec-mux@42 {
+            compatible = "qcom,wcd9390-usbss";
+            reg = <0x42>;
+
+            vdd-supply = <&vreg_bob>;
+
+            mode-switch;
+            orientation-switch;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    reg = <0>;
+                    wcd9390_usbss_sbu: endpoint {
+                        remote-endpoint = <&typec_sbu>;
+                    };
+                };
+                port@1 {
+                    reg = <1>;
+                    wcd9390_usbss_codec: endpoint {
+                        remote-endpoint = <&wcd9390_codec_usbss>;
+                    };
+                };
+            };
+        };
+    };
+...
index bad55dfb2fa036f4a9a08eba9aedae0eb4e7fea1..40ada78f2328895ac2378fab9a09c818e1034dfd 100644 (file)
@@ -19,7 +19,7 @@ properties:
       - items:
           - enum:
               - renesas,usbhs-r7s9210   # RZ/A2
-              - renesas,usbhs-r9a07g043 # RZ/G2UL
+              - renesas,usbhs-r9a07g043 # RZ/G2UL and RZ/Five
               - renesas,usbhs-r9a07g044 # RZ/G2{L,LC}
               - renesas,usbhs-r9a07g054 # RZ/V2L
           - const: renesas,rza2-usbhs
index ee5af4b381b1bc97b11954a2141485e1f06671e1..203a1eb66691f6f3ea69686e397bc5a986a830ba 100644 (file)
@@ -432,6 +432,10 @@ properties:
     items:
       enum: [1, 4, 8, 16, 32, 64, 128, 256]
 
+  num-hc-interrupters:
+    maximum: 8
+    default: 1
+
   port:
     $ref: /schemas/graph.yaml#/properties/port
     description:
index 323d664ae06a4de232eb21e2ee2ea8e814d69d17..1745e28b31105252e5b4b79dfe142f370c5d7294 100644 (file)
@@ -38,6 +38,10 @@ properties:
       - const: main
       - const: patch-address
 
+  reset-gpios:
+    description: GPIO used for the HRESET pin.
+    maxItems: 1
+
   wakeup-source: true
 
   interrupts:
@@ -90,6 +94,7 @@ additionalProperties: false
 
 examples:
   - |
+    #include <dt-bindings/gpio/gpio.h>
     #include <dt-bindings/interrupt-controller/irq.h>
     i2c {
         #address-cells = <1>;
@@ -106,6 +111,7 @@ examples:
 
             pinctrl-names = "default";
             pinctrl-0 = <&typec_pins>;
+            reset-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
 
             typec_con: connector {
                 compatible = "usb-c-connector";
index 180a261c3e8f376311ffcfca624ec14332f807ae..4238ae896ef6f5b88e4000fd7570764bb85ed7a2 100644 (file)
@@ -29,6 +29,12 @@ properties:
     description: Interrupt moderation interval
     default: 5000
 
+  num-hc-interrupters:
+    description: Maximum number of interrupters to allocate
+    $ref: /schemas/types.yaml#/definitions/uint16
+    minimum: 1
+    maximum: 1024
+
 additionalProperties: true
 
 examples:
index 871f38fef3c432daaad53d092113d27089fdcfcd..1a0dc04f1db47865766c3c71981cbdbd64c8343e 100644 (file)
@@ -121,6 +121,8 @@ patternProperties:
     description: Andes Technology Corporation
   "^anvo,.*":
     description: Anvo-Systems Dresden GmbH
+  "^aosong,.*":
+    description: Guangzhou Aosong Electronic Co., Ltd.
   "^apm,.*":
     description: Applied Micro Circuits Corporation (APM)
   "^apple,.*":
@@ -1295,6 +1297,8 @@ patternProperties:
     description: Skyworks Solutions, Inc.
   "^smartlabs,.*":
     description: SmartLabs LLC
+  "^smi,.*":
+    description: Silicon Motion Technology Corporation
   "^smsc,.*":
     description: Standard Microsystems Corporation
   "^snps,.*":
diff --git a/Documentation/devicetree/bindings/w1/amd,axi-1wire-host.yaml b/Documentation/devicetree/bindings/w1/amd,axi-1wire-host.yaml
new file mode 100644 (file)
index 0000000..ef70fa2
--- /dev/null
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/w1/amd,axi-1wire-host.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AMD AXI 1-wire bus host for programmable logic
+
+maintainers:
+  - Kris Chaplin <kris.chaplin@amd.com>
+
+properties:
+  compatible:
+    const: amd,axi-1wire-host
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - interrupts
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    onewire@a0000000 {
+        compatible = "amd,axi-1wire-host";
+        reg = <0xa0000000 0x10000>;
+        clocks = <&zynqmp_clk 0x47>;
+        interrupts = <GIC_SPI 0x59 IRQ_TYPE_LEVEL_HIGH>;
+    };
+
+...
index 44deb52beeb4766ae736c9a2645ce2be09857cc1..d0b241628cf13d4002735de4b9b5141cf90e0bde 100644 (file)
@@ -83,19 +83,9 @@ this to include other types of resources like doorbells.
 Client Drivers
 --------------
 
-A client driver typically only has to conditionally change its DMA map
-routine to use the mapping function :c:func:`pci_p2pdma_map_sg()` instead
-of the usual :c:func:`dma_map_sg()` function. Memory mapped in this
-way does not need to be unmapped.
-
-The client may also, optionally, make use of
-:c:func:`is_pci_p2pdma_page()` to determine when to use the P2P mapping
-functions and when to use the regular mapping functions. In some
-situations, it may be more appropriate to use a flag to indicate a
-given request is P2P memory and map appropriately. It is important to
-ensure that struct pages that back P2P memory stay out of code that
-does not have support for them as other code may treat the pages as
-regular memory which may not be appropriate.
+A client driver only has to use the mapping API :c:func:`dma_map_sg()`
+and :c:func:`dma_unmap_sg()` functions as usual, and the implementation
+will do the right thing for the P2P capable memory.
 
 
 Orchestrator Drivers
index bb264490a87a16a981095ac4b88de93662bcf134..3c28ccc4b61133bffe1c04f937ff63b25d7c0750 100644 (file)
@@ -41,11 +41,20 @@ the getter, devm_pwm_get() and devm_fwnode_pwm_get(), also exist.
 
 After being requested, a PWM has to be configured using::
 
-       int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state);
+       int pwm_apply_might_sleep(struct pwm_device *pwm, struct pwm_state *state);
 
 This API controls both the PWM period/duty_cycle config and the
 enable/disable state.
 
+PWM devices can be used from atomic context, if the PWM does not sleep. You
+can check if this is the case with::
+
+        bool pwm_might_sleep(struct pwm_device *pwm);
+
+If false, the PWM can also be configured from atomic context with::
+
+       int pwm_apply_atomic(struct pwm_device *pwm, struct pwm_state *state);
+
 As a consumer, don't rely on the output's state for a disabled PWM. If it's
 easily possible, drivers are supposed to emit the inactive state, but some
 drivers cannot. If you rely on getting the inactive state, use .duty_cycle=0,
@@ -57,13 +66,13 @@ If supported by the driver, the signal can be optimized, for example to improve
 EMI by phase shifting the individual channels of a chip.
 
 The pwm_config(), pwm_enable() and pwm_disable() functions are just wrappers
-around pwm_apply_state() and should not be used if the user wants to change
+around pwm_apply_might_sleep() and should not be used if the user wants to change
 several parameter at once. For example, if you see pwm_config() and
 pwm_{enable,disable}() calls in the same function, this probably means you
-should switch to pwm_apply_state().
+should switch to pwm_apply_might_sleep().
 
 The PWM user API also allows one to query the PWM state that was passed to the
-last invocation of pwm_apply_state() using pwm_get_state(). Note this is
+last invocation of pwm_apply_might_sleep() using pwm_get_state(). Note this is
 different to what the driver has actually implemented if the request cannot be
 satisfied exactly with the hardware in use. There is currently no way for
 consumers to get the actually implemented settings.
index 8fd22073a847e9d1bbed3ccc5b9b59a5e71db300..d222bd3ee7495b86f711056aef5ccf5a180c5e20 100644 (file)
@@ -20,7 +20,7 @@
     |    openrisc: |  ..  |
     |      parisc: | TODO |
     |     powerpc: | TODO |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: | TODO |
     |          sh: | TODO |
     |       sparc: | TODO |
index 48b95d04f72d5a25df0de37ac87a52d8f7471998..4cc657d743f7f3b24b9a3a15efd9608ee5775554 100644 (file)
@@ -295,7 +295,6 @@ through which it can issue requests and negotiate::
        struct netfs_request_ops {
                void (*init_request)(struct netfs_io_request *rreq, struct file *file);
                void (*free_request)(struct netfs_io_request *rreq);
-               int (*begin_cache_operation)(struct netfs_io_request *rreq);
                void (*expand_readahead)(struct netfs_io_request *rreq);
                bool (*clamp_length)(struct netfs_io_subrequest *subreq);
                void (*issue_read)(struct netfs_io_subrequest *subreq);
@@ -317,20 +316,6 @@ The operations are as follows:
    [Optional] This is called as the request is being deallocated so that the
    filesystem can clean up any state it has attached there.
 
- * ``begin_cache_operation()``
-
-   [Optional] This is called to ask the network filesystem to call into the
-   cache (if present) to initialise the caching state for this read.  The netfs
-   library module cannot access the cache directly, so the cache should call
-   something like fscache_begin_read_operation() to do this.
-
-   The cache gets to store its state in ->cache_resources and must set a table
-   of operations of its own there (though of a different type).
-
-   This should return 0 on success and an error code otherwise.  If an error is
-   reported, the operation may proceed anyway, just without local caching (only
-   out of memory and interruption errors cause failure here).
-
  * ``expand_readahead()``
 
    [Optional] This is called to allow the filesystem to expand the size of a
@@ -460,14 +445,14 @@ When implementing a local cache to be used by the read helpers, two things are
 required: some way for the network filesystem to initialise the caching for a
 read request and a table of operations for the helpers to call.
 
-The network filesystem's ->begin_cache_operation() method is called to set up a
-cache and this must call into the cache to do the work.  If using fscache, for
-example, the cache would call::
+To begin a cache operation on an fscache object, the following function is
+called::
 
        int fscache_begin_read_operation(struct netfs_io_request *rreq,
                                         struct fscache_cookie *cookie);
 
-passing in the request pointer and the cookie corresponding to the file.
+passing in the request pointer and the cookie corresponding to the file.  This
+fills in the cache resources mentioned below.
 
 The netfs_io_request object contains a place for the cache to hang its
 state::
index 1c244866041a3cb985568ce5f19e67d947eed5e6..16551440144183c58517fe27e750233619ebf89d 100644 (file)
@@ -145,7 +145,9 @@ filesystem, an overlay filesystem needs to record in the upper filesystem
 that files have been removed.  This is done using whiteouts and opaque
 directories (non-directories are always opaque).
 
-A whiteout is created as a character device with 0/0 device number.
+A whiteout is created as a character device with 0/0 device number or
+as a zero-size regular file with the xattr "trusted.overlay.whiteout".
+
 When a whiteout is found in the upper level of a merged directory, any
 matching name in the lower level is ignored, and the whiteout itself
 is also hidden.
@@ -154,6 +156,13 @@ A directory is made opaque by setting the xattr "trusted.overlay.opaque"
 to "y".  Where the upper filesystem contains an opaque directory, any
 directory in the lower filesystem with the same name is ignored.
 
+An opaque directory should not contain any whiteouts, because they do not
+serve any purpose.  A merge directory containing regular files with the xattr
+"trusted.overlay.whiteout" should be additionally marked by setting the xattr
+"trusted.overlay.opaque" to "x" on the merge directory itself.
+This is needed to avoid the overhead of checking the "trusted.overlay.whiteout"
+on all entries during readdir in the common case.
+
 readdir
 -------
 
@@ -534,8 +543,9 @@ A lower dir with a regular whiteout will always be handled by the overlayfs
 mount, so to support storing an effective whiteout file in an overlayfs mount an
 alternative form of whiteout is supported. This form is a regular, zero-size
 file with the "overlay.whiteout" xattr set, inside a directory with the
-"overlay.whiteouts" xattr set. Such whiteouts are never created by overlayfs,
-but can be used by userspace tools (like containers) that generate lower layers.
+"overlay.opaque" xattr set to "x" (see `whiteouts and opaque directories`_).
+These alternative whiteouts are never created by overlayfs, but can be used by
+userspace tools (like containers) that generate lower layers.
 These alternative whiteouts can be escaped using the standard xattr escape
 mechanism in order to properly nest to any depth.
 
index 7bed96d794fc2656d3bb9d69cbca3feef97ea6c8..6b30e43a0d11f49959d700e38fbfe115d71fd40c 100644 (file)
@@ -73,15 +73,14 @@ Auto Negotiation               Supported.
 Compound Request               Supported.
 Oplock Cache Mechanism         Supported.
 SMB2 leases(v1 lease)          Supported.
-Directory leases(v2 lease)     Planned for future.
+Directory leases(v2 lease)     Supported.
 Multi-credits                  Supported.
 NTLM/NTLMv2                    Supported.
 HMAC-SHA256 Signing            Supported.
 Secure negotiate               Supported.
 Signing Update                 Supported.
 Pre-authentication integrity   Supported.
-SMB3 encryption(CCM, GCM)      Supported. (CCM and GCM128 supported, GCM256 in
-                               progress)
+SMB3 encryption(CCM, GCM)      Supported. (CCM/GCM128 and CCM/GCM256 supported)
 SMB direct(RDMA)               Supported.
 SMB3 Multi-channel             Partially Supported. Planned to implement
                                replay/retry mechanisms for future.
@@ -112,6 +111,10 @@ DCE/RPC support                Partially Supported. a few calls(NetShareEnumAll,
                                for Witness protocol e.g.)
 ksmbd/nfsd interoperability    Planned for future. The features that ksmbd
                                support are Leases, Notify, ACLs and Share modes.
+SMB3.1.1 Compression           Planned for future.
+SMB3.1.1 over QUIC             Planned for future.
+Signing/Encryption over RDMA   Planned for future.
+SMB3.1.1 GMAC signing support  Planned for future.
 ============================== =================================================
 
 
index d414e145f912c2e334ef3e55718443acfde0716e..4202174a6262c56e15190424a59f2e2619de6d8b 100644 (file)
@@ -396,10 +396,11 @@ Memory barriers come in four basic varieties:
 
 
  (2) Address-dependency barriers (historical).
-     [!] This section is marked as HISTORICAL: For more up-to-date
-     information, including how compiler transformations related to pointer
-     comparisons can sometimes cause problems, see
-     Documentation/RCU/rcu_dereference.rst.
+     [!] This section is marked as HISTORICAL: it covers the long-obsolete
+     smp_read_barrier_depends() macro, the semantics of which are now
+     implicit in all marked accesses.  For more up-to-date information,
+     including how compiler transformations can sometimes break address
+     dependencies, see Documentation/RCU/rcu_dereference.rst.
 
      An address-dependency barrier is a weaker form of read barrier.  In the
      case where two loads are performed such that the second depends on the
@@ -560,9 +561,11 @@ There are certain things that the Linux kernel memory barriers do not guarantee:
 
 ADDRESS-DEPENDENCY BARRIERS (HISTORICAL)
 ----------------------------------------
-[!] This section is marked as HISTORICAL: For more up-to-date information,
-including how compiler transformations related to pointer comparisons can
-sometimes cause problems, see Documentation/RCU/rcu_dereference.rst.
+[!] This section is marked as HISTORICAL: it covers the long-obsolete
+smp_read_barrier_depends() macro, the semantics of which are now implicit
+in all marked accesses.  For more up-to-date information, including
+how compiler transformations can sometimes break address dependencies,
+see Documentation/RCU/rcu_dereference.rst.
 
 As of v4.15 of the Linux kernel, an smp_mb() was added to READ_ONCE() for
 DEC Alpha, which means that about the only people who need to pay attention
index 1ad01d52a8638dcf6ee8a1c6c3d58698abd0d8e4..8e4d19adee8cd17eae831db73692c9237b5e0ad1 100644 (file)
@@ -942,6 +942,10 @@ attribute-sets:
       -
         name: gro-ipv4-max-size
         type: u32
+      -
+        name: dpll-pin
+        type: nest
+        nested-attributes: link-dpll-pin-attrs
   -
     name: af-spec-attrs
     attributes:
@@ -1627,6 +1631,12 @@ attribute-sets:
       -
         name: used
         type: u8
+  -
+    name: link-dpll-pin-attrs
+    attributes:
+      -
+        name: id
+        type: u32
 
 sub-messages:
   -
index 1f0d81f44e14b25981dbb8c65c972fa9f20b55ce..c2046dec0c2f4065d81953e4164e706cb73d3d2c 100644 (file)
@@ -66,6 +66,10 @@ for aligning variables/macros, for reflowing text and other similar tasks.
 See the file :ref:`Documentation/process/clang-format.rst <clangformat>`
 for more details.
 
+Some basic editor settings, such as indentation and line endings, will be
+set automatically if you are using an editor that is compatible with
+EditorConfig. See the official EditorConfig website for more information:
+https://editorconfig.org/
 
 Abstraction layers
 ******************
index 6db37a46d3059ee8e3fe6c3ee80711b6bff26e0d..c48382c6b47746f57a090d7af838916a419ff481 100644 (file)
@@ -735,6 +735,10 @@ for aligning variables/macros, for reflowing text and other similar tasks.
 See the file :ref:`Documentation/process/clang-format.rst <clangformat>`
 for more details.
 
+Some basic editor settings, such as indentation and line endings, will be
+set automatically if you are using an editor that is compatible with
+EditorConfig. See the official EditorConfig website for more information:
+https://editorconfig.org/
 
 10) Kconfig configuration files
 -------------------------------
index b91e9ef4d0c21e45a4beb27eb8ac9f32a4d6669a..73203ba1e9011e3a5eb5d58f598f00f34b4ece80 100644 (file)
@@ -12,10 +12,11 @@ which uses ``libclang``.
 Below is a general summary of architectures that currently work. Level of
 support corresponds to ``S`` values in the ``MAINTAINERS`` file.
 
-============  ================  ==============================================
-Architecture  Level of support  Constraints
-============  ================  ==============================================
-``um``        Maintained        ``x86_64`` only.
-``x86``       Maintained        ``x86_64`` only.
-============  ================  ==============================================
+=============  ================  ==============================================
+Architecture   Level of support  Constraints
+=============  ================  ==============================================
+``loongarch``  Maintained        -
+``um``         Maintained        ``x86_64`` only.
+``x86``        Maintained        ``x86_64`` only.
+=============  ================  ==============================================
 
index b5fa2f0542a5d92e02971e4e7c4791442adf2955..03ace5f01b5c021e12adba23b83b8cb074c949ba 100644 (file)
@@ -37,8 +37,6 @@ import re
 import subprocess
 import sys
 
-from os import path
-
 from docutils import nodes, statemachine
 from docutils.statemachine import ViewList
 from docutils.parsers.rst import directives, Directive
@@ -76,33 +74,26 @@ class KernelFeat(Directive):
         self.state.document.settings.env.app.warn(message, prefix="")
 
     def run(self):
-
         doc = self.state.document
         if not doc.settings.file_insertion_enabled:
             raise self.warning("docutils: file insertion disabled")
 
         env = doc.settings.env
-        cwd = path.dirname(doc.current_source)
-        cmd = "get_feat.pl rest --enable-fname --dir "
-        cmd += self.arguments[0]
-
-        if len(self.arguments) > 1:
-            cmd += " --arch " + self.arguments[1]
 
-        srctree = path.abspath(os.environ["srctree"])
+        srctree = os.path.abspath(os.environ["srctree"])
 
-        fname = cmd
+        args = [
+            os.path.join(srctree, 'scripts/get_feat.pl'),
+            'rest',
+            '--enable-fname',
+            '--dir',
+            os.path.join(srctree, 'Documentation', self.arguments[0]),
+        ]
 
-        # extend PATH with $(srctree)/scripts
-        path_env = os.pathsep.join([
-            srctree + os.sep + "scripts",
-            os.environ["PATH"]
-        ])
-        shell_env = os.environ.copy()
-        shell_env["PATH"]    = path_env
-        shell_env["srctree"] = srctree
+        if len(self.arguments) > 1:
+            args.extend(['--arch', self.arguments[1]])
 
-        lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
+        lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8')
 
         line_regex = re.compile(r"^\.\. FILE (\S+)$")
 
@@ -118,33 +109,9 @@ class KernelFeat(Directive):
             else:
                 out_lines += line + "\n"
 
-        nodeList = self.nestedParse(out_lines, fname)
+        nodeList = self.nestedParse(out_lines, self.arguments[0])
         return nodeList
 
-    def runCmd(self, cmd, **kwargs):
-        u"""Run command ``cmd`` and return its stdout as unicode."""
-
-        try:
-            proc = subprocess.Popen(
-                cmd
-                , stdout = subprocess.PIPE
-                , stderr = subprocess.PIPE
-                , **kwargs
-            )
-            out, err = proc.communicate()
-
-            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
-
-            if proc.returncode != 0:
-                raise self.severe(
-                    u"command '%s' failed with return code %d"
-                    % (cmd, proc.returncode)
-                )
-        except OSError as exc:
-            raise self.severe(u"problems with '%s' directive: %s."
-                              % (self.name, ErrorString(exc)))
-        return out
-
     def nestedParse(self, lines, fname):
         content = ViewList()
         node    = nodes.section()
index a8a1aff6445e04868148091bc907d8f8dfe30cd6..5d47ed44394985a8a2bbc24b64ba146c18b36def 100644 (file)
@@ -1,4 +1,6 @@
 # jinja2>=3.1 is not compatible with Sphinx<4.0
 jinja2<3.1
+# alabaster>=0.7.14 is not compatible with Sphinx<=3.3
+alabaster<0.7.14
 Sphinx==2.4.4
 pyyaml
index b58efa99df527d3d870d9572e6ee7f18912fe99f..41f1efbe64bb2898f1770deb128630b316a68a08 100644 (file)
@@ -12,5 +12,7 @@
 <script type="text/javascript"> <!--
   var sbar = document.getElementsByClassName("sphinxsidebar")[0];
   let currents = document.getElementsByClassName("current")
-  sbar.scrollTop = currents[currents.length - 1].offsetTop;
+  if (currents.length) {
+    sbar.scrollTop = currents[currents.length - 1].offsetTop;
+  }
   --> </script>
index 1ce353cb232a3b72c663082bd374a352fd2ba620..dba3e5f6561295d0a05b84935b32f64505c51a15 100644 (file)
@@ -68,13 +68,14 @@ User API
 
 ::
 
-  int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len);
+  int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
 
-sends a message across to the remote processor on a given channel.
-The caller should specify the channel, the data it wants to send,
+sends a message across to the remote processor from the given endpoint.
+The caller should specify the endpoint, the data it wants to send,
 and its length (in bytes). The message will be sent on the specified
-channel, i.e. its source and destination address fields will be
-set to the channel's src and dst addresses.
+endpoint's channel, i.e. its source and destination address fields will be
+respectively set to the endpoint's src address and its parent channel
+dst addresses.
 
 In case there are no TX buffers available, the function will block until
 one becomes available (i.e. until the remote processor consumes
@@ -87,17 +88,18 @@ Returns 0 on success and an appropriate error value on failure.
 
 ::
 
-  int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst);
+  int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
 
-sends a message across to the remote processor on a given channel,
+sends a message across to the remote processor from a given endpoint,
 to a destination address provided by the caller.
 
-The caller should specify the channel, the data it wants to send,
+The caller should specify the endpoint, the data it wants to send,
 its length (in bytes), and an explicit destination address.
 
 The message will then be sent to the remote processor to which the
-channel belongs, using the channel's src address, and the user-provided
-dst address (thus the channel's dst address will be ignored).
+endpoint's channel belongs, using the endpoint's src address,
+and the user-provided dst address (thus the channel's dst address
+will be ignored).
 
 In case there are no TX buffers available, the function will block until
 one becomes available (i.e. until the remote processor consumes
@@ -110,18 +112,19 @@ Returns 0 on success and an appropriate error value on failure.
 
 ::
 
-  int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
+  int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
                                                        void *data, int len);
 
 
 sends a message across to the remote processor, using the src and dst
 addresses provided by the user.
 
-The caller should specify the channel, the data it wants to send,
+The caller should specify the endpoint, the data it wants to send,
 its length (in bytes), and explicit source and destination addresses.
 The message will then be sent to the remote processor to which the
-channel belongs, but the channel's src and dst addresses will be
-ignored (and the user-provided addresses will be used instead).
+endpoint's channel belongs, but the endpoint's src and channel dst
+addresses will be ignored (and the user-provided addresses will
+be used instead).
 
 In case there are no TX buffers available, the function will block until
 one becomes available (i.e. until the remote processor consumes
@@ -134,13 +137,14 @@ Returns 0 on success and an appropriate error value on failure.
 
 ::
 
-  int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len);
+  int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
 
-sends a message across to the remote processor on a given channel.
-The caller should specify the channel, the data it wants to send,
+sends a message across to the remote processor from a given endpoint.
+The caller should specify the endpoint, the data it wants to send,
 and its length (in bytes). The message will be sent on the specified
-channel, i.e. its source and destination address fields will be
-set to the channel's src and dst addresses.
+endpoint's channel, i.e. its source and destination address fields will be
+respectively set to the endpoint's src address and its parent channel
+dst addresses.
 
 In case there are no TX buffers available, the function will immediately
 return -ENOMEM without waiting until one becomes available.
@@ -150,10 +154,10 @@ Returns 0 on success and an appropriate error value on failure.
 
 ::
 
-  int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst)
+  int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
 
 
-sends a message across to the remote processor on a given channel,
+sends a message across to the remote processor from a given endpoint,
 to a destination address provided by the user.
 
 The user should specify the channel, the data it wants to send,
@@ -171,7 +175,7 @@ Returns 0 on success and an appropriate error value on failure.
 
 ::
 
-  int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
+  int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
                                                        void *data, int len);
 
 
@@ -284,7 +288,7 @@ content to the console.
        dev_info(&rpdev->dev, "chnl: 0x%x -> 0x%x\n", rpdev->src, rpdev->dst);
 
        /* send a message on our channel */
-       err = rpmsg_send(rpdev, "hello!", 6);
+       err = rpmsg_send(rpdev->ept, "hello!", 6);
        if (err) {
                pr_err("rpmsg_send failed: %d\n", err);
                return err;
index 826e59a698da197235986b567521732de8a34287..d4f93d6a2d6335f01e9b65aa8401e0ef9951b59c 100644 (file)
@@ -624,6 +624,10 @@ They are also listed in the folder /sys/bus/event_source/devices/cs_etm/format/
    * - timestamp
      - Session local version of the system wide setting: :ref:`ETMv4_MODE_TIMESTAMP
        <coresight-timestamp>`
+   * - cc_threshold
+     - Cycle count threshold value. If nothing is provided here or the provided value is 0, then the
+       default value i.e. 0x100 will be used. If the provided value is less than minimum cycles threshold
+       value, as indicated via TRCIDR3.CCITMIN, then the minimum value will be used instead.
 
 How to use the STM module
 -------------------------
index 16122a8895ba30b3b31072e61586a470f1d06ee7..7e7b8ec1793483c4843b2281475d4cf9e300b021 100644 (file)
@@ -218,6 +218,27 @@ of ftrace. Here is a list of some of the key files:
 
        This displays the total combined size of all the trace buffers.
 
+  buffer_subbuf_size_kb:
+
+       This sets or displays the sub buffer size. The ring buffer is broken up
+       into several same size "sub buffers". An event can not be bigger than
+       the size of the sub buffer. Normally, the sub buffer is the size of the
+       architecture's page (4K on x86). The sub buffer also contains meta data
+       at the start which also limits the size of an event.  That means when
+       the sub buffer is a page size, no event can be larger than the page
+       size minus the sub buffer meta data.
+
+       Note, the buffer_subbuf_size_kb is a way for the user to specify the
+       minimum size of the subbuffer. The kernel may make it bigger due to the
+       implementation details, or simply fail the operation if the kernel can
+       not handle the request.
+
+       Changing the sub buffer size allows for events to be larger than the
+       page size.
+
+       Note: When changing the sub-buffer size, tracing is stopped and any
+       data in the ring buffer and the snapshot buffer will be discarded.
+
   free_buffer:
 
        If a process is performing tracing, and the ring buffer should be
index f825970a14957346ba05e19981f6e5bece8f486f..e1636e579c9ccdfec96545a25502a6ef60de6383 100644 (file)
@@ -4,7 +4,7 @@ Kernel Probes (Kprobes)
 
 :Author: Jim Keniston <jkenisto@us.ibm.com>
 :Author: Prasanna S Panchamukhi <prasanna.panchamukhi@gmail.com>
-:Author: Masami Hiramatsu <mhiramat@redhat.com>
+:Author: Masami Hiramatsu <mhiramat@kernel.org>
 
 .. CONTENTS
 
@@ -321,6 +321,7 @@ architectures:
 - mips
 - s390
 - parisc
+- loongarch
 
 Configuring Kprobes
 ===================
index 82bfac180bdc04b4fcc3effed488fc96cdd3163d..cec38dda8298c1c0c5993f3f1602873eac91d467 100644 (file)
@@ -5,4 +5,4 @@
 :Original: Documentation/arch/loongarch/features.rst
 :Translator: Huacai Chen <chenhuacai@loongson.cn>
 
-.. kernel-feat:: $srctree/Documentation/features loongarch
+.. kernel-feat:: features loongarch
index da1b956e4a40f6a5d21988b853c0802011f1cd29..0d6df97db069bb3a650208733ec17aff19f27cf3 100644 (file)
@@ -10,4 +10,4 @@
 
 .. _cn_features:
 
-.. kernel-feat:: $srctree/Documentation/features mips
+.. kernel-feat:: features mips
index b64e430f55aef3a82edda61d3fab92c2c3c2bcd6..c2175fd32b54b9babe96cd7979bd0df8ed270041 100644 (file)
@@ -5,5 +5,5 @@
 :Original: Documentation/arch/loongarch/features.rst
 :Translator: Huacai Chen <chenhuacai@loongson.cn>
 
-.. kernel-feat:: $srctree/Documentation/features loongarch
+.. kernel-feat:: features loongarch
 
index f6941042003545858add1dd9e14e3b3c86d5f400..3d3906c4d08e29cd481b37c0d3db4d224ad7513a 100644 (file)
@@ -10,5 +10,5 @@
 
 .. _tw_features:
 
-.. kernel-feat:: $srctree/Documentation/features mips
+.. kernel-feat:: features mips
 
index 29072c166d236a25e175df3f1677b607a4449013..077dfac7ed98f7911d731312eb09631e41c63772 100644 (file)
@@ -448,15 +448,17 @@ Function-specific configfs interface
 The function name to use when creating the function directory is "ncm".
 The NCM function provides these attributes in its function directory:
 
-       =============== ==================================================
-       ifname          network device interface name associated with this
-                       function instance
-       qmult           queue length multiplier for high and super speed
-       host_addr       MAC address of host's end of this
-                       Ethernet over USB link
-       dev_addr        MAC address of device's end of this
-                       Ethernet over USB link
-       =============== ==================================================
+       ======================= ==================================================
+       ifname                  network device interface name associated with this
+                               function instance
+       qmult                   queue length multiplier for high and super speed
+       host_addr               MAC address of host's end of this
+                               Ethernet over USB link
+       dev_addr                MAC address of device's end of this
+                               Ethernet over USB link
+       max_segment_size        Segment size required for P2P connections. This
+                               will set MTU to (max_segment_size - 14) bytes
+       ======================= ==================================================
 
 and after creating the functions/ncm.<instance name> they contain default
 values: qmult is 5, dev_addr and host_addr are randomly selected.
index 818a1648b38729459d62d555e5903fe9de1aad8f..59b2132b584d70873c537150d456893e593fc46d 100644 (file)
@@ -81,9 +81,6 @@ feature must be kept in the implementation.
 Potential future improvements
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-- Report more events (suspend, resume, etc.) through
-  ``USB_RAW_IOCTL_EVENT_FETCH``.
-
 - Support ``O_NONBLOCK`` I/O. This would be another mode of operation, where
   Raw Gadget would not wait until the completion of each USB request.
 
index 7025b37510279530058dc68f66a1140fd72458a5..3ec0b7a455a0cf489b93683a49b5362cded0b570 100644 (file)
@@ -147,10 +147,29 @@ described as 'basic' will be available.
 The new VM has no virtual cpus and no memory.
 You probably want to use 0 as machine type.
 
+X86:
+^^^^
+
+Supported X86 VM types can be queried via KVM_CAP_VM_TYPES.
+
+S390:
+^^^^^
+
 In order to create user controlled virtual machines on S390, check
 KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
 privileged user (CAP_SYS_ADMIN).
 
+MIPS:
+^^^^^
+
+To use hardware assisted virtualization on MIPS (VZ ASE) rather than
+the default trap & emulate implementation (which changes the virtual
+memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
+flag KVM_VM_MIPS_VZ.
+
+ARM64:
+^^^^^^
+
 On arm64, the physical address size for a VM (IPA Size limit) is limited
 to 40bits by default. The limit can be configured if the host supports the
 extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use
@@ -608,18 +627,6 @@ interrupt number dequeues the interrupt.
 This is an asynchronous vcpu ioctl and can be invoked from any thread.
 
 
-4.17 KVM_DEBUG_GUEST
---------------------
-
-:Capability: basic
-:Architectures: none
-:Type: vcpu ioctl
-:Parameters: none)
-:Returns: -1 on error
-
-Support for this has been removed.  Use KVM_SET_GUEST_DEBUG instead.
-
-
 4.18 KVM_GET_MSRS
 -----------------
 
@@ -6192,6 +6199,130 @@ to know what fields can be changed for the system register described by
 ``op0, op1, crn, crm, op2``. KVM rejects ID register values that describe a
 superset of the features supported by the system.
 
+4.140 KVM_SET_USER_MEMORY_REGION2
+---------------------------------
+
+:Capability: KVM_CAP_USER_MEMORY2
+:Architectures: all
+:Type: vm ioctl
+:Parameters: struct kvm_userspace_memory_region2 (in)
+:Returns: 0 on success, -1 on error
+
+KVM_SET_USER_MEMORY_REGION2 is an extension to KVM_SET_USER_MEMORY_REGION that
+allows mapping guest_memfd memory into a guest.  All fields shared with
+KVM_SET_USER_MEMORY_REGION behave identically.  Userspace can set KVM_MEM_GUEST_MEMFD
+in flags to have KVM bind the memory region to a given guest_memfd range of
+[guest_memfd_offset, guest_memfd_offset + memory_size].  The target guest_memfd
+must point at a file created via KVM_CREATE_GUEST_MEMFD on the current VM, and
+the target range must not be bound to any other memory region.  All standard
+bounds checks apply (use common sense).
+
+::
+
+  struct kvm_userspace_memory_region2 {
+       __u32 slot;
+       __u32 flags;
+       __u64 guest_phys_addr;
+       __u64 memory_size; /* bytes */
+       __u64 userspace_addr; /* start of the userspace allocated memory */
+       __u64 guest_memfd_offset;
+       __u32 guest_memfd;
+       __u32 pad1;
+       __u64 pad2[14];
+  };
+
+A KVM_MEM_GUEST_MEMFD region _must_ have a valid guest_memfd (private memory) and
+userspace_addr (shared memory).  However, "valid" for userspace_addr simply
+means that the address itself must be a legal userspace address.  The backing
+mapping for userspace_addr is not required to be valid/populated at the time of
+KVM_SET_USER_MEMORY_REGION2, e.g. shared memory can be lazily mapped/allocated
+on-demand.
+
+When mapping a gfn into the guest, KVM selects shared vs. private, i.e. consumes
+userspace_addr vs. guest_memfd, based on the gfn's KVM_MEMORY_ATTRIBUTE_PRIVATE
+state.  At VM creation time, all memory is shared, i.e. the PRIVATE attribute
+is '0' for all gfns.  Userspace can control whether memory is shared/private by
+toggling KVM_MEMORY_ATTRIBUTE_PRIVATE via KVM_SET_MEMORY_ATTRIBUTES as needed.
+
+4.141 KVM_SET_MEMORY_ATTRIBUTES
+-------------------------------
+
+:Capability: KVM_CAP_MEMORY_ATTRIBUTES
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_memory_attributes (in)
+:Returns: 0 on success, <0 on error
+
+KVM_SET_MEMORY_ATTRIBUTES allows userspace to set memory attributes for a range
+of guest physical memory.
+
+::
+
+  struct kvm_memory_attributes {
+       __u64 address;
+       __u64 size;
+       __u64 attributes;
+       __u64 flags;
+  };
+
+  #define KVM_MEMORY_ATTRIBUTE_PRIVATE           (1ULL << 3)
+
+The address and size must be page aligned.  The supported attributes can be
+retrieved via ioctl(KVM_CHECK_EXTENSION) on KVM_CAP_MEMORY_ATTRIBUTES.  If
+executed on a VM, KVM_CAP_MEMORY_ATTRIBUTES precisely returns the attributes
+supported by that VM.  If executed at system scope, KVM_CAP_MEMORY_ATTRIBUTES
+returns all attributes supported by KVM.  The only attribute defined at this
+time is KVM_MEMORY_ATTRIBUTE_PRIVATE, which marks the associated gfn as being
+guest private memory.
+
+Note, there is no "get" API.  Userspace is responsible for explicitly tracking
+the state of a gfn/page as needed.
+
+The "flags" field is reserved for future extensions and must be '0'.
+
+4.142 KVM_CREATE_GUEST_MEMFD
+----------------------------
+
+:Capability: KVM_CAP_GUEST_MEMFD
+:Architectures: none
+:Type: vm ioctl
+:Parameters: struct kvm_create_guest_memfd(in)
+:Returns: 0 on success, <0 on error
+
+KVM_CREATE_GUEST_MEMFD creates an anonymous file and returns a file descriptor
+that refers to it.  guest_memfd files are roughly analogous to files created
+via memfd_create(), e.g. guest_memfd files live in RAM, have volatile storage,
+and are automatically released when the last reference is dropped.  Unlike
+"regular" memfd_create() files, guest_memfd files are bound to their owning
+virtual machine (see below), cannot be mapped, read, or written by userspace,
+and cannot be resized  (guest_memfd files do however support PUNCH_HOLE).
+
+::
+
+  struct kvm_create_guest_memfd {
+       __u64 size;
+       __u64 flags;
+       __u64 reserved[6];
+  };
+
+Conceptually, the inode backing a guest_memfd file represents physical memory,
+i.e. is coupled to the virtual machine as a thing, not to a "struct kvm".  The
+file itself, which is bound to a "struct kvm", is that instance's view of the
+underlying memory, e.g. effectively provides the translation of guest addresses
+to host memory.  This allows for use cases where multiple KVM structures are
+used to manage a single virtual machine, e.g. when performing intrahost
+migration of a virtual machine.
+
+KVM currently only supports mapping guest_memfd via KVM_SET_USER_MEMORY_REGION2,
+and more specifically via the guest_memfd and guest_memfd_offset fields in
+"struct kvm_userspace_memory_region2", where guest_memfd_offset is the offset
+into the guest_memfd instance.  For a given guest_memfd file, there can be at
+most one mapping per page, i.e. binding multiple memory regions to a single
+guest_memfd range is not allowed (any number of memory regions can be bound to
+a single guest_memfd file, but the bound ranges must not overlap).
+
+See KVM_SET_USER_MEMORY_REGION2 for additional details.
+
 5. The kvm_run structure
 ========================
 
@@ -6824,6 +6955,30 @@ array field represents return values. The userspace should update the return
 values of SBI call before resuming the VCPU. For more details on RISC-V SBI
 spec refer, https://github.com/riscv/riscv-sbi-doc.
 
+::
+
+               /* KVM_EXIT_MEMORY_FAULT */
+               struct {
+  #define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3)
+                       __u64 flags;
+                       __u64 gpa;
+                       __u64 size;
+               } memory_fault;
+
+KVM_EXIT_MEMORY_FAULT indicates the vCPU has encountered a memory fault that
+could not be resolved by KVM.  The 'gpa' and 'size' (in bytes) describe the
+guest physical address range [gpa, gpa + size) of the fault.  The 'flags' field
+describes properties of the faulting access that are likely pertinent:
+
+ - KVM_MEMORY_EXIT_FLAG_PRIVATE - When set, indicates the memory fault occurred
+   on a private memory access.  When clear, indicates the fault occurred on a
+   shared access.
+
+Note!  KVM_EXIT_MEMORY_FAULT is unique among all KVM exit reasons in that it
+accompanies a return code of '-1', not '0'!  errno will always be set to EFAULT
+or EHWPOISON when KVM exits with KVM_EXIT_MEMORY_FAULT; userspace should assume
+kvm_run.exit_reason is stale/undefined for all other error numbers.
+
 ::
 
     /* KVM_EXIT_NOTIFY */
@@ -7858,6 +8013,27 @@ This capability is aimed to mitigate the threat that malicious VMs can
 cause CPU stuck (due to event windows don't open up) and make the CPU
 unavailable to host or other VMs.
 
+7.34 KVM_CAP_MEMORY_FAULT_INFO
+------------------------------
+
+:Architectures: x86
+:Returns: Informational only, -EINVAL on direct KVM_ENABLE_CAP.
+
+The presence of this capability indicates that KVM_RUN will fill
+kvm_run.memory_fault if KVM cannot resolve a guest page fault VM-Exit, e.g. if
+there is a valid memslot but no backing VMA for the corresponding host virtual
+address.
+
+The information in kvm_run.memory_fault is valid if and only if KVM_RUN returns
+an error with errno=EFAULT or errno=EHWPOISON *and* kvm_run.exit_reason is set
+to KVM_EXIT_MEMORY_FAULT.
+
+Note: Userspaces which attempt to resolve memory faults so that they can retry
+KVM_RUN are encouraged to guard against repeatedly receiving the same
+error/annotated fault.
+
+See KVM_EXIT_MEMORY_FAULT for more information.
+
 8. Other capabilities.
 ======================
 
@@ -8374,6 +8550,7 @@ PVHVM guests. Valid flags are::
   #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL             (1 << 4)
   #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND               (1 << 5)
   #define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG      (1 << 6)
+  #define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE      (1 << 7)
 
 The KVM_XEN_HVM_CONFIG_HYPERCALL_MSR flag indicates that the KVM_XEN_HVM_CONFIG
 ioctl is available, for the guest to set its hypercall page.
@@ -8417,6 +8594,11 @@ behave more correctly, not using the XEN_RUNSTATE_UPDATE flag until/unless
 specifically enabled (by the guest making the hypercall, causing the VMM
 to enable the KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG attribute).
 
+The KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE flag indicates that KVM supports
+clearing the PVCLOCK_TSC_STABLE_BIT flag in Xen pvclock sources. This will be
+done when the KVM_CAP_XEN_HVM ioctl sets the
+KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE flag.
+
 8.31 KVM_CAP_PPC_MULTITCE
 -------------------------
 
@@ -8596,6 +8778,19 @@ block sizes is exposed in KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES as a
 64-bit bitmap (each bit describing a block size). The default value is
 0, to disable the eager page splitting.
 
+8.41 KVM_CAP_VM_TYPES
+---------------------
+
+:Capability: KVM_CAP_VM_TYPES
+:Architectures: x86
+:Type: system ioctl
+
+This capability returns a bitmap of supported VM types.  The 1-setting of bit @n
+means the VM type with value @n is supported.  Possible values of @n are::
+
+  #define KVM_X86_DEFAULT_VM   0
+  #define KVM_X86_SW_PROTECTED_VM      1
+
 9. Known KVM API problems
 =========================
 
index 3a034db5e55f89120db01f8c7b4853bdca08cf4d..02880d5552d5fad072aa0fdc7383584249834928 100644 (file)
@@ -43,10 +43,9 @@ On x86:
 
 - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock and kvm->arch.xen.xen_lock
 
-- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock and
-  kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
-  cannot be taken without already holding kvm->arch.mmu_lock (typically with
-  ``read_lock`` for the TDP MMU, thus the need for additional spinlocks).
+- kvm->arch.mmu_lock is an rwlock; critical sections for
+  kvm->arch.tdp_mmu_pages_lock and kvm->arch.mmu_unsync_pages_lock must
+  also take kvm->arch.mmu_lock
 
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
index 00c99cc459daa2c578eaf7609ab4e845944f5249..73d898383e51f9c3a7ef6769cf9f6f6095f522e9 100644 (file)
@@ -441,6 +441,13 @@ W: http://wiki.analog.com/AD7879
 W:     https://ez.analog.com/linux-software-drivers
 F:     drivers/input/touchscreen/ad7879.c
 
+ADAFRUIT MINI I2C GAMEPAD
+M:     Anshul Dalal <anshulusr@gmail.com>
+L:     linux-input@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/input/adafruit,seesaw-gamepad.yaml
+F:     drivers/input/joystick/adafruit-seesaw.c
+
 ADDRESS SPACE LAYOUT RANDOMIZATION (ASLR)
 M:     Jiri Kosina <jikos@kernel.org>
 S:     Maintained
@@ -890,6 +897,14 @@ Q: https://patchwork.kernel.org/project/linux-rdma/list/
 F:     drivers/infiniband/hw/efa/
 F:     include/uapi/rdma/efa-abi.h
 
+AMD AXI W1 DRIVER
+M:     Kris Chaplin <kris.chaplin@amd.com>
+R:     Thomas Delev <thomas.delev@amd.com>
+R:     Michal Simek <michal.simek@amd.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/w1/amd,axi-1wire-host.yaml
+F:     drivers/w1/masters/amd_axi_w1.c
+
 AMD CDX BUS DRIVER
 M:     Nipun Gupta <nipun.gupta@amd.com>
 M:     Nikhil Agarwal <nikhil.agarwal@amd.com>
@@ -1123,6 +1138,14 @@ F:       Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130
 F:     Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml
 F:     drivers/iio/adc/ad4130.c
 
+ANALOG DEVICES INC AD7091R DRIVER
+M:     Marcelo Schmitt <marcelo.schmitt@analog.com>
+L:     linux-iio@vger.kernel.org
+S:     Supported
+W:     http://ez.analog.com/community/linux-device-drivers
+F:     Documentation/devicetree/bindings/iio/adc/adi,ad7091r*
+F:     drivers/iio/adc/drivers/iio/adc/ad7091r*
+
 ANALOG DEVICES INC AD7192 DRIVER
 M:     Alexandru Tachici <alexandru.tachici@analog.com>
 L:     linux-iio@vger.kernel.org
@@ -2048,7 +2071,6 @@ ARM/CORESIGHT FRAMEWORK AND DRIVERS
 M:     Suzuki K Poulose <suzuki.poulose@arm.com>
 R:     Mike Leach <mike.leach@linaro.org>
 R:     James Clark <james.clark@arm.com>
-R:     Leo Yan <leo.yan@linaro.org>
 L:     coresight@lists.linaro.org (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -3044,6 +3066,13 @@ S:       Supported
 W:     http://www.akm.com/
 F:     drivers/iio/magnetometer/ak8974.c
 
+AOSONG AGS02MA TVOC SENSOR DRIVER
+M:     Anshul Dalal <anshulusr@gmail.com>
+L:     linux-iio@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/iio/chemical/aosong,ags02ma.yaml
+F:     drivers/iio/chemical/ags02ma.c
+
 ASC7621 HARDWARE MONITOR DRIVER
 M:     George Joseph <george.joseph@fairview5.com>
 L:     linux-hwmon@vger.kernel.org
@@ -3139,10 +3168,10 @@ F:      drivers/hwmon/asus-ec-sensors.c
 
 ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
 M:     Corentin Chary <corentin.chary@gmail.com>
-L:     acpi4asus-user@lists.sourceforge.net
+M:     Luke D. Jones <luke@ljones.dev>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
-W:     http://acpi4asus.sf.net
+W:     https://asus-linux.org/
 F:     drivers/platform/x86/asus*.c
 F:     drivers/platform/x86/eeepc*.c
 
@@ -3361,9 +3390,8 @@ F:        Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml
 F:     drivers/iio/adc/hx711.c
 
 AX.25 NETWORK LAYER
-M:     Ralf Baechle <ralf@linux-mips.org>
 L:     linux-hams@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     https://linux-ax25.in-berlin.de
 F:     include/net/ax25.h
 F:     include/uapi/linux/ax25.h
@@ -3602,7 +3630,6 @@ F:        drivers/mtd/devices/block2mtd.c
 
 BLUETOOTH DRIVERS
 M:     Marcel Holtmann <marcel@holtmann.org>
-M:     Johan Hedberg <johan.hedberg@gmail.com>
 M:     Luiz Augusto von Dentz <luiz.dentz@gmail.com>
 L:     linux-bluetooth@vger.kernel.org
 S:     Supported
@@ -3643,6 +3670,13 @@ S:       Maintained
 F:     Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml
 F:     drivers/iio/accel/bma400*
 
+BOSCH SENSORTEC BMI323 IMU IIO DRIVER
+M:     Jagath Jog J <jagathjog1996@gmail.com>
+L:     linux-iio@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml
+F:     drivers/iio/imu/bmi323/
+
 BPF JIT for ARM
 M:     Russell King <linux@armlinux.org.uk>
 M:     Puranjay Mohan <puranjay12@gmail.com>
@@ -3658,6 +3692,13 @@ L:       bpf@vger.kernel.org
 S:     Supported
 F:     arch/arm64/net/
 
+BPF JIT for LOONGARCH
+M:     Tiezhu Yang <yangtiezhu@loongson.cn>
+R:     Hengqi Chen <hengqi.chen@gmail.com>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     arch/loongarch/net/
+
 BPF JIT for MIPS (32-BIT AND 64-BIT)
 M:     Johan Almbladh <johan.almbladh@anyfinetworks.com>
 M:     Paul Burton <paulburton@kernel.org>
@@ -4128,14 +4169,14 @@ F:      drivers/firmware/broadcom/tee_bnxt_fw.c
 F:     drivers/net/ethernet/broadcom/bnxt/
 F:     include/linux/firmware/broadcom/tee_bnxt_fw.h
 
-BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
-M:     Arend van Spriel <aspriel@gmail.com>
-M:     Franky Lin <franky.lin@broadcom.com>
-M:     Hante Meuleman <hante.meuleman@broadcom.com>
+BROADCOM BRCM80211 IEEE802.11 WIRELESS DRIVERS
+M:     Arend van Spriel <arend.vanspriel@broadcom.com>
 L:     linux-wireless@vger.kernel.org
+L:     brcm80211@lists.linux.dev
 L:     brcm80211-dev-list.pdl@broadcom.com
 S:     Supported
 F:     drivers/net/wireless/broadcom/brcm80211/
+F:     include/linux/platform_data/brcmfmac.h
 
 BROADCOM BRCMSTB GPIO DRIVER
 M:     Doug Berger <opendmb@gmail.com>
@@ -4506,7 +4547,7 @@ F:        drivers/net/ieee802154/ca8210.c
 
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M:     David Howells <dhowells@redhat.com>
-L:     linux-cachefs@redhat.com (moderated for non-subscribers)
+L:     netfs@lists.linux.dev
 S:     Supported
 F:     Documentation/filesystems/caching/cachefiles.rst
 F:     fs/cachefiles/
@@ -4690,11 +4731,8 @@ F:       drivers/i2c/busses/i2c-octeon*
 F:     drivers/i2c/busses/i2c-thunderx*
 
 CAVIUM LIQUIDIO NETWORK DRIVER
-M:     Derek Chickles <dchickles@marvell.com>
-M:     Satanand Burla <sburla@marvell.com>
-M:     Felix Manlunas <fmanlunas@marvell.com>
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Orphan
 W:     http://www.marvell.com
 F:     drivers/net/ethernet/cavium/liquidio/
 
@@ -5198,7 +5236,7 @@ X:        drivers/clk/clkdev.c
 COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
 M:     Steve French <sfrench@samba.org>
 R:     Paulo Alcantara <pc@manguebit.com> (DFS, global name space)
-R:     Ronnie Sahlberg <lsahlber@redhat.com> (directory leases, sparse files)
+R:     Ronnie Sahlberg <ronniesahlberg@gmail.com> (directory leases, sparse files)
 R:     Shyam Prasad N <sprasad@microsoft.com> (multichannel)
 R:     Tom Talpey <tom@talpey.com> (RDMA, smbdirect)
 L:     linux-cifs@vger.kernel.org
@@ -5251,6 +5289,7 @@ M:        Dan Williams <dan.j.williams@intel.com>
 L:     linux-cxl@vger.kernel.org
 S:     Maintained
 F:     drivers/cxl/
+F:     include/linux/cxl-event.h
 F:     include/uapi/linux/cxl_mem.h
 F:     tools/testing/cxl/
 
@@ -5384,6 +5423,12 @@ F:       include/linux/counter.h
 F:     include/uapi/linux/counter.h
 F:     tools/counter/
 
+COUNTER WATCH EVENTS TOOL
+M:     Fabrice Gasnier <fabrice.gasnier@foss.st.com>
+L:     linux-iio@vger.kernel.org
+S:     Maintained
+F:     tools/counter/counter_watch_events.c
+
 CP2615 I2C DRIVER
 M:     Bence Csókás <bence98@sch.bme.hu>
 S:     Maintained
@@ -5913,7 +5958,6 @@ S:        Maintained
 F:     drivers/platform/x86/dell/dell-wmi-descriptor.c
 
 DELL WMI HARDWARE PRIVACY SUPPORT
-M:     Perry Yuan <Perry.Yuan@dell.com>
 L:     Dell.Client.Kernel@dell.com
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
@@ -7910,12 +7954,13 @@ L:      rust-for-linux@vger.kernel.org
 S:     Maintained
 F:     rust/kernel/net/phy.rs
 
-EXEC & BINFMT API
+EXEC & BINFMT API, ELF
 R:     Eric Biederman <ebiederm@xmission.com>
 R:     Kees Cook <keescook@chromium.org>
 L:     linux-mm@kvack.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
+F:     Documentation/userspace-api/ELF.rst
 F:     fs/*binfmt_*.c
 F:     fs/exec.c
 F:     include/linux/binfmts.h
@@ -8176,6 +8221,20 @@ S:       Supported
 F:     fs/iomap/
 F:     include/linux/iomap.h
 
+FILESYSTEMS [NETFS LIBRARY]
+M:     David Howells <dhowells@redhat.com>
+R:     Jeff Layton <jlayton@kernel.org>
+L:     netfs@lists.linux.dev
+L:     linux-fsdevel@vger.kernel.org
+S:     Supported
+F:     Documentation/filesystems/caching/
+F:     Documentation/filesystems/netfs_library.rst
+F:     fs/netfs/
+F:     include/linux/fscache*.h
+F:     include/linux/netfs.h
+F:     include/trace/events/fscache.h
+F:     include/trace/events/netfs.h
+
 FILESYSTEMS [STACKABLE]
 M:     Miklos Szeredi <miklos@szeredi.hu>
 M:     Amir Goldstein <amir73il@gmail.com>
@@ -8621,14 +8680,6 @@ F:       Documentation/power/freezing-of-tasks.rst
 F:     include/linux/freezer.h
 F:     kernel/freezer.c
 
-FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
-M:     David Howells <dhowells@redhat.com>
-L:     linux-cachefs@redhat.com (moderated for non-subscribers)
-S:     Supported
-F:     Documentation/filesystems/caching/
-F:     fs/fscache/
-F:     include/linux/fscache*.h
-
 FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
 M:     Eric Biggers <ebiggers@kernel.org>
 M:     Theodore Y. Ts'o <tytso@mit.edu>
@@ -9731,6 +9782,13 @@ F:       lib/test_hmm*
 F:     mm/hmm*
 F:     tools/testing/selftests/mm/*hmm*
 
+HONEYWELL HSC030PA PRESSURE SENSOR SERIES IIO DRIVER
+M:     Petre Rodan <petre.rodan@subdimension.ro>
+L:     linux-iio@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/iio/pressure/honeywell,hsc030pa.yaml
+F:     drivers/iio/pressure/hsc030pa*
+
 HONEYWELL MPRLS0025PA PRESSURE SENSOR SERIES IIO DRIVER
 M:     Andreas Klinger <ak@it-klinger.de>
 L:     linux-iio@vger.kernel.org
@@ -10011,7 +10069,7 @@ F:      Documentation/i2c/busses/i2c-parport.rst
 F:     drivers/i2c/busses/i2c-parport.c
 
 I2C SUBSYSTEM
-M:     Wolfram Sang <wsa@kernel.org>
+M:     Wolfram Sang <wsa+renesas@sang-engineering.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 W:     https://i2c.wiki.kernel.org/
@@ -10033,7 +10091,7 @@ L:      linux-i2c@vger.kernel.org
 S:     Maintained
 W:     https://i2c.wiki.kernel.org/
 Q:     https://patchwork.ozlabs.org/project/linux-i2c/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andi.shyti/linux.git
 F:     Documentation/devicetree/bindings/i2c/
 F:     drivers/i2c/algos/
 F:     drivers/i2c/busses/
@@ -10189,7 +10247,6 @@ IBM Power SRIOV Virtual NIC Device Driver
 M:     Haren Myneni <haren@linux.ibm.com>
 M:     Rick Lindsley <ricklind@linux.ibm.com>
 R:     Nick Child <nnac123@linux.ibm.com>
-R:     Dany Madden <danymadden@us.ibm.com>
 R:     Thomas Falcon <tlfalcon@linux.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -10226,7 +10283,7 @@ F:      drivers/scsi/ibmvscsi/ibmvscsi*
 F:     include/scsi/viosrp.h
 
 IBM Power Virtual SCSI Device Target Driver
-M:     Michael Cyr <mikecyr@linux.ibm.com>
+M:     Tyrel Datwyler <tyreld@linux.ibm.com>
 L:     linux-scsi@vger.kernel.org
 L:     target-devel@vger.kernel.org
 S:     Supported
@@ -10346,8 +10403,8 @@ IIO LIGHT SENSOR GAIN-TIME-SCALE HELPERS
 M:     Matti Vaittinen <mazziesaccount@gmail.com>
 L:     linux-iio@vger.kernel.org
 S:     Maintained
-F:     drivers/iio/light/gain-time-scale-helper.c
-F:     drivers/iio/light/gain-time-scale-helper.h
+F:     drivers/iio/industrialio-gts-helper.c
+F:     include/linux/iio/iio-gts-helper.h
 
 IIO MULTIPLEXER
 M:     Peter Rosin <peda@axentia.se>
@@ -10744,11 +10801,11 @@ F:    drivers/gpio/gpio-tangier.h
 
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:     Zhenyu Wang <zhenyuw@linux.intel.com>
-M:     Zhi Wang <zhi.a.wang@intel.com>
+M:     Zhi Wang <zhi.wang.linux@gmail.com>
 L:     intel-gvt-dev@lists.freedesktop.org
 L:     intel-gfx@lists.freedesktop.org
 S:     Supported
-W:     https://01.org/igvt-g
+W:     https://github.com/intel/gvt-linux/wiki
 T:     git https://github.com/intel/gvt-linux.git
 F:     drivers/gpu/drm/i915/gvt/
 
@@ -11070,7 +11127,6 @@ S:      Supported
 F:     drivers/net/wireless/intel/iwlegacy/
 
 INTEL WIRELESS WIFI LINK (iwlwifi)
-M:     Gregory Greenman <gregory.greenman@intel.com>
 M:     Miri Korenblit <miriam.rachel.korenblit@intel.com>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
@@ -11668,6 +11724,7 @@ F:      fs/smb/server/
 KERNEL UNIT TESTING FRAMEWORK (KUnit)
 M:     Brendan Higgins <brendanhiggins@google.com>
 M:     David Gow <davidgow@google.com>
+R:     Rae Moar <rmoar@google.com>
 L:     linux-kselftest@vger.kernel.org
 L:     kunit-dev@googlegroups.com
 S:     Maintained
@@ -12583,6 +12640,13 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/gpio/loongson,ls-gpio.yaml
 F:     drivers/gpio/gpio-loongson-64bit.c
 
+LOONGSON LS2X APB DMA DRIVER
+M:     Binbin Zhou <zhoubinbin@loongson.cn>
+L:     dmaengine@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml
+F:     drivers/dma/ls2x-apb-dma.c
+
 LOONGSON LS2X I2C DRIVER
 M:     Binbin Zhou <zhoubinbin@loongson.cn>
 L:     linux-i2c@vger.kernel.org
@@ -12724,17 +12788,23 @@ F:    Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt
 F:     drivers/i2c/muxes/i2c-mux-ltc4306.c
 
 LTP (Linux Test Project)
-M:     Mike Frysinger <vapier@gentoo.org>
 M:     Cyril Hrubis <chrubis@suse.cz>
-M:     Wanlong Gao <wanlong.gao@gmail.com>
 M:     Jan Stancek <jstancek@redhat.com>
-M:     Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
-M:     Alexey Kodanev <alexey.kodanev@oracle.com>
+M:     Petr Vorel <pvorel@suse.cz>
+M:     Li Wang <liwang@redhat.com>
+M:     Yang Xu <xuyang2018.jy@fujitsu.com>
 L:     ltp@lists.linux.it (subscribers-only)
 S:     Maintained
 W:     http://linux-test-project.github.io/
 T:     git https://github.com/linux-test-project/ltp.git
 
+LTR390 AMBIENT/UV LIGHT SENSOR DRIVER
+M:     Anshul Dalal <anshulusr@gmail.com>
+L:     linux-iio@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/iio/light/liteon,ltr390.yaml
+F:     drivers/iio/light/ltr390.c
+
 LYNX 28G SERDES PHY DRIVER
 M:     Ioana Ciornei <ioana.ciornei@nxp.com>
 L:     netdev@vger.kernel.org
@@ -12833,6 +12903,8 @@ M:      Alejandro Colomar <alx@kernel.org>
 L:     linux-man@vger.kernel.org
 S:     Maintained
 W:     http://www.kernel.org/doc/man-pages
+T:     git git://git.kernel.org/pub/scm/docs/man-pages/man-pages.git
+T:     git git://www.alejandro-colomar.es/src/alx/linux/man-pages/man-pages.git
 
 MANAGEMENT COMPONENT TRANSPORT PROTOCOL (MCTP)
 M:     Jeremy Kerr <jk@codeconstruct.com.au>
@@ -12947,10 +13019,10 @@ S:    Maintained
 F:     drivers/thermal/armada_thermal.c
 
 MARVELL MVNETA ETHERNET DRIVER
-M:     Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+M:     Marcin Wojtas <marcin.s.wojtas@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
-F:     drivers/net/ethernet/marvell/mvneta.*
+F:     drivers/net/ethernet/marvell/mvneta*
 
 MARVELL MVPP2 ETHERNET DRIVER
 M:     Marcin Wojtas <marcin.s.wojtas@gmail.com>
@@ -13070,6 +13142,14 @@ F:     Documentation/devicetree/bindings/hwmon/adi,max31827.yaml
 F:     Documentation/hwmon/max31827.rst
 F:     drivers/hwmon/max31827.c
 
+MAX31335 RTC DRIVER
+M:     Antoniu Miclaus <antoniu.miclaus@analog.com>
+L:     linux-rtc@vger.kernel.org
+S:     Supported
+W:     https://ez.analog.com/linux-software-drivers
+F:     Documentation/devicetree/bindings/rtc/adi,max31335.yaml
+F:     drivers/rtc/rtc-max31335.c
+
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 L:     linux-hwmon@vger.kernel.org
 S:     Orphan
@@ -13279,6 +13359,13 @@ F:     Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
 F:     drivers/iio/potentiometer/mcp4018.c
 F:     drivers/iio/potentiometer/mcp4531.c
 
+MCP4821 DAC DRIVER
+M:     Anshul Dalal <anshulusr@gmail.com>
+L:     linux-iio@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/iio/dac/microchip,mcp4821.yaml
+F:     drivers/iio/dac/mcp4821.c
+
 MCR20A IEEE-802.15.4 RADIO DRIVER
 M:     Stefan Schmidt <stefan@datenfreihafen.org>
 L:     linux-wpan@vger.kernel.org
@@ -13585,7 +13672,6 @@ F:      drivers/dma/mediatek/
 
 MEDIATEK ETHERNET DRIVER
 M:     Felix Fietkau <nbd@nbd.name>
-M:     John Crispin <john@phrozen.org>
 M:     Sean Wang <sean.wang@mediatek.com>
 M:     Mark Lee <Mark-MC.Lee@mediatek.com>
 M:     Lorenzo Bianconi <lorenzo@kernel.org>
@@ -13741,7 +13827,6 @@ F:      include/soc/mediatek/smi.h
 MEDIATEK SWITCH DRIVER
 M:     Arınç ÜNAL <arinc.unal@arinc9.com>
 M:     Daniel Golle <daniel@makrotopia.org>
-M:     Landen Chao <Landen.Chao@mediatek.com>
 M:     DENG Qingfang <dqfext@gmail.com>
 M:     Sean Wang <sean.wang@mediatek.com>
 L:     netdev@vger.kernel.org
@@ -13806,6 +13891,13 @@ S:     Supported
 W:     http://www.melexis.com
 F:     drivers/iio/temperature/mlx90632.c
 
+MELEXIS MLX90635 DRIVER
+M:     Crt Mori <cmo@melexis.com>
+L:     linux-iio@vger.kernel.org
+S:     Supported
+W:     http://www.melexis.com
+F:     drivers/iio/temperature/mlx90635.c
+
 MELFAS MIP4 TOUCHSCREEN DRIVER
 M:     Sangwon Jee <jeesw@melfas.com>
 S:     Supported
@@ -14276,11 +14368,12 @@ F:    Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
 F:     drivers/regulator/mcp16502.c
 
 MICROCHIP MCP3564 ADC DRIVER
-M:      Marius Cristea <marius.cristea@microchip.com>
-L:      linux-iio@vger.kernel.org
-S:      Supported
-F:      Documentation/devicetree/bindings/iio/adc/microchip,mcp3564.yaml
-F:      drivers/iio/adc/mcp3564.c
+M:     Marius Cristea <marius.cristea@microchip.com>
+L:     linux-iio@vger.kernel.org
+S:     Supported
+F:     Documentation/ABI/testing/sysfs-bus-iio-adc-mcp3564
+F:     Documentation/devicetree/bindings/iio/adc/microchip,mcp3564.yaml
+F:     drivers/iio/adc/mcp3564.c
 
 MICROCHIP MCP3911 ADC DRIVER
 M:     Marcus Folkesson <marcus.folkesson@gmail.com>
@@ -14740,6 +14833,13 @@ S:     Maintained
 F:     Documentation/driver-api/tty/moxa-smartio.rst
 F:     drivers/tty/mxser.*
 
+MP3309C BACKLIGHT DRIVER
+M:     Flavio Suligoi <f.suligoi@asem.it>
+L:     dri-devel@lists.freedesktop.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/leds/backlight/mps,mp3309c.yaml
+F:     drivers/video/backlight/mp3309c.c
+
 MR800 AVERMEDIA USB FM RADIO DRIVER
 M:     Alexey Klimov <klimov.linux@gmail.com>
 L:     linux-media@vger.kernel.org
@@ -15080,6 +15180,7 @@ F:      Documentation/networking/net_cachelines/net_device.rst
 F:     drivers/connector/
 F:     drivers/net/
 F:     include/dt-bindings/net/
+F:     include/linux/cn_proc.h
 F:     include/linux/etherdevice.h
 F:     include/linux/fcdevice.h
 F:     include/linux/fddidevice.h
@@ -15087,6 +15188,7 @@ F:      include/linux/hippidevice.h
 F:     include/linux/if_*
 F:     include/linux/inetdevice.h
 F:     include/linux/netdevice.h
+F:     include/uapi/linux/cn_proc.h
 F:     include/uapi/linux/if_*
 F:     include/uapi/linux/netdevice.h
 X:     drivers/net/wireless/
@@ -15375,6 +15477,15 @@ F:     include/linux/nitro_enclaves.h
 F:     include/uapi/linux/nitro_enclaves.h
 F:     samples/nitro_enclaves/
 
+NITRO SECURE MODULE (NSM)
+M:     Alexander Graf <graf@amazon.com>
+L:     linux-kernel@vger.kernel.org
+L:     The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com>
+S:     Supported
+W:     https://aws.amazon.com/ec2/nitro/nitro-enclaves/
+F:     drivers/misc/nsm.c
+F:     include/uapi/linux/nsm.h
+
 NOHZ, DYNTICKS SUPPORT
 M:     Frederic Weisbecker <frederic@kernel.org>
 M:     Thomas Gleixner <tglx@linutronix.de>
@@ -16750,9 +16861,8 @@ F:      Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
 F:     drivers/pci/controller/pcie-xilinx-cpm.c
 
 PCI ENDPOINT SUBSYSTEM
-M:     Lorenzo Pieralisi <lpieralisi@kernel.org>
+M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 M:     Krzysztof Wilczyński <kw@linux.com>
-R:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 R:     Kishon Vijay Abraham I <kishon@kernel.org>
 L:     linux-pci@vger.kernel.org
 S:     Supported
@@ -17041,10 +17151,10 @@ PERFORMANCE EVENTS SUBSYSTEM
 M:     Peter Zijlstra <peterz@infradead.org>
 M:     Ingo Molnar <mingo@redhat.com>
 M:     Arnaldo Carvalho de Melo <acme@kernel.org>
+M:     Namhyung Kim <namhyung@kernel.org>
 R:     Mark Rutland <mark.rutland@arm.com>
 R:     Alexander Shishkin <alexander.shishkin@linux.intel.com>
 R:     Jiri Olsa <jolsa@kernel.org>
-R:     Namhyung Kim <namhyung@kernel.org>
 R:     Ian Rogers <irogers@google.com>
 R:     Adrian Hunter <adrian.hunter@intel.com>
 L:     linux-perf-users@vger.kernel.org
@@ -17072,7 +17182,7 @@ R:      John Garry <john.g.garry@oracle.com>
 R:     Will Deacon <will@kernel.org>
 R:     James Clark <james.clark@arm.com>
 R:     Mike Leach <mike.leach@linaro.org>
-R:     Leo Yan <leo.yan@linaro.org>
+R:     Leo Yan <leo.yan@linux.dev>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     tools/build/feature/test-libopencsd.c
@@ -17645,12 +17755,11 @@ F:    Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml
 F:     drivers/media/rc/pwm-ir-tx.c
 
 PWM SUBSYSTEM
-M:     Thierry Reding <thierry.reding@gmail.com>
-R:     Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+M:     Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
 L:     linux-pwm@vger.kernel.org
 S:     Maintained
 Q:     https://patchwork.ozlabs.org/project/linux-pwm/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/thierry.reding/linux-pwm.git
+T:     git https://git.kernel.org/pub/scm/linux/kernel/git/ukleinek/linux.git
 F:     Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml
 F:     Documentation/devicetree/bindings/pwm/
 F:     Documentation/driver-api/pwm.rst
@@ -17660,7 +17769,7 @@ F:      drivers/video/backlight/pwm_bl.c
 F:     include/dt-bindings/pwm/
 F:     include/linux/pwm.h
 F:     include/linux/pwm_backlight.h
-K:     pwm_(config|apply_state|ops)
+K:     pwm_(config|apply_might_sleep|apply_atomic|ops)
 
 PXA GPIO DRIVER
 M:     Robert Jarzmik <robert.jarzmik@free.fr>
@@ -17976,7 +18085,6 @@ F:      drivers/net/ethernet/qualcomm/emac/
 
 QUALCOMM ETHQOS ETHERNET DRIVER
 M:     Vinod Koul <vkoul@kernel.org>
-R:     Bhupesh Sharma <bhupesh.sharma@linaro.org>
 L:     netdev@vger.kernel.org
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
@@ -18198,6 +18306,7 @@ F:      drivers/media/cec/usb/rainshadow/
 
 RALINK MIPS ARCHITECTURE
 M:     John Crispin <john@phrozen.org>
+M:     Sergio Paracuellos <sergio.paracuellos@gmail.com>
 L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     arch/mips/ralink
@@ -18322,7 +18431,7 @@ S:      Supported
 F:     drivers/infiniband/sw/rdmavt
 
 RDS - RELIABLE DATAGRAM SOCKETS
-M:     Santosh Shilimkar <santosh.shilimkar@oracle.com>
+M:     Allison Henderson <allison.henderson@oracle.com>
 L:     netdev@vger.kernel.org
 L:     linux-rdma@vger.kernel.org
 L:     rds-devel@oss.oracle.com (moderated for non-subscribers)
@@ -18364,7 +18473,6 @@ X:      include/linux/srcu*.h
 X:     kernel/rcu/srcu*.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
-M:     Alessandro Zummo <a.zummo@towertech.it>
 M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
 L:     linux-rtc@vger.kernel.org
 S:     Maintained
@@ -19052,7 +19160,7 @@ F:      drivers/iommu/s390-iommu.c
 
 S390 IUCV NETWORK LAYER
 M:     Alexandra Winter <wintera@linux.ibm.com>
-M:     Wenjia Zhang <wenjia@linux.ibm.com>
+M:     Thorsten Winkler <twinkler@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -19071,7 +19179,7 @@ F:      arch/s390/mm
 
 S390 NETWORK DRIVERS
 M:     Alexandra Winter <wintera@linux.ibm.com>
-M:     Wenjia Zhang <wenjia@linux.ibm.com>
+M:     Thorsten Winkler <twinkler@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -19289,7 +19397,6 @@ R:      Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
 F:     Documentation/devicetree/bindings/clock/samsung,*.yaml
 F:     Documentation/devicetree/bindings/clock/samsung,s3c*
 F:     drivers/clk/samsung/
@@ -19739,6 +19846,19 @@ T:     git git://linuxtv.org/media_tree.git
 F:     drivers/media/i2c/rj54n1cb0c.c
 F:     include/media/i2c/rj54n1cb0c.h
 
+SHRINKER
+M:     Andrew Morton <akpm@linux-foundation.org>
+M:     Dave Chinner <david@fromorbit.com>
+R:     Qi Zheng <zhengqi.arch@bytedance.com>
+R:     Roman Gushchin <roman.gushchin@linux.dev>
+R:     Muchun Song <muchun.song@linux.dev>
+L:     linux-mm@kvack.org
+S:     Maintained
+F:     Documentation/admin-guide/mm/shrinker_debugfs.rst
+F:     include/linux/shrinker.h
+F:     mm/shrinker.c
+F:     mm/shrinker_debug.c
+
 SH_VOU V4L2 OUTPUT DRIVER
 L:     linux-media@vger.kernel.org
 S:     Orphan
@@ -20432,6 +20552,7 @@ F:      Documentation/translations/sp_SP/
 
 SPARC + UltraSPARC (sparc/sparc64)
 M:     "David S. Miller" <davem@davemloft.net>
+M:     Andreas Larsson <andreas@gaisler.com>
 L:     sparclinux@vger.kernel.org
 S:     Maintained
 Q:     http://patchwork.ozlabs.org/project/sparclinux/list/
@@ -22918,6 +23039,7 @@ M:      Alex Williamson <alex.williamson@redhat.com>
 L:     kvm@vger.kernel.org
 S:     Maintained
 T:     git https://github.com/awilliam/linux-vfio.git
+F:     Documentation/ABI/testing/debugfs-vfio
 F:     Documentation/ABI/testing/sysfs-devices-vfio-dev
 F:     Documentation/driver-api/vfio.rst
 F:     drivers/vfio/
@@ -22953,6 +23075,13 @@ L:     kvm@vger.kernel.org
 S:     Maintained
 F:     drivers/vfio/pci/mlx5/
 
+VFIO VIRTIO PCI DRIVER
+M:     Yishai Hadas <yishaih@nvidia.com>
+L:     kvm@vger.kernel.org
+L:     virtualization@lists.linux-foundation.org
+S:     Maintained
+F:     drivers/vfio/pci/virtio
+
 VFIO PCI DEVICE SPECIFIC DRIVERS
 R:     Jason Gunthorpe <jgg@nvidia.com>
 R:     Yishai Hadas <yishaih@nvidia.com>
@@ -23101,7 +23230,6 @@ M:      Amit Shah <amit@kernel.org>
 L:     virtualization@lists.linux.dev
 S:     Maintained
 F:     drivers/char/virtio_console.c
-F:     include/linux/virtio_console.h
 F:     include/uapi/linux/virtio_console.h
 
 VIRTIO CORE AND NET DRIVERS
@@ -23277,6 +23405,12 @@ S:     Maintained
 F:     drivers/input/serio/userio.c
 F:     include/uapi/linux/userio.h
 
+VISHAY VEML6075 UVA AND UVB LIGHT SENSOR DRIVER
+M:     Javier Carrasco <javier.carrasco.cruz@gmail.com>
+S:     Maintained
+F:     Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml
+F:     drivers/iio/light/veml6075.c
+
 VISL VIRTUAL STATELESS DECODER DRIVER
 M:     Daniel Almeida <daniel.almeida@collabora.com>
 L:     linux-media@vger.kernel.org
@@ -23317,11 +23451,8 @@ F:     include/linux/vmalloc.h
 F:     mm/vmalloc.c
 
 VME SUBSYSTEM
-M:     Martyn Welch <martyn@welchs.me.uk>
-M:     Manohar Vanga <manohar.vanga@gmail.com>
-M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-kernel@vger.kernel.org
-S:     Odd fixes
+S:     Orphan
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:     Documentation/driver-api/vme.rst
 F:     drivers/staging/vme_user/
@@ -24212,13 +24343,6 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs.git
 F:     Documentation/filesystems/zonefs.rst
 F:     fs/zonefs/
 
-ZPOOL COMPRESSED PAGE STORAGE API
-M:     Dan Streetman <ddstreet@ieee.org>
-L:     linux-mm@kvack.org
-S:     Maintained
-F:     include/linux/zpool.h
-F:     mm/zpool.c
-
 ZR36067 VIDEO FOR LINUX DRIVER
 M:     Corentin Labbe <clabbe@baylibre.com>
 L:     mjpeg-users@lists.sourceforge.net
@@ -24264,11 +24388,15 @@ N:    zstd
 K:     zstd
 
 ZSWAP COMPRESSED SWAP CACHING
-M:     Seth Jennings <sjenning@redhat.com>
-M:     Dan Streetman <ddstreet@ieee.org>
-M:     Vitaly Wool <vitaly.wool@konsulko.com>
+M:     Johannes Weiner <hannes@cmpxchg.org>
+M:     Yosry Ahmed <yosryahmed@google.com>
+M:     Nhat Pham <nphamcs@gmail.com>
 L:     linux-mm@kvack.org
 S:     Maintained
+F:     Documentation/admin-guide/mm/zswap.rst
+F:     include/linux/zpool.h
+F:     include/linux/zswap.h
+F:     mm/zpool.c
 F:     mm/zswap.c
 
 THE REST
index f1b2fd97727506a8a79d57fe10ff4603f3fd7db6..7e0b2ad98905b91223d13ba03890d7839976ae0c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
-PATCHLEVEL = 7
+PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -155,6 +155,15 @@ endif
 
 export KBUILD_EXTMOD
 
+# backward compatibility
+KBUILD_EXTRA_WARN ?= $(KBUILD_ENABLE_EXTRA_GCC_CHECKS)
+
+ifeq ("$(origin W)", "command line")
+  KBUILD_EXTRA_WARN := $(W)
+endif
+
+export KBUILD_EXTRA_WARN
+
 # Kbuild will save output files in the current working directory.
 # This does not need to match to the root of the kernel source tree.
 #
@@ -181,14 +190,11 @@ ifeq ("$(origin O)", "command line")
 endif
 
 ifneq ($(KBUILD_OUTPUT),)
-# Make's built-in functions such as $(abspath ...), $(realpath ...) cannot
-# expand a shell special character '~'. We use a somewhat tedious way here.
-abs_objtree := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd)
-$(if $(abs_objtree),, \
-     $(error failed to create output directory "$(KBUILD_OUTPUT)"))
-
+# $(realpath ...) gets empty if the path does not exist. Run 'mkdir -p' first.
+$(shell mkdir -p "$(KBUILD_OUTPUT)")
 # $(realpath ...) resolves symlinks
-abs_objtree := $(realpath $(abs_objtree))
+abs_objtree := $(realpath $(KBUILD_OUTPUT))
+$(if $(abs_objtree),,$(error failed to create output directory "$(KBUILD_OUTPUT)"))
 endif # ifneq ($(KBUILD_OUTPUT),)
 
 ifneq ($(words $(subst :, ,$(abs_srctree))), 1)
@@ -288,15 +294,15 @@ may-sync-config   := 1
 single-build   :=
 
 ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
-       ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
+        ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
                need-config :=
-       endif
+        endif
 endif
 
 ifneq ($(filter $(no-sync-config-targets), $(MAKECMDGOALS)),)
-       ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
+        ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
                may-sync-config :=
-       endif
+        endif
 endif
 
 need-compiler := $(may-sync-config)
@@ -317,9 +323,9 @@ endif
 # We cannot build single targets and the others at the same time
 ifneq ($(filter $(single-targets), $(MAKECMDGOALS)),)
        single-build := 1
-       ifneq ($(filter-out $(single-targets), $(MAKECMDGOALS)),)
+        ifneq ($(filter-out $(single-targets), $(MAKECMDGOALS)),)
                mixed-build := 1
-       endif
+        endif
 endif
 
 # For "make -j clean all", "make -j mrproper defconfig all", etc.
@@ -609,8 +615,6 @@ export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL KBUILD_RUSTFLAGS_KERNEL
 export RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o    \
                          -name CVS -o -name .pc -o -name .hg -o -name .git \) \
                          -prune -o
-export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
-                        --exclude CVS --exclude .pc --exclude .hg --exclude .git
 
 # ===========================================================================
 # Rules shared between *config targets and build targets
@@ -982,6 +986,10 @@ NOSTDINC_FLAGS += -nostdinc
 # perform bounds checking.
 KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
 
+#Currently, disable -Wstringop-overflow for GCC 11, globally.
+KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
+KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
+
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS  += -fno-strict-overflow
 
@@ -1658,10 +1666,11 @@ help:
        @echo  '                       (sparse by default)'
        @echo  '  make C=2   [targets] Force check of all c source with $$CHECK'
        @echo  '  make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
-       @echo  '  make W=n   [targets] Enable extra build checks, n=1,2,3 where'
+       @echo  '  make W=n   [targets] Enable extra build checks, n=1,2,3,c,e where'
        @echo  '                1: warnings which may be relevant and do not occur too often'
        @echo  '                2: warnings which occur quite often but may still be relevant'
        @echo  '                3: more obscure warnings, can most likely be ignored'
+       @echo  '                c: extra checks in the configuration stage (Kconfig)'
        @echo  '                e: warnings are being treated as errors'
        @echo  '                Multiple levels can be combined with W=12 or W=123'
        @$(if $(dtstree), \
index 5ca66aad0d0812f831a717561777273be7db9a9a..a5af0edd3eb8f3b64e6e51bffb2ac491cb31bc26 100644 (file)
@@ -301,6 +301,11 @@ config ARCH_HAS_DMA_CLEAR_UNCACHED
 config ARCH_HAS_CPU_FINALIZE_INIT
        bool
 
+# The architecture has a per-task state that includes the mm's PASID
+config ARCH_HAS_CPU_PASID
+       bool
+       select IOMMU_MM_DATA
+
 config HAVE_ARCH_THREAD_STRUCT_WHITELIST
        bool
        help
@@ -668,6 +673,7 @@ config SHADOW_CALL_STACK
        bool "Shadow Call Stack"
        depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
        depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
+       depends on MMU
        help
          This option enables the compiler's Shadow Call Stack, which
          uses a shadow stack to protect function return addresses from
index fb3025396ac96477c497e966053a366d9e05200c..cfdf90bc8b3f862659726f09d167b301922c75bd 100644 (file)
@@ -80,7 +80,7 @@ init_rtc_epoch(void)
 static int
 alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-       int ret = mc146818_get_time(tm);
+       int ret = mc146818_get_time(tm, 10);
 
        if (ret < 0) {
                dev_err_ratelimited(dev, "unable to read current time\n");
index d6139dbae4ac9ff668da6c95d02abba01e87282b..feaf89f6936bcd1a82b9fe9ae12ef06daa8fcb64 100644 (file)
@@ -53,7 +53,7 @@ srmcons_do_receive_chars(struct tty_port *port)
        do {
                result.as_long = callback_getc(0);
                if (result.bits.status < 2) {
-                       tty_insert_flip_char(port, (char)result.bits.c, 0);
+                       tty_insert_flip_char(port, (u8)result.bits.c, 0);
                        count++;
                }
        } while((result.bits.status & 1) && (++loops < 10));
@@ -88,30 +88,27 @@ srmcons_receive_chars(struct timer_list *t)
 }
 
 /* called with callback_lock held */
-static int
-srmcons_do_write(struct tty_port *port, const char *buf, int count)
+static void
+srmcons_do_write(struct tty_port *port, const u8 *buf, size_t count)
 {
-       static char str_cr[1] = "\r";
-       long c, remaining = count;
+       size_t c;
        srmcons_result result;
-       char *cur;
-       int need_cr;
 
-       for (cur = (char *)buf; remaining > 0; ) {
-               need_cr = 0;
+       while (count > 0) {
+               bool need_cr = false;
                /* 
                 * Break it up into reasonable size chunks to allow a chance
                 * for input to get in
                 */
-               for (c = 0; c < min_t(long, 128L, remaining) && !need_cr; c++)
-                       if (cur[c] == '\n')
-                               need_cr = 1;
+               for (c = 0; c < min_t(size_t, 128U, count) && !need_cr; c++)
+                       if (buf[c] == '\n')
+                               need_cr = true;
                
                while (c > 0) {
-                       result.as_long = callback_puts(0, cur, c);
+                       result.as_long = callback_puts(0, buf, c);
                        c -= result.bits.c;
-                       remaining -= result.bits.c;
-                       cur += result.bits.c;
+                       count -= result.bits.c;
+                       buf += result.bits.c;
 
                        /*
                         * Check for pending input iff a tty port was provided
@@ -121,12 +118,11 @@ srmcons_do_write(struct tty_port *port, const char *buf, int count)
                }
 
                while (need_cr) {
-                       result.as_long = callback_puts(0, str_cr, 1);
+                       result.as_long = callback_puts(0, "\r", 1);
                        if (result.bits.c > 0)
-                               need_cr = 0;
+                               need_cr = false;
                }
        }
-       return count;
 }
 
 static ssize_t
@@ -135,7 +131,7 @@ srmcons_write(struct tty_struct *tty, const u8 *buf, size_t count)
        unsigned long flags;
 
        spin_lock_irqsave(&srmcons_callback_lock, flags);
-       srmcons_do_write(tty->port, (const char *) buf, count);
+       srmcons_do_write(tty->port, buf, count);
        spin_unlock_irqrestore(&srmcons_callback_lock, flags);
 
        return count;
index 563af3e75f01f2acb576110ef3210922a8dfb672..329c94cd45d8f68b3fa7866f92a6367a59f23530 100644 (file)
@@ -40,6 +40,7 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);
 
 /* TBD: optimize this */
 #define flush_cache_vmap(start, end)           flush_cache_all()
+#define flush_cache_vmap_early(start, end)     do { } while (0)
 #define flush_cache_vunmap(start, end)         flush_cache_all()
 
 #define flush_cache_dup_mm(mm)                 /* called on fork (VIVT only) */
index 9d96180797396bba26ace54f047f7a47bf82dd5f..a339223d9e052b35ea678d6a3e60faf6e5673671 100644 (file)
@@ -31,7 +31,7 @@
 static __always_inline bool arch_static_branch(struct static_key *key,
                                               bool branch)
 {
-       asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"   \n"
+       asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"            \n"
                 "1:                                                    \n"
                 "nop                                                   \n"
                 ".pushsection __jump_table, \"aw\"                     \n"
@@ -47,7 +47,7 @@ l_yes:
 static __always_inline bool arch_static_branch_jump(struct static_key *key,
                                                    bool branch)
 {
-       asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"   \n"
+       asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"            \n"
                 "1:                                                    \n"
                 "b %l[l_yes]                                           \n"
                 ".pushsection __jump_table, \"aw\"                     \n"
index 2a7fbbb83b7056e976fa446995d5bf33ecbf4764..197707bc7658898843d78bee7c0f02a10f7d26b9 100644 (file)
@@ -91,7 +91,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
  * Plug in direct dma map ops.
  */
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       const struct iommu_ops *iommu, bool coherent)
+                       bool coherent)
 {
        /*
         * IOC hardware snoops all DMA traffic keeping the caches consistent
index b67d05468251733273bb8865bf9f58bc99e10662..0af6709570d147f3cb914454ec7a9364a621ea9b 100644 (file)
@@ -35,6 +35,7 @@ config ARM
        select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
+       select ARCH_SUPPORTS_PER_VMA_LOCK
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_USE_MEMTEST
index e899de681f4752d4077b55a0cd4f8858c6e23df0..5be0e8fd2633c20e2d87abc843b53fca437942be 100644 (file)
@@ -45,8 +45,8 @@
                num-chipselects = <1>;
                cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>;
 
-               tpmdev@0 {
-                       compatible = "tcg,tpm_tis-spi";
+               tpm@0 {
+                       compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                        spi-max-frequency = <33000000>;
                        reg = <0>;
                };
index a677c827e758fe2042fcf14a192832668e3ffbd0..5a8169bbda8792c76c1da960508c8a0c6bdd4b86 100644 (file)
@@ -80,8 +80,8 @@
                gpio-miso = <&gpio ASPEED_GPIO(R, 5) GPIO_ACTIVE_HIGH>;
                num-chipselects = <1>;
 
-               tpmdev@0 {
-                       compatible = "tcg,tpm_tis-spi";
+               tpm@0 {
+                       compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                        spi-max-frequency = <33000000>;
                        reg = <0>;
                };
index 3f6010ef2b86f264fe88935a737b3ce9c60d762b..213023bc5aec4144751c9e7bc8e3e05c156386c8 100644 (file)
        status = "okay";
 
        tpm: tpm@2e {
-               compatible = "tcg,tpm-tis-i2c";
+               compatible = "nuvoton,npct75x", "tcg,tpm-tis-i2c";
                reg = <0x2e>;
        };
 };
index 31590d3186a2e099e44c663c46a87975b60aae27..00e5887c926f181d57bebe6b0b781ad2f2e8a514 100644 (file)
@@ -35,8 +35,8 @@
                gpio-mosi = <&gpio0 ASPEED_GPIO(X, 4) GPIO_ACTIVE_HIGH>;
                gpio-miso = <&gpio0 ASPEED_GPIO(X, 5) GPIO_ACTIVE_HIGH>;
 
-               tpmdev@0 {
-                       compatible = "tcg,tpm_tis-spi";
+               tpm@0 {
+                       compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                        spi-max-frequency = <33000000>;
                        reg = <0>;
                };
index 98817a6675b9dda10fa138a054d73cc78539d9df..d233a191c139362d36e581ed7b9222b0ebee3189 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "bcm2835-rpi.dtsi"
 
+#include <dt-bindings/power/raspberrypi-power.h>
 #include <dt-bindings/reset/raspberrypi,firmware-reset.h>
 
 / {
@@ -76,3 +77,7 @@
 &vchiq {
        interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 };
+
+&xhci {
+       power-domains = <&power RPI_POWER_DOMAIN_USB>;
+};
index 4a379a14966d8da3252bb5726d876a8828a04688..22c7f1561344ed57978b4d91d6cdff6a6a84e9ff 100644 (file)
                        };
                };
 
+               xhci: usb@7e9c0000 {
+                       compatible = "brcm,bcm2711-xhci", "brcm,xhci-brcm-v2";
+                       reg = <0x0 0x7e9c0000 0x100000>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+                       /* DWC2 and this IP block share the same USB PHY,
+                        * enabling both at the same time results in lockups.
+                        * So keep this node disabled and let the bootloader
+                        * decide which interface should be enabled.
+                        */
+                       status = "disabled";
+               };
+
                v3d: gpu@7ec00000 {
                        compatible = "brcm,2711-v3d";
                        reg = <0x0 0x7ec00000 0x4000>,
index 44cc4ff1d0df358ab66bb036d127175da1be74b6..d12fb44aeb140cfacf05a5b257d2106c79392279 100644 (file)
        tpm_tis: tpm@1 {
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_tpm>;
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                reg = <1>;
                spi-max-frequency = <20000000>;
                interrupt-parent = <&gpio5>;
index 3a723843d5626f6cc4b9ee2750968c01e46306db..9984b343cdf0cad1abd9e0d4d142ded838c47980 100644 (file)
         * TCG specification - Section 6.4.1 Clocking:
         * TPM shall support a SPI clock frequency range of 10-24 MHz.
         */
-       st33htph: tpm-tis@0 {
+       st33htph: tpm@0 {
                compatible = "st,st33htpm-spi", "tcg,tpm_tis-spi";
                reg = <0>;
                spi-max-frequency = <24000000>;
index d7954ff466b491b32acf6962ab5d64f4843f8157..e5254e32aa8fc326dfcabce33705a9b25e272052 100644 (file)
 };
 
 &fimd {
+       samsung,invert-vclk;
        status = "okay";
 };
 
index b8730aa52ce6fe521a1b531be42c4ef891c969b5..a59331aa58e55e3ef514fc06b5a36472c901dcd3 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&spi1_pins>;
 
-       tpm_spi_tis@0 {
+       tpm@0 {
                compatible = "tcg,tpm_tis-spi";
                reg = <0>;
                spi-max-frequency = <500000>;
index 70480dd9e96db2e325ffca406e05a22d9841ccc2..6d0c9f7268bad238688a2c80dbc6f105f8663561 100644 (file)
@@ -68,6 +68,8 @@ struct locomo {
 #endif
 };
 
+static const struct bus_type locomo_bus_type;
+
 struct locomo_dev_info {
        unsigned long   offset;
        unsigned long   length;
@@ -842,7 +844,7 @@ static void locomo_bus_remove(struct device *dev)
                drv->remove(ldev);
 }
 
-struct bus_type locomo_bus_type = {
+static const struct bus_type locomo_bus_type = {
        .name           = "locomo-bus",
        .match          = locomo_match,
        .probe          = locomo_bus_probe,
index feb38a94c1a70a49c3d3d997957d493585bff54f..43bc1255a5db9f4ed8f65a98a69284e1439e44d2 100644 (file)
@@ -138,7 +138,8 @@ CONFIG_PWM_MXS=y
 CONFIG_NVMEM_MXS_OCOTP=y
 CONFIG_EXT4_FS=y
 # CONFIG_DNOTIFY is not set
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
 CONFIG_FSCACHE_STATS=y
 CONFIG_CACHEFILES=m
 CONFIG_VFAT_FS=y
index f6181f69577fe538dc4a13a85cc479784ad9ebef..1075534b0a2eeba73be7d0e83b9d0c995e1f8cf9 100644 (file)
@@ -340,6 +340,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
                dsb(ishst);
 }
 
+#define flush_cache_vmap_early(start, end)     do { } while (0)
+
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
        if (!cache_is_vipt_nonaliasing())
index aaaedafef7cce6ce0caefc726cf229aa510e15c2..9fd9ad5d9202070ad6760d1aeb633cca4835734d 100644 (file)
 #define LOCOMO_LPT_TOH(TOH)    ((TOH & 0x7) << 4)
 #define LOCOMO_LPT_TOL(TOL)    ((TOL & 0x7))
 
-extern struct bus_type locomo_bus_type;
-
 #define LOCOMO_DEVID_KEYBOARD  0
 #define LOCOMO_DEVID_FRONTLIGHT        1
 #define LOCOMO_DEVID_BACKLIGHT 2
index e12d7d096fc034058bfaa094bf9b314a2a7a983d..e4eb54f6cd9fef41fecad56e25c4136e75455756 100644 (file)
@@ -11,7 +11,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\n\t"
+       asm goto("1:\n\t"
                 WASM(nop) "\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".word 1b, %l[l_yes], %c0\n\t"
@@ -25,7 +25,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\n\t"
+       asm goto("1:\n\t"
                 WASM(b) " %l[l_yes]\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".word 1b, %l[l_yes], %c0\n\t"
index 16b02f44c7d3126a925ee41db80697814cca988c..d657b84b6bf706a701d3e93a51a18e21755d0bde 100644 (file)
@@ -151,6 +151,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
+#define pgdp_get(pgpd)         READ_ONCE(*pgdp)
+
 #define pud_page(pud)          pmd_page(__pmd(pud_val(pud)))
 #define pud_write(pud)         pmd_write(__pmd(pud_val(pud)))
 
index 59de137c6f53ba8aca6d34a3bf2767c2f03948e4..2a8a9fe46586d2c334827ad436ec3ea201877760 100644 (file)
@@ -11,6 +11,7 @@ menuconfig ARCH_DAVINCI
        select PM_GENERIC_DOMAINS_OF if PM && OF
        select REGMAP_MMIO
        select RESET_CONTROLLER
+       select PINCTRL
        select PINCTRL_SINGLE
 
 if ARCH_DAVINCI
index cfd9c933d2f09c6a80d86c9b50959728498cb37a..b94850b579952aefacbd1710bc3c317b4c4b77c9 100644 (file)
@@ -34,7 +34,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       const struct iommu_ops *iommu, bool coherent)
+                       bool coherent)
 {
        if (IS_ENABLED(CONFIG_CPU_V7M)) {
                /*
index 5409225b4abc06b425c1483f53e087318c22a080..f68db05eba29fdaebb7d7cb8d9ec071e4f5f2910 100644 (file)
@@ -859,10 +859,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
        int i = 0;
        int order_idx = 0;
 
-       if (array_size <= PAGE_SIZE)
-               pages = kzalloc(array_size, GFP_KERNEL);
-       else
-               pages = vzalloc(array_size);
+       pages = kvzalloc(array_size, GFP_KERNEL);
        if (!pages)
                return NULL;
 
@@ -1713,7 +1710,7 @@ void arm_iommu_detach_device(struct device *dev)
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                   const struct iommu_ops *iommu, bool coherent)
+                                   bool coherent)
 {
        struct dma_iommu_mapping *mapping;
 
@@ -1748,7 +1745,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 #else
 
 static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                                   const struct iommu_ops *iommu, bool coherent)
+                                   bool coherent)
 {
 }
 
@@ -1757,7 +1754,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       const struct iommu_ops *iommu, bool coherent)
+                       bool coherent)
 {
        /*
         * Due to legacy code that sets the ->dma_coherent flag from a bus
@@ -1776,8 +1773,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        if (dev->dma_ops)
                return;
 
-       if (iommu)
-               arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
+       if (device_iommu_mapped(dev))
+               arm_setup_iommu_dma_ops(dev, dma_base, size, coherent);
 
        xen_setup_dma_ops(dev);
        dev->archdata.dma_ops_setup = true;
index fef62e4a9edde6ce1343c8a81e4e65b883e11e83..07565b593ed681b0f1675f8ef7a934c1ae53dc51 100644 (file)
@@ -278,6 +278,37 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
+       if (!(flags & FAULT_FLAG_USER))
+               goto lock_mmap;
+
+       vma = lock_vma_under_rcu(mm, addr);
+       if (!vma)
+               goto lock_mmap;
+
+       if (!(vma->vm_flags & vm_flags)) {
+               vma_end_read(vma);
+               goto lock_mmap;
+       }
+       fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
+       if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+               vma_end_read(vma);
+
+       if (!(fault & VM_FAULT_RETRY)) {
+               count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+               goto done;
+       }
+       count_vm_vma_lock_event(VMA_LOCK_RETRY);
+       if (fault & VM_FAULT_MAJOR)
+               flags |= FAULT_FLAG_TRIED;
+
+       /* Quick path to respond to signals */
+       if (fault_signal_pending(fault, regs)) {
+               if (!user_mode(regs))
+                       goto no_context;
+               return 0;
+       }
+lock_mmap:
+
 retry:
        vma = lock_mm_and_find_vma(mm, addr, regs);
        if (unlikely(!vma)) {
@@ -316,6 +347,7 @@ retry:
        }
 
        mmap_read_unlock(mm);
+done:
 
        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR
index 24d71b5db62de43bac5639a325b26fbba16f3f33..111d4f703136e4b758dc6fe027ffd7780ea3714d 100644 (file)
@@ -28,6 +28,12 @@ static pgd_t tmp_pgd_table[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
 
 pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
 
+static __init void *kasan_alloc_block_raw(size_t size)
+{
+       return memblock_alloc_try_nid_raw(size, size, __pa(MAX_DMA_ADDRESS),
+                                     MEMBLOCK_ALLOC_NOLEAKTRACE, NUMA_NO_NODE);
+}
+
 static __init void *kasan_alloc_block(size_t size)
 {
        return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
@@ -50,7 +56,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                        if (!pte_none(READ_ONCE(*ptep)))
                                continue;
 
-                       p = kasan_alloc_block(PAGE_SIZE);
+                       p = kasan_alloc_block_raw(PAGE_SIZE);
                        if (!p) {
                                panic("%s failed to allocate shadow page for address 0x%lx\n",
                                      __func__, addr);
index 7e8773a2d99d086383fa334b6a10dd9b3a26b011..b68efe643a12ca3242e5f4c5b68acb187f9e0382 100644 (file)
@@ -800,6 +800,24 @@ static struct undef_hook neon_support_hook[] = {{
        .cpsr_mask      = PSR_T_BIT,
        .cpsr_val       = PSR_T_BIT,
        .fn             = vfp_support_entry,
+}, {
+       .instr_mask     = 0xff000800,
+       .instr_val      = 0xfc000800,
+       .cpsr_mask      = 0,
+       .cpsr_val       = 0,
+       .fn             = vfp_support_entry,
+}, {
+       .instr_mask     = 0xff000800,
+       .instr_val      = 0xfd000800,
+       .cpsr_mask      = 0,
+       .cpsr_val       = 0,
+       .fn             = vfp_support_entry,
+}, {
+       .instr_mask     = 0xff000800,
+       .instr_val      = 0xfe000800,
+       .cpsr_mask      = 0,
+       .cpsr_val       = 0,
+       .fn             = vfp_support_entry,
 }};
 
 static struct undef_hook vfp_support_hook = {
index 8f6cf1221b6a175cc249d46f809e156a3b868028..aa7c1d435139684d7b56f96f3f93945d331d64d6 100644 (file)
@@ -133,6 +133,7 @@ config ARM64
        select GENERIC_ARCH_TOPOLOGY
        select GENERIC_CLOCKEVENTS_BROADCAST
        select GENERIC_CPU_AUTOPROBE
+       select GENERIC_CPU_DEVICES
        select GENERIC_CPU_VULNERABILITIES
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IDLE_POLL_SETUP
@@ -1038,8 +1039,12 @@ config ARM64_ERRATUM_2645198
 
          If unsure, say Y.
 
+config ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+       bool
+
 config ARM64_ERRATUM_2966298
        bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load"
+       select ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
        default y
        help
          This option adds the workaround for ARM Cortex-A520 erratum 2966298.
@@ -1051,6 +1056,20 @@ config ARM64_ERRATUM_2966298
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_3117295
+       bool "Cortex-A510: 3117295: workaround for speculatively executed unprivileged load"
+       select ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+       default y
+       help
+         This option adds the workaround for ARM Cortex-A510 erratum 3117295.
+
+         On an affected Cortex-A510 core, a speculatively executed unprivileged
+         load might leak data from a privileged level via a cache side channel.
+
+         Work around this problem by executing a TLBI before returning to EL0.
+
+         If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
        bool "Cavium erratum 22375, 24313"
        default y
index 47ecc4cff9d25b7752c94df9ab574ec52cbabd28..a88cdf91068713ebefc031f438b3b22a0247f943 100644 (file)
@@ -195,7 +195,7 @@ vdso_prepare: prepare0
        include/generated/vdso-offsets.h arch/arm64/kernel/vdso/vdso.so
 ifdef CONFIG_COMPAT_VDSO
        $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 \
-       include/generated/vdso32-offsets.h arch/arm64/kernel/vdso32/vdso.so
+       arch/arm64/kernel/vdso32/vdso.so
 endif
 endif
 
index 9747cb3fa03ac5c141b9bf660da3531ca2082def..d838e3a7af6e5ddda3751cc6f0bf4c73bccacc03 100644 (file)
                        #clock-cells = <1>;
                        clocks = <&cmu_top CLK_DOUT_CMU_MISC_BUS>,
                                 <&cmu_top CLK_DOUT_CMU_MISC_SSS>;
-                       clock-names = "dout_cmu_misc_bus", "dout_cmu_misc_sss";
+                       clock-names = "bus", "sss";
                };
 
                watchdog_cl0: watchdog@10060000 {
index 968f475b9a96c3c7334d670fd004ddcde08eed6f..27a902569e2a28434af3b6b15dcdb3a43f7a9606 100644 (file)
        };
 
        tpm: tpm@1 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
                interrupt-parent = <&gpio2>;
                pinctrl-names = "default";
index 3f3f2a2c89cd504f22548178b0d718ed61d122fa..752caa38eb03bfd6831e61f857b517beb5bfe1a1 100644 (file)
@@ -89,7 +89,7 @@
        status = "okay";
 
        tpm@1 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
                reg = <0x1>;
                spi-max-frequency = <36000000>;
        };
index 06fed93769966367b02c0a3d5f44f8264c080617..2aa6c1090fc7d7b81f7774354286c13a5463c06b 100644 (file)
        status = "okay";
 
        tpm@1 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
                reg = <0x1>;
                spi-max-frequency = <36000000>;
        };
index feae77e038354c687d69904fdb5b577f32cfe26d..a08057410bdef5b3a2572cb5c5e2fe6ea35b5522 100644 (file)
        status = "okay";
 
        tpm: tpm@0 {
-               compatible = "infineon,slb9670";
+               compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                reg = <0>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_tpm>;
index cc9d468b43ab8d27be45ad5ed7bad945806ab64b..f87fa5a948ccc380c473778e9f0b61c68a0b7e7c 100644 (file)
                };
 
        };
+
+       reserved-memory {
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               dsp_vdev0vring0: vdev0vring0@942f0000 {
+                       reg = <0 0x942f0000 0 0x8000>;
+                       no-map;
+               };
+
+               dsp_vdev0vring1: vdev0vring1@942f8000 {
+                       reg = <0 0x942f8000 0 0x8000>;
+                       no-map;
+               };
+
+               dsp_vdev0buffer: vdev0buffer@94300000 {
+                       compatible = "shared-dma-pool";
+                       reg = <0 0x94300000 0 0x100000>;
+                       no-map;
+               };
+       };
 };
 
 &flexspi {
index c24587c895e1f9734da4c4f4cf7becb697825f59..41c79d2ebdd6201dc10278204c064a4c01c71709 100644 (file)
        status = "okay";
 
        tpm@1 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
                reg = <0x1>;
                spi-max-frequency = <36000000>;
        };
index 628ffba69862ad51f2072e88fc812b3a84e1b71c..d5c400b355af564123497cd1805e0b0ad56ded21 100644 (file)
        status = "okay";
 
        tpm@1 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
                reg = <0x1>;
                spi-max-frequency = <36000000>;
        };
index 9caf7ca25444600a4a7979b3749d5175e32b0bbe..cae586cd45bdd59aa479e70bb290fc50b0392a3c 100644 (file)
        status = "okay";
 
        tpm@0 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
                reg = <0x0>;
                spi-max-frequency = <36000000>;
        };
index 6376417e918c2083bb67c2f978d53602153d3cb9..d8cf1f27c3ec8a33b7ad527c1fc2b489747a2d84 100644 (file)
@@ -65,7 +65,7 @@
        status = "okay";
 
        tpm@0 {
-               compatible = "infineon,slb9670";
+               compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                reg = <0>;
                spi-max-frequency = <43000000>;
        };
index 5506de83f61d423634511fba3f783f67a8987792..1b3396b1cee394659d0a77c104f05e1e7762569f 100644 (file)
        status = "okay";
        cs-gpios = <&pio 86 GPIO_ACTIVE_LOW>;
 
-       cr50@0 {
+       tpm@0 {
                compatible = "google,cr50";
                reg = <0>;
                spi-max-frequency = <1000000>;
index f2281250ac35da2514d73191cbcdb2e195afcbcb..d87aab8d7a79ed4ac8365b951f16c370b2efcc91 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&spi5_pins>;
 
-       cr50@0 {
+       tpm@0 {
                compatible = "google,cr50";
                reg = <0>;
                interrupts-extended = <&pio 171 IRQ_TYPE_EDGE_RISING>;
index bbdcd441c049d149f44534c4ee3ffd553d5d4646..3c6079edda190d3606ff5f1f36bc3ad10ff99019 100644 (file)
 &xhci0 {
        status = "okay";
 
+       rx-fifo-depth = <3072>;
        vusb33-supply = <&mt6359_vusb_ldo_reg>;
        vbus-supply = <&usb_vbus>;
 };
 &xhci1 {
        status = "okay";
 
+       rx-fifo-depth = <3072>;
        vusb33-supply = <&mt6359_vusb_ldo_reg>;
        vbus-supply = <&usb_vbus>;
 };
index 7f8327b0dbdb41d0a8baf3a34b0c6d0c97a60b8e..e423c57ddd41eceefaea483aaa343aa9b9bd1ee7 100644 (file)
                        compatible = "qcom,msm8916-bimc";
                        reg = <0x00400000 0x62000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
-                                <&rpmcc RPM_SMD_BIMC_A_CLK>;
                };
 
                tsens: thermal-sensor@4a9000 {
                        compatible = "qcom,msm8916-pcnoc";
                        reg = <0x00500000 0x11000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_PCNOC_CLK>,
-                                <&rpmcc RPM_SMD_PCNOC_A_CLK>;
                };
 
                snoc: interconnect@580000 {
                        compatible = "qcom,msm8916-snoc";
                        reg = <0x00580000 0x14000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
-                                <&rpmcc RPM_SMD_SNOC_A_CLK>;
                };
 
                stm: stm@802000 {
index 29f6bd9df2eb18fb7ea5049a76e459f8af76bf2e..82d85ff61045d31c13b6b5874acd315399a8886e 100644 (file)
                bimc: interconnect@400000 {
                        compatible = "qcom,msm8939-bimc";
                        reg = <0x00400000 0x62000>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
-                                <&rpmcc RPM_SMD_BIMC_A_CLK>;
                        #interconnect-cells = <1>;
                };
 
                pcnoc: interconnect@500000 {
                        compatible = "qcom,msm8939-pcnoc";
                        reg = <0x00500000 0x11000>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_PCNOC_CLK>,
-                                <&rpmcc RPM_SMD_PCNOC_A_CLK>;
                        #interconnect-cells = <1>;
                };
 
                snoc: interconnect@580000 {
                        compatible = "qcom,msm8939-snoc";
                        reg = <0x00580000 0x14080>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
-                                <&rpmcc RPM_SMD_SNOC_A_CLK>;
                        #interconnect-cells = <1>;
 
                        snoc_mm: interconnect-snoc {
                                compatible = "qcom,msm8939-snoc-mm";
-                               clock-names = "bus", "bus_a";
-                               clocks = <&rpmcc RPM_SMD_SYSMMNOC_CLK>,
-                                        <&rpmcc RPM_SMD_SYSMMNOC_A_CLK>;
                                #interconnect-cells = <1>;
                        };
                };
index 8c6a7efa90c43efb17bc5725a4d7e1156078761a..8d41ed261adfbfc99e15c07755f54d8f4cf5cc80 100644 (file)
                        compatible = "qcom,msm8996-bimc";
                        reg = <0x00408000 0x5a000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
-                                <&rpmcc RPM_SMD_BIMC_A_CLK>;
                };
 
                tsens0: thermal-sensor@4a9000 {
                        compatible = "qcom,msm8996-cnoc";
                        reg = <0x00500000 0x1000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_CNOC_CLK>,
-                                <&rpmcc RPM_SMD_CNOC_A_CLK>;
                };
 
                snoc: interconnect@524000 {
                        compatible = "qcom,msm8996-snoc";
                        reg = <0x00524000 0x1c000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
-                                <&rpmcc RPM_SMD_SNOC_A_CLK>;
                };
 
                a0noc: interconnect@543000 {
                        compatible = "qcom,msm8996-a1noc";
                        reg = <0x00562000 0x5000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_AGGR1_NOC_CLK>,
-                                <&rpmcc RPM_SMD_AGGR1_NOC_A_CLK>;
                };
 
                a2noc: interconnect@583000 {
                        compatible = "qcom,msm8996-a2noc";
                        reg = <0x00583000 0x7000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a", "aggre2_ufs_axi", "ufs_axi";
-                       clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
-                                <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
-                                <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+                       clock-names = "aggre2_ufs_axi", "ufs_axi";
+                       clocks = <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
                                 <&gcc GCC_UFS_AXI_CLK>;
                };
 
                        compatible = "qcom,msm8996-mnoc";
                        reg = <0x005a4000 0x1c000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a", "iface";
-                       clocks = <&rpmcc RPM_SMD_MMAXI_CLK>,
-                                <&rpmcc RPM_SMD_MMAXI_A_CLK>,
-                                <&mmcc AHB_CLK_SRC>;
+                       clock-names = "iface";
+                       clocks = <&mmcc AHB_CLK_SRC>;
                };
 
                pnoc: interconnect@5c0000 {
                        compatible = "qcom,msm8996-pnoc";
                        reg = <0x005c0000 0x3000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_PCNOC_CLK>,
-                                <&rpmcc RPM_SMD_PCNOC_A_CLK>;
                };
 
                tcsr_mutex: hwlock@740000 {
                                          "handover",
                                          "stop-ack";
 
-                       clocks = <&xo_board>,
-                                <&rpmcc RPM_SMD_AGGR2_NOC_CLK>;
-                       clock-names = "xo", "aggre2";
+                       clocks = <&xo_board>;
+                       clock-names = "xo";
 
                        memory-region = <&slpi_mem>;
 
                                 <&gcc GCC_MSS_GPLL0_DIV_CLK>,
                                 <&gcc GCC_MSS_SNOC_AXI_CLK>,
                                 <&gcc GCC_MSS_MNOC_BIMC_AXI_CLK>,
-                                <&rpmcc RPM_SMD_PCNOC_CLK>,
                                 <&rpmcc RPM_SMD_QDSS_CLK>;
-                       clock-names = "iface", "bus", "mem", "xo", "gpll0_mss",
-                                     "snoc_axi", "mnoc_axi", "pnoc", "qdss";
+                       clock-names = "iface",
+                                     "bus",
+                                     "mem",
+                                     "xo",
+                                     "gpll0_mss",
+                                     "snoc_axi",
+                                     "mnoc_axi",
+                                     "qdss";
 
                        resets = <&gcc GCC_MSS_RESTART>;
                        reset-names = "mss_restart";
index bb591c6bf57341b0665d7c514257a2b240063261..2793cc22d381af990a80018b96e16da4400d0fd1 100644 (file)
 
                        px-supply = <&vreg_lvs2a_1p8>;
 
-                       clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
-                                <&rpmcc RPM_SMD_AGGR2_NOC_CLK>;
-                       clock-names = "xo", "aggre2";
+                       clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>;
+                       clock-names = "xo";
 
                        memory-region = <&slpi_mem>;
 
index 6ac64ce9bb68ce1e7c872431bde6b86c2621c6d0..2f2eeaf2e945781056add9a0d5c5fce3541471c9 100644 (file)
                        reg = <0x00400000 0x80000>;
                        compatible = "qcom,qcs404-bimc";
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
-                               <&rpmcc RPM_SMD_BIMC_A_CLK>;
                };
 
                tsens: thermal-sensor@4a9000 {
                        reg = <0x00500000 0x15080>;
                        compatible = "qcom,qcs404-pcnoc";
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_PNOC_CLK>,
-                               <&rpmcc RPM_SMD_PNOC_A_CLK>;
                };
 
                snoc: interconnect@580000 {
                        reg = <0x00580000 0x23080>;
                        compatible = "qcom,qcs404-snoc";
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
-                               <&rpmcc RPM_SMD_SNOC_A_CLK>;
                };
 
                remoteproc_cdsp: remoteproc@b00000 {
index 0431845578736aded1a3bb0b0251b86ede1382a3..cd0db4f31d4af915058d2a817cc397d3cc5f40e7 100644 (file)
 
                altmodes {
                        displayport {
-                               svid = <0xff01>;
+                               svid = /bits/ 16 <0xff01>;
                                vdo = <0x00001c46>;
                        };
                };
index 775700f78e0feb8998b36aaf3e0a28ddc739e0df..513fe5e76b688ed0ace12b3804169fdb7e2c8841 100644 (file)
                        compatible = "qcom,sdm660-bimc";
                        reg = <0x01008000 0x78000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
-                                <&rpmcc RPM_SMD_BIMC_A_CLK>;
                };
 
                restart@10ac000 {
                        compatible = "qcom,sdm660-cnoc";
                        reg = <0x01500000 0x10000>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_CNOC_CLK>,
-                                <&rpmcc RPM_SMD_CNOC_A_CLK>;
                };
 
                snoc: interconnect@1626000 {
                        compatible = "qcom,sdm660-snoc";
                        reg = <0x01626000 0x7090>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a";
-                       clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
-                                <&rpmcc RPM_SMD_SNOC_A_CLK>;
                };
 
                anoc2_smmu: iommu@16c0000 {
                        compatible = "qcom,sdm630-smmu-v2", "qcom,smmu-v2";
                        reg = <0x016c0000 0x40000>;
-
-                       assigned-clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>;
-                       assigned-clock-rates = <1000>;
-                       clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>;
-                       clock-names = "bus";
                        #global-interrupts = <2>;
                        #iommu-cells = <1>;
 
                        compatible = "qcom,sdm660-a2noc";
                        reg = <0x01704000 0xc100>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus",
-                                     "bus_a",
-                                     "ipa",
+                       clock-names = "ipa",
                                      "ufs_axi",
                                      "aggre2_ufs_axi",
                                      "aggre2_usb3_axi",
                                      "cfg_noc_usb2_axi";
-                       clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
-                                <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
-                                <&rpmcc RPM_SMD_IPA_CLK>,
+                       clocks = <&rpmcc RPM_SMD_IPA_CLK>,
                                 <&gcc GCC_UFS_AXI_CLK>,
                                 <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
                                 <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
                        compatible = "qcom,sdm660-mnoc";
                        reg = <0x01745000 0xa010>;
                        #interconnect-cells = <1>;
-                       clock-names = "bus", "bus_a", "iface";
-                       clocks = <&rpmcc RPM_SMD_MMSSNOC_AXI_CLK>,
-                                <&rpmcc RPM_SMD_MMSSNOC_AXI_CLK_A>,
-                                <&mmcc AHB_CLK_SRC>;
+                       clock-names = "iface";
+                       clocks = <&mmcc AHB_CLK_SRC>;
                };
 
                tsens: thermal-sensor@10ae000 {
                        clocks = <&gcc GCC_GPU_CFG_AHB_CLK>,
                                 <&gcc GCC_BIMC_GFX_CLK>,
                                 <&gcc GCC_GPU_BIMC_GFX_CLK>;
-                       clock-names = "iface", "mem", "mem_iface";
+                       clock-names = "iface",
+                                     "mem",
+                                     "mem_iface";
                        #global-interrupts = <2>;
                        #iommu-cells = <1>;
 
                                 <&gcc GCC_USB30_MASTER_CLK>,
                                 <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
                                 <&gcc GCC_USB30_SLEEP_CLK>,
-                                <&gcc GCC_USB30_MOCK_UTMI_CLK>,
-                                <&rpmcc RPM_SMD_AGGR2_NOC_CLK>;
+                                <&gcc GCC_USB30_MOCK_UTMI_CLK>;
                        clock-names = "cfg_noc",
                                      "core",
                                      "iface",
                                      "sleep",
-                                     "mock_utmi",
-                                     "bus";
+                                     "mock_utmi";
 
                        assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>,
-                                         <&gcc GCC_USB30_MASTER_CLK>,
-                                         <&rpmcc RPM_SMD_AGGR2_NOC_CLK>;
-                       assigned-clock-rates = <19200000>, <120000000>,
-                                              <19200000>;
+                                         <&gcc GCC_USB30_MASTER_CLK>;
+                       assigned-clock-rates = <19200000>, <120000000>;
 
                        interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
 
                        clocks = <&mmcc MNOC_AHB_CLK>,
                                 <&mmcc BIMC_SMMU_AHB_CLK>,
-                                <&rpmcc RPM_SMD_MMSSNOC_AXI_CLK>,
                                 <&mmcc BIMC_SMMU_AXI_CLK>;
                        clock-names = "iface-mm", "iface-smmu",
-                                     "bus-mm", "bus-smmu";
+                                     "bus-smmu";
                        #global-interrupts = <2>;
                        #iommu-cells = <1>;
 
                        compatible = "qcom,sdm660-gnoc";
                        reg = <0x17900000 0xe000>;
                        #interconnect-cells = <1>;
-                       /*
-                        * This one apparently features no clocks,
-                        * so let's not mess with the driver needlessly
-                        */
-                       clock-names = "bus", "bus_a";
-                       clocks = <&xo_board>, <&xo_board>;
                };
 
                apcs_glb: mailbox@17911000 {
index 0f9cc042d9bf06b3445c2cb125435c823f3b26b4..1cba1d857c96ba06e3f257b8a15f20a99a9250ee 100644 (file)
@@ -70,7 +70,7 @@
 &spi0 {
        status = "okay";
 
-       cr50@0 {
+       tpm@0 {
                compatible = "google,cr50";
                reg = <0>;
                interrupt-parent = <&gpio0>;
index c5e7de60c12140c0dae9789cc338ef5f1b9fac3c..5846a11f0e848fc059446a47b57ff732b45e9f4c 100644 (file)
@@ -706,7 +706,7 @@ camera: &i2c7 {
 &spi2 {
        status = "okay";
 
-       cr50@0 {
+       tpm@0 {
                compatible = "google,cr50";
                reg = <0>;
                interrupt-parent = <&gpio1>;
index 210bb43cff2c7d020fdf9dc1c8f2a99ca49415a0..d328f549b1a60a26bff884bccebe448fc0936f76 100644 (file)
@@ -229,7 +229,7 @@ alternative_has_cap_likely(const unsigned long cpucap)
        if (!cpucap_is_possible(cpucap))
                return false;
 
-       asm_volatile_goto(
+       asm goto(
        ALTERNATIVE_CB("b       %l[l_no]", %[cpucap], alt_cb_patch_nops)
        :
        : [cpucap] "i" (cpucap)
@@ -247,7 +247,7 @@ alternative_has_cap_unlikely(const unsigned long cpucap)
        if (!cpucap_is_possible(cpucap))
                return false;
 
-       asm_volatile_goto(
+       asm goto(
        ALTERNATIVE("nop", "b   %l[l_yes]", %[cpucap])
        :
        : [cpucap] "i" (cpucap)
index 7b1975bf4b90e7a999de178140bc92aeb63f1c02..513787e4332993e18ec82db1a47f7814ca553d4c 100644 (file)
@@ -760,32 +760,25 @@ alternative_endif
 .endm
 
        /*
-        * Check whether preempt/bh-disabled asm code should yield as soon as
-        * it is able. This is the case if we are currently running in task
-        * context, and either a softirq is pending, or the TIF_NEED_RESCHED
-        * flag is set and re-enabling preemption a single time would result in
-        * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
-        * stored negated in the top word of the thread_info::preempt_count
+        * Check whether asm code should yield as soon as it is able. This is
+        * the case if we are currently running in task context, and the
+        * TIF_NEED_RESCHED flag is set. (Note that the TIF_NEED_RESCHED flag
+        * is stored negated in the top word of the thread_info::preempt_count
         * field)
         */
-       .macro          cond_yield, lbl:req, tmp:req, tmp2:req
+       .macro          cond_yield, lbl:req, tmp:req, tmp2
+#ifdef CONFIG_PREEMPT_VOLUNTARY
        get_current_task \tmp
        ldr             \tmp, [\tmp, #TSK_TI_PREEMPT]
        /*
         * If we are serving a softirq, there is no point in yielding: the
         * softirq will not be preempted no matter what we do, so we should
-        * run to completion as quickly as we can.
+        * run to completion as quickly as we can. The preempt_count field will
+        * have BIT(SOFTIRQ_SHIFT) set in this case, so the zero check will
+        * catch this case too.
         */
-       tbnz            \tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
-#ifdef CONFIG_PREEMPTION
-       sub             \tmp, \tmp, #PREEMPT_DISABLE_OFFSET
        cbz             \tmp, \lbl
 #endif
-       adr_l           \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
-       get_this_cpu_offset     \tmp2
-       ldr             w\tmp, [\tmp, \tmp2]
-       cbnz            w\tmp, \lbl     // yield on pending softirq in task context
-.Lnoyield_\@:
        .endm
 
 /*
index f3034099fd952b204f45a683c84dd4e42dad0314..b1e43f56ee461eb8f36222412057e90c42d6f6bf 100644 (file)
@@ -38,7 +38,6 @@ struct cpuinfo_32bit {
 };
 
 struct cpuinfo_arm64 {
-       struct cpu      cpu;
        struct kobject  kobj;
        u64             reg_ctr;
        u64             reg_cntfrq;
index ae35939f395bb18e2e1c8862046edd8379113985..353fe08546cf9091c9965c6c6cbcd09e35169835 100644 (file)
@@ -392,6 +392,21 @@ static inline bool esr_is_data_abort(unsigned long esr)
        return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
 }
 
+static inline bool esr_fsc_is_translation_fault(unsigned long esr)
+{
+       return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
+}
+
+static inline bool esr_fsc_is_permission_fault(unsigned long esr)
+{
+       return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM;
+}
+
+static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
+{
+       return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS;
+}
+
 const char *esr_get_class_string(unsigned long esr);
 #endif /* __ASSEMBLY */
 
index 50ce8b697ff361be7ee7fe571144b6a9e12ad374..e93548914c366f3476aea04c3bc35ff4b92c083d 100644 (file)
@@ -4,6 +4,8 @@
 
 #ifndef __ASSEMBLER__
 
+#include <linux/cpumask.h>
+
 #include <asm-generic/irq.h>
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu);
index 48ddc0f45d2283f2c5ba0de62f5d3231999a91f3..6aafbb7899916e631eab9241c39c1313a7c93707 100644 (file)
@@ -18,7 +18,7 @@
 static __always_inline bool arch_static_branch(struct static_key * const key,
                                               const bool branch)
 {
-       asm_volatile_goto(
+       asm goto(
                "1:     nop                                     \n\t"
                 "      .pushsection    __jump_table, \"aw\"    \n\t"
                 "      .align          3                       \n\t"
@@ -35,7 +35,7 @@ l_yes:
 static __always_inline bool arch_static_branch_jump(struct static_key * const key,
                                                    const bool branch)
 {
-       asm_volatile_goto(
+       asm goto(
                "1:     b               %l[l_yes]               \n\t"
                 "      .pushsection    __jump_table, \"aw\"    \n\t"
                 "      .align          3                       \n\t"
index b85f46a73e21bb7ea7df3832e48ed1ce4346a848..3c6f8ba1e47927cea13fd4ff7f25c46c846052cf 100644 (file)
 #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)
 
 /* TCR_EL2 Registers bits */
+#define TCR_EL2_DS             (1UL << 32)
 #define TCR_EL2_RES1           ((1U << 31) | (1 << 23))
 #define TCR_EL2_TBI            (1 << 20)
 #define TCR_EL2_PS_SHIFT       16
                         TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
 
 /* VTCR_EL2 Registers bits */
+#define VTCR_EL2_DS            TCR_EL2_DS
 #define VTCR_EL2_RES1          (1U << 31)
 #define VTCR_EL2_HD            (1 << 22)
 #define VTCR_EL2_HA            (1 << 21)
  * Once we get to a point where the two describe the same thing, we'll
  * merge the definitions. One day.
  */
-#define __HFGRTR_EL2_RES0      (GENMASK(63, 56) | GENMASK(53, 51))
+#define __HFGRTR_EL2_RES0      HFGxTR_EL2_RES0
 #define __HFGRTR_EL2_MASK      GENMASK(49, 0)
-#define __HFGRTR_EL2_nMASK     (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
+#define __HFGRTR_EL2_nMASK     ~(__HFGRTR_EL2_RES0 | __HFGRTR_EL2_MASK)
 
-#define __HFGWTR_EL2_RES0      (GENMASK(63, 56) | GENMASK(53, 51) |    \
-                                BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
-                                GENMASK(26, 25) | BIT(21) | BIT(18) |  \
+/*
+ * The HFGWTR bits are a subset of HFGRTR bits. To ensure we don't miss any
+ * future additions, define __HFGWTR* macros relative to __HFGRTR* ones.
+ */
+#define __HFGRTR_ONLY_MASK     (BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
+                                GENMASK(26, 25) | BIT(21) | BIT(18) | \
                                 GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
-#define __HFGWTR_EL2_MASK      GENMASK(49, 0)
-#define __HFGWTR_EL2_nMASK     (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
-
-#define __HFGITR_EL2_RES0      GENMASK(63, 57)
-#define __HFGITR_EL2_MASK      GENMASK(54, 0)
-#define __HFGITR_EL2_nMASK     GENMASK(56, 55)
-
-#define __HDFGRTR_EL2_RES0     (BIT(49) | BIT(42) | GENMASK(39, 38) |  \
-                                GENMASK(21, 20) | BIT(8))
-#define __HDFGRTR_EL2_MASK     ~__HDFGRTR_EL2_nMASK
-#define __HDFGRTR_EL2_nMASK    GENMASK(62, 59)
-
-#define __HDFGWTR_EL2_RES0     (BIT(63) | GENMASK(59, 58) | BIT(51) | BIT(47) | \
-                                BIT(43) | GENMASK(40, 38) | BIT(34) | BIT(30) | \
-                                BIT(22) | BIT(9) | BIT(6))
-#define __HDFGWTR_EL2_MASK     ~__HDFGWTR_EL2_nMASK
-#define __HDFGWTR_EL2_nMASK    GENMASK(62, 60)
+#define __HFGWTR_EL2_RES0      (__HFGRTR_EL2_RES0 | __HFGRTR_ONLY_MASK)
+#define __HFGWTR_EL2_MASK      (__HFGRTR_EL2_MASK & ~__HFGRTR_ONLY_MASK)
+#define __HFGWTR_EL2_nMASK     ~(__HFGWTR_EL2_RES0 | __HFGWTR_EL2_MASK)
+
+#define __HFGITR_EL2_RES0      HFGITR_EL2_RES0
+#define __HFGITR_EL2_MASK      (BIT(62) | BIT(60) | GENMASK(54, 0))
+#define __HFGITR_EL2_nMASK     ~(__HFGITR_EL2_RES0 | __HFGITR_EL2_MASK)
+
+#define __HDFGRTR_EL2_RES0     HDFGRTR_EL2_RES0
+#define __HDFGRTR_EL2_MASK     (BIT(63) | GENMASK(58, 50) | GENMASK(48, 43) | \
+                                GENMASK(41, 40) | GENMASK(37, 22) | \
+                                GENMASK(19, 9) | GENMASK(7, 0))
+#define __HDFGRTR_EL2_nMASK    ~(__HDFGRTR_EL2_RES0 | __HDFGRTR_EL2_MASK)
+
+#define __HDFGWTR_EL2_RES0     HDFGWTR_EL2_RES0
+#define __HDFGWTR_EL2_MASK     (GENMASK(57, 52) | GENMASK(50, 48) | \
+                                GENMASK(46, 44) | GENMASK(42, 41) | \
+                                GENMASK(37, 35) | GENMASK(33, 31) | \
+                                GENMASK(29, 23) | GENMASK(21, 10) | \
+                                GENMASK(8, 7) | GENMASK(5, 0))
+#define __HDFGWTR_EL2_nMASK    ~(__HDFGWTR_EL2_RES0 | __HDFGWTR_EL2_MASK)
+
+#define __HAFGRTR_EL2_RES0     HAFGRTR_EL2_RES0
+#define __HAFGRTR_EL2_MASK     (GENMASK(49, 17) | GENMASK(4, 0))
+#define __HAFGRTR_EL2_nMASK    ~(__HAFGRTR_EL2_RES0 | __HAFGRTR_EL2_MASK)
 
 /* Similar definitions for HCRX_EL2 */
-#define __HCRX_EL2_RES0                (GENMASK(63, 16) | GENMASK(13, 12))
-#define __HCRX_EL2_MASK                (0)
-#define __HCRX_EL2_nMASK       (GENMASK(15, 14) | GENMASK(4, 0))
+#define __HCRX_EL2_RES0         HCRX_EL2_RES0
+#define __HCRX_EL2_MASK                (BIT(6))
+#define __HCRX_EL2_nMASK       ~(__HCRX_EL2_RES0 | __HCRX_EL2_MASK)
 
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK     (~UL(0xf))
index 78a550537b673f8d5c5d0f57ef157a84e9eecda8..b804fe832184466d68f597533858b90a6ffbc781 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_hyp.h>
+#include <asm/kvm_nested.h>
 #include <asm/ptrace.h>
 #include <asm/cputype.h>
 #include <asm/virt.h>
@@ -54,11 +55,6 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
 
-static inline bool vcpu_has_feature(const struct kvm_vcpu *vcpu, int feature)
-{
-       return test_bit(feature, vcpu->kvm->arch.vcpu_features);
-}
-
 #if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
@@ -248,7 +244,7 @@ static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
 
 static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
 {
-       return __is_hyp_ctxt(&vcpu->arch.ctxt);
+       return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
 }
 
 /*
@@ -404,14 +400,25 @@ static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
-static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static inline
+bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
 {
-       return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
+       return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
 }
 
-static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
+static inline
+bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
 {
-       return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
+       return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
+}
+
+static inline
+u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
+{
+       unsigned long esr = kvm_vcpu_get_esr(vcpu);
+
+       BUG_ON(!esr_fsc_is_permission_fault(esr));
+       return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
 }
 
 static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
@@ -454,12 +461,7 @@ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
                 * first), then a permission fault to allow the flags
                 * to be set.
                 */
-               switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
-               case ESR_ELx_FSC_PERM:
-                       return true;
-               default:
-                       return false;
-               }
+               return kvm_vcpu_trap_is_permission_fault(vcpu);
        }
 
        if (kvm_vcpu_trap_is_iabt(vcpu))
index 824f29f04916af028783d8914fdc311c41597c10..21c57b812569f22532bd57c7fb17af669d3eb370 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
+#include <asm/vncr_mapping.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
@@ -306,6 +307,7 @@ struct kvm_arch {
         * Atomic access to multiple idregs are guarded by kvm_arch.config_lock.
         */
 #define IDREG_IDX(id)          (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
+#define IDX_IDREG(idx)         sys_reg(3, 0, 0, ((idx) >> 3) + 1, (idx) & Op2_mask)
 #define IDREG(kvm, id)         ((kvm)->arch.id_regs[IDREG_IDX(id)])
 #define KVM_ARM_ID_REG_NUM     (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
        u64 id_regs[KVM_ARM_ID_REG_NUM];
@@ -324,33 +326,33 @@ struct kvm_vcpu_fault_info {
        u64 disr_el1;           /* Deferred [SError] Status Register */
 };
 
+/*
+ * VNCR() just places the VNCR_capable registers in the enum after
+ * __VNCR_START__, and the value (after correction) to be an 8-byte offset
+ * from the VNCR base. As we don't require the enum to be otherwise ordered,
+ * we need the terrible hack below to ensure that we correctly size the
+ * sys_regs array, no matter what.
+ *
+ * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
+ * treasure trove of bit hacks:
+ * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ */
+#define __MAX__(x,y)   ((x) ^ (((x) ^ (y)) & -((x) < (y))))
+#define VNCR(r)                                                \
+       __before_##r,                                   \
+       r = __VNCR_START__ + ((VNCR_ ## r) / 8),        \
+       __after_##r = __MAX__(__before_##r - 1, r)
+
 enum vcpu_sysreg {
        __INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
        MPIDR_EL1,      /* MultiProcessor Affinity Register */
        CLIDR_EL1,      /* Cache Level ID Register */
        CSSELR_EL1,     /* Cache Size Selection Register */
-       SCTLR_EL1,      /* System Control Register */
-       ACTLR_EL1,      /* Auxiliary Control Register */
-       CPACR_EL1,      /* Coprocessor Access Control */
-       ZCR_EL1,        /* SVE Control */
-       TTBR0_EL1,      /* Translation Table Base Register 0 */
-       TTBR1_EL1,      /* Translation Table Base Register 1 */
-       TCR_EL1,        /* Translation Control Register */
-       TCR2_EL1,       /* Extended Translation Control Register */
-       ESR_EL1,        /* Exception Syndrome Register */
-       AFSR0_EL1,      /* Auxiliary Fault Status Register 0 */
-       AFSR1_EL1,      /* Auxiliary Fault Status Register 1 */
-       FAR_EL1,        /* Fault Address Register */
-       MAIR_EL1,       /* Memory Attribute Indirection Register */
-       VBAR_EL1,       /* Vector Base Address Register */
-       CONTEXTIDR_EL1, /* Context ID Register */
        TPIDR_EL0,      /* Thread ID, User R/W */
        TPIDRRO_EL0,    /* Thread ID, User R/O */
        TPIDR_EL1,      /* Thread ID, Privileged */
-       AMAIR_EL1,      /* Aux Memory Attribute Indirection Register */
        CNTKCTL_EL1,    /* Timer Control Register (EL1) */
        PAR_EL1,        /* Physical Address Register */
-       MDSCR_EL1,      /* Monitor Debug System Control Register */
        MDCCINT_EL1,    /* Monitor Debug Comms Channel Interrupt Enable Reg */
        OSLSR_EL1,      /* OS Lock Status Register */
        DISR_EL1,       /* Deferred Interrupt Status Register */
@@ -381,26 +383,11 @@ enum vcpu_sysreg {
        APGAKEYLO_EL1,
        APGAKEYHI_EL1,
 
-       ELR_EL1,
-       SP_EL1,
-       SPSR_EL1,
-
-       CNTVOFF_EL2,
-       CNTV_CVAL_EL0,
-       CNTV_CTL_EL0,
-       CNTP_CVAL_EL0,
-       CNTP_CTL_EL0,
-
        /* Memory Tagging Extension registers */
        RGSR_EL1,       /* Random Allocation Tag Seed Register */
        GCR_EL1,        /* Tag Control Register */
-       TFSR_EL1,       /* Tag Fault Status Register (EL1) */
        TFSRE0_EL1,     /* Tag Fault Status Register (EL0) */
 
-       /* Permission Indirection Extension registers */
-       PIR_EL1,       /* Permission Indirection Register 1 (EL1) */
-       PIRE0_EL1,     /*  Permission Indirection Register 0 (EL1) */
-
        /* 32bit specific registers. */
        DACR32_EL2,     /* Domain Access Control Register */
        IFSR32_EL2,     /* Instruction Fault Status Register */
@@ -408,21 +395,14 @@ enum vcpu_sysreg {
        DBGVCR32_EL2,   /* Debug Vector Catch Register */
 
        /* EL2 registers */
-       VPIDR_EL2,      /* Virtualization Processor ID Register */
-       VMPIDR_EL2,     /* Virtualization Multiprocessor ID Register */
        SCTLR_EL2,      /* System Control Register (EL2) */
        ACTLR_EL2,      /* Auxiliary Control Register (EL2) */
-       HCR_EL2,        /* Hypervisor Configuration Register */
        MDCR_EL2,       /* Monitor Debug Configuration Register (EL2) */
        CPTR_EL2,       /* Architectural Feature Trap Register (EL2) */
-       HSTR_EL2,       /* Hypervisor System Trap Register */
        HACR_EL2,       /* Hypervisor Auxiliary Control Register */
-       HCRX_EL2,       /* Extended Hypervisor Configuration Register */
        TTBR0_EL2,      /* Translation Table Base Register 0 (EL2) */
        TTBR1_EL2,      /* Translation Table Base Register 1 (EL2) */
        TCR_EL2,        /* Translation Control Register (EL2) */
-       VTTBR_EL2,      /* Virtualization Translation Table Base Register */
-       VTCR_EL2,       /* Virtualization Translation Control Register */
        SPSR_EL2,       /* EL2 saved program status register */
        ELR_EL2,        /* EL2 exception link register */
        AFSR0_EL2,      /* Auxiliary Fault Status Register 0 (EL2) */
@@ -435,19 +415,62 @@ enum vcpu_sysreg {
        VBAR_EL2,       /* Vector Base Address Register (EL2) */
        RVBAR_EL2,      /* Reset Vector Base Address Register */
        CONTEXTIDR_EL2, /* Context ID Register (EL2) */
-       TPIDR_EL2,      /* EL2 Software Thread ID Register */
        CNTHCTL_EL2,    /* Counter-timer Hypervisor Control register */
        SP_EL2,         /* EL2 Stack Pointer */
-       HFGRTR_EL2,
-       HFGWTR_EL2,
-       HFGITR_EL2,
-       HDFGRTR_EL2,
-       HDFGWTR_EL2,
        CNTHP_CTL_EL2,
        CNTHP_CVAL_EL2,
        CNTHV_CTL_EL2,
        CNTHV_CVAL_EL2,
 
+       __VNCR_START__, /* Any VNCR-capable reg goes after this point */
+
+       VNCR(SCTLR_EL1),/* System Control Register */
+       VNCR(ACTLR_EL1),/* Auxiliary Control Register */
+       VNCR(CPACR_EL1),/* Coprocessor Access Control */
+       VNCR(ZCR_EL1),  /* SVE Control */
+       VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
+       VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
+       VNCR(TCR_EL1),  /* Translation Control Register */
+       VNCR(TCR2_EL1), /* Extended Translation Control Register */
+       VNCR(ESR_EL1),  /* Exception Syndrome Register */
+       VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
+       VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
+       VNCR(FAR_EL1),  /* Fault Address Register */
+       VNCR(MAIR_EL1), /* Memory Attribute Indirection Register */
+       VNCR(VBAR_EL1), /* Vector Base Address Register */
+       VNCR(CONTEXTIDR_EL1),   /* Context ID Register */
+       VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
+       VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
+       VNCR(ELR_EL1),
+       VNCR(SP_EL1),
+       VNCR(SPSR_EL1),
+       VNCR(TFSR_EL1), /* Tag Fault Status Register (EL1) */
+       VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
+       VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
+       VNCR(HCR_EL2),  /* Hypervisor Configuration Register */
+       VNCR(HSTR_EL2), /* Hypervisor System Trap Register */
+       VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
+       VNCR(VTCR_EL2), /* Virtualization Translation Control Register */
+       VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
+       VNCR(HCRX_EL2), /* Extended Hypervisor Configuration Register */
+
+       /* Permission Indirection Extension registers */
+       VNCR(PIR_EL1),   /* Permission Indirection Register 1 (EL1) */
+       VNCR(PIRE0_EL1), /*  Permission Indirection Register 0 (EL1) */
+
+       VNCR(HFGRTR_EL2),
+       VNCR(HFGWTR_EL2),
+       VNCR(HFGITR_EL2),
+       VNCR(HDFGRTR_EL2),
+       VNCR(HDFGWTR_EL2),
+       VNCR(HAFGRTR_EL2),
+
+       VNCR(CNTVOFF_EL2),
+       VNCR(CNTV_CVAL_EL0),
+       VNCR(CNTV_CTL_EL0),
+       VNCR(CNTP_CVAL_EL0),
+       VNCR(CNTP_CTL_EL0),
+
        NR_SYS_REGS     /* Nothing after this line! */
 };
 
@@ -464,6 +487,9 @@ struct kvm_cpu_context {
        u64 sys_regs[NR_SYS_REGS];
 
        struct kvm_vcpu *__hyp_running_vcpu;
+
+       /* This pointer has to be 4kB aligned. */
+       u64 *vncr_array;
 };
 
 struct kvm_host_data {
@@ -826,8 +852,19 @@ struct kvm_vcpu_arch {
  * accessed by a running VCPU.  For example, for userspace access or
  * for system registers that are never context switched, but only
  * emulated.
+ *
+ * Don't bother with VNCR-based accesses in the nVHE code, it has no
+ * business dealing with NV.
  */
-#define __ctxt_sys_reg(c,r)    (&(c)->sys_regs[(r)])
+static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
+{
+#if !defined (__KVM_NVHE_HYPERVISOR__)
+       if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
+                    r >= __VNCR_START__ && ctxt->vncr_array))
+               return &ctxt->vncr_array[r - __VNCR_START__];
+#endif
+       return (u64 *)&ctxt->sys_regs[r];
+}
 
 #define ctxt_sys_reg(c,r)      (*__ctxt_sys_reg(c,r))
 
@@ -871,6 +908,7 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
        case AMAIR_EL1:         *val = read_sysreg_s(SYS_AMAIR_EL12);   break;
        case CNTKCTL_EL1:       *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
        case ELR_EL1:           *val = read_sysreg_s(SYS_ELR_EL12);     break;
+       case SPSR_EL1:          *val = read_sysreg_s(SYS_SPSR_EL12);    break;
        case PAR_EL1:           *val = read_sysreg_par();               break;
        case DACR32_EL2:        *val = read_sysreg_s(SYS_DACR32_EL2);   break;
        case IFSR32_EL2:        *val = read_sysreg_s(SYS_IFSR32_EL2);   break;
@@ -915,6 +953,7 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
        case AMAIR_EL1:         write_sysreg_s(val, SYS_AMAIR_EL12);    break;
        case CNTKCTL_EL1:       write_sysreg_s(val, SYS_CNTKCTL_EL12);  break;
        case ELR_EL1:           write_sysreg_s(val, SYS_ELR_EL12);      break;
+       case SPSR_EL1:          write_sysreg_s(val, SYS_SPSR_EL12);     break;
        case PAR_EL1:           write_sysreg_s(val, SYS_PAR_EL1);       break;
        case DACR32_EL2:        write_sysreg_s(val, SYS_DACR32_EL2);    break;
        case IFSR32_EL2:        write_sysreg_s(val, SYS_IFSR32_EL2);    break;
@@ -954,8 +993,6 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
 int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
 
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
@@ -1177,6 +1214,13 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 #define kvm_vm_has_ran_once(kvm)                                       \
        (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))
 
+static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
+{
+       return test_bit(feature, ka->vcpu_features);
+}
+
+#define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
+
 int kvm_trng_call(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
 extern phys_addr_t hyp_mem_base;
index 6cec8e9c6c9126b1f9b01ba0eed4a2f156deb334..4882905357f43b6b5146f5d4f38d13b100f8dbb8 100644 (file)
@@ -2,8 +2,9 @@
 #ifndef __ARM64_KVM_NESTED_H
 #define __ARM64_KVM_NESTED_H
 
-#include <asm/kvm_emulate.h>
+#include <linux/bitfield.h>
 #include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
 
 static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
 {
@@ -12,12 +13,55 @@ static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
                vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
 }
 
-extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
+/* Translation helpers from non-VHE EL2 to EL1 */
+static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
+{
+       return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
+}
+
+static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
+{
+       return TCR_EPD1_MASK |                          /* disable TTBR1_EL1 */
+              ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
+              tcr_el2_ps_to_tcr_el1_ips(tcr) |
+              (tcr & TCR_EL2_TG0_MASK) |
+              (tcr & TCR_EL2_ORGN0_MASK) |
+              (tcr & TCR_EL2_IRGN0_MASK) |
+              (tcr & TCR_EL2_T0SZ_MASK);
+}
+
+static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
+{
+       u64 cpacr_el1 = 0;
+
+       if (cptr_el2 & CPTR_EL2_TTA)
+               cpacr_el1 |= CPACR_ELx_TTA;
+       if (!(cptr_el2 & CPTR_EL2_TFP))
+               cpacr_el1 |= CPACR_ELx_FPEN;
+       if (!(cptr_el2 & CPTR_EL2_TZ))
+               cpacr_el1 |= CPACR_ELx_ZEN;
 
-struct sys_reg_params;
-struct sys_reg_desc;
+       return cpacr_el1;
+}
+
+static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
+{
+       /* Only preserve the minimal set of bits we support */
+       val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
+               SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
+       val |= SCTLR_EL1_RES1;
+
+       return val;
+}
+
+static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
+{
+       /* Clear the ASID field */
+       return ttbr0 & ~GENMASK_ULL(63, 48);
+}
+
+extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
 
-void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
-                         const struct sys_reg_desc *r);
+int kvm_init_nv_sysregs(struct kvm *kvm);
 
 #endif /* __ARM64_KVM_NESTED_H */
index 10068500d60194e2b9747e274aa559b62f894a9d..cfdf40f734b12264ea9b4227839ffa191d512e33 100644 (file)
@@ -11,7 +11,8 @@
 #include <linux/kvm_host.h>
 #include <linux/types.h>
 
-#define KVM_PGTABLE_MAX_LEVELS         4U
+#define KVM_PGTABLE_FIRST_LEVEL                -1
+#define KVM_PGTABLE_LAST_LEVEL         3
 
 /*
  * The largest supported block sizes for KVM (no 52-bit PA support):
  *  - 64K (level 2):   512MB
  */
 #ifdef CONFIG_ARM64_4K_PAGES
-#define KVM_PGTABLE_MIN_BLOCK_LEVEL    1U
+#define KVM_PGTABLE_MIN_BLOCK_LEVEL    1
 #else
-#define KVM_PGTABLE_MIN_BLOCK_LEVEL    2U
+#define KVM_PGTABLE_MIN_BLOCK_LEVEL    2
 #endif
 
-#define kvm_lpa2_is_enabled()          false
+#define kvm_lpa2_is_enabled()          system_supports_lpa2()
+
+static inline u64 kvm_get_parange_max(void)
+{
+       if (kvm_lpa2_is_enabled() ||
+          (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
+               return ID_AA64MMFR0_EL1_PARANGE_52;
+       else
+               return ID_AA64MMFR0_EL1_PARANGE_48;
+}
 
 static inline u64 kvm_get_parange(u64 mmfr0)
 {
+       u64 parange_max = kvm_get_parange_max();
        u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
                                ID_AA64MMFR0_EL1_PARANGE_SHIFT);
-       if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
-               parange = ID_AA64MMFR0_EL1_PARANGE_MAX;
+       if (parange > parange_max)
+               parange = parange_max;
 
        return parange;
 }
@@ -43,6 +54,8 @@ typedef u64 kvm_pte_t;
 
 #define KVM_PTE_ADDR_MASK              GENMASK(47, PAGE_SHIFT)
 #define KVM_PTE_ADDR_51_48             GENMASK(15, 12)
+#define KVM_PTE_ADDR_MASK_LPA2         GENMASK(49, PAGE_SHIFT)
+#define KVM_PTE_ADDR_51_50_LPA2                GENMASK(9, 8)
 
 #define KVM_PHYS_INVALID               (-1ULL)
 
@@ -53,21 +66,34 @@ static inline bool kvm_pte_valid(kvm_pte_t pte)
 
 static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
 {
-       u64 pa = pte & KVM_PTE_ADDR_MASK;
-
-       if (PAGE_SHIFT == 16)
-               pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
+       u64 pa;
+
+       if (kvm_lpa2_is_enabled()) {
+               pa = pte & KVM_PTE_ADDR_MASK_LPA2;
+               pa |= FIELD_GET(KVM_PTE_ADDR_51_50_LPA2, pte) << 50;
+       } else {
+               pa = pte & KVM_PTE_ADDR_MASK;
+               if (PAGE_SHIFT == 16)
+                       pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
+       }
 
        return pa;
 }
 
 static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
 {
-       kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;
-
-       if (PAGE_SHIFT == 16) {
-               pa &= GENMASK(51, 48);
-               pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
+       kvm_pte_t pte;
+
+       if (kvm_lpa2_is_enabled()) {
+               pte = pa & KVM_PTE_ADDR_MASK_LPA2;
+               pa &= GENMASK(51, 50);
+               pte |= FIELD_PREP(KVM_PTE_ADDR_51_50_LPA2, pa >> 50);
+       } else {
+               pte = pa & KVM_PTE_ADDR_MASK;
+               if (PAGE_SHIFT == 16) {
+                       pa &= GENMASK(51, 48);
+                       pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
+               }
        }
 
        return pte;
@@ -78,28 +104,28 @@ static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
        return __phys_to_pfn(kvm_pte_to_phys(pte));
 }
 
-static inline u64 kvm_granule_shift(u32 level)
+static inline u64 kvm_granule_shift(s8 level)
 {
-       /* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
+       /* Assumes KVM_PGTABLE_LAST_LEVEL is 3 */
        return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
 }
 
-static inline u64 kvm_granule_size(u32 level)
+static inline u64 kvm_granule_size(s8 level)
 {
        return BIT(kvm_granule_shift(level));
 }
 
-static inline bool kvm_level_supports_block_mapping(u32 level)
+static inline bool kvm_level_supports_block_mapping(s8 level)
 {
        return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
 }
 
 static inline u32 kvm_supported_block_sizes(void)
 {
-       u32 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
+       s8 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
        u32 r = 0;
 
-       for (; level < KVM_PGTABLE_MAX_LEVELS; level++)
+       for (; level <= KVM_PGTABLE_LAST_LEVEL; level++)
                r |= BIT(kvm_granule_shift(level));
 
        return r;
@@ -144,7 +170,7 @@ struct kvm_pgtable_mm_ops {
        void*           (*zalloc_page)(void *arg);
        void*           (*zalloc_pages_exact)(size_t size);
        void            (*free_pages_exact)(void *addr, size_t size);
-       void            (*free_unlinked_table)(void *addr, u32 level);
+       void            (*free_unlinked_table)(void *addr, s8 level);
        void            (*get_page)(void *addr);
        void            (*put_page)(void *addr);
        int             (*page_count)(void *addr);
@@ -240,7 +266,7 @@ struct kvm_pgtable_visit_ctx {
        u64                                     start;
        u64                                     addr;
        u64                                     end;
-       u32                                     level;
+       s8                                      level;
        enum kvm_pgtable_walk_flags             flags;
 };
 
@@ -343,7 +369,7 @@ static inline bool kvm_pgtable_walk_lock_held(void)
  */
 struct kvm_pgtable {
        u32                                     ia_bits;
-       u32                                     start_level;
+       s8                                      start_level;
        kvm_pteref_t                            pgd;
        struct kvm_pgtable_mm_ops               *mm_ops;
 
@@ -477,7 +503,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
  * The page-table is assumed to be unreachable by any hardware walkers prior to
  * freeing and therefore no TLB invalidation is performed.
  */
-void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);
 
 /**
  * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
@@ -501,7 +527,7 @@ void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *p
  * an ERR_PTR(error) on failure.
  */
 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
-                                             u64 phys, u32 level,
+                                             u64 phys, s8 level,
                                              enum kvm_pgtable_prot prot,
                                              void *mc, bool force_pte);
 
@@ -727,7 +753,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
  * Return: 0 on success, negative error code on failure.
  */
 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
-                        kvm_pte_t *ptep, u32 *level);
+                        kvm_pte_t *ptep, s8 *level);
 
 /**
  * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
index e46250a0201721656ffeb81f231bf91bdcb3d239..ad9cfb5c1ff4e6b9e8352331d1be46afc732a81e 100644 (file)
@@ -56,10 +56,11 @@ static inline unsigned long hyp_vm_table_pages(void)
 
 static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
 {
-       unsigned long total = 0, i;
+       unsigned long total = 0;
+       int i;
 
        /* Provision the worst case scenario */
-       for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
+       for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
                nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
                total += nr_pages;
        }
index b4ae3210993273e8fd709b8f4d17a081bf39ff3d..4305995c8f82f416e6ce11280ac1dd19fbe25eec 100644 (file)
@@ -17,9 +17,6 @@
 #ifndef __ASSEMBLY__
 
 #include <generated/vdso-offsets.h>
-#ifdef CONFIG_COMPAT_VDSO
-#include <generated/vdso32-offsets.h>
-#endif
 
 #define VDSO_SYMBOL(base, name)                                                   \
 ({                                                                        \
diff --git a/arch/arm64/include/asm/vncr_mapping.h b/arch/arm64/include/asm/vncr_mapping.h
new file mode 100644 (file)
index 0000000..df2c47c
--- /dev/null
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System register offsets in the VNCR page
+ * All offsets are *byte* displacements!
+ */
+
+#ifndef __ARM64_VNCR_MAPPING_H__
+#define __ARM64_VNCR_MAPPING_H__
+
+#define VNCR_VTTBR_EL2          0x020
+#define VNCR_VTCR_EL2           0x040
+#define VNCR_VMPIDR_EL2         0x050
+#define VNCR_CNTVOFF_EL2        0x060
+#define VNCR_HCR_EL2            0x078
+#define VNCR_HSTR_EL2           0x080
+#define VNCR_VPIDR_EL2          0x088
+#define VNCR_TPIDR_EL2          0x090
+#define VNCR_HCRX_EL2           0x0A0
+#define VNCR_VNCR_EL2           0x0B0
+#define VNCR_CPACR_EL1          0x100
+#define VNCR_CONTEXTIDR_EL1     0x108
+#define VNCR_SCTLR_EL1          0x110
+#define VNCR_ACTLR_EL1          0x118
+#define VNCR_TCR_EL1            0x120
+#define VNCR_AFSR0_EL1          0x128
+#define VNCR_AFSR1_EL1          0x130
+#define VNCR_ESR_EL1            0x138
+#define VNCR_MAIR_EL1           0x140
+#define VNCR_AMAIR_EL1          0x148
+#define VNCR_MDSCR_EL1          0x158
+#define VNCR_SPSR_EL1           0x160
+#define VNCR_CNTV_CVAL_EL0      0x168
+#define VNCR_CNTV_CTL_EL0       0x170
+#define VNCR_CNTP_CVAL_EL0      0x178
+#define VNCR_CNTP_CTL_EL0       0x180
+#define VNCR_SCXTNUM_EL1        0x188
+#define VNCR_TFSR_EL1          0x190
+#define VNCR_HFGRTR_EL2                0x1B8
+#define VNCR_HFGWTR_EL2                0x1C0
+#define VNCR_HFGITR_EL2                0x1C8
+#define VNCR_HDFGRTR_EL2       0x1D0
+#define VNCR_HDFGWTR_EL2       0x1D8
+#define VNCR_ZCR_EL1            0x1E0
+#define VNCR_HAFGRTR_EL2       0x1E8
+#define VNCR_TTBR0_EL1          0x200
+#define VNCR_TTBR1_EL1          0x210
+#define VNCR_FAR_EL1            0x220
+#define VNCR_ELR_EL1            0x230
+#define VNCR_SP_EL1             0x240
+#define VNCR_VBAR_EL1           0x250
+#define VNCR_TCR2_EL1          0x270
+#define VNCR_PIRE0_EL1         0x290
+#define VNCR_PIRE0_EL2         0x298
+#define VNCR_PIR_EL1           0x2A0
+#define VNCR_ICH_LR0_EL2        0x400
+#define VNCR_ICH_LR1_EL2        0x408
+#define VNCR_ICH_LR2_EL2        0x410
+#define VNCR_ICH_LR3_EL2        0x418
+#define VNCR_ICH_LR4_EL2        0x420
+#define VNCR_ICH_LR5_EL2        0x428
+#define VNCR_ICH_LR6_EL2        0x430
+#define VNCR_ICH_LR7_EL2        0x438
+#define VNCR_ICH_LR8_EL2        0x440
+#define VNCR_ICH_LR9_EL2        0x448
+#define VNCR_ICH_LR10_EL2       0x450
+#define VNCR_ICH_LR11_EL2       0x458
+#define VNCR_ICH_LR12_EL2       0x460
+#define VNCR_ICH_LR13_EL2       0x468
+#define VNCR_ICH_LR14_EL2       0x470
+#define VNCR_ICH_LR15_EL2       0x478
+#define VNCR_ICH_AP0R0_EL2      0x480
+#define VNCR_ICH_AP0R1_EL2      0x488
+#define VNCR_ICH_AP0R2_EL2      0x490
+#define VNCR_ICH_AP0R3_EL2      0x498
+#define VNCR_ICH_AP1R0_EL2      0x4A0
+#define VNCR_ICH_AP1R1_EL2      0x4A8
+#define VNCR_ICH_AP1R2_EL2      0x4B0
+#define VNCR_ICH_AP1R3_EL2      0x4B8
+#define VNCR_ICH_HCR_EL2        0x4C0
+#define VNCR_ICH_VMCR_EL2       0x4C8
+#define VNCR_VDISR_EL2          0x500
+#define VNCR_PMBLIMITR_EL1      0x800
+#define VNCR_PMBPTR_EL1         0x810
+#define VNCR_PMBSR_EL1          0x820
+#define VNCR_PMSCR_EL1          0x828
+#define VNCR_PMSEVFR_EL1        0x830
+#define VNCR_PMSICR_EL1         0x838
+#define VNCR_PMSIRR_EL1         0x840
+#define VNCR_PMSLATFR_EL1       0x848
+#define VNCR_TRFCR_EL1          0x880
+#define VNCR_MPAM1_EL1          0x900
+#define VNCR_MPAMHCR_EL2        0x930
+#define VNCR_MPAMVPMV_EL2       0x938
+#define VNCR_MPAMVPM0_EL2       0x940
+#define VNCR_MPAMVPM1_EL2       0x948
+#define VNCR_MPAMVPM2_EL2       0x950
+#define VNCR_MPAMVPM3_EL2       0x958
+#define VNCR_MPAMVPM4_EL2       0x960
+#define VNCR_MPAMVPM5_EL2       0x968
+#define VNCR_MPAMVPM6_EL2       0x970
+#define VNCR_MPAMVPM7_EL2       0x978
+
+#endif /* __ARM64_VNCR_MAPPING_H__ */
index d95b3d6b471a7d63957c47151fd6cb404ca0f4c7..467cb711727309eb991df38ece1af46b858e6178 100644 (file)
@@ -73,7 +73,13 @@ obj-$(CONFIG_ARM64_MTE)                      += mte.o
 obj-y                                  += vdso-wrap.o
 obj-$(CONFIG_COMPAT_VDSO)              += vdso32-wrap.o
 obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS)        += patch-scs.o
+
+# We need to prevent the SCS patching code from patching itself. Using
+# -mbranch-protection=none here to avoid the patchable PAC opcodes from being
+# generated triggers an issue with full LTO on Clang, which stops emitting PAC
+# instructions altogether. So disable LTO as well for the compilation unit.
 CFLAGS_patch-scs.o                     += -mbranch-protection=none
+CFLAGS_REMOVE_patch-scs.o              += $(CC_FLAGS_LTO)
 
 # Force dependency (vdso*-wrap.S includes vdso.so through incbin)
 $(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
index 5ff1942b04fcfd94e334b6b204f7f3885647c68d..5a7dbbe0ce639a8b7a74012c957b3c7335e8f160 100644 (file)
@@ -117,8 +117,6 @@ int main(void)
   DEFINE(DMA_FROM_DEVICE,      DMA_FROM_DEVICE);
   BLANK();
   DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
-  DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
-  DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
   DEFINE(CPU_BOOT_TASK,                offsetof(struct secondary_data, task));
   BLANK();
index e29e0fea63fb626bea3abd2ad707b3eb08df7f5b..967c7c7a4e7db3db7e3d05a7637e8e7d13e0d273 100644 (file)
@@ -416,6 +416,19 @@ static struct midr_range broken_aarch32_aes[] = {
 };
 #endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
 
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+static const struct midr_range erratum_spec_unpriv_load_list[] = {
+#ifdef CONFIG_ARM64_ERRATUM_3117295
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2966298
+       /* Cortex-A520 r0p0 to r0p1 */
+       MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
+#endif
+       {},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -713,12 +726,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
        },
 #endif
-#ifdef CONFIG_ARM64_ERRATUM_2966298
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
        {
-               .desc = "ARM erratum 2966298",
-               .capability = ARM64_WORKAROUND_2966298,
+               .desc = "ARM errata 2966298, 3117295",
+               .capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
                /* Cortex-A520 r0p0 - r0p1 */
-               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
+               ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
        },
 #endif
 #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
index 01a4c1d7fc09a8e2c1c484a5cadcb2c6ca8433bf..8d1a634a403eed6e13a210331a8f25133354ca28 100644 (file)
@@ -2341,7 +2341,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .capability = ARM64_HAS_NESTED_VIRT,
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .matches = has_nested_virt_support,
-               ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, IMP)
+               ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, NV2)
        },
        {
                .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
index a6030913cd58c44f1ce5cd7077fe61dac02c86db..7ef0e127b149fcb68ce4aaf83e1403c0648f289f 100644 (file)
@@ -428,16 +428,9 @@ alternative_else_nop_endif
        ldp     x28, x29, [sp, #16 * 14]
 
        .if     \el == 0
-alternative_if ARM64_WORKAROUND_2966298
-       tlbi    vale1, xzr
-       dsb     nsh
-alternative_else_nop_endif
-alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
-       ldr     lr, [sp, #S_LR]
-       add     sp, sp, #PT_REGS_SIZE           // restore sp
-       eret
-alternative_else_nop_endif
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+       alternative_insn "b .L_skip_tramp_exit_\@", nop, ARM64_UNMAP_KERNEL_AT_EL0
+
        msr     far_el1, x29
 
        ldr_this_cpu    x30, this_cpu_vector, x29
@@ -446,16 +439,26 @@ alternative_else_nop_endif
        ldr             lr, [sp, #S_LR]         // restore x30
        add             sp, sp, #PT_REGS_SIZE   // restore sp
        br              x29
+
+.L_skip_tramp_exit_\@:
 #endif
-       .else
+       .endif
+
        ldr     lr, [sp, #S_LR]
        add     sp, sp, #PT_REGS_SIZE           // restore sp
 
+       .if \el == 0
+       /* This must be after the last explicit memory access */
+alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+       tlbi    vale1, xzr
+       dsb     nsh
+alternative_else_nop_endif
+       .else
        /* Ensure any device/NC reads complete */
        alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+       .endif
 
        eret
-       .endif
        sb
        .endm
 
index 505f389be3e0df09d5afa2c803a7e8b235d02e2a..a5dc6f764195847251dc25c196304cbef44d8850 100644 (file)
@@ -898,10 +898,8 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
         * allocate SVE now in case it is needed for use in streaming
         * mode.
         */
-       if (system_supports_sve()) {
-               sve_free(task);
-               sve_alloc(task, true);
-       }
+       sve_free(task);
+       sve_alloc(task, true);
 
        if (free_sme)
                sme_free(task);
@@ -1219,8 +1217,10 @@ void fpsimd_release_task(struct task_struct *dead_task)
  */
 void sme_alloc(struct task_struct *task, bool flush)
 {
-       if (task->thread.sme_state && flush) {
-               memset(task->thread.sme_state, 0, sme_state_size(task));
+       if (task->thread.sme_state) {
+               if (flush)
+                       memset(task->thread.sme_state, 0,
+                              sme_state_size(task));
                return;
        }
 
index 09bb7fc7d3c2513b3bc8cb2e5545f8c31935e30a..dc6cf0e37194e428519d7d58524ad0f624f4bebb 100644 (file)
@@ -1108,12 +1108,13 @@ static int za_set(struct task_struct *target,
                }
        }
 
-       /* Allocate/reinit ZA storage */
-       sme_alloc(target, true);
-       if (!target->thread.sme_state) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       /*
+        * Only flush the storage if PSTATE.ZA was not already set,
+        * otherwise preserve any existing data.
+        */
+       sme_alloc(target, !thread_za_enabled(&target->thread));
+       if (!target->thread.sme_state)
+               return -ENOMEM;
 
        /* If there is no data then disable ZA */
        if (!count) {
index 417a8a86b2db595b5741b77a6c362502bb2f3617..42c690bb2d608c972404dbb52918ed3d044c17b2 100644 (file)
@@ -402,19 +402,10 @@ static inline bool cpu_can_disable(unsigned int cpu)
        return false;
 }
 
-static int __init topology_init(void)
+bool arch_cpu_is_hotpluggable(int num)
 {
-       int i;
-
-       for_each_possible_cpu(i) {
-               struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
-               cpu->hotpluggable = cpu_can_disable(i);
-               register_cpu(cpu, i);
-       }
-
-       return 0;
+       return cpu_can_disable(num);
 }
-subsys_initcall(topology_init);
 
 static void dump_kernel_offset(void)
 {
index 2266fcdff78a0740fcd72a5c8125d17938d88df4..f5f80fdce0fe7aa2ab3b14ce931999b954312162 100644 (file)
@@ -127,9 +127,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
 targets += vdso.lds
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
 
-include/generated/vdso32-offsets.h: $(obj)/vdso32.so.dbg FORCE
-       $(call if_changed,vdsosym)
-
 # Strip rule for vdso.so
 $(obj)/vdso.so: OBJCOPYFLAGS := -S
 $(obj)/vdso.so: $(obj)/vdso32.so.dbg FORCE
@@ -166,9 +163,3 @@ quiet_cmd_vdsoas = AS32    $@
 
 quiet_cmd_vdsomunge = MUNGE   $@
       cmd_vdsomunge = $(obj)/$(munge) $< $@
-
-# Generate vDSO offsets using helper script (borrowed from the 64-bit vDSO)
-gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
-quiet_cmd_vdsosym = VDSOSYM $@
-# The AArch64 nm should be able to read an AArch32 binary
-      cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
index 83c1e09be42e5b609d44d653e184b44d9cdc347c..6c3c8ca73e7fda8bb29792218bb11d031e7527ff 100644 (file)
@@ -21,16 +21,14 @@ if VIRTUALIZATION
 menuconfig KVM
        bool "Kernel-based Virtual Machine (KVM) support"
        depends on HAVE_KVM
+       select KVM_COMMON
        select KVM_GENERIC_HARDWARE_ENABLING
-       select MMU_NOTIFIER
-       select PREEMPT_NOTIFIERS
+       select KVM_GENERIC_MMU_NOTIFIER
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select KVM_MMIO
        select KVM_GENERIC_DIRTYLOG_READ_PROTECT
        select KVM_XFER_TO_GUEST_WORK
        select KVM_VFIO
-       select HAVE_KVM_EVENTFD
-       select HAVE_KVM_IRQFD
        select HAVE_KVM_DIRTY_RING_ACQ_REL
        select NEED_KVM_DIRTY_RING_WITH_BITMAP
        select HAVE_KVM_MSI
@@ -41,7 +39,6 @@ menuconfig KVM
        select HAVE_KVM_VCPU_RUN_PID_CHANGE
        select SCHED_INFO
        select GUEST_PERF_EVENTS if PERF_EVENTS
-       select INTERVAL_TREE
        select XARRAY_MULTI
        help
          Support hosting virtualized guest machines.
index 13ba691b848f7a4723edcee8ed58d9feb1cd3664..9dec8c419bf4028e11350f5bab3cc079cdaf67b7 100644 (file)
@@ -295,8 +295,7 @@ static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
        u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
        struct arch_timer_context *ctx;
 
-       ctx = (vcpu_has_nv(vcpu) && is_hyp_ctxt(vcpu)) ? vcpu_hvtimer(vcpu)
-                                                      : vcpu_vtimer(vcpu);
+       ctx = is_hyp_ctxt(vcpu) ? vcpu_hvtimer(vcpu) : vcpu_vtimer(vcpu);
 
        return kvm_counter_compute_delta(ctx, val);
 }
index 4796104c44718146f89ef1e5d7b8eb95e187a787..a25265aca4324e490298794f5418aaa85b64480c 100644 (file)
@@ -221,7 +221,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = vgic_present;
                break;
        case KVM_CAP_IOEVENTFD:
-       case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
@@ -669,6 +668,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
                        return ret;
        }
 
+       if (vcpu_has_nv(vcpu)) {
+               ret = kvm_init_nv_sysregs(vcpu->kvm);
+               if (ret)
+                       return ret;
+       }
+
        ret = kvm_timer_enable(vcpu);
        if (ret)
                return ret;
@@ -1837,6 +1842,7 @@ static int kvm_init_vector_slots(void)
 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 {
        struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
+       u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        unsigned long tcr;
 
        /*
@@ -1859,6 +1865,10 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
        }
        tcr &= ~TCR_T0SZ_MASK;
        tcr |= TCR_T0SZ(hyp_va_bits);
+       tcr &= ~TCR_EL2_PS_MASK;
+       tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
+       if (kvm_lpa2_is_enabled())
+               tcr |= TCR_EL2_DS;
        params->tcr_el2 = tcr;
 
        params->pgd_pa = kvm_mmu_get_httbr();
index 06185216a297dd1ca085fd471000dc53f02008df..431fd429932dfaa557ac0977fbbc3baa58eef99d 100644 (file)
@@ -1012,6 +1012,7 @@ enum fgt_group_id {
        HDFGRTR_GROUP,
        HDFGWTR_GROUP,
        HFGITR_GROUP,
+       HAFGRTR_GROUP,
 
        /* Must be last */
        __NR_FGT_GROUP_IDS__
@@ -1042,10 +1043,20 @@ enum fg_filter_id {
 
 static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
        /* HFGRTR_EL2, HFGWTR_EL2 */
+       SR_FGT(SYS_AMAIR2_EL1,          HFGxTR, nAMAIR2_EL1, 0),
+       SR_FGT(SYS_MAIR2_EL1,           HFGxTR, nMAIR2_EL1, 0),
+       SR_FGT(SYS_S2POR_EL1,           HFGxTR, nS2POR_EL1, 0),
+       SR_FGT(SYS_POR_EL1,             HFGxTR, nPOR_EL1, 0),
+       SR_FGT(SYS_POR_EL0,             HFGxTR, nPOR_EL0, 0),
        SR_FGT(SYS_PIR_EL1,             HFGxTR, nPIR_EL1, 0),
        SR_FGT(SYS_PIRE0_EL1,           HFGxTR, nPIRE0_EL1, 0),
+       SR_FGT(SYS_RCWMASK_EL1,         HFGxTR, nRCWMASK_EL1, 0),
        SR_FGT(SYS_TPIDR2_EL0,          HFGxTR, nTPIDR2_EL0, 0),
        SR_FGT(SYS_SMPRI_EL1,           HFGxTR, nSMPRI_EL1, 0),
+       SR_FGT(SYS_GCSCR_EL1,           HFGxTR, nGCS_EL1, 0),
+       SR_FGT(SYS_GCSPR_EL1,           HFGxTR, nGCS_EL1, 0),
+       SR_FGT(SYS_GCSCRE0_EL1,         HFGxTR, nGCS_EL0, 0),
+       SR_FGT(SYS_GCSPR_EL0,           HFGxTR, nGCS_EL0, 0),
        SR_FGT(SYS_ACCDATA_EL1,         HFGxTR, nACCDATA_EL1, 0),
        SR_FGT(SYS_ERXADDR_EL1,         HFGxTR, ERXADDR_EL1, 1),
        SR_FGT(SYS_ERXPFGCDN_EL1,       HFGxTR, ERXPFGCDN_EL1, 1),
@@ -1107,6 +1118,11 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
        SR_FGT(SYS_AFSR1_EL1,           HFGxTR, AFSR1_EL1, 1),
        SR_FGT(SYS_AFSR0_EL1,           HFGxTR, AFSR0_EL1, 1),
        /* HFGITR_EL2 */
+       SR_FGT(OP_AT_S1E1A,             HFGITR, ATS1E1A, 1),
+       SR_FGT(OP_COSP_RCTX,            HFGITR, COSPRCTX, 1),
+       SR_FGT(OP_GCSPUSHX,             HFGITR, nGCSEPP, 0),
+       SR_FGT(OP_GCSPOPX,              HFGITR, nGCSEPP, 0),
+       SR_FGT(OP_GCSPUSHM,             HFGITR, nGCSPUSHM_EL1, 0),
        SR_FGT(OP_BRB_IALL,             HFGITR, nBRBIALL, 0),
        SR_FGT(OP_BRB_INJ,              HFGITR, nBRBINJ, 0),
        SR_FGT(SYS_DC_CVAC,             HFGITR, DCCVAC, 1),
@@ -1674,6 +1690,49 @@ static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
        SR_FGT(SYS_PMCR_EL0,            HDFGWTR, PMCR_EL0, 1),
        SR_FGT(SYS_PMSWINC_EL0,         HDFGWTR, PMSWINC_EL0, 1),
        SR_FGT(SYS_OSLAR_EL1,           HDFGWTR, OSLAR_EL1, 1),
+       /*
+        * HAFGRTR_EL2
+        */
+       SR_FGT(SYS_AMEVTYPER1_EL0(15),  HAFGRTR, AMEVTYPER115_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(14),  HAFGRTR, AMEVTYPER114_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(13),  HAFGRTR, AMEVTYPER113_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(12),  HAFGRTR, AMEVTYPER112_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(11),  HAFGRTR, AMEVTYPER111_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(10),  HAFGRTR, AMEVTYPER110_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(9),   HAFGRTR, AMEVTYPER19_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(8),   HAFGRTR, AMEVTYPER18_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(7),   HAFGRTR, AMEVTYPER17_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(6),   HAFGRTR, AMEVTYPER16_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(5),   HAFGRTR, AMEVTYPER15_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(4),   HAFGRTR, AMEVTYPER14_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(3),   HAFGRTR, AMEVTYPER13_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(2),   HAFGRTR, AMEVTYPER12_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(1),   HAFGRTR, AMEVTYPER11_EL0, 1),
+       SR_FGT(SYS_AMEVTYPER1_EL0(0),   HAFGRTR, AMEVTYPER10_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(15),   HAFGRTR, AMEVCNTR115_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(14),   HAFGRTR, AMEVCNTR114_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(13),   HAFGRTR, AMEVCNTR113_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(12),   HAFGRTR, AMEVCNTR112_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(11),   HAFGRTR, AMEVCNTR111_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(10),   HAFGRTR, AMEVCNTR110_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(9),    HAFGRTR, AMEVCNTR19_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(8),    HAFGRTR, AMEVCNTR18_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(7),    HAFGRTR, AMEVCNTR17_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(6),    HAFGRTR, AMEVCNTR16_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(5),    HAFGRTR, AMEVCNTR15_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(4),    HAFGRTR, AMEVCNTR14_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(3),    HAFGRTR, AMEVCNTR13_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(2),    HAFGRTR, AMEVCNTR12_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(1),    HAFGRTR, AMEVCNTR11_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR1_EL0(0),    HAFGRTR, AMEVCNTR10_EL0, 1),
+       SR_FGT(SYS_AMCNTENCLR1_EL0,     HAFGRTR, AMCNTEN1, 1),
+       SR_FGT(SYS_AMCNTENSET1_EL0,     HAFGRTR, AMCNTEN1, 1),
+       SR_FGT(SYS_AMCNTENCLR0_EL0,     HAFGRTR, AMCNTEN0, 1),
+       SR_FGT(SYS_AMCNTENSET0_EL0,     HAFGRTR, AMCNTEN0, 1),
+       SR_FGT(SYS_AMEVCNTR0_EL0(3),    HAFGRTR, AMEVCNTR03_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR0_EL0(2),    HAFGRTR, AMEVCNTR02_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR0_EL0(1),    HAFGRTR, AMEVCNTR01_EL0, 1),
+       SR_FGT(SYS_AMEVCNTR0_EL0(0),    HAFGRTR, AMEVCNTR00_EL0, 1),
 };
 
 static union trap_config get_trap_config(u32 sysreg)
@@ -1894,6 +1953,10 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
                        val = sanitised_sys_reg(vcpu, HDFGWTR_EL2);
                break;
 
+       case HAFGRTR_GROUP:
+               val = sanitised_sys_reg(vcpu, HAFGRTR_EL2);
+               break;
+
        case HFGITR_GROUP:
                val = sanitised_sys_reg(vcpu, HFGITR_EL2);
                switch (tc.fgf) {
index 9ddcfe2c3e574fc8d85b01d91f244f2091350247..9e13c1bc2ad5415981c2579539fad598241c0bdc 100644 (file)
@@ -60,7 +60,7 @@ static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
         */
        if (!(esr & ESR_ELx_S1PTW) &&
            (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
-            (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
+            esr_fsc_is_permission_fault(esr))) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
index f99d8af0b9afb0ff31b549d08c6234f9a272eef2..a038320cdb089074328a29419a5fa1c121a5052f 100644 (file)
@@ -79,6 +79,45 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
                clr |= ~hfg & __ ## reg ## _nMASK;                      \
        } while(0)
 
+#define update_fgt_traps_cs(vcpu, reg, clr, set)                       \
+       do {                                                            \
+               struct kvm_cpu_context *hctxt =                         \
+                       &this_cpu_ptr(&kvm_host_data)->host_ctxt;       \
+               u64 c = 0, s = 0;                                       \
+                                                                       \
+               ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);  \
+               compute_clr_set(vcpu, reg, c, s);                       \
+               s |= set;                                               \
+               c |= clr;                                               \
+               if (c || s) {                                           \
+                       u64 val = __ ## reg ## _nMASK;                  \
+                       val |= s;                                       \
+                       val &= ~c;                                      \
+                       write_sysreg_s(val, SYS_ ## reg);               \
+               }                                                       \
+       } while(0)
+
+#define update_fgt_traps(vcpu, reg)            \
+       update_fgt_traps_cs(vcpu, reg, 0, 0)
+
+/*
+ * Validate the fine grain trap masks.
+ * Check that the masks do not overlap and that all bits are accounted for.
+ */
+#define CHECK_FGT_MASKS(reg)                                                   \
+       do {                                                                    \
+               BUILD_BUG_ON((__ ## reg ## _MASK) & (__ ## reg ## _nMASK));     \
+               BUILD_BUG_ON(~((__ ## reg ## _RES0) ^ (__ ## reg ## _MASK) ^    \
+                              (__ ## reg ## _nMASK)));                         \
+       } while(0)
+
+static inline bool cpu_has_amu(void)
+{
+       u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
+
+       return cpuid_feature_extract_unsigned_field(pfr0,
+               ID_AA64PFR0_EL1_AMU_SHIFT);
+}
 
 static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
 {
@@ -86,6 +125,14 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
        u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
        u64 r_val, w_val;
 
+       CHECK_FGT_MASKS(HFGRTR_EL2);
+       CHECK_FGT_MASKS(HFGWTR_EL2);
+       CHECK_FGT_MASKS(HFGITR_EL2);
+       CHECK_FGT_MASKS(HDFGRTR_EL2);
+       CHECK_FGT_MASKS(HDFGWTR_EL2);
+       CHECK_FGT_MASKS(HAFGRTR_EL2);
+       CHECK_FGT_MASKS(HCRX_EL2);
+
        if (!cpus_have_final_cap(ARM64_HAS_FGT))
                return;
 
@@ -110,12 +157,15 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
                compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
        }
 
-       /* The default is not to trap anything but ACCDATA_EL1 */
-       r_val = __HFGRTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1;
+       /* The default to trap everything not handled or supported in KVM. */
+       tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 |
+             HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1;
+
+       r_val = __HFGRTR_EL2_nMASK & ~tmp;
        r_val |= r_set;
        r_val &= ~r_clr;
 
-       w_val = __HFGWTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1;
+       w_val = __HFGWTR_EL2_nMASK & ~tmp;
        w_val |= w_set;
        w_val &= ~w_clr;
 
@@ -125,34 +175,12 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
        if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
                return;
 
-       ctxt_sys_reg(hctxt, HFGITR_EL2) = read_sysreg_s(SYS_HFGITR_EL2);
+       update_fgt_traps(vcpu, HFGITR_EL2);
+       update_fgt_traps(vcpu, HDFGRTR_EL2);
+       update_fgt_traps(vcpu, HDFGWTR_EL2);
 
-       r_set = r_clr = 0;
-       compute_clr_set(vcpu, HFGITR_EL2, r_clr, r_set);
-       r_val = __HFGITR_EL2_nMASK;
-       r_val |= r_set;
-       r_val &= ~r_clr;
-
-       write_sysreg_s(r_val, SYS_HFGITR_EL2);
-
-       ctxt_sys_reg(hctxt, HDFGRTR_EL2) = read_sysreg_s(SYS_HDFGRTR_EL2);
-       ctxt_sys_reg(hctxt, HDFGWTR_EL2) = read_sysreg_s(SYS_HDFGWTR_EL2);
-
-       r_clr = r_set = w_clr = w_set = 0;
-
-       compute_clr_set(vcpu, HDFGRTR_EL2, r_clr, r_set);
-       compute_clr_set(vcpu, HDFGWTR_EL2, w_clr, w_set);
-
-       r_val = __HDFGRTR_EL2_nMASK;
-       r_val |= r_set;
-       r_val &= ~r_clr;
-
-       w_val = __HDFGWTR_EL2_nMASK;
-       w_val |= w_set;
-       w_val &= ~w_clr;
-
-       write_sysreg_s(r_val, SYS_HDFGRTR_EL2);
-       write_sysreg_s(w_val, SYS_HDFGWTR_EL2);
+       if (cpu_has_amu())
+               update_fgt_traps(vcpu, HAFGRTR_EL2);
 }
 
 static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
@@ -171,6 +199,9 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
        write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
        write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
        write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
+
+       if (cpu_has_amu())
+               write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2);
 }
 
 static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
@@ -591,7 +622,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
                bool valid;
 
-               valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
+               valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
                        kvm_vcpu_dabt_isvalid(vcpu) &&
                        !kvm_vcpu_abt_issea(vcpu) &&
                        !kvm_vcpu_abt_iss1tw(vcpu);
index e91922daa8ca8e61b5f8e85763580ecae2400dde..51f043649146aa4ed4d373a8e9311adda9d95214 100644 (file)
@@ -69,6 +69,8 @@
        ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \
        )
 
+#define PVM_ID_AA64PFR2_ALLOW 0ULL
+
 /*
  * Allow for protected VMs:
  * - Mixed-endian
  * - Privileged Access Never
  * - SError interrupt exceptions from speculative reads
  * - Enhanced Translation Synchronization
+ * - Control for cache maintenance permission
  */
 #define PVM_ID_AA64MMFR1_ALLOW (\
        ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \
        ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \
        ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \
        ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \
-       ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) \
+       ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) | \
+       ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_CMOW) \
        )
 
 /*
        ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \
        )
 
+#define PVM_ID_AA64MMFR3_ALLOW (0ULL)
+
 /*
  * No support for Scalable Vectors for protected VMs:
  *     Requires additional support from KVM, e.g., context-switching and
        ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RNDR) \
        )
 
+/* Restrict pointer authentication to the basic version. */
+#define PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED (\
+       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), ID_AA64ISAR1_EL1_APA_PAuth) | \
+       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), ID_AA64ISAR1_EL1_API_PAuth) \
+       )
+
+#define PVM_ID_AA64ISAR2_RESTRICT_UNSIGNED (\
+       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3), ID_AA64ISAR2_EL1_APA3_PAuth) \
+       )
+
 #define PVM_ID_AA64ISAR1_ALLOW (\
        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DPB) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) | \
        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_JSCVT) | \
        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FCMA) | \
        ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_LRCPC) | \
        )
 
 #define PVM_ID_AA64ISAR2_ALLOW (\
+       ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_ATS1A)| \
        ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \
-       ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | \
        ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \
        )
 
index 1cc06e6797bda378a59b072de8c6f9a1612f6893..2994878d68ea7ecc752cd88558f954678ded3a76 100644 (file)
@@ -122,11 +122,7 @@ alternative_if ARM64_HAS_CNP
 alternative_else_nop_endif
        msr     ttbr0_el2, x2
 
-       /*
-        * Set the PS bits in TCR_EL2.
-        */
        ldr     x0, [x0, #NVHE_INIT_TCR_EL2]
-       tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
        msr     tcr_el2, x0
 
        isb
@@ -292,6 +288,8 @@ alternative_else_nop_endif
        mov     sp, x0
 
        /* And turn the MMU back on! */
+       dsb     nsh
+       isb
        set_sctlr_el2   x2
        ret     x1
 SYM_FUNC_END(__pkvm_init_switch_pgd)
index 8d0a5834e8830059d43d464c4187ff63990e9770..861c76021a250cebc11cd7b5a82db626881eebe2 100644 (file)
@@ -91,7 +91,7 @@ static void host_s2_put_page(void *addr)
        hyp_put_page(&host_s2_pool, addr);
 }
 
-static void host_s2_free_unlinked_table(void *addr, u32 level)
+static void host_s2_free_unlinked_table(void *addr, s8 level)
 {
        kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level);
 }
@@ -443,7 +443,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 {
        struct kvm_mem_range cur;
        kvm_pte_t pte;
-       u32 level;
+       s8 level;
        int ret;
 
        hyp_assert_lock_held(&host_mmu.lock);
@@ -462,7 +462,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
                cur.start = ALIGN_DOWN(addr, granule);
                cur.end = cur.start + granule;
                level++;
-       } while ((level < KVM_PGTABLE_MAX_LEVELS) &&
+       } while ((level <= KVM_PGTABLE_LAST_LEVEL) &&
                        !(kvm_level_supports_block_mapping(level) &&
                          range_included(&cur, range)));
 
index 65a7a186d7b217e599688f465b9d49b64ee2b9f0..b01a3d1078a8803f061496044066d749f9881f94 100644 (file)
@@ -260,7 +260,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
         * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
         */
        dsb(ishst);
-       __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1));
+       __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), KVM_PGTABLE_LAST_LEVEL);
        dsb(ish);
        isb();
 }
@@ -275,7 +275,7 @@ static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
 {
        struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg);
 
-       if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_MAX_LEVELS - 1)
+       if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_LAST_LEVEL)
                return -EINVAL;
 
        slot->addr = ctx->addr;
index b29f15418c0aff6fca086747c0a7333ec78048ac..26dd9a20ad6e6dccd2083e5804e4a063997c958e 100644 (file)
@@ -136,6 +136,10 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
                        cptr_set |= CPTR_EL2_TTA;
        }
 
+       /* Trap External Trace */
+       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
+               mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
+
        vcpu->arch.mdcr_el2 |= mdcr_set;
        vcpu->arch.mdcr_el2 &= ~mdcr_clear;
        vcpu->arch.cptr_el2 |= cptr_set;
index 0d5e0a89ddce5df353fb5cccd6269686eaa674db..bc58d1b515af198586b64ef70b15e8926f19efea 100644 (file)
@@ -181,7 +181,7 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
        if (!kvm_pte_valid(ctx->old))
                return 0;
 
-       if (ctx->level != (KVM_PGTABLE_MAX_LEVELS - 1))
+       if (ctx->level != KVM_PGTABLE_LAST_LEVEL)
                return -EINVAL;
 
        phys = kvm_pte_to_phys(ctx->old);
index 1966fdee740ebfd639affa543694b88d72a4e3d2..c651df904fe3eb940e07785aac1ac76079743666 100644 (file)
@@ -79,7 +79,10 @@ static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
 
 static bool kvm_phys_is_valid(u64 phys)
 {
-       return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
+       u64 parange_max = kvm_get_parange_max();
+       u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max);
+
+       return phys < BIT(shift);
 }
 
 static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
@@ -98,7 +101,7 @@ static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx,
        return IS_ALIGNED(ctx->addr, granule);
 }
 
-static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
+static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, s8 level)
 {
        u64 shift = kvm_granule_shift(level);
        u64 mask = BIT(PAGE_SHIFT - 3) - 1;
@@ -114,7 +117,7 @@ static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
        return (addr & mask) >> shift;
 }
 
-static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
+static u32 kvm_pgd_pages(u32 ia_bits, s8 start_level)
 {
        struct kvm_pgtable pgt = {
                .ia_bits        = ia_bits,
@@ -124,9 +127,9 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
        return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
 }
 
-static bool kvm_pte_table(kvm_pte_t pte, u32 level)
+static bool kvm_pte_table(kvm_pte_t pte, s8 level)
 {
-       if (level == KVM_PGTABLE_MAX_LEVELS - 1)
+       if (level == KVM_PGTABLE_LAST_LEVEL)
                return false;
 
        if (!kvm_pte_valid(pte))
@@ -154,11 +157,11 @@ static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops
        return pte;
 }
 
-static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
+static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, s8 level)
 {
        kvm_pte_t pte = kvm_phys_to_pte(pa);
-       u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
-                                                          KVM_PTE_TYPE_BLOCK;
+       u64 type = (level == KVM_PGTABLE_LAST_LEVEL) ? KVM_PTE_TYPE_PAGE :
+                                                      KVM_PTE_TYPE_BLOCK;
 
        pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
        pte |= FIELD_PREP(KVM_PTE_TYPE, type);
@@ -203,11 +206,11 @@ static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
 }
 
 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
-                             struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level);
+                             struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level);
 
 static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
                                      struct kvm_pgtable_mm_ops *mm_ops,
-                                     kvm_pteref_t pteref, u32 level)
+                                     kvm_pteref_t pteref, s8 level)
 {
        enum kvm_pgtable_walk_flags flags = data->walker->flags;
        kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
@@ -272,12 +275,13 @@ out:
 }
 
 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
-                             struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level)
+                             struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level)
 {
        u32 idx;
        int ret = 0;
 
-       if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS))
+       if (WARN_ON_ONCE(level < KVM_PGTABLE_FIRST_LEVEL ||
+                        level > KVM_PGTABLE_LAST_LEVEL))
                return -EINVAL;
 
        for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
@@ -340,7 +344,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
 
 struct leaf_walk_data {
        kvm_pte_t       pte;
-       u32             level;
+       s8              level;
 };
 
 static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
@@ -355,7 +359,7 @@ static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
 }
 
 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
-                        kvm_pte_t *ptep, u32 *level)
+                        kvm_pte_t *ptep, s8 *level)
 {
        struct leaf_walk_data data;
        struct kvm_pgtable_walker walker = {
@@ -408,7 +412,8 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
        }
 
        attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
-       attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
+       if (!kvm_lpa2_is_enabled())
+               attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
        attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
        attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
        *ptep = attr;
@@ -467,7 +472,7 @@ static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
        if (hyp_map_walker_try_leaf(ctx, data))
                return 0;
 
-       if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
+       if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
                return -EINVAL;
 
        childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
@@ -563,14 +568,19 @@ u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
                         struct kvm_pgtable_mm_ops *mm_ops)
 {
-       u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);
+       s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 -
+                        ARM64_HW_PGTABLE_LEVELS(va_bits);
+
+       if (start_level < KVM_PGTABLE_FIRST_LEVEL ||
+           start_level > KVM_PGTABLE_LAST_LEVEL)
+               return -EINVAL;
 
        pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
        if (!pgt->pgd)
                return -ENOMEM;
 
        pgt->ia_bits            = va_bits;
-       pgt->start_level        = KVM_PGTABLE_MAX_LEVELS - levels;
+       pgt->start_level        = start_level;
        pgt->mm_ops             = mm_ops;
        pgt->mmu                = NULL;
        pgt->force_pte_cb       = NULL;
@@ -624,7 +634,7 @@ struct stage2_map_data {
 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
 {
        u64 vtcr = VTCR_EL2_FLAGS;
-       u8 lvls;
+       s8 lvls;
 
        vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
        vtcr |= VTCR_EL2_T0SZ(phys_shift);
@@ -635,6 +645,15 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
        lvls = stage2_pgtable_levels(phys_shift);
        if (lvls < 2)
                lvls = 2;
+
+       /*
+        * When LPA2 is enabled, the HW supports an extra level of translation
+        * (for 5 in total) when using 4K pages. It also introduces VTCR_EL2.SL2
+        * to as an addition to SL0 to enable encoding this extra start level.
+        * However, since we always use concatenated pages for the first level
+        * lookup, we will never need this extra level and therefore do not need
+        * to touch SL2.
+        */
        vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
 
 #ifdef CONFIG_ARM64_HW_AFDBM
@@ -654,6 +673,9 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
                vtcr |= VTCR_EL2_HA;
 #endif /* CONFIG_ARM64_HW_AFDBM */
 
+       if (kvm_lpa2_is_enabled())
+               vtcr |= VTCR_EL2_DS;
+
        /* Set the vmid bits */
        vtcr |= (get_vmid_bits(mmfr1) == 16) ?
                VTCR_EL2_VS_16BIT :
@@ -711,7 +733,9 @@ static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot p
        if (prot & KVM_PGTABLE_PROT_W)
                attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
 
-       attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
+       if (!kvm_lpa2_is_enabled())
+               attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
+
        attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
        attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
        *ptep = attr;
@@ -902,7 +926,7 @@ static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
 {
        u64 phys = stage2_map_walker_phys_addr(ctx, data);
 
-       if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
+       if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
                return false;
 
        return kvm_block_mapping_supported(ctx, phys);
@@ -981,7 +1005,7 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
        if (ret != -E2BIG)
                return ret;
 
-       if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
+       if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
                return -EINVAL;
 
        if (!data->memcache)
@@ -1151,7 +1175,7 @@ struct stage2_attr_data {
        kvm_pte_t                       attr_set;
        kvm_pte_t                       attr_clr;
        kvm_pte_t                       pte;
-       u32                             level;
+       s8                              level;
 };
 
 static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
@@ -1194,7 +1218,7 @@ static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
                                    u64 size, kvm_pte_t attr_set,
                                    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
-                                   u32 *level, enum kvm_pgtable_walk_flags flags)
+                                   s8 *level, enum kvm_pgtable_walk_flags flags)
 {
        int ret;
        kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
@@ -1296,7 +1320,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
                                   enum kvm_pgtable_prot prot)
 {
        int ret;
-       u32 level;
+       s8 level;
        kvm_pte_t set = 0, clr = 0;
 
        if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
@@ -1349,7 +1373,7 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
 }
 
 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
-                                             u64 phys, u32 level,
+                                             u64 phys, s8 level,
                                              enum kvm_pgtable_prot prot,
                                              void *mc, bool force_pte)
 {
@@ -1407,7 +1431,7 @@ kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
  * fully populated tree up to the PTE entries. Note that @level is
  * interpreted as in "level @level entry".
  */
-static int stage2_block_get_nr_page_tables(u32 level)
+static int stage2_block_get_nr_page_tables(s8 level)
 {
        switch (level) {
        case 1:
@@ -1418,7 +1442,7 @@ static int stage2_block_get_nr_page_tables(u32 level)
                return 0;
        default:
                WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL ||
-                            level >= KVM_PGTABLE_MAX_LEVELS);
+                            level > KVM_PGTABLE_LAST_LEVEL);
                return -EINVAL;
        };
 }
@@ -1431,13 +1455,13 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
        struct kvm_s2_mmu *mmu;
        kvm_pte_t pte = ctx->old, new, *childp;
        enum kvm_pgtable_prot prot;
-       u32 level = ctx->level;
+       s8 level = ctx->level;
        bool force_pte;
        int nr_pages;
        u64 phys;
 
        /* No huge-pages exist at the last level */
-       if (level == KVM_PGTABLE_MAX_LEVELS - 1)
+       if (level == KVM_PGTABLE_LAST_LEVEL)
                return 0;
 
        /* We only split valid block mappings */
@@ -1514,7 +1538,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
        u64 vtcr = mmu->vtcr;
        u32 ia_bits = VTCR_EL2_IPA(vtcr);
        u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
-       u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
+       s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
 
        pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
        pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
@@ -1537,7 +1561,7 @@ size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
 {
        u32 ia_bits = VTCR_EL2_IPA(vtcr);
        u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
-       u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
+       s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
 
        return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
 }
@@ -1573,7 +1597,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
        pgt->pgd = NULL;
 }
 
-void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
 {
        kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
        struct kvm_pgtable_walker walker = {
index d87c8fcc4c24f6f7b36678951634ff1ff00d6b4d..d14504821b794eb84dc7c44fec72097cee5ceceb 100644 (file)
@@ -223,12 +223,12 @@ static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
 {
        struct page *page = container_of(head, struct page, rcu_head);
        void *pgtable = page_to_virt(page);
-       u32 level = page_private(page);
+       s8 level = page_private(page);
 
        kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
 }
 
-static void stage2_free_unlinked_table(void *addr, u32 level)
+static void stage2_free_unlinked_table(void *addr, s8 level)
 {
        struct page *page = virt_to_page(addr);
 
@@ -804,13 +804,13 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
        struct kvm_pgtable pgt = {
                .pgd            = (kvm_pteref_t)kvm->mm->pgd,
                .ia_bits        = vabits_actual,
-               .start_level    = (KVM_PGTABLE_MAX_LEVELS -
-                                  CONFIG_PGTABLE_LEVELS),
+               .start_level    = (KVM_PGTABLE_LAST_LEVEL -
+                                  CONFIG_PGTABLE_LEVELS + 1),
                .mm_ops         = &kvm_user_mm_ops,
        };
        unsigned long flags;
        kvm_pte_t pte = 0;      /* Keep GCC quiet... */
-       u32 level = ~0;
+       s8 level = S8_MAX;
        int ret;
 
        /*
@@ -829,7 +829,9 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
         * Not seeing an error, but not updating level? Something went
         * deeply wrong...
         */
-       if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
+       if (WARN_ON(level > KVM_PGTABLE_LAST_LEVEL))
+               return -EFAULT;
+       if (WARN_ON(level < KVM_PGTABLE_FIRST_LEVEL))
                return -EFAULT;
 
        /* Oops, the userspace PTs are gone... Replay the fault */
@@ -1374,7 +1376,7 @@ static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
-                         unsigned long fault_status)
+                         bool fault_is_perm)
 {
        int ret = 0;
        bool write_fault, writable, force_pte = false;
@@ -1388,17 +1390,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        gfn_t gfn;
        kvm_pfn_t pfn;
        bool logging_active = memslot_is_logging(memslot);
-       unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
        long vma_pagesize, fault_granule;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
        struct kvm_pgtable *pgt;
 
-       fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
+       if (fault_is_perm)
+               fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
        write_fault = kvm_is_write_fault(vcpu);
        exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
        VM_BUG_ON(write_fault && exec_fault);
 
-       if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
+       if (fault_is_perm && !write_fault && !exec_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }
@@ -1409,8 +1411,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * only exception to this is when dirty logging is enabled at runtime
         * and a write fault needs to collapse a block entry into a table.
         */
-       if (fault_status != ESR_ELx_FSC_PERM ||
-           (logging_active && write_fault)) {
+       if (!fault_is_perm || (logging_active && write_fault)) {
                ret = kvm_mmu_topup_memory_cache(memcache,
                                                 kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
                if (ret)
@@ -1527,8 +1528,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * backed by a THP and thus use block mapping if possible.
         */
        if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
-               if (fault_status ==  ESR_ELx_FSC_PERM &&
-                   fault_granule > PAGE_SIZE)
+               if (fault_is_perm && fault_granule > PAGE_SIZE)
                        vma_pagesize = fault_granule;
                else
                        vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
@@ -1541,7 +1541,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                }
        }
 
-       if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
+       if (!fault_is_perm && !device && kvm_has_mte(kvm)) {
                /* Check the VMM hasn't introduced a new disallowed VMA */
                if (mte_allowed) {
                        sanitise_mte_tags(kvm, pfn, vma_pagesize);
@@ -1567,7 +1567,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * permissions only if vma_pagesize equals fault_granule. Otherwise,
         * kvm_pgtable_stage2_map() should be called to change block size.
         */
-       if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
+       if (fault_is_perm && vma_pagesize == fault_granule)
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
        else
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
@@ -1618,7 +1618,7 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
  */
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 {
-       unsigned long fault_status;
+       unsigned long esr;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
        unsigned long hva;
@@ -1626,12 +1626,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        gfn_t gfn;
        int ret, idx;
 
-       fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
+       esr = kvm_vcpu_get_esr(vcpu);
 
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
-       if (fault_status == ESR_ELx_FSC_FAULT) {
+       if (esr_fsc_is_permission_fault(esr)) {
                /* Beyond sanitised PARange (which is the IPA limit) */
                if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
                        kvm_inject_size_fault(vcpu);
@@ -1666,9 +1666,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
        /* Check the stage-2 fault is trans. fault or write fault */
-       if (fault_status != ESR_ELx_FSC_FAULT &&
-           fault_status != ESR_ELx_FSC_PERM &&
-           fault_status != ESR_ELx_FSC_ACCESS) {
+       if (!esr_fsc_is_translation_fault(esr) &&
+           !esr_fsc_is_permission_fault(esr) &&
+           !esr_fsc_is_access_flag_fault(esr)) {
                kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu),
                        (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1730,13 +1730,14 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        /* Userspace should not be able to register out-of-bounds IPAs */
        VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu));
 
-       if (fault_status == ESR_ELx_FSC_ACCESS) {
+       if (esr_fsc_is_access_flag_fault(esr)) {
                handle_access_fault(vcpu, fault_ipa);
                ret = 1;
                goto out_unlock;
        }
 
-       ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
+       ret = user_mem_abort(vcpu, fault_ipa, memslot, hva,
+                            esr_fsc_is_permission_fault(esr));
        if (ret == 0)
                ret = 1;
 out:
index 042695a210cebdcf753120a380175a46169e82d9..ba95d044bc98fd397d2b59e48aeb92d56a39c656 100644 (file)
  * This list should get updated as new features get added to the NV
  * support, and new extension to the architecture.
  */
-void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
-                         const struct sys_reg_desc *r)
+static u64 limit_nv_id_reg(u32 id, u64 val)
 {
-       u32 id = reg_to_encoding(r);
-       u64 val, tmp;
-
-       val = p->regval;
+       u64 tmp;
 
        switch (id) {
        case SYS_ID_AA64ISAR0_EL1:
@@ -158,5 +154,17 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
                break;
        }
 
-       p->regval = val;
+       return val;
+}
+int kvm_init_nv_sysregs(struct kvm *kvm)
+{
+       mutex_lock(&kvm->arch.config_lock);
+
+       for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
+               kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
+                                                      kvm->arch.id_regs[i]);
+
+       mutex_unlock(&kvm->arch.config_lock);
+
+       return 0;
 }
index 5bb4de162cab5df9ff443e3c37b3917f444944a3..68d1d05672bd4fea76329c936291f9b9c37cc5d3 100644 (file)
@@ -280,12 +280,11 @@ int __init kvm_set_ipa_limit(void)
        parange = cpuid_feature_extract_unsigned_field(mmfr0,
                                ID_AA64MMFR0_EL1_PARANGE_SHIFT);
        /*
-        * IPA size beyond 48 bits could not be supported
-        * on either 4K or 16K page size. Hence let's cap
-        * it to 48 bits, in case it's reported as larger
-        * on the system.
+        * IPA size beyond 48 bits for 4K and 16K page size is only supported
+        * when LPA2 is available. So if we have LPA2, enable it, else cap to 48
+        * bits, in case it's reported as larger on the system.
         */
-       if (PAGE_SIZE != SZ_64K)
+       if (!kvm_lpa2_is_enabled() && PAGE_SIZE != SZ_64K)
                parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);
 
        /*
index ff45d688bd7daa8445b99cda9cf87c1650df18e8..30253bd19917f46640ecffc914d91a7d1102dc81 100644 (file)
@@ -45,44 +45,170 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      u64 val);
 
-static bool read_from_write_only(struct kvm_vcpu *vcpu,
-                                struct sys_reg_params *params,
-                                const struct sys_reg_desc *r)
+static bool bad_trap(struct kvm_vcpu *vcpu,
+                    struct sys_reg_params *params,
+                    const struct sys_reg_desc *r,
+                    const char *msg)
 {
-       WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
+       WARN_ONCE(1, "Unexpected %s\n", msg);
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
 }
 
+static bool read_from_write_only(struct kvm_vcpu *vcpu,
+                                struct sys_reg_params *params,
+                                const struct sys_reg_desc *r)
+{
+       return bad_trap(vcpu, params, r,
+                       "sys_reg read to write-only register");
+}
+
 static bool write_to_read_only(struct kvm_vcpu *vcpu,
                               struct sys_reg_params *params,
                               const struct sys_reg_desc *r)
 {
-       WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
-       print_sys_reg_instr(params);
-       kvm_inject_undefined(vcpu);
-       return false;
+       return bad_trap(vcpu, params, r,
+                       "sys_reg write to read-only register");
+}
+
+#define PURE_EL2_SYSREG(el2)                                           \
+       case el2: {                                                     \
+               *el1r = el2;                                            \
+               return true;                                            \
+       }
+
+#define MAPPED_EL2_SYSREG(el2, el1, fn)                                        \
+       case el2: {                                                     \
+               *xlate = fn;                                            \
+               *el1r = el1;                                            \
+               return true;                                            \
+       }
+
+static bool get_el2_to_el1_mapping(unsigned int reg,
+                                  unsigned int *el1r, u64 (**xlate)(u64))
+{
+       switch (reg) {
+               PURE_EL2_SYSREG(  VPIDR_EL2     );
+               PURE_EL2_SYSREG(  VMPIDR_EL2    );
+               PURE_EL2_SYSREG(  ACTLR_EL2     );
+               PURE_EL2_SYSREG(  HCR_EL2       );
+               PURE_EL2_SYSREG(  MDCR_EL2      );
+               PURE_EL2_SYSREG(  HSTR_EL2      );
+               PURE_EL2_SYSREG(  HACR_EL2      );
+               PURE_EL2_SYSREG(  VTTBR_EL2     );
+               PURE_EL2_SYSREG(  VTCR_EL2      );
+               PURE_EL2_SYSREG(  RVBAR_EL2     );
+               PURE_EL2_SYSREG(  TPIDR_EL2     );
+               PURE_EL2_SYSREG(  HPFAR_EL2     );
+               PURE_EL2_SYSREG(  CNTHCTL_EL2   );
+               MAPPED_EL2_SYSREG(SCTLR_EL2,   SCTLR_EL1,
+                                 translate_sctlr_el2_to_sctlr_el1           );
+               MAPPED_EL2_SYSREG(CPTR_EL2,    CPACR_EL1,
+                                 translate_cptr_el2_to_cpacr_el1            );
+               MAPPED_EL2_SYSREG(TTBR0_EL2,   TTBR0_EL1,
+                                 translate_ttbr0_el2_to_ttbr0_el1           );
+               MAPPED_EL2_SYSREG(TTBR1_EL2,   TTBR1_EL1,   NULL             );
+               MAPPED_EL2_SYSREG(TCR_EL2,     TCR_EL1,
+                                 translate_tcr_el2_to_tcr_el1               );
+               MAPPED_EL2_SYSREG(VBAR_EL2,    VBAR_EL1,    NULL             );
+               MAPPED_EL2_SYSREG(AFSR0_EL2,   AFSR0_EL1,   NULL             );
+               MAPPED_EL2_SYSREG(AFSR1_EL2,   AFSR1_EL1,   NULL             );
+               MAPPED_EL2_SYSREG(ESR_EL2,     ESR_EL1,     NULL             );
+               MAPPED_EL2_SYSREG(FAR_EL2,     FAR_EL1,     NULL             );
+               MAPPED_EL2_SYSREG(MAIR_EL2,    MAIR_EL1,    NULL             );
+               MAPPED_EL2_SYSREG(AMAIR_EL2,   AMAIR_EL1,   NULL             );
+               MAPPED_EL2_SYSREG(ELR_EL2,     ELR_EL1,     NULL             );
+               MAPPED_EL2_SYSREG(SPSR_EL2,    SPSR_EL1,    NULL             );
+       default:
+               return false;
+       }
 }
 
 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
 {
        u64 val = 0x8badf00d8badf00d;
+       u64 (*xlate)(u64) = NULL;
+       unsigned int el1r;
+
+       if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
+               goto memory_read;
 
-       if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
-           __vcpu_read_sys_reg_from_cpu(reg, &val))
+       if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
+               if (!is_hyp_ctxt(vcpu))
+                       goto memory_read;
+
+               /*
+                * If this register does not have an EL1 counterpart,
+                * then read the stored EL2 version.
+                */
+               if (reg == el1r)
+                       goto memory_read;
+
+               /*
+                * If we have a non-VHE guest and that the sysreg
+                * requires translation to be used at EL1, use the
+                * in-memory copy instead.
+                */
+               if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
+                       goto memory_read;
+
+               /* Get the current version of the EL1 counterpart. */
+               WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
                return val;
+       }
 
+       /* EL1 register can't be on the CPU if the guest is in vEL2. */
+       if (unlikely(is_hyp_ctxt(vcpu)))
+               goto memory_read;
+
+       if (__vcpu_read_sys_reg_from_cpu(reg, &val))
+               return val;
+
+memory_read:
        return __vcpu_sys_reg(vcpu, reg);
 }
 
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 {
-       if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
-           __vcpu_write_sys_reg_to_cpu(val, reg))
+       u64 (*xlate)(u64) = NULL;
+       unsigned int el1r;
+
+       if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
+               goto memory_write;
+
+       if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
+               if (!is_hyp_ctxt(vcpu))
+                       goto memory_write;
+
+               /*
+                * Always store a copy of the write to memory to avoid having
+                * to reverse-translate virtual EL2 system registers for a
+                * non-VHE guest hypervisor.
+                */
+               __vcpu_sys_reg(vcpu, reg) = val;
+
+               /* No EL1 counterpart? We're done here.? */
+               if (reg == el1r)
+                       return;
+
+               if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
+                       val = xlate(val);
+
+               /* Redirect this to the EL1 version of the register. */
+               WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
+               return;
+       }
+
+       /* EL1 register can't be on the CPU if the guest is in vEL2. */
+       if (unlikely(is_hyp_ctxt(vcpu)))
+               goto memory_write;
+
+       if (__vcpu_write_sys_reg_to_cpu(val, reg))
                return;
 
-       __vcpu_sys_reg(vcpu, reg) = val;
+memory_write:
+        __vcpu_sys_reg(vcpu, reg) = val;
 }
 
 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
@@ -1505,8 +1631,6 @@ static bool access_id_reg(struct kvm_vcpu *vcpu,
                return write_to_read_only(vcpu, p, r);
 
        p->regval = read_id_reg(vcpu, r);
-       if (vcpu_has_nv(vcpu))
-               access_nested_id_reg(vcpu, p, r);
 
        return true;
 }
@@ -1885,6 +2009,32 @@ static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
        return REG_HIDDEN;
 }
 
+static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
+                         struct sys_reg_params *p,
+                         const struct sys_reg_desc *r)
+{
+       /*
+        * We really shouldn't be here, and this is likely the result
+        * of a misconfigured trap, as this register should target the
+        * VNCR page, and nothing else.
+        */
+       return bad_trap(vcpu, p, r,
+                       "trap of VNCR-backed register");
+}
+
+static bool bad_redir_trap(struct kvm_vcpu *vcpu,
+                          struct sys_reg_params *p,
+                          const struct sys_reg_desc *r)
+{
+       /*
+        * We really shouldn't be here, and this is likely the result
+        * of a misconfigured trap, as this register should target the
+        * corresponding EL1, and nothing else.
+        */
+       return bad_trap(vcpu, p, r,
+                       "trap of EL2 register redirected to EL1");
+}
+
 #define EL2_REG(name, acc, rst, v) {           \
        SYS_DESC(SYS_##name),                   \
        .access = acc,                          \
@@ -1894,6 +2044,9 @@ static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
        .val = v,                               \
 }
 
+#define EL2_REG_VNCR(name, rst, v)     EL2_REG(name, bad_vncr_trap, rst, v)
+#define EL2_REG_REDIR(name, rst, v)    EL2_REG(name, bad_redir_trap, rst, v)
+
 /*
  * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
  * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
@@ -2508,32 +2661,33 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
          .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
 
-       EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
-       EL2_REG(VMPIDR_EL2, access_rw, reset_unknown, 0),
+       EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
+       EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
        EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
        EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
-       EL2_REG(HCR_EL2, access_rw, reset_val, 0),
+       EL2_REG_VNCR(HCR_EL2, reset_val, 0),
        EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
        EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
-       EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
-       EL2_REG(HFGRTR_EL2, access_rw, reset_val, 0),
-       EL2_REG(HFGWTR_EL2, access_rw, reset_val, 0),
-       EL2_REG(HFGITR_EL2, access_rw, reset_val, 0),
-       EL2_REG(HACR_EL2, access_rw, reset_val, 0),
+       EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
+       EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
+       EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0),
+       EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
+       EL2_REG_VNCR(HACR_EL2, reset_val, 0),
 
-       EL2_REG(HCRX_EL2, access_rw, reset_val, 0),
+       EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
 
        EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
        EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
        EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
-       EL2_REG(VTTBR_EL2, access_rw, reset_val, 0),
-       EL2_REG(VTCR_EL2, access_rw, reset_val, 0),
+       EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
+       EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
 
        { SYS_DESC(SYS_DACR32_EL2), trap_undef, reset_unknown, DACR32_EL2 },
-       EL2_REG(HDFGRTR_EL2, access_rw, reset_val, 0),
-       EL2_REG(HDFGWTR_EL2, access_rw, reset_val, 0),
-       EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
-       EL2_REG(ELR_EL2, access_rw, reset_val, 0),
+       EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0),
+       EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0),
+       EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0),
+       EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
+       EL2_REG_REDIR(ELR_EL2, reset_val, 0),
        { SYS_DESC(SYS_SP_EL1), access_sp_el1},
 
        /* AArch32 SPSR_* are RES0 if trapped from a NV guest */
@@ -2549,10 +2703,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_IFSR32_EL2), trap_undef, reset_unknown, IFSR32_EL2 },
        EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
        EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
-       EL2_REG(ESR_EL2, access_rw, reset_val, 0),
+       EL2_REG_REDIR(ESR_EL2, reset_val, 0),
        { SYS_DESC(SYS_FPEXC32_EL2), trap_undef, reset_val, FPEXC32_EL2, 0x700 },
 
-       EL2_REG(FAR_EL2, access_rw, reset_val, 0),
+       EL2_REG_REDIR(FAR_EL2, reset_val, 0),
        EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
 
        EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
@@ -2565,24 +2719,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
        EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
 
-       EL2_REG(CNTVOFF_EL2, access_rw, reset_val, 0),
+       EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
        EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
 
-       EL12_REG(SCTLR, access_vm_reg, reset_val, 0x00C50078),
-       EL12_REG(CPACR, access_rw, reset_val, 0),
-       EL12_REG(TTBR0, access_vm_reg, reset_unknown, 0),
-       EL12_REG(TTBR1, access_vm_reg, reset_unknown, 0),
-       EL12_REG(TCR, access_vm_reg, reset_val, 0),
-       { SYS_DESC(SYS_SPSR_EL12), access_spsr},
-       { SYS_DESC(SYS_ELR_EL12), access_elr},
-       EL12_REG(AFSR0, access_vm_reg, reset_unknown, 0),
-       EL12_REG(AFSR1, access_vm_reg, reset_unknown, 0),
-       EL12_REG(ESR, access_vm_reg, reset_unknown, 0),
-       EL12_REG(FAR, access_vm_reg, reset_unknown, 0),
-       EL12_REG(MAIR, access_vm_reg, reset_unknown, 0),
-       EL12_REG(AMAIR, access_vm_reg, reset_amair_el1, 0),
-       EL12_REG(VBAR, access_rw, reset_val, 0),
-       EL12_REG(CONTEXTIDR, access_vm_reg, reset_val, 0),
        EL12_REG(CNTKCTL, access_rw, reset_val, 0),
 
        EL2_REG(SP_EL2, NULL, reset_unknown, 0),
index 2dad2d095160d804a083295e08367e005115b4f9..e2764d0ffa9f32094c57580ed5d987f99b5d2ade 100644 (file)
@@ -590,7 +590,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
        unsigned long flags;
 
        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
        irq = __vgic_its_check_cache(dist, db, devid, eventid);
+       if (irq)
+               vgic_get_irq_kref(irq);
+
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
        return irq;
@@ -769,6 +773,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->pending_latch = true;
        vgic_queue_irq_unlock(kvm, irq, flags);
+       vgic_put_irq(kvm, irq);
 
        return 0;
 }
index a764b0ab8bf913147fc269c915e9d4be3cb408bb..c15ee1df036a22b42d0059cfe546857450fb91d7 100644 (file)
@@ -357,31 +357,13 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
                                         gpa_t addr, unsigned int len,
                                         unsigned long val)
 {
-       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
-       int i;
-       unsigned long flags;
-
-       for (i = 0; i < len * 8; i++) {
-               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-
-               raw_spin_lock_irqsave(&irq->irq_lock, flags);
-               if (test_bit(i, &val)) {
-                       /*
-                        * pending_latch is set irrespective of irq type
-                        * (level or edge) to avoid dependency that VM should
-                        * restore irq config before pending info.
-                        */
-                       irq->pending_latch = true;
-                       vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
-               } else {
-                       irq->pending_latch = false;
-                       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-               }
+       int ret;
 
-               vgic_put_irq(vcpu->kvm, irq);
-       }
+       ret = vgic_uaccess_write_spending(vcpu, addr, len, val);
+       if (ret)
+               return ret;
 
-       return 0;
+       return vgic_uaccess_write_cpending(vcpu, addr, len, ~val);
 }
 
 /* We want to avoid outer shareable. */
index ff558c05e990c728abd5361054cb04ef44083818..cf76523a219456dda0891ac9e6f21fb4eadf7e48 100644 (file)
@@ -301,9 +301,8 @@ static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
                vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
 }
 
-void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
-                             gpa_t addr, unsigned int len,
-                             unsigned long val)
+static void __set_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len,
+                         unsigned long val, bool is_user)
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
@@ -312,14 +311,22 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               /* GICD_ISPENDR0 SGI bits are WI */
-               if (is_vgic_v2_sgi(vcpu, irq)) {
+               /* GICD_ISPENDR0 SGI bits are WI when written from the guest. */
+               if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }
 
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
+               /*
+                * GICv2 SGIs are terribly broken. We can't restore
+                * the source of the interrupt, so just pick the vcpu
+                * itself as the source...
+                */
+               if (is_vgic_v2_sgi(vcpu, irq))
+                       irq->source |= BIT(vcpu->vcpu_id);
+
                if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
                        /* HW SGI? Ask the GIC to inject it */
                        int err;
@@ -335,7 +342,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                }
 
                irq->pending_latch = true;
-               if (irq->hw)
+               if (irq->hw && !is_user)
                        vgic_irq_set_phys_active(irq, true);
 
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
@@ -343,33 +350,18 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
        }
 }
 
+void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+                             gpa_t addr, unsigned int len,
+                             unsigned long val)
+{
+       __set_pending(vcpu, addr, len, val, false);
+}
+
 int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
                                gpa_t addr, unsigned int len,
                                unsigned long val)
 {
-       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
-       int i;
-       unsigned long flags;
-
-       for_each_set_bit(i, &val, len * 8) {
-               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-
-               raw_spin_lock_irqsave(&irq->irq_lock, flags);
-               irq->pending_latch = true;
-
-               /*
-                * GICv2 SGIs are terribly broken. We can't restore
-                * the source of the interrupt, so just pick the vcpu
-                * itself as the source...
-                */
-               if (is_vgic_v2_sgi(vcpu, irq))
-                       irq->source |= BIT(vcpu->vcpu_id);
-
-               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
-
-               vgic_put_irq(vcpu->kvm, irq);
-       }
-
+       __set_pending(vcpu, addr, len, val, true);
        return 0;
 }
 
@@ -394,9 +386,9 @@ static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
                vgic_irq_set_phys_active(irq, false);
 }
 
-void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
-                             gpa_t addr, unsigned int len,
-                             unsigned long val)
+static void __clear_pending(struct kvm_vcpu *vcpu,
+                           gpa_t addr, unsigned int len,
+                           unsigned long val, bool is_user)
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
@@ -405,14 +397,22 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-               /* GICD_ICPENDR0 SGI bits are WI */
-               if (is_vgic_v2_sgi(vcpu, irq)) {
+               /* GICD_ICPENDR0 SGI bits are WI when written from the guest. */
+               if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }
 
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
+               /*
+                * More fun with GICv2 SGIs! If we're clearing one of them
+                * from userspace, which source vcpu to clear? Let's not
+                * even think of it, and blow the whole set.
+                */
+               if (is_vgic_v2_sgi(vcpu, irq))
+                       irq->source = 0;
+
                if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
                        /* HW SGI? Ask the GIC to clear its pending bit */
                        int err;
@@ -427,7 +427,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                        continue;
                }
 
-               if (irq->hw)
+               if (irq->hw && !is_user)
                        vgic_hw_irq_cpending(vcpu, irq);
                else
                        irq->pending_latch = false;
@@ -437,33 +437,18 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
        }
 }
 
+void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+                             gpa_t addr, unsigned int len,
+                             unsigned long val)
+{
+       __clear_pending(vcpu, addr, len, val, false);
+}
+
 int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
                                gpa_t addr, unsigned int len,
                                unsigned long val)
 {
-       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
-       int i;
-       unsigned long flags;
-
-       for_each_set_bit(i, &val, len * 8) {
-               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-
-               raw_spin_lock_irqsave(&irq->irq_lock, flags);
-               /*
-                * More fun with GICv2 SGIs! If we're clearing one of them
-                * from userspace, which source vcpu to clear? Let's not
-                * even think of it, and blow the whole set.
-                */
-               if (is_vgic_v2_sgi(vcpu, irq))
-                       irq->source = 0;
-
-               irq->pending_latch = false;
-
-               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-
-               vgic_put_irq(vcpu->kvm, irq);
-       }
-
+       __clear_pending(vcpu, addr, len, val, true);
        return 0;
 }
 
index 3cb101e8cb29baca75d3fd25287c9dfe932f7677..61886e43e3a10fe8c84ce6febffc5d65703a2cce 100644 (file)
@@ -47,7 +47,7 @@ void arch_teardown_dma_ops(struct device *dev)
 #endif
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                       const struct iommu_ops *iommu, bool coherent)
+                       bool coherent)
 {
        int cls = cache_line_size_of_cpu();
 
@@ -58,7 +58,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                   ARCH_DMA_MINALIGN, cls);
 
        dev->dma_coherent = coherent;
-       if (iommu)
+       if (device_iommu_mapped(dev))
                iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 
        xen_setup_dma_ops(dev);
index 1e07d74d7a6c93baa2a01e7f63a5801c5fe3da0d..b912b1409fc09aaf08705b8d75b5d221ae0d020e 100644 (file)
@@ -84,7 +84,6 @@ WORKAROUND_2077057
 WORKAROUND_2457168
 WORKAROUND_2645198
 WORKAROUND_2658417
-WORKAROUND_2966298
 WORKAROUND_AMPERE_AC03_CPU_38
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
@@ -100,3 +99,4 @@ WORKAROUND_NVIDIA_CARMEL_CNP
 WORKAROUND_QCOM_FALKOR_E1003
 WORKAROUND_REPEAT_TLBI
 WORKAROUND_SPECULATIVE_AT
+WORKAROUND_SPECULATIVE_UNPRIV_LOAD
index 908d8b0bc4fdc645f6ff5b06965df704a97f49cf..d011a81575d21e08a85621e8a58c59ba8e5e7dd8 100644 (file)
@@ -43,6 +43,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
  */
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 #define flush_cache_vmap(start, end)           cache_wbinv_all()
+#define flush_cache_vmap_early(start, end)     do { } while (0)
 #define flush_cache_vunmap(start, end)         cache_wbinv_all()
 
 #define flush_icache_range(start, end)         cache_wbinv_range(start, end)
index 40be16907267d673581278512742567213187a38..6513ac5d257888fbd41385c9263305dfefd18de6 100644 (file)
@@ -41,6 +41,7 @@ void flush_icache_mm_range(struct mm_struct *mm,
 void flush_icache_deferred(struct mm_struct *mm);
 
 #define flush_cache_vmap(start, end)           do { } while (0)
+#define flush_cache_vmap_early(start, end)     do { } while (0)
 #define flush_cache_vunmap(start, end)         do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
index af722e4dfb47d8239c969b25834e9b231a9b4244..ff559e5162aa1cad8da170adc5d047672f849222 100644 (file)
@@ -34,7 +34,8 @@ CONFIG_GENERIC_PHY=y
 CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA=y
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
 CONFIG_FSCACHE_STATS=y
 CONFIG_CACHEFILES=m
 CONFIG_MSDOS_FS=y
index 98a3f4b168bd2687f3e4828aa681d29e0c13b97e..ef2e37a10a0feb9af3543481cffddd75c5b3a8ef 100644 (file)
@@ -12,7 +12,7 @@
 static __always_inline bool arch_static_branch(struct static_key *key,
                                               bool branch)
 {
-       asm_volatile_goto(
+       asm goto(
                "1:     nop32                                   \n"
                "       .pushsection    __jump_table, \"aw\"    \n"
                "       .align          2                       \n"
@@ -29,7 +29,7 @@ label:
 static __always_inline bool arch_static_branch_jump(struct static_key *key,
                                                    bool branch)
 {
-       asm_volatile_goto(
+       asm goto(
                "1:     bsr32           %l[label]               \n"
                "       .pushsection    __jump_table, \"aw\"    \n"
                "       .align          2                       \n"
index beb8499dd8ed84330beecbcd61977df0aa3474f8..bfa21465d83afcd4908cc000e49a8c47aea0d165 100644 (file)
@@ -4,6 +4,7 @@ obj-y += net/
 obj-y += vdso/
 
 obj-$(CONFIG_KVM) += kvm/
+obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
 
 # for cleaning
 subdir- += boot
index ee123820a4760ad38296ecc3c94da8a06068b33c..929f68926b3432e52a6d7b84a92d402a5d8bc11a 100644 (file)
@@ -5,12 +5,14 @@ config LOONGARCH
        select ACPI
        select ACPI_GENERIC_GSI if ACPI
        select ACPI_MCFG if ACPI
+       select ACPI_HOTPLUG_CPU if ACPI_PROCESSOR && HOTPLUG_CPU
        select ACPI_PPTT if ACPI
        select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
        select ARCH_BINFMT_ELF_STATE
        select ARCH_DISABLE_KASAN_INLINE
        select ARCH_ENABLE_MEMORY_HOTPLUG
        select ARCH_ENABLE_MEMORY_HOTREMOVE
+       select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
        select ARCH_HAS_ACPI_TABLE_UPGRADE      if ACPI
        select ARCH_HAS_CPU_FINALIZE_INIT
        select ARCH_HAS_FORTIFY_SOURCE
@@ -71,6 +73,7 @@ config LOONGARCH
        select GENERIC_CLOCKEVENTS
        select GENERIC_CMOS_UPDATE
        select GENERIC_CPU_AUTOPROBE
+       select GENERIC_CPU_DEVICES
        select GENERIC_ENTRY
        select GENERIC_GETTIMEOFDAY
        select GENERIC_IOREMAP if !ARCH_IOREMAP
@@ -97,6 +100,7 @@ config LOONGARCH
        select HAVE_ARCH_KFENCE
        select HAVE_ARCH_KGDB if PERF_EVENTS
        select HAVE_ARCH_MMAP_RND_BITS if MMU
+       select HAVE_ARCH_SECCOMP
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@@ -140,6 +144,7 @@ config LOONGARCH
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RETHOOK
        select HAVE_RSEQ
+       select HAVE_RUST
        select HAVE_SAMPLE_FTRACE_DIRECT
        select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
        select HAVE_SETUP_PER_CPU_AREA if NUMA
@@ -374,6 +379,24 @@ config CMDLINE_FORCE
 
 endchoice
 
+config BUILTIN_DTB
+       bool "Enable built-in dtb in kernel"
+       depends on OF
+       help
+         Some existing systems do not provide a canonical device tree to
+         the kernel at boot time. Let's provide a device tree table in the
+         kernel, keyed by the dts filename, containing the relevant DTBs.
+
+         Built-in DTBs are generic enough and can be used as references.
+
+config BUILTIN_DTB_NAME
+       string "Source file for built-in dtb"
+       depends on BUILTIN_DTB
+       help
+         Base name (without suffix, relative to arch/loongarch/boot/dts/)
+         for the DTS file that will be used to produce the DTB linked into
+         the kernel.
+
 config DMI
        bool "Enable DMI scanning"
        select DMI_SCAN_MACHINE_NON_EFI_FALLBACK
@@ -575,6 +598,9 @@ config ARCH_SELECTS_CRASH_DUMP
        depends on CRASH_DUMP
        select RELOCATABLE
 
+config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+       def_bool CRASH_CORE
+
 config RELOCATABLE
        bool "Relocatable kernel"
        help
@@ -608,23 +634,6 @@ config RANDOMIZE_BASE_MAX_OFFSET
 
          This is limited by the size of the lower address memory, 256MB.
 
-config SECCOMP
-       bool "Enable seccomp to safely compute untrusted bytecode"
-       depends on PROC_FS
-       default y
-       help
-         This kernel feature is useful for number crunching applications
-         that may need to compute untrusted bytecode during their
-         execution. By using pipes or other transports made available to
-         the process as file descriptors supporting the read/write
-         syscalls, it's possible to isolate those applications in
-         their own address space using seccomp. Once seccomp is
-         enabled via /proc/<pid>/seccomp, it cannot be disabled
-         and the task is only allowed to execute a few safe syscalls
-         defined by each seccomp mode.
-
-         If unsure, say Y. Only embedded should say N here.
-
 endmenu
 
 config ARCH_SELECT_MEMORY_MODEL
@@ -643,10 +652,6 @@ config ARCH_SPARSEMEM_ENABLE
          or have huge holes in the physical address space for other reasons.
          See <file:Documentation/mm/numa.rst> for more.
 
-config ARCH_ENABLE_THP_MIGRATION
-       def_bool y
-       depends on TRANSPARENT_HUGEPAGE
-
 config ARCH_MEMORY_PROBE
        def_bool y
        depends on MEMORY_HOTPLUG
index 4ba8d67ddb097743be4e68493604579142eee2d5..983aa2b1629a69fe74c4c8358d094fbd424283b9 100644 (file)
@@ -6,6 +6,7 @@
 boot   := arch/loongarch/boot
 
 KBUILD_DEFCONFIG := loongson3_defconfig
+KBUILD_DTBS      := dtbs
 
 image-name-y                   := vmlinux
 image-name-$(CONFIG_EFI_ZBOOT) := vmlinuz
@@ -81,8 +82,11 @@ KBUILD_AFLAGS_MODULE         += -Wa,-mla-global-with-abs
 KBUILD_CFLAGS_MODULE           += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
 endif
 
+KBUILD_RUSTFLAGS_MODULE                += -Crelocation-model=pic
+
 ifeq ($(CONFIG_RELOCATABLE),y)
 KBUILD_CFLAGS_KERNEL           += -fPIE
+KBUILD_RUSTFLAGS_KERNEL                += -Crelocation-model=pie
 LDFLAGS_vmlinux                        += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs)
 endif
 
@@ -141,7 +145,7 @@ endif
 
 vdso-install-y += arch/loongarch/vdso/vdso.so.dbg
 
-all:   $(notdir $(KBUILD_IMAGE))
+all:   $(notdir $(KBUILD_IMAGE)) $(KBUILD_DTBS)
 
 vmlinuz.efi: vmlinux.efi
 
index 5f1f55e911adf543ab5c113b06f81488ee984e59..747d0c3f63892926757b28fc29119069767e2310 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-dtstree        := $(srctree)/$(src)
 
-dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+dtb-y = loongson-2k0500-ref.dtb loongson-2k1000-ref.dtb loongson-2k2000-ref.dtb
+
+obj-$(CONFIG_BUILTIN_DTB)      += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_NAME))
diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
new file mode 100644 (file)
index 0000000..b38071a
--- /dev/null
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include "loongson-2k0500.dtsi"
+
+/ {
+       compatible = "loongson,ls2k0500-ref", "loongson,ls2k0500";
+       model = "Loongson-2K0500 Reference Board";
+
+       aliases {
+               ethernet0 = &gmac0;
+               ethernet1 = &gmac1;
+               serial0 = &uart0;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
+       memory@200000 {
+               device_type = "memory";
+               reg = <0x0 0x00200000 0x0 0x0ee00000>,
+                     <0x0 0x90000000 0x0 0x60000000>;
+       };
+
+       reserved-memory {
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               linux,cma {
+                       compatible = "shared-dma-pool";
+                       reusable;
+                       size = <0x0 0x2000000>;
+                       linux,cma-default;
+               };
+       };
+};
+
+&gmac0 {
+       status = "okay";
+
+       phy-mode = "rgmii";
+       bus_id = <0x0>;
+};
+
+&gmac1 {
+       status = "okay";
+
+       phy-mode = "rgmii";
+       bus_id = <0x1>;
+};
+
+&i2c0 {
+       status = "okay";
+
+       #address-cells = <1>;
+       #size-cells = <0>;
+       eeprom@57{
+               compatible = "atmel,24c16";
+               reg = <0x57>;
+               pagesize = <16>;
+       };
+};
+
+&ehci0 {
+       status = "okay";
+};
+
+&ohci0 {
+       status = "okay";
+};
+
+&sata {
+       status = "okay";
+};
+
+&uart0 {
+       status = "okay";
+};
+
+&rtc0 {
+       status = "okay";
+};
diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
new file mode 100644 (file)
index 0000000..444779c
--- /dev/null
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu0: cpu@0 {
+                       compatible = "loongson,la264";
+                       device_type = "cpu";
+                       reg = <0x0>;
+                       clocks = <&cpu_clk>;
+               };
+       };
+
+       cpu_clk: cpu-clk {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <500000000>;
+       };
+
+       cpuintc: interrupt-controller {
+               compatible = "loongson,cpu-interrupt-controller";
+               #interrupt-cells = <1>;
+               interrupt-controller;
+       };
+
+       bus@10000000 {
+               compatible = "simple-bus";
+               ranges = <0x0 0x10000000 0x0 0x10000000 0x0 0x10000000>,
+                        <0x0 0x02000000 0x0 0x02000000 0x0 0x02000000>,
+                        <0x0 0x20000000 0x0 0x20000000 0x0 0x10000000>,
+                        <0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>,
+                        <0xfe 0x0 0xfe 0x0 0x0 0x40000000>;
+               #address-cells = <2>;
+               #size-cells = <2>;
+
+               isa@16400000 {
+                       compatible = "isa";
+                       #size-cells = <1>;
+                       #address-cells = <2>;
+                       ranges = <1 0x0 0x0 0x16400000 0x4000>;
+               };
+
+               liointc0: interrupt-controller@1fe11400 {
+                       compatible = "loongson,liointc-2.0";
+                       reg = <0x0 0x1fe11400 0x0 0x40>,
+                             <0x0 0x1fe11040 0x0 0x8>;
+                       reg-names = "main", "isr0";
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+                       interrupt-names = "int0";
+
+                       loongson,parent_int_map = <0xffffffff>, /* int0 */
+                                                 <0x00000000>, /* int1 */
+                                                 <0x00000000>, /* int2 */
+                                                 <0x00000000>; /* int3 */
+               };
+
+               liointc1: interrupt-controller@1fe11440 {
+                       compatible = "loongson,liointc-2.0";
+                       reg = <0x0 0x1fe11440 0x0 0x40>,
+                             <0x0 0x1fe11048 0x0 0x8>;
+                       reg-names = "main", "isr0";
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <4>;
+                       interrupt-names = "int2";
+
+                       loongson,parent_int_map = <0x00000000>, /* int0 */
+                                                 <0x00000000>, /* int1 */
+                                                 <0xffffffff>, /* int2 */
+                                                 <0x00000000>; /* int3 */
+               };
+
+               eiointc: interrupt-controller@1fe11600 {
+                       compatible = "loongson,ls2k0500-eiointc";
+                       reg = <0x0 0x1fe11600 0x0 0xea00>;
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <3>;
+               };
+
+               gmac0: ethernet@1f020000 {
+                       compatible = "snps,dwmac-3.70a";
+                       reg = <0x0 0x1f020000 0x0 0x10000>;
+                       interrupt-parent = <&liointc0>;
+                       interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "macirq";
+                       status = "disabled";
+               };
+
+               gmac1: ethernet@1f030000 {
+                       compatible = "snps,dwmac-3.70a";
+                       reg = <0x0 0x1f030000 0x0 0x10000>;
+                       interrupt-parent = <&liointc0>;
+                       interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "macirq";
+                       status = "disabled";
+               };
+
+               sata: sata@1f040000 {
+                       compatible = "snps,spear-ahci";
+                       reg = <0x0 0x1f040000 0x0 0x10000>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <75>;
+                       status = "disabled";
+               };
+
+               ehci0: usb@1f050000 {
+                       compatible = "generic-ehci";
+                       reg = <0x0 0x1f050000 0x0 0x8000>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <71>;
+                       status = "disabled";
+               };
+
+               ohci0: usb@1f058000 {
+                       compatible = "generic-ohci";
+                       reg = <0x0 0x1f058000 0x0 0x8000>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <72>;
+                       status = "disabled";
+               };
+
+               uart0: serial@1ff40800 {
+                       compatible = "ns16550a";
+                       reg = <0x0 0x1ff40800 0x0 0x10>;
+                       clock-frequency = <100000000>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <2>;
+                       no-loopback-test;
+                       status = "disabled";
+               };
+
+               i2c0: i2c@1ff48000 {
+                       compatible = "loongson,ls2k-i2c";
+                       reg = <0x0 0x1ff48000 0x0 0x0800>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <14>;
+                       status = "disabled";
+               };
+
+               i2c@1ff48800 {
+                       compatible = "loongson,ls2k-i2c";
+                       reg = <0x0 0x1ff48800 0x0 0x0800>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <15>;
+                       status = "disabled";
+               };
+
+               i2c@1ff49000 {
+                       compatible = "loongson,ls2k-i2c";
+                       reg = <0x0 0x1ff49000 0x0 0x0800>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <16>;
+                       status = "disabled";
+               };
+
+               i2c@1ff49800 {
+                       compatible = "loongson,ls2k-i2c";
+                       reg = <0x0 0x1ff49800 0x0 0x0800>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <17>;
+                       status = "disabled";
+               };
+
+               i2c@1ff4a000 {
+                       compatible = "loongson,ls2k-i2c";
+                       reg = <0x0 0x1ff4a000 0x0 0x0800>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <18>;
+                       status = "disabled";
+               };
+
+               i2c@1ff4a800 {
+                       compatible = "loongson,ls2k-i2c";
+                       reg = <0x0 0x1ff4a800 0x0 0x0800>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <19>;
+                       status = "disabled";
+               };
+
+               pmc: power-management@1ff6c000 {
+                       compatible = "loongson,ls2k0500-pmc", "syscon";
+                       reg = <0x0 0x1ff6c000 0x0 0x58>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <56>;
+                       loongson,suspend-address = <0x0 0x1c000500>;
+
+                       syscon-reboot {
+                               compatible = "syscon-reboot";
+                               offset = <0x30>;
+                               mask = <0x1>;
+                       };
+
+                       syscon-poweroff {
+                               compatible = "syscon-poweroff";
+                               regmap = <&pmc>;
+                               offset = <0x14>;
+                               mask = <0x3c00>;
+                               value = <0x3c00>;
+                       };
+               };
+
+               rtc0: rtc@1ff6c100 {
+                       compatible = "loongson,ls2k0500-rtc", "loongson,ls7a-rtc";
+                       reg = <0x0 0x1ff6c100 0x0 0x100>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <35>;
+                       status = "disabled";
+               };
+
+               pcie@1a000000 {
+                       compatible = "loongson,ls2k-pci";
+                       reg = <0x0 0x1a000000 0x0 0x02000000>,
+                             <0xfe 0x0 0x0 0x20000000>;
+                       #address-cells = <3>;
+                       #size-cells = <2>;
+                       device_type = "pci";
+                       bus-range = <0x0 0x5>;
+                       ranges = <0x01000000 0x0 0x00004000 0x0 0x16404000 0x0 0x00004000>,
+                                <0x02000000 0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>;
+
+                       pcie@0,0 {
+                               reg = <0x0000 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&eiointc>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &eiointc 81>;
+                               ranges;
+                       };
+
+                       pcie@1,0 {
+                               reg = <0x0800 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&eiointc>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &eiointc 82>;
+                               ranges;
+                       };
+               };
+       };
+};
diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
new file mode 100644 (file)
index 0000000..132a2d1
--- /dev/null
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include "loongson-2k1000.dtsi"
+
+/ {
+       compatible = "loongson,ls2k1000-ref", "loongson,ls2k1000";
+       model = "Loongson-2K1000 Reference Board";
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
+       memory@200000 {
+               device_type = "memory";
+               reg = <0x0 0x00200000 0x0 0x06e00000>,
+                     <0x0 0x08000000 0x0 0x07000000>,
+                     <0x0 0x90000000 0x1 0xe0000000>;
+       };
+
+       reserved-memory {
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               linux,cma {
+                       compatible = "shared-dma-pool";
+                       reusable;
+                       size = <0x0 0x2000000>;
+                       linux,cma-default;
+               };
+       };
+};
+
+&gmac0 {
+       status = "okay";
+
+       phy-mode = "rgmii";
+       phy-handle = <&phy0>;
+       mdio {
+               compatible = "snps,dwmac-mdio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               phy0: ethernet-phy@0 {
+                       reg = <0>;
+               };
+       };
+};
+
+&gmac1 {
+       status = "okay";
+
+       phy-mode = "rgmii";
+       phy-handle = <&phy1>;
+       mdio {
+               compatible = "snps,dwmac-mdio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               phy1: ethernet-phy@1 {
+                       reg = <16>;
+               };
+       };
+};
+
+&i2c2 {
+       status = "okay";
+
+       pinctrl-0 = <&i2c0_pins_default>;
+       pinctrl-names = "default";
+
+       #address-cells = <1>;
+       #size-cells = <0>;
+       eeprom@57 {
+               compatible = "atmel,24c16";
+               reg = <0x57>;
+               pagesize = <16>;
+       };
+};
+
+&spi0 {
+       status = "okay";
+
+       #address-cells = <1>;
+       #size-cells = <0>;
+       spidev@0 {
+               compatible = "rohm,dh2228fv";
+               spi-max-frequency = <100000000>;
+               reg = <0>;
+       };
+};
+
+&ehci0 {
+       status = "okay";
+};
+
+&ohci0 {
+       status = "okay";
+};
+
+&sata {
+       status = "okay";
+};
+
+&uart0 {
+       status = "okay";
+};
+
+&clk {
+       status = "okay";
+};
+
+&rtc0 {
+       status = "okay";
+};
+
+&pctrl {
+       status = "okay";
+
+       sdio_pins_default: sdio-pins {
+               sdio-pinmux {
+                       groups = "sdio";
+                       function = "sdio";
+               };
+               sdio-det-pinmux {
+                       groups = "pwm2";
+                       function = "gpio";
+               };
+       };
+
+       pwm1_pins_default: pwm1-pins {
+               pinmux {
+                       groups = "pwm1";
+                       function = "pwm1";
+               };
+       };
+
+       pwm0_pins_default: pwm0-pins {
+               pinmux {
+                       groups = "pwm0";
+                       function = "pwm0";
+               };
+       };
+
+       i2c1_pins_default: i2c1-pins {
+               pinmux {
+                       groups = "i2c1";
+                       function = "i2c1";
+               };
+       };
+
+       i2c0_pins_default: i2c0-pins {
+               pinmux {
+                       groups = "i2c0";
+                       function = "i2c0";
+               };
+       };
+
+       nand_pins_default: nand-pins {
+               pinmux {
+                       groups = "nand";
+                       function = "nand";
+               };
+       };
+
+       hda_pins_default: hda-pins {
+               grp0-pinmux {
+                       groups = "hda";
+                       function = "hda";
+               };
+               grp1-pinmux {
+                       groups = "i2s";
+                       function = "gpio";
+               };
+       };
+};
diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
new file mode 100644 (file)
index 0000000..49a70f8
--- /dev/null
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/loongson,ls2k-clk.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu0: cpu@0 {
+                       compatible = "loongson,la264";
+                       device_type = "cpu";
+                       reg = <0x0>;
+                       clocks = <&clk LOONGSON2_NODE_CLK>;
+               };
+
+               cpu1: cpu@1 {
+                       compatible = "loongson,la264";
+                       device_type = "cpu";
+                       reg = <0x1>;
+                       clocks = <&clk LOONGSON2_NODE_CLK>;
+               };
+       };
+
+       ref_100m: clock-ref-100m {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <100000000>;
+               clock-output-names = "ref_100m";
+       };
+
+       cpuintc: interrupt-controller {
+               compatible = "loongson,cpu-interrupt-controller";
+               #interrupt-cells = <1>;
+               interrupt-controller;
+       };
+
+       /* i2c of the dvi eeprom edid */
+       i2c-gpio-0 {
+               compatible = "i2c-gpio";
+               scl-gpios = <&gpio0 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+               sda-gpios = <&gpio0 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+               i2c-gpio,delay-us = <5>;        /* ~100 kHz */
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disabled";
+       };
+
+       /* i2c of the eeprom edid */
+       i2c-gpio-1 {
+               compatible = "i2c-gpio";
+               scl-gpios = <&gpio0 33 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+               sda-gpios = <&gpio0 32 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+               i2c-gpio,delay-us = <5>;        /* ~100 kHz */
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disabled";
+       };
+
+       thermal-zones {
+               cpu-thermal {
+                       polling-delay-passive = <1000>;
+                       polling-delay = <5000>;
+                       thermal-sensors = <&tsensor 0>;
+
+                       trips {
+                               cpu_alert: cpu-alert {
+                                       temperature = <33000>;
+                                       hysteresis = <2000>;
+                                       type = "active";
+                               };
+
+                               cpu_crit: cpu-crit {
+                                       temperature = <85000>;
+                                       hysteresis = <5000>;
+                                       type = "critical";
+                               };
+                       };
+               };
+       };
+
+       bus@10000000 {
+               compatible = "simple-bus";
+               ranges = <0x0 0x10000000 0x0 0x10000000 0x0 0x10000000>,
+                        <0x0 0x02000000 0x0 0x02000000 0x0 0x02000000>,
+                        <0x0 0x20000000 0x0 0x20000000 0x0 0x10000000>,
+                        <0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>,
+                        <0xfe 0x0 0xfe 0x0 0x0 0x40000000>;
+               #address-cells = <2>;
+               #size-cells = <2>;
+               dma-coherent;
+
+               liointc0: interrupt-controller@1fe01400 {
+                       compatible = "loongson,liointc-2.0";
+                       reg = <0x0 0x1fe01400 0x0 0x40>,
+                             <0x0 0x1fe01040 0x0 0x8>,
+                             <0x0 0x1fe01140 0x0 0x8>;
+                       reg-names = "main", "isr0", "isr1";
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+                       interrupt-names = "int0";
+                       loongson,parent_int_map = <0xffffffff>, /* int0 */
+                                                 <0x00000000>, /* int1 */
+                                                 <0x00000000>, /* int2 */
+                                                 <0x00000000>; /* int3 */
+               };
+
+               liointc1: interrupt-controller@1fe01440 {
+                       compatible = "loongson,liointc-2.0";
+                       reg = <0x0 0x1fe01440 0x0 0x40>,
+                             <0x0 0x1fe01048 0x0 0x8>,
+                             <0x0 0x1fe01148 0x0 0x8>;
+                       reg-names = "main", "isr0", "isr1";
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <3>;
+                       interrupt-names = "int1";
+                       loongson,parent_int_map = <0x00000000>, /* int0 */
+                                                 <0xffffffff>, /* int1 */
+                                                 <0x00000000>, /* int2 */
+                                                 <0x00000000>; /* int3 */
+               };
+
+               chipid@1fe00000 {
+                       compatible = "loongson,ls2k-chipid";
+                       reg = <0x0 0x1fe00000 0x0 0x30>;
+                       little-endian;
+               };
+
+               pctrl: pinctrl@1fe00420 {
+                       compatible = "loongson,ls2k-pinctrl";
+                       reg = <0x0 0x1fe00420 0x0 0x18>;
+                       status = "disabled";
+               };
+
+               clk: clock-controller@1fe00480 {
+                       compatible = "loongson,ls2k-clk";
+                       reg = <0x0 0x1fe00480 0x0 0x58>;
+                       #clock-cells = <1>;
+                       clocks = <&ref_100m>;
+                       clock-names = "ref_100m";
+                       status = "disabled";
+               };
+
+               gpio0: gpio@1fe00500 {
+                       compatible = "loongson,ls2k-gpio";
+                       reg = <0x0 0x1fe00500 0x0 0x38>;
+                       ngpios = <64>;
+                       #gpio-cells = <2>;
+                       gpio-controller;
+                       gpio-ranges = <&pctrl 0x0 0x0 15>,
+                                     <&pctrl 16 16 15>,
+                                     <&pctrl 32 32 10>,
+                                     <&pctrl 44 44 20>;
+                       interrupt-parent = <&liointc1>;
+                       interrupts = <28 IRQ_TYPE_LEVEL_HIGH>,
+                                    <29 IRQ_TYPE_LEVEL_HIGH>,
+                                    <30 IRQ_TYPE_LEVEL_HIGH>,
+                                    <30 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <26 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <>,
+                                    <>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>,
+                                    <27 IRQ_TYPE_LEVEL_HIGH>;
+               };
+
+               tsensor: thermal-sensor@1fe01500 {
+                       compatible = "loongson,ls2k1000-thermal";
+                       reg = <0x0 0x1fe01500 0x0 0x30>;
+                       interrupt-parent = <&liointc0>;
+                       interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+                       #thermal-sensor-cells = <1>;
+               };
+
+               dma-controller@1fe00c00 {
+                       compatible = "loongson,ls2k1000-apbdma";
+                       reg = <0x0 0x1fe00c00 0x0 0x8>;
+                       interrupt-parent = <&liointc1>;
+                       interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&clk LOONGSON2_APB_CLK>;
+                       #dma-cells = <1>;
+                       status = "disabled";
+               };
+
+               dma-controller@1fe00c10 {
+                       compatible = "loongson,ls2k1000-apbdma";
+                       reg = <0x0 0x1fe00c10 0x0 0x8>;
+                       interrupt-parent = <&liointc1>;
+                       interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&clk LOONGSON2_APB_CLK>;
+                       #dma-cells = <1>;
+                       status = "disabled";
+               };
+
+               dma-controller@1fe00c20 {
+                       compatible = "loongson,ls2k1000-apbdma";
+                       reg = <0x0 0x1fe00c20 0x0 0x8>;
+                       interrupt-parent = <&liointc1>;
+                       interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&clk LOONGSON2_APB_CLK>;
+                       #dma-cells = <1>;
+                       status = "disabled";
+               };
+
+               dma-controller@1fe00c30 {
+                       compatible = "loongson,ls2k1000-apbdma";
+                       reg = <0x0 0x1fe00c30 0x0 0x8>;
+                       interrupt-parent = <&liointc1>;
+                       interrupts = <15 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&clk LOONGSON2_APB_CLK>;
+                       #dma-cells = <1>;
+                       status = "disabled";
+               };
+
+               dma-controller@1fe00c40 {
+                       compatible = "loongson,ls2k1000-apbdma";
+                       reg = <0x0 0x1fe00c40 0x0 0x8>;
+                       interrupt-parent = <&liointc1>;
+                       interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&clk LOONGSON2_APB_CLK>;
+                       #dma-cells = <1>;
+                       status = "disabled";
+               };
+
+               uart0: serial@1fe20000 {
+                       compatible = "ns16550a";
+                       reg = <0x0 0x1fe20000 0x0 0x10>;
+                       clock-frequency = <125000000>;
+                       interrupt-parent = <&liointc0>;
+                       interrupts = <0x0 IRQ_TYPE_LEVEL_HIGH>;
+                       no-loopback-test;
+                       status = "disabled";
+               };
+
+               i2c2: i2c@1fe21000 {
+                       compatible = "loongson,ls2k-i2c";
+                       reg = <0x0 0x1fe21000 0x0 0x8>;
+                       interrupt-parent = <&liointc0>;
+                       interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+               };
+
+               i2c3: i2c@1fe21800 {
+                       compatible = "loongson,ls2k-i2c";
+                       reg = <0x0 0x1fe21800 0x0 0x8>;
+                       interrupt-parent = <&liointc0>;
+                       interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+               };
+
+               pmc: power-management@1fe27000 {
+                       compatible = "loongson,ls2k1000-pmc", "loongson,ls2k0500-pmc", "syscon";
+                       reg = <0x0 0x1fe27000 0x0 0x58>;
+                       interrupt-parent = <&liointc1>;
+                       interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+                       loongson,suspend-address = <0x0 0x1c000500>;
+
+                       syscon-reboot {
+                               compatible = "syscon-reboot";
+                               offset = <0x30>;
+                               mask = <0x1>;
+                       };
+
+                       syscon-poweroff {
+                               compatible = "syscon-poweroff";
+                               regmap = <&pmc>;
+                               offset = <0x14>;
+                               mask = <0x3c00>;
+                               value = <0x3c00>;
+                       };
+               };
+
+               rtc0: rtc@1fe27800 {
+                       compatible = "loongson,ls2k1000-rtc";
+                       reg = <0x0 0x1fe27800 0x0 0x100>;
+                       interrupt-parent = <&liointc1>;
+                       interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+               };
+
+               spi0: spi@1fff0220 {
+                       compatible = "loongson,ls2k1000-spi";
+                       reg = <0x0 0x1fff0220 0x0 0x10>;
+                       clocks = <&clk LOONGSON2_BOOT_CLK>;
+                       status = "disabled";
+               };
+
+               pcie@1a000000 {
+                       compatible = "loongson,ls2k-pci";
+                       reg = <0x0 0x1a000000 0x0 0x02000000>,
+                             <0xfe 0x0 0x0 0x20000000>;
+                       #address-cells = <3>;
+                       #size-cells = <2>;
+                       device_type = "pci";
+                       bus-range = <0x0 0xff>;
+                       ranges = <0x01000000 0x0 0x00008000 0x0 0x18008000 0x0 0x00008000>,
+                                <0x02000000 0x0 0x60000000 0x0 0x60000000 0x0 0x20000000>;
+
+                       gmac0: ethernet@3,0 {
+                               reg = <0x1800 0x0 0x0 0x0 0x0>;
+                               interrupt-parent = <&liointc0>;
+                               interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
+                                            <13 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-names = "macirq", "eth_lpi";
+                               status = "disabled";
+                       };
+
+                       gmac1: ethernet@3,1 {
+                               reg = <0x1900 0x0 0x0 0x0 0x0>;
+                               interrupt-parent = <&liointc0>;
+                               interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
+                                            <15 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-names = "macirq", "eth_lpi";
+                               status = "disabled";
+                       };
+
+                       ehci0: usb@4,1 {
+                               reg = <0x2100 0x0 0x0 0x0 0x0>;
+                               interrupt-parent = <&liointc1>;
+                               interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+                               status = "disabled";
+                       };
+
+                       ohci0: usb@4,2 {
+                               reg = <0x2200 0x0 0x0 0x0 0x0>;
+                               interrupt-parent = <&liointc1>;
+                               interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+                               status = "disabled";
+                       };
+
+                       display@6,0 {
+                               reg = <0x3000 0x0 0x0 0x0 0x0>;
+                               interrupt-parent = <&liointc0>;
+                               interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+                               status = "disabled";
+                       };
+
+                       hda@7,0 {
+                               reg = <0x3800 0x0 0x0 0x0 0x0>;
+                               interrupt-parent = <&liointc0>;
+                               interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+                               status = "disabled";
+                       };
+
+                       sata: sata@8,0 {
+                               reg = <0x4000 0x0 0x0 0x0 0x0>;
+                               interrupt-parent = <&liointc0>;
+                               interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+                               status = "disabled";
+                       };
+
+                       pcie@9,0 {
+                               reg = <0x4800 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 0x0 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@a,0 {
+                               reg = <0x5000 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&liointc1>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 1 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@b,0 {
+                               reg = <0x5800 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&liointc1>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 2 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@c,0 {
+                               reg = <0x6000 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&liointc1>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 3 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@d,0 {
+                               reg = <0x6800 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&liointc1>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 4 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@e,0 {
+                               reg = <0x7000 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&liointc1>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 5 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+               };
+       };
+};
diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
new file mode 100644 (file)
index 0000000..dca91ca
--- /dev/null
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include "loongson-2k2000.dtsi"
+
+/ {
+       compatible = "loongson,ls2k2000-ref", "loongson,ls2k2000";
+       model = "Loongson-2K2000 Reference Board";
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
+       memory@200000 {
+               device_type = "memory";
+               reg = <0x0 0x00200000 0x0 0x0ee00000>,
+                     <0x0 0x90000000 0x0 0x70000000>;
+       };
+
+       reserved-memory {
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               linux,cma {
+                       compatible = "shared-dma-pool";
+                       reusable;
+                       size = <0x0 0x2000000>;
+                       linux,cma-default;
+               };
+       };
+};
+
+&sata {
+       status = "okay";
+};
+
+&uart0 {
+       status = "okay";
+};
+
+&rtc0 {
+       status = "okay";
+};
+
+&xhci0 {
+       status = "okay";
+};
+
+&xhci1 {
+       status = "okay";
+};
+
+&gmac0 {
+       status = "okay";
+};
+
+&gmac1 {
+       status = "okay";
+};
+
+&gmac2 {
+       status = "okay";
+};
diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
new file mode 100644 (file)
index 0000000..a231949
--- /dev/null
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu0: cpu@1 {
+                       compatible = "loongson,la364";
+                       device_type = "cpu";
+                       reg = <0x0>;
+                       clocks = <&cpu_clk>;
+               };
+
+               cpu1: cpu@2 {
+                       compatible = "loongson,la364";
+                       device_type = "cpu";
+                       reg = <0x1>;
+                       clocks = <&cpu_clk>;
+               };
+       };
+
+       cpu_clk: cpu-clk {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <1400000000>;
+       };
+
+       cpuintc: interrupt-controller {
+               compatible = "loongson,cpu-interrupt-controller";
+               #interrupt-cells = <1>;
+               interrupt-controller;
+       };
+
+       bus@10000000 {
+               compatible = "simple-bus";
+               ranges = <0x0 0x10000000 0x0 0x10000000 0x0 0x10000000>,
+                        <0x0 0x02000000 0x0 0x02000000 0x0 0x02000000>,
+                        <0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>,
+                        <0xfe 0x0 0xfe 0x0 0x0 0x40000000>;
+               #address-cells = <2>;
+               #size-cells = <2>;
+
+               pmc: power-management@100d0000 {
+                       compatible = "loongson,ls2k2000-pmc", "loongson,ls2k0500-pmc", "syscon";
+                       reg = <0x0 0x100d0000 0x0 0x58>;
+                       interrupt-parent = <&eiointc>;
+                       interrupts = <47>;
+                       loongson,suspend-address = <0x0 0x1c000500>;
+
+                       syscon-reboot {
+                               compatible = "syscon-reboot";
+                               offset = <0x30>;
+                               mask = <0x1>;
+                       };
+
+                       syscon-poweroff {
+                               compatible = "syscon-poweroff";
+                               regmap = <&pmc>;
+                               offset = <0x14>;
+                               mask = <0x3c00>;
+                               value = <0x3c00>;
+                       };
+               };
+
+               liointc: interrupt-controller@1fe01400 {
+                       compatible = "loongson,liointc-1.0";
+                       reg = <0x0 0x1fe01400 0x0 0x64>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+                       interrupt-names = "int0";
+                       loongson,parent_int_map = <0xffffffff>, /* int0 */
+                                                 <0x00000000>, /* int1 */
+                                                 <0x00000000>, /* int2 */
+                                                 <0x00000000>; /* int3 */
+               };
+
+               eiointc: interrupt-controller@1fe01600 {
+                       compatible = "loongson,ls2k2000-eiointc";
+                       reg = <0x0 0x1fe01600 0x0 0xea00>;
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <3>;
+               };
+
+               pic: interrupt-controller@10000000 {
+                       compatible = "loongson,pch-pic-1.0";
+                       reg = <0x0 0x10000000 0x0 0x400>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       loongson,pic-base-vec = <0>;
+                       interrupt-parent = <&eiointc>;
+               };
+
+               msi: msi-controller@1fe01140 {
+                       compatible = "loongson,pch-msi-1.0";
+                       reg = <0x0 0x1fe01140 0x0 0x8>;
+                       msi-controller;
+                       loongson,msi-base-vec = <64>;
+                       loongson,msi-num-vecs = <192>;
+                       interrupt-parent = <&eiointc>;
+               };
+
+               rtc0: rtc@100d0100 {
+                       compatible = "loongson,ls2k2000-rtc", "loongson,ls7a-rtc";
+                       reg = <0x0 0x100d0100 0x0 0x100>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <52 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+               };
+
+               uart0: serial@1fe001e0 {
+                       compatible = "ns16550a";
+                       reg = <0x0 0x1fe001e0 0x0 0x10>;
+                       clock-frequency = <100000000>;
+                       interrupt-parent = <&liointc>;
+                       interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+                       no-loopback-test;
+                       status = "disabled";
+               };
+
+               pcie@1a000000 {
+                       compatible = "loongson,ls2k-pci";
+                       reg = <0x0 0x1a000000 0x0 0x02000000>,
+                             <0xfe 0x0 0x0 0x20000000>;
+                       #address-cells = <3>;
+                       #size-cells = <2>;
+                       device_type = "pci";
+                       bus-range = <0x0 0xff>;
+                       ranges = <0x01000000 0x0 0x00008000 0x0 0x18400000 0x0 0x00008000>,
+                                <0x02000000 0x0 0x60000000 0x0 0x60000000 0x0 0x20000000>;
+
+                       gmac0: ethernet@3,0 {
+                               reg = <0x1800 0x0 0x0 0x0 0x0>;
+                               interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-parent = <&pic>;
+                               status = "disabled";
+                       };
+
+                       gmac1: ethernet@3,1 {
+                               reg = <0x1900 0x0 0x0 0x0 0x0>;
+                               interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-parent = <&pic>;
+                               status = "disabled";
+                       };
+
+                       gmac2: ethernet@3,2 {
+                               reg = <0x1a00 0x0 0x0 0x0 0x0>;
+                               interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-parent = <&pic>;
+                               status = "disabled";
+                       };
+
+                       xhci0: usb@4,0 {
+                               reg = <0x2000 0x0 0x0 0x0 0x0>;
+                               interrupts = <48 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-parent = <&pic>;
+                               status = "disabled";
+                       };
+
+                       xhci1: usb@19,0 {
+                               reg = <0xc800 0x0 0x0 0x0 0x0>;
+                               interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-parent = <&pic>;
+                               status = "disabled";
+                       };
+
+                       display@6,1 {
+                               reg = <0x3100 0x0 0x0 0x0 0x0>;
+                               interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-parent = <&pic>;
+                               status = "disabled";
+                       };
+
+                       hda@7,0 {
+                               reg = <0x3800 0x0 0x0 0x0 0x0>;
+                               interrupts = <58 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-parent = <&pic>;
+                               status = "disabled";
+                       };
+
+                       sata: sata@8,0 {
+                               reg = <0x4000 0x0 0x0 0x0 0x0>;
+                               interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-parent = <&pic>;
+                               status = "disabled";
+                       };
+
+                       pcie@9,0 {
+                               reg = <0x4800 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&pic>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &pic 32 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@a,0 {
+                               reg = <0x5000 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&pic>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &pic 33 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@b,0 {
+                               reg = <0x5800 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&pic>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &pic 34 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@c,0 {
+                               reg = <0x6000 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&pic>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &pic 35 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@d,0 {
+                               reg = <0x6800 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&pic>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &pic 36 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@e,0 {
+                               reg = <0x7000 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&pic>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &pic 37 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@f,0 {
+                               reg = <0x7800 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&pic>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &pic 40 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+
+                       pcie@10,0 {
+                               reg = <0x8000 0x0 0x0 0x0 0x0>;
+                               #address-cells = <3>;
+                               #size-cells = <2>;
+                               device_type = "pci";
+                               interrupt-parent = <&pic>;
+                               #interrupt-cells = <1>;
+                               interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+                               interrupt-map = <0x0 0x0 0x0 0x0 &pic 30 IRQ_TYPE_LEVEL_HIGH>;
+                               ranges;
+                       };
+               };
+       };
+};
index 60e331af98398149df56cfed7bc7b27abe21c526..f18c2ba871eff6c6c1b84808f0e9f7296c854db3 100644 (file)
@@ -6,6 +6,8 @@ CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_PREEMPT=y
+CONFIG_PREEMPT_DYNAMIC=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -19,6 +21,7 @@ CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
 CONFIG_CPUSETS=y
@@ -26,6 +29,7 @@ CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
 CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
@@ -35,6 +39,8 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_PERF_EVENTS=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
 CONFIG_LOONGARCH=y
 CONFIG_64BIT=y
 CONFIG_MACH_LOONGSON64=y
@@ -44,13 +50,11 @@ CONFIG_DMI=y
 CONFIG_EFI=y
 CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
-CONFIG_NR_CPUS=64
+CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
 CONFIG_CPU_HAS_FPU=y
 CONFIG_CPU_HAS_LSX=y
 CONFIG_CPU_HAS_LASX=y
-CONFIG_KEXEC=y
-CONFIG_CRASH_DUMP=y
 CONFIG_RANDOMIZE_BASE=y
 CONFIG_SUSPEND=y
 CONFIG_HIBERNATION=y
@@ -62,10 +66,6 @@ CONFIG_ACPI_IPMI=m
 CONFIG_ACPI_HOTPLUG_CPU=y
 CONFIG_ACPI_PCI_SLOT=y
 CONFIG_ACPI_HOTPLUG_MEMORY=y
-CONFIG_EFI_ZBOOT=y
-CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
-CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_EFI_TEST=m
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_JUMP_LABEL=y
@@ -74,10 +74,18 @@ CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_ZONED=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_DEV_THROTTLING_LOW=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_FC_APPID=y
+CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_BSD_DISKLABEL=y
 CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CMDLINE_PARTITION=y
 CONFIG_IOSCHED_BFQ=y
 CONFIG_BFQ_GROUP_IOSCHED=y
 CONFIG_BINFMT_MISC=m
@@ -93,6 +101,8 @@ CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_CMA_SYSFS=y
 CONFIG_USERFAULTFD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -128,6 +138,7 @@ CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_ESP=m
 CONFIG_IPV6_MROUTE=y
+CONFIG_MPTCP=y
 CONFIG_NETWORK_PHY_TIMESTAMPING=y
 CONFIG_NETFILTER=y
 CONFIG_BRIDGE_NETFILTER=m
@@ -352,6 +363,7 @@ CONFIG_PCIEAER=y
 CONFIG_PCI_IOV=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_SHPC=y
+CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCCARD=m
 CONFIG_YENTA=m
 CONFIG_RAPIDIO=y
@@ -365,6 +377,10 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_FW_LOADER_COMPRESS=y
 CONFIG_FW_LOADER_COMPRESS_ZSTD=y
+CONFIG_EFI_ZBOOT=y
+CONFIG_EFI_BOOTLOADER_CONTROL=m
+CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_EFI_TEST=m
 CONFIG_MTD=m
 CONFIG_MTD_BLOCK=m
 CONFIG_MTD_CFI=m
@@ -586,6 +602,7 @@ CONFIG_RTW89_8852AE=m
 CONFIG_RTW89_8852CE=m
 CONFIG_ZD1211RW=m
 CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_USB4_NET=m
 CONFIG_INPUT_MOUSEDEV=y
 CONFIG_INPUT_MOUSEDEV_PSAUX=y
 CONFIG_INPUT_EVDEV=y
@@ -691,6 +708,9 @@ CONFIG_SND_HDA_CODEC_SIGMATEL=y
 CONFIG_SND_HDA_CODEC_HDMI=y
 CONFIG_SND_HDA_CODEC_CONEXANT=y
 CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_SOC=m
+CONFIG_SND_SOC_LOONGSON_CARD=m
+CONFIG_SND_VIRTIO=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 CONFIG_HID_A4TECH=m
@@ -738,6 +758,11 @@ CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_EFI=y
 CONFIG_RTC_DRV_LOONGSON=y
 CONFIG_DMADEVICES=y
+CONFIG_LS2X_APB_DMA=y
+CONFIG_UDMABUF=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+CONFIG_DMABUF_HEAPS_CMA=y
 CONFIG_UIO=m
 CONFIG_UIO_PDRV_GENIRQ=m
 CONFIG_UIO_DMEM_GENIRQ=m
@@ -778,7 +803,15 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_DEVFREQ_GOV_PERFORMANCE=y
 CONFIG_DEVFREQ_GOV_POWERSAVE=y
 CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_NTB=m
+CONFIG_NTB_MSI=y
+CONFIG_NTB_IDT=m
+CONFIG_NTB_EPF=m
+CONFIG_NTB_SWITCHTEC=m
+CONFIG_NTB_PERF=m
+CONFIG_NTB_TRANSPORT=m
 CONFIG_PWM=y
+CONFIG_USB4=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
@@ -797,6 +830,10 @@ CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_F2FS_FS=m
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_CHECK_FS=y
+CONFIG_F2FS_FS_COMPRESSION=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
@@ -883,7 +920,6 @@ CONFIG_KEY_DH_OPERATIONS=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_SECURITY_APPARMOR=y
 CONFIG_SECURITY_YAMA=y
 CONFIG_DEFAULT_SECURITY_DAC=y
@@ -914,6 +950,9 @@ CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_CRC32_LOONGARCH=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_DMA_CMA=y
+CONFIG_DMA_NUMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_MAGIC_SYSRQ=y
index 8de6c4b83a61a8088903abc67ddc50beb35233d5..49e29b29996f0f4473c5d628c936c7528630ad52 100644 (file)
@@ -32,8 +32,10 @@ static inline bool acpi_has_cpu_in_madt(void)
        return true;
 }
 
+#define MAX_CORE_PIC 256
+
 extern struct list_head acpi_wakeup_device_list;
-extern struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
+extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
 
 extern int __init parse_acpi_topology(void);
 
index c60796869b2b80377d9d6afca9c8705f8d2433e1..6d5846dd075cbdde654760422fac5d9536605d4a 100644 (file)
@@ -24,13 +24,15 @@ struct loongson_board_info {
        const char *board_vendor;
 };
 
+#define NR_WORDS DIV_ROUND_UP(NR_CPUS, BITS_PER_LONG)
+
 struct loongson_system_configuration {
        int nr_cpus;
        int nr_nodes;
        int boot_cpu_id;
        int cores_per_node;
        int cores_per_package;
-       unsigned long cores_io_master;
+       unsigned long cores_io_master[NR_WORDS];
        unsigned long suspend_addr;
        const char *cpuname;
 };
@@ -42,7 +44,7 @@ extern struct loongson_system_configuration loongson_sysconf;
 
 static inline bool io_master(int cpu)
 {
-       return test_bit(cpu, &loongson_sysconf.cores_io_master);
+       return test_bit(cpu, loongson_sysconf.cores_io_master);
 }
 
 #endif /* _ASM_BOOTINFO_H */
diff --git a/arch/loongarch/include/asm/crash_core.h b/arch/loongarch/include/asm/crash_core.h
new file mode 100644 (file)
index 0000000..218bdbf
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LOONGARCH_CRASH_CORE_H
+#define _LOONGARCH_CRASH_CORE_H
+
+#define CRASH_ALIGN                    SZ_2M
+
+#define CRASH_ADDR_LOW_MAX             SZ_4G
+#define CRASH_ADDR_HIGH_MAX            memblock_end_of_DRAM()
+
+extern phys_addr_t memblock_end_of_DRAM(void);
+
+#endif
index 9b16a3b8e70608c8765f838cfd21925d4fe51145..f16bd42456e4ccf3ad6c8917165176b8ef5d8f05 100644 (file)
@@ -241,8 +241,6 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
 do {                                                                   \
        current->thread.vdso = &vdso_info;                              \
                                                                        \
-       loongarch_set_personality_fcsr(state);                          \
-                                                                       \
        if (personality(current->personality) != PER_LINUX)             \
                set_personality(PER_LINUX);                             \
 } while (0)
@@ -259,7 +257,6 @@ do {                                                                        \
        clear_thread_flag(TIF_32BIT_ADDR);                              \
                                                                        \
        current->thread.vdso = &vdso_info;                              \
-       loongarch_set_personality_fcsr(state);                          \
                                                                        \
        p = personality(current->personality);                          \
        if (p != PER_LINUX32 && p != PER_LINUX)                         \
@@ -340,6 +337,4 @@ extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
 extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
                          struct arch_elf_state *state);
 
-extern void loongarch_set_personality_fcsr(struct arch_elf_state *state);
-
 #endif /* _ASM_ELF_H */
index a11996eb5892dd169a1e5a0ba9ad20fb854f4be8..de891c2c83d4a980284cc5376dbc0934b7233a13 100644 (file)
@@ -63,7 +63,7 @@ ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs)
 static __always_inline void
 ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip)
 {
-       regs_set_return_value(&fregs->regs, ip);
+       instruction_pointer_set(&fregs->regs, ip);
 }
 
 #define ftrace_regs_get_argument(fregs, n) \
index 3cea299a5ef58313a305f7d5a086ae9a32e8aa95..29acfe3de3faae797beca198e58f9e5a5e570bbc 100644 (file)
@@ -22,7 +22,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
 {
-       asm_volatile_goto(
+       asm goto(
                "1:     nop                     \n\t"
                JUMP_TABLE_ENTRY
                :  :  "i"(&((char *)key)[branch]) :  : l_yes);
@@ -35,7 +35,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
 {
-       asm_volatile_goto(
+       asm goto(
                "1:     b       %l[l_yes]       \n\t"
                JUMP_TABLE_ENTRY
                :  :  "i"(&((char *)key)[branch]) :  : l_yes);
index 11328700d4fa8d37aca8371c43b0d38e5f5fd2a0..2d62f7b0d377b5d81fb5f2ec292d7f990a728b44 100644 (file)
@@ -45,7 +45,10 @@ struct kvm_vcpu_stat {
        u64 signal_exits;
 };
 
+#define KVM_MEM_HUGEPAGE_CAPABLE       (1UL << 0)
+#define KVM_MEM_HUGEPAGE_INCAPABLE     (1UL << 1)
 struct kvm_arch_memory_slot {
+       unsigned long flags;
 };
 
 struct kvm_context {
@@ -92,8 +95,10 @@ enum emulation_result {
 };
 
 #define KVM_LARCH_FPU          (0x1 << 0)
-#define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
-#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
+#define KVM_LARCH_LSX          (0x1 << 1)
+#define KVM_LARCH_LASX         (0x1 << 2)
+#define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
+#define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
 
 struct kvm_vcpu_arch {
        /*
@@ -175,6 +180,21 @@ static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned
        csr->csrs[reg] = val;
 }
 
+static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
+{
+       return arch->cpucfg[2] & CPUCFG2_FP;
+}
+
+static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
+{
+       return arch->cpucfg[2] & CPUCFG2_LSX;
+}
+
+static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
+{
+       return arch->cpucfg[2] & CPUCFG2_LASX;
+}
+
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 
@@ -183,7 +203,6 @@ void kvm_flush_tlb_all(void);
 void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
 int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
 
-#define KVM_ARCH_WANT_MMU_NOTIFIER
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
index 553cfa2b2b1cfbd7c45619a09abd13874d0e9f8b..0cb4fdb8a9b5970dfefb24a34c82d1451ff27fa9 100644 (file)
@@ -55,7 +55,26 @@ void kvm_save_fpu(struct loongarch_fpu *fpu);
 void kvm_restore_fpu(struct loongarch_fpu *fpu);
 void kvm_restore_fcsr(struct loongarch_fpu *fpu);
 
-void kvm_acquire_timer(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_CPU_HAS_LSX
+int kvm_own_lsx(struct kvm_vcpu *vcpu);
+void kvm_save_lsx(struct loongarch_fpu *fpu);
+void kvm_restore_lsx(struct loongarch_fpu *fpu);
+#else
+static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }
+static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
+static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
+#endif
+
+#ifdef CONFIG_CPU_HAS_LASX
+int kvm_own_lasx(struct kvm_vcpu *vcpu);
+void kvm_save_lasx(struct loongarch_fpu *fpu);
+void kvm_restore_lasx(struct loongarch_fpu *fpu);
+#else
+static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
+static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
+static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
+#endif
+
 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
 void kvm_reset_timer(struct kvm_vcpu *vcpu);
 void kvm_save_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/include/asm/shmparam.h b/arch/loongarch/include/asm/shmparam.h
deleted file mode 100644 (file)
index c9554f4..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_SHMPARAM_H
-#define _ASM_SHMPARAM_H
-
-#define __ARCH_FORCE_SHMLBA    1
-
-#define        SHMLBA  SZ_64K           /* attach addr a multiple of this */
-
-#endif /* _ASM_SHMPARAM_H */
index c6ad2ee6106cb0389f0ce626c6d0df65647d351b..923d0bd382941acc5794d7622f7adfe1b2533422 100644 (file)
@@ -79,6 +79,7 @@ struct kvm_fpu {
 #define LOONGARCH_REG_64(TYPE, REG)    (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
 #define KVM_IOC_CSRID(REG)             LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
 #define KVM_IOC_CPUCFG(REG)            LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+#define KVM_LOONGARCH_VCPU_CPUCFG      0
 
 struct kvm_debug_exit_arch {
 };
index 8e00a754e548943ae4dba5d330a1354426c713d1..5cf59c617126b7d00f65b3310ef39ea7bfb98e96 100644 (file)
@@ -29,11 +29,9 @@ int disabled_cpus;
 
 u64 acpi_saved_sp;
 
-#define MAX_CORE_PIC 256
-
 #define PREFIX                 "ACPI: "
 
-struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
+struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
 
 void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
 {
@@ -119,7 +117,7 @@ acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long e
                return -EINVAL;
 
        core = eiointc->node * CORES_PER_EIO_NODE;
-       set_bit(core, &(loongson_sysconf.cores_io_master));
+       set_bit(core, loongson_sysconf.cores_io_master);
 
        return 0;
 }
index acb5d3385675c974d98a71b8b659ed1088914acd..000825406c1f62cdebd32e79714738987d20d5cc 100644 (file)
@@ -140,4 +140,6 @@ void __init efi_init(void)
 
                early_memunmap(tbl, sizeof(*tbl));
        }
+
+       efi_esrt_init();
 }
index 183e94fc9c69ce8761f3d70b730558bd7f26ff55..0fa81ced28dcdd053cf79f0aaffa8b127df482e1 100644 (file)
@@ -23,8 +23,3 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
 {
        return 0;
 }
-
-void loongarch_set_personality_fcsr(struct arch_elf_state *state)
-{
-       current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
-}
index 6b3bfb0092e60b34946490415ff7cd2a51287886..2f1f5b08638f818c2abb7a617b3b79250812b3b8 100644 (file)
@@ -5,13 +5,16 @@
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
 #include <linux/acpi.h>
+#include <linux/clk.h>
 #include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/memblock.h>
+#include <linux/of_clk.h>
 #include <asm/early_ioremap.h>
 #include <asm/bootinfo.h>
 #include <asm/loongson.h>
 #include <asm/setup.h>
+#include <asm/time.h>
 
 u64 efi_system_table;
 struct loongson_system_configuration loongson_sysconf;
@@ -36,7 +39,16 @@ void __init init_environ(void)
 
 static int __init init_cpu_fullname(void)
 {
-       int cpu;
+       struct device_node *root;
+       int cpu, ret;
+       char *model;
+
+       /* Parsing cpuname from DTS model property */
+       root = of_find_node_by_path("/");
+       ret = of_property_read_string(root, "model", (const char **)&model);
+       of_node_put(root);
+       if (ret == 0)
+               loongson_sysconf.cpuname = strsep(&model, " ");
 
        if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
                for (cpu = 0; cpu < NR_CPUS; cpu++)
@@ -46,6 +58,26 @@ static int __init init_cpu_fullname(void)
 }
 arch_initcall(init_cpu_fullname);
 
+static int __init fdt_cpu_clk_init(void)
+{
+       struct clk *clk;
+       struct device_node *np;
+
+       np = of_get_cpu_node(0, NULL);
+       if (!np)
+               return -ENODEV;
+
+       clk = of_clk_get(np, 0);
+       if (IS_ERR(clk))
+               return -ENODEV;
+
+       cpu_clock_freq = clk_get_rate(clk);
+       clk_put(clk);
+
+       return 0;
+}
+late_initcall(fdt_cpu_clk_init);
+
 static ssize_t boardinfo_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
 {
index d53ab10f464465e3f88910614afefce92b5af607..4382e36ae3d44466663aefaa5af1f23717876f35 100644 (file)
@@ -349,6 +349,7 @@ SYM_FUNC_START(_restore_lsx_upper)
        lsx_restore_all_upper a0 t0 t1
        jr      ra
 SYM_FUNC_END(_restore_lsx_upper)
+EXPORT_SYMBOL(_restore_lsx_upper)
 
 SYM_FUNC_START(_init_lsx_upper)
        lsx_init_all_upper t1
@@ -384,6 +385,7 @@ SYM_FUNC_START(_restore_lasx_upper)
        lasx_restore_all_upper a0 t0 t1
        jr      ra
 SYM_FUNC_END(_restore_lasx_upper)
+EXPORT_SYMBOL(_restore_lasx_upper)
 
 SYM_FUNC_START(_init_lasx_upper)
        lasx_init_all_upper t1
index 0ecab4216392899cf8655d0d67228cb280c6df02..c4f7de2e28054ceb6458c964c26e86596cea8602 100644 (file)
@@ -74,6 +74,11 @@ SYM_CODE_START(kernel_entry)                 # kernel entry point
        la.pcrel        t0, fw_arg2
        st.d            a2, t0, 0
 
+#ifdef CONFIG_PAGE_SIZE_4KB
+       li.d            t0, 0
+       li.d            t1, CSR_STFILL
+       csrxchg         t0, t1, LOONGARCH_CSR_IMPCTL1
+#endif
        /* KSave3 used for percpu base, initialized as 0 */
        csrwr           zero, PERCPU_BASE_KS
        /* GPR21 used for percpu base (runtime), initialized as 0 */
@@ -126,6 +131,11 @@ SYM_CODE_START(smpboot_entry)
 
        JUMP_VIRT_ADDR  t0, t1
 
+#ifdef CONFIG_PAGE_SIZE_4KB
+       li.d            t0, 0
+       li.d            t1, CSR_STFILL
+       csrxchg         t0, t1, LOONGARCH_CSR_IMPCTL1
+#endif
        /* Enable PG */
        li.w            t0, 0xb0                # PLV=0, IE=0, PG=1
        csrwr           t0, LOONGARCH_CSR_CRMD
index 767d94cce0de07d74892733b339a55dd5e6ded0e..f2ff8b5d591e4fd638109d2c98d75543c01a112c 100644 (file)
@@ -85,6 +85,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
        regs->csr_euen = euen;
        lose_fpu(0);
        lose_lbt(0);
+       current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
 
        clear_thread_flag(TIF_LSX_CTX_LIVE);
        clear_thread_flag(TIF_LASX_CTX_LIVE);
index d183a745fb85d4efcef51bbdab6d09a1e047b966..edf2bba80130670364e144ad301868a7dfd3bf93 100644 (file)
@@ -252,38 +252,23 @@ static void __init arch_reserve_vmcore(void)
 #endif
 }
 
-/* 2MB alignment for crash kernel regions */
-#define CRASH_ALIGN    SZ_2M
-#define CRASH_ADDR_MAX SZ_4G
-
-static void __init arch_parse_crashkernel(void)
+static void __init arch_reserve_crashkernel(void)
 {
-#ifdef CONFIG_KEXEC
        int ret;
-       unsigned long long total_mem;
+       unsigned long long low_size = 0;
        unsigned long long crash_base, crash_size;
+       char *cmdline = boot_command_line;
+       bool high = false;
 
-       total_mem = memblock_phys_mem_size();
-       ret = parse_crashkernel(boot_command_line, total_mem,
-                               &crash_size, &crash_base,
-                               NULL, NULL);
-       if (ret < 0 || crash_size <= 0)
+       if (!IS_ENABLED(CONFIG_KEXEC_CORE))
                return;
 
-       if (crash_base <= 0) {
-               crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, CRASH_ALIGN, CRASH_ADDR_MAX);
-               if (!crash_base) {
-                       pr_warn("crashkernel reservation failed - No suitable area found.\n");
-                       return;
-               }
-       } else if (!memblock_phys_alloc_range(crash_size, CRASH_ALIGN, crash_base, crash_base + crash_size)) {
-               pr_warn("Invalid memory region reserved for crash kernel\n");
+       ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+                               &crash_size, &crash_base, &low_size, &high);
+       if (ret)
                return;
-       }
 
-       crashk_res.start = crash_base;
-       crashk_res.end   = crash_base + crash_size - 1;
-#endif
+       reserve_crashkernel_generic(cmdline, crash_size, crash_base, low_size, high);
 }
 
 static void __init fdt_setup(void)
@@ -295,8 +280,12 @@ static void __init fdt_setup(void)
        if (acpi_os_get_root_pointer())
                return;
 
-       /* Look for a device tree configuration table entry */
-       fdt_pointer = efi_fdt_pointer();
+       /* Prefer to use built-in dtb, checking its legality first. */
+       if (!fdt_check_header(__dtb_start))
+               fdt_pointer = __dtb_start;
+       else
+               fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */
+
        if (!fdt_pointer || fdt_check_header(fdt_pointer))
                return;
 
@@ -330,7 +319,9 @@ static void __init bootcmdline_init(char **cmdline_p)
                if (boot_command_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
 
-               strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);
+               if (!strstr(boot_command_line, init_command_line))
+                       strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);
+
                goto out;
        }
 #endif
@@ -357,7 +348,7 @@ out:
 void __init platform_init(void)
 {
        arch_reserve_vmcore();
-       arch_parse_crashkernel();
+       arch_reserve_crashkernel();
 
 #ifdef CONFIG_ACPI_TABLE_UPGRADE
        acpi_table_upgrade();
@@ -467,15 +458,6 @@ static void __init resource_init(void)
                request_resource(res, &data_resource);
                request_resource(res, &bss_resource);
        }
-
-#ifdef CONFIG_KEXEC
-       if (crashk_res.start < crashk_res.end) {
-               insert_resource(&iomem_resource, &crashk_res);
-               pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
-                       (unsigned long)((crashk_res.end - crashk_res.start + 1) >> 20),
-                       (unsigned long)(crashk_res.start  >> 20));
-       }
-#endif
 }
 
 static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
index 5bca12d16e0691c8e9511f6043a7a5af9655af20..2b49d30eb7c0185e043e462859e76a4ae64ecd67 100644 (file)
@@ -208,7 +208,7 @@ static void __init fdt_smp_setup(void)
        }
 
        loongson_sysconf.nr_cpus = num_processors;
-       set_bit(0, &(loongson_sysconf.cores_io_master));
+       set_bit(0, loongson_sysconf.cores_io_master);
 #endif
 }
 
@@ -216,6 +216,9 @@ void __init loongson_smp_setup(void)
 {
        fdt_smp_setup();
 
+       if (loongson_sysconf.cores_per_package == 0)
+               loongson_sysconf.cores_per_package = num_processors;
+
        cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
        cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
 
@@ -506,7 +509,6 @@ asmlinkage void start_secondary(void)
        sync_counter();
        cpu = raw_smp_processor_id();
        set_my_cpu_offset(per_cpu_offset(cpu));
-       rcutree_report_cpu_starting(cpu);
 
        cpu_probe();
        constant_clockevent_init();
index 3fd1660066983b247d1493864a9437e831282e3e..75d5c51a7cd3def4227a3777625abecca37c8ede 100644 (file)
 
 #include <acpi/processor.h>
 
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-
 #ifdef CONFIG_HOTPLUG_CPU
-int arch_register_cpu(int cpu)
+bool arch_cpu_is_hotpluggable(int cpu)
 {
-       int ret;
-       struct cpu *c = &per_cpu(cpu_devices, cpu);
-
-       c->hotpluggable = 1;
-       ret = register_cpu(c, cpu);
-       if (ret < 0)
-               pr_warn("register_cpu %d failed (%d)\n", cpu, ret);
-
-       return ret;
-}
-EXPORT_SYMBOL(arch_register_cpu);
-
-void arch_unregister_cpu(int cpu)
-{
-       struct cpu *c = &per_cpu(cpu_devices, cpu);
-
-       c->hotpluggable = 0;
-       unregister_cpu(c);
+       return !io_master(cpu);
 }
-EXPORT_SYMBOL(arch_unregister_cpu);
 #endif
-
-static int __init topology_init(void)
-{
-       int i, ret;
-
-       for_each_present_cpu(i) {
-               struct cpu *c = &per_cpu(cpu_devices, i);
-
-               c->hotpluggable = !io_master(i);
-               ret = register_cpu(c, i);
-               if (ret < 0)
-                       pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret);
-       }
-
-       return 0;
-}
-
-subsys_initcall(topology_init);
index fda425babfb203d3fc36052b9722515ab7c568e6..61f7e33b1f95731c3c1a207337e30a63ff028ad6 100644 (file)
@@ -22,14 +22,13 @@ config KVM
        depends on AS_HAS_LVZ_EXTENSION
        depends on HAVE_KVM
        select HAVE_KVM_DIRTY_RING_ACQ_REL
-       select HAVE_KVM_EVENTFD
        select HAVE_KVM_VCPU_ASYNC_IOCTL
+       select KVM_COMMON
        select KVM_GENERIC_DIRTYLOG_READ_PROTECT
        select KVM_GENERIC_HARDWARE_ENABLING
+       select KVM_GENERIC_MMU_NOTIFIER
        select KVM_MMIO
        select KVM_XFER_TO_GUEST_WORK
-       select MMU_NOTIFIER
-       select PREEMPT_NOTIFIERS
        help
          Support hosting virtualized guest machines using
          hardware virtualization extensions. You will need
index ce8de3fa472cc64c14c80099ec55e06d24bf79be..ed1d89d53e2e6da0c8b73ed17d97146721347d16 100644 (file)
@@ -200,17 +200,8 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu)
        ++vcpu->stat.idle_exits;
        trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);
 
-       if (!kvm_arch_vcpu_runnable(vcpu)) {
-               /*
-                * Switch to the software timer before halt-polling/blocking as
-                * the guest's timer may be a break event for the vCPU, and the
-                * hypervisor timer runs only when the CPU is in guest mode.
-                * Switch before halt-polling so that KVM recognizes an expired
-                * timer before blocking.
-                */
-               kvm_save_timer(vcpu);
-               kvm_vcpu_block(vcpu);
-       }
+       if (!kvm_arch_vcpu_runnable(vcpu))
+               kvm_vcpu_halt(vcpu);
 
        return EMULATE_DONE;
 }
@@ -643,6 +634,11 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *run = vcpu->run;
 
+       if (!kvm_guest_has_fpu(&vcpu->arch)) {
+               kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+               return RESUME_GUEST;
+       }
+
        /*
         * If guest FPU not present, the FPU operation should have been
         * treated as a reserved instruction!
@@ -659,6 +655,36 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
        return RESUME_GUEST;
 }
 
+/*
+ * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
+ * @vcpu:      Virtual CPU context.
+ *
+ * Handle when the guest attempts to use LSX when it is disabled in the root
+ * context.
+ */
+static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
+{
+       if (kvm_own_lsx(vcpu))
+               kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+
+       return RESUME_GUEST;
+}
+
+/*
+ * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
+ * @vcpu:      Virtual CPU context.
+ *
+ * Handle when the guest attempts to use LASX when it is disabled in the root
+ * context.
+ */
+static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
+{
+       if (kvm_own_lasx(vcpu))
+               kvm_queue_exception(vcpu, EXCCODE_INE, 0);
+
+       return RESUME_GUEST;
+}
+
 /*
  * LoongArch KVM callback handling for unimplemented guest exiting
  */
@@ -687,6 +713,8 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
        [EXCCODE_TLBS]                  = kvm_handle_write_fault,
        [EXCCODE_TLBM]                  = kvm_handle_write_fault,
        [EXCCODE_FPDIS]                 = kvm_handle_fpu_disabled,
+       [EXCCODE_LSXDIS]                = kvm_handle_lsx_disabled,
+       [EXCCODE_LASXDIS]               = kvm_handle_lasx_disabled,
        [EXCCODE_GSPR]                  = kvm_handle_gspr,
 };
 
index 1c1d5199500eec1ef6ffe44d7e7e1bf059c069c8..86a2f2d0cb27e3d213012d6987abde4ab1dae60e 100644 (file)
@@ -287,7 +287,6 @@ int kvm_arch_hardware_enable(void)
        if (env & CSR_GCFG_MATC_ROOT)
                gcfg |= CSR_GCFG_MATC_ROOT;
 
-       gcfg |= CSR_GCFG_TIT;
        write_csr_gcfg(gcfg);
 
        kvm_flush_tlb_all();
index 80480df5f55054cd23ed7657bdacdee5d3a6dc7e..50a6acd7ffe4c94b986c5f7a9802420f090a7d79 100644 (file)
 #include <asm/tlb.h>
 #include <asm/kvm_mmu.h>
 
+static inline bool kvm_hugepage_capable(struct kvm_memory_slot *slot)
+{
+       return slot->arch.flags & KVM_MEM_HUGEPAGE_CAPABLE;
+}
+
+static inline bool kvm_hugepage_incapable(struct kvm_memory_slot *slot)
+{
+       return slot->arch.flags & KVM_MEM_HUGEPAGE_INCAPABLE;
+}
+
 static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx)
 {
        ctx->level = kvm->arch.root_level;
@@ -365,6 +375,69 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
        kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx);
 }
 
+int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old,
+                                  struct kvm_memory_slot *new, enum kvm_mr_change change)
+{
+       gpa_t gpa_start;
+       hva_t hva_start;
+       size_t size, gpa_offset, hva_offset;
+
+       if ((change != KVM_MR_MOVE) && (change != KVM_MR_CREATE))
+               return 0;
+       /*
+        * Prevent userspace from creating a memory region outside of the
+        * VM GPA address space
+        */
+       if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT))
+               return -ENOMEM;
+
+       new->arch.flags = 0;
+       size = new->npages * PAGE_SIZE;
+       gpa_start = new->base_gfn << PAGE_SHIFT;
+       hva_start = new->userspace_addr;
+       if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE)
+                       && IS_ALIGNED(hva_start, PMD_SIZE))
+               new->arch.flags |= KVM_MEM_HUGEPAGE_CAPABLE;
+       else {
+               /*
+                * Pages belonging to memslots that don't have the same
+                * alignment within a PMD for userspace and GPA cannot be
+                * mapped with PMD entries, because we'll end up mapping
+                * the wrong pages.
+                *
+                * Consider a layout like the following:
+                *
+                *    memslot->userspace_addr:
+                *    +-----+--------------------+--------------------+---+
+                *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
+                *    +-----+--------------------+--------------------+---+
+                *
+                *    memslot->base_gfn << PAGE_SIZE:
+                *      +---+--------------------+--------------------+-----+
+                *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
+                *      +---+--------------------+--------------------+-----+
+                *
+                * If we create those stage-2 blocks, we'll end up with this
+                * incorrect mapping:
+                *   d -> f
+                *   e -> g
+                *   f -> h
+                */
+               gpa_offset = gpa_start & (PMD_SIZE - 1);
+               hva_offset = hva_start & (PMD_SIZE - 1);
+               if (gpa_offset != hva_offset) {
+                       new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE;
+               } else {
+                       if (gpa_offset == 0)
+                               gpa_offset = PMD_SIZE;
+                       if ((size + gpa_offset) < (PMD_SIZE * 2))
+                               new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE;
+               }
+       }
+
+       return 0;
+}
+
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
@@ -562,47 +635,23 @@ out:
 }
 
 static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
-                               unsigned long hva, unsigned long map_size, bool write)
+                               unsigned long hva, bool write)
 {
-       size_t size;
-       gpa_t gpa_start;
-       hva_t uaddr_start, uaddr_end;
+       hva_t start, end;
 
        /* Disable dirty logging on HugePages */
        if (kvm_slot_dirty_track_enabled(memslot) && write)
                return false;
 
-       size = memslot->npages * PAGE_SIZE;
-       gpa_start = memslot->base_gfn << PAGE_SHIFT;
-       uaddr_start = memslot->userspace_addr;
-       uaddr_end = uaddr_start + size;
+       if (kvm_hugepage_capable(memslot))
+               return true;
 
-       /*
-        * Pages belonging to memslots that don't have the same alignment
-        * within a PMD for userspace and GPA cannot be mapped with stage-2
-        * PMD entries, because we'll end up mapping the wrong pages.
-        *
-        * Consider a layout like the following:
-        *
-        *    memslot->userspace_addr:
-        *    +-----+--------------------+--------------------+---+
-        *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
-        *    +-----+--------------------+--------------------+---+
-        *
-        *    memslot->base_gfn << PAGE_SIZE:
-        *      +---+--------------------+--------------------+-----+
-        *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
-        *      +---+--------------------+--------------------+-----+
-        *
-        * If we create those stage-2 blocks, we'll end up with this incorrect
-        * mapping:
-        *   d -> f
-        *   e -> g
-        *   f -> h
-        */
-       if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
+       if (kvm_hugepage_incapable(memslot))
                return false;
 
+       start = memslot->userspace_addr;
+       end = start + memslot->npages * PAGE_SIZE;
+
        /*
         * Next, let's make sure we're not trying to map anything not covered
         * by the memslot. This means we have to prohibit block size mappings
@@ -615,8 +664,7 @@ static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
         * userspace_addr or the base_gfn, as both are equally aligned (per
         * the check above) and equally sized.
         */
-       return (hva & ~(map_size - 1)) >= uaddr_start &&
-               (hva & ~(map_size - 1)) + map_size <= uaddr_end;
+       return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE));
 }
 
 /*
@@ -627,7 +675,7 @@ static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
  *
  * There are several ways to safely use this helper:
  *
- * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
  *   consuming it.  In this case, mmu_lock doesn't need to be held during the
  *   lookup, but it does need to be held while checking the MMU notifier.
  *
@@ -807,7 +855,7 @@ retry:
 
        /* Check if an invalidation has taken place since we got pfn */
        spin_lock(&kvm->mmu_lock);
-       if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) {
+       if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                /*
                 * This can happen when mappings are changed asynchronously, but
                 * also synchronously if a COW is triggered by
@@ -842,7 +890,7 @@ retry:
 
        /* Disable dirty logging on HugePages */
        level = 0;
-       if (!fault_supports_huge_mapping(memslot, hva, PMD_SIZE, write)) {
+       if (!fault_supports_huge_mapping(memslot, hva, write)) {
                level = 0;
        } else {
                level = host_pfn_mapping_level(kvm, gfn, memslot);
@@ -901,12 +949,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 }
 
-int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old,
-                                  struct kvm_memory_slot *new, enum kvm_mr_change change)
-{
-       return 0;
-}
-
 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *memslot)
 {
index 0ed9040307b71511b42aa6913358ec1acdd7b25b..ba976509bfe819ec51fdaa08f2a1ba4a334755cd 100644 (file)
@@ -245,6 +245,37 @@ SYM_FUNC_START(kvm_restore_fpu)
        jr                 ra
 SYM_FUNC_END(kvm_restore_fpu)
 
+#ifdef CONFIG_CPU_HAS_LSX
+SYM_FUNC_START(kvm_save_lsx)
+       fpu_save_csr    a0 t1
+       fpu_save_cc     a0 t1 t2
+       lsx_save_data   a0 t1
+       jr              ra
+SYM_FUNC_END(kvm_save_lsx)
+
+SYM_FUNC_START(kvm_restore_lsx)
+       lsx_restore_data a0 t1
+       fpu_restore_cc   a0 t1 t2
+       fpu_restore_csr  a0 t1 t2
+       jr               ra
+SYM_FUNC_END(kvm_restore_lsx)
+#endif
+
+#ifdef CONFIG_CPU_HAS_LASX
+SYM_FUNC_START(kvm_save_lasx)
+       fpu_save_csr    a0 t1
+       fpu_save_cc     a0 t1 t2
+       lasx_save_data  a0 t1
+       jr              ra
+SYM_FUNC_END(kvm_save_lasx)
+
+SYM_FUNC_START(kvm_restore_lasx)
+       lasx_restore_data a0 t1
+       fpu_restore_cc    a0 t1 t2
+       fpu_restore_csr   a0 t1 t2
+       jr                ra
+SYM_FUNC_END(kvm_restore_lasx)
+#endif
        .section ".rodata"
 SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
 SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
index 284bf553fefef5ef3940a3adca572bfbc4d0462c..111328f6087285a01ccf4077672cf4cc85266866 100644 (file)
@@ -64,41 +64,24 @@ void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
        kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
 }
 
-/*
- * Restore hard timer state and enable guest to access timer registers
- * without trap, should be called with irq disabled
- */
-void kvm_acquire_timer(struct kvm_vcpu *vcpu)
-{
-       unsigned long cfg;
-
-       cfg = read_csr_gcfg();
-       if (!(cfg & CSR_GCFG_TIT))
-               return;
-
-       /* Enable guest access to hard timer */
-       write_csr_gcfg(cfg & ~CSR_GCFG_TIT);
-
-       /*
-        * Freeze the soft-timer and sync the guest stable timer with it. We do
-        * this with interrupts disabled to avoid latency.
-        */
-       hrtimer_cancel(&vcpu->arch.swtimer);
-}
-
 /*
  * Restore soft timer state from saved context.
  */
 void kvm_restore_timer(struct kvm_vcpu *vcpu)
 {
-       unsigned long cfg, delta, period;
+       unsigned long cfg, estat;
+       unsigned long ticks, delta, period;
        ktime_t expire, now;
        struct loongarch_csrs *csr = vcpu->arch.csr;
 
        /*
         * Set guest stable timer cfg csr
+        * Disable timer before restore estat CSR register, avoid to
+        * get invalid timer interrupt for old timer cfg
         */
        cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
+
+       write_gcsr_timercfg(0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
        if (!(cfg & CSR_TCFG_EN)) {
@@ -107,24 +90,56 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
                return;
        }
 
+       /*
+        * Freeze the soft-timer and sync the guest stable timer with it.
+        */
+       hrtimer_cancel(&vcpu->arch.swtimer);
+
+       /*
+        * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
+        * If oneshot timer is fired, CSR TVAL will be -1, there are two
+        * conditions:
+        *  1) timer is fired during exiting to host
+        *  2) timer is fired and vm is doing timer irq, and then exiting to
+        *     host. Host should not inject timer irq to avoid spurious
+        *     timer interrupt again
+        */
+       ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
+       estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
+       if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) {
+               /*
+                * Writing 0 to LOONGARCH_CSR_TVAL will inject timer irq
+                * and set CSR TVAL with -1
+                */
+               write_gcsr_timertick(0);
+
+               /*
+                * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear
+                * timer interrupt, and CSR TVAL keeps unchanged with -1, it
+                * avoids spurious timer interrupt
+                */
+               if (!(estat & CPU_TIMER))
+                       gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
+               return;
+       }
+
        /*
         * Set remainder tick value if not expired
         */
+       delta = 0;
        now = ktime_get();
        expire = vcpu->arch.expire;
        if (ktime_before(now, expire))
                delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
-       else {
-               if (cfg & CSR_TCFG_PERIOD) {
-                       period = cfg & CSR_TCFG_VAL;
-                       delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
-                       delta = period - (delta % period);
-               } else
-                       delta = 0;
+       else if (cfg & CSR_TCFG_PERIOD) {
+               period = cfg & CSR_TCFG_VAL;
+               delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
+               delta = period - (delta % period);
+
                /*
                 * Inject timer here though sw timer should inject timer
                 * interrupt async already, since sw timer may be cancelled
-                * during injecting intr async in function kvm_acquire_timer
+                * during injecting intr async
                 */
                kvm_queue_irq(vcpu, INT_TI);
        }
@@ -139,27 +154,41 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
  */
 static void _kvm_save_timer(struct kvm_vcpu *vcpu)
 {
-       unsigned long ticks, delta;
+       unsigned long ticks, delta, cfg;
        ktime_t expire;
        struct loongarch_csrs *csr = vcpu->arch.csr;
 
+       cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
        ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
-       delta = tick_to_ns(vcpu, ticks);
-       expire = ktime_add_ns(ktime_get(), delta);
-       vcpu->arch.expire = expire;
-       if (ticks) {
+
+       /*
+        * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
+        * If period timer is fired, CSR TVAL will be reloaded from CSR TCFG
+        * If oneshot timer is fired, CSR TVAL will be -1
+        * Here judge one-shot timer fired by checking whether TVAL is larger
+        * than TCFG
+        */
+       if (ticks < cfg) {
+               delta = tick_to_ns(vcpu, ticks);
+               expire = ktime_add_ns(ktime_get(), delta);
+               vcpu->arch.expire = expire;
+
                /*
-                * Update hrtimer to use new timeout
                 * HRTIMER_MODE_PINNED is suggested since vcpu may run in
                 * the same physical cpu in next time
                 */
-               hrtimer_cancel(&vcpu->arch.swtimer);
                hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
-       } else
+       } else if (vcpu->stat.generic.blocking) {
                /*
-                * Inject timer interrupt so that hall polling can dectect and exit
+                * Inject timer interrupt so that halt polling can dectect and exit.
+                * VCPU is scheduled out already and sleeps in rcuwait queue and
+                * will not poll pending events again. kvm_queue_irq() is not enough,
+                * hrtimer swtimer should be used here.
                 */
-               kvm_queue_irq(vcpu, INT_TI);
+               expire = ktime_add_ns(ktime_get(), 10);
+               vcpu->arch.expire = expire;
+               hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
+       }
 }
 
 /*
@@ -168,21 +197,15 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
  */
 void kvm_save_timer(struct kvm_vcpu *vcpu)
 {
-       unsigned long cfg;
        struct loongarch_csrs *csr = vcpu->arch.csr;
 
        preempt_disable();
-       cfg = read_csr_gcfg();
-       if (!(cfg & CSR_GCFG_TIT)) {
-               /* Disable guest use of hard timer */
-               write_csr_gcfg(cfg | CSR_GCFG_TIT);
-
-               /* Save hard timer state */
-               kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
-               kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
-               if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
-                       _kvm_save_timer(vcpu);
-       }
+
+       /* Save hard timer state */
+       kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
+       kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
+       if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
+               _kvm_save_timer(vcpu);
 
        /* Save timer-related state to vCPU context */
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
index a1e35d6554185cf69912cae41e0b26ff20e70739..c2484ad4cffa2102a61f2bd4d0a6f537a6501906 100644 (file)
@@ -102,6 +102,8 @@ TRACE_EVENT(kvm_exit_gspr,
 #define KVM_TRACE_AUX_DISCARD          4
 
 #define KVM_TRACE_AUX_FPU              1
+#define KVM_TRACE_AUX_LSX              2
+#define KVM_TRACE_AUX_LASX             3
 
 #define kvm_trace_symbol_aux_op                                \
        { KVM_TRACE_AUX_SAVE,           "save" },       \
@@ -111,7 +113,9 @@ TRACE_EVENT(kvm_exit_gspr,
        { KVM_TRACE_AUX_DISCARD,        "discard" }
 
 #define kvm_trace_symbol_aux_state                     \
-       { KVM_TRACE_AUX_FPU,     "FPU" }
+       { KVM_TRACE_AUX_FPU,     "FPU" },               \
+       { KVM_TRACE_AUX_LSX,     "LSX" },               \
+       { KVM_TRACE_AUX_LASX,    "LASX" }
 
 TRACE_EVENT(kvm_aux,
            TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
index 73d0c2b9c1a5769215a68bfd572f2785679ef01e..27701991886dda7e3a6f75bd8a7f71a86995735b 100644 (file)
@@ -95,7 +95,6 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
                 * check vmid before vcpu enter guest
                 */
                local_irq_disable();
-               kvm_acquire_timer(vcpu);
                kvm_deliver_intr(vcpu);
                kvm_deliver_exception(vcpu);
                /* Make sure the vcpu mode has been written */
@@ -187,8 +186,15 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-       return kvm_pending_timer(vcpu) ||
+       int ret;
+
+       /* Protect from TOD sync and vcpu_load/put() */
+       preempt_disable();
+       ret = kvm_pending_timer(vcpu) ||
                kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
+       preempt_enable();
+
+       return ret;
 }
 
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
@@ -244,23 +250,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
        return -EINVAL;
 }
 
-/**
- * kvm_migrate_count() - Migrate timer.
- * @vcpu:       Virtual CPU.
- *
- * Migrate hrtimer to the current CPU by cancelling and restarting it
- * if the hrtimer is active.
- *
- * Must be called when the vCPU is migrated to a different CPU, so that
- * the timer can interrupt the guest at the new CPU, and the timer irq can
- * be delivered to the vCPU.
- */
-static void kvm_migrate_count(struct kvm_vcpu *vcpu)
-{
-       if (hrtimer_cancel(&vcpu->arch.swtimer))
-               hrtimer_restart(&vcpu->arch.swtimer);
-}
-
 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
 {
        unsigned long gintc;
@@ -309,6 +298,76 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
        return ret;
 }
 
+static int _kvm_get_cpucfg(int id, u64 *v)
+{
+       int ret = 0;
+
+       if (id < 0 && id >= KVM_MAX_CPUCFG_REGS)
+               return -EINVAL;
+
+       switch (id) {
+       case 2:
+               /* Return CPUCFG2 features which have been supported by KVM */
+               *v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
+                    CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
+                    CPUCFG2_LAM;
+               /*
+                * If LSX is supported by CPU, it is also supported by KVM,
+                * as we implement it.
+                */
+               if (cpu_has_lsx)
+                       *v |= CPUCFG2_LSX;
+               /*
+                * if LASX is supported by CPU, it is also supported by KVM,
+                * as we implement it.
+                */
+               if (cpu_has_lasx)
+                       *v |= CPUCFG2_LASX;
+
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static int kvm_check_cpucfg(int id, u64 val)
+{
+       u64 mask;
+       int ret = 0;
+
+       if (id < 0 && id >= KVM_MAX_CPUCFG_REGS)
+               return -EINVAL;
+
+       if (_kvm_get_cpucfg(id, &mask))
+               return ret;
+
+       switch (id) {
+       case 2:
+               /* CPUCFG2 features checking */
+               if (val & ~mask)
+                       /* The unsupported features should not be set */
+                       ret = -EINVAL;
+               else if (!(val & CPUCFG2_LLFTP))
+                       /* The LLFTP must be set, as guest must has a constant timer */
+                       ret = -EINVAL;
+               else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
+                       /* Single and double float point must both be set when enable FP */
+                       ret = -EINVAL;
+               else if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
+                       /* FP should be set when enable LSX */
+                       ret = -EINVAL;
+               else if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
+                       /* LSX, FP should be set when enable LASX, and FP has been checked before. */
+                       ret = -EINVAL;
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
                const struct kvm_one_reg *reg, u64 *v)
 {
@@ -378,10 +437,10 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
                break;
        case KVM_REG_LOONGARCH_CPUCFG:
                id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
-               if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
-                       vcpu->arch.cpucfg[id] = (u32)v;
-               else
-                       ret = -EINVAL;
+               ret = kvm_check_cpucfg(id, v);
+               if (ret)
+                       break;
+               vcpu->arch.cpucfg[id] = (u32)v;
                break;
        case KVM_REG_LOONGARCH_KVM:
                switch (reg->id) {
@@ -471,10 +530,94 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
        return -EINVAL;
 }
 
+static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
+                                        struct kvm_device_attr *attr)
+{
+       switch (attr->attr) {
+       case 2:
+               return 0;
+       default:
+               return -ENXIO;
+       }
+
+       return -ENXIO;
+}
+
+static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
+                                      struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->group) {
+       case KVM_LOONGARCH_VCPU_CPUCFG:
+               ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
+                                        struct kvm_device_attr *attr)
+{
+       int ret = 0;
+       uint64_t val;
+       uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
+
+       ret = _kvm_get_cpucfg(attr->attr, &val);
+       if (ret)
+               return ret;
+
+       /* put_user() returns -EFAULT on fault; must not be ignored */
+       ret = put_user(val, uaddr);
+
+       return ret;
+}
+
+static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
+                                      struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->group) {
+       case KVM_LOONGARCH_VCPU_CPUCFG:
+               ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
+                                        struct kvm_device_attr *attr)
+{
+       return -ENXIO;
+}
+
+static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
+                                      struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->group) {
+       case KVM_LOONGARCH_VCPU_CPUCFG:
+               ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {
        long r;
+       struct kvm_device_attr attr;
        void __user *argp = (void __user *)arg;
        struct kvm_vcpu *vcpu = filp->private_data;
 
@@ -514,6 +657,27 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
+       case KVM_HAS_DEVICE_ATTR: {
+               r = -EFAULT;
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       break;
+               r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
+               break;
+       }
+       case KVM_GET_DEVICE_ATTR: {
+               r = -EFAULT;
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       break;
+               r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
+               break;
+       }
+       case KVM_SET_DEVICE_ATTR: {
+               r = -EFAULT;
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       break;
+               r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
+               break;
+       }
        default:
                r = -ENOIOCTLCMD;
                break;
@@ -561,12 +725,96 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
        preempt_enable();
 }
 
+#ifdef CONFIG_CPU_HAS_LSX
+/* Enable LSX and restore context */
+int kvm_own_lsx(struct kvm_vcpu *vcpu)
+{
+       if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
+               return -EINVAL;
+
+       preempt_disable();
+
+       /* Enable LSX for guest */
+       set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
+       switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
+       case KVM_LARCH_FPU:
+               /*
+                * Guest FPU state already loaded,
+                * only restore upper LSX state
+                */
+               _restore_lsx_upper(&vcpu->arch.fpu);
+               break;
+       default:
+               /* Neither FP nor LSX already active,
+                * restore full LSX state
+                */
+               kvm_restore_lsx(&vcpu->arch.fpu);
+               break;
+       }
+
+       trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
+       vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
+       preempt_enable();
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_CPU_HAS_LASX
+/* Enable LASX and restore context */
+int kvm_own_lasx(struct kvm_vcpu *vcpu)
+{
+       if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
+               return -EINVAL;
+
+       preempt_disable();
+
+       set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
+       switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
+       case KVM_LARCH_LSX:
+       case KVM_LARCH_LSX | KVM_LARCH_FPU:
+               /* Guest LSX state already loaded, only restore upper LASX state */
+               _restore_lasx_upper(&vcpu->arch.fpu);
+               break;
+       case KVM_LARCH_FPU:
+               /* Guest FP state already loaded, only restore upper LSX & LASX state */
+               _restore_lsx_upper(&vcpu->arch.fpu);
+               _restore_lasx_upper(&vcpu->arch.fpu);
+               break;
+       default:
+               /* Neither FP nor LSX already active, restore full LASX state */
+               kvm_restore_lasx(&vcpu->arch.fpu);
+               break;
+       }
+
+       trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
+       vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
+       preempt_enable();
+
+       return 0;
+}
+#endif
+
 /* Save context and disable FPU */
 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 {
        preempt_disable();
 
-       if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
+       if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
+               kvm_save_lasx(&vcpu->arch.fpu);
+               vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
+               trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
+
+               /* Disable LASX & LSX & FPU */
+               clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
+       } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
+               kvm_save_lsx(&vcpu->arch.fpu);
+               vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
+               trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
+
+               /* Disable LSX & FPU */
+               clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
+       } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
                kvm_save_fpu(&vcpu->arch.fpu);
                vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
@@ -789,17 +1037,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        unsigned long flags;
 
        local_irq_save(flags);
-       if (vcpu->arch.last_sched_cpu != cpu) {
-               kvm_debug("[%d->%d]KVM vCPU[%d] switch\n",
-                               vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
-               /*
-                * Migrate the timer interrupt to the current CPU so that it
-                * always interrupts the guest and synchronously triggers a
-                * guest timer interrupt.
-                */
-               kvm_migrate_count(vcpu);
-       }
-
        /* Restore guest state to registers */
        _kvm_vcpu_load(vcpu, cpu);
        local_irq_restore(flags);
index cc3e81fe0186f4f0fa8de9cedfc75138583ce23f..c608adc9984581d0419594a8eb87ae18a3e9ec63 100644 (file)
@@ -44,6 +44,9 @@ void *kasan_mem_to_shadow(const void *addr)
                unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
                unsigned long offset = 0;
 
+               if (maddr >= FIXADDR_START)
+                       return (void *)(kasan_early_shadow_page);
+
                maddr &= XRANGE_SHADOW_MASK;
                switch (xrange) {
                case XKPRANGE_CC_SEG:
index 2c0a411f23aa778bb62160bd511252736fc987be..0b95d32b30c94704a0108fdffcae68c148403ce7 100644 (file)
@@ -284,12 +284,16 @@ static void setup_tlb_handler(int cpu)
                set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
                set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
                set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
-       }
+       } else {
+               int vec_sz __maybe_unused;
+               void *addr __maybe_unused;
+               struct page *page __maybe_unused;
+
+               /* Avoid lockdep warning */
+               rcutree_report_cpu_starting(cpu);
+
 #ifdef CONFIG_NUMA
-       else {
-               void *addr;
-               struct page *page;
-               const int vec_sz = sizeof(exception_handlers);
+               vec_sz = sizeof(exception_handlers);
 
                if (pcpu_handlers[cpu])
                        return;
@@ -305,8 +309,8 @@ static void setup_tlb_handler(int cpu)
                csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
                csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
                csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
-       }
 #endif
+       }
 }
 
 void tlb_init(int cpu)
index 4fcd6cd6da234d4dc4120cb2c4b95a69e2577358..e73323d759d0b85b275aa389fc684b74d12cb13e 100644 (file)
@@ -201,6 +201,11 @@ bool bpf_jit_supports_kfunc_call(void)
        return true;
 }
 
+bool bpf_jit_supports_far_kfunc_call(void)
+{
+       return true;
+}
+
 /* initialized on the first pass of build_body() */
 static int out_offset = -1;
 static int emit_bpf_tail_call(struct jit_ctx *ctx)
@@ -465,7 +470,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
        const u8 dst = regmap[insn->dst_reg];
        const s16 off = insn->off;
        const s32 imm = insn->imm;
-       const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
        const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
 
        switch (code) {
@@ -923,8 +927,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 
        /* dst = imm64 */
        case BPF_LD | BPF_IMM | BPF_DW:
+       {
+               const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
+
                move_imm(ctx, dst, imm64, is32);
                return 1;
+       }
 
        /* dst = *(size *)(src + off) */
        case BPF_LDX | BPF_MEM | BPF_B:
index c74c9921304f2273fea31278cfafce7b143a75ea..f597cd08a96be0a19084884bd175678a6a83d6ab 100644 (file)
@@ -2,6 +2,7 @@
 # Objects to go into the VDSO.
 
 KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
 KCOV_INSTRUMENT := n
 
 # Include the generic Makefile to check the built vdso.
index 43e39040d3ac6cd38a4bd4fc3dc04e03d5c71bf5..76ef1a67c3611bbcd864d27fe59127c4c27f038a 100644 (file)
 KBUILD_DEFCONFIG := multi_defconfig
 
 ifdef cross_compiling
-       ifeq ($(CROSS_COMPILE),)
+        ifeq ($(CROSS_COMPILE),)
                CROSS_COMPILE := $(call cc-cross-prefix, \
                        m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
-       endif
+        endif
 endif
 
 #
index 3a74d493eb3e6e34af2db27f4f2474c992e6a6b0..17b2987c2bf5436db1e09c166795d3e42a282b35 100644 (file)
@@ -23,9 +23,9 @@ static int stderr_id;
 static struct tty_port nfcon_tty_port;
 static struct tty_driver *nfcon_tty_driver;
 
-static void nfputs(const char *str, unsigned int count)
+static void nfputs(const u8 *str, size_t count)
 {
-       char buf[68];
+       u8 buf[68];
        unsigned long phys = virt_to_phys(buf);
 
        buf[64] = 0;
index ed12358c4783b468ae834106925732ef5875c772..9a71b0148461a4551fe4aae49ca9cf8fea6d46fe 100644 (file)
@@ -191,6 +191,7 @@ extern void cache_push_v(unsigned long vaddr, int len);
 #define flush_cache_all() __flush_cache_all()
 
 #define flush_cache_vmap(start, end)           flush_cache_all()
+#define flush_cache_vmap_early(start, end)     do { } while (0)
 #define flush_cache_vunmap(start, end)         flush_cache_all()
 
 static inline void flush_cache_mm(struct mm_struct *mm)
index 7b2d7f6f23c0dc888cbd07e04be3defd652ba53b..4da7bc4ac4a37aeee1335ae309dc4f659ac4f6a8 100644 (file)
@@ -3,11 +3,9 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_EXPERT=y
 # CONFIG_BASE_FULL is not set
 CONFIG_KALLSYMS_ALL=y
-CONFIG_EXPERT=y
 CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
 CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
 CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
@@ -20,7 +18,6 @@ CONFIG_CMDLINE_FORCE=y
 CONFIG_HIGHMEM=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_CMA=y
@@ -28,6 +25,10 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 # CONFIG_IPV6 is not set
 CONFIG_BRIDGE=m
 CONFIG_PCI=y
@@ -43,6 +44,7 @@ CONFIG_NETDEVICES=y
 CONFIG_XILINX_EMACLITE=y
 CONFIG_XILINX_AXI_EMAC=y
 CONFIG_XILINX_LL_TEMAC=y
+CONFIG_MARVELL_PHY=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
@@ -77,14 +79,13 @@ CONFIG_TMPFS=y
 CONFIG_CRAMFS=y
 CONFIG_ROMFS_FS=y
 CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
 CONFIG_CIFS=y
-CONFIG_CIFS_STATS2=y
 CONFIG_ENCRYPTED_KEYS=y
 CONFIG_DMA_CMA=y
 CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
 CONFIG_KGDB=y
 CONFIG_KGDB_TESTS=y
 CONFIG_KGDB_KDB=y
-CONFIG_DEBUG_SLAB=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_DEBUG_SPINLOCK=y
index b13d8adf3be47dbfd6f65e1e63ee3217feafe04b..20d30f6265cdce2a915ddffc52d0bb67e6e0edac 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/string.h>
 
 #include <asm/bootinfo.h>
+#include <prom.h>
 
 int prom_argc;
 char **prom_argv;
index 2388d68786f4a7c40dcadfed78fd8ecfc91f4896..a7a6d31a7a4148ada6ad340d0723ef8c7a73f0be 100644 (file)
 #include <linux/mm.h>
 #include <linux/dma-map-ops.h> /* for dma_default_coherent */
 
+#include <asm/bootinfo.h>
 #include <asm/mipsregs.h>
 
 #include <au1000.h>
 
-extern void __init board_setup(void);
-extern void __init alchemy_set_lpj(void);
-
 static bool alchemy_dma_coherent(void)
 {
        switch (alchemy_get_cputype()) {
index f521874ebb07b22495b01972ca92828491311779..67f067706af27342317c4265d2b3dda97681d391 100644 (file)
@@ -847,7 +847,7 @@ int __init db1200_dev_setup(void)
        i2c_register_board_info(0, db1200_i2c_devs,
                                ARRAY_SIZE(db1200_i2c_devs));
        spi_register_board_info(db1200_spi_devs,
-                               ARRAY_SIZE(db1200_i2c_devs));
+                               ARRAY_SIZE(db1200_spi_devs));
 
        /* SWITCHES:    S6.8 I2C/SPI selector  (OFF=I2C  ON=SPI)
         *              S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
index fd91d9c9a2525ddd7efa870eff23da64f3b3e3df..6c6837181f5555f3f9b8ebf495c03a22bef735b4 100644 (file)
@@ -589,7 +589,7 @@ int __init db1550_dev_setup(void)
        i2c_register_board_info(0, db1550_i2c_devs,
                                ARRAY_SIZE(db1550_i2c_devs));
        spi_register_board_info(db1550_spi_devs,
-                               ARRAY_SIZE(db1550_i2c_devs));
+                               ARRAY_SIZE(db1550_spi_devs));
 
        c = clk_get(NULL, "psc0_intclk");
        if (!IS_ERR(c)) {
index 437a737c01dd05727b716dc4a913357a94511c00..46994f9bb8219cc23aa660a3cd387780d842e0ee 100644 (file)
@@ -147,21 +147,21 @@ static const struct gpio_keys_button
 bcm47xx_buttons_buffalo_whr_g125[] __initconst = {
        BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
        BCM47XX_GPIO_KEY(4, KEY_RESTART),
-       BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode swtich */
+       BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
 };
 
 static const struct gpio_keys_button
 bcm47xx_buttons_buffalo_whr_g54s[] __initconst = {
        BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
        BCM47XX_GPIO_KEY_H(4, KEY_RESTART),
-       BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode swtich */
+       BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
 };
 
 static const struct gpio_keys_button
 bcm47xx_buttons_buffalo_whr_hp_g54[] __initconst = {
        BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
        BCM47XX_GPIO_KEY(4, KEY_RESTART),
-       BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode swtich */
+       BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode switch */
 };
 
 static const struct gpio_keys_button
index 01aff80a59672dee1b675c3625aecb6f70eb52b9..99f321b6e417bd4250ab7cec31ae74ad2d396ec3 100644 (file)
@@ -702,7 +702,7 @@ static struct ssb_sprom bcm63xx_sprom = {
        .boardflags_hi          = 0x0000,
 };
 
-int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
+static int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
 {
        if (bus->bustype == SSB_BUSTYPE_PCI) {
                memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom));
index 86a6e25908664b59dafae3ca2297f70e296220c9..3144965fb7dc4fa78693201f614e7b883590ba75 100644 (file)
@@ -174,7 +174,7 @@ static void enetsw_set(struct clk *clk, int enable)
        }
 
        if (enable) {
-               /* reset switch core afer clock change */
+               /* reset switch core after clock change */
                bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
                msleep(10);
                bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 0);
@@ -304,7 +304,7 @@ static void xtm_set(struct clk *clk, int enable)
        bcm_hwclock_set(CKCTL_6368_SAR_EN, enable);
 
        if (enable) {
-               /* reset sar core afer clock change */
+               /* reset sar core after clock change */
                bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 1);
                mdelay(1);
                bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 0);
index d277b4dc6c688eb394544b556e0941a54654c1b9..f94151f7c96fe1d988cd3d88f8451bbdf012955c 100644 (file)
@@ -26,7 +26,7 @@ static struct platform_device bcm63xx_rng_device = {
        .resource       = rng_resources,
 };
 
-int __init bcm63xx_rng_register(void)
+static int __init bcm63xx_rng_register(void)
 {
        if (!BCMCPU_IS_6368())
                return -ENODEV;
index 3bc7f3bfc9ad5c5e45737bcf1510bfcd5b5483e7..5d6bf0445b299cf0e91a4f7992f134e5648ca1c2 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_uart.h>
 
 static struct resource uart0_resources[] = {
        {
index 42130914a3c210993c07d971449a424d40775060..302bf7ed5ad5abfaa6cb94e4a4e0dcdf1ffbceb1 100644 (file)
@@ -34,7 +34,7 @@ static struct platform_device bcm63xx_wdt_device = {
        },
 };
 
-int __init bcm63xx_wdt_register(void)
+static int __init bcm63xx_wdt_register(void)
 {
        wdt_resources[0].start = bcm63xx_regset_address(RSET_WDT);
        wdt_resources[0].end = wdt_resources[0].start;
index 2548013442f6d95bdda071f89cc112d97d8a0d0a..6240a8f88ea366b5d440f6de3416191dead812b8 100644 (file)
@@ -72,7 +72,7 @@ static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
  */
 
 #define BUILD_IPIC_INTERNAL(width)                                     \
-void __dispatch_internal_##width(int cpu)                              \
+static void __dispatch_internal_##width(int cpu)                       \
 {                                                                      \
        u32 pending[width / 32];                                        \
        unsigned int src, tgt;                                          \
index d811e3e03f819a5005a480d56d5aee5a090fcc3c..c13ddb544a23bf0ebfd6bd627c9ed022a44cda0e 100644 (file)
@@ -159,7 +159,7 @@ void __init plat_mem_setup(void)
        board_setup();
 }
 
-int __init bcm63xx_register_devices(void)
+static int __init bcm63xx_register_devices(void)
 {
        /* register gpiochip */
        bcm63xx_gpio_init();
index a86065854c0c8c6c92254c4d7746fda8e6801250..74b83807df30a7be13f1f9466753b2560ce9b50b 100644 (file)
@@ -178,7 +178,7 @@ int bcm63xx_timer_set(int id, int monotonic, unsigned int countdown_us)
 
 EXPORT_SYMBOL(bcm63xx_timer_set);
 
-int bcm63xx_timer_init(void)
+static int bcm63xx_timer_init(void)
 {
        int ret, irq;
        u32 reg;
index 2f1ac38fe1ccac751d1be526660981507a34aebb..95405292accdff5e4211cd8ac4cababf2788d028 100644 (file)
@@ -3,7 +3,7 @@
  * MIPS-specific debug support for pre-boot environment
  *
  * NOTE: putc() is board specific, if your board have a 16550 compatible uart,
- * please select SYS_SUPPORTS_ZBOOT_UART16550 for your machine. othewise, you
+ * please select SYS_SUPPORTS_ZBOOT_UART16550 for your machine. otherwise, you
  * need to implement your own putc().
  */
 #include <linux/compiler.h>
index 5795d0af1e1b2e9518bc4cbab56a4773c95ba78b..d237a834b85ee5c6a880d61368eae83467efd7e2 100644 (file)
@@ -25,8 +25,8 @@
        /* Clear BSS */
        PTR_LA  a0, _edata
        PTR_LA  a2, _end
-1:     sw      zero, 0(a0)
-       addiu   a0, a0, 4
+1:     PTR_S   zero, 0(a0)
+       PTR_ADDIU a0, a0, PTRSIZE
        bne     a2, a0, 1b
 
        PTR_LA  a0, (.heap)          /* heap address */
index 6972b97235daf4a97a57205180ed5b63ebf98409..549c5d6ef6d7c7443634acbdc2a6d8e9c3139aa1 100644 (file)
@@ -443,7 +443,7 @@ int main(int argc, char *argv[])
        efh.f_symptr = 0;
        efh.f_nsyms = 0;
        efh.f_opthdr = sizeof eah;
-       efh.f_flags = 0x100f;   /* Stripped, not sharable. */
+       efh.f_flags = 0x100f;   /* Stripped, not shareable. */
 
        memset(esecs, 0, sizeof esecs);
        strcpy(esecs[0].s_name, ".text");
index 124817609ce0136c4742dd2cf5674b6d26bbb7f1..af62a210a40b9147047fd8b46204550e79fa1ab0 100644 (file)
@@ -113,7 +113,7 @@ static struct clocksource clocksource_mips = {
 
 unsigned long long notrace sched_clock(void)
 {
-       /* 64-bit arithmatic can overflow, so use 128-bit.  */
+       /* 64-bit arithmetic can overflow, so use 128-bit.  */
        u64 t1, t2, t3;
        unsigned long long rv;
        u64 mult = clocksource_mips.mult;
index b7019d21808e04719e9913270dbab0c90438eadb..76446db66defdc9780ef9526b095aec7979a5a2a 100644 (file)
@@ -143,7 +143,7 @@ static void cvmx_boot_vector_init(void *mem)
                uint64_t v = _cvmx_bootvector_data[i];
 
                if (OCTEON_IS_OCTEON1PLUS() && (i == 0 || i == 7))
-                       v &= 0xffffffff00000000ull; /* KScratch not availble. */
+                       v &= 0xffffffff00000000ull; /* KScratch not available */
                cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8);
                cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, v);
        }
index 334bf8e577e50170acc021d8e160d829891e91b6..628ebdf4b9c55df52c08a70c352649cc0e7a4ff4 100644 (file)
@@ -264,7 +264,7 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
         * Convert !0 address_min and 0 address_max to special case of
         * range that specifies an exact memory block to allocate.  Do
         * this before other checks and adjustments so that this
-        * tranformation will be validated.
+        * transformation will be validated.
         */
        if (address_min && !address_max)
                address_max = address_min + req_size;
index aa7bbf8d0df558c74f4bb438a18985ff7c30c37c..042a6bc44b5c25ae7c15b748f52c5a3935297eae 100644 (file)
@@ -192,7 +192,7 @@ cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
 }
 
 /*
- * Shutdown a queue a free it's command buffers to the FPA. The
+ * Shutdown a queue and free its command buffers to the FPA. The
  * hardware connected to the queue must be stopped before this
  * function is called.
  *
@@ -285,7 +285,7 @@ int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
 
 /*
  * Return the command buffer to be written to. The purpose of this
- * function is to allow CVMX routine access t othe low level buffer
+ * function is to allow CVMX routine access tthe low level buffer
  * for initial hardware setup. User applications should not call this
  * function directly.
  *
index 607b4e65957977adcd03a467f07bbfa312c5be9d..1fceb7fd2c944cdc151f5b40a0c114deb1639e7d 100644 (file)
@@ -103,7 +103,7 @@ uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
 /**
  * Shift long sequences of zeros into the QLM JTAG chain. It is
  * common to need to shift more than 32 bits of zeros into the
- * chain. This function is a convience wrapper around
+ * chain. This function is a convenience wrapper around
  * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
  * zeros at a time.
  *
index 15faca494c80720c120569463b4da3ade29e3c9f..6e70b859a0ac6c6db6eb13e63597a41b7e69ffcc 100644 (file)
@@ -615,7 +615,7 @@ int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
        /*
         * Each packet has a 12 bytes of interframe gap, an 8 byte
         * preamble, and a 4 byte CRC. These are not included in the
-        * per word count. Multiply by 8 to covert to bits and divide
+        * per word count. Multiply by 8 to convert to bits and divide
         * by 256 for limit granularity.
         */
        pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
index f76783c24338b9a236eb3a3cea172c491ef51139..5e1dd4e6e82fb8e1438014403aeba8159ae0d468 100644 (file)
@@ -973,7 +973,7 @@ int __init octeon_prune_device_tree(void)
                         * zero.
                         */
 
-                       /* Asume that CS1 immediately follows. */
+                       /* Assume that CS1 immediately follows. */
                        mio_boot_reg_cfg.u64 =
                                cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs + 1));
                        region1_base = mio_boot_reg_cfg.s.base << 16;
index 2e099d55a564a6ecf3dc347ace84ad25e4278dd9..9a266bf7833993b5facbdb63c97e555ad4d9ce27 100644 (file)
@@ -23,9 +23,6 @@
 
 #include <cobalt.h>
 
-extern void cobalt_machine_restart(char *command);
-extern void cobalt_machine_halt(void);
-
 const char *get_system_type(void)
 {
        switch (cobalt_board_id) {
index b51f738a39a05ad9bd4c41971027774365ec30da..4714074c8bd7f557ee57e7f0354d1f411bb1e04d 100644 (file)
@@ -287,7 +287,8 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
 CONFIG_FSCACHE_STATS=y
 CONFIG_CACHEFILES=m
 CONFIG_PROC_KCORE=y
index 38f17b6584218739adbe4c8139f21be774101cf5..3389e6e885d9fa104a5342cd1d5a22fe5639c36c 100644 (file)
@@ -238,7 +238,8 @@ CONFIG_BTRFS_FS=m
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=m
 CONFIG_AUTOFS_FS=m
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
index 07839a4b397e5bcc006a68d06286a3d9d8d11875..78f4987520664b4e606e85ef3a7d78183a205aa0 100644 (file)
@@ -356,7 +356,8 @@ CONFIG_QFMT_V2=m
 CONFIG_AUTOFS_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_VIRTIO_FS=m
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
 CONFIG_MSDOS_FS=m
index 166d2ad372d142d9a919d83d073588f4f02ea80a..54774f90c23eafa397784f15d8f38b40c5a1254b 100644 (file)
@@ -68,7 +68,8 @@ CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
 CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
index 66188739f54d20a41ce18acb0a88a4fdf16e8718..fb78e6fd5de4804e221fba63bceeb4dcd4a492a9 100644 (file)
@@ -37,7 +37,7 @@ static unsigned int nr_prom_mem __initdata;
  */
 #define ARC_PAGE_SHIFT 12
 
-struct linux_mdesc * __init ArcGetMemoryDescriptor(struct linux_mdesc *Current)
+static struct linux_mdesc * __init ArcGetMemoryDescriptor(struct linux_mdesc *Current)
 {
        return (struct linux_mdesc *) ARC_CALL1(get_mdesc, Current);
 }
index 5e9e840a93144cf7700c5f7b39fb6b66e9bb3794..93e1e70393eeb9a933835ac8ac72f5388d66950c 100644 (file)
 /*
  * For 64bit kernels working with a 32bit ARC PROM pointer arguments
  * for ARC calls need to reside in CKEG0/1. But as soon as the kernel
- * switches to it's first kernel thread stack is set to an address in
+ * switches to its first kernel thread stack is set to an address in
  * XKPHYS, so anything on stack can't be used anymore. This is solved
- * by using a * static declartion variables are put into BSS, which is
+ * by using a * static declaration variables are put into BSS, which is
  * linked to a CKSEG0 address. Since this is only used on UP platforms
- * there is not spinlock needed
+ * there is no spinlock needed
  */
 #define O32_STATIC     static
 #else
index f36c2519ed9768b8eb570e9b8feb0f1ad0bf8de5..1f14132b3fc98afb6c44de0b7efda4d820978278 100644 (file)
@@ -97,6 +97,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
                __flush_cache_vmap();
 }
 
+#define flush_cache_vmap_early(start, end)     do { } while (0)
+
 extern void (*__flush_cache_vunmap)(void);
 
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
index 4044eaf989ac7dad0f2094c5d4cfab05ac9fb5c3..0921ddda11a4b353c1c4d754417d3de4d003c12f 100644 (file)
@@ -241,7 +241,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
        "       .set    pop"
        : "=&r" (sum), "=&r" (tmp)
        : "r" (saddr), "r" (daddr),
-         "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));
+         "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)
+       : "memory");
 
        return csum_fold(sum);
 }
index c7013e1cb53fb780a054d2423425f032c3f9159e..e70392429246e12c1a1af79025885bf487e2d2b0 100644 (file)
@@ -10,7 +10,7 @@
 
 /*
  * mips_debugfs_dir corresponds to the "mips" directory at the top level
- * of the DebugFS hierarchy. MIPS-specific DebugFS entires should be
+ * of the DebugFS hierarchy. MIPS-specific DebugFS entries should be
  * placed beneath this directory.
  */
 extern struct dentry *mips_debugfs_dir;
index 27415a288adf56743f370e5017762d06ce3f5e5a..dc397f630c6608c85ba5224686e3470e5d5dc82e 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/io.h>
 #include <linux/memblock.h>
 
-#define dmi_early_remap(x, l)          ioremap_cache(x, l)
+#define dmi_early_remap(x, l)          ioremap(x, l)
 #define dmi_early_unmap(x, l)          iounmap(x)
 #define dmi_remap(x, l)                        ioremap_cache(x, l)
 #define dmi_unmap(x)                   iounmap(x)
index 85bbd967e05fa3ad2001fe1624d41304d951d745..af58d6ae06b85ea1f36ecc1cc676f9352d4a3ed1 100644 (file)
@@ -159,7 +159,7 @@ void iounmap(const volatile void __iomem *addr);
  * address is not guaranteed to be usable directly as a virtual
  * address.
  *
- * This version of ioremap ensures that the memory is marked cachable by
+ * This version of ioremap ensures that the memory is marked cacheable by
  * the CPU.  Also enables full write-combining.         Useful for some
  * memory-like regions on I/O busses.
  */
@@ -177,7 +177,7 @@ void iounmap(const volatile void __iomem *addr);
  * address is not guaranteed to be usable directly as a virtual
  * address.
  *
- * This version of ioremap ensures that the memory is marked uncachable
+ * This version of ioremap ensures that the memory is marked uncacheable
  * but accelerated by means of write-combining feature. It is specifically
  * useful for PCIe prefetchable windows, which may vastly improve a
  * communications performance. If it was determined on boot stage, what
index 081be98c71ef48c698f4aa6ba14945239a666a9d..ff5d388502d4ab56ec28d71ad4126d542bb65977 100644 (file)
@@ -39,7 +39,7 @@ extern void jump_label_apply_nops(struct module *mod);
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
+       asm goto("1:\t" B_INSN " 2f\n\t"
                "2:\t.insn\n\t"
                ".pushsection __jump_table,  \"aw\"\n\t"
                WORD_INSN " 1b, %l[l_yes], %0\n\t"
@@ -53,7 +53,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t"
+       asm goto("1:\t" J_INSN " %l[l_yes]\n\t"
                ".pushsection __jump_table,  \"aw\"\n\t"
                WORD_INSN " 1b, %l[l_yes], %0\n\t"
                ".popsection\n\t"
index 54a85f1d4f2c8d8822f25974c000d2d7aafc86c0..179f320cc2313b985d3a6d03c6437c85aa69a7dc 100644 (file)
@@ -810,8 +810,6 @@ int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
 pgd_t *kvm_pgd_alloc(void);
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-
 /* Emulation */
 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
 int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
index a7eec3364a64abb60f1dae67ad26c80738878533..41546777902ba0fe25af0f442c688169f9220b48 100644 (file)
 
 #include <asm/cpu.h>
 
+void alchemy_set_lpj(void);
+void board_setup(void);
+
 /* helpers to access the SYS_* registers */
 static inline unsigned long alchemy_rdsys(int regofs)
 {
index 0a0cd4270c6f26966ffdfaa781a24c8f759728db..b82e513c8523a42c4c7b126dcc2718d453b97639 100644 (file)
@@ -259,7 +259,7 @@ static inline void set_dma_mode(unsigned int dmanr, unsigned int mode)
        if (!chan)
                return;
        /*
-        * set_dma_mode is only allowed to change endianess, direction,
+        * set_dma_mode is only allowed to change endianness, direction,
         * transfer size, device FIFO width, and coherency settings.
         * Make sure anything else is masked off.
         */
index 82bc2766e2ec1386f3fa2492670650795e902d8a..d820b481ac564198096328176cbb103993392994 100644 (file)
@@ -435,7 +435,7 @@ static inline void alchemy_gpio2_disable_int(int gpio2)
 /**
  * alchemy_gpio2_enable -  Activate GPIO2 block.
  *
- * The GPIO2 block must be enabled excplicitly to work.         On systems
+ * The GPIO2 block must be enabled explicitly to work.  On systems
  * where this isn't done by the bootloader, this macro can be used.
  */
 static inline void alchemy_gpio2_enable(void)
index 5b9fce73f11d1301fa5724049bfd9f8625ea7061..97f9d5e9446d22e1371b1c9f6fe09d59610d27ed 100644 (file)
@@ -19,4 +19,7 @@ extern int cobalt_board_id;
 #define COBALT_BRD_ID_QUBE2    0x5
 #define COBALT_BRD_ID_RAQ2     0x6
 
+void cobalt_machine_halt(void);
+void cobalt_machine_restart(char *command);
+
 #endif /* __ASM_COBALT_H */
index 5855ba1bd1ecf323ed621f2bd8917f7cceeed428..40eaa72e54d070a42b88a9e5c1cf9ebceefd5219 100644 (file)
@@ -55,7 +55,7 @@ extern __iomem void *ltq_sys1_membase;
 #define ltq_sys1_w32_mask(clear, set, reg)   \
        ltq_sys1_w32((ltq_sys1_r32(reg) & ~(clear)) | (set), reg)
 
-/* allow the gpio and pinctrl drivers to talk to eachother */
+/* allow the gpio and pinctrl drivers to talk to each other */
 extern int pinctrl_falcon_get_range_size(int id);
 extern void pinctrl_falcon_add_gpio_range(struct pinctrl_gpio_range *range);
 
index 545f91f2ae16ac9840464b45bccb5bdbf684633e..721eafc4644e509e6f43db746fbb00ed83a18699 100644 (file)
@@ -42,7 +42,7 @@ struct loongson_fan_policy {
        /* period between two check. (Unit: S) */
        u8      adjust_period;
 
-       /* fan adjust usually depend on a temprature input */
+       /* fan adjust usually depend on a temperature input */
        get_temp_fun    depend_temp;
 
        /* up_step/down_step used when type is STEP_SPEED_POLICY */
index b5be7511f6cde72135899d2e17f43f5ec5a779c1..fec767507604923ffc34358b0fdf3170a68f94b0 100644 (file)
@@ -227,7 +227,7 @@ static inline void csr_writeq(u64 val, u32 reg)
 #define LOONGSON_CSR_NODECNT   0x408
 #define LOONGSON_CSR_CPUTEMP   0x428
 
-/* PerCore CSR, only accessable by local cores */
+/* PerCore CSR, only accessible by local cores */
 #define LOONGSON_CSR_IPI_STATUS        0x1000
 #define LOONGSON_CSR_IPI_EN    0x1004
 #define LOONGSON_CSR_IPI_SET   0x1008
index d7e54971ec668bb86be18447ffccaeb0ae66343b..1ce4ba97852f7c54b9408385758f372464092e17 100644 (file)
  * The kernel is still located in 0x80000000(kseg0). However,
  * the physical mask has been shifted to 0x80000000 which exploits the alias
  * on the Malta board. As a result of which, we override the __pa_symbol
- * to peform direct mapping from virtual to physical addresses. In other
+ * to perform direct mapping from virtual to physical addresses. In other
  * words, the 0x80000000 virtual address maps to 0x80000000 physical address
  * which in turn aliases to 0x0. We do this in order to be able to use a flat
  * 2GB of memory (0x80000000 - 0xffffffff) so we can avoid the I/O hole in
  * 0x10000000 - 0x1fffffff.
  * The last 64KB of physical memory are reserved for correct HIGHMEM
- * macros arithmetics.
+ * macros arithmetic.
  *
  */
 
index 5368891d424b7724fff66f477f5bcd52db4be538..31a31fe78d7759edaa0e437f52694ea9be964c52 100644 (file)
@@ -16,7 +16,7 @@
  */
 
 /* Revision 1.48 autogenerated on 08/17/99 15:20:01 */
-/* This bonito64 version editted from bonito.h Revision 1.48 on 11/09/00 */
+/* This bonito64 version edited from bonito.h Revision 1.48 on 11/09/00 */
 
 #ifndef _ASM_MIPS_BOARDS_BONITO64_H
 #define _ASM_MIPS_BOARDS_BONITO64_H
index b54453f1648c1fb34e35beabaa2c6e7e00884423..5f3a7a9f42bf0d4b038343fe98451a9b5b0bf116 100644 (file)
@@ -22,7 +22,7 @@ extern void __iomem *mips_cpc_base;
  *                              the CPC
  *
  * Returns the default physical base address of the Cluster Power Controller
- * memory mapped registers. This is platform dependant & must therefore be
+ * memory mapped registers. This is platform dependent & must therefore be
  * implemented per-platform.
  */
 extern phys_addr_t mips_cpc_default_phys_base(void);
index 2d53704d9f2461e1dadfc93a16b96904c821f5c2..ec58cb76d076d54234d189b0cec4f26643a670ac 100644 (file)
@@ -98,7 +98,7 @@
 
 /*
  * R4640/R4650 cp0 register names.  These registers are listed
- * here only for completeness; without MMU these CPUs are not useable
+ * here only for completeness; without MMU these CPUs are not usable
  * by Linux.  A future ELKS port might take make Linux run on them
  * though ...
  */
 #define EXCCODE_THREAD         25      /* Thread exceptions (MT) */
 #define EXCCODE_DSPDIS         26      /* DSP disabled exception */
 #define EXCCODE_GE             27      /* Virtualized guest exception (VZ) */
-#define EXCCODE_CACHEERR       30      /* Parity/ECC occured on a core */
+#define EXCCODE_CACHEERR       30      /* Parity/ECC occurred on a core */
 
 /* Implementation specific trap codes used by MIPS cores */
 #define MIPS_EXCCODE_TLBPAR    16      /* TLB parity error exception */
index c1c0b3230e0a9cbd27393ea6cc307f9a11843abd..028bf1d6daeeb688aac4589029473ad393424d23 100644 (file)
@@ -114,7 +114,7 @@ struct cvmx_bootinfo {
 
        /*
         * flags indicating various configuration options.  These
-        * flags supercede the 'flags' variable and should be used
+        * flags supersede the 'flags' variable and should be used
         * instead if available.
         */
        uint32_t config_flags;
index a07a36f7d8141cf8d9946f57ec8bebfea080bafa..67e1b2162b194536af2924494c9f52e8422bec72 100644 (file)
@@ -145,7 +145,7 @@ typedef struct {
 /**
  * This structure contains the global state of all command queues.
  * It is stored in a bootmem named block and shared by all
- * applications running on Octeon. Tickets are stored in a differnet
+ * applications running on Octeon. Tickets are stored in a different
  * cache line that queue information to reduce the contention on the
  * ll/sc used to get a ticket. If this is not the case, the update
  * of queue state causes the ll/sc to fail quite often.
@@ -172,7 +172,7 @@ cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
                                                  int pool_size);
 
 /**
- * Shutdown a queue a free it's command buffers to the FPA. The
+ * Shutdown a queue and free its command buffers to the FPA. The
  * hardware connected to the queue must be stopped before this
  * function is called.
  *
@@ -194,7 +194,7 @@ int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
 
 /**
  * Return the command buffer to be written to. The purpose of this
- * function is to allow CVMX routine access t othe low level buffer
+ * function is to allow CVMX routine access to the low level buffer
  * for initial hardware setup. User applications should not call this
  * function directly.
  *
index 5fec8476e421238cef239cc4cea53fe5a13705f9..f18a7f24daf8249379e76202f3fc19a2f6e82061 100644 (file)
@@ -91,7 +91,7 @@ typedef enum {
 } cvmx_pko_status_t;
 
 /**
- * This enumeration represents the differnet locking modes supported by PKO.
+ * This enumeration represents the different locking modes supported by PKO.
  */
 typedef enum {
        /*
index a3b23811e0c3048d8862e56ea3625136cd17479f..21b4378244fafb1e84be59b00a96c7d2feb808f1 100644 (file)
@@ -1342,7 +1342,7 @@ static inline void cvmx_pow_tag_sw_wait(void)
  * This function does NOT wait for previous tag switches to complete,
  * so the caller must ensure that there is not a pending tag switch.
  *
- * @wait:   When set, call stalls until work becomes avaiable, or times out.
+ * @wait:   When set, call stalls until work becomes available, or times out.
  *              If not set, returns immediately.
  *
  * Returns: the WQE pointer from POW. Returns NULL if no work
@@ -1376,7 +1376,7 @@ static inline struct cvmx_wqe *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_
  * This function waits for any previous tag switch to complete before
  * requesting the new work.
  *
- * @wait:   When set, call stalls until work becomes avaiable, or times out.
+ * @wait:   When set, call stalls until work becomes available, or times out.
  *              If not set, returns immediately.
  *
  * Returns: the WQE pointer from POW. Returns NULL if no work
index 6c68517c2770274c966a5b0680de1ebd2e1a1378..e53b61a8e32f1790f4be2e44293b17756fe4039a 100644 (file)
@@ -54,7 +54,7 @@
 #define OM_CHECK_SUBMODEL        0x02000000
 /* Match all models previous than the one specified */
 #define OM_MATCH_PREVIOUS_MODELS  0x04000000
-/* Ignores the minor revison on newer parts */
+/* Ignores the minor revision on newer parts */
 #define OM_IGNORE_MINOR_REVISION  0x08000000
 #define OM_FLAG_MASK             0xff000000
 
 #define OCTEON_CN52XX_PASS2    OCTEON_CN52XX_PASS2_X
 
 /*
- * CN3XXX models with old revision enconding
+ * CN3XXX models with old revision encoding
  */
 #define OCTEON_CN38XX_PASS1    0x000d0000
 #define OCTEON_CN38XX_PASS2    0x000d0001
index 5978a8dfb917b0ec39b8c472794057d256047679..ef9585d96f6b08f2ba6d0edcb5031acdb6ca573d 100644 (file)
@@ -173,7 +173,7 @@ static inline unsigned long ___pa(unsigned long x)
        if (IS_ENABLED(CONFIG_64BIT)) {
                /*
                 * For MIPS64 the virtual address may either be in one of
-                * the compatibility segements ckseg0 or ckseg1, or it may
+                * the compatibility segments ckseg0 or ckseg1, or it may
                 * be in xkphys.
                 */
                return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
index 3fd6e22c108b415ecceac2f253faa29e32b9fb99..d993df6302dcf432e1c989efa334c1f392616c79 100644 (file)
@@ -23,7 +23,7 @@
 #ifdef CONFIG_PCI_DRIVERS_LEGACY
 
 /*
- * Each pci channel is a top-level PCI bus seem by CPU.         A machine  with
+ * Each PCI channel is a top-level PCI bus seen by CPU.         A machine with
  * multiple PCI channels may have multiple PCI host controllers or a
  * single controller supporting multiple channels.
  */
index 421e78c30253cbadd43aeb892ebdb4cf83ef0470..088623ba7b8b177e61020c962c27e9b293d7e3a5 100644 (file)
@@ -201,7 +201,7 @@ enum pgtable_bits {
  * The final layouts of the PTE bits are:
  *
  *   64-bit, R1 or earlier:     CCC D V G [S H] M A W R P
- *   32-bit, R1 or earler:      CCC D V G M A W R P
+ *   32-bit, R1 or earlier:     CCC D V G M A W R P
  *   64-bit, R2 or later:       CCC D V G RI/R XI [S H] M A W P
  *   32-bit, R2 or later:       CCC D V G RI/R XI M A W P
  */
index daf3cf244ea972c9a8bf134a09fa081931645425..d14d0e37ad02ddf10b42cfed590c65f97f8de424 100644 (file)
@@ -60,6 +60,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
                                            unsigned long val)
 {
        regs->cp0_epc = val;
+       regs->cp0_cause &= ~CAUSEF_BD;
 }
 
 /* Query offset/name of register from its name/offset */
@@ -154,6 +155,8 @@ static inline long regs_return_value(struct pt_regs *regs)
 }
 
 #define instruction_pointer(regs) ((regs)->cp0_epc)
+extern unsigned long exception_ip(struct pt_regs *regs);
+#define exception_ip(regs) exception_ip(regs)
 #define profile_pc(regs) instruction_pointer(regs)
 
 extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
index 3a070cec97e7d7be66f21cdb6a1b43c437ed632b..5e96f9d3262497e60e221bc2a8e58d2d23907d9d 100644 (file)
@@ -96,7 +96,7 @@ struct sgimc_regs {
        volatile u32 lbursttp;  /* Time period for long bursts */
 
        /* MC chip can drive up to 4 bank 4 SIMMs each. All SIMMs in bank must
-        * be the same size. The size encoding for supported SIMMs is bellow */
+        * be the same size. The size encoding for supported SIMMs is below */
        u32 _unused11[9];
        volatile u32 mconfig0;  /* Memory config register zero */
        u32 _unused12;
index 117f85e4bef59a7b315d497fa56aecf594eeba7a..3d1670b3e05273423ad0fef59d4a67d671ad53c6 100644 (file)
@@ -851,7 +851,7 @@ typedef union kldev_s {          /* for device structure allocation */
 /*
  * TBD - Allocation issues.
  *
- * Do we need to Mark off sepatate heaps for lboard_t, rboard_t, component,
+ * Do we need to Mark off separate heaps for lboard_t, rboard_t, component,
  * errinfo and allocate from them, or have a single heap and allocate all
  * structures from it. Debug is easier in the former method since we can
  * dump all similar structs in one command, but there will be lots of holes,
index aabd097933fe97f599361dd002d6e9f2fcdd999b..44c04a82d0b7d4ef0a983ce97dbbae9fbe8db271 100644 (file)
@@ -19,7 +19,7 @@
  *
  * Ordering barriers can be more efficient than completion barriers, since:
  *
- *   a) Ordering barriers only require memory access instructions which preceed
+ *   a) Ordering barriers only require memory access instructions which precede
  *      them in program order (older instructions) to reach a point in the
  *      load/store datapath beyond which reordering is not possible before
  *      allowing memory access instructions which follow them (younger
index ecae7470faa4058403b259c9ea199267684e30db..b9d76e8ac5a23d553c2a1285a88eeeb142c09762 100644 (file)
@@ -27,7 +27,7 @@ struct thread_info {
        unsigned long           flags;          /* low level flags */
        unsigned long           tp_value;       /* thread pointer */
        __u32                   cpu;            /* current CPU */
-       int                     preempt_count;  /* 0 => preemptable, <0 => BUG */
+       int                     preempt_count;  /* 0 => preemptible, <0 => BUG */
        struct pt_regs          *regs;
        long                    syscall;        /* syscall number */
 };
index 2e107886f97ac73d38c6ad70c5fb56e7d2a9cd73..7ef06dcdc46ec5d6edcd0647038403b24419df4a 100644 (file)
@@ -46,7 +46,7 @@ typedef unsigned int cycles_t;
  *
  * There is a suggested workaround and also the erratum can't strike if
  * the compare interrupt isn't being used as the clock source device.
- * However for now the implementaton of this function doesn't get these
+ * However for now the implementation of this function doesn't get these
  * fine details right.
  */
 static inline int can_use_mips_counter(unsigned int prid)
index a327ca21270eccfa5ace49ca33f502242dd825e1..6cd88191fefa9ce6d8323dd70d45aa9a198ac9d0 100644 (file)
@@ -32,7 +32,7 @@ static inline unsigned long get_vdso_base(void)
 #else
        /*
         * Get the base load address of the VDSO. We have to avoid generating
-        * relocations and references to the GOT because ld.so does not peform
+        * relocations and references to the GOT because ld.so does not perform
         * relocations on the VDSO. We use the current offset from the VDSO base
         * and perform a PC-relative branch which gives the absolute address in
         * ra, and take the difference. The assembler chokes on
index c6e1fc77c9968874feefc79b7c94a165d0ad89d2..9c48d9a21aa01ff16019d25249f4bcb97120ce08 100644 (file)
@@ -88,7 +88,7 @@
 #define MADV_HUGEPAGE  14              /* Worth backing with hugepages */
 #define MADV_NOHUGEPAGE 15             /* Not worth backing with hugepages */
 
-#define MADV_DONTDUMP  16              /* Explicity exclude from the core dump,
+#define MADV_DONTDUMP  16              /* Explicitly exclude from core dump,
                                           overrides the coredump filter bits */
 #define MADV_DODUMP    17              /* Clear the MADV_NODUMP flag */
 
index 128af72f2dfead88156e27b8dde059613820dda5..d546642fc67e6702620a2e535d155c21314b3909 100644 (file)
@@ -62,7 +62,7 @@ struct msqid64_ds {
        unsigned long  __unused5;
 };
 #else
-#warning no endianess set
+#warning no endianness set
 #endif
 
 #endif /* _ASM_MSGBUF_H */
index de7460c3a72e914b573e7b2680dbd9d56866f58f..bda7f193baab9f6709e43212fbc2960a0eae1cc0 100644 (file)
@@ -1138,7 +1138,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                 * This processor doesn't have an MMU, so it's not
                 * "real easy" to run Linux on it. It is left purely
                 * for documentation.  Commented out because it shares
-                * it's c0_prid id number with the TX3900.
+                * its c0_prid id number with the TX3900.
                 */
                c->cputype = CPU_R4650;
                __cpu_name[cpu] = "R4650";
index 5582a4ca1e9e36ad5dac4d23caa4d6c4bfb11a5d..7aa2c2360ff60219bb8fb9f03a8a528edf7f53a1 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <asm/cpu-features.h>
 #include <asm/cpu-info.h>
+#include <asm/fpu.h>
 
 #ifdef CONFIG_MIPS_FP_SUPPORT
 
@@ -309,6 +310,11 @@ void mips_set_personality_nan(struct arch_elf_state *state)
        struct cpuinfo_mips *c = &boot_cpu_data;
        struct task_struct *t = current;
 
+       /* Do this early so t->thread.fpu.fcr31 won't be clobbered in case
+        * we are preempted before the lose_fpu(0) in start_thread.
+        */
+       lose_fpu(0);
+
        t->thread.fpu.fcr31 = c->fpu_csr31;
        switch (state->nan_2008) {
        case 0:
index b6de8e88c1bd428f1f1b7eee6b1118c9304e3601..a572ce36a24f21d25bf99943deac10545f7c3de5 100644 (file)
@@ -272,18 +272,17 @@ NESTED(except_vec_vi, 0, sp)
        .set    push
        .set    noreorder
        PTR_LA  v1, except_vec_vi_handler
-FEXPORT(except_vec_vi_lui)
-       lui     v0, 0           /* Patched */
        jr      v1
 FEXPORT(except_vec_vi_ori)
-        ori    v0, 0           /* Patched */
+        ori    v0, zero, 0             /* Offset in vi_handlers[] */
        .set    pop
        END(except_vec_vi)
 EXPORT(except_vec_vi_end)
 
 /*
  * Common Vectored Interrupt code
- * Complete the register saves and invoke the handler which is passed in $v0
+ * Complete the register saves and invoke the handler, $v0 holds
+ * offset into vi_handlers[]
  */
 NESTED(except_vec_vi_handler, 0, sp)
        SAVE_TEMP
@@ -331,6 +330,7 @@ NESTED(except_vec_vi_handler, 0, sp)
        /* Save task's sp on IRQ stack so that unwinding can follow it */
        LONG_S  s1, 0(sp)
 2:
+       PTR_L   v0, vi_handlers(v0)
        jalr    v0
 
        /* Restore sp */
index 316b27d0d2fbd0646460438e25c0314c4a4d3575..dc39f5b3fb8307d7a5a3cb1e02beda10265d62b9 100644 (file)
@@ -55,7 +55,7 @@ NOKPROBE_SYMBOL(insn_has_delayslot);
  * one; putting breakpoint on top of atomic ll/sc pair is bad idea;
  * so we need to prevent it and refuse kprobes insertion for such
  * instructions; cannot do much about breakpoint in the middle of
- * ll/sc pair; it is upto user to avoid those places
+ * ll/sc pair; it is up to user to avoid those places
  */
 static int insn_has_ll_or_sc(union mips_instruction insn)
 {
index f88ce78e13e3a2de9f51d33531398c96d807d7f2..6062e6fa589a87158a1ed1d09eff6eb9152e0e19 100644 (file)
@@ -28,6 +28,8 @@ __init void mips_set_machine_name(const char *name)
 
        strscpy(mips_machine_name, name, sizeof(mips_machine_name));
        pr_info("MIPS: machine is %s\n", mips_get_machine_name());
+
+       dump_stack_set_arch_desc(name);
 }
 
 char *mips_get_machine_name(void)
index d9df543f7e2c4cd17b29522840154f1e323cacb0..59288c13b581b89ccb46214c7be02126a017dab2 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/seccomp.h>
 #include <linux/ftrace.h>
 
+#include <asm/branch.h>
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
 #include <asm/cpu-info.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
 
+unsigned long exception_ip(struct pt_regs *regs)
+{
+       return exception_epc(regs);
+}
+EXPORT_SYMBOL(exception_ip);
+
 /*
  * Called by kernel/ptrace.c when detaching..
  *
index 58fc8d089402bd86649596ef422aaee7c7814fcb..7eeeaf1ff95d26e6b3256f3cf20e603d312beef2 100644 (file)
@@ -380,7 +380,7 @@ void *__init relocate_kernel(void)
                }
 #endif /* CONFIG_USE_OF */
 
-               /* Copy the kernel to it's new location */
+               /* Copy the kernel to its new location */
                memcpy(loc_new, &_text, kernel_length);
 
                /* Perform relocations on the new kernel */
index 8f0a7263a9d61ff87ee2af5872e02a380bc6cebc..de894a0211d7a687bc08872c3a6b318a30b379cb 100644 (file)
@@ -70,7 +70,7 @@ copy_word:
 done:
 #ifdef CONFIG_SMP
        /* kexec_flag reset is signal to other CPUs what kernel
-          was moved to it's location. Note - we need relocated address
+          was moved to its location. Note - we need relocated address
           of kexec_flag.  */
 
        bal             1f
index 328426c3ed6f9f74f2217a1c7811ae9fde94f0f9..9c30de1515976159e0210724a6e19313832a0fad 100644 (file)
@@ -147,7 +147,7 @@ static unsigned long __init init_initrd(void)
        /*
         * Board specific code or command line parser should have
         * already set up initrd_start and initrd_end. In these cases
-        * perfom sanity checks and use them if all looks good.
+        * perform sanity checks and use them if all looks good.
         */
        if (!initrd_start || initrd_end <= initrd_start)
                goto disable;
@@ -322,11 +322,11 @@ static void __init bootmem_init(void)
                panic("Incorrect memory mapping !!!");
 
        if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
+               max_low_pfn = PFN_DOWN(HIGHMEM_START);
 #ifdef CONFIG_HIGHMEM
-               highstart_pfn = PFN_DOWN(HIGHMEM_START);
+               highstart_pfn = max_low_pfn;
                highend_pfn = max_pfn;
 #else
-               max_low_pfn = PFN_DOWN(HIGHMEM_START);
                max_pfn = max_low_pfn;
 #endif
        }
index ccbf580827f6e74f904070e48ccd1e39d0b5dce4..4a10f18a880604df29b5a7130ea2682b6c679c42 100644 (file)
@@ -570,7 +570,7 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
                return (void __user __force *)(-1UL);
 
        /*
-        * FPU emulator may have it's own trampoline active just
+        * FPU emulator may have its own trampoline active just
         * above the user stack, 16-bytes before the next lowest
         * 16 byte boundary.  Try to avoid trashing it.
         */
index c58c0c3c5b40ceed5d61109ca29707f5cfc3572e..a1c1cb5de91321468f338d41a01df2f40efaf293 100644 (file)
@@ -2007,7 +2007,13 @@ unsigned long vi_handlers[64];
 
 void reserve_exception_space(phys_addr_t addr, unsigned long size)
 {
-       memblock_reserve(addr, size);
+       /*
+        * Reserving exception space on CPUs other than CPU0
+        * is too late, since memblock is unavailable by the
+        * time the APs are brought up.
+        */
+       if (smp_processor_id() == 0)
+               memblock_reserve(addr, size);
 }
 
 void __init *set_except_vector(int n, void *addr)
@@ -2055,108 +2061,71 @@ static void do_default_vi(void)
        panic("Caught unexpected vectored interrupt.");
 }
 
-static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
+void *set_vi_handler(int n, vi_handler_t addr)
 {
+       extern const u8 except_vec_vi[];
+       extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
+       extern const u8 rollback_except_vec_vi[];
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];
        int srssets = current_cpu_data.srsets;
        u16 *h;
        unsigned char *b;
+       const u8 *vec_start;
+       int ori_offset;
+       int handler_len;
 
        BUG_ON(!cpu_has_veic && !cpu_has_vint);
 
        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
-               srs = 0;
        } else
                handler = (unsigned long) addr;
        vi_handlers[n] = handler;
 
        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
 
-       if (srs >= srssets)
-               panic("Shadow register set %d not supported", srs);
-
        if (cpu_has_veic) {
                if (board_bind_eic_interrupt)
-                       board_bind_eic_interrupt(n, srs);
+                       board_bind_eic_interrupt(n, 0);
        } else if (cpu_has_vint) {
                /* SRSMap is only defined if shadow sets are implemented */
                if (srssets > 1)
-                       change_c0_srsmap(0xf << n*4, srs << n*4);
+                       change_c0_srsmap(0xf << n*4, 0 << n*4);
        }
 
-       if (srs == 0) {
-               /*
-                * If no shadow set is selected then use the default handler
-                * that does normal register saving and standard interrupt exit
-                */
-               extern const u8 except_vec_vi[], except_vec_vi_lui[];
-               extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
-               extern const u8 rollback_except_vec_vi[];
-               const u8 *vec_start = using_rollback_handler() ?
-                                     rollback_except_vec_vi : except_vec_vi;
+       vec_start = using_rollback_handler() ? rollback_except_vec_vi :
+                                              except_vec_vi;
 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
-               const int lui_offset = except_vec_vi_lui - vec_start + 2;
-               const int ori_offset = except_vec_vi_ori - vec_start + 2;
+       ori_offset = except_vec_vi_ori - vec_start + 2;
 #else
-               const int lui_offset = except_vec_vi_lui - vec_start;
-               const int ori_offset = except_vec_vi_ori - vec_start;
+       ori_offset = except_vec_vi_ori - vec_start;
 #endif
-               const int handler_len = except_vec_vi_end - vec_start;
+       handler_len = except_vec_vi_end - vec_start;
 
-               if (handler_len > VECTORSPACING) {
-                       /*
-                        * Sigh... panicing won't help as the console
-                        * is probably not configured :(
-                        */
-                       panic("VECTORSPACING too small");
-               }
-
-               set_handler(((unsigned long)b - ebase), vec_start,
-#ifdef CONFIG_CPU_MICROMIPS
-                               (handler_len - 1));
-#else
-                               handler_len);
-#endif
-               h = (u16 *)(b + lui_offset);
-               *h = (handler >> 16) & 0xffff;
-               h = (u16 *)(b + ori_offset);
-               *h = (handler & 0xffff);
-               local_flush_icache_range((unsigned long)b,
-                                        (unsigned long)(b+handler_len));
-       }
-       else {
+       if (handler_len > VECTORSPACING) {
                /*
-                * In other cases jump directly to the interrupt handler. It
-                * is the handler's responsibility to save registers if required
-                * (eg hi/lo) and return from the exception using "eret".
+                * Sigh... panicing won't help as the console
+                * is probably not configured :(
                 */
-               u32 insn;
+               panic("VECTORSPACING too small");
+       }
 
-               h = (u16 *)b;
-               /* j handler */
+       set_handler(((unsigned long)b - ebase), vec_start,
 #ifdef CONFIG_CPU_MICROMIPS
-               insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+                       (handler_len - 1));
 #else
-               insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+                       handler_len);
 #endif
-               h[0] = (insn >> 16) & 0xffff;
-               h[1] = insn & 0xffff;
-               h[2] = 0;
-               h[3] = 0;
-               local_flush_icache_range((unsigned long)b,
-                                        (unsigned long)(b+8));
-       }
+       /* insert offset into vi_handlers[] */
+       h = (u16 *)(b + ori_offset);
+       *h = n * sizeof(handler);
+       local_flush_icache_range((unsigned long)b,
+                                (unsigned long)(b+handler_len));
 
        return (void *)old_handler;
 }
 
-void *set_vi_handler(int n, vi_handler_t addr)
-{
-       return set_vi_srs_handler(n, addr, 0);
-}
-
 /*
  * Timer interrupt
  */
@@ -2416,7 +2385,7 @@ void __init trap_init(void)
                set_except_vector(i, handle_reserved);
 
        /*
-        * Copy the EJTAG debug exception vector handler code to it's final
+        * Copy the EJTAG debug exception vector handler code to its final
         * destination.
         */
        if (cpu_has_ejtag && board_ejtag_handler_setup)
index e9a0cfd02ae202f0679445d1191964d2633681b2..737d0d4fdcd35161bfd8865ef5de057a7c3da14a 100644 (file)
@@ -6,9 +6,9 @@
  * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
  * Copyright (C) 2013 Imagination Technologies Ltd.
  *
- * VPE spport module for loading a MIPS SP program into VPE1. The SP
+ * VPE support module for loading a MIPS SP program into VPE1. The SP
  * environment is rather simple since there are no TLBs. It needs
- * to be relocatable (or partiall linked). Initialize your stack in
+ * to be relocatable (or partially linked). Initialize your stack in
  * the startup-code. The loader looks for the symbol __start and sets
  * up the execution to resume from there. To load and run, simply do
  * a cat SP 'binary' to the /dev/vpe1 device.
index a8cdba75f98dde949c9cd86ef073027cc49bfad3..18e7a17d51158ee45901b4fda71f4996eb6d8a9a 100644 (file)
@@ -20,13 +20,11 @@ config KVM
        depends on HAVE_KVM
        depends on MIPS_FP_SUPPORT
        select EXPORT_UASM
-       select PREEMPT_NOTIFIERS
+       select KVM_COMMON
        select KVM_GENERIC_DIRTYLOG_READ_PROTECT
-       select HAVE_KVM_EVENTFD
        select HAVE_KVM_VCPU_ASYNC_IOCTL
        select KVM_MMIO
-       select MMU_NOTIFIER
-       select INTERVAL_TREE
+       select KVM_GENERIC_MMU_NOTIFIER
        select KVM_GENERIC_HARDWARE_ENABLING
        help
          Support for hosting Guest kernels.
index e64372b8f66afca427b78389f37b128e305c5cc2..0feec52222fb97b7f95453e0768d0f303f724e69 100644 (file)
@@ -531,7 +531,7 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
  * to be used for a period of time, but the exact ktime corresponding to the
  * final Count that must be restored is not known.
  *
- * It is gauranteed that a timer interrupt immediately after restore will be
+ * It is guaranteed that a timer interrupt immediately after restore will be
  * handled, but not if CP0_Compare is exactly at @count. That case should
  * already be handled when the hardware timer state is saved.
  *
index a3cf293658581ed6a599da2b870f3c10c67a6be1..0c45767eacf67429ea3910628a2f44c219a4da34 100644 (file)
@@ -108,10 +108,9 @@ void __init prom_init(void)
        prom_init_cmdline();
 
 #if defined(CONFIG_MIPS_MT_SMP)
-       if (cpu_has_mipsmt) {
-               lantiq_smp_ops = vsmp_smp_ops;
+       lantiq_smp_ops = vsmp_smp_ops;
+       if (cpu_has_mipsmt)
                lantiq_smp_ops.init_secondary = lantiq_init_secondary;
-               register_smp_ops(&lantiq_smp_ops);
-       }
+       register_smp_ops(&lantiq_smp_ops);
 #endif
 }
index 0084820cffaa8d9c905e2a87cd8510e1e2410c9f..b10300a527af243380b2d318a090aca8fbf25ea2 100644 (file)
@@ -17,7 +17,7 @@ static int __init loongson2_cpufreq_init(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
 
-       /* Only 2F revision and it's successors support CPUFreq */
+       /* Only 2F revision and its successors support CPUFreq */
        if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON2F)
                return platform_device_register(&loongson2_cpufreq_device);
 
index f25caa6aa9d306e84d719e97ea54f7b8faa449c1..553142c1f14fe2261d963b3784f3ed9e6c086cd2 100644 (file)
@@ -103,6 +103,9 @@ void __init szmem(unsigned int node)
        if (loongson_sysconf.vgabios_addr)
                memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr),
                                SZ_256K);
+       /* set nid for reserved memory */
+       memblock_set_node((u64)node << 44, (u64)(node + 1) << 44,
+                       &memblock.reserved, node);
 }
 
 #ifndef CONFIG_NUMA
index 8f61e93c0c5bcf07134cc22a06913c57e5140af4..68dafd6d3e2571f615e9c9e7d9b2c895de80468a 100644 (file)
@@ -132,6 +132,8 @@ static void __init node_mem_init(unsigned int node)
 
                /* Reserve pfn range 0~node[0]->node_start_pfn */
                memblock_reserve(0, PAGE_SIZE * start_pfn);
+               /* set nid for reserved memory on node 0 */
+               memblock_set_node(0, 1ULL << 44, &memblock.reserved, 0);
        }
 }
 
index 498bdc1bb0ede8aafbf1767065a9002fb2e4b26f..5a990cdef91a6fb44d4b89688022441a85a4303f 100644 (file)
@@ -516,7 +516,7 @@ static void __init loongson3_prepare_cpus(unsigned int max_cpus)
 }
 
 /*
- * Setup the PC, SP, and GP of a secondary processor and start it runing!
+ * Setup the PC, SP, and GP of a secondary processor and start it running!
  */
 static int loongson3_boot_secondary(int cpu, struct task_struct *idle)
 {
index b45bf026ee55e662f328ca4dc162b1365afd3563..10413b6f6662bcf99433725cf4c037a91308b155 100644 (file)
@@ -1650,7 +1650,7 @@ static void coherency_setup(void)
 
        /*
         * c0_status.cu=0 specifies that updates by the sc instruction use
-        * the coherency mode specified by the TLB; 1 means cachable
+        * the coherency mode specified by the TLB; 1 means cacheable
         * coherent update on write will be used.  Not all processors have
         * this bit and; some wire it to zero, others like Toshiba had the
         * silly idea of putting something else there ...
index 45dff5cd4b8e7a0278442acee6818a47bd72eb76..e528583d13311d8a0da246439f40ac613f47a808 100644 (file)
@@ -25,7 +25,7 @@
         * This is a very bad place to be.  Our cache error
         * detection has triggered.  If we have write-back data
         * in the cache, we may not be able to recover.  As a
-        * first-order desperate measure, turn off KSEG0 cacheing.
+        * first-order desperate measure, turn off KSEG0 caching.
         */
        mfc0    k0,CP0_CONFIG
        li      k1,~CONF_CM_CMASK
index 3c4fc97b9f394b0c2f2d22c5f59d23c0105cbfb3..0f3cec663a12cd51498157c390f974213cb5a658 100644 (file)
@@ -138,7 +138,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-               const struct iommu_ops *iommu, bool coherent)
+               bool coherent)
 {
        dev->dma_coherent = coherent;
 }
index c2e0e5aebe903199dfcd144b46e4e6c2b798c26d..39f129205b0c30d3926fe40bbb30921873d6e7c5 100644 (file)
@@ -422,8 +422,17 @@ void __init paging_init(void)
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+
+               max_mapnr = max_low_pfn;
+       } else if (highend_pfn) {
+               max_mapnr = highend_pfn;
+       } else {
+               max_mapnr = max_low_pfn;
        }
+#else
+       max_mapnr = max_low_pfn;
 #endif
+       high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 
        free_area_init(max_zone_pfns);
 }
@@ -459,13 +468,6 @@ void __init mem_init(void)
         */
        BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
 
-#ifdef CONFIG_HIGHMEM
-       max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
-#else
-       max_mapnr = max_low_pfn;
-#endif
-       high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
        maar_init();
        memblock_free_all();
        setup_zero_pages();     /* Setup zeroed pages.  */
index b6dad2fd5575d7dbfffa201ec5187ab065423108..d8243d61ef32529da9276cd887900bfb5bd130e6 100644 (file)
@@ -72,6 +72,10 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
            flags == _CACHE_UNCACHED)
                return (void __iomem *) CKSEG1ADDR(phys_addr);
 
+       /* Early remaps should use the unmapped regions til' VM is available */
+       if (WARN_ON_ONCE(!slab_is_available()))
+               return NULL;
+
        /*
         * Don't allow anybody to remap RAM that may be allocated by the page
         * allocator, since that could lead to races & data clobbering.
index f6db65410c655ae1fe27e37915e06a4b0d577174..173f7b36033bd222f542b3277f80435c8d6ea6c2 100644 (file)
@@ -183,7 +183,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
        int idx, pid;
 
        /*
-        * Handle debugger faulting in for debugee.
+        * Handle debugger faulting in for debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;
index 7e2a0011a6fb9b8516125a492b99db4ba16471cc..4106084e57d728679753880eec82892f693f61fb 100644 (file)
@@ -301,7 +301,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        int idx, pid;
 
        /*
-        * Handle debugger faulting in for debugee.
+        * Handle debugger faulting in for debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;
index b4e1c783e61779e346e19a8eca88aea1f63f2b19..4017fa0e2f68d2edb315e06bc0011184edf40349 100644 (file)
@@ -789,7 +789,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
        if (check_for_high_segbits) {
                /*
-                * The kernel currently implicitely assumes that the
+                * The kernel currently implicitly assumes that the
                 * MIPS SEGBITS parameter for the processor is
                 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
                 * allocate virtual addresses outside the maximum
@@ -1715,7 +1715,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 /*
  * Check if PTE is present, if not then jump to LABEL. PTR points to
  * the page table where this PTE is located, PTE will be re-loaded
- * with it's original value.
+ * with its original value.
  */
 static void
 build_pte_present(u32 **p, struct uasm_reloc **r,
index ace5db3fbd171fc63421cc716c2f00d4967a8501..40a878b672f5d39ddb06667a17fca7003f782e55 100644 (file)
@@ -95,7 +95,7 @@
 /*
  * Mapping of 64-bit eBPF registers to 32-bit native MIPS registers.
  *
- * 1) Native register pairs are ordered according to CPU endiannes, following
+ * 1) Native register pairs are ordered according to CPU endianness, following
  *    the MIPS convention for passing 64-bit arguments and return values.
  * 2) The eBPF return value, arguments and callee-saved registers are mapped
  *    to their native MIPS equivalents.
index 0d1b36ba1c21c9fcf9d57799ea764bc0dadf0b00..068113f5c49d489fb399b4d503fcdeccd7fd3639 100644 (file)
@@ -49,7 +49,7 @@ static int loongson_pcibios_config_access(unsigned char access_type,
                 */
 #ifdef CONFIG_CS5536
                /* cs5536_pci_conf_read4/write4() will call _rdmsr/_wrmsr() to
-                * access the regsters PCI_MSR_ADDR, PCI_MSR_DATA_LO,
+                * access the registers PCI_MSR_ADDR, PCI_MSR_DATA_LO,
                 * PCI_MSR_DATA_HI, which is bigger than PCI_MSR_CTRL, so, it
                 * will not go this branch, but the others. so, no calling dead
                 * loop here.
index 1c722dd0c1302d00abb0e8820ba0d23f1870caff..58625d1b64658ea53a2089ca5271d983f2fdd9aa 100644 (file)
@@ -453,7 +453,7 @@ static int alchemy_pci_probe(struct platform_device *pdev)
 
        /* we can't ioremap the entire pci config space because it's too large,
         * nor can we dynamically ioremap it because some drivers use the
-        * PCI config routines from within atomic contex and that becomes a
+        * PCI config routines from within atomic context and that becomes a
         * problem in get_vm_area().  Instead we use one wired TLB entry to
         * handle all config accesses for all busses.
         */
index e17d862cfa4c6ab586017506f23ca3459d96c6f1..a925842ee1256fd44988e082fba7d1fa25df11e0 100644 (file)
@@ -16,7 +16,7 @@
  * the CFG_SEL bit in the PCI_MISC_CONFIG register.
  *
  * Devices on the bus can perform DMA requests via chip BAR1. PCI host
- * controller BARs are programmend as if an external device is programmed.
+ * controller BARs are programmed as if an external device is programmed.
  * Which means that during configuration, IDSEL pin of the chip should be
  * asserted.
  *
index 80f7293166bb76dee8ec12be4e328a912ced2d7e..68a8cefed420bf41ce99fabffc424bce8eb101e7 100644 (file)
@@ -152,7 +152,7 @@ static int ltq_pci_startup(struct platform_device *pdev)
                temp_buffer &= ~0xf0000;
        /* enable internal arbiter */
        temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
-       /* enable internal PCI master reqest */
+       /* enable internal PCI master request */
        temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS));
 
        /* enable EBU request */
index d19d9d4563092180229677347f32c6c18464261d..36d12cea3512230f801a96164970bdf9bf241275 100644 (file)
@@ -376,7 +376,7 @@ static void octeon_pci_initialize(void)
        ctl_status.s.timer = 1;
        cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64);
 
-       /* Deassert PCI reset and advertize PCX Host Mode Device Capability
+       /* Deassert PCI reset and advertise PCX Host Mode Device Capability
           (64b) */
        cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4);
        cvmx_read_csr(CVMX_CIU_SOFT_PRST);
index 68d5211afea8b4004b6c1d1ec4ff186d61a6cc45..45ddbaa6c1237985b90e7cc49b80ab7fdefb9ecc 100644 (file)
@@ -114,7 +114,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
  *
  * The function is complicated by the ultimate brokenness of the IOC3 chip
  * which is used in SGI systems.  The IOC3 can only handle 32-bit PCI
- * accesses and does only decode parts of it's address space.
+ * accesses and does only decode parts of its address space.
  */
 static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 *value)
index c9edd3fb380df5ff7c978a3b81ed55bfa6759eee..2583e318e8c6b3d26182438a8732ce6a62fa88d8 100644 (file)
@@ -1037,7 +1037,7 @@ retry:
                        in_fif_p_count = dbg_data.s.data & 0xff;
                } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
 
-               /* Update in_fif_p_count for it's offset with respect to out_p_count */
+               /* Update in_fif_p_count for its offset with respect to out_p_count */
                in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
 
                /* Read the OUT_P_COUNT from the debug select */
index 137781d0bd0a12a00ad9ab15f002e190bffa9c7e..5a9fd3fe41d7c59ed184603c33b041e714689d7d 100644 (file)
@@ -175,7 +175,7 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info)
                 * mips_cm_probe() wipes out bootloader
                 * config for CM regions and we have to configure them
                 * again. This SoC cannot talk to pamlbus devices
-                * witout proper iocu region set up.
+                * without proper iocu region set up.
                 *
                 * FIXME: it would be better to do this with values
                 * from DT, but we need this very early because
index 27c14ede191eb7b1353e3a2cedd6d9d80bc2b385..9877fcc512b1578731fb6235a35256a61b172afb 100644 (file)
@@ -5,7 +5,7 @@
 
 obj-y  := ip27-berr.o ip27-irq.o ip27-init.o ip27-klconfig.o \
           ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o ip27-timer.o \
-          ip27-hubio.o ip27-xtalk.o
+          ip27-xtalk.o
 
 obj-$(CONFIG_EARLY_PRINTK)     += ip27-console.o
 obj-$(CONFIG_SMP)              += ip27-smp.o
index 923a63a51cda39482c227936c17f828ceae3227b..9eb497cb5d525c74e775ca741bd4ec664209280b 100644 (file)
@@ -22,6 +22,8 @@
 #include <asm/traps.h>
 #include <linux/uaccess.h>
 
+#include "ip27-common.h"
+
 static void dump_hub_information(unsigned long errst0, unsigned long errst1)
 {
        static char *err_type[2][8] = {
@@ -57,7 +59,7 @@ static void dump_hub_information(unsigned long errst0, unsigned long errst1)
               [st0.pi_stat0_fmt.s0_err_type] ? : "invalid");
 }
 
-int ip27_be_handler(struct pt_regs *regs, int is_fixup)
+static int ip27_be_handler(struct pt_regs *regs, int is_fixup)
 {
        unsigned long errst0, errst1;
        int data = regs->cp0_cause & 4;
index ed008a08464c208cc1944cfbd6fe5de31e14fee4..a0059fa13934539af5fb616120f66b77054a2219 100644 (file)
@@ -10,6 +10,7 @@ extern void hub_rt_clock_event_init(void);
 extern void hub_rtc_init(nasid_t nasid);
 extern void install_cpu_nmi_handler(int slice);
 extern void install_ipi(void);
+extern void ip27_be_init(void);
 extern void ip27_reboot_setup(void);
 extern const struct plat_smp_ops ip27_smp_ops;
 extern unsigned long node_getfirstfree(nasid_t nasid);
@@ -17,4 +18,5 @@ extern void per_cpu_init(void);
 extern void replicate_kernel_text(void);
 extern void setup_replication_mask(void);
 
+
 #endif /* __IP27_COMMON_H */
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
deleted file mode 100644 (file)
index 8352eb6..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.
- * Copyright (C) 2004 Christoph Hellwig.
- *
- * Support functions for the HUB ASIC - mostly PIO mapping related.
- */
-
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include <linux/mmzone.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/agent.h>
-#include <asm/sn/io.h>
-#include <asm/xtalk/xtalk.h>
-
-
-static int force_fire_and_forget = 1;
-
-/**
- * hub_pio_map -  establish a HUB PIO mapping
- *
- * @hub:       hub to perform PIO mapping on
- * @widget:    widget ID to perform PIO mapping for
- * @xtalk_addr: xtalk_address that needs to be mapped
- * @size:      size of the PIO mapping
- *
- **/
-unsigned long hub_pio_map(nasid_t nasid, xwidgetnum_t widget,
-                         unsigned long xtalk_addr, size_t size)
-{
-       unsigned i;
-
-       /* use small-window mapping if possible */
-       if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
-               return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);
-
-       if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
-               printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx"
-                               " too big (%ld)\n",
-                               nasid, widget, xtalk_addr, size);
-               return 0;
-       }
-
-       xtalk_addr &= ~(BWIN_SIZE-1);
-       for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
-               if (test_and_set_bit(i, hub_data(nasid)->h_bigwin_used))
-                       continue;
-
-               /*
-                * The code below does a PIO write to setup an ITTE entry.
-                *
-                * We need to prevent other CPUs from seeing our updated
-                * memory shadow of the ITTE (in the piomap) until the ITTE
-                * entry is actually set up; otherwise, another CPU might
-                * attempt a PIO prematurely.
-                *
-                * Also, the only way we can know that an entry has been
-                * received  by the hub and can be used by future PIO reads/
-                * writes is by reading back the ITTE entry after writing it.
-                *
-                * For these two reasons, we PIO read back the ITTE entry
-                * after we write it.
-                */
-               IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
-               __raw_readq(IIO_ITTE_GET(nasid, i));
-
-               return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
-       }
-
-       printk(KERN_WARNING "unable to establish PIO mapping for at"
-                       " hub %d widget %d addr 0x%lx\n",
-                       nasid, widget, xtalk_addr);
-       return 0;
-}
-
-
-/*
- * hub_setup_prb(nasid, prbnum, credits, conveyor)
- *
- *     Put a PRB into fire-and-forget mode if conveyor isn't set.  Otherwise,
- *     put it into conveyor belt mode with the specified number of credits.
- */
-static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
-{
-       union iprb_u prb;
-       int prb_offset;
-
-       /*
-        * Get the current register value.
-        */
-       prb_offset = IIO_IOPRB(prbnum);
-       prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
-
-       /*
-        * Clear out some fields.
-        */
-       prb.iprb_ovflow = 1;
-       prb.iprb_bnakctr = 0;
-       prb.iprb_anakctr = 0;
-
-       /*
-        * Enable or disable fire-and-forget mode.
-        */
-       prb.iprb_ff = force_fire_and_forget ? 1 : 0;
-
-       /*
-        * Set the appropriate number of PIO credits for the widget.
-        */
-       prb.iprb_xtalkctr = credits;
-
-       /*
-        * Store the new value to the register.
-        */
-       REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
-}
-
-/**
- * hub_set_piomode  -  set pio mode for a given hub
- *
- * @nasid:     physical node ID for the hub in question
- *
- * Put the hub into either "PIO conveyor belt" mode or "fire-and-forget" mode.
- * To do this, we have to make absolutely sure that no PIOs are in progress
- * so we turn off access to all widgets for the duration of the function.
- *
- * XXX - This code should really check what kind of widget we're talking
- * to. Bridges can only handle three requests, but XG will do more.
- * How many can crossbow handle to widget 0?  We're assuming 1.
- *
- * XXX - There is a bug in the crossbow that link reset PIOs do not
- * return write responses.  The easiest solution to this problem is to
- * leave widget 0 (xbow) in fire-and-forget mode at all times. This
- * only affects pio's to xbow registers, which should be rare.
- **/
-static void hub_set_piomode(nasid_t nasid)
-{
-       u64 ii_iowa;
-       union hubii_wcr_u ii_wcr;
-       unsigned i;
-
-       ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
-       REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
-
-       ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid, IIO_WCR);
-
-       if (ii_wcr.iwcr_dir_con) {
-               /*
-                * Assume a bridge here.
-                */
-               hub_setup_prb(nasid, 0, 3);
-       } else {
-               /*
-                * Assume a crossbow here.
-                */
-               hub_setup_prb(nasid, 0, 1);
-       }
-
-       /*
-        * XXX - Here's where we should take the widget type into
-        * when account assigning credits.
-        */
-       for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++)
-               hub_setup_prb(nasid, i, 3);
-
-       REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
-}
-
-/*
- * hub_pio_init         -  PIO-related hub initialization
- *
- * @hub:       hubinfo structure for our hub
- */
-void hub_pio_init(nasid_t nasid)
-{
-       unsigned i;
-
-       /* initialize big window piomaps for this hub */
-       bitmap_zero(hub_data(nasid)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
-       for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
-               IIO_ITTE_DISABLE(nasid, i);
-
-       hub_set_piomode(nasid);
-}
index a0dd3bd2b81b359491b447917486890ebc18fd4b..8f5299b269e7e7d1b104d6fa4616de4f7fdfc34d 100644 (file)
@@ -23,6 +23,8 @@
 #include <asm/sn/intr.h>
 #include <asm/sn/irq_alloc.h>
 
+#include "ip27-common.h"
+
 struct hub_irq_data {
        u64     *irq_mask[2];
        cpuid_t cpu;
index f79c4839371661237141b866d89743a101411c53..b8ca94cfb4fef34b42f9e5307e7dcfc09ef8a6d2 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
+#include <asm/sgialib.h>
 
 #include <asm/sn/arch.h>
 #include <asm/sn/agent.h>
index 84889b57d5ff684e32bc2a1897583a0f4770853e..fc2816398d0cf04a48c1f704ade54a65b97e15f8 100644 (file)
@@ -11,6 +11,8 @@
 #include <asm/sn/arch.h>
 #include <asm/sn/agent.h>
 
+#include "ip27-common.h"
+
 #if 0
 #define NODE_NUM_CPUS(n)       CNODE_NUM_CPUS(n)
 #else
 typedef unsigned long machreg_t;
 
 static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-
-/*
- * Let's see what else we need to do here. Set up sp, gp?
- */
-void nmi_dump(void)
-{
-       void cont_nmi_dump(void);
-
-       cont_nmi_dump();
-}
+static void nmi_dump(void);
 
 void install_cpu_nmi_handler(int slice)
 {
@@ -53,7 +46,7 @@ void install_cpu_nmi_handler(int slice)
  * into the eframe format for the node under consideration.
  */
 
-void nmi_cpu_eframe_save(nasid_t nasid, int slice)
+static void nmi_cpu_eframe_save(nasid_t nasid, int slice)
 {
        struct reg_struct *nr;
        int             i;
@@ -129,7 +122,7 @@ void nmi_cpu_eframe_save(nasid_t nasid, int slice)
        pr_emerg("\n");
 }
 
-void nmi_dump_hub_irq(nasid_t nasid, int slice)
+static void nmi_dump_hub_irq(nasid_t nasid, int slice)
 {
        u64 mask0, mask1, pend0, pend1;
 
@@ -153,7 +146,7 @@ void nmi_dump_hub_irq(nasid_t nasid, int slice)
  * Copy the cpu registers which have been saved in the IP27prom format
  * into the eframe format for the node under consideration.
  */
-void nmi_node_eframe_save(nasid_t nasid)
+static void nmi_node_eframe_save(nasid_t nasid)
 {
        int slice;
 
@@ -170,8 +163,7 @@ void nmi_node_eframe_save(nasid_t nasid)
 /*
  * Save the nmi cpu registers for all cpus in the system.
  */
-void
-nmi_eframes_save(void)
+static void nmi_eframes_save(void)
 {
        nasid_t nasid;
 
@@ -179,8 +171,7 @@ nmi_eframes_save(void)
                nmi_node_eframe_save(nasid);
 }
 
-void
-cont_nmi_dump(void)
+static void nmi_dump(void)
 {
 #ifndef REAL_NMI_SIGNAL
        static atomic_t nmied_cpus = ATOMIC_INIT(0);
index b91f8c4fdc786011172f8111e7e0dfc3e04705e1..7c6dcf6e73f701c68595bd3b26677ff8d667b56a 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/io.h>
 
 #include <asm/sn/ioc3.h>
+#include <asm/setup.h>
 
 static inline struct ioc3_uartregs *console_uart(void)
 {
index 75a34684e7045977a89faa54b1ec740eb13af5ff..e8547636a7482a4a4c08738bccf7f246b8061d26 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/percpu.h>
 #include <linux/memblock.h>
 
+#include <asm/bootinfo.h>
 #include <asm/smp-ops.h>
 #include <asm/sgialib.h>
 #include <asm/time.h>
index a8e0c776ca6c628faa0b0ef4828de3fb4e9f51a2..b8a0e4cfa9ce882dcba3c0dc4e911716d47a457b 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/ip32/crime.h>
 #include <asm/ip32/mace.h>
 
+#include "ip32-common.h"
+
 struct sgi_crime __iomem *crime;
 struct sgi_mace __iomem *mace;
 
@@ -39,7 +41,7 @@ void __init crime_init(void)
               id, rev, field, (unsigned long) CRIME_BASE);
 }
 
-irqreturn_t crime_memerr_intr(unsigned int irq, void *dev_id)
+irqreturn_t crime_memerr_intr(int irq, void *dev_id)
 {
        unsigned long stat, addr;
        int fatal = 0;
@@ -90,7 +92,7 @@ irqreturn_t crime_memerr_intr(unsigned int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-irqreturn_t crime_cpuerr_intr(unsigned int irq, void *dev_id)
+irqreturn_t crime_cpuerr_intr(int irq, void *dev_id)
 {
        unsigned long stat = crime->cpu_error_stat & CRIME_CPU_ERROR_MASK;
        unsigned long addr = crime->cpu_error_addr & CRIME_CPU_ERROR_ADDR_MASK;
index 478b63b4c808f35456bb0b4ba69de4450edb7404..7cbc27941f928399c3cd5166741f5495c55b7eaa 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/ptrace.h>
 #include <asm/tlbdebug.h>
 
+#include "ip32-common.h"
+
 static int ip32_be_handler(struct pt_regs *regs, int is_fixup)
 {
        int data = regs->cp0_cause & 4;
diff --git a/arch/mips/sgi-ip32/ip32-common.h b/arch/mips/sgi-ip32/ip32-common.h
new file mode 100644 (file)
index 0000000..cfc0225
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __IP32_COMMON_H
+#define __IP32_COMMON_H
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+void __init crime_init(void);
+irqreturn_t crime_memerr_intr(int irq, void *dev_id);
+irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
+void __init ip32_be_init(void);
+void ip32_prepare_poweroff(void);
+
+#endif /* __IP32_COMMON_H */
index e21ea1de05e31953ce51f04122512cd27b2d9c46..29d04468a06b8f5c4004a25a18ad94dcb08013f9 100644 (file)
@@ -28,6 +28,8 @@
 #include <asm/ip32/mace.h>
 #include <asm/ip32/ip32_ints.h>
 
+#include "ip32-common.h"
+
 /* issue a PIO read to make sure no PIO writes are pending */
 static inline void flush_crime_bus(void)
 {
@@ -107,10 +109,6 @@ static inline void flush_mace_bus(void)
  * is quite different anyway.
  */
 
-/* Some initial interrupts to set up */
-extern irqreturn_t crime_memerr_intr(int irq, void *dev_id);
-extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
-
 /*
  * This is for pure CRIME interrupts - ie not MACE.  The advantage?
  * We get to split the register in half and do faster lookups.
index 3fc8d0a0bdfa45cc8b3aead0bd31144a874e17bb..5fee33744f674bdbdd777ba63b7d15f92d661a99 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/ip32/crime.h>
 #include <asm/bootinfo.h>
 #include <asm/page.h>
+#include <asm/sgialib.h>
 
 extern void crime_init(void);
 
index 18d1c115cd534a2d78a1ee5f8b53681e46fc021f..6bdc1421cda46cad28b5b253bf53703005ed09bf 100644 (file)
@@ -29,6 +29,8 @@
 #include <asm/ip32/crime.h>
 #include <asm/ip32/ip32_ints.h>
 
+#include "ip32-common.h"
+
 #define POWERDOWN_TIMEOUT      120
 /*
  * Blink frequency during reboot grace period and when panicked.
index 8019dae1721a811cef26fb75430a2b3ca151d6dd..aeb0805aae57bacfef7b95877042a6dc476a14a5 100644 (file)
@@ -26,8 +26,7 @@
 #include <asm/ip32/mace.h>
 #include <asm/ip32/ip32_ints.h>
 
-extern void ip32_be_init(void);
-extern void crime_init(void);
+#include "ip32-common.h"
 
 #ifdef CONFIG_SGI_O2MACE_ETH
 /*
index 5ae30b78d38d3bd5e25cadad8169b99364c45a36..d9249f5a632e0c4a921040df00590ca65c1a0434 100644 (file)
@@ -348,7 +348,7 @@ static void final_fixup(struct pci_dev *dev)
        unsigned char bist;
        int ret;
 
-       /* Do build-in self test */
+       /* Do built-in self test */
        ret = pci_read_config_byte(dev, PCI_BIST, &bist);
        if ((ret != PCIBIOS_SUCCESSFUL) || !(bist & PCI_BIST_CAPABLE))
                return;
index 348cea0977927a523022217bc3637b498a8e4185..81484a776b333a2d9c9b402461f296b3b091e219 100644 (file)
@@ -38,6 +38,7 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
 #define flush_icache_pages flush_icache_pages
 
 #define flush_cache_vmap(start, end)           flush_dcache_range(start, end)
+#define flush_cache_vmap_early(start, end)     do { } while (0)
 #define flush_cache_vunmap(start, end)         flush_dcache_range(start, end)
 
 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
index d14ccc948a29b920854b6c750febffac625619fd..5c845e8d59d92f8cd3594fccf1476503d8957149 100644 (file)
@@ -25,7 +25,6 @@ config PARISC
        select RTC_DRV_GENERIC
        select INIT_ALL_POSSIBLE
        select BUG
-       select BUILDTIME_TABLE_SORT
        select HAVE_KERNEL_UNCOMPRESSED
        select HAVE_PCI
        select HAVE_PERF_EVENTS
index 920db57b6b4cc866018c05dd00ca49142c7f949c..7486b3b3059491490f5626224f9177eb5efd6cdb 100644 (file)
@@ -50,12 +50,12 @@ export CROSS32CC
 
 # Set default cross compiler for kernel build
 ifdef cross_compiling
-       ifeq ($(CROSS_COMPILE),)
+        ifeq ($(CROSS_COMPILE),)
                CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
                CROSS_COMPILE := $(call cc-cross-prefix, \
                        $(foreach a,$(CC_ARCHES), \
                        $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
-       endif
+        endif
 endif
 
 ifdef CONFIG_DYNAMIC_FTRACE
index 74d17d7e759da9dfa89aa1a504b94de4554db16d..5937d5edaba1eac5a0c4e4b055c3e77fcbe3bf62 100644 (file)
        .section __ex_table,"aw"                        !       \
        .align 4                                        !       \
        .word (fault_addr - .), (except_addr - .)       !       \
+       or %r0,%r0,%r0                                  !       \
        .previous
 
 
index b4006f2a97052da67eaf3d5dcbac95197dc0fd69..ba4c05bc24d6901124deb89152c82a62aa3f5e4d 100644 (file)
@@ -41,6 +41,7 @@ void flush_kernel_vmap_range(void *vaddr, int size);
 void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)           flush_cache_all()
+#define flush_cache_vmap_early(start, end)     do { } while (0)
 #define flush_cache_vunmap(start, end)         flush_cache_all()
 
 void flush_dcache_folio(struct folio *folio);
diff --git a/arch/parisc/include/asm/extable.h b/arch/parisc/include/asm/extable.h
new file mode 100644 (file)
index 0000000..4ea23e3
--- /dev/null
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PARISC_EXTABLE_H
+#define __PARISC_EXTABLE_H
+
+#include <asm/ptrace.h>
+#include <linux/compiler.h>
+
+/*
+ * The exception table consists of three addresses:
+ *
+ * - A relative address to the instruction that is allowed to fault.
+ * - A relative address at which the program should continue (fixup routine)
+ * - An asm statement which specifies which CPU register will
+ *   receive -EFAULT when an exception happens if the lowest bit in
+ *   the fixup address is set.
+ *
+ * Note: The register specified in the err_opcode instruction will be
+ * modified at runtime if a fault happens. Register %r0 will be ignored.
+ *
+ * Since relative addresses are used, 32bit values are sufficient even on
+ * 64bit kernel.
+ */
+
+struct pt_regs;
+int fixup_exception(struct pt_regs *regs);
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+struct exception_table_entry {
+       int insn;       /* relative address of insn that is allowed to fault. */
+       int fixup;      /* relative address of fixup routine */
+       int err_opcode; /* sample opcode with register which holds error code */
+};
+
+#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr, opcode )\
+       ".section __ex_table,\"aw\"\n"                     \
+       ".align 4\n"                                       \
+       ".word (" #fault_addr " - .), (" #except_addr " - .)\n" \
+       opcode "\n"                                        \
+       ".previous\n"
+
+/*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT on fault into the register specified by the err_opcode instruction,
+ * and zeroes the target register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_VAR(__err_var)              \
+       int __err_var = 0
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr, register )\
+       ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1, "or %%r0,%%r0," register)
+
+static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
+                                      struct exception_table_entry *b,
+                                      struct exception_table_entry tmp,
+                                      int delta)
+{
+       a->fixup = b->fixup + delta;
+       b->fixup = tmp.fixup - delta;
+       a->err_opcode = b->err_opcode;
+       b->err_opcode = tmp.err_opcode;
+}
+#define swap_ex_entry_fixup swap_ex_entry_fixup
+
+#endif
index 94428798b6aa63e8d4b0878cc7555826cf080e47..317ebc5edc9fe99950f4efe55d989db453f46d0d 100644 (file)
@@ -12,7 +12,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\n\t"
+       asm goto("1:\n\t"
                 "nop\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align %1\n\t"
@@ -29,7 +29,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\n\t"
+       asm goto("1:\n\t"
                 "b,n %l[l_yes]\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align %1\n\t"
index c822bd0c0e3c6ccb86b4190d15500589c70f353a..51f40eaf7780659263f37b7c10fa7bd4ecf4ced7 100644 (file)
@@ -8,7 +8,8 @@
                "copy %%r0,%0\n"                        \
                "8:\tlpa %%r0(%1),%0\n"                 \
                "9:\n"                                  \
-               ASM_EXCEPTIONTABLE_ENTRY(8b, 9b)        \
+               ASM_EXCEPTIONTABLE_ENTRY(8b, 9b,        \
+                               "or %%r0,%%r0,%%r0")    \
                : "=&r" (pa)                            \
                : "r" (va)                              \
                : "memory"                              \
@@ -22,7 +23,8 @@
                "copy %%r0,%0\n"                        \
                "8:\tlpa %%r0(%%sr3,%1),%0\n"           \
                "9:\n"                                  \
-               ASM_EXCEPTIONTABLE_ENTRY(8b, 9b)        \
+               ASM_EXCEPTIONTABLE_ENTRY(8b, 9b,        \
+                               "or %%r0,%%r0,%%r0")    \
                : "=&r" (pa)                            \
                : "r" (va)                              \
                : "memory"                              \
index 4165079898d9e7af239a31a1bc77821e6081706a..88d0ae5769dde54e29176e286da359eb6a54e7bf 100644 (file)
@@ -7,6 +7,7 @@
  */
 #include <asm/page.h>
 #include <asm/cache.h>
+#include <asm/extable.h>
 
 #include <linux/bug.h>
 #include <linux/string.h>
 #define STD_USER(sr, x, ptr)   __put_user_asm(sr, "std", x, ptr)
 #endif
 
-/*
- * The exception table contains two values: the first is the relative offset to
- * the address of the instruction that is allowed to fault, and the second is
- * the relative offset to the address of the fixup routine. Since relative
- * addresses are used, 32bit values are sufficient even on 64bit kernel.
- */
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-struct exception_table_entry {
-       int insn;       /* relative address of insn that is allowed to fault. */
-       int fixup;      /* relative address of fixup routine */
-};
-
-#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
-       ".section __ex_table,\"aw\"\n"                     \
-       ".align 4\n"                                       \
-       ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
-       ".previous\n"
-
-/*
- * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
- * (with lowest bit set) for which the fault handler in fixup_exception() will
- * load -EFAULT into %r29 for a read or write fault, and zeroes the target
- * register in case of a read fault in get_user().
- */
-#define ASM_EXCEPTIONTABLE_REG 29
-#define ASM_EXCEPTIONTABLE_VAR(__variable)             \
-       register long __variable __asm__ ("r29") = 0
-#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
-       ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
-
 #define __get_user_internal(sr, val, ptr)              \
 ({                                                     \
        ASM_EXCEPTIONTABLE_VAR(__gu_err);               \
@@ -83,7 +53,7 @@ struct exception_table_entry {
                                                        \
        __asm__("1: " ldx " 0(%%sr%2,%3),%0\n"          \
                "9:\n"                                  \
-               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1")   \
                : "=r"(__gu_val), "+r"(__gu_err)        \
                : "i"(sr), "r"(ptr));                   \
                                                        \
@@ -115,8 +85,8 @@ struct exception_table_entry {
                "1: ldw 0(%%sr%2,%3),%0\n"              \
                "2: ldw 4(%%sr%2,%3),%R0\n"             \
                "9:\n"                                  \
-               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
-               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1")   \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%1")   \
                : "=&r"(__gu_tmp.l), "+r"(__gu_err)     \
                : "i"(sr), "r"(ptr));                   \
                                                        \
@@ -174,7 +144,7 @@ struct exception_table_entry {
        __asm__ __volatile__ (                                  \
                "1: " stx " %1,0(%%sr%2,%3)\n"                  \
                "9:\n"                                          \
-               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)         \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0")   \
                : "+r"(__pu_err)                                \
                : "r"(x), "i"(sr), "r"(ptr))
 
@@ -186,15 +156,14 @@ struct exception_table_entry {
                "1: stw %1,0(%%sr%2,%3)\n"                      \
                "2: stw %R1,4(%%sr%2,%3)\n"                     \
                "9:\n"                                          \
-               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)         \
-               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)         \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0")   \
+               ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%0")   \
                : "+r"(__pu_err)                                \
                : "r"(__val), "i"(sr), "r"(ptr));               \
 } while (0)
 
 #endif /* !defined(CONFIG_64BIT) */
 
-
 /*
  * Complex access routines -- external declarations
  */
@@ -216,7 +185,4 @@ unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
 #define INLINE_COPY_TO_USER
 #define INLINE_COPY_FROM_USER
 
-struct pt_regs;
-int fixup_exception(struct pt_regs *regs);
-
 #endif /* __PARISC_UACCESS_H */
index 268d90a9325b468603b634b86b48980a31b4fba7..422f3e1e6d9cad718c264c7d7c9bd30872846555 100644 (file)
@@ -58,7 +58,7 @@ int pa_serialize_tlb_flushes __ro_after_init;
 
 struct pdc_cache_info cache_info __ro_after_init;
 #ifndef CONFIG_PA20
-struct pdc_btlb_info btlb_info __ro_after_init;
+struct pdc_btlb_info btlb_info;
 #endif
 
 DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
@@ -264,6 +264,10 @@ parisc_cache_init(void)
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
 #undef CAFL_STRIDE
 
+       /* stride needs to be non-zero, otherwise cache flushes will not work */
+       WARN_ON(cache_info.dc_size && dcache_stride == 0);
+       WARN_ON(cache_info.ic_size && icache_stride == 0);
+
        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
@@ -850,7 +854,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
 #endif
                        "   fic,m       %3(%4,%0)\n"
                        "2: sync\n"
-                       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
+                       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
                        : "+r" (start), "+r" (error)
                        : "r" (end), "r" (dcache_stride), "i" (SR_USER));
        }
@@ -865,7 +869,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
 #endif
                        "   fdc,m       %3(%4,%0)\n"
                        "2: sync\n"
-                       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
+                       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
                        : "+r" (start), "+r" (error)
                        : "r" (end), "r" (icache_stride), "i" (SR_USER));
        }
index 25f9b9e9d6dfbc70f21787e29a170334cb102dc7..c7ff339732ba5a762eac90e1b3072aef45c58318 100644 (file)
@@ -742,7 +742,7 @@ parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
        };
 
        if (device_for_each_child(parent, &recurse_data, descend_children))
-               { /* nothing */ };
+               { /* nothing */ }
 
        return d.dev;
 }
@@ -1004,6 +1004,9 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
 
        pr_info("\n");
 
+       /* Prevent hung task messages when printing on serial console */
+       cond_resched();
+
        pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n",
                hpa, parisc_hardware_description(&dev->id));
 
index 904ca3b9e7a7150a1209763c9e4ddfee90281c01..c69f6d5946e9063f0f3b802ea3424b74f79caa35 100644 (file)
@@ -123,10 +123,10 @@ static unsigned long f_extend(unsigned long address)
 #ifdef CONFIG_64BIT
        if(unlikely(parisc_narrow_firmware)) {
                if((address & 0xff000000) == 0xf0000000)
-                       return 0xf0f0f0f000000000UL | (u32)address;
+                       return (0xfffffff0UL << 32) | (u32)address;
 
                if((address & 0xf0000000) == 0xf0000000)
-                       return 0xffffffff00000000UL | (u32)address;
+                       return (0xffffffffUL << 32) | (u32)address;
        }
 #endif
        return address;
index ce25acfe4889d0df8048e448a16d76e414ee1262..c520e551a165258609cba5e068037493bd7e57a8 100644 (file)
@@ -120,8 +120,8 @@ static int emulate_ldh(struct pt_regs *regs, int toreg)
 "2:    ldbs    1(%%sr1,%3), %0\n"
 "      depw    %2, 23, 24, %0\n"
 "3:    \n"
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
        : "+r" (val), "+r" (ret), "=&r" (temp1)
        : "r" (saddr), "r" (regs->isr) );
 
@@ -152,8 +152,8 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
 "      mtctl   %2,11\n"
 "      vshd    %0,%3,%0\n"
 "3:    \n"
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
        : "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2)
        : "r" (saddr), "r" (regs->isr) );
 
@@ -189,8 +189,8 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
 "      mtsar   %%r19\n"
 "      shrpd   %0,%%r20,%%sar,%0\n"
 "3:    \n"
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
        : "=r" (val), "+r" (ret)
        : "0" (val), "r" (saddr), "r" (regs->isr)
        : "r19", "r20" );
@@ -209,9 +209,9 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
 "      vshd    %0,%R0,%0\n"
 "      vshd    %R0,%4,%R0\n"
 "4:    \n"
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b)
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b, "%1")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b, "%1")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1")
        : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
        : "r" (regs->isr) );
     }
@@ -244,8 +244,8 @@ static int emulate_sth(struct pt_regs *regs, int frreg)
 "1:    stb %1, 0(%%sr1, %3)\n"
 "2:    stb %2, 1(%%sr1, %3)\n"
 "3:    \n"
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
        : "+r" (ret), "=&r" (temp1)
        : "r" (val), "r" (regs->ior), "r" (regs->isr) );
 
@@ -285,8 +285,8 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
 "      stw     %%r20,0(%%sr1,%2)\n"
 "      stw     %%r21,4(%%sr1,%2)\n"
 "3:    \n"
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
        : "+r" (ret)
        : "r" (val), "r" (regs->ior), "r" (regs->isr)
        : "r19", "r20", "r21", "r22", "r1" );
@@ -329,10 +329,10 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
 "3:    std     %%r20,0(%%sr1,%2)\n"
 "4:    std     %%r21,8(%%sr1,%2)\n"
 "5:    \n"
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b)
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b, "%0")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b, "%0")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b, "%0")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b, "%0")
        : "+r" (ret)
        : "r" (val), "r" (regs->ior), "r" (regs->isr)
        : "r19", "r20", "r21", "r22", "r1" );
@@ -357,11 +357,11 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
 "4:    stw     %%r1,4(%%sr1,%2)\n"
 "5:    stw     %R1,8(%%sr1,%2)\n"
 "6:    \n"
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b)
-       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b)
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b, "%0")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b, "%0")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b, "%0")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b, "%0")
+       ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b, "%0")
        : "+r" (ret)
        : "r" (val), "r" (regs->ior), "r" (regs->isr)
        : "r19", "r20", "r21", "r1" );
index 548051b0b4aff692741847a04b09208d1e68d279..b445e47903cfd0b813035c2056f11a4f818cf6d2 100644 (file)
@@ -127,7 +127,7 @@ SECTIONS
        }
 #endif
 
-       RO_DATA(8)
+       RO_DATA(PAGE_SIZE)
 
        /* unwind info */
        . = ALIGN(4);
index 2fe5b44986e0924e3981ebc1edb9d074c08e6fda..c39de84e98b05172bdec0f474261ccde4a06cf00 100644 (file)
@@ -150,11 +150,16 @@ int fixup_exception(struct pt_regs *regs)
                 * Fix up get_user() and put_user().
                 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
                 * bit in the relative address of the fixup routine to indicate
-                * that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
-                * -EFAULT to report a userspace access error.
+                * that the register encoded in the "or %r0,%r0,register"
+                * opcode should be loaded with -EFAULT to report a userspace
+                * access error.
                 */
                if (fix->fixup & 1) {
-                       regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;
+                       int fault_error_reg = fix->err_opcode & 0x1f;
+                       if (!WARN_ON(!fault_error_reg))
+                               regs->gr[fault_error_reg] = -EFAULT;
+                       pr_debug("Unalignment fixup of register %d at %pS\n",
+                               fault_error_reg, (void*)regs->iaoq[0]);
 
                        /* zero target register for get_user() */
                        if (parisc_acctyp(0, regs->iir) == VM_READ) {
index 414b978b8010b0cbac4511b606ee16c1a5236cc8..b9fc064d38d281f1c32584e79edd705c670b1731 100644 (file)
@@ -859,6 +859,7 @@ config THREAD_SHIFT
        int "Thread shift" if EXPERT
        range 13 15
        default "15" if PPC_256K_PAGES
+       default "15" if PPC_PSERIES || PPC_POWERNV
        default "14" if PPC64
        default "13"
        help
index ccb2034506f0f1e9971cd897b6860ba6a18330b8..d841a97010a0f8dc5bd7b9ac801b35003cc34931 100644 (file)
@@ -21,8 +21,8 @@
  * Vio firmware always attempts to fetch MAX_VIO_GET_CHARS chars.  The 'count'
  * parm is included to conform to put_chars() function pointer template
  */
-extern int hvc_get_chars(uint32_t vtermno, char *buf, int count);
-extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count);
+extern ssize_t hvc_get_chars(uint32_t vtermno, u8 *buf, size_t count);
+extern ssize_t hvc_put_chars(uint32_t vtermno, const u8 *buf, size_t count);
 
 /* Provided by HVC VIO */
 void hvc_vio_init_early(void);
index 464a7519ed6443ef40f9eda29ce738c98d0b6138..9058edcb632b49043f0899136f210ec18e3fb59d 100644 (file)
@@ -64,7 +64,7 @@ struct hvsi_priv {
        unsigned int    inbuf_len;      /* data in input buffer */
        unsigned char   inbuf[HVSI_INBUF_SIZE];
        unsigned int    inbuf_cur;      /* Cursor in input buffer */
-       unsigned int    inbuf_pktlen;   /* packet length from cursor */
+       size_t          inbuf_pktlen;   /* packet length from cursor */
        atomic_t        seqno;          /* packet sequence number */
        unsigned int    opened:1;       /* driver opened */
        unsigned int    established:1;  /* protocol established */
@@ -72,24 +72,26 @@ struct hvsi_priv {
        unsigned int    mctrl_update:1; /* modem control updated */
        unsigned short  mctrl;          /* modem control */
        struct tty_struct *tty;         /* tty structure */
-       int (*get_chars)(uint32_t termno, char *buf, int count);
-       int (*put_chars)(uint32_t termno, const char *buf, int count);
+       ssize_t (*get_chars)(uint32_t termno, u8 *buf, size_t count);
+       ssize_t (*put_chars)(uint32_t termno, const u8 *buf, size_t count);
        uint32_t        termno;
 };
 
 /* hvsi lib functions */
 struct hvc_struct;
 extern void hvsilib_init(struct hvsi_priv *pv,
-                        int (*get_chars)(uint32_t termno, char *buf, int count),
-                        int (*put_chars)(uint32_t termno, const char *buf,
-                                         int count),
+                        ssize_t (*get_chars)(uint32_t termno, u8 *buf,
+                                             size_t count),
+                        ssize_t (*put_chars)(uint32_t termno, const u8 *buf,
+                                             size_t count),
                         int termno, int is_console);
 extern int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp);
 extern void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp);
 extern int hvsilib_read_mctrl(struct hvsi_priv *pv);
 extern int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr);
 extern void hvsilib_establish(struct hvsi_priv *pv);
-extern int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count);
-extern int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count);
+extern ssize_t hvsilib_get_chars(struct hvsi_priv *pv, u8 *buf, size_t count);
+extern ssize_t hvsilib_put_chars(struct hvsi_priv *pv, const u8 *buf,
+                                size_t count);
 
 #endif /* _HVSI_H */
index 93ce3ec253877d38da5e3f9c3ac76205354d3496..2f2a86ed2280aac66df0535d7938cf4a673446f7 100644 (file)
@@ -17,7 +17,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\n\t"
+       asm goto("1:\n\t"
                 "nop # arch_static_branch\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".long 1b - ., %l[l_yes] - .\n\t"
@@ -32,7 +32,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\n\t"
+       asm goto("1:\n\t"
                 "b %l[l_yes] # arch_static_branch_jump\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".long 1b - ., %l[l_yes] - .\n\t"
index 8799b37be295cf5ea33918f93d6b554cedcb94ca..8abac532146e7b2744e4dd70bfecd373946a39f4 100644 (file)
@@ -63,8 +63,6 @@
 
 #include <linux/mmu_notifier.h>
 
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-
 #define HPTEG_CACHE_NUM                        (1 << 15)
 #define HPTEG_HASH_BITS_PTE            13
 #define HPTEG_HASH_BITS_PTE_LONG       12
index b66b0c615f4f1964d52f9a16ff1f84ebdcc9b2ea..af304e6cb486c5cf76ed479c4843f3cf35c1b44b 100644 (file)
@@ -313,9 +313,11 @@ extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
                                 const char *uname, int depth, void *data);
 void __init opal_configure_cores(void);
 
-extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
-extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
-extern int opal_put_chars_atomic(uint32_t vtermno, const char *buf, int total_len);
+extern ssize_t opal_get_chars(uint32_t vtermno, u8 *buf, size_t count);
+extern ssize_t opal_put_chars(uint32_t vtermno, const u8 *buf,
+                             size_t total_len);
+extern ssize_t opal_put_chars_atomic(uint32_t vtermno, const u8 *buf,
+                                    size_t total_len);
 extern int opal_flush_chars(uint32_t vtermno, bool wait);
 extern int opal_flush_console(uint32_t vtermno);
 
index f1f9890f50d3ef84dfd62b5d66db68315f0698b6..de10437fd20652ee63a6d214638bded13cdbc6c3 100644 (file)
@@ -74,7 +74,7 @@ __pu_failed:                                                  \
 /* -mprefixed can generate offsets beyond range, fall back hack */
 #ifdef CONFIG_PPC_KERNEL_PREFIXED
 #define __put_user_asm_goto(x, addr, label, op)                        \
-       asm_volatile_goto(                                      \
+       asm goto(                                       \
                "1:     " op " %0,0(%1) # put_user\n"           \
                EX_TABLE(1b, %l2)                               \
                :                                               \
@@ -83,7 +83,7 @@ __pu_failed:                                                  \
                : label)
 #else
 #define __put_user_asm_goto(x, addr, label, op)                        \
-       asm_volatile_goto(                                      \
+       asm goto(                                       \
                "1:     " op "%U1%X1 %0,%1      # put_user\n"   \
                EX_TABLE(1b, %l2)                               \
                :                                               \
@@ -97,7 +97,7 @@ __pu_failed:                                                  \
        __put_user_asm_goto(x, ptr, label, "std")
 #else /* __powerpc64__ */
 #define __put_user_asm2_goto(x, addr, label)                   \
-       asm_volatile_goto(                                      \
+       asm goto(                                       \
                "1:     stw%X1 %0, %1\n"                        \
                "2:     stw%X1 %L0, %L1\n"                      \
                EX_TABLE(1b, %l2)                               \
@@ -146,7 +146,7 @@ do {                                                                \
 /* -mprefixed can generate offsets beyond range, fall back hack */
 #ifdef CONFIG_PPC_KERNEL_PREFIXED
 #define __get_user_asm_goto(x, addr, label, op)                        \
-       asm_volatile_goto(                                      \
+       asm_goto_output(                                        \
                "1:     "op" %0,0(%1)   # get_user\n"           \
                EX_TABLE(1b, %l2)                               \
                : "=r" (x)                                      \
@@ -155,7 +155,7 @@ do {                                                                \
                : label)
 #else
 #define __get_user_asm_goto(x, addr, label, op)                        \
-       asm_volatile_goto(                                      \
+       asm_goto_output(                                        \
                "1:     "op"%U1%X1 %0, %1       # get_user\n"   \
                EX_TABLE(1b, %l2)                               \
                : "=r" (x)                                      \
@@ -169,7 +169,7 @@ do {                                                                \
        __get_user_asm_goto(x, addr, label, "ld")
 #else /* __powerpc64__ */
 #define __get_user_asm2_goto(x, addr, label)                   \
-       asm_volatile_goto(                                      \
+       asm_goto_output(                                        \
                "1:     lwz%X1 %0, %1\n"                        \
                "2:     lwz%X1 %L0, %L1\n"                      \
                EX_TABLE(1b, %l2)                               \
index ebe259bdd46298e0654fb681b0cf8853c8381079..d71eac3b2887b98ff5ea23ec1874243283524f4c 100644 (file)
@@ -1287,20 +1287,20 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_group *grp = iommu_group_get(dev);
        struct iommu_table_group *table_group;
-       int ret = -EINVAL;
 
        /* At first attach the ownership is already set */
        if (!domain)
                return 0;
 
-       if (!grp)
-               return -ENODEV;
-
        table_group = iommu_group_get_iommudata(grp);
-       ret = table_group->ops->take_ownership(table_group);
+       /*
+        * The domain being set to PLATFORM from earlier
+        * BLOCKED. The table_group ownership has to be released.
+        */
+       table_group->ops->release_ownership(table_group);
        iommu_group_put(grp);
 
-       return ret;
+       return 0;
 }
 
 static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
@@ -1312,13 +1312,32 @@ static struct iommu_domain spapr_tce_platform_domain = {
        .ops = &spapr_tce_platform_domain_ops,
 };
 
-static struct iommu_domain spapr_tce_blocked_domain = {
-       .type = IOMMU_DOMAIN_BLOCKED,
+static int
+spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
+                                    struct device *dev)
+{
+       struct iommu_group *grp = iommu_group_get(dev);
+       struct iommu_table_group *table_group;
+       int ret = -EINVAL;
+
        /*
         * FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
         * also sets the dma_api ops
         */
-       .ops = &spapr_tce_platform_domain_ops,
+       table_group = iommu_group_get_iommudata(grp);
+       ret = table_group->ops->take_ownership(table_group);
+       iommu_group_put(grp);
+
+       return ret;
+}
+
+static const struct iommu_domain_ops spapr_tce_blocked_domain_ops = {
+       .attach_dev = spapr_tce_blocked_iommu_attach_dev,
+};
+
+static struct iommu_domain spapr_tce_blocked_domain = {
+       .type = IOMMU_DOMAIN_BLOCKED,
+       .ops = &spapr_tce_blocked_domain_ops,
 };
 
 static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
index 938e66829eae65cc52d170f7753ee0685cdaa4e3..d5c48d1b0a31ea533281934e414320bbf77368d2 100644 (file)
@@ -230,7 +230,7 @@ again:
         * This allows interrupts to be unmasked without hard disabling, and
         * also without new hard interrupts coming in ahead of pending ones.
         */
-       asm_volatile_goto(
+       asm goto(
 "1:                                    \n"
 "              lbz     9,%0(13)        \n"
 "              cmpwi   9,0             \n"
index 902611954200df90c43e2b2516ce242006dba09c..074263429faf2e49b516fb1ef1ac3544b902febe 100644 (file)
@@ -19,13 +19,11 @@ if VIRTUALIZATION
 
 config KVM
        bool
-       select PREEMPT_NOTIFIERS
-       select HAVE_KVM_EVENTFD
+       select KVM_COMMON
        select HAVE_KVM_VCPU_ASYNC_IOCTL
        select KVM_VFIO
        select IRQ_BYPASS_MANAGER
        select HAVE_KVM_IRQ_BYPASS
-       select INTERVAL_TREE
 
 config KVM_BOOK3S_HANDLER
        bool
@@ -42,7 +40,7 @@ config KVM_BOOK3S_64_HANDLER
 config KVM_BOOK3S_PR_POSSIBLE
        bool
        select KVM_MMIO
-       select MMU_NOTIFIER
+       select KVM_GENERIC_MMU_NOTIFIER
 
 config KVM_BOOK3S_HV_POSSIBLE
        bool
@@ -85,7 +83,7 @@ config KVM_BOOK3S_64_HV
        tristate "KVM for POWER7 and later using hypervisor mode in host"
        depends on KVM_BOOK3S_64 && PPC_POWERNV
        select KVM_BOOK3S_HV_POSSIBLE
-       select MMU_NOTIFIER
+       select KVM_GENERIC_MMU_NOTIFIER
        select CMA
        help
          Support running unmodified book3s_64 guest kernels in
@@ -194,7 +192,7 @@ config KVM_E500V2
        depends on !CONTEXT_TRACKING_USER
        select KVM
        select KVM_MMIO
-       select MMU_NOTIFIER
+       select KVM_GENERIC_MMU_NOTIFIER
        help
          Support running unmodified E500 guest kernels in virtual machines on
          E500v2 host processors.
@@ -211,7 +209,7 @@ config KVM_E500MC
        select KVM
        select KVM_MMIO
        select KVM_BOOKE_HV
-       select MMU_NOTIFIER
+       select KVM_GENERIC_MMU_NOTIFIER
        help
          Support running unmodified E500MC/E5500/E6500 guest kernels in
          virtual machines on E500MC/E5500/E6500 host processors.
@@ -225,7 +223,6 @@ config KVM_MPIC
        bool "KVM in-kernel MPIC emulation"
        depends on KVM && PPC_E500
        select HAVE_KVM_IRQCHIP
-       select HAVE_KVM_IRQFD
        select HAVE_KVM_IRQ_ROUTING
        select HAVE_KVM_MSI
        help
@@ -238,7 +235,6 @@ config KVM_XICS
        bool "KVM in-kernel XICS emulation"
        depends on KVM_BOOK3S_64 && !KVM_MPIC
        select HAVE_KVM_IRQCHIP
-       select HAVE_KVM_IRQFD
        default y
        help
          Include support for the XICS (eXternal Interrupt Controller
index e48126a59ba7805d4696c03bf1ddbd46180245f9..52427fc2a33fa4ad7032bcc6323bc6364918d98f 100644 (file)
@@ -6240,7 +6240,7 @@ static int kvmhv_svm_off(struct kvm *kvm)
        }
 
        srcu_idx = srcu_read_lock(&kvm->srcu);
-       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+       for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
                struct kvm_memory_slot *memslot;
                struct kvm_memslots *slots = __kvm_memslots(kvm, i);
                int bkt;
index f6af752698d0363e1432a3ab2f111ba325f417fc..23407fbd73c9346e05113db812c6897e2a33e7e7 100644 (file)
@@ -528,7 +528,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_IOEVENTFD:
-       case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_IMMEDIATE_EXIT:
        case KVM_CAP_SET_GUEST_DEBUG:
                r = 1;
@@ -578,7 +577,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
 #endif
 
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
        case KVM_CAP_IRQFD_RESAMPLE:
                r = !xive_enabled();
                break;
@@ -632,13 +631,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
 #endif
        case KVM_CAP_SYNC_MMU:
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-               r = hv_enabled;
-#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+               BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER));
                r = 1;
-#else
-               r = 0;
-#endif
                break;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_HTAB_FD:
index cdf3838f08d3762406e3880b82b3f7da0255989f..45dd77e3ccf6f8dff9c074ce3e58788e73e7104e 100644 (file)
@@ -424,7 +424,7 @@ static int __init opal_message_init(struct device_node *opal_node)
        return 0;
 }
 
-int opal_get_chars(uint32_t vtermno, char *buf, int count)
+ssize_t opal_get_chars(uint32_t vtermno, u8 *buf, size_t count)
 {
        s64 rc;
        __be64 evt, len;
@@ -441,10 +441,11 @@ int opal_get_chars(uint32_t vtermno, char *buf, int count)
        return 0;
 }
 
-static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, bool atomic)
+static ssize_t __opal_put_chars(uint32_t vtermno, const u8 *data,
+                               size_t total_len, bool atomic)
 {
        unsigned long flags = 0 /* shut up gcc */;
-       int written;
+       ssize_t written;
        __be64 olen;
        s64 rc;
 
@@ -484,7 +485,7 @@ static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, b
                if (atomic) {
                        /* Should not happen */
                        pr_warn("atomic console write returned partial "
-                               "len=%d written=%d\n", total_len, written);
+                               "len=%zu written=%zd\n", total_len, written);
                }
                if (!written)
                        written = -EAGAIN;
@@ -497,7 +498,7 @@ out:
        return written;
 }
 
-int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
+ssize_t opal_put_chars(uint32_t vtermno, const u8 *data, size_t total_len)
 {
        return __opal_put_chars(vtermno, data, total_len, false);
 }
@@ -508,7 +509,8 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
  * true at the moment because console space can race with OPAL's console
  * writes.
  */
-int opal_put_chars_atomic(uint32_t vtermno, const char *data, int total_len)
+ssize_t opal_put_chars_atomic(uint32_t vtermno, const u8 *data,
+                             size_t total_len)
 {
        return __opal_put_chars(vtermno, data, total_len, true);
 }
index 1ac52963e08b47365415a93c09702354cb896d4c..8803c947998e1e20dea9e2cf2e7019330fa62ca9 100644 (file)
@@ -25,7 +25,7 @@
  *     firmware.
  * @count: not used?
  */
-int hvc_get_chars(uint32_t vtermno, char *buf, int count)
+ssize_t hvc_get_chars(uint32_t vtermno, u8 *buf, size_t count)
 {
        long ret;
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(hvc_get_chars);
  *     firmware. Must be at least 16 bytes, even if count is less than 16.
  * @count: Send this number of characters.
  */
-int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
+ssize_t hvc_put_chars(uint32_t vtermno, const u8 *buf, size_t count)
 {
        unsigned long *lbuf = (unsigned long *) buf;
        long ret;
index 3868483fbe292d5455a853cafd2da1df31eec7d4..ef7707ea0db7bc0b356470f0763d05d4e3c1d469 100644 (file)
@@ -54,7 +54,7 @@ static void quirk_fsl_pcie_early(struct pci_dev *dev)
 
        /* if we aren't in host mode don't bother */
        pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
-       if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
+       if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE)
                return;
 
        dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
@@ -581,7 +581,7 @@ static int fsl_add_bridge(struct platform_device *pdev, int is_primary)
                hose->ops = &fsl_indirect_pcie_ops;
                /* For PCIE read HEADER_TYPE to identify controller mode */
                early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
-               if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
+               if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE)
                        goto no_bridge;
 
        } else {
index cd4c9a204d08c93453543021a01f5fa82b7168c3..bffbd869a0682842883591788da784648acf1626 100644 (file)
@@ -53,24 +53,28 @@ config RISCV
        select ARCH_USE_MEMTEST
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USES_CFI_TRAPS if CFI_CLANG
+       select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP && MMU
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT
        select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
        select ARCH_WANT_LD_ORPHAN_WARN if !XIP_KERNEL
        select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
+       select ARCH_WANTS_NO_INSTR
        select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
        select BUILDTIME_TABLE_SORT if MMU
        select CLINT_TIMER if !MMU
        select CLONE_BACKWARDS
        select COMMON_CLK
-       select CPU_PM if CPU_IDLE || HIBERNATION
+       select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND
        select EDAC_SUPPORT
        select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE)
+       select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
        select GENERIC_ARCH_TOPOLOGY
        select GENERIC_ATOMIC64 if !64BIT
        select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+       select GENERIC_CPU_DEVICES
        select GENERIC_EARLY_IOREMAP
        select GENERIC_ENTRY
        select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
@@ -113,6 +117,7 @@ config RISCV
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS if MMU
        select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE)
+       select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
        select HAVE_FUNCTION_GRAPH_TRACER
@@ -140,6 +145,8 @@ config RISCV
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RETHOOK if !XIP_KERNEL
        select HAVE_RSEQ
+       select HAVE_SAMPLE_FTRACE_DIRECT
+       select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
        select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
@@ -181,6 +188,20 @@ config HAVE_SHADOW_CALL_STACK
        # https://github.com/riscv-non-isa/riscv-elf-psabi-doc/commit/a484e843e6eeb51f0cb7b8819e50da6d2444d769
        depends on $(ld-option,--no-relax-gp)
 
+config RISCV_USE_LINKER_RELAXATION
+       def_bool y
+       # https://github.com/llvm/llvm-project/commit/6611d58f5bbcbec77262d392e2923e1d680f6985
+       depends on !LD_IS_LLD || LLD_VERSION >= 150000
+
+# https://github.com/llvm/llvm-project/commit/bbc0f99f3bc96f1db16f649fc21dd18e5b0918f6
+config ARCH_HAS_BROKEN_DWARF5
+       def_bool y
+       depends on RISCV_USE_LINKER_RELAXATION
+       # https://github.com/llvm/llvm-project/commit/1df5ea29b43690b6622db2cad7b745607ca4de6a
+       depends on AS_IS_LLVM && AS_VERSION < 180000
+       # https://github.com/llvm/llvm-project/commit/7ffabb61a5569444b5ac9322e22e5471cc5e4a77
+       depends on LD_IS_LLD && LLD_VERSION < 180000
+
 config ARCH_MMAP_RND_BITS_MIN
        default 18 if 64BIT
        default 8
@@ -414,7 +435,9 @@ config NUMA
        depends on SMP && MMU
        select ARCH_SUPPORTS_NUMA_BALANCING
        select GENERIC_ARCH_NUMA
+       select HAVE_SETUP_PER_CPU_AREA
        select NEED_PER_CPU_EMBED_FIRST_CHUNK
+       select NEED_PER_CPU_PAGE_FIRST_CHUNK
        select OF_NUMA
        select USE_PERCPU_NUMA_NODE_ID
        help
@@ -525,6 +548,28 @@ config RISCV_ISA_V_DEFAULT_ENABLE
 
          If you don't know what to do here, say Y.
 
+config RISCV_ISA_V_UCOPY_THRESHOLD
+       int "Threshold size for vectorized user copies"
+       depends on RISCV_ISA_V
+       default 768
+       help
+         Prefer using vectorized copy_to_user()/copy_from_user() when the
+         workload size exceeds this value.
+
+config RISCV_ISA_V_PREEMPTIVE
+       bool "Run kernel-mode Vector with kernel preemption"
+       depends on PREEMPTION
+       depends on RISCV_ISA_V
+       default y
+       help
+         Usually, in-kernel SIMD routines are run with preemption disabled.
+         Functions which envoke long running SIMD thus must yield core's
+         vector unit to prevent blocking other tasks for too long.
+
+         This config allows the kernel to run SIMD without explicitly
+         disabling preemption. Enabling this config results in higher memory
+         consumption due to the allocation of per-task kernel Vector context.
+
 config TOOLCHAIN_HAS_ZBB
        bool
        default y
@@ -651,6 +696,20 @@ config RISCV_MISALIGNED
          load/store for both kernel and userspace. When disable, misaligned
          accesses will generate SIGBUS in userspace and panic in kernel.
 
+config RISCV_EFFICIENT_UNALIGNED_ACCESS
+       bool "Assume the CPU supports fast unaligned memory accesses"
+       depends on NONPORTABLE
+       select DCACHE_WORD_ACCESS if MMU
+       select HAVE_EFFICIENT_UNALIGNED_ACCESS
+       help
+         Say Y here if you want the kernel to assume that the CPU supports
+         efficient unaligned memory accesses.  When enabled, this option
+         improves the performance of the kernel on such CPUs.  However, the
+         kernel will run much more slowly, or will not be able to run at all,
+         on CPUs that do not support efficient unaligned memory accesses.
+
+         If unsure what to do here, say N.
+
 endmenu # "Platform type"
 
 menu "Kernel features"
@@ -722,6 +781,25 @@ config COMPAT
 
          If you want to execute 32-bit userspace applications, say Y.
 
+config PARAVIRT
+       bool "Enable paravirtualization code"
+       depends on RISCV_SBI
+       help
+         This changes the kernel so it can modify itself when it is run
+         under a hypervisor, potentially improving performance significantly
+         over full virtualization.
+
+config PARAVIRT_TIME_ACCOUNTING
+       bool "Paravirtual steal time accounting"
+       depends on PARAVIRT
+       help
+         Select this option to enable fine granularity task steal time
+         accounting. Time spent executing other tasks in parallel with
+         the current vCPU is discounted from the vCPU power. To account for
+         that, there can be a small performance impact.
+
+         If in doubt, say N here.
+
 config RELOCATABLE
        bool "Build a relocatable kernel"
        depends on MMU && 64BIT && !XIP_KERNEL
@@ -902,13 +980,13 @@ config RISCV_ISA_FALLBACK
          on the replacement properties, "riscv,isa-base" and
          "riscv,isa-extensions".
 
-endmenu # "Boot options"
-
 config BUILTIN_DTB
-       bool
+       bool "Built-in device tree"
        depends on OF && NONPORTABLE
        default y if XIP_KERNEL
 
+endmenu # "Boot options"
+
 config PORTABLE
        bool
        default !NONPORTABLE
index f5c432b005e77a46b4cdc1ed3f8b8ae160d2b1a0..910ba8837add866f622fba84f7c0c3535ee175e6 100644 (file)
@@ -98,6 +98,7 @@ config ERRATA_THEAD_CMO
        depends on ERRATA_THEAD && MMU
        select DMA_DIRECT_REMAP
        select RISCV_DMA_NONCOHERENT
+       select RISCV_NONSTANDARD_CACHE_OPS
        default y
        help
          This will apply the cache management errata to handle the
index a74be78678eb0bcabf3d9571669a125401adb64d..0b7d109258e7d850846bb3c5f084a0482f07d02b 100644 (file)
@@ -43,8 +43,7 @@ else
        KBUILD_LDFLAGS += -melf32lriscv
 endif
 
-ifeq ($(CONFIG_LD_IS_LLD),y)
-ifeq ($(call test-lt, $(CONFIG_LLD_VERSION), 150000),y)
+ifndef CONFIG_RISCV_USE_LINKER_RELAXATION
        KBUILD_CFLAGS += -mno-relax
        KBUILD_AFLAGS += -mno-relax
 ifndef CONFIG_AS_IS_LLVM
@@ -52,7 +51,6 @@ ifndef CONFIG_AS_IS_LLVM
        KBUILD_AFLAGS += -Wa,-mno-relax
 endif
 endif
-endif
 
 ifeq ($(CONFIG_SHADOW_CALL_STACK),y)
        KBUILD_LDFLAGS += --no-relax-gp
@@ -108,7 +106,9 @@ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
 # unaligned accesses.  While unaligned accesses are explicitly allowed in the
 # RISC-V ISA, they're emulated by machine mode traps on all extant
 # architectures.  It's faster to have GCC emit only aligned accesses.
+ifneq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS),y)
 KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+endif
 
 ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
 prepare: stack_protector_prepare
@@ -163,6 +163,8 @@ BOOT_TARGETS := Image Image.gz loader loader.bin xipImage vmlinuz.efi
 
 all:   $(notdir $(KBUILD_IMAGE))
 
+loader.bin: loader
+Image.gz loader vmlinuz.efi: Image
 $(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
        @$(kecho) '  Kernel: $(boot)/$@ is ready'
index 93256540d07882af2b12a6faf0642d93ddc4970b..ead1cc35d88b2f13bfecf935a6e66e6049a24a75 100644 (file)
                                              <&cpu63_intc 3>;
                };
 
-               clint_mtimer0: timer@70ac000000 {
+               clint_mtimer0: timer@70ac004000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac000000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac004000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu0_intc 7>,
                                              <&cpu1_intc 7>,
                                              <&cpu2_intc 7>,
                                              <&cpu3_intc 7>;
                };
 
-               clint_mtimer1: timer@70ac010000 {
+               clint_mtimer1: timer@70ac014000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac010000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac014000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu4_intc 7>,
                                              <&cpu5_intc 7>,
                                              <&cpu6_intc 7>,
                                              <&cpu7_intc 7>;
                };
 
-               clint_mtimer2: timer@70ac020000 {
+               clint_mtimer2: timer@70ac024000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac020000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac024000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu8_intc 7>,
                                              <&cpu9_intc 7>,
                                              <&cpu10_intc 7>,
                                              <&cpu11_intc 7>;
                };
 
-               clint_mtimer3: timer@70ac030000 {
+               clint_mtimer3: timer@70ac034000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac030000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac034000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu12_intc 7>,
                                              <&cpu13_intc 7>,
                                              <&cpu14_intc 7>,
                                              <&cpu15_intc 7>;
                };
 
-               clint_mtimer4: timer@70ac040000 {
+               clint_mtimer4: timer@70ac044000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac040000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac044000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu16_intc 7>,
                                              <&cpu17_intc 7>,
                                              <&cpu18_intc 7>,
                                              <&cpu19_intc 7>;
                };
 
-               clint_mtimer5: timer@70ac050000 {
+               clint_mtimer5: timer@70ac054000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac050000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac054000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu20_intc 7>,
                                              <&cpu21_intc 7>,
                                              <&cpu22_intc 7>,
                                              <&cpu23_intc 7>;
                };
 
-               clint_mtimer6: timer@70ac060000 {
+               clint_mtimer6: timer@70ac064000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac060000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac064000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu24_intc 7>,
                                              <&cpu25_intc 7>,
                                              <&cpu26_intc 7>,
                                              <&cpu27_intc 7>;
                };
 
-               clint_mtimer7: timer@70ac070000 {
+               clint_mtimer7: timer@70ac074000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac070000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac074000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu28_intc 7>,
                                              <&cpu29_intc 7>,
                                              <&cpu30_intc 7>,
                                              <&cpu31_intc 7>;
                };
 
-               clint_mtimer8: timer@70ac080000 {
+               clint_mtimer8: timer@70ac084000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac080000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac084000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu32_intc 7>,
                                              <&cpu33_intc 7>,
                                              <&cpu34_intc 7>,
                                              <&cpu35_intc 7>;
                };
 
-               clint_mtimer9: timer@70ac090000 {
+               clint_mtimer9: timer@70ac094000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac090000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac094000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu36_intc 7>,
                                              <&cpu37_intc 7>,
                                              <&cpu38_intc 7>,
                                              <&cpu39_intc 7>;
                };
 
-               clint_mtimer10: timer@70ac0a0000 {
+               clint_mtimer10: timer@70ac0a4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0a0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0a4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu40_intc 7>,
                                              <&cpu41_intc 7>,
                                              <&cpu42_intc 7>,
                                              <&cpu43_intc 7>;
                };
 
-               clint_mtimer11: timer@70ac0b0000 {
+               clint_mtimer11: timer@70ac0b4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0b0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0b4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu44_intc 7>,
                                              <&cpu45_intc 7>,
                                              <&cpu46_intc 7>,
                                              <&cpu47_intc 7>;
                };
 
-               clint_mtimer12: timer@70ac0c0000 {
+               clint_mtimer12: timer@70ac0c4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0c0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0c4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu48_intc 7>,
                                              <&cpu49_intc 7>,
                                              <&cpu50_intc 7>,
                                              <&cpu51_intc 7>;
                };
 
-               clint_mtimer13: timer@70ac0d0000 {
+               clint_mtimer13: timer@70ac0d4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0d0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0d4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu52_intc 7>,
                                              <&cpu53_intc 7>,
                                              <&cpu54_intc 7>,
                                              <&cpu55_intc 7>;
                };
 
-               clint_mtimer14: timer@70ac0e0000 {
+               clint_mtimer14: timer@70ac0e4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0e0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0e4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu56_intc 7>,
                                              <&cpu57_intc 7>,
                                              <&cpu58_intc 7>,
                                              <&cpu59_intc 7>;
                };
 
-               clint_mtimer15: timer@70ac0f0000 {
+               clint_mtimer15: timer@70ac0f4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0f0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0f4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu60_intc 7>,
                                              <&cpu61_intc 7>,
                                              <&cpu62_intc 7>,
index 905881282a7cd115fa222a68faab57545e868e10..eaf34e871e308f0db7a0a578b34940d8d551b163 100644 (file)
@@ -149,6 +149,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
deleted file mode 100644 (file)
index 89b601e..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ_IDLE=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_CFS_BANDWIDTH=y
-CONFIG_CGROUP_BPF=y
-CONFIG_NAMESPACES=y
-CONFIG_USER_NS=y
-CONFIG_CHECKPOINT_RESTORE=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_PROFILING=y
-CONFIG_SOC_SIFIVE=y
-CONFIG_SOC_VIRT=y
-CONFIG_NONPORTABLE=y
-CONFIG_ARCH_RV32I=y
-CONFIG_SMP=y
-CONFIG_HOTPLUG_CPU=y
-CONFIG_PM=y
-CONFIG_CPU_IDLE=y
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=m
-CONFIG_JUMP_LABEL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NETLINK_DIAG=y
-CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCIE_XILINX=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_VIRTIO_BLK=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_VIRTIO=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_AHCI_PLATFORM=y
-CONFIG_NETDEVICES=y
-CONFIG_VIRTIO_NET=y
-CONFIG_MACB=y
-CONFIG_E1000E=y
-CONFIG_R8169=y
-CONFIG_MICROSEMI_PHY=y
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_VIRTIO_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_VIRTIO=y
-CONFIG_SPI=y
-CONFIG_SPI_SIFIVE=y
-# CONFIG_PTP_1588_CLOCK is not set
-CONFIG_DRM=y
-CONFIG_DRM_RADEON=y
-CONFIG_DRM_VIRTIO_GPU=y
-CONFIG_FB=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_USB=y
-CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_XHCI_PLATFORM=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_UAS=y
-CONFIG_MMC=y
-CONFIG_MMC_SPI=y
-CONFIG_RTC_CLASS=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_INPUT=y
-CONFIG_VIRTIO_MMIO=y
-CONFIG_RPMSG_CHAR=y
-CONFIG_RPMSG_CTRL=y
-CONFIG_RPMSG_VIRTIO=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_AUTOFS_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_HUGETLBFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_V4_1=y
-CONFIG_NFS_V4_2=y
-CONFIG_ROOT_NFS=y
-CONFIG_9P_FS=y
-CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_CRYPTO_DEV_VIRTIO=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_PAGEALLOC=y
-CONFIG_SCHED_STACK_END_CHECK=y
-CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_VM_PGFLAGS=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_PER_CPU_MAPS=y
-CONFIG_SOFTLOCKUP_DETECTOR=y
-CONFIG_WQ_WATCHDOG=y
-CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_RWSEMS=y
-CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_STACKTRACE=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PLIST=y
-CONFIG_DEBUG_SG=y
-# CONFIG_RCU_TRACE is not set
-CONFIG_RCU_EQS_DEBUG=y
-# CONFIG_FTRACE is not set
-# CONFIG_RUNTIME_TESTING_MENU is not set
-CONFIG_MEMTEST=y
index 0554ed4bf087cf6cd06dc2967c0c2c38e9784887..b1c410bbc1aece3c1fe0bea8cbd68271c8c0e29a 100644 (file)
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
+#include <asm/dma-noncoherent.h>
 #include <asm/errata_list.h>
 #include <asm/hwprobe.h>
+#include <asm/io.h>
 #include <asm/patch.h>
 #include <asm/vendorid_list.h>
 
@@ -33,6 +35,69 @@ static bool errata_probe_pbmt(unsigned int stage,
        return false;
 }
 
+/*
+ * th.dcache.ipa rs1 (invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ *   0000001    01010      rs1       000      00000  0001011
+ * th.dcache.iva rs1 (invalidate, virtual address)
+ *   0000001    00110      rs1       000      00000  0001011
+ *
+ * th.dcache.cpa rs1 (clean, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ *   0000001    01001      rs1       000      00000  0001011
+ * th.dcache.cva rs1 (clean, virtual address)
+ *   0000001    00101      rs1       000      00000  0001011
+ *
+ * th.dcache.cipa rs1 (clean then invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ *   0000001    01011      rs1       000      00000  0001011
+ * th.dcache.civa rs1 (clean then invalidate, virtual address)
+ *   0000001    00111      rs1       000      00000  0001011
+ *
+ * th.sync.s (make sure all cache operations finished)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ *   0000000    11001     00000      000      00000  0001011
+ */
+#define THEAD_INVAL_A0 ".long 0x02a5000b"
+#define THEAD_CLEAN_A0 ".long 0x0295000b"
+#define THEAD_FLUSH_A0 ".long 0x02b5000b"
+#define THEAD_SYNC_S   ".long 0x0190000b"
+
+#define THEAD_CMO_OP(_op, _start, _size, _cachesize)                   \
+asm volatile("mv a0, %1\n\t"                                           \
+            "j 2f\n\t"                                                 \
+            "3:\n\t"                                                   \
+            THEAD_##_op##_A0 "\n\t"                                    \
+            "add a0, a0, %0\n\t"                                       \
+            "2:\n\t"                                                   \
+            "bltu a0, %2, 3b\n\t"                                      \
+            THEAD_SYNC_S                                               \
+            : : "r"(_cachesize),                                       \
+                "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)),  \
+                "r"((unsigned long)(_start) + (_size))                 \
+            : "a0")
+
+static void thead_errata_cache_inv(phys_addr_t paddr, size_t size)
+{
+       THEAD_CMO_OP(INVAL, paddr, size, riscv_cbom_block_size);
+}
+
+static void thead_errata_cache_wback(phys_addr_t paddr, size_t size)
+{
+       THEAD_CMO_OP(CLEAN, paddr, size, riscv_cbom_block_size);
+}
+
+static void thead_errata_cache_wback_inv(phys_addr_t paddr, size_t size)
+{
+       THEAD_CMO_OP(FLUSH, paddr, size, riscv_cbom_block_size);
+}
+
+static const struct riscv_nonstd_cache_ops thead_errata_cmo_ops = {
+       .wback = &thead_errata_cache_wback,
+       .inv = &thead_errata_cache_inv,
+       .wback_inv = &thead_errata_cache_wback_inv,
+};
+
 static bool errata_probe_cmo(unsigned int stage,
                             unsigned long arch_id, unsigned long impid)
 {
@@ -48,6 +113,7 @@ static bool errata_probe_cmo(unsigned int stage,
        if (stage == RISCV_ALTERNATIVES_BOOT) {
                riscv_cbom_block_size = L1_CACHE_BYTES;
                riscv_noncoherent_supported();
+               riscv_noncoherent_register_cache_ops(&thead_errata_cmo_ops);
        }
 
        return true;
@@ -77,8 +143,7 @@ static u32 thead_errata_probe(unsigned int stage,
        if (errata_probe_pbmt(stage, archid, impid))
                cpu_req_errata |= BIT(ERRATA_THEAD_PBMT);
 
-       if (errata_probe_cmo(stage, archid, impid))
-               cpu_req_errata |= BIT(ERRATA_THEAD_CMO);
+       errata_probe_cmo(stage, archid, impid);
 
        if (errata_probe_pmu(stage, archid, impid))
                cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
diff --git a/arch/riscv/include/asm/arch_hweight.h b/arch/riscv/include/asm/arch_hweight.h
new file mode 100644 (file)
index 0000000..85b2c44
--- /dev/null
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Based on arch/x86/include/asm/arch_hweight.h
+ */
+
+#ifndef _ASM_RISCV_HWEIGHT_H
+#define _ASM_RISCV_HWEIGHT_H
+
+#include <asm/alternative-macros.h>
+#include <asm/hwcap.h>
+
+#if (BITS_PER_LONG == 64)
+#define CPOPW  "cpopw "
+#elif (BITS_PER_LONG == 32)
+#define CPOPW  "cpop "
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+static __always_inline unsigned int __arch_hweight32(unsigned int w)
+{
+#ifdef CONFIG_RISCV_ISA_ZBB
+       asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+                                     RISCV_ISA_EXT_ZBB, 1)
+                         : : : : legacy);
+
+       asm (".option push\n"
+            ".option arch,+zbb\n"
+            CPOPW "%0, %0\n"
+            ".option pop\n"
+            : "+r" (w) : :);
+
+       return w;
+
+legacy:
+#endif
+       return __sw_hweight32(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+       return __arch_hweight32(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+       return __arch_hweight32(w & 0xff);
+}
+
+#if BITS_PER_LONG == 64
+static __always_inline unsigned long __arch_hweight64(__u64 w)
+{
+# ifdef CONFIG_RISCV_ISA_ZBB
+       asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+                                     RISCV_ISA_EXT_ZBB, 1)
+                         : : : : legacy);
+
+       asm (".option push\n"
+            ".option arch,+zbb\n"
+            "cpop %0, %0\n"
+            ".option pop\n"
+            : "+r" (w) : :);
+
+       return w;
+
+legacy:
+# endif
+       return __sw_hweight64(w);
+}
+#else /* BITS_PER_LONG == 64 */
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+       return  __arch_hweight32((u32)w) +
+               __arch_hweight32((u32)(w >> 32));
+}
+#endif /* !(BITS_PER_LONG == 64) */
+
+#endif /* _ASM_RISCV_HWEIGHT_H */
diff --git a/arch/riscv/include/asm/archrandom.h b/arch/riscv/include/asm/archrandom.h
new file mode 100644 (file)
index 0000000..5345360
--- /dev/null
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel interface for the RISCV arch_random_* functions
+ *
+ * Copyright (c) 2023 Rivos Inc.
+ *
+ */
+
+#ifndef ASM_RISCV_ARCHRANDOM_H
+#define ASM_RISCV_ARCHRANDOM_H
+
+#include <asm/csr.h>
+#include <asm/processor.h>
+
+/* Upper bound on SEED CSR polls before giving up on filling one long. */
+#define SEED_RETRY_LOOPS 100
+
+/*
+ * Fill @v with hardware entropy from the Zkr SEED CSR.
+ *
+ * Each successful poll (OPST == ES16) yields 16 entropy bits, so
+ * sizeof(long)/sizeof(u16) polls are needed to fill one long.  Returns
+ * true once @v is completely filled; false if the entropy source reports
+ * DEAD or SEED_RETRY_LOOPS polls are exhausted.  NOTE: on failure @v may
+ * have been partially overwritten with the entropy gathered so far.
+ */
+static inline bool __must_check csr_seed_long(unsigned long *v)
+{
+       unsigned int retry = SEED_RETRY_LOOPS, valid_seeds = 0;
+       const int needed_seeds = sizeof(long) / sizeof(u16);
+       u16 *entropy = (u16 *)v;
+
+       do {
+               /*
+                * The SEED CSR must be accessed with a read-write instruction.
+                */
+               unsigned long csr_seed = csr_swap(CSR_SEED, 0);
+               unsigned long opst = csr_seed & SEED_OPST_MASK;
+
+               switch (opst) {
+               case SEED_OPST_ES16:
+                       /* 16 valid entropy bits in the low half of the CSR. */
+                       entropy[valid_seeds++] = csr_seed & SEED_ENTROPY_MASK;
+                       if (valid_seeds == needed_seeds)
+                               return true;
+                       break;
+
+               case SEED_OPST_DEAD:
+                       pr_err_once("archrandom: Unrecoverable error\n");
+                       return false;
+
+               case SEED_OPST_BIST:
+               case SEED_OPST_WAIT:
+               default:
+                       /* Entropy not ready yet; back off and poll again. */
+                       cpu_relax();
+                       continue;
+               }
+       } while (--retry);
+
+       return false;
+}
+
+/* No arch source for non-seed random longs on riscv; always yields 0. */
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
+{
+       return 0;
+}
+
+/* Provide at most one long of seed entropy per call, via the Zkr SEED CSR. */
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+       if (!max_longs)
+               return 0;
+
+       /*
+        * If Zkr is supported and csr_seed_long succeeds, we return one long
+        * worth of entropy.
+        */
+       if (riscv_has_extension_likely(RISCV_ISA_EXT_ZKR) && csr_seed_long(v))
+               return 1;
+
+       return 0;
+}
+
+#endif /* ASM_RISCV_ARCHRANDOM_H */
index 00a96e7a966445175a687d481efa29f6862b3675..0c8bfd54fc4e05beec2fed22fc7f73ddcc997ab7 100644 (file)
@@ -6,6 +6,7 @@
 #define EX_TYPE_FIXUP                  1
 #define EX_TYPE_BPF                    2
 #define EX_TYPE_UACCESS_ERR_ZERO       3
+#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
 
 #ifdef CONFIG_MMU
 
 #define EX_DATA_REG_ZERO_SHIFT 5
 #define EX_DATA_REG_ZERO       GENMASK(9, 5)
 
+#define EX_DATA_REG_DATA_SHIFT 0
+#define EX_DATA_REG_DATA       GENMASK(4, 0)
+#define EX_DATA_REG_ADDR_SHIFT 5
+#define EX_DATA_REG_ADDR       GENMASK(9, 5)
+
 #define EX_DATA_REG(reg, gpr)                                          \
        "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
 
 #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)                     \
        _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
 
+#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr)           \
+       __DEFINE_ASM_GPR_NUMS                                                   \
+       __ASM_EXTABLE_RAW(#insn, #fixup,                                        \
+                         __stringify(EX_TYPE_LOAD_UNALIGNED_ZEROPAD),          \
+                         "("                                                   \
+                           EX_DATA_REG(DATA, data) " | "                       \
+                           EX_DATA_REG(ADDR, addr)                             \
+                         ")")
+
 #endif /* __ASSEMBLY__ */
 
 #else /* CONFIG_MMU */
index 36b955c762ba08e92ca0441ee8fbae9219c1f2fa..cd627ec289f163a630b73dd03dd52a6b28692997 100644 (file)
@@ -9,6 +9,33 @@ long long __lshrti3(long long a, int b);
 long long __ashrti3(long long a, int b);
 long long __ashlti3(long long a, int b);
 
+#ifdef CONFIG_RISCV_ISA_V
+
+#ifdef CONFIG_MMU
+asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n);
+#endif /* CONFIG_MMU  */
+
+void xor_regs_2_(unsigned long bytes, unsigned long *__restrict p1,
+                const unsigned long *__restrict p2);
+void xor_regs_3_(unsigned long bytes, unsigned long *__restrict p1,
+                const unsigned long *__restrict p2,
+                const unsigned long *__restrict p3);
+void xor_regs_4_(unsigned long bytes, unsigned long *__restrict p1,
+                const unsigned long *__restrict p2,
+                const unsigned long *__restrict p3,
+                const unsigned long *__restrict p4);
+void xor_regs_5_(unsigned long bytes, unsigned long *__restrict p1,
+                const unsigned long *__restrict p2,
+                const unsigned long *__restrict p3,
+                const unsigned long *__restrict p4,
+                const unsigned long *__restrict p5);
+
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs);
+asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs);
+#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
+
+#endif /* CONFIG_RISCV_ISA_V */
 
 #define DECLARE_DO_ERROR_INFO(name)    asmlinkage void name(struct pt_regs *regs)
 
index 224b4dc02b50bc6761cbef064445e472ef053ce2..329d8244a9b3fd516104808db5a959acfb469b22 100644 (file)
@@ -39,7 +39,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word)
 {
        int num;
 
-       asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+       asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
                                      RISCV_ISA_EXT_ZBB, 1)
                          : : : : legacy);
 
@@ -95,7 +95,7 @@ static __always_inline unsigned long variable__fls(unsigned long word)
 {
        int num;
 
-       asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+       asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
                                      RISCV_ISA_EXT_ZBB, 1)
                          : : : : legacy);
 
@@ -154,7 +154,7 @@ static __always_inline int variable_ffs(int x)
        if (!x)
                return 0;
 
-       asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+       asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
                                      RISCV_ISA_EXT_ZBB, 1)
                          : : : : legacy);
 
@@ -209,7 +209,7 @@ static __always_inline int variable_fls(unsigned int x)
        if (!x)
                return 0;
 
-       asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+       asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
                                      RISCV_ISA_EXT_ZBB, 1)
                          : : : : legacy);
 
@@ -271,7 +271,9 @@ legacy:
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
 
-#include <asm-generic/bitops/hweight.h>
+#include <asm/arch_hweight.h>
+
+#include <asm-generic/bitops/const_hweight.h>
 
 #if (BITS_PER_LONG == 64)
 #define __AMO(op)      "amo" #op ".d"
index 3cb53c4df27cfe4772f2f74623fc861f6a72360f..a129dac4521d35d69af22f713f5cdad27edf7f86 100644 (file)
@@ -37,7 +37,8 @@ static inline void flush_dcache_page(struct page *page)
        flush_icache_mm(vma->vm_mm, 0)
 
 #ifdef CONFIG_64BIT
-#define flush_cache_vmap(start, end)   flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap(start, end)           flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap_early(start, end)     local_flush_tlb_kernel_range(start, end)
 #endif
 
 #ifndef CONFIG_SMP
diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
new file mode 100644 (file)
index 0000000..88e6f14
--- /dev/null
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Checksum routines
+ *
+ * Copyright (C) 2023 Rivos Inc.
+ */
+#ifndef __ASM_RISCV_CHECKSUM_H
+#define __ASM_RISCV_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <linux/uaccess.h>
+
+#define ip_fast_csum ip_fast_csum
+
+/* Arch-optimized ones' complement sum over @buff (defined out of line). */
+extern unsigned int do_csum(const unsigned char *buff, int len);
+#define do_csum do_csum
+
+/* Default version is sufficient for 32 bit */
+#ifndef CONFIG_32BIT
+#define _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+                       const struct in6_addr *daddr,
+                       __u32 len, __u8 proto, __wsum sum);
+#endif
+
+/* Define riscv versions of functions before importing asm-generic/checksum.h */
+#include <asm-generic/checksum.h>
+
+/**
+ * ip_fast_csum - quickly compute an IPv4 header checksum
+ * @iph: assumed to be word aligned given that NET_IP_ALIGN is set to 2 on
+ *  riscv, defining IP headers to be aligned.
+ * @ihl: the number of 32 bit segments and must be greater than or equal to 5.
+ *
+ * Assumes that IPv4 headers will always be in multiples of 32-bits, and
+ * have an ihl of at least 5.
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+       unsigned long csum = 0;
+       int pos = 0;
+
+       /*
+        * Sum the header words.  On 64-bit the carries accumulate in the
+        * upper half of the long; on 32-bit they must be added back by hand.
+        */
+       do {
+               csum += ((const unsigned int *)iph)[pos];
+               if (IS_ENABLED(CONFIG_32BIT))
+                       csum += csum < ((const unsigned int *)iph)[pos];
+       } while (++pos < ihl);
+
+       /*
+        * ZBB only saves three instructions on 32-bit and five on 64-bit so not
+        * worth checking if supported without Alternatives.
+        */
+       if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+           IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+               unsigned long fold_temp;
+
+               /* Patched to a nop when the CPU implements Zbb. */
+               asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
+                                             RISCV_ISA_EXT_ZBB, 1)
+                   :
+                   :
+                   :
+                   : no_zbb);
+
+               if (IS_ENABLED(CONFIG_32BIT)) {
+                       /* Fold the 32-bit sum to 16 bits with a Zbb rotate. */
+                       asm(".option push                               \n\
+                       .option arch,+zbb                               \n\
+                               not     %[fold_temp], %[csum]           \n\
+                               rori    %[csum], %[csum], 16            \n\
+                               sub     %[csum], %[fold_temp], %[csum]  \n\
+                       .option pop"
+                       : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
+               } else {
+                       /* Fold the 64-bit sum to 32 bits, then to 16 bits. */
+                       asm(".option push                               \n\
+                       .option arch,+zbb                               \n\
+                               rori    %[fold_temp], %[csum], 32       \n\
+                               add     %[csum], %[fold_temp], %[csum]  \n\
+                               srli    %[csum], %[csum], 32            \n\
+                               not     %[fold_temp], %[csum]           \n\
+                               roriw   %[csum], %[csum], 16            \n\
+                               subw    %[csum], %[fold_temp], %[csum]  \n\
+                       .option pop"
+                       : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
+               }
+               return (__force __sum16)(csum >> 16);
+       }
+no_zbb:
+#ifndef CONFIG_32BIT
+       /* Fold the 64-bit sum into 32 bits before the generic 16-bit fold. */
+       csum += ror64(csum, 32);
+       csum >>= 32;
+#endif
+       return csum_fold((__force __wsum)csum);
+}
+
+#endif /* __ASM_RISCV_CHECKSUM_H */
index aa128466c4d4ec7b533dd8403d8c43e97aff0eac..176b570ef982761bec7810eda1332b105e640623 100644 (file)
 /**
  * struct cpu_operations - Callback operations for hotplugging CPUs.
  *
- * @name:              Name of the boot protocol.
- * @cpu_prepare:       Early one-time preparation step for a cpu. If there
- *                     is a mechanism for doing so, tests whether it is
- *                     possible to boot the given HART.
  * @cpu_start:         Boots a cpu into the kernel.
- * @cpu_disable:       Prepares a cpu to die. May fail for some
- *                     mechanism-specific reason, which will cause the hot
- *                     unplug to be aborted. Called from the cpu to be killed.
  * @cpu_stop:          Makes a cpu leave the kernel. Must not fail. Called from
  *                     the cpu being stopped.
  * @cpu_is_stopped:    Ensures a cpu has left the kernel. Called from another
  *                     cpu.
  */
 struct cpu_operations {
-       const char      *name;
-       int             (*cpu_prepare)(unsigned int cpu);
        int             (*cpu_start)(unsigned int cpu,
                                     struct task_struct *tidle);
 #ifdef CONFIG_HOTPLUG_CPU
-       int             (*cpu_disable)(unsigned int cpu);
        void            (*cpu_stop)(void);
        int             (*cpu_is_stopped)(unsigned int cpu);
 #endif
 };
 
 extern const struct cpu_operations cpu_ops_spinwait;
-extern const struct cpu_operations *cpu_ops[NR_CPUS];
-void __init cpu_set_ops(int cpu);
+extern const struct cpu_operations *cpu_ops;
+void __init cpu_set_ops(void);
 
 #endif /* ifndef __ASM_CPU_OPS_H */
index a418c3112cd60cf2207b455d78c4520539364fc3..0bd11862b7607b9ffebf8460ea6cc00cc1e4ff62 100644 (file)
@@ -59,6 +59,8 @@ struct riscv_isa_ext_data {
        const unsigned int id;
        const char *name;
        const char *property;
+       const unsigned int *subset_ext_ids;
+       const unsigned int subset_ext_size;
 };
 
 extern const struct riscv_isa_ext_data riscv_isa_ext[];
@@ -67,7 +69,7 @@ extern bool riscv_isa_fallback;
 
 unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
 
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit);
 #define riscv_isa_extension_available(isa_bitmap, ext) \
        __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
 
@@ -78,7 +80,7 @@ riscv_has_extension_likely(const unsigned long ext)
                           "ext must be < RISCV_ISA_EXT_MAX");
 
        if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
-               asm_volatile_goto(
+               asm goto(
                ALTERNATIVE("j  %l[l_no]", "nop", 0, %[ext], 1)
                :
                : [ext] "i" (ext)
@@ -101,7 +103,7 @@ riscv_has_extension_unlikely(const unsigned long ext)
                           "ext must be < RISCV_ISA_EXT_MAX");
 
        if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
-               asm_volatile_goto(
+               asm goto(
                ALTERNATIVE("nop", "j   %l[l_yes]", 0, %[ext], 1)
                :
                : [ext] "i" (ext)
@@ -133,4 +135,6 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
        return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
 }
 
+DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
+
 #endif
index 306a19a5509c10e63663330b04e1118abb054b74..510014051f5dbb1aa61098e4974e7e7ac02145ee 100644 (file)
 #define CSR_VTYPE              0xc21
 #define CSR_VLENB              0xc22
 
+/* Scalar Crypto Extension - Entropy */
+#define CSR_SEED               0x015
+#define SEED_OPST_MASK         _AC(0xC0000000, UL)
+#define SEED_OPST_BIST         _AC(0x00000000, UL)
+#define SEED_OPST_WAIT         _AC(0x40000000, UL)
+#define SEED_OPST_ES16         _AC(0x80000000, UL)
+#define SEED_OPST_DEAD         _AC(0xC0000000, UL)
+#define SEED_ENTROPY_MASK      _AC(0xFFFF, UL)
+
 #ifdef CONFIG_RISCV_M_MODE
 # define CSR_STATUS    CSR_MSTATUS
 # define CSR_IE                CSR_MIE
index 7ab5e34318c85fe05df525a5f80a49d25051bcd7..2293e535f8659af02ef2e52ce1752827c415532e 100644 (file)
@@ -4,6 +4,23 @@
 #define _ASM_RISCV_ENTRY_COMMON_H
 
 #include <asm/stacktrace.h>
+#include <asm/thread_info.h>
+#include <asm/vector.h>
+
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+                                                 unsigned long ti_work)
+{
+       if (ti_work & _TIF_RISCV_V_DEFER_RESTORE) {
+               clear_thread_flag(TIF_RISCV_V_DEFER_RESTORE);
+               /*
+                * We are already called with irq disabled, so go without
+                * keeping track of riscv_v_flags.
+                */
+               riscv_v_vstate_restore(&current->thread.vstate, regs);
+       }
+}
+
+#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
 
 void handle_page_fault(struct pt_regs *regs);
 void handle_break(struct pt_regs *regs);
index 83ed25e4355343c25101882b7c0b31cf462af542..ea33288f8a25b4f76e59bd65e8f869ee842c6e14 100644 (file)
@@ -24,9 +24,8 @@
 
 #ifdef CONFIG_ERRATA_THEAD
 #define        ERRATA_THEAD_PBMT 0
-#define        ERRATA_THEAD_CMO 1
-#define        ERRATA_THEAD_PMU 2
-#define        ERRATA_THEAD_NUMBER 3
+#define        ERRATA_THEAD_PMU 1
+#define        ERRATA_THEAD_NUMBER 2
 #endif
 
 #ifdef __ASSEMBLY__
@@ -94,54 +93,17 @@ asm volatile(ALTERNATIVE(                                           \
 #define ALT_THEAD_PMA(_val)
 #endif
 
-/*
- * th.dcache.ipa rs1 (invalidate, physical address)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- *   0000001    01010      rs1       000      00000  0001011
- * th.dache.iva rs1 (invalida, virtual address)
- *   0000001    00110      rs1       000      00000  0001011
- *
- * th.dcache.cpa rs1 (clean, physical address)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- *   0000001    01001      rs1       000      00000  0001011
- * th.dcache.cva rs1 (clean, virtual address)
- *   0000001    00101      rs1       000      00000  0001011
- *
- * th.dcache.cipa rs1 (clean then invalidate, physical address)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- *   0000001    01011      rs1       000      00000  0001011
- * th.dcache.civa rs1 (... virtual address)
- *   0000001    00111      rs1       000      00000  0001011
- *
- * th.sync.s (make sure all cache operations finished)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- *   0000000    11001     00000      000      00000  0001011
- */
-#define THEAD_INVAL_A0 ".long 0x0265000b"
-#define THEAD_CLEAN_A0 ".long 0x0255000b"
-#define THEAD_FLUSH_A0 ".long 0x0275000b"
-#define THEAD_SYNC_S   ".long 0x0190000b"
-
 #define ALT_CMO_OP(_op, _start, _size, _cachesize)                     \
-asm volatile(ALTERNATIVE_2(                                            \
-       __nops(6),                                                      \
+asm volatile(ALTERNATIVE(                                              \
+       __nops(5),                                                      \
        "mv a0, %1\n\t"                                                 \
        "j 2f\n\t"                                                      \
        "3:\n\t"                                                        \
        CBO_##_op(a0)                                                   \
        "add a0, a0, %0\n\t"                                            \
        "2:\n\t"                                                        \
-       "bltu a0, %2, 3b\n\t"                                           \
-       "nop", 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM,        \
-       "mv a0, %1\n\t"                                                 \
-       "j 2f\n\t"                                                      \
-       "3:\n\t"                                                        \
-       THEAD_##_op##_A0 "\n\t"                                         \
-       "add a0, a0, %0\n\t"                                            \
-       "2:\n\t"                                                        \
-       "bltu a0, %2, 3b\n\t"                                           \
-       THEAD_SYNC_S, THEAD_VENDOR_ID,                                  \
-                       ERRATA_THEAD_CMO, CONFIG_ERRATA_THEAD_CMO)      \
+       "bltu a0, %2, 3b\n\t",                                          \
+       0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM)               \
        : : "r"(_cachesize),                                            \
            "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)),       \
            "r"((unsigned long)(_start) + (_size))                      \
index 2b2f5df7ef2c7de42216b4166ae3d1f4a789731f..3291721229523456247532009bc2ed2ddc444540 100644 (file)
@@ -128,7 +128,23 @@ do {                                                                       \
 struct dyn_ftrace;
 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
 #define ftrace_init_nop ftrace_init_nop
-#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+struct ftrace_ops;
+struct ftrace_regs;
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+                      struct ftrace_ops *op, struct ftrace_regs *fregs);
+#define ftrace_graph_func ftrace_graph_func
+
+static inline void __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+{
+               regs->t1 = addr;
+}
+#define arch_ftrace_set_direct_caller(fregs, addr) \
+       __arch_ftrace_set_direct_caller(&(fregs)->regs, addr)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
index 4c5b0e929890fadcebb3caace0afe97dfa46d8bf..20f9c3ba2341412812ba003caf86f546c162bd34 100644 (file)
@@ -11,6 +11,9 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 }
 #define arch_clear_hugepage_flags arch_clear_hugepage_flags
 
+bool arch_hugetlb_migration_supported(struct hstate *h);
+#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
 #define __HAVE_ARCH_HUGE_PTE_CLEAR
 void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
index 06d30526ef3b837d4e6c7fe8d14cb39f11e676f1..5340f818746b71a805319eb6f941fa311c9b36a2 100644 (file)
 #include <uapi/asm/hwcap.h>
 
 #define RISCV_ISA_EXT_a                ('a' - 'a')
-#define RISCV_ISA_EXT_b                ('b' - 'a')
 #define RISCV_ISA_EXT_c                ('c' - 'a')
 #define RISCV_ISA_EXT_d                ('d' - 'a')
 #define RISCV_ISA_EXT_f                ('f' - 'a')
 #define RISCV_ISA_EXT_h                ('h' - 'a')
 #define RISCV_ISA_EXT_i                ('i' - 'a')
-#define RISCV_ISA_EXT_j                ('j' - 'a')
-#define RISCV_ISA_EXT_k                ('k' - 'a')
 #define RISCV_ISA_EXT_m                ('m' - 'a')
-#define RISCV_ISA_EXT_p                ('p' - 'a')
 #define RISCV_ISA_EXT_q                ('q' - 'a')
-#define RISCV_ISA_EXT_s                ('s' - 'a')
-#define RISCV_ISA_EXT_u                ('u' - 'a')
 #define RISCV_ISA_EXT_v                ('v' - 'a')
 
 /*
 #define RISCV_ISA_EXT_ZIHPM            42
 #define RISCV_ISA_EXT_SMSTATEEN                43
 #define RISCV_ISA_EXT_ZICOND           44
+#define RISCV_ISA_EXT_ZBC              45
+#define RISCV_ISA_EXT_ZBKB             46
+#define RISCV_ISA_EXT_ZBKC             47
+#define RISCV_ISA_EXT_ZBKX             48
+#define RISCV_ISA_EXT_ZKND             49
+#define RISCV_ISA_EXT_ZKNE             50
+#define RISCV_ISA_EXT_ZKNH             51
+#define RISCV_ISA_EXT_ZKR              52
+#define RISCV_ISA_EXT_ZKSED            53
+#define RISCV_ISA_EXT_ZKSH             54
+#define RISCV_ISA_EXT_ZKT              55
+#define RISCV_ISA_EXT_ZVBB             56
+#define RISCV_ISA_EXT_ZVBC             57
+#define RISCV_ISA_EXT_ZVKB             58
+#define RISCV_ISA_EXT_ZVKG             59
+#define RISCV_ISA_EXT_ZVKNED           60
+#define RISCV_ISA_EXT_ZVKNHA           61
+#define RISCV_ISA_EXT_ZVKNHB           62
+#define RISCV_ISA_EXT_ZVKSED           63
+#define RISCV_ISA_EXT_ZVKSH            64
+#define RISCV_ISA_EXT_ZVKT             65
+#define RISCV_ISA_EXT_ZFH              66
+#define RISCV_ISA_EXT_ZFHMIN           67
+#define RISCV_ISA_EXT_ZIHINTNTL                68
+#define RISCV_ISA_EXT_ZVFH             69
+#define RISCV_ISA_EXT_ZVFHMIN          70
+#define RISCV_ISA_EXT_ZFA              71
+#define RISCV_ISA_EXT_ZTSO             72
+#define RISCV_ISA_EXT_ZACAS            73
 
-#define RISCV_ISA_EXT_MAX              64
+#define RISCV_ISA_EXT_MAX              128
+#define RISCV_ISA_EXT_INVALID          U32_MAX
 
 #ifdef CONFIG_RISCV_M_MODE
 #define RISCV_ISA_EXT_SxAIA            RISCV_ISA_EXT_SMAIA
index 5c48f48e79a67823707806a3e7872e1ed6f3388c..630507dff5ead30a368fcae56489c5ccb988e3e7 100644 (file)
@@ -15,4 +15,28 @@ static inline bool riscv_hwprobe_key_is_valid(__s64 key)
        return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
 }
 
+/*
+ * Keys whose values are compared as bitmasks (each bit an independent
+ * capability) rather than as single exact values.
+ */
+static inline bool hwprobe_key_is_bitmask(__s64 key)
+{
+       switch (key) {
+       case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
+       case RISCV_HWPROBE_KEY_IMA_EXT_0:
+       case RISCV_HWPROBE_KEY_CPUPERF_0:
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * Test whether @pair satisfies @other_pair: the keys must match, and for
+ * bitmask keys every bit set in @other_pair's value must also be set in
+ * @pair's value; for all other keys the values must be exactly equal.
+ */
+static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
+                                         struct riscv_hwprobe *other_pair)
+{
+       if (pair->key != other_pair->key)
+               return false;
+
+       if (hwprobe_key_is_bitmask(pair->key))
+               return (pair->value & other_pair->value) == other_pair->value;
+
+       return pair->value == other_pair->value;
+}
+
 #endif
index 14a5ea8d8ef0f4a2f4477fb65778e4f8ea449e2a..4a35d787c0191475b3a5d8dc7452e448541dc8e9 100644 (file)
@@ -17,7 +17,7 @@
 static __always_inline bool arch_static_branch(struct static_key * const key,
                                               const bool branch)
 {
-       asm_volatile_goto(
+       asm goto(
                "       .align          2                       \n\t"
                "       .option push                            \n\t"
                "       .option norelax                         \n\t"
@@ -39,7 +39,7 @@ label:
 static __always_inline bool arch_static_branch_jump(struct static_key * const key,
                                                    const bool branch)
 {
-       asm_volatile_goto(
+       asm goto(
                "       .align          2                       \n\t"
                "       .option push                            \n\t"
                "       .option norelax                         \n\t"
index 0bbffd528096d97e02e070be23ae05271a283538..7388edd88986f94b7167ba4b84f802ac5fba0b91 100644 (file)
@@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
        pte_t *pte = virt_to_kpte(addr);
 
        if (protect)
-               set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+               set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT));
        else
-               set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+               set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
 
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 
index 0eefd9c991ae24e99ea946ac11615075aefbf085..484d04a92fa6b7f02a5ef0b24faee9a87f98b1bb 100644 (file)
@@ -41,6 +41,7 @@
        KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_HFENCE                 \
        KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_STEAL_UPDATE           KVM_ARCH_REQ(6)
 
 enum kvm_riscv_hfence_type {
        KVM_RISCV_HFENCE_UNKNOWN = 0,
@@ -262,13 +263,17 @@ struct kvm_vcpu_arch {
 
        /* 'static' configurations which are set only once */
        struct kvm_vcpu_config cfg;
+
+       /* SBI steal-time accounting */
+       struct {
+               gpa_t shmem;
+               u64 last_steal;
+       } sta;
 };
 
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-
 #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER         12
 
 void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
@@ -372,4 +377,7 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
 
+void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);
+
 #endif /* __RISCV_KVM_HOST_H__ */
index 6a453f7f8b562639dc1d767648364f68fa6e49ae..b96705258cf9641fbc43810cda7c01f970db1f58 100644 (file)
 #define KVM_SBI_VERSION_MINOR 0
 
 enum kvm_riscv_sbi_ext_status {
-       KVM_RISCV_SBI_EXT_UNINITIALIZED,
-       KVM_RISCV_SBI_EXT_AVAILABLE,
-       KVM_RISCV_SBI_EXT_UNAVAILABLE,
+       KVM_RISCV_SBI_EXT_STATUS_UNINITIALIZED,
+       KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE,
+       KVM_RISCV_SBI_EXT_STATUS_ENABLED,
+       KVM_RISCV_SBI_EXT_STATUS_DISABLED,
 };
 
 struct kvm_vcpu_sbi_context {
@@ -36,7 +37,7 @@ struct kvm_vcpu_sbi_extension {
        unsigned long extid_start;
        unsigned long extid_end;
 
-       bool default_unavail;
+       bool default_disabled;
 
        /**
         * SBI extension handler. It can be defined for a given extension or group of
@@ -59,11 +60,21 @@ int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg);
 int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
+                              const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
+                              const struct kvm_one_reg *reg);
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid);
+bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx);
 int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
 void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu);
 
+int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
+                                  unsigned long *reg_val);
+int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
+                                  unsigned long reg_val);
+
 #ifdef CONFIG_RISCV_SBI_V01
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
 #endif
@@ -74,6 +85,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
 
diff --git a/arch/riscv/include/asm/paravirt.h b/arch/riscv/include/asm/paravirt.h
new file mode 100644 (file)
index 0000000..c0abde7
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Paravirtualized steal-time accounting interface for riscv.
+ */
+#ifndef _ASM_RISCV_PARAVIRT_H
+#define _ASM_RISCV_PARAVIRT_H
+
+#ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
+/*
+ * Static keys gating steal-time accounting.  NOTE(review): presumably
+ * enabled by pv_time_init() when a steal-time source is found — confirm
+ * against the paravirt time setup code.
+ */
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+/* Default implementation backing the pv_steal_clock static call. */
+u64 dummy_steal_clock(int cpu);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+
+/*
+ * Return the time stolen from @cpu by the hypervisor, via the patchable
+ * pv_steal_clock static call.
+ */
+static inline u64 paravirt_steal_clock(int cpu)
+{
+       return static_call(pv_steal_clock)(cpu);
+}
+
+int __init pv_time_init(void);
+
+#else
+
+/* Without CONFIG_PARAVIRT there is no steal-time source; do nothing. */
+#define pv_time_init() do {} while (0)
+
+#endif /* CONFIG_PARAVIRT */
+#endif /* _ASM_RISCV_PARAVIRT_H */
diff --git a/arch/riscv/include/asm/paravirt_api_clock.h b/arch/riscv/include/asm/paravirt_api_clock.h
new file mode 100644 (file)
index 0000000..65ac7ce
--- /dev/null
@@ -0,0 +1 @@
+#include <asm/paravirt.h>
index 9a2c780a11e9530bcad95a677553dabeba67bb5f..b42017d76924f74386bc712719280af21781bb5d 100644 (file)
@@ -202,7 +202,7 @@ static inline int pud_user(pud_t pud)
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-       *pudp = pud;
+       WRITE_ONCE(*pudp, pud);
 }
 
 static inline void pud_clear(pud_t *pudp)
@@ -278,7 +278,7 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
        if (pgtable_l4_enabled)
-               *p4dp = p4d;
+               WRITE_ONCE(*p4dp, p4d);
        else
                set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
 }
@@ -340,18 +340,12 @@ static inline struct page *p4d_page(p4d_t p4d)
 #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
 
 #define pud_offset pud_offset
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
-{
-       if (pgtable_l4_enabled)
-               return p4d_pgtable(*p4d) + pud_index(address);
-
-       return (pud_t *)p4d;
-}
+pud_t *pud_offset(p4d_t *p4d, unsigned long address);
 
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
        if (pgtable_l5_enabled)
-               *pgdp = pgd;
+               WRITE_ONCE(*pgdp, pgd);
        else
                set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
 }
@@ -404,12 +398,6 @@ static inline struct page *pgd_page(pgd_t pgd)
 #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
 
 #define p4d_offset p4d_offset
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
-{
-       if (pgtable_l5_enabled)
-               return pgd_pgtable(*pgd) + p4d_index(address);
-
-       return (p4d_t *)pgd;
-}
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);
 
 #endif /* _ASM_RISCV_PGTABLE_64_H */
index 7b4287f360547e32a9fbe70cf76d47659b77c214..0c94260b5d0c126f6302f39a59507f19eed48dac 100644 (file)
@@ -248,7 +248,7 @@ static inline int pmd_leaf(pmd_t pmd)
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-       *pmdp = pmd;
+       WRITE_ONCE(*pmdp, pmd);
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -510,7 +510,7 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
  */
 static inline void set_pte(pte_t *ptep, pte_t pteval)
 {
-       *ptep = pteval;
+       WRITE_ONCE(*ptep, pteval);
 }
 
 void flush_icache_pte(pte_t pte);
@@ -544,19 +544,12 @@ static inline void pte_clear(struct mm_struct *mm,
        __set_pte_at(ptep, __pte(0));
 }
 
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline int ptep_set_access_flags(struct vm_area_struct *vma,
-                                       unsigned long address, pte_t *ptep,
-                                       pte_t entry, int dirty)
-{
-       if (!pte_same(*ptep, entry))
-               __set_pte_at(ptep, entry);
-       /*
-        * update_mmu_cache will unconditionally execute, handling both
-        * the case that the PTE changed and the spurious fault case.
-        */
-       return true;
-}
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS      /* defined in mm/pgtable.c */
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+                                pte_t *ptep, pte_t entry, int dirty);
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG  /* defined in mm/pgtable.c */
+extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
+                                    pte_t *ptep);
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
@@ -569,16 +562,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
        return pte;
 }
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
-                                           unsigned long address,
-                                           pte_t *ptep)
-{
-       if (!pte_young(*ptep))
-               return 0;
-       return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
-}
-
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pte_t *ptep)
@@ -882,7 +865,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 #define TASK_SIZE_MIN  (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
 
 #ifdef CONFIG_COMPAT
-#define TASK_SIZE_32   (_AC(0x80000000, UL) - PAGE_SIZE)
+#define TASK_SIZE_32   (_AC(0x80000000, UL))
 #define TASK_SIZE      (test_thread_flag(TIF_32BIT) ? \
                         TASK_SIZE_32 : TASK_SIZE_64)
 #else
index f19f861cda549014eee042efb651709f5da00475..a8509cc31ab25a5dcc75765bdb99e43e87dded3b 100644 (file)
@@ -16,7 +16,7 @@
 
 #ifdef CONFIG_64BIT
 #define DEFAULT_MAP_WINDOW     (UL(1) << (MMAP_VA_BITS - 1))
-#define STACK_TOP_MAX          TASK_SIZE_64
+#define STACK_TOP_MAX          TASK_SIZE
 
 #define arch_get_mmap_end(addr, len, flags)                    \
 ({                                                             \
 struct task_struct;
 struct pt_regs;
 
+/*
+ * We use a flag to track in-kernel Vector context. Currently the flag has the
+ * following meaning:
+ *
+ *  - bit 0: indicates whether the in-kernel Vector context is active. The
+ *    activation of this state disables the preemption. On a non-RT kernel, it
+ *    also disable bh.
+ *  - bits 8: is used for tracking preemptible kernel-mode Vector, when
+ *    RISCV_ISA_V_PREEMPTIVE is enabled. Calling kernel_vector_begin() does not
+ *    disable the preemption if the thread's kernel_vstate.datap is allocated.
+ *    Instead, the kernel set this bit field. Then the trap entry/exit code
+ *    knows if we are entering/exiting the context that owns preempt_v.
+ *     - 0: the task is not using preempt_v
+ *     - 1: the task is actively using preempt_v. But whether does the task own
+ *          the preempt_v context is decided by bits in RISCV_V_CTX_DEPTH_MASK.
+ *  - bit 16-23 are RISCV_V_CTX_DEPTH_MASK, used by context tracking routine
+ *     when preempt_v starts:
+ *     - 0: the task is actively using, and own preempt_v context.
+ *     - non-zero: the task was using preempt_v, but then took a trap within.
+ *       Thus, the task does not own preempt_v. Any use of Vector will have to
+ *       save preempt_v, if dirty, and fallback to non-preemptible kernel-mode
+ *       Vector.
+ *  - bit 30: The in-kernel preempt_v context is saved, and requries to be
+ *    restored when returning to the context that owns the preempt_v.
+ *  - bit 31: The in-kernel preempt_v context is dirty, as signaled by the
+ *    trap entry code. Any context switches out-of current task need to save
+ *    it to the task's in-kernel V context. Also, any traps nesting on-top-of
+ *    preempt_v requesting to use V needs a save.
+ */
+#define RISCV_V_CTX_DEPTH_MASK         0x00ff0000
+
+#define RISCV_V_CTX_UNIT_DEPTH         0x00010000
+#define RISCV_KERNEL_MODE_V            0x00000001
+#define RISCV_PREEMPT_V                        0x00000100
+#define RISCV_PREEMPT_V_DIRTY          0x80000000
+#define RISCV_PREEMPT_V_NEED_RESTORE   0x40000000
+
 /* CPU-specific state of a task */
 struct thread_struct {
        /* Callee-saved registers */
@@ -81,9 +118,11 @@ struct thread_struct {
        unsigned long s[12];    /* s[0]: frame pointer */
        struct __riscv_d_ext_state fstate;
        unsigned long bad_cause;
-       unsigned long vstate_ctrl;
+       u32 riscv_v_flags;
+       u32 vstate_ctrl;
        struct __riscv_v_ext_state vstate;
        unsigned long align_ctl;
+       struct __riscv_v_ext_state kernel_vstate;
 };
 
 /* Whitelist the fstate from the task_struct for hardened usercopy */
index 0892f4421bc4a5d0046750930b5637b355c15c26..6e68f8dff76bc6d09f7a5e555e54474587021ed9 100644 (file)
@@ -29,8 +29,10 @@ enum sbi_ext_id {
        SBI_EXT_RFENCE = 0x52464E43,
        SBI_EXT_HSM = 0x48534D,
        SBI_EXT_SRST = 0x53525354,
+       SBI_EXT_SUSP = 0x53555350,
        SBI_EXT_PMU = 0x504D55,
        SBI_EXT_DBCN = 0x4442434E,
+       SBI_EXT_STA = 0x535441,
 
        /* Experimentals extensions must lie within this range */
        SBI_EXT_EXPERIMENTAL_START = 0x08000000,
@@ -114,6 +116,14 @@ enum sbi_srst_reset_reason {
        SBI_SRST_RESET_REASON_SYS_FAILURE,
 };
 
+enum sbi_ext_susp_fid {
+       SBI_EXT_SUSP_SYSTEM_SUSPEND = 0,
+};
+
+enum sbi_ext_susp_sleep_type {
+       SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0,
+};
+
 enum sbi_ext_pmu_fid {
        SBI_EXT_PMU_NUM_COUNTERS = 0,
        SBI_EXT_PMU_COUNTER_GET_INFO,
@@ -243,6 +253,22 @@ enum sbi_ext_dbcn_fid {
        SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2,
 };
 
+/* SBI STA (steal-time accounting) extension */
+enum sbi_ext_sta_fid {
+       SBI_EXT_STA_STEAL_TIME_SET_SHMEM = 0,
+};
+
+struct sbi_sta_struct {
+       __le32 sequence;
+       __le32 flags;
+       __le64 steal;
+       u8 preempted;
+       u8 pad[47];
+} __packed;
+
+#define SBI_STA_SHMEM_DISABLE          -1
+
+/* SBI spec version fields */
 #define SBI_SPEC_VERSION_DEFAULT       0x1
 #define SBI_SPEC_VERSION_MAJOR_SHIFT   24
 #define SBI_SPEC_VERSION_MAJOR_MASK    0x7f
@@ -271,8 +297,13 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
                        unsigned long arg3, unsigned long arg4,
                        unsigned long arg5);
 
+#ifdef CONFIG_RISCV_SBI_V01
 void sbi_console_putchar(int ch);
 int sbi_console_getchar(void);
+#else
+static inline void sbi_console_putchar(int ch) { }
+static inline int sbi_console_getchar(void) { return -ENOENT; }
+#endif
 long sbi_get_mvendorid(void);
 long sbi_get_marchid(void);
 long sbi_get_mimpid(void);
@@ -329,6 +360,11 @@ static inline unsigned long sbi_mk_version(unsigned long major,
 }
 
 int sbi_err_map_linux_errno(int err);
+
+extern bool sbi_debug_console_available;
+int sbi_debug_console_write(const char *bytes, unsigned int num_bytes);
+int sbi_debug_console_read(char *bytes, unsigned int num_bytes);
+
 #else /* CONFIG_RISCV_SBI */
 static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
 static inline void sbi_init(void) {}
index 32336e8a17cb071dce1acfd70e8f00e1765d3d05..a393d5035c54330874c49ca982f641f13c53c02e 100644 (file)
@@ -13,6 +13,7 @@ extern char _start_kernel[];
 extern char __init_data_begin[], __init_data_end[];
 extern char __init_text_begin[], __init_text_end[];
 extern char __alt_start[], __alt_end[];
+extern char __exittext_begin[], __exittext_end[];
 
 static inline bool is_va_kernel_text(uintptr_t va)
 {
diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h
new file mode 100644 (file)
index 0000000..54efbf5
--- /dev/null
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2023 SiFive
+ */
+
+#ifndef __ASM_SIMD_H
+#define __ASM_SIMD_H
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+#include <linux/thread_info.h>
+
+#include <asm/vector.h>
+
+#ifdef CONFIG_RISCV_ISA_V
+/*
+ * may_use_simd - whether it is allowable at this time to issue vector
+ *                instructions or access the vector register file
+ *
+ * Callers must not assume that the result remains true beyond the next
+ * preempt_enable() or return from softirq context.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+       /*
+        * RISCV_KERNEL_MODE_V is only set while preemption is disabled,
+        * and is clear whenever preemption is enabled.
+        */
+       if (in_hardirq() || in_nmi())
+               return false;
+
+       /*
+        * Nesting is acheived in preempt_v by spreading the control for
+        * preemptible and non-preemptible kernel-mode Vector into two fields.
+        * Always try to match with prempt_v if kernel V-context exists. Then,
+        * fallback to check non preempt_v if nesting happens, or if the config
+        * is not set.
+        */
+       if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE) && current->thread.kernel_vstate.datap) {
+               if (!riscv_preempt_v_started(current))
+                       return true;
+       }
+       /*
+        * Non-preemptible kernel-mode Vector temporarily disables bh. So we
+        * must not return true on irq_disabled(). Otherwise we would fail the
+        * lockdep check calling local_bh_enable()
+        */
+       return !irqs_disabled() && !(riscv_v_flags() & RISCV_KERNEL_MODE_V);
+}
+
+#else /* ! CONFIG_RISCV_ISA_V */
+
+static __must_check inline bool may_use_simd(void)
+{
+       return false;
+}
+
+#endif /* ! CONFIG_RISCV_ISA_V */
+
+#endif
index f7e8ef2418b99fc98362a1a977f8038f7592fbac..b1495a7e06ce693b4fc698ee4d62549bd0614700 100644 (file)
@@ -21,4 +21,9 @@ static inline bool on_thread_stack(void)
        return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
 }
 
+
+#ifdef CONFIG_VMAP_STACK
+DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
+#endif /* CONFIG_VMAP_STACK */
+
 #endif /* _ASM_RISCV_STACKTRACE_H */
index f90d8e42f3c7911908ec1f5f19929ab5ba67ff3a..7efdb0584d47ac9887126a00dcfcc045619b27b5 100644 (file)
@@ -53,8 +53,7 @@ static inline void __switch_to_fpu(struct task_struct *prev,
        struct pt_regs *regs;
 
        regs = task_pt_regs(prev);
-       if (unlikely(regs->status & SR_SD))
-               fstate_save(prev, regs);
+       fstate_save(prev, regs);
        fstate_restore(next, task_pt_regs(next));
 }
 
index 574779900bfb339eeb446e49f4aae119fe382ae3..5d473343634b9d3af3c1f1872da25e6e60f77162 100644 (file)
@@ -28,7 +28,6 @@
 
 #define THREAD_SHIFT            (PAGE_SHIFT + THREAD_SIZE_ORDER)
 #define OVERFLOW_STACK_SIZE     SZ_4K
-#define SHADOW_OVERFLOW_STACK_SIZE (1024)
 
 #define IRQ_STACK_SIZE         THREAD_SIZE
 
@@ -103,12 +102,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 #define TIF_NOTIFY_SIGNAL      9       /* signal notifications exist */
 #define TIF_UPROBE             10      /* uprobe breakpoint or singlestep */
 #define TIF_32BIT              11      /* compat-mode 32bit process */
+#define TIF_RISCV_V_DEFER_RESTORE      12 /* restore Vector before returing to user */
 
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_SIGNAL     (1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_UPROBE            (1 << TIF_UPROBE)
+#define _TIF_RISCV_V_DEFER_RESTORE     (1 << TIF_RISCV_V_DEFER_RESTORE)
 
 #define _TIF_WORK_MASK \
        (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
index 1eb5682b2af6065c9019e398df729f5b97a573c6..50b63b5c15bd8b19dac37176ef98c3489c837e05 100644 (file)
@@ -16,7 +16,7 @@ static void tlb_flush(struct mmu_gather *tlb);
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_MMU
-       if (tlb->fullmm || tlb->need_flush_all)
+       if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
                flush_tlb_mm(tlb->mm);
        else
                flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
diff --git a/arch/riscv/include/asm/tlbbatch.h b/arch/riscv/include/asm/tlbbatch.h
new file mode 100644 (file)
index 0000000..46014f7
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+#ifndef _ASM_RISCV_TLBBATCH_H
+#define _ASM_RISCV_TLBBATCH_H
+
+#include <linux/cpumask.h>
+
+struct arch_tlbflush_unmap_batch {
+       struct cpumask cpumask;
+};
+
+#endif /* _ASM_RISCV_TLBBATCH_H */
index 8f3418c5f1724ba45e412ca52e0ef59ba0140638..4112cc8d1d69f9fbde77a524820a5de1e7931acf 100644 (file)
@@ -41,11 +41,20 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end);
 void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end);
 #endif
+
+bool arch_tlbbatch_should_defer(struct mm_struct *mm);
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+                              struct mm_struct *mm,
+                              unsigned long uaddr);
+void arch_flush_tlb_batched_pending(struct mm_struct *mm);
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+
 #else /* CONFIG_SMP && CONFIG_MMU */
 
 #define flush_tlb_all() local_flush_tlb_all()
@@ -66,6 +75,7 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 
 #define flush_tlb_mm(mm) flush_tlb_all()
 #define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
+#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
 #endif /* !CONFIG_SMP || !CONFIG_MMU */
 
 #endif /* _ASM_RISCV_TLBFLUSH_H */
index 87aaef656257cbde40331aadaf1cb0b1ea374455..0cd6f0a027d1f7ae7bb95b509bad3400c9fa71a5 100644 (file)
 extern unsigned long riscv_v_vsize;
 int riscv_v_setup_vsize(void);
 bool riscv_v_first_use_handler(struct pt_regs *regs);
+void kernel_vector_begin(void);
+void kernel_vector_end(void);
+void get_cpu_vector_context(void);
+void put_cpu_vector_context(void);
+void riscv_v_thread_free(struct task_struct *tsk);
+void __init riscv_v_setup_ctx_cache(void);
+void riscv_v_thread_alloc(struct task_struct *tsk);
+
+static inline u32 riscv_v_flags(void)
+{
+       return READ_ONCE(current->thread.riscv_v_flags);
+}
 
 static __always_inline bool has_vector(void)
 {
@@ -162,36 +174,89 @@ static inline void riscv_v_vstate_discard(struct pt_regs *regs)
        __riscv_v_vstate_dirty(regs);
 }
 
-static inline void riscv_v_vstate_save(struct task_struct *task,
+static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
                                       struct pt_regs *regs)
 {
        if ((regs->status & SR_VS) == SR_VS_DIRTY) {
-               struct __riscv_v_ext_state *vstate = &task->thread.vstate;
-
                __riscv_v_vstate_save(vstate, vstate->datap);
                __riscv_v_vstate_clean(regs);
        }
 }
 
-static inline void riscv_v_vstate_restore(struct task_struct *task,
+static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
                                          struct pt_regs *regs)
 {
        if ((regs->status & SR_VS) != SR_VS_OFF) {
-               struct __riscv_v_ext_state *vstate = &task->thread.vstate;
-
                __riscv_v_vstate_restore(vstate, vstate->datap);
                __riscv_v_vstate_clean(regs);
        }
 }
 
+static inline void riscv_v_vstate_set_restore(struct task_struct *task,
+                                             struct pt_regs *regs)
+{
+       if ((regs->status & SR_VS) != SR_VS_OFF) {
+               set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
+               riscv_v_vstate_on(regs);
+       }
+}
+
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+static inline bool riscv_preempt_v_dirty(struct task_struct *task)
+{
+       return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_DIRTY);
+}
+
+static inline bool riscv_preempt_v_restore(struct task_struct *task)
+{
+       return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_NEED_RESTORE);
+}
+
+static inline void riscv_preempt_v_clear_dirty(struct task_struct *task)
+{
+       barrier();
+       task->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_DIRTY;
+}
+
+static inline void riscv_preempt_v_set_restore(struct task_struct *task)
+{
+       barrier();
+       task->thread.riscv_v_flags |= RISCV_PREEMPT_V_NEED_RESTORE;
+}
+
+static inline bool riscv_preempt_v_started(struct task_struct *task)
+{
+       return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V);
+}
+
+#else /* !CONFIG_RISCV_ISA_V_PREEMPTIVE */
+static inline bool riscv_preempt_v_dirty(struct task_struct *task) { return false; }
+static inline bool riscv_preempt_v_restore(struct task_struct *task) { return false; }
+static inline bool riscv_preempt_v_started(struct task_struct *task) { return false; }
+#define riscv_preempt_v_clear_dirty(tsk)       do {} while (0)
+#define riscv_preempt_v_set_restore(tsk)       do {} while (0)
+#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
+
 static inline void __switch_to_vector(struct task_struct *prev,
                                      struct task_struct *next)
 {
        struct pt_regs *regs;
 
-       regs = task_pt_regs(prev);
-       riscv_v_vstate_save(prev, regs);
-       riscv_v_vstate_restore(next, task_pt_regs(next));
+       if (riscv_preempt_v_started(prev)) {
+               if (riscv_preempt_v_dirty(prev)) {
+                       __riscv_v_vstate_save(&prev->thread.kernel_vstate,
+                                             prev->thread.kernel_vstate.datap);
+                       riscv_preempt_v_clear_dirty(prev);
+               }
+       } else {
+               regs = task_pt_regs(prev);
+               riscv_v_vstate_save(&prev->thread.vstate, regs);
+       }
+
+       if (riscv_preempt_v_started(next))
+               riscv_preempt_v_set_restore(next);
+       else
+               riscv_v_vstate_set_restore(next, task_pt_regs(next));
 }
 
 void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
@@ -208,11 +273,14 @@ static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
 static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
 #define riscv_v_vsize (0)
 #define riscv_v_vstate_discard(regs)           do {} while (0)
-#define riscv_v_vstate_save(task, regs)                do {} while (0)
-#define riscv_v_vstate_restore(task, regs)     do {} while (0)
+#define riscv_v_vstate_save(vstate, regs)      do {} while (0)
+#define riscv_v_vstate_restore(vstate, regs)   do {} while (0)
 #define __switch_to_vector(__prev, __next)     do {} while (0)
 #define riscv_v_vstate_off(regs)               do {} while (0)
 #define riscv_v_vstate_on(regs)                        do {} while (0)
+#define riscv_v_thread_free(tsk)               do {} while (0)
+#define  riscv_v_setup_ctx_cache()             do {} while (0)
+#define riscv_v_thread_alloc(tsk)              do {} while (0)
 
 #endif /* CONFIG_RISCV_ISA_V */
 
index 7c086ac6ecd4a82e11b9cd7cbd8d6944fb68ae1f..f3f031e34191d6fb993f650126be4dc793fee3ed 100644 (file)
@@ -9,6 +9,7 @@
 #define _ASM_RISCV_WORD_AT_A_TIME_H
 
 
+#include <asm/asm-extable.h>
 #include <linux/kernel.h>
 
 struct word_at_a_time {
@@ -45,4 +46,30 @@ static inline unsigned long find_zero(unsigned long mask)
 /* The mask we created is directly usable as a bytemask */
 #define zero_bytemask(mask) (mask)
 
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+       unsigned long ret;
+
+       /* Load word from unaligned pointer addr */
+       asm(
+       "1:     " REG_L " %0, %2\n"
+       "2:\n"
+       _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(1b, 2b, %0, %1)
+       : "=&r" (ret)
+       : "r" (addr), "m" (*(unsigned long *)addr));
+
+       return ret;
+}
+
+#endif /* CONFIG_DCACHE_WORD_ACCESS */
+
 #endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
index d4ffc3c37649ffb1808335417be4b510808ae4bf..b65bf6306f69c6f05ba40752d7933bd78382100f 100644 (file)
@@ -13,7 +13,7 @@
         add \reg, \reg, t0
 .endm
 .macro XIP_FIXUP_FLASH_OFFSET reg
-       la t1, __data_loc
+       la t0, __data_loc
        REG_L t1, _xip_phys_offset
        sub \reg, \reg, t1
        add \reg, \reg, t0
diff --git a/arch/riscv/include/asm/xor.h b/arch/riscv/include/asm/xor.h
new file mode 100644 (file)
index 0000000..9601186
--- /dev/null
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+
+#include <linux/hardirq.h>
+#include <asm-generic/xor.h>
+#ifdef CONFIG_RISCV_ISA_V
+#include <asm/vector.h>
+#include <asm/switch_to.h>
+#include <asm/asm-prototypes.h>
+
+static void xor_vector_2(unsigned long bytes, unsigned long *__restrict p1,
+                        const unsigned long *__restrict p2)
+{
+       kernel_vector_begin();
+       xor_regs_2_(bytes, p1, p2);
+       kernel_vector_end();
+}
+
+static void xor_vector_3(unsigned long bytes, unsigned long *__restrict p1,
+                        const unsigned long *__restrict p2,
+                        const unsigned long *__restrict p3)
+{
+       kernel_vector_begin();
+       xor_regs_3_(bytes, p1, p2, p3);
+       kernel_vector_end();
+}
+
+static void xor_vector_4(unsigned long bytes, unsigned long *__restrict p1,
+                        const unsigned long *__restrict p2,
+                        const unsigned long *__restrict p3,
+                        const unsigned long *__restrict p4)
+{
+       kernel_vector_begin();
+       xor_regs_4_(bytes, p1, p2, p3, p4);
+       kernel_vector_end();
+}
+
+static void xor_vector_5(unsigned long bytes, unsigned long *__restrict p1,
+                        const unsigned long *__restrict p2,
+                        const unsigned long *__restrict p3,
+                        const unsigned long *__restrict p4,
+                        const unsigned long *__restrict p5)
+{
+       kernel_vector_begin();
+       xor_regs_5_(bytes, p1, p2, p3, p4, p5);
+       kernel_vector_end();
+}
+
+static struct xor_block_template xor_block_rvv = {
+       .name = "rvv",
+       .do_2 = xor_vector_2,
+       .do_3 = xor_vector_3,
+       .do_4 = xor_vector_4,
+       .do_5 = xor_vector_5
+};
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES           \
+       do {        \
+               xor_speed(&xor_block_8regs);    \
+               xor_speed(&xor_block_32regs);    \
+               if (has_vector()) { \
+                       xor_speed(&xor_block_rvv);\
+               } \
+       } while (0)
+#endif
index b659ffcfcdb454cf4d12cd513bfcb23cf32b11c5..9f2a8e3ff2048e78201e188a5fc832a67429a06c 100644 (file)
@@ -30,6 +30,35 @@ struct riscv_hwprobe {
 #define                RISCV_HWPROBE_EXT_ZBB           (1 << 4)
 #define                RISCV_HWPROBE_EXT_ZBS           (1 << 5)
 #define                RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
+#define                RISCV_HWPROBE_EXT_ZBC           (1 << 7)
+#define                RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
+#define                RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
+#define                RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
+#define                RISCV_HWPROBE_EXT_ZKND          (1 << 11)
+#define                RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
+#define                RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
+#define                RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
+#define                RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
+#define                RISCV_HWPROBE_EXT_ZKT           (1 << 16)
+#define                RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
+#define                RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
+#define                RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
+#define                RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
+#define                RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
+#define                RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
+#define                RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
+#define                RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
+#define                RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
+#define                RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
+#define                RISCV_HWPROBE_EXT_ZFH           (1 << 27)
+#define                RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
+#define                RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
+#define                RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
+#define                RISCV_HWPROBE_EXT_ZVFHMIN       (1 << 31)
+#define                RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
+#define                RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
+#define                RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
+#define                RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
 #define RISCV_HWPROBE_KEY_CPUPERF_0    5
 #define                RISCV_HWPROBE_MISALIGNED_UNKNOWN        (0 << 0)
 #define                RISCV_HWPROBE_MISALIGNED_EMULATED       (1 << 0)
@@ -40,4 +69,7 @@ struct riscv_hwprobe {
 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE    6
 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
 
+/* Flags */
+#define RISCV_HWPROBE_WHICH_CPUS       (1 << 0)
+
 #endif
index 60d3b21dead7d8846050d20a96ef1a0b3ad1ba20..7499e88a947c086c5f569e98a899f50d098a8335 100644 (file)
@@ -139,6 +139,33 @@ enum KVM_RISCV_ISA_EXT_ID {
        KVM_RISCV_ISA_EXT_ZIHPM,
        KVM_RISCV_ISA_EXT_SMSTATEEN,
        KVM_RISCV_ISA_EXT_ZICOND,
+       KVM_RISCV_ISA_EXT_ZBC,
+       KVM_RISCV_ISA_EXT_ZBKB,
+       KVM_RISCV_ISA_EXT_ZBKC,
+       KVM_RISCV_ISA_EXT_ZBKX,
+       KVM_RISCV_ISA_EXT_ZKND,
+       KVM_RISCV_ISA_EXT_ZKNE,
+       KVM_RISCV_ISA_EXT_ZKNH,
+       KVM_RISCV_ISA_EXT_ZKR,
+       KVM_RISCV_ISA_EXT_ZKSED,
+       KVM_RISCV_ISA_EXT_ZKSH,
+       KVM_RISCV_ISA_EXT_ZKT,
+       KVM_RISCV_ISA_EXT_ZVBB,
+       KVM_RISCV_ISA_EXT_ZVBC,
+       KVM_RISCV_ISA_EXT_ZVKB,
+       KVM_RISCV_ISA_EXT_ZVKG,
+       KVM_RISCV_ISA_EXT_ZVKNED,
+       KVM_RISCV_ISA_EXT_ZVKNHA,
+       KVM_RISCV_ISA_EXT_ZVKNHB,
+       KVM_RISCV_ISA_EXT_ZVKSED,
+       KVM_RISCV_ISA_EXT_ZVKSH,
+       KVM_RISCV_ISA_EXT_ZVKT,
+       KVM_RISCV_ISA_EXT_ZFH,
+       KVM_RISCV_ISA_EXT_ZFHMIN,
+       KVM_RISCV_ISA_EXT_ZIHINTNTL,
+       KVM_RISCV_ISA_EXT_ZVFH,
+       KVM_RISCV_ISA_EXT_ZVFHMIN,
+       KVM_RISCV_ISA_EXT_ZFA,
        KVM_RISCV_ISA_EXT_MAX,
 };
 
@@ -157,9 +184,16 @@ enum KVM_RISCV_SBI_EXT_ID {
        KVM_RISCV_SBI_EXT_EXPERIMENTAL,
        KVM_RISCV_SBI_EXT_VENDOR,
        KVM_RISCV_SBI_EXT_DBCN,
+       KVM_RISCV_SBI_EXT_STA,
        KVM_RISCV_SBI_EXT_MAX,
 };
 
+/* SBI STA extension registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_sbi_sta {
+       unsigned long shmem_lo;
+       unsigned long shmem_hi;
+};
+
 /* Possible states for kvm_riscv_timer */
 #define KVM_RISCV_TIMER_STATE_OFF      0
 #define KVM_RISCV_TIMER_STATE_ON       1
@@ -241,6 +275,12 @@ enum KVM_RISCV_SBI_EXT_ID {
 #define KVM_REG_RISCV_VECTOR_REG(n)    \
                ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
 
+/* Registers for specific SBI extensions are mapped as type 10 */
+#define KVM_REG_RISCV_SBI_STATE                (0x0a << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_STA          (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_STA_REG(name)                \
+               (offsetof(struct kvm_riscv_sbi_sta, name) / sizeof(unsigned long))
+
 /* Device Control API: RISC-V AIA */
 #define KVM_DEV_RISCV_APLIC_ALIGN              0x1000
 #define KVM_DEV_RISCV_APLIC_SIZE               0x4000
index 82940b6a79a2659320c172582e36c32111b997a6..f71910718053d841a361fd97e7d62da4f86bebcf 100644 (file)
@@ -50,6 +50,7 @@ obj-y += setup.o
 obj-y  += signal.o
 obj-y  += syscall_table.o
 obj-y  += sys_riscv.o
+obj-y  += sys_hwprobe.o
 obj-y  += time.o
 obj-y  += traps.o
 obj-y  += riscv_ksyms.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_MMU) += vdso.o vdso/
 obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o
 obj-$(CONFIG_FPU)              += fpu.o
 obj-$(CONFIG_RISCV_ISA_V)      += vector.o
+obj-$(CONFIG_RISCV_ISA_V)      += kernel_mode_vector.o
 obj-$(CONFIG_SMP)              += smpboot.o
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_SMP)              += cpu_ops.o
@@ -85,6 +87,7 @@ obj-$(CONFIG_SMP)             += sbi-ipi.o
 obj-$(CONFIG_SMP) += cpu_ops_sbi.o
 endif
 obj-$(CONFIG_HOTPLUG_CPU)      += cpu-hotplug.o
+obj-$(CONFIG_PARAVIRT)         += paravirt.o
 obj-$(CONFIG_KGDB)             += kgdb.o
 obj-$(CONFIG_KEXEC_CORE)       += kexec_relocate.o crash_save_regs.o machine_kexec.o
 obj-$(CONFIG_KEXEC_FILE)       += elf_kexec.o machine_kexec_file.o
index 457a18efcb114880511f9eb342ccb2fb7e377382..28b58fc5ad1996112c67b7f6030f9ba81a26b388 100644 (file)
@@ -18,7 +18,7 @@
 
 bool cpu_has_hotplug(unsigned int cpu)
 {
-       if (cpu_ops[cpu]->cpu_stop)
+       if (cpu_ops->cpu_stop)
                return true;
 
        return false;
@@ -29,25 +29,18 @@ bool cpu_has_hotplug(unsigned int cpu)
  */
 int __cpu_disable(void)
 {
-       int ret = 0;
        unsigned int cpu = smp_processor_id();
 
-       if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop)
+       if (!cpu_ops->cpu_stop)
                return -EOPNOTSUPP;
 
-       if (cpu_ops[cpu]->cpu_disable)
-               ret = cpu_ops[cpu]->cpu_disable(cpu);
-
-       if (ret)
-               return ret;
-
        remove_cpu_topology(cpu);
        numa_remove_cpu(cpu);
        set_cpu_online(cpu, false);
        riscv_ipi_disable();
        irq_migrate_all_off_this_cpu();
 
-       return ret;
+       return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -62,8 +55,8 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
        pr_notice("CPU%u: off\n", cpu);
 
        /* Verify from the firmware if the cpu is really stopped*/
-       if (cpu_ops[cpu]->cpu_is_stopped)
-               ret = cpu_ops[cpu]->cpu_is_stopped(cpu);
+       if (cpu_ops->cpu_is_stopped)
+               ret = cpu_ops->cpu_is_stopped(cpu);
        if (ret)
                pr_warn("CPU%d may not have stopped: %d\n", cpu, ret);
 }
@@ -77,7 +70,7 @@ void __noreturn arch_cpu_idle_dead(void)
 
        cpuhp_ap_report_dead();
 
-       cpu_ops[smp_processor_id()]->cpu_stop();
+       cpu_ops->cpu_stop();
        /* It should never reach here */
        BUG();
 }
index eb479a88a954ef07be7b887485bbbb2834b1b6bd..6a8bd8f4db0711b7ee02d0f4f7ce3ccb20e36526 100644 (file)
 #include <asm/sbi.h>
 #include <asm/smp.h>
 
-const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
+const struct cpu_operations *cpu_ops __ro_after_init = &cpu_ops_spinwait;
 
 extern const struct cpu_operations cpu_ops_sbi;
 #ifndef CONFIG_RISCV_BOOT_SPINWAIT
 const struct cpu_operations cpu_ops_spinwait = {
-       .name           = "",
-       .cpu_prepare    = NULL,
        .cpu_start      = NULL,
 };
 #endif
 
-void __init cpu_set_ops(int cpuid)
+void __init cpu_set_ops(void)
 {
 #if IS_ENABLED(CONFIG_RISCV_SBI)
        if (sbi_probe_extension(SBI_EXT_HSM)) {
-               if (!cpuid)
-                       pr_info("SBI HSM extension detected\n");
-               cpu_ops[cpuid] = &cpu_ops_sbi;
-       } else
+               pr_info("SBI HSM extension detected\n");
+               cpu_ops = &cpu_ops_sbi;
+       }
 #endif
-               cpu_ops[cpuid] = &cpu_ops_spinwait;
 }
index efa0f0816634c40772005bfe564cda4411957a81..1cc7df740eddc9a2977fde8c428e19fd90ec9126 100644 (file)
@@ -79,23 +79,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
        return sbi_hsm_hart_start(hartid, boot_addr, hsm_data);
 }
 
-static int sbi_cpu_prepare(unsigned int cpuid)
-{
-       if (!cpu_ops_sbi.cpu_start) {
-               pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
-               return -ENODEV;
-       }
-       return 0;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
-static int sbi_cpu_disable(unsigned int cpuid)
-{
-       if (!cpu_ops_sbi.cpu_stop)
-               return -EOPNOTSUPP;
-       return 0;
-}
-
 static void sbi_cpu_stop(void)
 {
        int ret;
@@ -118,11 +102,8 @@ static int sbi_cpu_is_stopped(unsigned int cpuid)
 #endif
 
 const struct cpu_operations cpu_ops_sbi = {
-       .name           = "sbi",
-       .cpu_prepare    = sbi_cpu_prepare,
        .cpu_start      = sbi_cpu_start,
 #ifdef CONFIG_HOTPLUG_CPU
-       .cpu_disable    = sbi_cpu_disable,
        .cpu_stop       = sbi_cpu_stop,
        .cpu_is_stopped = sbi_cpu_is_stopped,
 #endif
index d98d19226b5f5175aca387cf22558bad9df1f2c5..613872b0a21acb2a708f194d22f186f376f8a748 100644 (file)
@@ -39,15 +39,6 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
        WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
 }
 
-static int spinwait_cpu_prepare(unsigned int cpuid)
-{
-       if (!cpu_ops_spinwait.cpu_start) {
-               pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
-               return -ENODEV;
-       }
-       return 0;
-}
-
 static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
 {
        /*
@@ -64,7 +55,5 @@ static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
 }
 
 const struct cpu_operations cpu_ops_spinwait = {
-       .name           = "spinwait",
-       .cpu_prepare    = spinwait_cpu_prepare,
        .cpu_start      = spinwait_cpu_start,
 };
index b3785ffc15703cdf55efc2c523179dd2b64695c1..89920f84d0a34385471e9afbf9c26d287cbbd838 100644 (file)
@@ -8,8 +8,10 @@
 
 #include <linux/acpi.h>
 #include <linux/bitmap.h>
+#include <linux/cpu.h>
 #include <linux/cpuhotplug.h>
 #include <linux/ctype.h>
+#include <linux/jump_label.h>
 #include <linux/log2.h>
 #include <linux/memory.h>
 #include <linux/module.h>
@@ -44,6 +46,8 @@ struct riscv_isainfo hart_isa[NR_CPUS];
 /* Performance information */
 DEFINE_PER_CPU(long, misaligned_access_speed);
 
+static cpumask_t fast_misaligned_access;
+
 /**
  * riscv_isa_extension_base() - Get base extension word
  *
@@ -70,7 +74,7 @@ EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
  *
  * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
  */
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit)
 {
        const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
 
@@ -102,17 +106,101 @@ static bool riscv_isa_extension_check(int id)
                        return false;
                }
                return true;
+       case RISCV_ISA_EXT_INVALID:
+               return false;
        }
 
        return true;
 }
 
-#define __RISCV_ISA_EXT_DATA(_name, _id) {     \
-       .name = #_name,                         \
-       .property = #_name,                     \
-       .id = _id,                              \
+#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size) {     \
+       .name = #_name,                                                         \
+       .property = #_name,                                                     \
+       .id = _id,                                                              \
+       .subset_ext_ids = _subset_exts,                                         \
+       .subset_ext_size = _subset_exts_size                                    \
 }
 
+#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0)
+
+/* Used to declare pure "lasso" extension (Zk for instance) */
+#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
+       _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, ARRAY_SIZE(_bundled_exts))
+
+/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
+#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
+       _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts))
+
+static const unsigned int riscv_zk_bundled_exts[] = {
+       RISCV_ISA_EXT_ZBKB,
+       RISCV_ISA_EXT_ZBKC,
+       RISCV_ISA_EXT_ZBKX,
+       RISCV_ISA_EXT_ZKND,
+       RISCV_ISA_EXT_ZKNE,
+       RISCV_ISA_EXT_ZKR,
+       RISCV_ISA_EXT_ZKT,
+};
+
+static const unsigned int riscv_zkn_bundled_exts[] = {
+       RISCV_ISA_EXT_ZBKB,
+       RISCV_ISA_EXT_ZBKC,
+       RISCV_ISA_EXT_ZBKX,
+       RISCV_ISA_EXT_ZKND,
+       RISCV_ISA_EXT_ZKNE,
+       RISCV_ISA_EXT_ZKNH,
+};
+
+static const unsigned int riscv_zks_bundled_exts[] = {
+       RISCV_ISA_EXT_ZBKB,
+       RISCV_ISA_EXT_ZBKC,
+       RISCV_ISA_EXT_ZKSED,
+       RISCV_ISA_EXT_ZKSH
+};
+
+#define RISCV_ISA_EXT_ZVKN     \
+       RISCV_ISA_EXT_ZVKNED,   \
+       RISCV_ISA_EXT_ZVKNHB,   \
+       RISCV_ISA_EXT_ZVKB,     \
+       RISCV_ISA_EXT_ZVKT
+
+static const unsigned int riscv_zvkn_bundled_exts[] = {
+       RISCV_ISA_EXT_ZVKN
+};
+
+static const unsigned int riscv_zvknc_bundled_exts[] = {
+       RISCV_ISA_EXT_ZVKN,
+       RISCV_ISA_EXT_ZVBC
+};
+
+static const unsigned int riscv_zvkng_bundled_exts[] = {
+       RISCV_ISA_EXT_ZVKN,
+       RISCV_ISA_EXT_ZVKG
+};
+
+#define RISCV_ISA_EXT_ZVKS     \
+       RISCV_ISA_EXT_ZVKSED,   \
+       RISCV_ISA_EXT_ZVKSH,    \
+       RISCV_ISA_EXT_ZVKB,     \
+       RISCV_ISA_EXT_ZVKT
+
+static const unsigned int riscv_zvks_bundled_exts[] = {
+       RISCV_ISA_EXT_ZVKS
+};
+
+static const unsigned int riscv_zvksc_bundled_exts[] = {
+       RISCV_ISA_EXT_ZVKS,
+       RISCV_ISA_EXT_ZVBC
+};
+
+static const unsigned int riscv_zvksg_bundled_exts[] = {
+       RISCV_ISA_EXT_ZVKS,
+       RISCV_ISA_EXT_ZVKG
+};
+
+static const unsigned int riscv_zvbb_exts[] = {
+       RISCV_ISA_EXT_ZVKB
+};
+
 /*
  * The canonical order of ISA extension names in the ISA string is defined in
  * chapter 27 of the unprivileged specification.
@@ -160,10 +248,6 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
        __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
        __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
        __RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
-       __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b),
-       __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k),
-       __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j),
-       __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p),
        __RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
        __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
        __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
@@ -172,11 +256,49 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
        __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
        __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
        __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
+       __RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL),
        __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
        __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
+       __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
+       __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA),
+       __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH),
+       __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN),
        __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
        __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
+       __RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC),
+       __RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB),
+       __RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC),
+       __RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX),
        __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
+       __RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts),
+       __RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts),
+       __RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND),
+       __RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE),
+       __RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH),
+       __RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR),
+       __RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts),
+       __RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT),
+       __RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED),
+       __RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH),
+       __RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO),
+       __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts),
+       __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC),
+       __RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH),
+       __RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN),
+       __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB),
+       __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG),
+       __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts),
+       __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts),
+       __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED),
+       __RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts),
+       __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA),
+       __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB),
+       __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts),
+       __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts),
+       __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED),
+       __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH),
+       __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts),
+       __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT),
        __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
        __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN),
        __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
@@ -189,6 +311,31 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
 
 const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
 
+static void __init match_isa_ext(const struct riscv_isa_ext_data *ext, const char *name,
+                                const char *name_end, struct riscv_isainfo *isainfo)
+{
+       if ((name_end - name == strlen(ext->name)) &&
+            !strncasecmp(name, ext->name, name_end - name)) {
+               /*
+                * If this is a bundle, enable all the ISA extensions that
+                * comprise the bundle.
+                */
+               if (ext->subset_ext_size) {
+                       for (int i = 0; i < ext->subset_ext_size; i++) {
+                               if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
+                                       set_bit(ext->subset_ext_ids[i], isainfo->isa);
+                       }
+               }
+
+               /*
+                * This is valid even for bundle extensions which uses the RISCV_ISA_EXT_INVALID id
+                * (rejected by riscv_isa_extension_check()).
+                */
+               if (riscv_isa_extension_check(ext->id))
+                       set_bit(ext->id, isainfo->isa);
+       }
+}
+
 static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
                                          unsigned long *isa2hwcap, const char *isa)
 {
@@ -321,14 +468,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc
                if (*isa == '_')
                        ++isa;
 
-#define SET_ISA_EXT_MAP(name, bit)                                             \
-               do {                                                            \
-                       if ((ext_end - ext == strlen(name)) &&                  \
-                            !strncasecmp(ext, name, strlen(name)) &&           \
-                            riscv_isa_extension_check(bit))                    \
-                               set_bit(bit, isainfo->isa);                     \
-               } while (false)                                                 \
-
                if (unlikely(ext_err))
                        continue;
                if (!ext_long) {
@@ -340,10 +479,8 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc
                        }
                } else {
                        for (int i = 0; i < riscv_isa_ext_count; i++)
-                               SET_ISA_EXT_MAP(riscv_isa_ext[i].name,
-                                               riscv_isa_ext[i].id);
+                               match_isa_ext(&riscv_isa_ext[i], ext, ext_end, isainfo);
                }
-#undef SET_ISA_EXT_MAP
        }
 }
 
@@ -442,18 +579,26 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
                }
 
                for (int i = 0; i < riscv_isa_ext_count; i++) {
+                       const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i];
+
                        if (of_property_match_string(cpu_node, "riscv,isa-extensions",
-                                                    riscv_isa_ext[i].property) < 0)
+                                                    ext->property) < 0)
                                continue;
 
-                       if (!riscv_isa_extension_check(riscv_isa_ext[i].id))
-                               continue;
+                       if (ext->subset_ext_size) {
+                               for (int j = 0; j < ext->subset_ext_size; j++) {
+                                       if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
+                                               set_bit(ext->subset_ext_ids[j], isainfo->isa);
+                               }
+                       }
 
-                       /* Only single letter extensions get set in hwcap */
-                       if (strnlen(riscv_isa_ext[i].name, 2) == 1)
-                               this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
+                       if (riscv_isa_extension_check(ext->id)) {
+                               set_bit(ext->id, isainfo->isa);
 
-                       set_bit(riscv_isa_ext[i].id, isainfo->isa);
+                               /* Only single letter extensions get set in hwcap */
+                               if (strnlen(riscv_isa_ext[i].name, 2) == 1)
+                                       this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
+                       }
                }
 
                of_node_put(cpu_node);
@@ -643,6 +788,16 @@ static int check_unaligned_access(void *param)
                (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
 
        per_cpu(misaligned_access_speed, cpu) = speed;
+
+       /*
+        * Set the value of fast_misaligned_access of a CPU. These operations
+        * are atomic to avoid race conditions.
+        */
+       if (speed == RISCV_HWPROBE_MISALIGNED_FAST)
+               cpumask_set_cpu(cpu, &fast_misaligned_access);
+       else
+               cpumask_clear_cpu(cpu, &fast_misaligned_access);
+
        return 0;
 }
 
@@ -655,13 +810,69 @@ static void check_unaligned_access_nonboot_cpu(void *param)
                check_unaligned_access(pages[cpu]);
 }
 
+DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
+
+static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
+{
+       if (cpumask_weight(mask) == weight)
+               static_branch_enable_cpuslocked(&fast_misaligned_access_speed_key);
+       else
+               static_branch_disable_cpuslocked(&fast_misaligned_access_speed_key);
+}
+
+static void set_unaligned_access_static_branches_except_cpu(int cpu)
+{
+       /*
+        * Same as set_unaligned_access_static_branches, except excludes the
+        * given CPU from the result. When a CPU is hotplugged into an offline
+        * state, this function is called before the CPU is set to offline in
+        * the cpumask, and thus the CPU needs to be explicitly excluded.
+        */
+
+       cpumask_t fast_except_me;
+
+       cpumask_and(&fast_except_me, &fast_misaligned_access, cpu_online_mask);
+       cpumask_clear_cpu(cpu, &fast_except_me);
+
+       modify_unaligned_access_branches(&fast_except_me, num_online_cpus() - 1);
+}
+
+static void set_unaligned_access_static_branches(void)
+{
+       /*
+        * This will be called after check_unaligned_access_all_cpus so the
+        * result of unaligned access speed for all CPUs will be available.
+        *
+        * To avoid the number of online cpus changing between reading
+        * cpu_online_mask and calling num_online_cpus, cpus_read_lock must be
+        * held before calling this function.
+        */
+
+       cpumask_t fast_and_online;
+
+       cpumask_and(&fast_and_online, &fast_misaligned_access, cpu_online_mask);
+
+       modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
+}
+
+static int lock_and_set_unaligned_access_static_branch(void)
+{
+       cpus_read_lock();
+       set_unaligned_access_static_branches();
+       cpus_read_unlock();
+
+       return 0;
+}
+
+arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
+
 static int riscv_online_cpu(unsigned int cpu)
 {
        static struct page *buf;
 
        /* We are already set since the last check */
        if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
-               return 0;
+               goto exit;
 
        buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
        if (!buf) {
@@ -671,6 +882,17 @@ static int riscv_online_cpu(unsigned int cpu)
 
        check_unaligned_access(buf);
        __free_pages(buf, MISALIGNED_BUFFER_ORDER);
+
+exit:
+       set_unaligned_access_static_branches();
+
+       return 0;
+}
+
+static int riscv_offline_cpu(unsigned int cpu)
+{
+       set_unaligned_access_static_branches_except_cpu(cpu);
+
        return 0;
 }
 
@@ -705,9 +927,12 @@ static int check_unaligned_access_all_cpus(void)
        /* Check core 0. */
        smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
 
-       /* Setup hotplug callback for any new CPUs that come online. */
+       /*
+        * Setup hotplug callbacks for any new CPUs that come online or go
+        * offline.
+        */
        cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
-                                 riscv_online_cpu, NULL);
+                                 riscv_online_cpu, riscv_offline_cpu);
 
 out:
        unaligned_emulation_finish();
index aa6209a74c83ffc29f8622277196b799ebdd4321..b64bf1624a0529b719eff68f732e430dea073e65 100644 (file)
@@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
 {
        efi_memory_desc_t *md = data;
-       pte_t pte = READ_ONCE(*ptep);
+       pte_t pte = ptep_get(ptep);
        unsigned long val;
 
        if (md->attribute & EFI_MEMORY_RO) {
index 54ca4564a92631388783a7978e8f49f40e556364..9d1a305d55087bb3a6bdc73f8ed8ebe3206775b1 100644 (file)
@@ -83,6 +83,10 @@ SYM_CODE_START(handle_exception)
        /* Load the kernel shadow call stack pointer if coming from userspace */
        scs_load_current_if_task_changed s5
 
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+       move a0, sp
+       call riscv_v_context_nesting_start
+#endif
        move a0, sp /* pt_regs */
        la ra, ret_from_exception
 
@@ -138,6 +142,10 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
         */
        csrw CSR_SCRATCH, tp
 1:
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+       move a0, sp
+       call riscv_v_context_nesting_end
+#endif
        REG_L a0, PT_STATUS(sp)
        /*
         * The current load reservation is effectively part of the processor's
index 03a6434a8cdd0035bbc59629b1751c00df24b918..f5aa24d9e1c150e651f5eeb144da8671e9ac5ddc 100644 (file)
@@ -178,32 +178,28 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+                      struct ftrace_ops *op, struct ftrace_regs *fregs)
+{
+       struct pt_regs *regs = arch_ftrace_get_regs(fregs);
+       unsigned long *parent = (unsigned long *)&regs->ra;
+
+       prepare_ftrace_return(parent, ip, frame_pointer(regs));
+}
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 extern void ftrace_graph_call(void);
-extern void ftrace_graph_regs_call(void);
 int ftrace_enable_ftrace_graph_caller(void)
 {
-       int ret;
-
-       ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
-                                   (unsigned long)&prepare_ftrace_return, true, true);
-       if (ret)
-               return ret;
-
-       return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
+       return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
                                    (unsigned long)&prepare_ftrace_return, true, true);
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
-       int ret;
-
-       ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
-                                   (unsigned long)&prepare_ftrace_return, false, true);
-       if (ret)
-               return ret;
-
-       return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
+       return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
                                    (unsigned long)&prepare_ftrace_return, false, true);
 }
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
index 76ace1e0b46f623a119cc07a08ad8923079e1c81..4236a69c35cb33524cdb7190d8ecb294768d8e56 100644 (file)
@@ -11,7 +11,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/csr.h>
-#include <asm/cpu_ops_sbi.h>
 #include <asm/hwcap.h>
 #include <asm/image.h>
 #include <asm/scs.h>
@@ -89,6 +88,7 @@ relocate_enable_mmu:
        /* Compute satp for kernel page tables, but don't load it yet */
        srl a2, a0, PAGE_SHIFT
        la a1, satp_mode
+       XIP_FIXUP_OFFSET a1
        REG_L a1, 0(a1)
        or a2, a2, a1
 
@@ -265,10 +265,12 @@ SYM_CODE_START(_start_kernel)
        la sp, _end + THREAD_SIZE
        XIP_FIXUP_OFFSET sp
        mv s0, a0
+       mv s1, a1
        call __copy_data
 
-       /* Restore a0 copy */
+       /* Restore a0 & a1 copy */
        mv a0, s0
+       mv a1, s1
 #endif
 
 #ifndef CONFIG_XIP_KERNEL
diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
new file mode 100644 (file)
index 0000000..6afe80c
--- /dev/null
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2021 SiFive
+ */
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+#include <asm/vector.h>
+#include <asm/switch_to.h>
+#include <asm/simd.h>
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+#include <asm/asm-prototypes.h>
+#endif
+
+static inline void riscv_v_flags_set(u32 flags)
+{
+       WRITE_ONCE(current->thread.riscv_v_flags, flags);
+}
+
+static inline void riscv_v_start(u32 flags)
+{
+       int orig;
+
+       orig = riscv_v_flags();
+       BUG_ON((orig & flags) != 0);
+       riscv_v_flags_set(orig | flags);
+       barrier();
+}
+
+static inline void riscv_v_stop(u32 flags)
+{
+       int orig;
+
+       barrier();
+       orig = riscv_v_flags();
+       BUG_ON((orig & flags) == 0);
+       riscv_v_flags_set(orig & ~flags);
+}
+
+/*
+ * Claim ownership of the CPU vector context for use by the calling context.
+ *
+ * The caller may freely manipulate the vector context metadata until
+ * put_cpu_vector_context() is called.
+ */
+void get_cpu_vector_context(void)
+{
+       /*
+        * disable softirqs so it is impossible for softirqs to nest
+        * get_cpu_vector_context() when kernel is actively using Vector.
+        */
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+               local_bh_disable();
+       else
+               preempt_disable();
+
+       riscv_v_start(RISCV_KERNEL_MODE_V);
+}
+
+/*
+ * Release the CPU vector context.
+ *
+ * Must be called from a context in which get_cpu_vector_context() was
+ * previously called, with no call to put_cpu_vector_context() in the
+ * meantime.
+ */
+void put_cpu_vector_context(void)
+{
+       riscv_v_stop(RISCV_KERNEL_MODE_V);
+
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+               local_bh_enable();
+       else
+               preempt_enable();
+}
+
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+static __always_inline u32 *riscv_v_flags_ptr(void)
+{
+       return &current->thread.riscv_v_flags;
+}
+
+static inline void riscv_preempt_v_set_dirty(void)
+{
+       *riscv_v_flags_ptr() |= RISCV_PREEMPT_V_DIRTY;
+}
+
+static inline void riscv_preempt_v_reset_flags(void)
+{
+       *riscv_v_flags_ptr() &= ~(RISCV_PREEMPT_V_DIRTY | RISCV_PREEMPT_V_NEED_RESTORE);
+}
+
+static inline void riscv_v_ctx_depth_inc(void)
+{
+       *riscv_v_flags_ptr() += RISCV_V_CTX_UNIT_DEPTH;
+}
+
+static inline void riscv_v_ctx_depth_dec(void)
+{
+       *riscv_v_flags_ptr() -= RISCV_V_CTX_UNIT_DEPTH;
+}
+
+static inline u32 riscv_v_ctx_get_depth(void)
+{
+       return *riscv_v_flags_ptr() & RISCV_V_CTX_DEPTH_MASK;
+}
+
+static int riscv_v_stop_kernel_context(void)
+{
+       if (riscv_v_ctx_get_depth() != 0 || !riscv_preempt_v_started(current))
+               return 1;
+
+       riscv_preempt_v_clear_dirty(current);
+       riscv_v_stop(RISCV_PREEMPT_V);
+       return 0;
+}
+
+static int riscv_v_start_kernel_context(bool *is_nested)
+{
+       struct __riscv_v_ext_state *kvstate, *uvstate;
+
+       kvstate = &current->thread.kernel_vstate;
+       if (!kvstate->datap)
+               return -ENOENT;
+
+       if (riscv_preempt_v_started(current)) {
+               WARN_ON(riscv_v_ctx_get_depth() == 0);
+               *is_nested = true;
+               get_cpu_vector_context();
+               if (riscv_preempt_v_dirty(current)) {
+                       __riscv_v_vstate_save(kvstate, kvstate->datap);
+                       riscv_preempt_v_clear_dirty(current);
+               }
+               riscv_preempt_v_set_restore(current);
+               return 0;
+       }
+
+       /* Transfer the ownership of V from user to kernel, then save */
+       riscv_v_start(RISCV_PREEMPT_V | RISCV_PREEMPT_V_DIRTY);
+       if ((task_pt_regs(current)->status & SR_VS) == SR_VS_DIRTY) {
+               uvstate = &current->thread.vstate;
+               __riscv_v_vstate_save(uvstate, uvstate->datap);
+       }
+       riscv_preempt_v_clear_dirty(current);
+       return 0;
+}
+
+/* low-level V context handling code, called with irq disabled */
+asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs)
+{
+       int depth;
+
+       if (!riscv_preempt_v_started(current))
+               return;
+
+       depth = riscv_v_ctx_get_depth();
+       if (depth == 0 && (regs->status & SR_VS) == SR_VS_DIRTY)
+               riscv_preempt_v_set_dirty();
+
+       riscv_v_ctx_depth_inc();
+}
+
+asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs)
+{
+       struct __riscv_v_ext_state *vstate = &current->thread.kernel_vstate;
+       u32 depth;
+
+       WARN_ON(!irqs_disabled());
+
+       if (!riscv_preempt_v_started(current))
+               return;
+
+       riscv_v_ctx_depth_dec();
+       depth = riscv_v_ctx_get_depth();
+       if (depth == 0) {
+               if (riscv_preempt_v_restore(current)) {
+                       __riscv_v_vstate_restore(vstate, vstate->datap);
+                       __riscv_v_vstate_clean(regs);
+                       riscv_preempt_v_reset_flags();
+               }
+       }
+}
+#else
+#define riscv_v_start_kernel_context(nested)   (-ENOENT)
+#define riscv_v_stop_kernel_context()          (-ENOENT)
+#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
+
+/*
+ * kernel_vector_begin(): obtain the CPU vector registers for use by the calling
+ * context
+ *
+ * Must not be called unless may_use_simd() returns true.
+ * Task context in the vector registers is saved back to memory as necessary.
+ *
+ * A matching call to kernel_vector_end() must be made before returning from the
+ * calling context.
+ *
+ * The caller may freely use the vector registers until kernel_vector_end() is
+ * called.
+ */
+void kernel_vector_begin(void)
+{
+       bool nested = false;
+
+       if (WARN_ON(!has_vector()))
+               return;
+
+       BUG_ON(!may_use_simd());
+
+       if (riscv_v_start_kernel_context(&nested)) {
+               get_cpu_vector_context();
+               riscv_v_vstate_save(&current->thread.vstate, task_pt_regs(current));
+       }
+
+       if (!nested)
+               riscv_v_vstate_set_restore(current, task_pt_regs(current));
+
+       riscv_v_enable();
+}
+EXPORT_SYMBOL_GPL(kernel_vector_begin);
+
+/*
+ * kernel_vector_end(): give the CPU vector registers back to the current task
+ *
+ * Must be called from a context in which kernel_vector_begin() was previously
+ * called, with no call to kernel_vector_end() in the meantime.
+ *
+ * The caller must not use the vector registers after this function is called,
+ * unless kernel_vector_begin() is called again in the meantime.
+ */
+void kernel_vector_end(void)
+{
+       if (WARN_ON(!has_vector()))
+               return;
+
+       riscv_v_disable();
+
+       if (riscv_v_stop_kernel_context())
+               put_cpu_vector_context();
+}
+EXPORT_SYMBOL_GPL(kernel_vector_end);
index 58dd96a2a15340ee83c473436a1b2cf25d407c1f..b7561288e8da616de4e2082622207a0f33e9b7f4 100644 (file)
@@ -3,12 +3,12 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/export.h>
 #include <asm/asm.h>
 #include <asm/csr.h>
 #include <asm/unistd.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
-#include <asm-generic/export.h>
 #include <asm/ftrace.h>
 
        .text
        .endm
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-       .macro SAVE_ALL
+
+/**
+* SAVE_ABI_REGS - save regs against the pt_regs struct
+*
+* @all: tell if saving all the regs
+*
+* If all is set, all the regs will be saved, otherwise only ABI
+* related regs (a0-a7,epc,ra and optional s0) will be saved.
+*
+* After the stack is established,
+*
+* 0(sp) stores the PC of the traced function which can be accessed
+* by &(fregs)->regs->epc in tracing function. Note that the real
+* function entry address should be computed with -FENTRY_RA_OFFSET.
+*
+* 8(sp) stores the function return address (i.e. parent IP) that
+* can be accessed by &(fregs)->regs->ra in tracing function.
+*
+* The other regs are saved at the respective localtion and accessed
+* by the respective pt_regs member.
+*
+* Here is the layout of stack for your reference.
+*
+* PT_SIZE_ON_STACK  ->  +++++++++
+*                       + ..... +
+*                       + t3-t6 +
+*                       + s2-s11+
+*                       + a0-a7 + --++++-> ftrace_caller saved
+*                       + s1    +   +
+*                       + s0    + --+
+*                       + t0-t2 +   +
+*                       + tp    +   +
+*                       + gp    +   +
+*                       + sp    +   +
+*                       + ra    + --+ // parent IP
+*               sp  ->  + epc   + --+ // PC
+*                       +++++++++
+**/
+       .macro SAVE_ABI_REGS, all=0
        addi    sp, sp, -PT_SIZE_ON_STACK
 
-       REG_S t0,  PT_EPC(sp)
-       REG_S x1,  PT_RA(sp)
-       REG_S x2,  PT_SP(sp)
-       REG_S x3,  PT_GP(sp)
-       REG_S x4,  PT_TP(sp)
-       REG_S x5,  PT_T0(sp)
-       save_from_x6_to_x31
+       REG_S   t0,  PT_EPC(sp)
+       REG_S   x1,  PT_RA(sp)
+
+       // save the ABI regs
+
+       REG_S   x10, PT_A0(sp)
+       REG_S   x11, PT_A1(sp)
+       REG_S   x12, PT_A2(sp)
+       REG_S   x13, PT_A3(sp)
+       REG_S   x14, PT_A4(sp)
+       REG_S   x15, PT_A5(sp)
+       REG_S   x16, PT_A6(sp)
+       REG_S   x17, PT_A7(sp)
+
+       // save the leftover regs
+
+       .if \all == 1
+       REG_S   x2, PT_SP(sp)
+       REG_S   x3, PT_GP(sp)
+       REG_S   x4, PT_TP(sp)
+       REG_S   x5, PT_T0(sp)
+       REG_S   x6, PT_T1(sp)
+       REG_S   x7, PT_T2(sp)
+       REG_S   x8, PT_S0(sp)
+       REG_S   x9, PT_S1(sp)
+       REG_S   x18, PT_S2(sp)
+       REG_S   x19, PT_S3(sp)
+       REG_S   x20, PT_S4(sp)
+       REG_S   x21, PT_S5(sp)
+       REG_S   x22, PT_S6(sp)
+       REG_S   x23, PT_S7(sp)
+       REG_S   x24, PT_S8(sp)
+       REG_S   x25, PT_S9(sp)
+       REG_S   x26, PT_S10(sp)
+       REG_S   x27, PT_S11(sp)
+       REG_S   x28, PT_T3(sp)
+       REG_S   x29, PT_T4(sp)
+       REG_S   x30, PT_T5(sp)
+       REG_S   x31, PT_T6(sp)
+
+       // save s0 if FP_TEST defined
+
+       .else
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+       REG_S   x8, PT_S0(sp)
+#endif
+       .endif
        .endm
 
-       .macro RESTORE_ALL
-       REG_L x1,  PT_RA(sp)
-       REG_L x2,  PT_SP(sp)
-       REG_L x3,  PT_GP(sp)
-       REG_L x4,  PT_TP(sp)
-       /* Restore t0 with PT_EPC */
-       REG_L x5,  PT_EPC(sp)
-       restore_from_x6_to_x31
+       .macro RESTORE_ABI_REGS, all=0
+       REG_L   t0, PT_EPC(sp)
+       REG_L   x1, PT_RA(sp)
+       REG_L   x10, PT_A0(sp)
+       REG_L   x11, PT_A1(sp)
+       REG_L   x12, PT_A2(sp)
+       REG_L   x13, PT_A3(sp)
+       REG_L   x14, PT_A4(sp)
+       REG_L   x15, PT_A5(sp)
+       REG_L   x16, PT_A6(sp)
+       REG_L   x17, PT_A7(sp)
 
+       .if \all == 1
+       REG_L   x2, PT_SP(sp)
+       REG_L   x3, PT_GP(sp)
+       REG_L   x4, PT_TP(sp)
+       REG_L   x6, PT_T1(sp)
+       REG_L   x7, PT_T2(sp)
+       REG_L   x8, PT_S0(sp)
+       REG_L   x9, PT_S1(sp)
+       REG_L   x18, PT_S2(sp)
+       REG_L   x19, PT_S3(sp)
+       REG_L   x20, PT_S4(sp)
+       REG_L   x21, PT_S5(sp)
+       REG_L   x22, PT_S6(sp)
+       REG_L   x23, PT_S7(sp)
+       REG_L   x24, PT_S8(sp)
+       REG_L   x25, PT_S9(sp)
+       REG_L   x26, PT_S10(sp)
+       REG_L   x27, PT_S11(sp)
+       REG_L   x28, PT_T3(sp)
+       REG_L   x29, PT_T4(sp)
+       REG_L   x30, PT_T5(sp)
+       REG_L   x31, PT_T6(sp)
+
+       .else
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+       REG_L   x8, PT_S0(sp)
+#endif
+       .endif
        addi    sp, sp, PT_SIZE_ON_STACK
        .endm
+
+       .macro PREPARE_ARGS
+       addi    a0, t0, -FENTRY_RA_OFFSET
+       la      a1, function_trace_op
+       REG_L   a2, 0(a1)
+       mv      a1, ra
+       mv      a3, sp
+       .endm
+
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 SYM_FUNC_START(ftrace_caller)
        SAVE_ABI
 
@@ -105,34 +224,39 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
        call    ftrace_stub
 #endif
        RESTORE_ABI
-       jr t0
+       jr      t0
 SYM_FUNC_END(ftrace_caller)
 
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 SYM_FUNC_START(ftrace_regs_caller)
-       SAVE_ALL
-
-       addi    a0, t0, -FENTRY_RA_OFFSET
-       la      a1, function_trace_op
-       REG_L   a2, 0(a1)
-       mv      a1, ra
-       mv      a3, sp
+       mv      t1, zero
+       SAVE_ABI_REGS 1
+       PREPARE_ARGS
 
 SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
        call    ftrace_stub
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       addi    a0, sp, PT_RA
-       REG_L   a1, PT_EPC(sp)
-       addi    a1, a1, -FENTRY_RA_OFFSET
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
-       mv      a2, s0
-#endif
-SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
+       RESTORE_ABI_REGS 1
+       bnez    t1, .Ldirect
+       jr      t0
+.Ldirect:
+       jr      t1
+SYM_FUNC_END(ftrace_regs_caller)
+
+SYM_FUNC_START(ftrace_caller)
+       SAVE_ABI_REGS 0
+       PREPARE_ARGS
+
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
        call    ftrace_stub
-#endif
 
-       RESTORE_ALL
-       jr t0
-SYM_FUNC_END(ftrace_regs_caller)
+       RESTORE_ABI_REGS 0
+       jr      t0
+SYM_FUNC_END(ftrace_caller)
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+SYM_CODE_START(ftrace_stub_direct_tramp)
+       jr      t0
+SYM_CODE_END(ftrace_stub_direct_tramp)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
index b4dd9ed6849e30f13922a5ab4e398f87de984e9b..d7ec69ac6910c6ea0dadcbf7757511cc2f5f2914 100644 (file)
@@ -4,12 +4,12 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/cfi_types.h>
+#include <linux/export.h>
 #include <asm/asm.h>
 #include <asm/csr.h>
 #include <asm/unistd.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
-#include <asm-generic/export.h>
 #include <asm/ftrace.h>
 
        .text
index aac019ed63b1bdaa766262a5266deb4c8c5a4bdf..5e5a82644451e16d8bdfe229e2ce89b4c389c31e 100644 (file)
@@ -723,8 +723,8 @@ static int add_relocation_to_accumulate(struct module *me, int type,
 
                        if (!bucket) {
                                kfree(entry);
-                               kfree(rel_head);
                                kfree(rel_head->rel_entry);
+                               kfree(rel_head);
                                return -ENOMEM;
                        }
 
@@ -747,6 +747,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
 {
        /* Can safely assume that bits is not greater than sizeof(long) */
        unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
+       /*
+        * When hashtable_size == 1, hashtable_bits == 0.
+        * This is valid because the hashing algorithm returns 0 in this case.
+        */
        unsigned int hashtable_bits = ilog2(hashtable_size);
 
        /*
@@ -760,10 +764,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
        hashtable_size <<= should_double_size;
 
        *relocation_hashtable = kmalloc_array(hashtable_size,
-                                             sizeof(*relocation_hashtable),
+                                             sizeof(**relocation_hashtable),
                                              GFP_KERNEL);
        if (!*relocation_hashtable)
-               return -ENOMEM;
+               return 0;
 
        __hash_init(*relocation_hashtable, hashtable_size);
 
@@ -779,6 +783,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
        Elf_Sym *sym;
        void *location;
        unsigned int i, type;
+       unsigned int j_idx = 0;
        Elf_Addr v;
        int res;
        unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
@@ -789,8 +794,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
        hashtable_bits = initialize_relocation_hashtable(num_relocations,
                                                         &relocation_hashtable);
 
-       if (hashtable_bits < 0)
-               return hashtable_bits;
+       if (!relocation_hashtable)
+               return -ENOMEM;
 
        INIT_LIST_HEAD(&used_buckets_list);
 
@@ -829,9 +834,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                v = sym->st_value + rel[i].r_addend;
 
                if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) {
-                       unsigned int j;
+                       unsigned int j = j_idx;
+                       bool found = false;
 
-                       for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
+                       do {
                                unsigned long hi20_loc =
                                        sechdrs[sechdrs[relsec].sh_info].sh_addr
                                        + rel[j].r_offset;
@@ -860,16 +866,26 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                                        hi20 = (offset + 0x800) & 0xfffff000;
                                        lo12 = offset - hi20;
                                        v = lo12;
+                                       found = true;
 
                                        break;
                                }
-                       }
-                       if (j == sechdrs[relsec].sh_size / sizeof(*rel)) {
+
+                               j++;
+                               if (j > sechdrs[relsec].sh_size / sizeof(*rel))
+                                       j = 0;
+
+                       } while (j_idx != j);
+
+                       if (!found) {
                                pr_err(
                                  "%s: Can not find HI20 relocation information\n",
                                  me->name);
                                return -EINVAL;
                        }
+
+                       /* Record the previous j-loop end index */
+                       j_idx = j;
                }
 
                if (reloc_handlers[type].accumulate_handler)
@@ -894,7 +910,8 @@ void *module_alloc(unsigned long size)
 {
        return __vmalloc_node_range(size, 1, MODULES_VADDR,
                                    MODULES_END, GFP_KERNEL,
-                                   PAGE_KERNEL, 0, NUMA_NO_NODE,
+                                   PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
+                                   NUMA_NO_NODE,
                                    __builtin_return_address(0));
 }
 #endif
diff --git a/arch/riscv/kernel/paravirt.c b/arch/riscv/kernel/paravirt.c
new file mode 100644 (file)
index 0000000..8e114f5
--- /dev/null
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Ventana Micro Systems Inc.
+ */
+
+#define pr_fmt(fmt) "riscv-pv: " fmt
+
+#include <linux/cpuhotplug.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/percpu-defs.h>
+#include <linux/printk.h>
+#include <linux/static_call.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+#include <asm/page.h>
+#include <asm/paravirt.h>
+#include <asm/sbi.h>
+
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
+
+static u64 native_steal_clock(int cpu)
+{
+       return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+
+static bool steal_acc = true;
+static int __init parse_no_stealacc(char *arg)
+{
+       steal_acc = false;
+       return 0;
+}
+
+early_param("no-steal-acc", parse_no_stealacc);
+
+DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
+
+static bool __init has_pv_steal_clock(void)
+{
+       if (sbi_spec_version >= sbi_mk_version(2, 0) &&
+           sbi_probe_extension(SBI_EXT_STA) > 0) {
+               pr_info("SBI STA extension detected\n");
+               return true;
+       }
+
+       return false;
+}
+
+static int sbi_sta_steal_time_set_shmem(unsigned long lo, unsigned long hi,
+                                       unsigned long flags)
+{
+       struct sbiret ret;
+
+       ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM,
+                       lo, hi, flags, 0, 0, 0);
+       if (ret.error) {
+               if (lo == SBI_STA_SHMEM_DISABLE && hi == SBI_STA_SHMEM_DISABLE)
+                       pr_warn("Failed to disable steal-time shmem");
+               else
+                       pr_warn("Failed to set steal-time shmem");
+               return sbi_err_map_linux_errno(ret.error);
+       }
+
+       return 0;
+}
+
+static int pv_time_cpu_online(unsigned int cpu)
+{
+       struct sbi_sta_struct *st = this_cpu_ptr(&steal_time);
+       phys_addr_t pa = __pa(st);
+       unsigned long lo = (unsigned long)pa;
+       unsigned long hi = IS_ENABLED(CONFIG_32BIT) ? upper_32_bits((u64)pa) : 0;
+
+       return sbi_sta_steal_time_set_shmem(lo, hi, 0);
+}
+
+static int pv_time_cpu_down_prepare(unsigned int cpu)
+{
+       return sbi_sta_steal_time_set_shmem(SBI_STA_SHMEM_DISABLE,
+                                           SBI_STA_SHMEM_DISABLE, 0);
+}
+
+static u64 pv_time_steal_clock(int cpu)
+{
+       struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu);
+       u32 sequence;
+       u64 steal;
+
+       /*
+        * Check the sequence field before and after reading the steal
+        * field. Repeat the read if it is different or odd.
+        */
+       do {
+               sequence = READ_ONCE(st->sequence);
+               virt_rmb();
+               steal = READ_ONCE(st->steal);
+               virt_rmb();
+       } while ((le32_to_cpu(sequence) & 1) ||
+                sequence != READ_ONCE(st->sequence));
+
+       return le64_to_cpu(steal);
+}
+
+int __init pv_time_init(void)
+{
+       int ret;
+
+       if (!has_pv_steal_clock())
+               return 0;
+
+       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                               "riscv/pv_time:online",
+                               pv_time_cpu_online,
+                               pv_time_cpu_down_prepare);
+       if (ret < 0)
+               return ret;
+
+       static_call_update(pv_steal_clock, pv_time_steal_clock);
+
+       static_key_slow_inc(&paravirt_steal_enabled);
+       if (steal_acc)
+               static_key_slow_inc(&paravirt_steal_rq_enabled);
+
+       pr_info("Computing paravirt steal-time\n");
+
+       return 0;
+}
index 13ee7bf589a15e1fed201415dbe352494c1ba526..37e87fdcf6a00057663ffd636c50e85e865be3c4 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/fixmap.h>
 #include <asm/ftrace.h>
 #include <asm/patch.h>
+#include <asm/sections.h>
 
 struct patch_insn {
        void *addr;
@@ -25,6 +26,14 @@ struct patch_insn {
 int riscv_patch_in_stop_machine = false;
 
 #ifdef CONFIG_MMU
+
+static inline bool is_kernel_exittext(uintptr_t addr)
+{
+       return system_state < SYSTEM_RUNNING &&
+               addr >= (uintptr_t)__exittext_begin &&
+               addr < (uintptr_t)__exittext_end;
+}
+
 /*
  * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
  * reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses".
@@ -35,7 +44,7 @@ static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
        uintptr_t uintaddr = (uintptr_t) addr;
        struct page *page;
 
-       if (core_kernel_text(uintaddr))
+       if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
                page = phys_to_page(__pa_symbol(addr));
        else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
                page = vmalloc_to_page(addr);
index 68e786c84c949b23b9fa529686fc21ce89e94d64..f6d4dedffb8422051a3598ead6cea3d0bac96d7a 100644 (file)
@@ -38,8 +38,7 @@ static char *get_early_cmdline(uintptr_t dtb_pa)
        if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
            IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
            fdt_cmdline_size == 0 /* CONFIG_CMDLINE_FALLBACK */) {
-               strncat(early_cmdline, CONFIG_CMDLINE,
-                       COMMAND_LINE_SIZE - fdt_cmdline_size);
+               strlcat(early_cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
        }
 
        return early_cmdline;
index 4f21d970a1292b06be357b8b33ed541751bbb091..92922dbd5b5c1f9b5d57643ecbd7a1599c5ac4c3 100644 (file)
@@ -171,6 +171,7 @@ void flush_thread(void)
        riscv_v_vstate_off(task_pt_regs(current));
        kfree(current->thread.vstate.datap);
        memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
+       clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
 #endif
 }
 
@@ -178,7 +179,7 @@ void arch_release_task_struct(struct task_struct *tsk)
 {
        /* Free the vector context of datap. */
        if (has_vector())
-               kfree(tsk->thread.vstate.datap);
+               riscv_v_thread_free(tsk);
 }
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -187,6 +188,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
        *dst = *src;
        /* clear entire V context, including datap for a new task */
        memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
+       memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
+       clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);
 
        return 0;
 }
@@ -221,7 +224,15 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
                childregs->a0 = 0; /* Return value of fork() */
                p->thread.s[0] = 0;
        }
+       p->thread.riscv_v_flags = 0;
+       if (has_vector())
+               riscv_v_thread_alloc(p);
        p->thread.ra = (unsigned long)ret_from_fork;
        p->thread.sp = (unsigned long)childregs; /* kernel sp */
        return 0;
 }
+
+void __init arch_task_cache_init(void)
+{
+       riscv_v_setup_ctx_cache();
+}
index 2afe460de16a62ba21cf3c7db4a96c5ec8f7d3a5..e8515aa9d80bf82fd6ff2598664b9fe18a6b1de3 100644 (file)
@@ -99,8 +99,11 @@ static int riscv_vr_get(struct task_struct *target,
         * Ensure the vector registers have been saved to the memory before
         * copying them to membuf.
         */
-       if (target == current)
-               riscv_v_vstate_save(current, task_pt_regs(current));
+       if (target == current) {
+               get_cpu_vector_context();
+               riscv_v_vstate_save(&current->thread.vstate, task_pt_regs(current));
+               put_cpu_vector_context();
+       }
 
        ptrace_vstate.vstart = vstate->vstart;
        ptrace_vstate.vl = vstate->vl;
index 5a62ed1da45332c85820fdfdd7e90046b1ae3380..e66e0999a80057058c66c71fa907a0fb0152bc00 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/bits.h>
 #include <linux/init.h>
+#include <linux/mm.h>
 #include <linux/pm.h>
 #include <linux/reboot.h>
 #include <asm/sbi.h>
@@ -571,6 +572,66 @@ long sbi_get_mimpid(void)
 }
 EXPORT_SYMBOL_GPL(sbi_get_mimpid);
 
+bool sbi_debug_console_available;
+
+int sbi_debug_console_write(const char *bytes, unsigned int num_bytes)
+{
+       phys_addr_t base_addr;
+       struct sbiret ret;
+
+       if (!sbi_debug_console_available)
+               return -EOPNOTSUPP;
+
+       if (is_vmalloc_addr(bytes))
+               base_addr = page_to_phys(vmalloc_to_page(bytes)) +
+                           offset_in_page(bytes);
+       else
+               base_addr = __pa(bytes);
+       if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
+               num_bytes = PAGE_SIZE - offset_in_page(bytes);
+
+       if (IS_ENABLED(CONFIG_32BIT))
+               ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
+                               num_bytes, lower_32_bits(base_addr),
+                               upper_32_bits(base_addr), 0, 0, 0);
+       else
+               ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
+                               num_bytes, base_addr, 0, 0, 0, 0);
+
+       if (ret.error == SBI_ERR_FAILURE)
+               return -EIO;
+       return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
+}
+
+int sbi_debug_console_read(char *bytes, unsigned int num_bytes)
+{
+       phys_addr_t base_addr;
+       struct sbiret ret;
+
+       if (!sbi_debug_console_available)
+               return -EOPNOTSUPP;
+
+       if (is_vmalloc_addr(bytes))
+               base_addr = page_to_phys(vmalloc_to_page(bytes)) +
+                           offset_in_page(bytes);
+       else
+               base_addr = __pa(bytes);
+       if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
+               num_bytes = PAGE_SIZE - offset_in_page(bytes);
+
+       if (IS_ENABLED(CONFIG_32BIT))
+               ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
+                               num_bytes, lower_32_bits(base_addr),
+                               upper_32_bits(base_addr), 0, 0, 0);
+       else
+               ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
+                               num_bytes, base_addr, 0, 0, 0, 0);
+
+       if (ret.error == SBI_ERR_FAILURE)
+               return -EIO;
+       return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
+}
+
 void __init sbi_init(void)
 {
        int ret;
@@ -612,6 +673,11 @@ void __init sbi_init(void)
                        sbi_srst_reboot_nb.priority = 192;
                        register_restart_handler(&sbi_srst_reboot_nb);
                }
+               if ((sbi_spec_version >= sbi_mk_version(2, 0)) &&
+                   (sbi_probe_extension(SBI_EXT_DBCN) > 0)) {
+                       pr_info("SBI DBCN extension detected\n");
+                       sbi_debug_console_available = true;
+               }
        } else {
                __sbi_set_timer = __sbi_set_timer_v01;
                __sbi_send_ipi  = __sbi_send_ipi_v01;
index 535a837de55d1ba3aa8a45fe4123404ce1a9430f..4f73c0ae44b25d443a64eb9ae75a01d7372d4fca 100644 (file)
@@ -26,7 +26,6 @@
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
-#include <asm/cpu_ops.h>
 #include <asm/early_ioremap.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
@@ -51,7 +50,6 @@ atomic_t hart_lottery __section(".sdata")
 #endif
 ;
 unsigned long boot_cpu_hartid;
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 /*
  * Place kernel memory regions on the resource tree so that
@@ -299,23 +297,10 @@ void __init setup_arch(char **cmdline_p)
        riscv_user_isa_enable();
 }
 
-static int __init topology_init(void)
+bool arch_cpu_is_hotpluggable(int cpu)
 {
-       int i, ret;
-
-       for_each_possible_cpu(i) {
-               struct cpu *cpu = &per_cpu(cpu_devices, i);
-
-               cpu->hotpluggable = cpu_has_hotplug(i);
-               ret = register_cpu(cpu, i);
-               if (unlikely(ret))
-                       pr_warn("Warning: %s: register_cpu %d failed (%d)\n",
-                              __func__, i, ret);
-       }
-
-       return 0;
+       return cpu_has_hotplug(cpu);
 }
-subsys_initcall(topology_init);
 
 void free_initmem(void)
 {
index 88b6220b260879ee75ac6a6824def025b004041b..501e66debf69721d53db2515cea4df970a6b2784 100644 (file)
@@ -86,12 +86,15 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec)
        /* datap is designed to be 16 byte aligned for better performance */
        WARN_ON(unlikely(!IS_ALIGNED((unsigned long)datap, 16)));
 
-       riscv_v_vstate_save(current, regs);
+       get_cpu_vector_context();
+       riscv_v_vstate_save(&current->thread.vstate, regs);
+       put_cpu_vector_context();
+
        /* Copy everything of vstate but datap. */
        err = __copy_to_user(&state->v_state, &current->thread.vstate,
                             offsetof(struct __riscv_v_ext_state, datap));
        /* Copy the pointer datap itself. */
-       err |= __put_user(datap, &state->v_state.datap);
+       err |= __put_user((__force void *)datap, &state->v_state.datap);
        /* Copy the whole vector content to user space datap. */
        err |= __copy_to_user(datap, current->thread.vstate.datap, riscv_v_vsize);
        /* Copy magic to the user space after saving  all vector conetext */
@@ -134,7 +137,7 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
        if (unlikely(err))
                return err;
 
-       riscv_v_vstate_restore(current, regs);
+       riscv_v_vstate_set_restore(current, regs);
 
        return err;
 }
index 40420afbb1a09fc90ea00107816adde1002d6479..45dd4035416efdc59eab5dd1092e7f0ff60cfbe6 100644 (file)
@@ -81,7 +81,7 @@ static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 
 #ifdef CONFIG_HOTPLUG_CPU
        if (cpu_has_hotplug(cpu))
-               cpu_ops[cpu]->cpu_stop();
+               cpu_ops->cpu_stop();
 #endif
 
        for(;;)
index d162bf339beb16e3e4ffcb2b7d755ded9712e895..519b6bd946e5d1b69edf3379e31b345e38a03deb 100644 (file)
@@ -49,7 +49,6 @@ void __init smp_prepare_boot_cpu(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        int cpuid;
-       int ret;
        unsigned int curr_cpuid;
 
        init_cpu_topology();
@@ -66,11 +65,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        for_each_possible_cpu(cpuid) {
                if (cpuid == curr_cpuid)
                        continue;
-               if (cpu_ops[cpuid]->cpu_prepare) {
-                       ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
-                       if (ret)
-                               continue;
-               }
                set_cpu_present(cpuid, true);
                numa_store_cpu_info(cpuid);
        }
@@ -125,18 +119,7 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un
 
 static void __init acpi_parse_and_init_cpus(void)
 {
-       int cpuid;
-
-       cpu_set_ops(0);
-
        acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0);
-
-       for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
-               if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
-                       cpu_set_ops(cpuid);
-                       set_cpu_possible(cpuid, true);
-               }
-       }
 }
 #else
 #define acpi_parse_and_init_cpus(...)  do { } while (0)
@@ -150,8 +133,6 @@ static void __init of_parse_and_init_cpus(void)
        int cpuid = 1;
        int rc;
 
-       cpu_set_ops(0);
-
        for_each_of_cpu_node(dn) {
                rc = riscv_early_of_processor_hartid(dn, &hart);
                if (rc < 0)
@@ -179,27 +160,28 @@ static void __init of_parse_and_init_cpus(void)
        if (cpuid > nr_cpu_ids)
                pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
                        cpuid, nr_cpu_ids);
-
-       for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
-               if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
-                       cpu_set_ops(cpuid);
-                       set_cpu_possible(cpuid, true);
-               }
-       }
 }
 
 void __init setup_smp(void)
 {
+       int cpuid;
+
+       cpu_set_ops();
+
        if (acpi_disabled)
                of_parse_and_init_cpus();
        else
                acpi_parse_and_init_cpus();
+
+       for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)
+               if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
+                       set_cpu_possible(cpuid, true);
 }
 
 static int start_secondary_cpu(int cpu, struct task_struct *tidle)
 {
-       if (cpu_ops[cpu]->cpu_start)
-               return cpu_ops[cpu]->cpu_start(cpu, tidle);
+       if (cpu_ops->cpu_start)
+               return cpu_ops->cpu_start(cpu, tidle);
 
        return -EOPNOTSUPP;
 }
index 3c89b8ec69c49cce4809986f51784fcbfff53630..239509367e4233336806c19da964a06537d5a9b5 100644 (file)
@@ -4,8 +4,12 @@
  * Copyright (c) 2022 Ventana Micro Systems Inc.
  */
 
+#define pr_fmt(fmt) "suspend: " fmt
+
 #include <linux/ftrace.h>
+#include <linux/suspend.h>
 #include <asm/csr.h>
+#include <asm/sbi.h>
 #include <asm/suspend.h>
 
 void suspend_save_csrs(struct suspend_context *context)
@@ -85,3 +89,43 @@ int cpu_suspend(unsigned long arg,
 
        return rc;
 }
+
+#ifdef CONFIG_RISCV_SBI
+static int sbi_system_suspend(unsigned long sleep_type,
+                             unsigned long resume_addr,
+                             unsigned long opaque)
+{
+       struct sbiret ret;
+
+       ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND,
+                       sleep_type, resume_addr, opaque, 0, 0, 0);
+       if (ret.error)
+               return sbi_err_map_linux_errno(ret.error);
+
+       return ret.value;
+}
+
+static int sbi_system_suspend_enter(suspend_state_t state)
+{
+       return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend);
+}
+
+static const struct platform_suspend_ops sbi_system_suspend_ops = {
+       .valid = suspend_valid_only_mem,
+       .enter = sbi_system_suspend_enter,
+};
+
+static int __init sbi_system_suspend_init(void)
+{
+       if (sbi_spec_version >= sbi_mk_version(2, 0) &&
+           sbi_probe_extension(SBI_EXT_SUSP) > 0) {
+               pr_info("SBI SUSP extension detected\n");
+               if (IS_ENABLED(CONFIG_SUSPEND))
+                       suspend_set_ops(&sbi_system_suspend_ops);
+       }
+
+       return 0;
+}
+
+arch_initcall(sbi_system_suspend_init);
+#endif /* CONFIG_RISCV_SBI */
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
new file mode 100644 (file)
index 0000000..a7c56b4
--- /dev/null
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The hwprobe interface, for allowing userspace to probe to see which features
+ * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for
+ * more details.
+ */
+#include <linux/syscalls.h>
+#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/hwprobe.h>
+#include <asm/sbi.h>
+#include <asm/switch_to.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/vector.h>
+#include <vdso/vsyscall.h>
+
+
+static void hwprobe_arch_id(struct riscv_hwprobe *pair,
+                           const struct cpumask *cpus)
+{
+       u64 id = -1ULL;
+       bool first = true;
+       int cpu;
+
+       for_each_cpu(cpu, cpus) {
+               u64 cpu_id;
+
+               switch (pair->key) {
+               case RISCV_HWPROBE_KEY_MVENDORID:
+                       cpu_id = riscv_cached_mvendorid(cpu);
+                       break;
+               case RISCV_HWPROBE_KEY_MIMPID:
+                       cpu_id = riscv_cached_mimpid(cpu);
+                       break;
+               case RISCV_HWPROBE_KEY_MARCHID:
+                       cpu_id = riscv_cached_marchid(cpu);
+                       break;
+               }
+
+               if (first) {
+                       id = cpu_id;
+                       first = false;
+               }
+
+               /*
+                * If there's a mismatch for the given set, return -1 in the
+                * value.
+                */
+               if (id != cpu_id) {
+                       id = -1ULL;
+                       break;
+               }
+       }
+
+       pair->value = id;
+}
+
+static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
+                            const struct cpumask *cpus)
+{
+       int cpu;
+       u64 missing = 0;
+
+       pair->value = 0;
+       if (has_fpu())
+               pair->value |= RISCV_HWPROBE_IMA_FD;
+
+       if (riscv_isa_extension_available(NULL, c))
+               pair->value |= RISCV_HWPROBE_IMA_C;
+
+       if (has_vector())
+               pair->value |= RISCV_HWPROBE_IMA_V;
+
+       /*
+        * Loop through and record extensions that 1) anyone has, and 2) anyone
+        * doesn't have.
+        */
+       for_each_cpu(cpu, cpus) {
+               struct riscv_isainfo *isainfo = &hart_isa[cpu];
+
+#define EXT_KEY(ext)                                                                   \
+       do {                                                                            \
+               if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
+                       pair->value |= RISCV_HWPROBE_EXT_##ext;                         \
+               else                                                                    \
+                       missing |= RISCV_HWPROBE_EXT_##ext;                             \
+       } while (false)
+
+               /*
+                * Only use EXT_KEY() for extensions which can be exposed to userspace,
+                * regardless of the kernel's configuration, as no other checks, besides
+                * presence in the hart_isa bitmap, are made.
+                */
+               EXT_KEY(ZBA);
+               EXT_KEY(ZBB);
+               EXT_KEY(ZBS);
+               EXT_KEY(ZICBOZ);
+               EXT_KEY(ZBC);
+
+               EXT_KEY(ZBKB);
+               EXT_KEY(ZBKC);
+               EXT_KEY(ZBKX);
+               EXT_KEY(ZKND);
+               EXT_KEY(ZKNE);
+               EXT_KEY(ZKNH);
+               EXT_KEY(ZKSED);
+               EXT_KEY(ZKSH);
+               EXT_KEY(ZKT);
+               EXT_KEY(ZIHINTNTL);
+               EXT_KEY(ZTSO);
+               EXT_KEY(ZACAS);
+               EXT_KEY(ZICOND);
+
+               if (has_vector()) {
+                       EXT_KEY(ZVBB);
+                       EXT_KEY(ZVBC);
+                       EXT_KEY(ZVKB);
+                       EXT_KEY(ZVKG);
+                       EXT_KEY(ZVKNED);
+                       EXT_KEY(ZVKNHA);
+                       EXT_KEY(ZVKNHB);
+                       EXT_KEY(ZVKSED);
+                       EXT_KEY(ZVKSH);
+                       EXT_KEY(ZVKT);
+                       EXT_KEY(ZVFH);
+                       EXT_KEY(ZVFHMIN);
+               }
+
+               if (has_fpu()) {
+                       EXT_KEY(ZFH);
+                       EXT_KEY(ZFHMIN);
+                       EXT_KEY(ZFA);
+               }
+#undef EXT_KEY
+       }
+
+       /* Now turn off reporting features if any CPU is missing it. */
+       pair->value &= ~missing;
+}
+
+static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
+{
+       struct riscv_hwprobe pair;
+
+       hwprobe_isa_ext0(&pair, cpus);
+       return (pair.value & ext);
+}
+
+static u64 hwprobe_misaligned(const struct cpumask *cpus)
+{
+       int cpu;
+       u64 perf = -1ULL;
+
+       for_each_cpu(cpu, cpus) {
+               int this_perf = per_cpu(misaligned_access_speed, cpu);
+
+               if (perf == -1ULL)
+                       perf = this_perf;
+
+               if (perf != this_perf) {
+                       perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+                       break;
+               }
+       }
+
+       if (perf == -1ULL)
+               return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+
+       return perf;
+}
+
+static void hwprobe_one_pair(struct riscv_hwprobe *pair,
+                            const struct cpumask *cpus)
+{
+       switch (pair->key) {
+       case RISCV_HWPROBE_KEY_MVENDORID:
+       case RISCV_HWPROBE_KEY_MARCHID:
+       case RISCV_HWPROBE_KEY_MIMPID:
+               hwprobe_arch_id(pair, cpus);
+               break;
+       /*
+        * The kernel already assumes that the base single-letter ISA
+        * extensions are supported on all harts, and only supports the
+        * IMA base, so just cheat a bit here and tell that to
+        * userspace.
+        */
+       case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
+               pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
+               break;
+
+       case RISCV_HWPROBE_KEY_IMA_EXT_0:
+               hwprobe_isa_ext0(pair, cpus);
+               break;
+
+       case RISCV_HWPROBE_KEY_CPUPERF_0:
+               pair->value = hwprobe_misaligned(cpus);
+               break;
+
+       case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
+               pair->value = 0;
+               if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
+                       pair->value = riscv_cboz_block_size;
+               break;
+
+       /*
+        * For forward compatibility, unknown keys don't fail the whole
+        * call, but get their element key set to -1 and value set to 0
+        * indicating they're unrecognized.
+        */
+       default:
+               pair->key = -1;
+               pair->value = 0;
+               break;
+       }
+}
+
+static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
+                             size_t pair_count, size_t cpusetsize,
+                             unsigned long __user *cpus_user,
+                             unsigned int flags)
+{
+       size_t out;
+       int ret;
+       cpumask_t cpus;
+
+       /* Check the reserved flags. */
+       if (flags != 0)
+               return -EINVAL;
+
+       /*
+        * The interface supports taking in a CPU mask, and returns values that
+        * are consistent across that mask. Allow userspace to specify NULL and
+        * 0 as a shortcut to all online CPUs.
+        */
+       cpumask_clear(&cpus);
+       if (!cpusetsize && !cpus_user) {
+               cpumask_copy(&cpus, cpu_online_mask);
+       } else {
+               if (cpusetsize > cpumask_size())
+                       cpusetsize = cpumask_size();
+
+               ret = copy_from_user(&cpus, cpus_user, cpusetsize);
+               if (ret)
+                       return -EFAULT;
+
+               /*
+                * Userspace must provide at least one online CPU, without that
+                * there's no way to define what is supported.
+                */
+               cpumask_and(&cpus, &cpus, cpu_online_mask);
+               if (cpumask_empty(&cpus))
+                       return -EINVAL;
+       }
+
+       for (out = 0; out < pair_count; out++, pairs++) {
+               struct riscv_hwprobe pair;
+
+               if (get_user(pair.key, &pairs->key))
+                       return -EFAULT;
+
+               pair.value = 0;
+               hwprobe_one_pair(&pair, &cpus);
+               ret = put_user(pair.key, &pairs->key);
+               if (ret == 0)
+                       ret = put_user(pair.value, &pairs->value);
+
+               if (ret)
+                       return -EFAULT;
+       }
+
+       return 0;
+}
+
+static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
+                           size_t pair_count, size_t cpusetsize,
+                           unsigned long __user *cpus_user,
+                           unsigned int flags)
+{
+       cpumask_t cpus, one_cpu;
+       bool clear_all = false;
+       size_t i;
+       int ret;
+
+       if (flags != RISCV_HWPROBE_WHICH_CPUS)
+               return -EINVAL;
+
+       if (!cpusetsize || !cpus_user)
+               return -EINVAL;
+
+       if (cpusetsize > cpumask_size())
+               cpusetsize = cpumask_size();
+
+       ret = copy_from_user(&cpus, cpus_user, cpusetsize);
+       if (ret)
+               return -EFAULT;
+
+       if (cpumask_empty(&cpus))
+               cpumask_copy(&cpus, cpu_online_mask);
+
+       cpumask_and(&cpus, &cpus, cpu_online_mask);
+
+       cpumask_clear(&one_cpu);
+
+       for (i = 0; i < pair_count; i++) {
+               struct riscv_hwprobe pair, tmp;
+               int cpu;
+
+               ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
+               if (ret)
+                       return -EFAULT;
+
+               if (!riscv_hwprobe_key_is_valid(pair.key)) {
+                       clear_all = true;
+                       pair = (struct riscv_hwprobe){ .key = -1, };
+                       ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
+                       if (ret)
+                               return -EFAULT;
+               }
+
+               if (clear_all)
+                       continue;
+
+               tmp = (struct riscv_hwprobe){ .key = pair.key, };
+
+               for_each_cpu(cpu, &cpus) {
+                       cpumask_set_cpu(cpu, &one_cpu);
+
+                       hwprobe_one_pair(&tmp, &one_cpu);
+
+                       if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
+                               cpumask_clear_cpu(cpu, &cpus);
+
+                       cpumask_clear_cpu(cpu, &one_cpu);
+               }
+       }
+
+       if (clear_all)
+               cpumask_clear(&cpus);
+
+       ret = copy_to_user(cpus_user, &cpus, cpusetsize);
+       if (ret)
+               return -EFAULT;
+
+       return 0;
+}
+
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+                           size_t pair_count, size_t cpusetsize,
+                           unsigned long __user *cpus_user,
+                           unsigned int flags)
+{
+       if (flags & RISCV_HWPROBE_WHICH_CPUS)
+               return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+                                       cpus_user, flags);
+
+       return hwprobe_get_values(pairs, pair_count, cpusetsize,
+                                 cpus_user, flags);
+}
+
+#ifdef CONFIG_MMU
+
+static int __init init_hwprobe_vdso_data(void)
+{
+       struct vdso_data *vd = __arch_get_k_vdso_data();
+       struct arch_vdso_data *avd = &vd->arch_data;
+       u64 id_bitsmash = 0;
+       struct riscv_hwprobe pair;
+       int key;
+
+       /*
+        * Initialize vDSO data with the answers for the "all CPUs" case, to
+        * save a syscall in the common case.
+        */
+       for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
+               pair.key = key;
+               hwprobe_one_pair(&pair, cpu_online_mask);
+
+               WARN_ON_ONCE(pair.key < 0);
+
+               avd->all_cpu_hwprobe_values[key] = pair.value;
+               /*
+                * Smash together the vendor, arch, and impl IDs to see if
+                * they're all 0 or any negative.
+                */
+               if (key <= RISCV_HWPROBE_KEY_MIMPID)
+                       id_bitsmash |= pair.value;
+       }
+
+       /*
+        * If the arch, vendor, and implementation ID are all the same across
+        * all harts, then assume all CPUs are the same, and allow the vDSO to
+        * answer queries for arbitrary masks. However if all values are 0 (not
+        * populated) or any value returns -1 (varies across CPUs), then the
+        * vDSO should defer to the kernel for exotic cpu masks.
+        */
+       avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+       return 0;
+}
+
+arch_initcall_sync(init_hwprobe_vdso_data);
+
+#endif /* CONFIG_MMU */
+
+SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
+               size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
+               cpus, unsigned int, flags)
+{
+       return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
+                               cpus, flags);
+}
index a2ca5b7756a5b0ca716de4fe9ae8bd51693bdb97..f1c1416a9f1e51d320e57a92e75fb2219695e00a 100644 (file)
@@ -7,15 +7,7 @@
 
 #include <linux/syscalls.h>
 #include <asm/cacheflush.h>
-#include <asm/cpufeature.h>
-#include <asm/hwprobe.h>
-#include <asm/sbi.h>
-#include <asm/vector.h>
-#include <asm/switch_to.h>
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
 #include <asm-generic/mman-common.h>
-#include <vdso/vsyscall.h>
 
 static long riscv_sys_mmap(unsigned long addr, unsigned long len,
                           unsigned long prot, unsigned long flags,
@@ -77,283 +69,6 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
        return 0;
 }
 
-/*
- * The hwprobe interface, for allowing userspace to probe to see which features
- * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for more
- * details.
- */
-static void hwprobe_arch_id(struct riscv_hwprobe *pair,
-                           const struct cpumask *cpus)
-{
-       u64 id = -1ULL;
-       bool first = true;
-       int cpu;
-
-       for_each_cpu(cpu, cpus) {
-               u64 cpu_id;
-
-               switch (pair->key) {
-               case RISCV_HWPROBE_KEY_MVENDORID:
-                       cpu_id = riscv_cached_mvendorid(cpu);
-                       break;
-               case RISCV_HWPROBE_KEY_MIMPID:
-                       cpu_id = riscv_cached_mimpid(cpu);
-                       break;
-               case RISCV_HWPROBE_KEY_MARCHID:
-                       cpu_id = riscv_cached_marchid(cpu);
-                       break;
-               }
-
-               if (first) {
-                       id = cpu_id;
-                       first = false;
-               }
-
-               /*
-                * If there's a mismatch for the given set, return -1 in the
-                * value.
-                */
-               if (id != cpu_id) {
-                       id = -1ULL;
-                       break;
-               }
-       }
-
-       pair->value = id;
-}
-
-static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
-                            const struct cpumask *cpus)
-{
-       int cpu;
-       u64 missing = 0;
-
-       pair->value = 0;
-       if (has_fpu())
-               pair->value |= RISCV_HWPROBE_IMA_FD;
-
-       if (riscv_isa_extension_available(NULL, c))
-               pair->value |= RISCV_HWPROBE_IMA_C;
-
-       if (has_vector())
-               pair->value |= RISCV_HWPROBE_IMA_V;
-
-       /*
-        * Loop through and record extensions that 1) anyone has, and 2) anyone
-        * doesn't have.
-        */
-       for_each_cpu(cpu, cpus) {
-               struct riscv_isainfo *isainfo = &hart_isa[cpu];
-
-#define EXT_KEY(ext)                                                                   \
-       do {                                                                            \
-               if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
-                       pair->value |= RISCV_HWPROBE_EXT_##ext;                         \
-               else                                                                    \
-                       missing |= RISCV_HWPROBE_EXT_##ext;                             \
-       } while (false)
-
-               /*
-                * Only use EXT_KEY() for extensions which can be exposed to userspace,
-                * regardless of the kernel's configuration, as no other checks, besides
-                * presence in the hart_isa bitmap, are made.
-                */
-               EXT_KEY(ZBA);
-               EXT_KEY(ZBB);
-               EXT_KEY(ZBS);
-               EXT_KEY(ZICBOZ);
-#undef EXT_KEY
-       }
-
-       /* Now turn off reporting features if any CPU is missing it. */
-       pair->value &= ~missing;
-}
-
-static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
-{
-       struct riscv_hwprobe pair;
-
-       hwprobe_isa_ext0(&pair, cpus);
-       return (pair.value & ext);
-}
-
-static u64 hwprobe_misaligned(const struct cpumask *cpus)
-{
-       int cpu;
-       u64 perf = -1ULL;
-
-       for_each_cpu(cpu, cpus) {
-               int this_perf = per_cpu(misaligned_access_speed, cpu);
-
-               if (perf == -1ULL)
-                       perf = this_perf;
-
-               if (perf != this_perf) {
-                       perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
-                       break;
-               }
-       }
-
-       if (perf == -1ULL)
-               return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
-
-       return perf;
-}
-
-static void hwprobe_one_pair(struct riscv_hwprobe *pair,
-                            const struct cpumask *cpus)
-{
-       switch (pair->key) {
-       case RISCV_HWPROBE_KEY_MVENDORID:
-       case RISCV_HWPROBE_KEY_MARCHID:
-       case RISCV_HWPROBE_KEY_MIMPID:
-               hwprobe_arch_id(pair, cpus);
-               break;
-       /*
-        * The kernel already assumes that the base single-letter ISA
-        * extensions are supported on all harts, and only supports the
-        * IMA base, so just cheat a bit here and tell that to
-        * userspace.
-        */
-       case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
-               pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
-               break;
-
-       case RISCV_HWPROBE_KEY_IMA_EXT_0:
-               hwprobe_isa_ext0(pair, cpus);
-               break;
-
-       case RISCV_HWPROBE_KEY_CPUPERF_0:
-               pair->value = hwprobe_misaligned(cpus);
-               break;
-
-       case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
-               pair->value = 0;
-               if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
-                       pair->value = riscv_cboz_block_size;
-               break;
-
-       /*
-        * For forward compatibility, unknown keys don't fail the whole
-        * call, but get their element key set to -1 and value set to 0
-        * indicating they're unrecognized.
-        */
-       default:
-               pair->key = -1;
-               pair->value = 0;
-               break;
-       }
-}
-
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
-                           size_t pair_count, size_t cpu_count,
-                           unsigned long __user *cpus_user,
-                           unsigned int flags)
-{
-       size_t out;
-       int ret;
-       cpumask_t cpus;
-
-       /* Check the reserved flags. */
-       if (flags != 0)
-               return -EINVAL;
-
-       /*
-        * The interface supports taking in a CPU mask, and returns values that
-        * are consistent across that mask. Allow userspace to specify NULL and
-        * 0 as a shortcut to all online CPUs.
-        */
-       cpumask_clear(&cpus);
-       if (!cpu_count && !cpus_user) {
-               cpumask_copy(&cpus, cpu_online_mask);
-       } else {
-               if (cpu_count > cpumask_size())
-                       cpu_count = cpumask_size();
-
-               ret = copy_from_user(&cpus, cpus_user, cpu_count);
-               if (ret)
-                       return -EFAULT;
-
-               /*
-                * Userspace must provide at least one online CPU, without that
-                * there's no way to define what is supported.
-                */
-               cpumask_and(&cpus, &cpus, cpu_online_mask);
-               if (cpumask_empty(&cpus))
-                       return -EINVAL;
-       }
-
-       for (out = 0; out < pair_count; out++, pairs++) {
-               struct riscv_hwprobe pair;
-
-               if (get_user(pair.key, &pairs->key))
-                       return -EFAULT;
-
-               pair.value = 0;
-               hwprobe_one_pair(&pair, &cpus);
-               ret = put_user(pair.key, &pairs->key);
-               if (ret == 0)
-                       ret = put_user(pair.value, &pairs->value);
-
-               if (ret)
-                       return -EFAULT;
-       }
-
-       return 0;
-}
-
-#ifdef CONFIG_MMU
-
-static int __init init_hwprobe_vdso_data(void)
-{
-       struct vdso_data *vd = __arch_get_k_vdso_data();
-       struct arch_vdso_data *avd = &vd->arch_data;
-       u64 id_bitsmash = 0;
-       struct riscv_hwprobe pair;
-       int key;
-
-       /*
-        * Initialize vDSO data with the answers for the "all CPUs" case, to
-        * save a syscall in the common case.
-        */
-       for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
-               pair.key = key;
-               hwprobe_one_pair(&pair, cpu_online_mask);
-
-               WARN_ON_ONCE(pair.key < 0);
-
-               avd->all_cpu_hwprobe_values[key] = pair.value;
-               /*
-                * Smash together the vendor, arch, and impl IDs to see if
-                * they're all 0 or any negative.
-                */
-               if (key <= RISCV_HWPROBE_KEY_MIMPID)
-                       id_bitsmash |= pair.value;
-       }
-
-       /*
-        * If the arch, vendor, and implementation ID are all the same across
-        * all harts, then assume all CPUs are the same, and allow the vDSO to
-        * answer queries for arbitrary masks. However if all values are 0 (not
-        * populated) or any value returns -1 (varies across CPUs), then the
-        * vDSO should defer to the kernel for exotic cpu masks.
-        */
-       avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
-       return 0;
-}
-
-arch_initcall_sync(init_hwprobe_vdso_data);
-
-#endif /* CONFIG_MMU */
-
-SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
-               size_t, pair_count, size_t, cpu_count, unsigned long __user *,
-               cpus, unsigned int, flags)
-{
-       return do_riscv_hwprobe(pairs, pair_count, cpu_count,
-                               cpus, flags);
-}
-
 /* Not defined using SYSCALL_DEFINE0 to avoid error injection */
 asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused)
 {
index 23641e82a9df2a244727d9e5b674f8c546299ea2..ba3477197789769f846941f2d2a07fe887885d1b 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/sbi.h>
 #include <asm/processor.h>
 #include <asm/timex.h>
+#include <asm/paravirt.h>
 
 unsigned long riscv_timebase __ro_after_init;
 EXPORT_SYMBOL_GPL(riscv_timebase);
@@ -45,4 +46,6 @@ void __init time_init(void)
        timer_probe();
 
        tick_setup_hrtimer_broadcast();
+
+       pv_time_init();
 }
index 5255f8134aeff5484e24d83b727ff9908a1b1c44..8ded225e8c5b1313d800c8f87878212c48c9b250 100644 (file)
@@ -319,7 +319,7 @@ static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
 static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
 {
        if (user_mode(regs)) {
-               return __get_user(*r_val, addr);
+               return __get_user(*r_val, (u8 __user *)addr);
        } else {
                *r_val = *addr;
                return 0;
@@ -329,7 +329,7 @@ static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
 static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
 {
        if (user_mode(regs)) {
-               return __put_user(val, addr);
+               return __put_user(val, (u8 __user *)addr);
        } else {
                *addr = val;
                return 0;
@@ -343,7 +343,7 @@ static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
        if (user_mode(regs)) {                          \
                __ret = __get_user(insn, insn_addr);    \
        } else {                                        \
-               insn = *insn_addr;                      \
+               insn = *(__force u16 *)insn_addr;       \
                __ret = 0;                              \
        }                                               \
                                                        \
index cadf725ef798370bbe248f80dcf207c7fd439e7b..1e926e4b5881b6b2c44ec8438870809539f773c5 100644 (file)
@@ -3,26 +3,22 @@
  * Copyright 2023 Rivos, Inc
  */
 
+#include <linux/string.h>
 #include <linux/types.h>
 #include <vdso/datapage.h>
 #include <vdso/helpers.h>
 
 extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
-                        size_t cpu_count, unsigned long *cpus,
+                        size_t cpusetsize, unsigned long *cpus,
                         unsigned int flags);
 
-/* Add a prototype to avoid -Wmissing-prototypes warning. */
-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
-                        size_t cpu_count, unsigned long *cpus,
-                        unsigned int flags);
-
-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
-                        size_t cpu_count, unsigned long *cpus,
-                        unsigned int flags)
+static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
+                                size_t cpusetsize, unsigned long *cpus,
+                                unsigned int flags)
 {
        const struct vdso_data *vd = __arch_get_vdso_data();
        const struct arch_vdso_data *avd = &vd->arch_data;
-       bool all_cpus = !cpu_count && !cpus;
+       bool all_cpus = !cpusetsize && !cpus;
        struct riscv_hwprobe *p = pairs;
        struct riscv_hwprobe *end = pairs + pair_count;
 
@@ -33,7 +29,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
         * masks.
         */
        if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
-               return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags);
+               return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
 
        /* This is something we can handle, fill out the pairs. */
        while (p < end) {
@@ -50,3 +46,71 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
 
        return 0;
 }
+
+static int riscv_vdso_get_cpus(struct riscv_hwprobe *pairs, size_t pair_count,
+                              size_t cpusetsize, unsigned long *cpus,
+                              unsigned int flags)
+{
+       const struct vdso_data *vd = __arch_get_vdso_data();
+       const struct arch_vdso_data *avd = &vd->arch_data;
+       struct riscv_hwprobe *p = pairs;
+       struct riscv_hwprobe *end = pairs + pair_count;
+       unsigned char *c = (unsigned char *)cpus;
+       bool empty_cpus = true;
+       bool clear_all = false;
+       int i;
+
+       if (!cpusetsize || !cpus)
+               return -EINVAL;
+
+       for (i = 0; i < cpusetsize; i++) {
+               if (c[i]) {
+                       empty_cpus = false;
+                       break;
+               }
+       }
+
+       if (empty_cpus || flags != RISCV_HWPROBE_WHICH_CPUS || !avd->homogeneous_cpus)
+               return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
+
+       while (p < end) {
+               if (riscv_hwprobe_key_is_valid(p->key)) {
+                       struct riscv_hwprobe t = {
+                               .key = p->key,
+                               .value = avd->all_cpu_hwprobe_values[p->key],
+                       };
+
+                       if (!riscv_hwprobe_pair_cmp(&t, p))
+                               clear_all = true;
+               } else {
+                       clear_all = true;
+                       p->key = -1;
+                       p->value = 0;
+               }
+               p++;
+       }
+
+       if (clear_all) {
+               for (i = 0; i < cpusetsize; i++)
+                       c[i] = 0;
+       }
+
+       return 0;
+}
+
+/* Add a prototype to avoid -Wmissing-prototypes warning. */
+int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+                        size_t cpusetsize, unsigned long *cpus,
+                        unsigned int flags);
+
+int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+                        size_t cpusetsize, unsigned long *cpus,
+                        unsigned int flags)
+{
+       if (flags & RISCV_HWPROBE_WHICH_CPUS)
+               return riscv_vdso_get_cpus(pairs, pair_count, cpusetsize,
+                                          cpus, flags);
+
+       return riscv_vdso_get_values(pairs, pair_count, cpusetsize,
+                                    cpus, flags);
+}
index 578b6292487e1bb5e32309ee6874b8ba7a0c8315..6727d1d3b8f282c16a161c96ba898a17db87176e 100644 (file)
 #include <asm/bug.h>
 
 static bool riscv_v_implicit_uacc = IS_ENABLED(CONFIG_RISCV_ISA_V_DEFAULT_ENABLE);
+static struct kmem_cache *riscv_v_user_cachep;
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+static struct kmem_cache *riscv_v_kernel_cachep;
+#endif
 
 unsigned long riscv_v_vsize __read_mostly;
 EXPORT_SYMBOL_GPL(riscv_v_vsize);
@@ -47,6 +51,21 @@ int riscv_v_setup_vsize(void)
        return 0;
 }
 
+void __init riscv_v_setup_ctx_cache(void)
+{
+       if (!has_vector())
+               return;
+
+       riscv_v_user_cachep = kmem_cache_create_usercopy("riscv_vector_ctx",
+                                                        riscv_v_vsize, 16, SLAB_PANIC,
+                                                        0, riscv_v_vsize, NULL);
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+       riscv_v_kernel_cachep = kmem_cache_create("riscv_vector_kctx",
+                                                 riscv_v_vsize, 16,
+                                                 SLAB_PANIC, NULL);
+#endif
+}
+
 static bool insn_is_vector(u32 insn_buf)
 {
        u32 opcode = insn_buf & __INSN_OPCODE_MASK;
@@ -80,20 +99,37 @@ static bool insn_is_vector(u32 insn_buf)
        return false;
 }
 
-static int riscv_v_thread_zalloc(void)
+static int riscv_v_thread_zalloc(struct kmem_cache *cache,
+                                struct __riscv_v_ext_state *ctx)
 {
        void *datap;
 
-       datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
+       datap = kmem_cache_zalloc(cache, GFP_KERNEL);
        if (!datap)
                return -ENOMEM;
 
-       current->thread.vstate.datap = datap;
-       memset(&current->thread.vstate, 0, offsetof(struct __riscv_v_ext_state,
-                                                   datap));
+       ctx->datap = datap;
+       memset(ctx, 0, offsetof(struct __riscv_v_ext_state, datap));
        return 0;
 }
 
+void riscv_v_thread_alloc(struct task_struct *tsk)
+{
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+       riscv_v_thread_zalloc(riscv_v_kernel_cachep, &tsk->thread.kernel_vstate);
+#endif
+}
+
+void riscv_v_thread_free(struct task_struct *tsk)
+{
+       if (tsk->thread.vstate.datap)
+               kmem_cache_free(riscv_v_user_cachep, tsk->thread.vstate.datap);
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+       if (tsk->thread.kernel_vstate.datap)
+               kmem_cache_free(riscv_v_kernel_cachep, tsk->thread.kernel_vstate.datap);
+#endif
+}
+
 #define VSTATE_CTRL_GET_CUR(x) ((x) & PR_RISCV_V_VSTATE_CTRL_CUR_MASK)
 #define VSTATE_CTRL_GET_NEXT(x) (((x) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) >> 2)
 #define VSTATE_CTRL_MAKE_NEXT(x) (((x) << 2) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK)
@@ -122,7 +158,8 @@ static inline void riscv_v_ctrl_set(struct task_struct *tsk, int cur, int nxt,
        ctrl |= VSTATE_CTRL_MAKE_NEXT(nxt);
        if (inherit)
                ctrl |= PR_RISCV_V_VSTATE_CTRL_INHERIT;
-       tsk->thread.vstate_ctrl = ctrl;
+       tsk->thread.vstate_ctrl &= ~PR_RISCV_V_VSTATE_CTRL_MASK;
+       tsk->thread.vstate_ctrl |= ctrl;
 }
 
 bool riscv_v_vstate_ctrl_user_allowed(void)
@@ -162,12 +199,12 @@ bool riscv_v_first_use_handler(struct pt_regs *regs)
         * context where VS has been off. So, try to allocate the user's V
         * context and resume execution.
         */
-       if (riscv_v_thread_zalloc()) {
+       if (riscv_v_thread_zalloc(riscv_v_user_cachep, &current->thread.vstate)) {
                force_sig(SIGBUS);
                return true;
        }
        riscv_v_vstate_on(regs);
-       riscv_v_vstate_restore(current, regs);
+       riscv_v_vstate_set_restore(current, regs);
        return true;
 }
 
index 50767647fbc649de81d7722528e9bcd116d88657..8c3daa1b05313af6b35311be22b7c8de3515633d 100644 (file)
@@ -29,10 +29,12 @@ SECTIONS
        HEAD_TEXT_SECTION
        INIT_TEXT_SECTION(PAGE_SIZE)
        /* we have to discard exit text and such at runtime, not link time */
+       __exittext_begin = .;
        .exit.text :
        {
                EXIT_TEXT
        }
+       __exittext_end = .;
 
        .text : {
                _text = .;
index 492dd4b8f3d69a0bcdb6e133d47d6444e2e72804..002ca58dd998cb78b662837b5ebac988fb6c77bb 100644 (file)
@@ -69,10 +69,12 @@ SECTIONS
                __soc_builtin_dtb_table_end = .;
        }
        /* we have to discard exit text and such at runtime, not link time */
+       __exittext_begin = .;
        .exit.text :
        {
                EXIT_TEXT
        }
+       __exittext_end = .;
 
        __init_text_end = .;
        . = ALIGN(SECTION_ALIGN);
index dfc237d7875b53bb2f3e7e716c1b85af8b335458..d490db94385883eb3e74048c799cea3f6a07e8cf 100644 (file)
@@ -20,18 +20,17 @@ if VIRTUALIZATION
 config KVM
        tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)"
        depends on RISCV_SBI && MMU
-       select HAVE_KVM_EVENTFD
        select HAVE_KVM_IRQCHIP
-       select HAVE_KVM_IRQFD
        select HAVE_KVM_IRQ_ROUTING
        select HAVE_KVM_MSI
        select HAVE_KVM_VCPU_ASYNC_IOCTL
+       select KVM_COMMON
        select KVM_GENERIC_DIRTYLOG_READ_PROTECT
        select KVM_GENERIC_HARDWARE_ENABLING
        select KVM_MMIO
        select KVM_XFER_TO_GUEST_WORK
-       select MMU_NOTIFIER
-       select PREEMPT_NOTIFIERS
+       select KVM_GENERIC_MMU_NOTIFIER
+       select SCHED_INFO
        help
          Support hosting virtualized guest machines.
 
index 4c2067fc59fcbf004e0c7df753a6c31ad70ee8e0..c9646521f1132e5a9ea8b1e1c327823224927bbc 100644 (file)
@@ -26,6 +26,7 @@ kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
 kvm-y += vcpu_sbi_base.o
 kvm-y += vcpu_sbi_replace.o
 kvm-y += vcpu_sbi_hsm.o
+kvm-y += vcpu_sbi_sta.o
 kvm-y += vcpu_timer.o
 kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o vcpu_sbi_pmu.o
 kvm-y += aia.o
index 068c7459387102c19f7759f445c878c510670530..a9e2fd7245e1e92676b0f249ac35017ac251d1f8 100644 (file)
@@ -103,7 +103,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
        *ptep_level = current_level;
        ptep = (pte_t *)kvm->arch.pgd;
        ptep = &ptep[gstage_pte_index(addr, current_level)];
-       while (ptep && pte_val(*ptep)) {
+       while (ptep && pte_val(ptep_get(ptep))) {
                if (gstage_pte_leaf(ptep)) {
                        *ptep_level = current_level;
                        *ptepp = ptep;
@@ -113,7 +113,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
                if (current_level) {
                        current_level--;
                        *ptep_level = current_level;
-                       ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+                       ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
                        ptep = &ptep[gstage_pte_index(addr, current_level)];
                } else {
                        ptep = NULL;
@@ -149,25 +149,25 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
                if (gstage_pte_leaf(ptep))
                        return -EEXIST;
 
-               if (!pte_val(*ptep)) {
+               if (!pte_val(ptep_get(ptep))) {
                        if (!pcache)
                                return -ENOMEM;
                        next_ptep = kvm_mmu_memory_cache_alloc(pcache);
                        if (!next_ptep)
                                return -ENOMEM;
-                       *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
-                                       __pgprot(_PAGE_TABLE));
+                       set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
+                                             __pgprot(_PAGE_TABLE)));
                } else {
                        if (gstage_pte_leaf(ptep))
                                return -EEXIST;
-                       next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+                       next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
                }
 
                current_level--;
                ptep = &next_ptep[gstage_pte_index(addr, current_level)];
        }
 
-       *ptep = *new_pte;
+       set_pte(ptep, *new_pte);
        if (gstage_pte_leaf(ptep))
                gstage_remote_tlb_flush(kvm, current_level, addr);
 
@@ -239,11 +239,11 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
 
        BUG_ON(addr & (page_size - 1));
 
-       if (!pte_val(*ptep))
+       if (!pte_val(ptep_get(ptep)))
                return;
 
        if (ptep_level && !gstage_pte_leaf(ptep)) {
-               next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+               next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
                next_ptep_level = ptep_level - 1;
                ret = gstage_level_to_page_size(next_ptep_level,
                                                &next_page_size);
@@ -261,7 +261,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
                if (op == GSTAGE_OP_CLEAR)
                        set_pte(ptep, __pte(0));
                else if (op == GSTAGE_OP_WP)
-                       set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
+                       set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
                gstage_remote_tlb_flush(kvm, ptep_level, addr);
        }
 }
@@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
                                   &ptep, &ptep_level))
                return false;
 
-       return pte_young(*ptep);
+       return pte_young(ptep_get(ptep));
 }
 
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
index e087c809073c1bbb12956a1f5e4774d3c1db48ed..b5ca9f2e98acd216caf4dd7537d106ce4f5bcdc0 100644 (file)
@@ -83,6 +83,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
        vcpu->arch.hfence_tail = 0;
        memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
 
+       kvm_riscv_vcpu_sbi_sta_reset(vcpu);
+
        /* Reset the guest CSRs for hotplug usecase */
        if (loaded)
                kvm_arch_vcpu_load(vcpu, smp_processor_id());
@@ -541,6 +543,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        kvm_riscv_vcpu_aia_load(vcpu, cpu);
 
+       kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+
        vcpu->cpu = cpu;
 }
 
@@ -614,6 +618,9 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 
                if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
                        kvm_riscv_hfence_process(vcpu);
+
+               if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+                       kvm_riscv_vcpu_record_steal_time(vcpu);
        }
 }
 
@@ -757,8 +764,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                /* Update HVIP CSR for current CPU */
                kvm_riscv_update_hvip(vcpu);
 
-               if (ret <= 0 ||
-                   kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
+               if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
                    kvm_request_pending(vcpu) ||
                    xfer_to_guest_mode_work_pending()) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
index f8c9fa0c03c5abbd8a8035f255455e7d5d1c9288..5f7355e960084b4a4a17ea294e92352f7a70da60 100644 (file)
@@ -42,15 +42,42 @@ static const unsigned long kvm_isa_ext_arr[] = {
        KVM_ISA_EXT_ARR(SVPBMT),
        KVM_ISA_EXT_ARR(ZBA),
        KVM_ISA_EXT_ARR(ZBB),
+       KVM_ISA_EXT_ARR(ZBC),
+       KVM_ISA_EXT_ARR(ZBKB),
+       KVM_ISA_EXT_ARR(ZBKC),
+       KVM_ISA_EXT_ARR(ZBKX),
        KVM_ISA_EXT_ARR(ZBS),
+       KVM_ISA_EXT_ARR(ZFA),
+       KVM_ISA_EXT_ARR(ZFH),
+       KVM_ISA_EXT_ARR(ZFHMIN),
        KVM_ISA_EXT_ARR(ZICBOM),
        KVM_ISA_EXT_ARR(ZICBOZ),
        KVM_ISA_EXT_ARR(ZICNTR),
        KVM_ISA_EXT_ARR(ZICOND),
        KVM_ISA_EXT_ARR(ZICSR),
        KVM_ISA_EXT_ARR(ZIFENCEI),
+       KVM_ISA_EXT_ARR(ZIHINTNTL),
        KVM_ISA_EXT_ARR(ZIHINTPAUSE),
        KVM_ISA_EXT_ARR(ZIHPM),
+       KVM_ISA_EXT_ARR(ZKND),
+       KVM_ISA_EXT_ARR(ZKNE),
+       KVM_ISA_EXT_ARR(ZKNH),
+       KVM_ISA_EXT_ARR(ZKR),
+       KVM_ISA_EXT_ARR(ZKSED),
+       KVM_ISA_EXT_ARR(ZKSH),
+       KVM_ISA_EXT_ARR(ZKT),
+       KVM_ISA_EXT_ARR(ZVBB),
+       KVM_ISA_EXT_ARR(ZVBC),
+       KVM_ISA_EXT_ARR(ZVFH),
+       KVM_ISA_EXT_ARR(ZVFHMIN),
+       KVM_ISA_EXT_ARR(ZVKB),
+       KVM_ISA_EXT_ARR(ZVKG),
+       KVM_ISA_EXT_ARR(ZVKNED),
+       KVM_ISA_EXT_ARR(ZVKNHA),
+       KVM_ISA_EXT_ARR(ZVKNHB),
+       KVM_ISA_EXT_ARR(ZVKSED),
+       KVM_ISA_EXT_ARR(ZVKSH),
+       KVM_ISA_EXT_ARR(ZVKT),
 };
 
 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
@@ -92,13 +119,40 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
        case KVM_RISCV_ISA_EXT_SVNAPOT:
        case KVM_RISCV_ISA_EXT_ZBA:
        case KVM_RISCV_ISA_EXT_ZBB:
+       case KVM_RISCV_ISA_EXT_ZBC:
+       case KVM_RISCV_ISA_EXT_ZBKB:
+       case KVM_RISCV_ISA_EXT_ZBKC:
+       case KVM_RISCV_ISA_EXT_ZBKX:
        case KVM_RISCV_ISA_EXT_ZBS:
+       case KVM_RISCV_ISA_EXT_ZFA:
+       case KVM_RISCV_ISA_EXT_ZFH:
+       case KVM_RISCV_ISA_EXT_ZFHMIN:
        case KVM_RISCV_ISA_EXT_ZICNTR:
        case KVM_RISCV_ISA_EXT_ZICOND:
        case KVM_RISCV_ISA_EXT_ZICSR:
        case KVM_RISCV_ISA_EXT_ZIFENCEI:
+       case KVM_RISCV_ISA_EXT_ZIHINTNTL:
        case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
        case KVM_RISCV_ISA_EXT_ZIHPM:
+       case KVM_RISCV_ISA_EXT_ZKND:
+       case KVM_RISCV_ISA_EXT_ZKNE:
+       case KVM_RISCV_ISA_EXT_ZKNH:
+       case KVM_RISCV_ISA_EXT_ZKR:
+       case KVM_RISCV_ISA_EXT_ZKSED:
+       case KVM_RISCV_ISA_EXT_ZKSH:
+       case KVM_RISCV_ISA_EXT_ZKT:
+       case KVM_RISCV_ISA_EXT_ZVBB:
+       case KVM_RISCV_ISA_EXT_ZVBC:
+       case KVM_RISCV_ISA_EXT_ZVFH:
+       case KVM_RISCV_ISA_EXT_ZVFHMIN:
+       case KVM_RISCV_ISA_EXT_ZVKB:
+       case KVM_RISCV_ISA_EXT_ZVKG:
+       case KVM_RISCV_ISA_EXT_ZVKNED:
+       case KVM_RISCV_ISA_EXT_ZVKNHA:
+       case KVM_RISCV_ISA_EXT_ZVKNHB:
+       case KVM_RISCV_ISA_EXT_ZVKSED:
+       case KVM_RISCV_ISA_EXT_ZVKSH:
+       case KVM_RISCV_ISA_EXT_ZVKT:
                return false;
        /* Extensions which can be disabled using Smstateen */
        case KVM_RISCV_ISA_EXT_SSAIA:
@@ -485,7 +539,7 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
                if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
                        rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
                                                              reg_val);
-break;
+               break;
        default:
                rc = -ENOENT;
                break;
@@ -931,50 +985,106 @@ static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
        return copy_isa_ext_reg_indices(vcpu, NULL);;
 }
 
-static inline unsigned long num_sbi_ext_regs(void)
+static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
-       /*
-        * number of KVM_REG_RISCV_SBI_SINGLE +
-        * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
-        */
-       return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
-}
-
-static int copy_sbi_ext_reg_indices(u64 __user *uindices)
-{
-       int n;
+       unsigned int n = 0;
 
-       /* copy KVM_REG_RISCV_SBI_SINGLE */
-       n = KVM_RISCV_SBI_EXT_MAX;
-       for (int i = 0; i < n; i++) {
+       for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
                u64 size = IS_ENABLED(CONFIG_32BIT) ?
                           KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
                          KVM_REG_RISCV_SBI_SINGLE | i;
 
+               if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
+                       continue;
+
                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }
+
+               n++;
        }
 
-       /* copy KVM_REG_RISCV_SBI_MULTI */
-       n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
-       for (int i = 0; i < n; i++) {
-               u64 size = IS_ENABLED(CONFIG_32BIT) ?
-                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
-               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
-                         KVM_REG_RISCV_SBI_MULTI_EN | i;
+       return n;
+}
+
+static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
+{
+       return copy_sbi_ext_reg_indices(vcpu, NULL);
+}
+
+static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+       int total = 0;
+
+       if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);
+
+               for (int i = 0; i < n; i++) {
+                       u64 reg = KVM_REG_RISCV | size |
+                                 KVM_REG_RISCV_SBI_STATE |
+                                 KVM_REG_RISCV_SBI_STA | i;
+
+                       if (uindices) {
+                               if (put_user(reg, uindices))
+                                       return -EFAULT;
+                               uindices++;
+                       }
+               }
+
+               total += n;
+       }
+
+       return total;
+}
+
+static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
+{
+       return copy_sbi_reg_indices(vcpu, NULL);
+}
+
+static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
+{
+       if (!riscv_isa_extension_available(vcpu->arch.isa, v))
+               return 0;
+
+       /* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
+       return 37;
+}
+
+static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
+                               u64 __user *uindices)
+{
+       const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+       int n = num_vector_regs(vcpu);
+       u64 reg, size;
+       int i;
+
+       if (n == 0)
+               return 0;
+
+       /* copy vstart, vl, vtype, vcsr and vlenb */
+       size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+       for (i = 0; i < 5; i++) {
+               reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;
 
                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }
+       }
 
-               reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
-                         KVM_REG_RISCV_SBI_MULTI_DIS | i;
+       /* vector_regs have a variable 'vlenb' size */
+       size = __builtin_ctzl(cntx->vector.vlenb);
+       size <<= KVM_REG_SIZE_SHIFT;
+       for (i = 0; i < 32; i++) {
+               reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
+                       KVM_REG_RISCV_VECTOR_REG(i);
 
                if (uindices) {
                        if (put_user(reg, uindices))
@@ -983,7 +1093,7 @@ static int copy_sbi_ext_reg_indices(u64 __user *uindices)
                }
        }
 
-       return num_sbi_ext_regs();
+       return n;
 }
 
 /*
@@ -1001,8 +1111,10 @@ unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
        res += num_timer_regs();
        res += num_fp_f_regs(vcpu);
        res += num_fp_d_regs(vcpu);
+       res += num_vector_regs(vcpu);
        res += num_isa_ext_regs(vcpu);
-       res += num_sbi_ext_regs();
+       res += num_sbi_ext_regs(vcpu);
+       res += num_sbi_regs(vcpu);
 
        return res;
 }
@@ -1045,14 +1157,25 @@ int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
                return ret;
        uindices += ret;
 
+       ret = copy_vector_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
+
        ret = copy_isa_ext_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;
 
-       ret = copy_sbi_ext_reg_indices(uindices);
+       ret = copy_sbi_ext_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
+       uindices += ret;
+
+       ret = copy_sbi_reg_indices(vcpu, uindices);
+       if (ret < 0)
+               return ret;
+       uindices += ret;
 
        return 0;
 }
@@ -1075,12 +1198,14 @@ int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
        case KVM_REG_RISCV_FP_D:
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
+       case KVM_REG_RISCV_VECTOR:
+               return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
        case KVM_REG_RISCV_ISA_EXT:
                return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_EXT:
                return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
-       case KVM_REG_RISCV_VECTOR:
-               return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
+       case KVM_REG_RISCV_SBI_STATE:
+               return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
        default:
                break;
        }
@@ -1106,12 +1231,14 @@ int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
        case KVM_REG_RISCV_FP_D:
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
+       case KVM_REG_RISCV_VECTOR:
+               return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
        case KVM_REG_RISCV_ISA_EXT:
                return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_EXT:
                return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
-       case KVM_REG_RISCV_VECTOR:
-               return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
+       case KVM_REG_RISCV_SBI_STATE:
+               return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
        default:
                break;
        }
index a04ff98085d93936a70a2ff1aec7ad3e305f7aaa..72a2ffb8dcd158a4f5b4d0839d26ee399e2b93b6 100644 (file)
@@ -70,6 +70,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
                .ext_idx = KVM_RISCV_SBI_EXT_DBCN,
                .ext_ptr = &vcpu_sbi_ext_dbcn,
        },
+       {
+               .ext_idx = KVM_RISCV_SBI_EXT_STA,
+               .ext_ptr = &vcpu_sbi_ext_sta,
+       },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
                .ext_ptr = &vcpu_sbi_ext_experimental,
@@ -80,6 +84,34 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
        },
 };
 
+static const struct kvm_riscv_sbi_extension_entry *
+riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
+{
+       const struct kvm_riscv_sbi_extension_entry *sext = NULL;
+
+       if (idx >= KVM_RISCV_SBI_EXT_MAX)
+               return NULL;
+
+       for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+               if (sbi_ext[i].ext_idx == idx) {
+                       sext = &sbi_ext[i];
+                       break;
+               }
+       }
+
+       return sext;
+}
+
+bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
+{
+       struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+       const struct kvm_riscv_sbi_extension_entry *sext;
+
+       sext = riscv_vcpu_get_sbi_ext(vcpu, idx);
+
+       return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
+}
+
 void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
@@ -140,28 +172,19 @@ static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long reg_val)
 {
-       unsigned long i;
-       const struct kvm_riscv_sbi_extension_entry *sext = NULL;
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
-
-       if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
-               return -ENOENT;
+       const struct kvm_riscv_sbi_extension_entry *sext;
 
        if (reg_val != 1 && reg_val != 0)
                return -EINVAL;
 
-       for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
-               if (sbi_ext[i].ext_idx == reg_num) {
-                       sext = &sbi_ext[i];
-                       break;
-               }
-       }
-       if (!sext)
+       sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
+       if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;
 
        scontext->ext_status[sext->ext_idx] = (reg_val) ?
-                       KVM_RISCV_SBI_EXT_AVAILABLE :
-                       KVM_RISCV_SBI_EXT_UNAVAILABLE;
+                       KVM_RISCV_SBI_EXT_STATUS_ENABLED :
+                       KVM_RISCV_SBI_EXT_STATUS_DISABLED;
 
        return 0;
 }
@@ -170,24 +193,16 @@ static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long *reg_val)
 {
-       unsigned long i;
-       const struct kvm_riscv_sbi_extension_entry *sext = NULL;
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+       const struct kvm_riscv_sbi_extension_entry *sext;
 
-       if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
-               return -ENOENT;
-
-       for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
-               if (sbi_ext[i].ext_idx == reg_num) {
-                       sext = &sbi_ext[i];
-                       break;
-               }
-       }
-       if (!sext)
+       sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
+       if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;
 
        *reg_val = scontext->ext_status[sext->ext_idx] ==
-                               KVM_RISCV_SBI_EXT_AVAILABLE;
+                               KVM_RISCV_SBI_EXT_STATUS_ENABLED;
+
        return 0;
 }
 
@@ -310,6 +325,69 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
+                              const struct kvm_one_reg *reg)
+{
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_SBI_STATE);
+       unsigned long reg_subtype, reg_val;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+
+       if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+       reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_SBI_STA:
+               return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
+                              const struct kvm_one_reg *reg)
+{
+       unsigned long __user *uaddr =
+                       (unsigned long __user *)(unsigned long)reg->addr;
+       unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+                                           KVM_REG_SIZE_MASK |
+                                           KVM_REG_RISCV_SBI_STATE);
+       unsigned long reg_subtype, reg_val;
+       int ret;
+
+       if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+               return -EINVAL;
+
+       reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+       reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_SBI_STA:
+               ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (ret)
+               return ret;
+
+       if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+               return -EFAULT;
+
+       return 0;
+}
+
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid)
 {
@@ -325,7 +403,7 @@ const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                if (ext->extid_start <= extid && ext->extid_end >= extid) {
                        if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
                            scontext->ext_status[entry->ext_idx] ==
-                                               KVM_RISCV_SBI_EXT_AVAILABLE)
+                                               KVM_RISCV_SBI_EXT_STATUS_ENABLED)
                                return ext;
 
                        return NULL;
@@ -413,12 +491,12 @@ void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
 
                if (ext->probe && !ext->probe(vcpu)) {
                        scontext->ext_status[entry->ext_idx] =
-                               KVM_RISCV_SBI_EXT_UNAVAILABLE;
+                               KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
                        continue;
                }
 
-               scontext->ext_status[entry->ext_idx] = ext->default_unavail ?
-                                       KVM_RISCV_SBI_EXT_UNAVAILABLE :
-                                       KVM_RISCV_SBI_EXT_AVAILABLE;
+               scontext->ext_status[entry->ext_idx] = ext->default_disabled ?
+                                       KVM_RISCV_SBI_EXT_STATUS_DISABLED :
+                                       KVM_RISCV_SBI_EXT_STATUS_ENABLED;
        }
 }
index 23b57c931b1522777ff4631abb8f24193a0bcd3f..9c2ab3dfa93aa535788a4a04a36717220653e864 100644 (file)
@@ -204,6 +204,6 @@ static int kvm_sbi_ext_dbcn_handler(struct kvm_vcpu *vcpu,
 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn = {
        .extid_start = SBI_EXT_DBCN,
        .extid_end = SBI_EXT_DBCN,
-       .default_unavail = true,
+       .default_disabled = true,
        .handler = kvm_sbi_ext_dbcn_handler,
 };
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
new file mode 100644 (file)
index 0000000..01f09fe
--- /dev/null
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Ventana Micro Systems Inc.
+ */
+
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
+
+#include <asm/bug.h>
+#include <asm/current.h>
+#include <asm/kvm_vcpu_sbi.h>
+#include <asm/page.h>
+#include <asm/sbi.h>
+#include <asm/uaccess.h>
+
+void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.sta.shmem = INVALID_GPA;
+       vcpu->arch.sta.last_steal = 0;
+}
+
+void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
+{
+       gpa_t shmem = vcpu->arch.sta.shmem;
+       u64 last_steal = vcpu->arch.sta.last_steal;
+       u32 *sequence_ptr, sequence;
+       u64 *steal_ptr, steal;
+       unsigned long hva;
+       gfn_t gfn;
+
+       if (shmem == INVALID_GPA)
+               return;
+
+       /*
+        * shmem is 64-byte aligned (see the enforcement in
+        * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
+        * is 64 bytes, so we know all its offsets are in the same page.
+        */
+       gfn = shmem >> PAGE_SHIFT;
+       hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
+
+       if (WARN_ON(kvm_is_error_hva(hva))) {
+               vcpu->arch.sta.shmem = INVALID_GPA;
+               return;
+       }
+
+       sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
+                              offsetof(struct sbi_sta_struct, sequence));
+       steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
+                           offsetof(struct sbi_sta_struct, steal));
+
+       if (WARN_ON(get_user(sequence, sequence_ptr)))
+               return;
+
+       sequence = le32_to_cpu(sequence);
+       sequence += 1;
+
+       if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
+               return;
+
+       if (!WARN_ON(get_user(steal, steal_ptr))) {
+               steal = le64_to_cpu(steal);
+               vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
+               steal += vcpu->arch.sta.last_steal - last_steal;
+               WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
+       }
+
+       sequence += 1;
+       WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));
+
+       kvm_vcpu_mark_page_dirty(vcpu, gfn);
+}
+
+static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+       unsigned long shmem_phys_lo = cp->a0;
+       unsigned long shmem_phys_hi = cp->a1;
+       u32 flags = cp->a2;
+       struct sbi_sta_struct zero_sta = {0};
+       unsigned long hva;
+       bool writable;
+       gpa_t shmem;
+       int ret;
+
+       if (flags != 0)
+               return SBI_ERR_INVALID_PARAM;
+
+       if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
+           shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
+               vcpu->arch.sta.shmem = INVALID_GPA;
+               return 0;
+       }
+
+       if (shmem_phys_lo & (SZ_64 - 1))
+               return SBI_ERR_INVALID_PARAM;
+
+       shmem = shmem_phys_lo;
+
+       if (shmem_phys_hi != 0) {
+               if (IS_ENABLED(CONFIG_32BIT))
+                       shmem |= ((gpa_t)shmem_phys_hi << 32);
+               else
+                       return SBI_ERR_INVALID_ADDRESS;
+       }
+
+       hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
+       if (kvm_is_error_hva(hva) || !writable)
+               return SBI_ERR_INVALID_ADDRESS;
+
+       ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
+       if (ret)
+               return SBI_ERR_FAILURE;
+
+       vcpu->arch.sta.shmem = shmem;
+       vcpu->arch.sta.last_steal = current->sched_info.run_delay;
+
+       return 0;
+}
+
+static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                                  struct kvm_vcpu_sbi_return *retdata)
+{
+       struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+       unsigned long funcid = cp->a6;
+       int ret;
+
+       switch (funcid) {
+       case SBI_EXT_STA_STEAL_TIME_SET_SHMEM:
+               ret = kvm_sbi_sta_steal_time_set_shmem(vcpu);
+               break;
+       default:
+               ret = SBI_ERR_NOT_SUPPORTED;
+               break;
+       }
+
+       retdata->err_val = ret;
+
+       return 0;
+}
+
+static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
+{
+       return !!sched_info_on();
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
+       .extid_start = SBI_EXT_STA,
+       .extid_end = SBI_EXT_STA,
+       .handler = kvm_sbi_ext_sta_handler,
+       .probe = kvm_sbi_ext_sta_probe,
+};
+
+int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
+                                  unsigned long reg_num,
+                                  unsigned long *reg_val)
+{
+       switch (reg_num) {
+       case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
+               *reg_val = (unsigned long)vcpu->arch.sta.shmem;
+               break;
+       case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
+               if (IS_ENABLED(CONFIG_32BIT))
+                       *reg_val = upper_32_bits(vcpu->arch.sta.shmem);
+               else
+                       *reg_val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
+                                  unsigned long reg_num,
+                                  unsigned long reg_val)
+{
+       switch (reg_num) {
+       case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
+               if (IS_ENABLED(CONFIG_32BIT)) {
+                       gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);
+
+                       vcpu->arch.sta.shmem = reg_val;
+                       vcpu->arch.sta.shmem |= hi << 32;
+               } else {
+                       vcpu->arch.sta.shmem = reg_val;
+               }
+               break;
+       case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
+               if (IS_ENABLED(CONFIG_32BIT)) {
+                       gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);
+
+                       vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
+                       vcpu->arch.sta.shmem |= lo;
+               } else if (reg_val != 0) {
+                       return -EINVAL;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
index d74df8eb4d71a56faed0f4a7f4303bef88e6e399..0c26189aa01cf5b0ca5c6ef6ddbaa70153f3132c 100644 (file)
@@ -15,7 +15,7 @@
        .altmacro
        .option norelax
 
-ENTRY(__kvm_riscv_switch_to)
+SYM_FUNC_START(__kvm_riscv_switch_to)
        /* Save Host GPRs (except A0 and T0-T6) */
        REG_S   ra, (KVM_ARCH_HOST_RA)(a0)
        REG_S   sp, (KVM_ARCH_HOST_SP)(a0)
@@ -45,7 +45,7 @@ ENTRY(__kvm_riscv_switch_to)
        REG_L   t0, (KVM_ARCH_GUEST_SSTATUS)(a0)
        REG_L   t1, (KVM_ARCH_GUEST_HSTATUS)(a0)
        REG_L   t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
-       la      t4, __kvm_switch_return
+       la      t4, .Lkvm_switch_return
        REG_L   t5, (KVM_ARCH_GUEST_SEPC)(a0)
 
        /* Save Host and Restore Guest SSTATUS */
@@ -113,7 +113,7 @@ ENTRY(__kvm_riscv_switch_to)
 
        /* Back to Host */
        .align 2
-__kvm_switch_return:
+.Lkvm_switch_return:
        /* Swap Guest A0 with SSCRATCH */
        csrrw   a0, CSR_SSCRATCH, a0
 
@@ -208,9 +208,9 @@ __kvm_switch_return:
 
        /* Return to C code */
        ret
-ENDPROC(__kvm_riscv_switch_to)
+SYM_FUNC_END(__kvm_riscv_switch_to)
 
-ENTRY(__kvm_riscv_unpriv_trap)
+SYM_CODE_START(__kvm_riscv_unpriv_trap)
        /*
         * We assume that faulting unpriv load/store instruction is
         * 4-byte long and blindly increment SEPC by 4.
@@ -231,12 +231,10 @@ ENTRY(__kvm_riscv_unpriv_trap)
        csrr    a1, CSR_HTINST
        REG_S   a1, (KVM_ARCH_TRAP_HTINST)(a0)
        sret
-ENDPROC(__kvm_riscv_unpriv_trap)
+SYM_CODE_END(__kvm_riscv_unpriv_trap)
 
 #ifdef CONFIG_FPU
-       .align 3
-       .global __kvm_riscv_fp_f_save
-__kvm_riscv_fp_f_save:
+SYM_FUNC_START(__kvm_riscv_fp_f_save)
        csrr t2, CSR_SSTATUS
        li t1, SR_FS
        csrs CSR_SSTATUS, t1
@@ -276,10 +274,9 @@ __kvm_riscv_fp_f_save:
        sw t0, KVM_ARCH_FP_F_FCSR(a0)
        csrw CSR_SSTATUS, t2
        ret
+SYM_FUNC_END(__kvm_riscv_fp_f_save)
 
-       .align 3
-       .global __kvm_riscv_fp_d_save
-__kvm_riscv_fp_d_save:
+SYM_FUNC_START(__kvm_riscv_fp_d_save)
        csrr t2, CSR_SSTATUS
        li t1, SR_FS
        csrs CSR_SSTATUS, t1
@@ -319,10 +316,9 @@ __kvm_riscv_fp_d_save:
        sw t0, KVM_ARCH_FP_D_FCSR(a0)
        csrw CSR_SSTATUS, t2
        ret
+SYM_FUNC_END(__kvm_riscv_fp_d_save)
 
-       .align 3
-       .global __kvm_riscv_fp_f_restore
-__kvm_riscv_fp_f_restore:
+SYM_FUNC_START(__kvm_riscv_fp_f_restore)
        csrr t2, CSR_SSTATUS
        li t1, SR_FS
        lw t0, KVM_ARCH_FP_F_FCSR(a0)
@@ -362,10 +358,9 @@ __kvm_riscv_fp_f_restore:
        fscsr t0
        csrw CSR_SSTATUS, t2
        ret
+SYM_FUNC_END(__kvm_riscv_fp_f_restore)
 
-       .align 3
-       .global __kvm_riscv_fp_d_restore
-__kvm_riscv_fp_d_restore:
+SYM_FUNC_START(__kvm_riscv_fp_d_restore)
        csrr t2, CSR_SSTATUS
        li t1, SR_FS
        lw t0, KVM_ARCH_FP_D_FCSR(a0)
@@ -405,4 +400,5 @@ __kvm_riscv_fp_d_restore:
        fscsr t0
        csrw CSR_SSTATUS, t2
        ret
+SYM_FUNC_END(__kvm_riscv_fp_d_restore)
 #endif
index b339a2682f252bb8c0ac6d3803a8eab46e1e1443..d92d1348045c8cfc60ddd7b6d524f8db535e4619 100644 (file)
@@ -76,6 +76,7 @@ int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
        cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
        if (!cntx->vector.datap)
                return -ENOMEM;
+       cntx->vector.vlenb = riscv_v_vsize / 32;
 
        vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
        if (!vcpu->arch.host_context.vector.datap)
@@ -115,6 +116,9 @@ static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
                case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
                        *reg_addr = &cntx->vector.vcsr;
                        break;
+               case KVM_REG_RISCV_VECTOR_CSR_REG(vlenb):
+                       *reg_addr = &cntx->vector.vlenb;
+                       break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
                default:
                        return -ENOENT;
@@ -173,6 +177,18 @@ int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
        if (!riscv_isa_extension_available(isa, v))
                return -ENOENT;
 
+       if (reg_num == KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)) {
+               struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+               unsigned long reg_val;
+
+               if (copy_from_user(&reg_val, uaddr, reg_size))
+                       return -EFAULT;
+               if (reg_val != cntx->vector.vlenb)
+                       return -EINVAL;
+
+               return 0;
+       }
+
        rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
        if (rc)
                return rc;
index 7e2b50c692c1bb583fbfb8fc187d2107f1615f78..ce58bc48e5b87b44c24d09d0a3f4a987f88207c0 100644 (file)
@@ -179,7 +179,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = kvm_riscv_aia_available();
                break;
        case KVM_CAP_IOEVENTFD:
-       case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
index 26cb2502ecf8969b76a47e874f6be9e90219887b..bd6e6c1b0497b48419bedb70eb451010c6128eb9 100644 (file)
@@ -6,8 +6,14 @@ lib-y                  += memmove.o
 lib-y                  += strcmp.o
 lib-y                  += strlen.o
 lib-y                  += strncmp.o
+lib-y                  += csum.o
+ifeq ($(CONFIG_MMU), y)
+lib-$(CONFIG_RISCV_ISA_V)      += uaccess_vector.o
+endif
 lib-$(CONFIG_MMU)      += uaccess.o
 lib-$(CONFIG_64BIT)    += tishift.o
 lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o
 
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+lib-$(CONFIG_RISCV_ISA_V)      += xor.o
+lib-$(CONFIG_RISCV_ISA_V)      += riscv_v_helpers.o
index b22de1231144c29758d3fae335a5d727f44b51bb..20ff03f5b0f23ea6179fc64a89aeea63a4f2340c 100644 (file)
@@ -4,9 +4,9 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/export.h>
 #include <asm/asm.h>
 #include <asm/alternative-macros.h>
-#include <asm-generic/export.h>
 #include <asm/hwcap.h>
 #include <asm/insn-def.h>
 #include <asm/page.h>
diff --git a/arch/riscv/lib/csum.c b/arch/riscv/lib/csum.c
new file mode 100644 (file)
index 0000000..74af3ab
--- /dev/null
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Checksum library
+ *
+ * Influenced by arch/arm64/lib/csum.c
+ * Copyright (C) 2023 Rivos Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/jump_label.h>
+#include <linux/kasan-checks.h>
+#include <linux/kernel.h>
+
+#include <asm/cpufeature.h>
+
+#include <net/checksum.h>
+
+/* Default version is sufficient for 32 bit */
+#ifndef CONFIG_32BIT
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+                       const struct in6_addr *daddr,
+                       __u32 len, __u8 proto, __wsum csum)
+{
+       unsigned int ulen, uproto;
+       unsigned long sum = (__force unsigned long)csum;
+
+       sum += (__force unsigned long)saddr->s6_addr32[0];
+       sum += (__force unsigned long)saddr->s6_addr32[1];
+       sum += (__force unsigned long)saddr->s6_addr32[2];
+       sum += (__force unsigned long)saddr->s6_addr32[3];
+
+       sum += (__force unsigned long)daddr->s6_addr32[0];
+       sum += (__force unsigned long)daddr->s6_addr32[1];
+       sum += (__force unsigned long)daddr->s6_addr32[2];
+       sum += (__force unsigned long)daddr->s6_addr32[3];
+
+       ulen = (__force unsigned int)htonl((unsigned int)len);
+       sum += ulen;
+
+       uproto = (__force unsigned int)htonl(proto);
+       sum += uproto;
+
+       /*
+        * Zbb support saves 4 instructions, so not worth checking without
+        * alternatives if supported
+        */
+       if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+           IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+               unsigned long fold_temp;
+
+               /*
+                * Zbb is likely available when the kernel is compiled with Zbb
+                * support, so nop when Zbb is available and jump when Zbb is
+                * not available.
+                */
+               asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
+                                             RISCV_ISA_EXT_ZBB, 1)
+                                 :
+                                 :
+                                 :
+                                 : no_zbb);
+               asm(".option push                                       \n\
+               .option arch,+zbb                                       \n\
+                       rori    %[fold_temp], %[sum], 32                \n\
+                       add     %[sum], %[fold_temp], %[sum]            \n\
+                       srli    %[sum], %[sum], 32                      \n\
+                       not     %[fold_temp], %[sum]                    \n\
+                       roriw   %[sum], %[sum], 16                      \n\
+                       subw    %[sum], %[fold_temp], %[sum]            \n\
+               .option pop"
+               : [sum] "+r" (sum), [fold_temp] "=&r" (fold_temp));
+               return (__force __sum16)(sum >> 16);
+       }
+no_zbb:
+       sum += ror64(sum, 32);
+       sum >>= 32;
+       return csum_fold((__force __wsum)sum);
+}
+EXPORT_SYMBOL(csum_ipv6_magic);
+#endif /* !CONFIG_32BIT */
+
+#ifdef CONFIG_32BIT
+#define OFFSET_MASK 3
+#elif CONFIG_64BIT
+#define OFFSET_MASK 7
+#endif
+
+static inline __no_sanitize_address unsigned long
+do_csum_common(const unsigned long *ptr, const unsigned long *end,
+              unsigned long data)
+{
+       unsigned int shift;
+       unsigned long csum = 0, carry = 0;
+
+       /*
+        * Do 32-bit reads on RV32 and 64-bit reads otherwise. This should be
+        * faster than doing 32-bit reads on architectures that support larger
+        * reads.
+        */
+       while (ptr < end) {
+               csum += data;
+               carry += csum < data;
+               data = *(ptr++);
+       }
+
+       /*
+        * Perform alignment (and over-read) bytes on the tail if any bytes
+        * leftover.
+        */
+       shift = ((long)ptr - (long)end) * 8;
+#ifdef __LITTLE_ENDIAN
+       data = (data << shift) >> shift;
+#else
+       data = (data >> shift) << shift;
+#endif
+       csum += data;
+       carry += csum < data;
+       csum += carry;
+       csum += csum < carry;
+
+       return csum;
+}
+
+/*
+ * Algorithm accounts for buff being misaligned.
+ * If buff is not aligned, will over-read bytes but not use the bytes that it
+ * shouldn't. The same thing will occur on the tail-end of the read.
+ */
+static inline __no_sanitize_address unsigned int
+do_csum_with_alignment(const unsigned char *buff, int len)
+{
+       unsigned int offset, shift;
+       unsigned long csum, data;
+       const unsigned long *ptr, *end;
+
+       /*
+        * Align address to closest word (double word on rv64) that comes before
+        * buff. This should always be in the same page and cache line.
+        * Directly call KASAN with the alignment we will be using.
+        */
+       offset = (unsigned long)buff & OFFSET_MASK;
+       kasan_check_read(buff, len);
+       ptr = (const unsigned long *)(buff - offset);
+
+       /*
+        * Clear the most significant bytes that were over-read if buff was not
+        * aligned.
+        */
+       shift = offset * 8;
+       data = *(ptr++);
+#ifdef __LITTLE_ENDIAN
+       data = (data >> shift) << shift;
+#else
+       data = (data << shift) >> shift;
+#endif
+       end = (const unsigned long *)(buff + len);
+       csum = do_csum_common(ptr, end, data);
+
+#ifdef CC_HAS_ASM_GOTO_TIED_OUTPUT
+       /*
+        * Zbb support saves 6 instructions, so not worth checking without
+        * alternatives if supported
+        */
+       if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+           IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+               unsigned long fold_temp;
+
+               /*
+                * Zbb is likely available when the kernel is compiled with Zbb
+                * support, so nop when Zbb is available and jump when Zbb is
+                * not available.
+                */
+               asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
+                                             RISCV_ISA_EXT_ZBB, 1)
+                                 :
+                                 :
+                                 :
+                                 : no_zbb);
+
+#ifdef CONFIG_32BIT
+               asm_goto_output(".option push                   \n\
+               .option arch,+zbb                               \n\
+                       rori    %[fold_temp], %[csum], 16       \n\
+                       andi    %[offset], %[offset], 1         \n\
+                       add     %[csum], %[fold_temp], %[csum]  \n\
+                       beq     %[offset], zero, %l[end]        \n\
+                       rev8    %[csum], %[csum]                \n\
+               .option pop"
+                       : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
+                       : [offset] "r" (offset)
+                       :
+                       : end);
+
+               return (unsigned short)csum;
+#else /* !CONFIG_32BIT */
+               asm_goto_output(".option push                   \n\
+               .option arch,+zbb                               \n\
+                       rori    %[fold_temp], %[csum], 32       \n\
+                       add     %[csum], %[fold_temp], %[csum]  \n\
+                       srli    %[csum], %[csum], 32            \n\
+                       roriw   %[fold_temp], %[csum], 16       \n\
+                       addw    %[csum], %[fold_temp], %[csum]  \n\
+                       andi    %[offset], %[offset], 1         \n\
+                       beq     %[offset], zero, %l[end]        \n\
+                       rev8    %[csum], %[csum]                \n\
+               .option pop"
+                       : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
+                       : [offset] "r" (offset)
+                       :
+                       : end);
+
+               return (csum << 16) >> 48;
+#endif /* !CONFIG_32BIT */
+end:
+               return csum >> 16;
+       }
+no_zbb:
+#endif /* CC_HAS_ASM_GOTO_TIED_OUTPUT */
+#ifndef CONFIG_32BIT
+       csum += ror64(csum, 32);
+       csum >>= 32;
+#endif
+       csum = (u32)csum + ror32((u32)csum, 16);
+       if (offset & 1)
+               return (u16)swab32(csum);
+       return csum >> 16;
+}
+
+/*
+ * Does not perform alignment, should only be used if machine has fast
+ * misaligned accesses, or when buff is known to be aligned.
+ */
+static inline __no_sanitize_address unsigned int
+do_csum_no_alignment(const unsigned char *buff, int len)
+{
+       unsigned long csum, data;
+       const unsigned long *ptr, *end;
+
+       ptr = (const unsigned long *)(buff);
+       data = *(ptr++);
+
+       kasan_check_read(buff, len);
+
+       end = (const unsigned long *)(buff + len);
+       csum = do_csum_common(ptr, end, data);
+
+       /*
+        * Zbb support saves 6 instructions, so not worth checking without
+        * alternatives if supported
+        */
+       if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+           IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+               unsigned long fold_temp;
+
+               /*
+                * Zbb is likely available when the kernel is compiled with Zbb
+                * support, so nop when Zbb is available and jump when Zbb is
+                * not available.
+                */
+               asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
+                                             RISCV_ISA_EXT_ZBB, 1)
+                                 :
+                                 :
+                                 :
+                                 : no_zbb);
+
+#ifdef CONFIG_32BIT
+               asm (".option push                              \n\
+               .option arch,+zbb                               \n\
+                       rori    %[fold_temp], %[csum], 16       \n\
+                       add     %[csum], %[fold_temp], %[csum]  \n\
+               .option pop"
+                       : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
+                       :
+                       : );
+
+#else /* !CONFIG_32BIT */
+               asm (".option push                              \n\
+               .option arch,+zbb                               \n\
+                       rori    %[fold_temp], %[csum], 32       \n\
+                       add     %[csum], %[fold_temp], %[csum]  \n\
+                       srli    %[csum], %[csum], 32            \n\
+                       roriw   %[fold_temp], %[csum], 16       \n\
+                       addw    %[csum], %[fold_temp], %[csum]  \n\
+               .option pop"
+                       : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
+                       :
+                       : );
+#endif /* !CONFIG_32BIT */
+               return csum >> 16;
+       }
+no_zbb:
+#ifndef CONFIG_32BIT
+       csum += ror64(csum, 32);
+       csum >>= 32;
+#endif
+       csum = (u32)csum + ror32((u32)csum, 16);
+       return csum >> 16;
+}
+
+/*
+ * Perform a checksum on an arbitrary memory address.
+ * Will do a light-weight address alignment if buff is misaligned, unless
+ * cpu supports fast misaligned accesses.
+ */
+unsigned int do_csum(const unsigned char *buff, int len)
+{
+       if (unlikely(len <= 0))
+               return 0;
+
+       /*
+        * Significant performance gains can be seen by not doing alignment
+        * on machines with fast misaligned accesses.
+        *
+        * There is some duplicate code between the "with_alignment" and
+        * "no_alignment" implmentations, but the overlap is too awkward to be
+        * able to fit in one function without introducing multiple static
+        * branches. The largest chunk of overlap was delegated into the
+        * do_csum_common function.
+        */
+       if (static_branch_likely(&fast_misaligned_access_speed_key))
+               return do_csum_no_alignment(buff, len);
+
+       if (((unsigned long)buff & OFFSET_MASK) == 0)
+               return do_csum_no_alignment(buff, len);
+
+       return do_csum_with_alignment(buff, len);
+}
diff --git a/arch/riscv/lib/riscv_v_helpers.c b/arch/riscv/lib/riscv_v_helpers.c
new file mode 100644 (file)
index 0000000..be38a93
--- /dev/null
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 SiFive
+ * Author: Andy Chiu <andy.chiu@sifive.com>
+ */
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+#include <asm/vector.h>
+#include <asm/simd.h>
+
+#ifdef CONFIG_MMU
+#include <asm/asm-prototypes.h>
+#endif
+
+#ifdef CONFIG_MMU
+size_t riscv_v_usercopy_threshold = CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD;
+int __asm_vector_usercopy(void *dst, void *src, size_t n);
+int fallback_scalar_usercopy(void *dst, void *src, size_t n);
+asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
+{
+       size_t remain, copied;
+
+       /* skip has_vector() check because it has been done by the asm  */
+       if (!may_use_simd())
+               goto fallback;
+
+       kernel_vector_begin();
+       remain = __asm_vector_usercopy(dst, src, n);
+       kernel_vector_end();
+
+       if (remain) {
+               copied = n - remain;
+               dst += copied;
+               src += copied;
+               n = remain;
+               goto fallback;
+       }
+
+       return remain;
+
+fallback:
+       return fallback_scalar_usercopy(dst, src, n);
+}
+#endif
index ef90075c4b0a9c153c02cdfc0d0fbdc98e151134..c8294bf72c064439919cc4895c6a51180f7ddec6 100644 (file)
@@ -4,7 +4,7 @@
  */
 
 #include <linux/linkage.h>
-#include <asm-generic/export.h>
+#include <linux/export.h>
 
 SYM_FUNC_START(__lshrti3)
        beqz    a2, .L1
index 3ab438f30d1328707862134f819e8a74598c6dce..bc22c078aba81a8170506eddb8642d3353ac461c 100644 (file)
@@ -1,8 +1,10 @@
 #include <linux/linkage.h>
-#include <asm-generic/export.h>
+#include <linux/export.h>
 #include <asm/asm.h>
 #include <asm/asm-extable.h>
 #include <asm/csr.h>
+#include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
 
        .macro fixup op reg addr lbl
 100:
        .endm
 
 SYM_FUNC_START(__asm_copy_to_user)
+#ifdef CONFIG_RISCV_ISA_V
+       ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
+       REG_L   t0, riscv_v_usercopy_threshold
+       bltu    a2, t0, fallback_scalar_usercopy
+       tail enter_vector_usercopy
+#endif
+SYM_FUNC_START(fallback_scalar_usercopy)
 
        /* Enable access to user memory */
        li t6, SR_SUM
@@ -181,6 +190,7 @@ SYM_FUNC_START(__asm_copy_to_user)
        sub a0, t5, a0
        ret
 SYM_FUNC_END(__asm_copy_to_user)
+SYM_FUNC_END(fallback_scalar_usercopy)
 EXPORT_SYMBOL(__asm_copy_to_user)
 SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_from_user)
diff --git a/arch/riscv/lib/uaccess_vector.S b/arch/riscv/lib/uaccess_vector.S
new file mode 100644 (file)
index 0000000..51ab558
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/linkage.h>
+#include <asm-generic/export.h>
+#include <asm/asm.h>
+#include <asm/asm-extable.h>
+#include <asm/csr.h>
+
+#define pDst a0
+#define pSrc a1
+#define iNum a2
+
+#define iVL a3
+
+#define ELEM_LMUL_SETTING m8
+#define vData v0
+
+       .macro fixup op reg addr lbl
+100:
+       \op \reg, \addr
+       _asm_extable    100b, \lbl
+       .endm
+
+SYM_FUNC_START(__asm_vector_usercopy)
+       /* Enable access to user memory */
+       li      t6, SR_SUM
+       csrs    CSR_STATUS, t6
+
+loop:
+       vsetvli iVL, iNum, e8, ELEM_LMUL_SETTING, ta, ma
+       fixup vle8.v vData, (pSrc), 10f
+       sub iNum, iNum, iVL
+       add pSrc, pSrc, iVL
+       fixup vse8.v vData, (pDst), 11f
+       add pDst, pDst, iVL
+       bnez iNum, loop
+
+       /* Exception fixup for vector load is shared with normal exit */
+10:
+       /* Disable access to user memory */
+       csrc    CSR_STATUS, t6
+       mv      a0, iNum
+       ret
+
+       /* Exception fixup code for vector store. */
+11:
+       /* Undo the subtraction after vle8.v */
+       add     iNum, iNum, iVL
+       /* Make sure the scalar fallback skip already processed bytes */
+       csrr    t2, CSR_VSTART
+       sub     iNum, iNum, t2
+       j       10b
+SYM_FUNC_END(__asm_vector_usercopy)
diff --git a/arch/riscv/lib/xor.S b/arch/riscv/lib/xor.S
new file mode 100644 (file)
index 0000000..b28f243
--- /dev/null
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+#include <linux/linkage.h>
+#include <linux/export.h>
+#include <asm/asm.h>
+
+SYM_FUNC_START(xor_regs_2_)
+       vsetvli a3, a0, e8, m8, ta, ma
+       vle8.v v0, (a1)
+       vle8.v v8, (a2)
+       sub a0, a0, a3
+       vxor.vv v16, v0, v8
+       add a2, a2, a3
+       vse8.v v16, (a1)
+       add a1, a1, a3
+       bnez a0, xor_regs_2_
+       ret
+SYM_FUNC_END(xor_regs_2_)
+EXPORT_SYMBOL(xor_regs_2_)
+
+SYM_FUNC_START(xor_regs_3_)
+       vsetvli a4, a0, e8, m8, ta, ma
+       vle8.v v0, (a1)
+       vle8.v v8, (a2)
+       sub a0, a0, a4
+       vxor.vv v0, v0, v8
+       vle8.v v16, (a3)
+       add a2, a2, a4
+       vxor.vv v16, v0, v16
+       add a3, a3, a4
+       vse8.v v16, (a1)
+       add a1, a1, a4
+       bnez a0, xor_regs_3_
+       ret
+SYM_FUNC_END(xor_regs_3_)
+EXPORT_SYMBOL(xor_regs_3_)
+
+SYM_FUNC_START(xor_regs_4_)
+       vsetvli a5, a0, e8, m8, ta, ma
+       vle8.v v0, (a1)
+       vle8.v v8, (a2)
+       sub a0, a0, a5
+       vxor.vv v0, v0, v8
+       vle8.v v16, (a3)
+       add a2, a2, a5
+       vxor.vv v0, v0, v16
+       vle8.v v24, (a4)
+       add a3, a3, a5
+       vxor.vv v16, v0, v24
+       add a4, a4, a5
+       vse8.v v16, (a1)
+       add a1, a1, a5
+       bnez a0, xor_regs_4_
+       ret
+SYM_FUNC_END(xor_regs_4_)
+EXPORT_SYMBOL(xor_regs_4_)
+
+SYM_FUNC_START(xor_regs_5_)
+       vsetvli a6, a0, e8, m8, ta, ma
+       vle8.v v0, (a1)
+       vle8.v v8, (a2)
+       sub a0, a0, a6
+       vxor.vv v0, v0, v8
+       vle8.v v16, (a3)
+       add a2, a2, a6
+       vxor.vv v0, v0, v16
+       vle8.v v24, (a4)
+       add a3, a3, a6
+       vxor.vv v0, v0, v24
+       vle8.v v8, (a5)
+       add a4, a4, a6
+       vxor.vv v16, v0, v8
+       add a5, a5, a6
+       vse8.v v16, (a1)
+       add a1, a1, a6
+       bnez a0, xor_regs_5_
+       ret
+SYM_FUNC_END(xor_regs_5_)
+EXPORT_SYMBOL(xor_regs_5_)
index 3a4dfc8babcf8c3ef4cf2d4c39731b0e9067eb14..2c869f8026a88929ad258952414816733d270b8f 100644 (file)
@@ -13,10 +13,9 @@ endif
 KCOV_INSTRUMENT_init.o := n
 
 obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
 obj-y += cacheflush.o
 obj-y += context.o
-obj-y += pgtable.o
 obj-y += pmem.o
 
 ifeq ($(CONFIG_MMU),y)
index 4e4e469b8dd66cfdf3e24346a514db2d3dd55773..843107f834b231a032c6853b5d58382ed6165a37 100644 (file)
@@ -129,7 +129,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-               const struct iommu_ops *iommu, bool coherent)
+                       bool coherent)
 {
        WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
                   TAINT_CPU_OUT_OF_SPEC,
index 35484d830fd6d7fe0a2a521bc736b1c5883afa51..dd1530af3ef15bf74cec58b5a4394918f6a8beb0 100644 (file)
@@ -27,6 +27,14 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex,
        return true;
 }
 
+static inline unsigned long regs_get_gpr(struct pt_regs *regs, unsigned int offset)
+{
+       if (unlikely(!offset || offset > MAX_REG_OFFSET))
+               return 0;
+
+       return *(unsigned long *)((unsigned long)regs + offset);
+}
+
 static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
                                unsigned long val)
 {
@@ -50,6 +58,27 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
        return true;
 }
 
+static bool
+ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
+                                 struct pt_regs *regs)
+{
+       int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
+       int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+       unsigned long data, addr, offset;
+
+       addr = regs_get_gpr(regs, reg_addr * sizeof(unsigned long));
+
+       offset = addr & 0x7UL;
+       addr &= ~0x7UL;
+
+       data = *(unsigned long *)addr >> (offset * 8);
+
+       regs_set_gpr(regs, reg_data * sizeof(unsigned long), data);
+
+       regs->epc = get_ex_fixup(ex);
+       return true;
+}
+
 bool fixup_exception(struct pt_regs *regs)
 {
        const struct exception_table_entry *ex;
@@ -65,6 +94,8 @@ bool fixup_exception(struct pt_regs *regs)
                return ex_handler_bpf(ex, regs);
        case EX_TYPE_UACCESS_ERR_ZERO:
                return ex_handler_uaccess_err_zero(ex, regs);
+       case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
+               return ex_handler_load_unaligned_zeropad(ex, regs);
        }
 
        BUG();
index 081339ddf47ef4bae95f02f80c227318b869cd6e..3ba1d4dde5dd1a27651c9fd10714e84761a30b53 100644 (file)
@@ -136,24 +136,24 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
        pgd = (pgd_t *)pfn_to_virt(pfn) + index;
        pgd_k = init_mm.pgd + index;
 
-       if (!pgd_present(*pgd_k)) {
+       if (!pgd_present(pgdp_get(pgd_k))) {
                no_context(regs, addr);
                return;
        }
-       set_pgd(pgd, *pgd_k);
+       set_pgd(pgd, pgdp_get(pgd_k));
 
        p4d_k = p4d_offset(pgd_k, addr);
-       if (!p4d_present(*p4d_k)) {
+       if (!p4d_present(p4dp_get(p4d_k))) {
                no_context(regs, addr);
                return;
        }
 
        pud_k = pud_offset(p4d_k, addr);
-       if (!pud_present(*pud_k)) {
+       if (!pud_present(pudp_get(pud_k))) {
                no_context(regs, addr);
                return;
        }
-       if (pud_leaf(*pud_k))
+       if (pud_leaf(pudp_get(pud_k)))
                goto flush_tlb;
 
        /*
@@ -161,11 +161,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
         * to copy individual PTEs
         */
        pmd_k = pmd_offset(pud_k, addr);
-       if (!pmd_present(*pmd_k)) {
+       if (!pmd_present(pmdp_get(pmd_k))) {
                no_context(regs, addr);
                return;
        }
-       if (pmd_leaf(*pmd_k))
+       if (pmd_leaf(pmdp_get(pmd_k)))
                goto flush_tlb;
 
        /*
@@ -175,7 +175,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
         * silently loop forever.
         */
        pte_k = pte_offset_kernel(pmd_k, addr);
-       if (!pte_present(*pte_k)) {
+       if (!pte_present(ptep_get(pte_k))) {
                no_context(regs, addr);
                return;
        }
index b52f0210481facd89623a5f4730421064a0748b8..29c7606414d276d1c3639e2a80e10037ea899cfc 100644 (file)
@@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
        }
 
        if (sz == PMD_SIZE) {
-               if (want_pmd_share(vma, addr) && pud_none(*pud))
+               if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud)))
                        pte = huge_pmd_share(mm, vma, addr, pud);
                else
                        pte = (pte_t *)pmd_alloc(mm, pud, addr);
@@ -93,11 +93,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
        pmd_t *pmd;
 
        pgd = pgd_offset(mm, addr);
-       if (!pgd_present(*pgd))
+       if (!pgd_present(pgdp_get(pgd)))
                return NULL;
 
        p4d = p4d_offset(pgd, addr);
-       if (!p4d_present(*p4d))
+       if (!p4d_present(p4dp_get(p4d)))
                return NULL;
 
        pud = pud_offset(p4d, addr);
@@ -105,7 +105,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
                /* must be pud huge, non-present or none */
                return (pte_t *)pud;
 
-       if (!pud_present(*pud))
+       if (!pud_present(pudp_get(pud)))
                return NULL;
 
        pmd = pmd_offset(pud, addr);
@@ -113,7 +113,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
                /* must be pmd huge, non-present or none */
                return (pte_t *)pmd;
 
-       if (!pmd_present(*pmd))
+       if (!pmd_present(pmdp_get(pmd)))
                return NULL;
 
        for_each_napot_order(order) {
@@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
        return pte;
 }
 
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+       unsigned long hp_size = huge_page_size(h);
+
+       switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+       case PUD_SIZE:
+               return P4D_SIZE - PUD_SIZE;
+#endif
+       case PMD_SIZE:
+               return PUD_SIZE - PMD_SIZE;
+       case napot_cont_size(NAPOT_CONT64KB_ORDER):
+               return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
+       default:
+               break;
+       }
+
+       return 0UL;
+}
+
 static pte_t get_clear_contig(struct mm_struct *mm,
                              unsigned long addr,
                              pte_t *ptep,
@@ -177,13 +197,36 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
        return entry;
 }
 
+static void clear_flush(struct mm_struct *mm,
+                       unsigned long addr,
+                       pte_t *ptep,
+                       unsigned long pgsize,
+                       unsigned long ncontig)
+{
+       struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+       unsigned long i, saddr = addr;
+
+       for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+               ptep_get_and_clear(mm, addr, ptep);
+
+       flush_tlb_range(&vma, saddr, addr);
+}
+
+/*
+ * When dealing with NAPOT mappings, the privileged specification indicates that
+ * "if an update needs to be made, the OS generally should first mark all of the
+ * PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions
+ * within the range, [...] then update the PTE(s), as described in Section
+ * 4.2.1.". That's the equivalent of the Break-Before-Make approach used by
+ * arm64.
+ */
 void set_huge_pte_at(struct mm_struct *mm,
                     unsigned long addr,
                     pte_t *ptep,
                     pte_t pte,
                     unsigned long sz)
 {
-       unsigned long hugepage_shift;
+       unsigned long hugepage_shift, pgsize;
        int i, pte_num;
 
        if (sz >= PGDIR_SIZE)
@@ -198,7 +241,22 @@ void set_huge_pte_at(struct mm_struct *mm,
                hugepage_shift = PAGE_SHIFT;
 
        pte_num = sz >> hugepage_shift;
-       for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
+       pgsize = 1 << hugepage_shift;
+
+       if (!pte_present(pte)) {
+               for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
+                       set_ptes(mm, addr, ptep, pte, 1);
+               return;
+       }
+
+       if (!pte_napot(pte)) {
+               set_ptes(mm, addr, ptep, pte, 1);
+               return;
+       }
+
+       clear_flush(mm, addr, ptep, pgsize, pte_num);
+
+       for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
                set_pte_at(mm, addr, ptep, pte);
 }
 
@@ -293,7 +351,7 @@ void huge_pte_clear(struct mm_struct *mm,
                    pte_t *ptep,
                    unsigned long sz)
 {
-       pte_t pte = READ_ONCE(*ptep);
+       pte_t pte = ptep_get(ptep);
        int i, pte_num;
 
        if (!pte_napot(pte)) {
@@ -306,7 +364,7 @@ void huge_pte_clear(struct mm_struct *mm,
                pte_clear(mm, addr, ptep);
 }
 
-static __init bool is_napot_size(unsigned long size)
+static bool is_napot_size(unsigned long size)
 {
        unsigned long order;
 
@@ -334,7 +392,7 @@ arch_initcall(napot_hugetlbpages_init);
 
 #else
 
-static __init bool is_napot_size(unsigned long size)
+static bool is_napot_size(unsigned long size)
 {
        return false;
 }
@@ -351,7 +409,7 @@ int pmd_huge(pmd_t pmd)
        return pmd_leaf(pmd);
 }
 
-bool __init arch_hugetlb_valid_size(unsigned long size)
+static bool __hugetlb_valid_size(unsigned long size)
 {
        if (size == HPAGE_SIZE)
                return true;
@@ -363,6 +421,16 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
                return false;
 }
 
+bool __init arch_hugetlb_valid_size(unsigned long size)
+{
+       return __hugetlb_valid_size(size);
+}
+
+bool arch_hugetlb_migration_supported(struct hstate *h)
+{
+       return __hugetlb_valid_size(huge_page_size(h));
+}
+
 #ifdef CONFIG_CONTIG_ALLOC
 static __init int gigantic_pages_init(void)
 {
index 2e011cbddf3af373ea68d703e7e5736c5b04155a..fa34cf55037bd37ad0b8d3bb3b67f6f91d243f58 100644 (file)
@@ -174,6 +174,9 @@ void __init mem_init(void)
 
 /* Limit the memory size via mem. */
 static phys_addr_t memory_limit;
+#ifdef CONFIG_XIP_KERNEL
+#define memory_limit   (*(phys_addr_t *)XIP_FIXUP(&memory_limit))
+#endif /* CONFIG_XIP_KERNEL */
 
 static int __init early_mem(char *p)
 {
@@ -952,7 +955,7 @@ static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
         * setup_vm_final installs the linear mapping. For 32-bit kernel, as the
         * kernel is mapped in the linear mapping, that makes no difference.
         */
-       dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
+       dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
 #endif
 
        dtb_early_pa = dtb_pa;
@@ -1055,9 +1058,13 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 #endif
 
        kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
-       kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
 
 #ifdef CONFIG_XIP_KERNEL
+#ifdef CONFIG_64BIT
+       kernel_map.page_offset = PAGE_OFFSET_L3;
+#else
+       kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
+#endif
        kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
        kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
 
@@ -1067,6 +1074,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 
        kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
 #else
+       kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
        kernel_map.phys_addr = (uintptr_t)(&_start);
        kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
 #endif
@@ -1377,16 +1385,39 @@ void __init misc_mem_init(void)
        early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
        arch_numa_init();
        sparse_init();
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+       /* The entire VMEMMAP region has been populated. Flush TLB for this region */
+       local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
+#endif
        zone_sizes_init();
        arch_reserve_crashkernel();
        memblock_dump_all();
 }
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
+void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+                              unsigned long addr, unsigned long next)
+{
+       pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL);
+}
+
+int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+                               unsigned long addr, unsigned long next)
+{
+       vmemmap_verify((pte_t *)pmdp, node, addr, next);
+       return 1;
+}
+
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
 {
-       return vmemmap_populate_basepages(start, end, node, NULL);
+       /*
+        * Note that SPARSEMEM_VMEMMAP is only selected for rv64 and that we
+        * can't use hugepage mappings for 2-level page table because in case of
+        * memory hotplug, we are not able to update all the page tables with
+        * the new PMDs.
+        */
+       return vmemmap_populate_hugepages(start, end, node, NULL);
 }
 #endif
 
index 5e39dcf23fdbc15e12cedcf6b75a51ccfea6cf9d..c301c8d291d2df54f93b5579c3e56ed1461e4dc7 100644 (file)
@@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
        phys_addr_t phys_addr;
        pte_t *ptep, *p;
 
-       if (pmd_none(*pmd)) {
+       if (pmd_none(pmdp_get(pmd))) {
                p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
                set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
        }
@@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
        ptep = pte_offset_kernel(pmd, vaddr);
 
        do {
-               if (pte_none(*ptep)) {
+               if (pte_none(ptep_get(ptep))) {
                        phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
                        set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
                        memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
@@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
        pmd_t *pmdp, *p;
        unsigned long next;
 
-       if (pud_none(*pud)) {
+       if (pud_none(pudp_get(pud))) {
                p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
                set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
        }
@@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
        do {
                next = pmd_addr_end(vaddr, end);
 
-               if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+               if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
+                   (next - vaddr) >= PMD_SIZE) {
                        phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
                        if (phys_addr) {
                                set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
        pud_t *pudp, *p;
        unsigned long next;
 
-       if (p4d_none(*p4d)) {
+       if (p4d_none(p4dp_get(p4d))) {
                p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
                set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
        }
@@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d,
        do {
                next = pud_addr_end(vaddr, end);
 
-               if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
+               if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+                   (next - vaddr) >= PUD_SIZE) {
                        phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
                        if (phys_addr) {
                                set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
        p4d_t *p4dp, *p;
        unsigned long next;
 
-       if (pgd_none(*pgd)) {
+       if (pgd_none(pgdp_get(pgd))) {
                p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
                set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
        }
@@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
        do {
                next = p4d_addr_end(vaddr, end);
 
-               if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
+               if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+                   (next - vaddr) >= P4D_SIZE) {
                        phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
                        if (phys_addr) {
                                set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
        do {
                next = pgd_addr_end(vaddr, end);
 
-               if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+               if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
                    (next - vaddr) >= PGDIR_SIZE) {
                        phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
                        if (phys_addr) {
@@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp,
        if (!pgtable_l4_enabled) {
                pudp = (pud_t *)p4dp;
        } else {
-               base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+               base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
                pudp = base_pud + pud_index(vaddr);
        }
 
@@ -193,7 +196,7 @@ static void __init kasan_early_clear_p4d(pgd_t *pgdp,
        if (!pgtable_l5_enabled) {
                p4dp = (p4d_t *)pgdp;
        } else {
-               base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+               base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
                p4dp = base_p4d + p4d_index(vaddr);
        }
 
@@ -239,14 +242,14 @@ static void __init kasan_early_populate_pud(p4d_t *p4dp,
        if (!pgtable_l4_enabled) {
                pudp = (pud_t *)p4dp;
        } else {
-               base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+               base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
                pudp = base_pud + pud_index(vaddr);
        }
 
        do {
                next = pud_addr_end(vaddr, end);
 
-               if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+               if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
                    (next - vaddr) >= PUD_SIZE) {
                        phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
                        set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -277,14 +280,14 @@ static void __init kasan_early_populate_p4d(pgd_t *pgdp,
        if (!pgtable_l5_enabled) {
                p4dp = (p4d_t *)pgdp;
        } else {
-               base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+               base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
                p4dp = base_p4d + p4d_index(vaddr);
        }
 
        do {
                next = p4d_addr_end(vaddr, end);
 
-               if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+               if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
                    (next - vaddr) >= P4D_SIZE) {
                        phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
                        set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -305,7 +308,7 @@ static void __init kasan_early_populate_pgd(pgd_t *pgdp,
        do {
                next = pgd_addr_end(vaddr, end);
 
-               if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+               if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
                    (next - vaddr) >= PGDIR_SIZE) {
                        phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
                        set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -381,7 +384,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
        do {
                next = pud_addr_end(vaddr, end);
 
-               if (pud_none(*pud_k)) {
+               if (pud_none(pudp_get(pud_k))) {
                        p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                        set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
                        continue;
@@ -401,7 +404,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
        do {
                next = p4d_addr_end(vaddr, end);
 
-               if (p4d_none(*p4d_k)) {
+               if (p4d_none(p4dp_get(p4d_k))) {
                        p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                        set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
                        continue;
@@ -420,7 +423,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
        do {
                next = pgd_addr_end(vaddr, end);
 
-               if (pgd_none(*pgd_k)) {
+               if (pgd_none(pgdp_get(pgd_k))) {
                        p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                        set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
                        continue;
@@ -438,6 +441,14 @@ static void __init kasan_shallow_populate(void *start, void *end)
        kasan_shallow_populate_pgd(vaddr, vend);
 }
 
+#ifdef CONFIG_KASAN_VMALLOC
+void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
+{
+       kasan_populate(kasan_mem_to_shadow(start),
+                      kasan_mem_to_shadow(start + size));
+}
+#endif
+
 static void __init create_tmp_mapping(void)
 {
        void *ptr;
@@ -451,7 +462,7 @@ static void __init create_tmp_mapping(void)
 
        /* Copy the last p4d since it is shared with the kernel mapping. */
        if (pgtable_l5_enabled) {
-               ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+               ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
                memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
                set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
                        pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
@@ -462,7 +473,7 @@ static void __init create_tmp_mapping(void)
 
        /* Copy the last pud since it is shared with the kernel mapping. */
        if (pgtable_l4_enabled) {
-               ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
+               ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
                memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
                set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
                        pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
index fc5fc4f785c481c20acec4b68ba1a75d278ee150..410056a50aa9f9c1603e5bbb200ffa709d41cb7c 100644 (file)
@@ -29,7 +29,7 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
 static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
 {
-       p4d_t val = READ_ONCE(*p4d);
+       p4d_t val = p4dp_get(p4d);
 
        if (p4d_leaf(val)) {
                val = __p4d(set_pageattr_masks(p4d_val(val), walk));
@@ -42,7 +42,7 @@ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
 static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
 {
-       pud_t val = READ_ONCE(*pud);
+       pud_t val = pudp_get(pud);
 
        if (pud_leaf(val)) {
                val = __pud(set_pageattr_masks(pud_val(val), walk));
@@ -55,7 +55,7 @@ static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
 static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
 {
-       pmd_t val = READ_ONCE(*pmd);
+       pmd_t val = pmdp_get(pmd);
 
        if (pmd_leaf(val)) {
                val = __pmd(set_pageattr_masks(pmd_val(val), walk));
@@ -68,7 +68,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
 static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
 {
-       pte_t val = READ_ONCE(*pte);
+       pte_t val = ptep_get(pte);
 
        val = __pte(set_pageattr_masks(pte_val(val), walk));
        set_pte(pte, val);
@@ -108,10 +108,10 @@ static int __split_linear_mapping_pmd(pud_t *pudp,
                    vaddr <= (vaddr & PMD_MASK) && end >= next)
                        continue;
 
-               if (pmd_leaf(*pmdp)) {
+               if (pmd_leaf(pmdp_get(pmdp))) {
                        struct page *pte_page;
-                       unsigned long pfn = _pmd_pfn(*pmdp);
-                       pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
+                       unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
+                       pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
                        pte_t *ptep_new;
                        int i;
 
@@ -148,10 +148,10 @@ static int __split_linear_mapping_pud(p4d_t *p4dp,
                    vaddr <= (vaddr & PUD_MASK) && end >= next)
                        continue;
 
-               if (pud_leaf(*pudp)) {
+               if (pud_leaf(pudp_get(pudp))) {
                        struct page *pmd_page;
-                       unsigned long pfn = _pud_pfn(*pudp);
-                       pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
+                       unsigned long pfn = _pud_pfn(pudp_get(pudp));
+                       pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
                        pmd_t *pmdp_new;
                        int i;
 
@@ -197,10 +197,10 @@ static int __split_linear_mapping_p4d(pgd_t *pgdp,
                    vaddr <= (vaddr & P4D_MASK) && end >= next)
                        continue;
 
-               if (p4d_leaf(*p4dp)) {
+               if (p4d_leaf(p4dp_get(p4dp))) {
                        struct page *pud_page;
-                       unsigned long pfn = _p4d_pfn(*p4dp);
-                       pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
+                       unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
+                       pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
                        pud_t *pudp_new;
                        int i;
 
@@ -305,8 +305,13 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
                                goto unlock;
                }
        } else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
-               lm_start = (unsigned long)lm_alias(start);
-               lm_end = (unsigned long)lm_alias(end);
+               if (is_kernel_mapping(start)) {
+                       lm_start = (unsigned long)lm_alias(start);
+                       lm_end = (unsigned long)lm_alias(end);
+               } else {
+                       lm_start = start;
+                       lm_end = end;
+               }
 
                ret = split_linear_mapping(lm_start, lm_end);
                if (ret)
@@ -378,7 +383,7 @@ int set_direct_map_invalid_noflush(struct page *page)
 int set_direct_map_default_noflush(struct page *page)
 {
        return __set_memory((unsigned long)page_address(page), 1,
-                           PAGE_KERNEL, __pgprot(0));
+                           PAGE_KERNEL, __pgprot(_PAGE_EXEC));
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
@@ -406,29 +411,29 @@ bool kernel_page_present(struct page *page)
        pte_t *pte;
 
        pgd = pgd_offset_k(addr);
-       if (!pgd_present(*pgd))
+       if (!pgd_present(pgdp_get(pgd)))
                return false;
-       if (pgd_leaf(*pgd))
+       if (pgd_leaf(pgdp_get(pgd)))
                return true;
 
        p4d = p4d_offset(pgd, addr);
-       if (!p4d_present(*p4d))
+       if (!p4d_present(p4dp_get(p4d)))
                return false;
-       if (p4d_leaf(*p4d))
+       if (p4d_leaf(p4dp_get(p4d)))
                return true;
 
        pud = pud_offset(p4d, addr);
-       if (!pud_present(*pud))
+       if (!pud_present(pudp_get(pud)))
                return false;
-       if (pud_leaf(*pud))
+       if (pud_leaf(pudp_get(pud)))
                return true;
 
        pmd = pmd_offset(pud, addr);
-       if (!pmd_present(*pmd))
+       if (!pmd_present(pmdp_get(pmd)))
                return false;
-       if (pmd_leaf(*pmd))
+       if (pmd_leaf(pmdp_get(pmd)))
                return true;
 
        pte = pte_offset_kernel(pmd, addr);
-       return pte_present(*pte);
+       return pte_present(ptep_get(pte));
 }
index fef4e7328e490535aa093688e4136661ada0f381..ef887efcb67900d94b97e603225f3c4d088fea9b 100644 (file)
@@ -5,6 +5,47 @@
 #include <linux/kernel.h>
 #include <linux/pgtable.h>
 
+int ptep_set_access_flags(struct vm_area_struct *vma,
+                         unsigned long address, pte_t *ptep,
+                         pte_t entry, int dirty)
+{
+       if (!pte_same(ptep_get(ptep), entry))
+               __set_pte_at(ptep, entry);
+       /*
+        * update_mmu_cache will unconditionally execute, handling both
+        * the case that the PTE changed and the spurious fault case.
+        */
+       return true;
+}
+
+int ptep_test_and_clear_young(struct vm_area_struct *vma,
+                             unsigned long address,
+                             pte_t *ptep)
+{
+       if (!pte_young(ptep_get(ptep)))
+               return 0;
+       return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
+
+#ifdef CONFIG_64BIT
+pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+       if (pgtable_l4_enabled)
+               return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);
+
+       return (pud_t *)p4d;
+}
+
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+       if (pgtable_l5_enabled)
+               return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);
+
+       return (p4d_t *)pgd;
+}
+#endif
+
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
 {
@@ -25,7 +66,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
 
 int pud_clear_huge(pud_t *pud)
 {
-       if (!pud_leaf(READ_ONCE(*pud)))
+       if (!pud_leaf(pudp_get(pud)))
                return 0;
        pud_clear(pud);
        return 1;
@@ -33,7 +74,7 @@ int pud_clear_huge(pud_t *pud)
 
 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-       pmd_t *pmd = pud_pgtable(*pud);
+       pmd_t *pmd = pud_pgtable(pudp_get(pud));
        int i;
 
        pud_clear(pud);
@@ -63,7 +104,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
 
 int pmd_clear_huge(pmd_t *pmd)
 {
-       if (!pmd_leaf(READ_ONCE(*pmd)))
+       if (!pmd_leaf(pmdp_get(pmd)))
                return 0;
        pmd_clear(pmd);
        return 1;
@@ -71,7 +112,7 @@ int pmd_clear_huge(pmd_t *pmd)
 
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
-       pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
+       pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));
 
        pmd_clear(pmd);
 
@@ -88,7 +129,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
        pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-       VM_BUG_ON(pmd_trans_huge(*pmdp));
+       VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
        /*
         * When leaf PTE entries (regular pages) are collapsed into a leaf
         * PMD entry (huge page), a valid non-leaf PTE is converted into a
index e6659d7368b35403d1b91739080496bfc45442af..893566e004b73fcf9a8dbc94f766e59cd00f1bb1 100644 (file)
@@ -66,6 +66,12 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
                local_flush_tlb_range_threshold_asid(start, size, stride, asid);
 }
 
+/* Flush a range of kernel pages without broadcasting */
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
+}
+
 static void __ipi_flush_tlb_all(void *info)
 {
        local_flush_tlb_all();
@@ -93,29 +99,23 @@ static void __ipi_flush_tlb_range_asid(void *info)
        local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
-                             unsigned long size, unsigned long stride)
+static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
+                             unsigned long start, unsigned long size,
+                             unsigned long stride)
 {
        struct flush_tlb_range_data ftd;
-       const struct cpumask *cmask;
-       unsigned long asid = FLUSH_TLB_NO_ASID;
        bool broadcast;
 
-       if (mm) {
-               unsigned int cpuid;
+       if (cpumask_empty(cmask))
+               return;
 
-               cmask = mm_cpumask(mm);
-               if (cpumask_empty(cmask))
-                       return;
+       if (cmask != cpu_online_mask) {
+               unsigned int cpuid;
 
                cpuid = get_cpu();
                /* check if the tlbflush needs to be sent to other CPUs */
                broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-
-               if (static_branch_unlikely(&use_asid_allocator))
-                       asid = atomic_long_read(&mm->context.id) & asid_mask;
        } else {
-               cmask = cpu_online_mask;
                broadcast = true;
        }
 
@@ -135,25 +135,34 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
                local_flush_tlb_range_asid(start, size, stride, asid);
        }
 
-       if (mm)
+       if (cmask != cpu_online_mask)
                put_cpu();
 }
 
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+       return static_branch_unlikely(&use_asid_allocator) ?
+                       atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+}
+
 void flush_tlb_mm(struct mm_struct *mm)
 {
-       __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+       __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+                         0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_mm_range(struct mm_struct *mm,
                        unsigned long start, unsigned long end,
                        unsigned int page_size)
 {
-       __flush_tlb_range(mm, start, end - start, page_size);
+       __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+                         start, end - start, page_size);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-       __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
+       __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+                         addr, PAGE_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -185,18 +194,45 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                }
        }
 
-       __flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
+       __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+                         start, end - start, stride_size);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-       __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
+       __flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
+                         start, end - start, PAGE_SIZE);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
 {
-       __flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
+       __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+                         start, end - start, PMD_SIZE);
 }
 #endif
+
+bool arch_tlbbatch_should_defer(struct mm_struct *mm)
+{
+       return true;
+}
+
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+                              struct mm_struct *mm,
+                              unsigned long uaddr)
+{
+       cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+}
+
+void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+{
+       flush_tlb_mm(mm);
+}
+
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+{
+       __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
+                         FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+       cpumask_clear(&batch->cpumask);
+}
index 58dc64dd94a82c8d8cc42a71ec69954dc548934a..719a97e7edb2c12277a8e08dd214e0eb03be094a 100644 (file)
@@ -795,6 +795,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
        struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
        struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
        struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+       bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
        void *orig_call = func_addr;
        bool save_ret;
        u32 insn;
@@ -878,7 +879,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 
        stack_size = round_up(stack_size, 16);
 
-       if (func_addr) {
+       if (!is_struct_ops) {
                /* For the trampoline called from function entry,
                 * the frame of traced function and the frame of
                 * trampoline need to be considered.
@@ -998,7 +999,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 
        emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
 
-       if (func_addr) {
+       if (!is_struct_ops) {
                /* trampoline called from function entry */
                emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx);
                emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
index 8f39f04247966d0b492ed378438b33d20474e99c..fe565f3a3a917d0da83dbd8329a503910fa41948 100644 (file)
@@ -216,7 +216,6 @@ config S390
        select HAVE_VIRT_CPU_ACCOUNTING_IDLE
        select IOMMU_HELPER             if PCI
        select IOMMU_SUPPORT            if PCI
-       select KEXEC
        select MMU_GATHER_MERGE_VMAS
        select MMU_GATHER_NO_GATHER
        select MMU_GATHER_RCU_TABLE_FREE
@@ -443,7 +442,7 @@ config COMMAND_LINE_SIZE
          line.
 
 config COMPAT
-       def_bool y
+       def_bool n
        prompt "Kernel support for 31 bit emulation"
        select ARCH_WANT_OLD_COMPAT_IPC
        select COMPAT_OLD_SIGACTION
@@ -454,7 +453,9 @@ config COMPAT
          Select this option if you want to enable your system kernel to
          handle system-calls from ELF binaries for 31 bit ESA.  This option
          (and some other stuff like libraries and such) is needed for
-         executing 31 bit applications.  It is safe to say "Y".
+         executing 31 bit applications.
+
+         If unsure say N.
 
 config SMP
        def_bool y
index 85490d9373fc1446490945d7718770f3874d8736..cae2dd34fbb49d16ee020e72fb669010dca832f8 100644 (file)
@@ -40,6 +40,7 @@ CONFIG_SCHED_AUTOGROUP=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
 CONFIG_PROFILING=y
+CONFIG_KEXEC=y
 CONFIG_KEXEC_FILE=y
 CONFIG_KEXEC_SIG=y
 CONFIG_CRASH_DUMP=y
@@ -636,8 +637,9 @@ CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
+CONFIG_NETFS_SUPPORT=m
 CONFIG_NETFS_STATS=y
-CONFIG_FSCACHE=m
+CONFIG_FSCACHE=y
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
index fb690fbbf54befbf06dc89fde45590e50788be9e..42b988873e5443df15b054d78610697fdf769293 100644 (file)
@@ -38,6 +38,7 @@ CONFIG_SCHED_AUTOGROUP=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
 CONFIG_PROFILING=y
+CONFIG_KEXEC=y
 CONFIG_KEXEC_FILE=y
 CONFIG_KEXEC_SIG=y
 CONFIG_CRASH_DUMP=y
@@ -621,8 +622,9 @@ CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
+CONFIG_NETFS_SUPPORT=m
 CONFIG_NETFS_STATS=y
-CONFIG_FSCACHE=m
+CONFIG_FSCACHE=y
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
index 47028450eee157b5b100e82b590dde720555c173..30d2a16876650e9c3ea32997f771131e6372e2fc 100644 (file)
@@ -10,7 +10,6 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_CRASH_DUMP=y
 CONFIG_MARCH_Z13=y
-# CONFIG_COMPAT is not set
 CONFIG_NR_CPUS=2
 CONFIG_HZ_100=y
 # CONFIG_CHSC_SCH is not set
index 94b6919026dfb8d75d74ca8c9d3aa52f80fc72e1..796007125dff21cbe5c5db0a0eebaf6f0436a4c0 100644 (file)
@@ -111,4 +111,10 @@ static inline void stfle(u64 *stfle_fac_list, int size)
        preempt_enable();
 }
 
+/**
+ * stfle_size - Actual size of the facility list as specified by stfle
+ * (number of double words)
+ */
+unsigned int stfle_size(void);
+
 #endif /* __ASM_FACILITY_H */
index 895f774bbcc55353cc7a3d302b009796a799446f..bf78cf381dfcdac92a170b754328acd16846eb2e 100644 (file)
@@ -25,7 +25,7 @@
  */
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("0:   brcl 0,%l[label]\n"
+       asm goto("0:    brcl 0,%l[label]\n"
                          ".pushsection __jump_table,\"aw\"\n"
                          ".balign      8\n"
                          ".long        0b-.,%l[label]-.\n"
@@ -39,7 +39,7 @@ label:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("0:   brcl 15,%l[label]\n"
+       asm goto("0:    brcl 15,%l[label]\n"
                          ".pushsection __jump_table,\"aw\"\n"
                          ".balign      8\n"
                          ".long        0b-.,%l[label]-.\n"
index 67a298b6cf6e9f316ea6178ce64efc837e5caac9..52664105a473f97b923fb104c2e0c279b4b02cef 100644 (file)
@@ -818,7 +818,7 @@ struct s390_io_adapter {
 
 struct kvm_s390_cpu_model {
        /* facility mask supported by kvm & hosting machine */
-       __u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
+       __u64 fac_mask[S390_ARCH_FAC_MASK_SIZE_U64];
        struct kvm_s390_vm_cpu_subfunc subfuncs;
        /* facility list requested by guest (in dma page) */
        __u64 *fac_list;
index 287bb88f76986e127388efd03c18d117bf4c417e..2686bee800e3d5a35f2d4918aac38f4020e0889c 100644 (file)
@@ -11,6 +11,8 @@
 /* I/O size constraints */
 #define ZPCI_MAX_READ_SIZE     8
 #define ZPCI_MAX_WRITE_SIZE    128
+#define ZPCI_BOUNDARY_SIZE     (1 << 12)
+#define ZPCI_BOUNDARY_MASK     (ZPCI_BOUNDARY_SIZE - 1)
 
 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT               48
@@ -125,16 +127,18 @@ out:
 int zpci_write_block(volatile void __iomem *dst, const void *src,
                     unsigned long len);
 
-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
+static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
 {
-       int count = len > max ? max : len, size = 1;
+       int offset = dst & ZPCI_BOUNDARY_MASK;
+       int size;
 
-       while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
-               dst = dst >> 1;
-               src = src >> 1;
-               size = size << 1;
-       }
-       return size;
+       size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
+       if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
+               return size;
+
+       if (size >= 8)
+               return 8;
+       return rounddown_pow_of_two(size);
 }
 
 static inline int zpci_memcpy_fromio(void *dst,
@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
        int size, rc = 0;
 
        while (n > 0) {
-               size = zpci_get_max_write_size((u64 __force) src,
-                                              (u64) dst, n,
-                                              ZPCI_MAX_READ_SIZE);
+               size = zpci_get_max_io_size((u64 __force) src,
+                                           (u64) dst, n,
+                                           ZPCI_MAX_READ_SIZE);
                rc = zpci_read_single(dst, src, size);
                if (rc)
                        break;
@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
                return -EINVAL;
 
        while (n > 0) {
-               size = zpci_get_max_write_size((u64 __force) dst,
-                                              (u64) src, n,
-                                              ZPCI_MAX_WRITE_SIZE);
+               size = zpci_get_max_io_size((u64 __force) dst,
+                                           (u64) src, n,
+                                           ZPCI_MAX_WRITE_SIZE);
                if (size > 8) /* main path */
                        rc = zpci_write_block(dst, src, size);
                else
index 353def93973b312b0a696d4d6aa36e70b416ebf1..7a562b4199c81b2b2bcb30f4efe0a84e59075a99 100644 (file)
@@ -41,7 +41,7 @@ obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
 obj-y  += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y  += entry.o reipl.o kdebugfs.o alternative.o
 obj-y  += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
-obj-y  += smp.o text_amode31.o stacktrace.o abs_lowcore.o
+obj-y  += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o
 
 extra-y                                += vmlinux.lds
 
diff --git a/arch/s390/kernel/facility.c b/arch/s390/kernel/facility.c
new file mode 100644 (file)
index 0000000..f021272
--- /dev/null
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2023
+ */
+
+#include <asm/facility.h>
+
+unsigned int stfle_size(void)
+{
+       static unsigned int size;
+       unsigned int r;
+       u64 dummy;
+
+       r = READ_ONCE(size);
+       if (!r) {
+               r = __stfle_asm(&dummy, 1) + 1;
+               WRITE_ONCE(size, r);
+       }
+       return r;
+}
+EXPORT_SYMBOL(stfle_size);
index 9e7c15fccfea92232e8370586071b71a5bf59500..a4f3449cc814162b9972faa67e33481b8a42a65f 100644 (file)
@@ -208,7 +208,6 @@ void __load_fpu_regs(void)
        }
        clear_cpu_flag(CIF_FPU);
 }
-EXPORT_SYMBOL(__load_fpu_regs);
 
 void load_fpu_regs(void)
 {
index 39a91b00438a7f6ba48fb541d8f24b51070391d2..bf8a672b15a41afd3a9e0384ad19f83eb8eadba1 100644 (file)
@@ -111,11 +111,11 @@ static void paicrypt_event_destroy(struct perf_event *event)
        mutex_unlock(&pai_reserve_mutex);
 }
 
-static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
+static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
 {
        if (kernel)
                nr += PAI_CRYPTO_MAXCTR;
-       return cpump->page[nr];
+       return page[nr];
 }
 
 /* Read the counter values. Return value from location in CMP. For event
@@ -129,13 +129,13 @@ static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
        int i;
 
        if (event->attr.config != PAI_CRYPTO_BASE) {
-               return paicrypt_getctr(cpump,
+               return paicrypt_getctr(cpump->page,
                                       event->attr.config - PAI_CRYPTO_BASE,
                                       kernel);
        }
 
        for (i = 1; i <= paicrypt_cnt; i++) {
-               u64 val = paicrypt_getctr(cpump, i, kernel);
+               u64 val = paicrypt_getctr(cpump->page, i, kernel);
 
                if (!val)
                        continue;
@@ -317,10 +317,14 @@ static void paicrypt_start(struct perf_event *event, int flags)
         * Events are added, deleted and re-added when 2 or more events
         * are active at the same time.
         */
-       if (!event->hw.last_tag) {
-               event->hw.last_tag = 1;
-               sum = paicrypt_getall(event);           /* Get current value */
-               local64_set(&event->hw.prev_count, sum);
+       if (!event->attr.sample_period) {       /* Counting */
+               if (!event->hw.last_tag) {
+                       event->hw.last_tag = 1;
+                       sum = paicrypt_getall(event);   /* Get current value */
+                       local64_set(&event->hw.prev_count, sum);
+               }
+       } else {                                /* Sampling */
+               perf_sched_cb_inc(event->pmu);
        }
 }
 
@@ -336,19 +340,18 @@ static int paicrypt_add(struct perf_event *event, int flags)
                local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
        }
        cpump->event = event;
-       if (flags & PERF_EF_START && !event->attr.sample_period) {
-               /* Only counting needs initial counter value */
+       if (flags & PERF_EF_START)
                paicrypt_start(event, PERF_EF_RELOAD);
-       }
        event->hw.state = 0;
-       if (event->attr.sample_period)
-               perf_sched_cb_inc(event->pmu);
        return 0;
 }
 
 static void paicrypt_stop(struct perf_event *event, int flags)
 {
-       paicrypt_read(event);
+       if (!event->attr.sample_period) /* Counting */
+               paicrypt_read(event);
+       else                            /* Sampling */
+               perf_sched_cb_dec(event->pmu);
        event->hw.state = PERF_HES_STOPPED;
 }
 
@@ -357,11 +360,7 @@ static void paicrypt_del(struct perf_event *event, int flags)
        struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
        struct paicrypt_map *cpump = mp->mapptr;
 
-       if (event->attr.sample_period)
-               perf_sched_cb_dec(event->pmu);
-       if (!event->attr.sample_period)
-               /* Only counting needs to read counter */
-               paicrypt_stop(event, PERF_EF_UPDATE);
+       paicrypt_stop(event, PERF_EF_UPDATE);
        if (--cpump->active_events == 0) {
                local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
                WRITE_ONCE(S390_lowcore.ccd, 0);
@@ -373,8 +372,7 @@ static void paicrypt_del(struct perf_event *event, int flags)
  * 2 bytes: Number of counter
  * 8 bytes: Value of counter
  */
-static size_t paicrypt_copy(struct pai_userdata *userdata,
-                           struct paicrypt_map *cpump,
+static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
                            bool exclude_user, bool exclude_kernel)
 {
        int i, outidx = 0;
@@ -383,9 +381,9 @@ static size_t paicrypt_copy(struct pai_userdata *userdata,
                u64 val = 0;
 
                if (!exclude_kernel)
-                       val += paicrypt_getctr(cpump, i, true);
+                       val += paicrypt_getctr(page, i, true);
                if (!exclude_user)
-                       val += paicrypt_getctr(cpump, i, false);
+                       val += paicrypt_getctr(page, i, false);
                if (val) {
                        userdata[outidx].num = i;
                        userdata[outidx].value = val;
@@ -395,25 +393,14 @@ static size_t paicrypt_copy(struct pai_userdata *userdata,
        return outidx * sizeof(struct pai_userdata);
 }
 
-static int paicrypt_push_sample(void)
+static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
+                               struct perf_event *event)
 {
-       struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
-       struct paicrypt_map *cpump = mp->mapptr;
-       struct perf_event *event = cpump->event;
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
-       size_t rawsize;
        int overflow;
 
-       if (!cpump->event)              /* No event active */
-               return 0;
-       rawsize = paicrypt_copy(cpump->save, cpump,
-                               cpump->event->attr.exclude_user,
-                               cpump->event->attr.exclude_kernel);
-       if (!rawsize)                   /* No incremented counters */
-               return 0;
-
        /* Setup perf sample */
        memset(&regs, 0, sizeof(regs));
        memset(&raw, 0, sizeof(raw));
@@ -444,6 +431,25 @@ static int paicrypt_push_sample(void)
        return overflow;
 }
 
+/* Check if there is data to be saved on schedule out of a task. */
+static int paicrypt_have_sample(void)
+{
+       struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+       struct paicrypt_map *cpump = mp->mapptr;
+       struct perf_event *event = cpump->event;
+       size_t rawsize;
+       int rc = 0;
+
+       if (!event)             /* No event active */
+               return 0;
+       rawsize = paicrypt_copy(cpump->save, cpump->page,
+                               cpump->event->attr.exclude_user,
+                               cpump->event->attr.exclude_kernel);
+       if (rawsize)                    /* No incremented counters */
+               rc = paicrypt_push_sample(rawsize, cpump, event);
+       return rc;
+}
+
 /* Called on schedule-in and schedule-out. No access to event structure,
  * but for sampling only event CRYPTO_ALL is allowed.
  */
@@ -453,7 +459,7 @@ static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sch
         * results on schedule_out and if page was dirty, clear values.
         */
        if (!sched_in)
-               paicrypt_push_sample();
+               paicrypt_have_sample();
 }
 
 /* Attribute definitions for paicrypt interface. As with other CPU
index e7013a2e8960508566083cfa9344e6daa60638fb..af7f2b538c8fd47a19f73029264462bd6c4fdcbd 100644 (file)
@@ -276,9 +276,9 @@ static int paiext_event_init(struct perf_event *event)
        return 0;
 }
 
-static u64 paiext_getctr(struct paiext_map *cpump, int nr)
+static u64 paiext_getctr(unsigned long *area, int nr)
 {
-       return cpump->area[nr];
+       return area[nr];
 }
 
 /* Read the counter values. Return value from location in buffer. For event
@@ -292,10 +292,11 @@ static u64 paiext_getdata(struct perf_event *event)
        int i;
 
        if (event->attr.config != PAI_NNPA_BASE)
-               return paiext_getctr(cpump, event->attr.config - PAI_NNPA_BASE);
+               return paiext_getctr(cpump->area,
+                                    event->attr.config - PAI_NNPA_BASE);
 
        for (i = 1; i <= paiext_cnt; i++)
-               sum += paiext_getctr(cpump, i);
+               sum += paiext_getctr(cpump->area, i);
 
        return sum;
 }
@@ -320,11 +321,15 @@ static void paiext_start(struct perf_event *event, int flags)
 {
        u64 sum;
 
-       if (event->hw.last_tag)
-               return;
-       event->hw.last_tag = 1;
-       sum = paiext_getall(event);             /* Get current value */
-       local64_set(&event->hw.prev_count, sum);
+       if (!event->attr.sample_period) {       /* Counting */
+               if (!event->hw.last_tag) {
+                       event->hw.last_tag = 1;
+                       sum = paiext_getall(event);     /* Get current value */
+                       local64_set(&event->hw.prev_count, sum);
+               }
+       } else {                                /* Sampling */
+               perf_sched_cb_inc(event->pmu);
+       }
 }
 
 static int paiext_add(struct perf_event *event, int flags)
@@ -341,21 +346,19 @@ static int paiext_add(struct perf_event *event, int flags)
                debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
                                    __func__, S390_lowcore.aicd, pcb->acc);
        }
-       if (flags & PERF_EF_START && !event->attr.sample_period) {
-               /* Only counting needs initial counter value */
+       cpump->event = event;
+       if (flags & PERF_EF_START)
                paiext_start(event, PERF_EF_RELOAD);
-       }
        event->hw.state = 0;
-       if (event->attr.sample_period) {
-               cpump->event = event;
-               perf_sched_cb_inc(event->pmu);
-       }
        return 0;
 }
 
 static void paiext_stop(struct perf_event *event, int flags)
 {
-       paiext_read(event);
+       if (!event->attr.sample_period) /* Counting */
+               paiext_read(event);
+       else                            /* Sampling */
+               perf_sched_cb_dec(event->pmu);
        event->hw.state = PERF_HES_STOPPED;
 }
 
@@ -365,12 +368,7 @@ static void paiext_del(struct perf_event *event, int flags)
        struct paiext_map *cpump = mp->mapptr;
        struct paiext_cb *pcb = cpump->paiext_cb;
 
-       if (event->attr.sample_period)
-               perf_sched_cb_dec(event->pmu);
-       if (!event->attr.sample_period) {
-               /* Only counting needs to read counter */
-               paiext_stop(event, PERF_EF_UPDATE);
-       }
+       paiext_stop(event, PERF_EF_UPDATE);
        if (--cpump->active_events == 0) {
                /* Disable CPU instruction lookup for PAIE1 control block */
                local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
@@ -386,13 +384,12 @@ static void paiext_del(struct perf_event *event, int flags)
  * 2 bytes: Number of counter
  * 8 bytes: Value of counter
  */
-static size_t paiext_copy(struct paiext_map *cpump)
+static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area)
 {
-       struct pai_userdata *userdata = cpump->save;
        int i, outidx = 0;
 
        for (i = 1; i <= paiext_cnt; i++) {
-               u64 val = paiext_getctr(cpump, i);
+               u64 val = paiext_getctr(area, i);
 
                if (val) {
                        userdata[outidx].num = i;
@@ -418,21 +415,14 @@ static size_t paiext_copy(struct paiext_map *cpump)
  * sched_task() callback. That callback is not active after paiext_del()
  * returns and has deleted the event on that CPU.
  */
-static int paiext_push_sample(void)
+static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump,
+                             struct perf_event *event)
 {
-       struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
-       struct paiext_map *cpump = mp->mapptr;
-       struct perf_event *event = cpump->event;
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
-       size_t rawsize;
        int overflow;
 
-       rawsize = paiext_copy(cpump);
-       if (!rawsize)                   /* No incremented counters */
-               return 0;
-
        /* Setup perf sample */
        memset(&regs, 0, sizeof(regs));
        memset(&raw, 0, sizeof(raw));
@@ -461,6 +451,23 @@ static int paiext_push_sample(void)
        return overflow;
 }
 
+/* Check if there is data to be saved on schedule out of a task. */
+static int paiext_have_sample(void)
+{
+       struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
+       struct paiext_map *cpump = mp->mapptr;
+       struct perf_event *event = cpump->event;
+       size_t rawsize;
+       int rc = 0;
+
+       if (!event)
+               return 0;
+       rawsize = paiext_copy(cpump->save, cpump->area);
+       if (rawsize)                    /* Incremented counters */
+               rc = paiext_push_sample(rawsize, cpump, event);
+       return rc;
+}
+
 /* Called on schedule-in and schedule-out. No access to event structure,
  * but for sampling only event NNPA_ALL is allowed.
  */
@@ -470,7 +477,7 @@ static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched
         * results on schedule_out and if page was dirty, clear values.
         */
        if (!sched_in)
-               paiext_push_sample();
+               paiext_have_sample();
 }
 
 /* Attribute definitions for pai extension1 interface. As with other CPU
index 2e6754b62b2093c789e1de27e1d3b7105e6ee462..f1897a8bb221078cf3b0da87c1d614241e0544b9 100644 (file)
@@ -917,7 +917,6 @@ static int s390_fpregs_set(struct task_struct *target,
        else
                memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
 
-       /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
                u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
                rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
index 45fdf2a9b2e326140033a78bb19f95ccddfac255..72e9b7dcdf7d977a14a1ea7f9d39a06c36e4b97f 100644 (file)
@@ -20,19 +20,16 @@ config KVM
        def_tristate y
        prompt "Kernel-based Virtual Machine (KVM) support"
        depends on HAVE_KVM
-       select PREEMPT_NOTIFIERS
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select HAVE_KVM_VCPU_ASYNC_IOCTL
-       select HAVE_KVM_EVENTFD
        select KVM_ASYNC_PF
        select KVM_ASYNC_PF_SYNC
+       select KVM_COMMON
        select HAVE_KVM_IRQCHIP
-       select HAVE_KVM_IRQFD
        select HAVE_KVM_IRQ_ROUTING
        select HAVE_KVM_INVALID_WAKEUPS
        select HAVE_KVM_NO_POLL
        select KVM_VFIO
-       select INTERVAL_TREE
        select MMU_NOTIFIER
        help
          Support hosting paravirtualized guest machines using the SIE
index 3765c4223bf944f7c96a4b44430ca739e14696b2..80879fc73c9005184349184479cbefc719a78765 100644 (file)
@@ -213,8 +213,8 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
        else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
                return -EINVAL;
 
-       bp_data = memdup_user(dbg->arch.hw_bp,
-                             sizeof(*bp_data) * dbg->arch.nr_hw_bp);
+       bp_data = memdup_array_user(dbg->arch.hw_bp, dbg->arch.nr_hw_bp,
+                                   sizeof(*bp_data));
        if (IS_ERR(bp_data))
                return PTR_ERR(bp_data);
 
index acc81ca6492e6455a9cac2c4b93c5fb6b6e22295..ea63ac76988914d85bdfb7fdbe7bd209f4066741 100644 (file)
@@ -563,7 +563,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
-       case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
index 621a17fd1a1bb52fd7875a134a1acac25f004209..f875a404a0a02555d5875128fafedcfe54d5b4d6 100644 (file)
@@ -676,8 +676,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
        if (vcpu->kvm->arch.crypto.pqap_hook) {
                pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
                ret = pqap_hook(vcpu);
-               if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
-                       kvm_s390_set_psw_cc(vcpu, 3);
+               if (!ret) {
+                       if (vcpu->run->s.regs.gprs[1] & 0x00ff0000)
+                               kvm_s390_set_psw_cc(vcpu, 3);
+                       else
+                               kvm_s390_set_psw_cc(vcpu, 0);
+               }
                up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
                return ret;
        }
index 8207a892bbe22f37d4f9a98d3e2f0cc7210292e9..3af3bd20ac7b8f075e08b85b34f7e257f4687eeb 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/nmi.h>
 #include <asm/dis.h>
 #include <asm/fpu/api.h>
+#include <asm/facility.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 
@@ -984,12 +985,26 @@ static void retry_vsie_icpt(struct vsie_page *vsie_page)
 static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
        struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
-       __u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;
+       __u32 fac = READ_ONCE(vsie_page->scb_o->fac);
 
+       /*
+        * Alternate-STFLE-Interpretive-Execution facilities are not supported
+        * -> format-0 flcb
+        */
        if (fac && test_kvm_facility(vcpu->kvm, 7)) {
                retry_vsie_icpt(vsie_page);
+               /*
+                * The facility list origin (FLO) is in bits 1 - 28 of the FLD
+                * so we need to mask here before reading.
+                */
+               fac = fac & 0x7ffffff8U;
+               /*
+                * format-0 -> size of nested guest's facility list == guest's size
+                * guest's size == host's size, since STFLE is interpretatively executed
+                * using a format-0 for the guest, too.
+                */
                if (read_guest_real(vcpu, fac, &vsie_page->fac,
-                                   sizeof(vsie_page->fac)))
+                                   stfle_size() * sizeof(u64)))
                        return set_validity_icpt(scb_s, 0x1090U);
                scb_s->fac = (__u32)(__u64) &vsie_page->fac;
        }
@@ -1220,7 +1235,6 @@ static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
        gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
        if (IS_ERR(gmap))
                return PTR_ERR(gmap);
-       gmap->private = vcpu->kvm;
        vcpu->kvm->stat.gmap_shadow_create++;
        WRITE_ONCE(vsie_page->gmap, gmap);
        return 0;
index ab4098886e562f64cd54055d1a249ea09c61c6c9..ac4c78546d973713859079520552148ae7b2c0b7 100644 (file)
@@ -280,7 +280,6 @@ static void do_sigbus(struct pt_regs *regs)
 static void do_exception(struct pt_regs *regs, int access)
 {
        struct vm_area_struct *vma;
-       struct task_struct *tsk;
        unsigned long address;
        struct mm_struct *mm;
        enum fault_type type;
@@ -289,7 +288,6 @@ static void do_exception(struct pt_regs *regs, int access)
        vm_fault_t fault;
        bool is_write;
 
-       tsk = current;
        /*
         * The instruction that caused the program check has
         * been nullified. Don't signal single step via SIGTRAP.
@@ -297,7 +295,7 @@ static void do_exception(struct pt_regs *regs, int access)
        clear_thread_flag(TIF_PER_TRAP);
        if (kprobe_page_fault(regs, 14))
                return;
-       mm = tsk->mm;
+       mm = current->mm;
        address = get_fault_address(regs);
        is_write = fault_is_write(regs);
        type = get_fault_type(regs);
index 6f96b5a71c6383d07eb447cb80df70214bdd1910..8da39deb56ca4952a6f8e436d153ec6f54292932 100644 (file)
@@ -1691,6 +1691,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
                return ERR_PTR(-ENOMEM);
        new->mm = parent->mm;
        new->parent = gmap_get(parent);
+       new->private = parent->private;
        new->orig_asce = asce;
        new->edat_level = edat_level;
        new->initialized = false;
index 5880893329310db5a6a65643b7641b4c6109973e..a90499c087f0c5e917c1f36072f56c0979951d1a 100644 (file)
@@ -97,9 +97,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
                return -EINVAL;
 
        while (n > 0) {
-               size = zpci_get_max_write_size((u64 __force) dst,
-                                              (u64 __force) src, n,
-                                              ZPCI_MAX_WRITE_SIZE);
+               size = zpci_get_max_io_size((u64 __force) dst,
+                                           (u64 __force) src, n,
+                                           ZPCI_MAX_WRITE_SIZE);
                if (size > 8) /* main path */
                        rc = __pcistb_mio_inuser(dst, src, size, &status);
                else
@@ -242,9 +242,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
        u8 status;
 
        while (n > 0) {
-               size = zpci_get_max_write_size((u64 __force) src,
-                                              (u64 __force) dst, n,
-                                              ZPCI_MAX_READ_SIZE);
+               size = zpci_get_max_io_size((u64 __force) src,
+                                           (u64 __force) dst, n,
+                                           ZPCI_MAX_READ_SIZE);
                rc = __pcilg_mio_inuser(dst, src, size, &status);
                if (rc)
                        break;
index 0f279360838a4a7492c1ad86d38ac1f43173fc9f..30d117f9ad7eeafbc43bcaf921b94c941bb1cd8c 100644 (file)
@@ -1220,7 +1220,7 @@ static int __init arch_setup(void)
                lcdc_info.ch[0].num_modes               = ARRAY_SIZE(ecovec_dvi_modes);
 
                /* No backlight */
-               gpio_backlight_data.fbdev = NULL;
+               gpio_backlight_data.dev = NULL;
 
                gpio_set_value(GPIO_PTA2, 1);
                gpio_set_value(GPIO_PTU1, 1);
index cf59b98446e4d3e87c7fc9837ab5b06e9016f153..7b427c17fbfecb24d63e717023aad19ce1c953e8 100644 (file)
@@ -171,7 +171,8 @@ CONFIG_BTRFS_FS=y
 CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
index 878b6b551bd2d0119dd17f2918b73d3ae4120a77..51112f54552b329a307577a5a047f97172d56381 100644 (file)
@@ -90,6 +90,7 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
        unsigned long len);
 
 #define flush_cache_vmap(start, end)           local_flush_cache_all(NULL)
+#define flush_cache_vmap_early(start, end)     do { } while (0)
 #define flush_cache_vunmap(start, end)         local_flush_cache_all(NULL)
 
 #define flush_dcache_mmap_lock(mapping)                do { } while (0)
index 6e86644480488f692753a84950ea25e8235985c7..118744d349e21e43175017296ea978269a5a7ef4 100644 (file)
@@ -1,11 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y += vsyscall.o vsyscall-syscall.o vsyscall-syms.o
 
-$(obj)/vsyscall-syscall.o: \
-       $(foreach F,trapa,$(obj)/vsyscall-$F.so)
+$(obj)/vsyscall-syscall.o: $(obj)/vsyscall-trapa.so
 
 # Teach kbuild about targets
-targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so)
+targets += vsyscall-trapa.o vsyscall-traps.so
 targets += vsyscall-note.o vsyscall.lds vsyscall-dummy.o
 
 # The DSO images are built using a special linker script
index f3b7270bf71b26ae7dcf77378d2b336363f307f5..9fee0ccfccb8e1b95a9d21ab293774fd6797eeb7 100644 (file)
@@ -48,6 +48,7 @@ static inline void flush_dcache_page(struct page *page)
 #define flush_dcache_mmap_unlock(mapping)      do { } while (0)
 
 #define flush_cache_vmap(start, end)           flush_cache_all()
+#define flush_cache_vmap_early(start, end)     do { } while (0)
 #define flush_cache_vunmap(start, end)         flush_cache_all()
 
 /* When a context switch happens we must flush all user windows so that
index 0e879004efff16e69afadb5867731960f20f9781..2b1261b77ecd1b9f93bdecbd1fa08a1985b08171 100644 (file)
@@ -75,6 +75,7 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *,
 #define flush_dcache_mmap_unlock(mapping)      do { } while (0)
 
 #define flush_cache_vmap(start, end)           do { } while (0)
+#define flush_cache_vmap_early(start, end)     do { } while (0)
 #define flush_cache_vunmap(start, end)         do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
index 94eb529dcb77623caf637387e694d8e5ddc049a8..2718cbea826a7d13aefacd26fee3719b69856746 100644 (file)
@@ -10,7 +10,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\n\t"
+       asm goto("1:\n\t"
                 "nop\n\t"
                 "nop\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
@@ -26,7 +26,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\n\t"
+       asm goto("1:\n\t"
                 "b %l[l_yes]\n\t"
                 "nop\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
index 3c38ca40a22bace28681258759f98f38b334c4bf..a84598568300d331e035f25a78977b515cb9fbc5 100644 (file)
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
 
 #include <asm/apb.h>
 #include <asm/iommu.h>
@@ -456,7 +459,6 @@ static void sabre_pbm_init(struct pci_pbm_info *pbm,
 static const struct of_device_id sabre_match[];
 static int sabre_probe(struct platform_device *op)
 {
-       const struct of_device_id *match;
        const struct linux_prom64_registers *pr_regs;
        struct device_node *dp = op->dev.of_node;
        struct pci_pbm_info *pbm;
@@ -466,8 +468,7 @@ static int sabre_probe(struct platform_device *op)
        const u32 *vdma;
        u64 clear_irq;
 
-       match = of_match_device(sabre_match, &op->dev);
-       hummingbird_p = match && (match->data != NULL);
+       hummingbird_p = (uintptr_t)device_get_match_data(&op->dev);
        if (!hummingbird_p) {
                struct device_node *cpu_dp;
 
index 23b47f7fdb1d5290dd97377b5eb283dc34fa3b9b..5d8dd49495863dc64d03809373e32887102f7baa 100644 (file)
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/numa.h>
 
 #include <asm/iommu.h>
@@ -1459,15 +1462,13 @@ out_err:
        return err;
 }
 
-static const struct of_device_id schizo_match[];
 static int schizo_probe(struct platform_device *op)
 {
-       const struct of_device_id *match;
+       unsigned long chip_type = (unsigned long)device_get_match_data(&op->dev);
 
-       match = of_match_device(schizo_match, &op->dev);
-       if (!match)
+       if (!chip_type)
                return -EINVAL;
-       return __schizo_init(op, (unsigned long)match->data);
+       return __schizo_init(op, chip_type);
 }
 
 /* The ordering of this table is very important.  Some Tomatillo
index d08c3a0443f3a77f8fe8cb388c09ca2f5e16f67c..7f5eedf1f5e0ad3ff23fcdbcf882ece7eced120c 100644 (file)
@@ -3,9 +3,6 @@
 # Building vDSO images for sparc.
 #
 
-VDSO64-$(CONFIG_SPARC64)       := y
-VDSOCOMPAT-$(CONFIG_COMPAT)    := y
-
 # files to link into the vdso
 vobjs-y := vdso-note.o vclock_gettime.o
 
@@ -13,22 +10,15 @@ vobjs-y := vdso-note.o vclock_gettime.o
 obj-y                          += vma.o
 
 # vDSO images to build
-vdso_img-$(VDSO64-y)           += 64
-vdso_img-$(VDSOCOMPAT-y)       += 32
+obj-$(CONFIG_SPARC64)          += vdso-image-64.o
+obj-$(CONFIG_COMPAT)           += vdso-image-32.o
 
-vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
+vobjs := $(addprefix $(obj)/, $(vobjs-y))
 
 $(obj)/vdso.o: $(obj)/vdso.so
 
 targets += vdso.lds $(vobjs-y)
-
-# Build the vDSO image C files and link them in.
-vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o)
-vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
-vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
-obj-y += $(vdso_img_objs)
-targets += $(vdso_img_cfiles)
-targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
+targets += $(foreach x, 32 64, vdso-image-$(x).c vdso$(x).so vdso$(x).so.dbg)
 
 CPPFLAGS_vdso.lds += -P -C
 
index 82f05f250634807c9f78774bca9213dfd5de2038..34957dcb88b9c31befa3c2b7e08809de73b23a3e 100644 (file)
@@ -115,7 +115,9 @@ archprepare:
        $(Q)$(MAKE) $(build)=$(HOST_DIR)/um include/generated/user_constants.h
 
 LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
-LINK-$(CONFIG_LD_SCRIPT_DYN) += $(call cc-option, -no-pie)
+ifdef CONFIG_LD_SCRIPT_DYN
+LINK-$(call gcc-min-version, 60100)$(CONFIG_CC_IS_CLANG) += -no-pie
+endif
 LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib
 
 CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
index ac35de5316a6a547d4c8293b61387ddbae546476..67323b02899990b58c9f008565e7df2b7f7dcbda 100644 (file)
@@ -4,7 +4,12 @@
 #
 
 GPROF_OPT += -pg
+
+ifdef CONFIG_CC_IS_CLANG
+GCOV_OPT += -fprofile-instr-generate -fcoverage-mapping
+else
 GCOV_OPT += -fprofile-arcs -ftest-coverage
+endif
 
 CFLAGS-$(CONFIG_GCOV) += $(GCOV_OPT)
 CFLAGS-$(CONFIG_GPROF) += $(GPROF_OPT)
index 3fec3b8406e98a2bad48c19630ce0cd801f738b4..e14b9cdf7a33df26367e766099405d350510fdde 100644 (file)
@@ -30,7 +30,7 @@ struct chan {
 extern void chan_interrupt(struct line *line, int irq);
 extern int parse_chan_pair(char *str, struct line *line, int device,
                           const struct chan_opts *opts, char **error_out);
-extern int write_chan(struct chan *chan, const char *buf, int len,
+extern int write_chan(struct chan *chan, const u8 *buf, size_t len,
                             int write_irq);
 extern int console_write_chan(struct chan *chan, const char *buf, 
                              int len);
index 26a702a06515492a8797e8f4fa7bd59282435060..37538b4168da6af584d0b5b2ccc035516edb9b0f 100644 (file)
@@ -33,14 +33,14 @@ static void not_configged_close(int fd, void *data)
               "UML\n");
 }
 
-static int not_configged_read(int fd, char *c_out, void *data)
+static int not_configged_read(int fd, u8 *c_out, void *data)
 {
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
        return -EIO;
 }
 
-static int not_configged_write(int fd, const char *buf, int len, void *data)
+static int not_configged_write(int fd, const u8 *buf, size_t len, void *data)
 {
        printk(KERN_ERR "Using a channel type which is configured out of "
               "UML\n");
@@ -247,8 +247,7 @@ void deactivate_chan(struct chan *chan, int irq)
                deactivate_fd(chan->fd, irq);
 }
 
-int write_chan(struct chan *chan, const char *buf, int len,
-              int write_irq)
+int write_chan(struct chan *chan, const u8 *buf, size_t len, int write_irq)
 {
        int n, ret = 0;
 
@@ -540,7 +539,7 @@ void chan_interrupt(struct line *line, int irq)
        struct tty_port *port = &line->port;
        struct chan *chan = line->chan_in;
        int err;
-       char c;
+       u8 c;
 
        if (!chan || !chan->ops->read)
                goto out;
index 25727ed648b7273bf960aa4259aa1374123c493d..ec04e47b9d799d0939a531b59e55c8e58b2ee135 100644 (file)
@@ -19,7 +19,7 @@ void generic_close(int fd, void *unused)
        close(fd);
 }
 
-int generic_read(int fd, char *c_out, void *unused)
+int generic_read(int fd, __u8 *c_out, void *unused)
 {
        int n;
 
@@ -35,7 +35,7 @@ int generic_read(int fd, char *c_out, void *unused)
 
 /* XXX Trivial wrapper around write */
 
-int generic_write(int fd, const char *buf, int n, void *unused)
+int generic_write(int fd, const __u8 *buf, size_t n, void *unused)
 {
        int err;
 
@@ -141,7 +141,7 @@ struct winch_data {
        int pipe_fd;
 };
 
-static int winch_thread(void *arg)
+static __noreturn int winch_thread(void *arg)
 {
        struct winch_data *data = arg;
        sigset_t sigs;
@@ -153,8 +153,8 @@ static int winch_thread(void *arg)
        pipe_fd = data->pipe_fd;
        count = write(pipe_fd, &c, sizeof(c));
        if (count != sizeof(c))
-               printk(UM_KERN_ERR "winch_thread : failed to write "
-                      "synchronization byte, err = %d\n", -count);
+               os_info("winch_thread : failed to write synchronization byte, err = %d\n",
+                       -count);
 
        /*
         * We are not using SIG_IGN on purpose, so don't fix it as I thought to
@@ -166,29 +166,29 @@ static int winch_thread(void *arg)
        sigfillset(&sigs);
        /* Block all signals possible. */
        if (sigprocmask(SIG_SETMASK, &sigs, NULL) < 0) {
-               printk(UM_KERN_ERR "winch_thread : sigprocmask failed, "
-                      "errno = %d\n", errno);
-               exit(1);
+               os_info("winch_thread : sigprocmask failed, errno = %d\n",
+                       errno);
+               goto wait_kill;
        }
        /* In sigsuspend(), block anything else than SIGWINCH. */
        sigdelset(&sigs, SIGWINCH);
 
        if (setsid() < 0) {
-               printk(UM_KERN_ERR "winch_thread : setsid failed, errno = %d\n",
+               os_info("winch_thread : setsid failed, errno = %d\n",
                       errno);
-               exit(1);
+               goto wait_kill;
        }
 
        if (ioctl(pty_fd, TIOCSCTTY, 0) < 0) {
-               printk(UM_KERN_ERR "winch_thread : TIOCSCTTY failed on "
-                      "fd %d err = %d\n", pty_fd, errno);
-               exit(1);
+               os_info("winch_thread : TIOCSCTTY failed on "
+                       "fd %d err = %d\n", pty_fd, errno);
+               goto wait_kill;
        }
 
        if (tcsetpgrp(pty_fd, os_getpid()) < 0) {
-               printk(UM_KERN_ERR "winch_thread : tcsetpgrp failed on "
-                      "fd %d err = %d\n", pty_fd, errno);
-               exit(1);
+               os_info("winch_thread : tcsetpgrp failed on fd %d err = %d\n",
+                       pty_fd, errno);
+               goto wait_kill;
        }
 
        /*
@@ -199,8 +199,8 @@ static int winch_thread(void *arg)
         */
        count = read(pipe_fd, &c, sizeof(c));
        if (count != sizeof(c))
-               printk(UM_KERN_ERR "winch_thread : failed to read "
-                      "synchronization byte, err = %d\n", errno);
+               os_info("winch_thread : failed to read synchronization byte, err = %d\n",
+                       errno);
 
        while(1) {
                /*
@@ -211,9 +211,15 @@ static int winch_thread(void *arg)
 
                count = write(pipe_fd, &c, sizeof(c));
                if (count != sizeof(c))
-                       printk(UM_KERN_ERR "winch_thread : write failed, "
-                              "err = %d\n", errno);
+                       os_info("winch_thread : write failed, err = %d\n",
+                               errno);
        }
+
+wait_kill:
+       c = 2;
+       count = write(pipe_fd, &c, sizeof(c));
+       while (1)
+               pause();
 }
 
 static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
index 4e51b85e2a23a524066333353585de857403e5ae..e158e16fb3cc9852af0cd496034ce8cd20e03eb2 100644 (file)
@@ -7,6 +7,7 @@
 #define __CHAN_USER_H__
 
 #include <init.h>
+#include <linux/types.h>
 
 struct chan_opts {
        void (*const announce)(char *dev_name, int dev);
@@ -19,8 +20,8 @@ struct chan_ops {
        void *(*init)(char *, int, const struct chan_opts *);
        int (*open)(int, int, int, void *, char **);
        void (*close)(int, void *);
-       int (*read)(int, char *, void *);
-       int (*write)(int, const char *, int, void *);
+       int (*read)(int, __u8 *, void *);
+       int (*write)(int, const __u8 *, size_t, void *);
        int (*console_write)(int, const char *, int);
        int (*window_size)(int, void *, unsigned short *, unsigned short *);
        void (*free)(void *);
@@ -31,8 +32,8 @@ extern const struct chan_ops fd_ops, null_ops, port_ops, pts_ops, pty_ops,
        tty_ops, xterm_ops;
 
 extern void generic_close(int fd, void *unused);
-extern int generic_read(int fd, char *c_out, void *unused);
-extern int generic_write(int fd, const char *buf, int n, void *unused);
+extern int generic_read(int fd, __u8 *c_out, void *unused);
+extern int generic_write(int fd, const __u8 *buf, size_t n, void *unused);
 extern int generic_console_write(int fd, const char *buf, int n);
 extern int generic_window_size(int fd, void *unused, unsigned short *rows_out,
                               unsigned short *cols_out);
index b98545f3edb5033260dad9590d812f3ddc522360..ffc5cb92fa367751daad52e62c949dcd9a5649e7 100644 (file)
@@ -83,7 +83,7 @@ unsigned int line_chars_in_buffer(struct tty_struct *tty)
  *
  * Must be called while holding line->lock!
  */
-static int buffer_data(struct line *line, const char *buf, int len)
+static int buffer_data(struct line *line, const u8 *buf, size_t len)
 {
        int end, room;
 
@@ -629,15 +629,18 @@ static irqreturn_t winch_interrupt(int irq, void *data)
 
        if (fd != -1) {
                err = generic_read(fd, &c, NULL);
-               if (err < 0) {
+               /* A read of 2 means the winch thread failed and has warned */
+               if (err < 0 || (err == 1 && c == 2)) {
                        if (err != -EAGAIN) {
                                winch->fd = -1;
                                list_del(&winch->list);
                                os_close_file(fd);
-                               printk(KERN_ERR "winch_interrupt : "
-                                      "read failed, errno = %d\n", -err);
-                               printk(KERN_ERR "fd %d is losing SIGWINCH "
-                                      "support\n", winch->tty_fd);
+                               if (err < 0) {
+                                       printk(KERN_ERR "winch_interrupt : read failed, errno = %d\n",
+                                              -err);
+                                       printk(KERN_ERR "fd %d is losing SIGWINCH support\n",
+                                              winch->tty_fd);
+                               }
                                INIT_WORK(&winch->work, __free_winch);
                                schedule_work(&winch->work);
                                return IRQ_HANDLED;
index e84fb9b4165e86f498031b004184772dd33471f9..e8bd6f3dfb507771e1d7f9e89252904e14240199 100644 (file)
@@ -47,9 +47,9 @@ struct line {
         *
         * buffer points to a buffer allocated on demand, of length
         * LINE_BUFSIZE, head to the start of the ring, tail to the end.*/
-       char *buffer;
-       char *head;
-       char *tail;
+       u8 *buffer;
+       u8 *head;
+       u8 *tail;
 
        int sigio;
        struct delayed_work task;
index 3d7836c46507010b03f057873665a090e3afa40d..cabcc501b448a3006abd9d7d417da75d90460a5a 100644 (file)
@@ -204,7 +204,7 @@ static int uml_net_close(struct net_device *dev)
        return 0;
 }
 
-static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct uml_net_private *lp = netdev_priv(dev);
        unsigned long flags;
index 87087763a417f946e15fb33992ccb269db218d3e..30d59b8481b40711e018f81c5826acc02d3bc42b 100644 (file)
@@ -28,7 +28,7 @@ static int null_open(int input, int output, int primary, void *d,
        return (fd < 0) ? -errno : fd;
 }
 
-static int null_read(int fd, char *c_out, void *unused)
+static int null_read(int fd, __u8 *c_out, void *unused)
 {
        return -ENODEV;
 }
index ffe2ee8a02465bc649914353becacd59c0dc6188..97a37c06299721f11ef8ca70a256b7b6d1ba540d 100644 (file)
@@ -971,7 +971,7 @@ static long um_pci_map_platform(unsigned long offset, size_t size,
        *ops = &um_pci_device_bar_ops;
        *priv = &um_pci_platform_device->resptr[0];
 
-       return 0;
+       return offset;
 }
 
 static const struct logic_iomem_region_ops um_pci_platform_ops = {
index 4b6d1b526bc1217e2e89d4670f9c4385e68dacc7..66fe06db872f05bb775f0089a4f134f77563efe4 100644 (file)
@@ -75,7 +75,7 @@ extern void setup_clear_cpu_cap(unsigned int bit);
  */
 static __always_inline bool _static_cpu_has(u16 bit)
 {
-       asm_volatile_goto("1: jmp 6f\n"
+       asm goto("1: jmp 6f\n"
                 "2:\n"
                 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
                         "((5f-4f) - (2b-1b)),0x90\n"
index 5b072aba5b658f95ab8b0248f569d1fd1f2cee5b..a7555e43ed14ae21431aa58b2e8a8a556b538518 100644 (file)
@@ -12,7 +12,6 @@
 typedef struct mm_context {
        struct mm_id id;
        struct uml_arch_mm_context arch;
-       struct page *stub_pages[2];
 } mm_context_t;
 
 extern void __switch_mm(struct mm_id * mm_idp);
index 7414154b8e9aeaeb74a4ad765aaa6db1549a3510..6c3779541845bd834bbc53e4fdb346e62c18d884 100644 (file)
@@ -22,7 +22,6 @@ struct mm_struct;
 struct thread_struct {
        struct pt_regs regs;
        struct pt_regs *segv_regs;
-       int singlestep_syscall;
        void *fault_addr;
        jmp_buf *fault_catcher;
        struct task_struct *prev_sched;
index d8b8b4f07e429d8eb4c148b02e5581abe171455a..789b83013f355cb60e0fe9e21a671eabd79bf64b 100644 (file)
@@ -34,7 +34,6 @@ extern int handle_page_fault(unsigned long address, unsigned long ip,
 
 extern unsigned int do_IRQ(int irq, struct uml_pt_regs *regs);
 extern void initial_thread_cb(void (*proc)(void *), void *arg);
-extern int is_syscall(unsigned long addr);
 
 extern void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
 
@@ -50,7 +49,7 @@ extern void do_uml_exitcalls(void);
  * Are we disallowed to sleep? Used to choose between GFP_KERNEL and
  * GFP_ATOMIC.
  */
-extern int __cant_sleep(void);
+extern int __uml_cant_sleep(void);
 extern int get_current_pid(void);
 extern int copy_from_user_proc(void *to, void *from, int size);
 extern char *uml_strdup(const char *string);
@@ -58,7 +57,7 @@ extern char *uml_strdup(const char *string);
 extern unsigned long to_irq_stack(unsigned long *mask_out);
 extern unsigned long from_irq_stack(int nested);
 
-extern int singlestepping(void *t);
+extern int singlestepping(void);
 
 extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
 extern void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs);
index 0df646c6651ea00c99bdbdeab8f02aa53f19524b..aff8906304ea8c7d97e00c002fc5ca4bac6f120a 100644 (file)
@@ -323,9 +323,6 @@ extern void sigio_broken(int fd);
 extern int __add_sigio_fd(int fd);
 extern int __ignore_sigio_fd(int fd);
 
-/* prctl.c */
-extern int os_arch_prctl(int pid, int option, unsigned long *arg2);
-
 /* tty.c */
 extern int get_pty(void);
 
index 95455e8996e7252fc44cd2a3576809efb5d209e2..8a705d8f96ce6eb60907e154d5ccd505c91201a7 100644 (file)
 extern int ptrace_getregs(long pid, unsigned long *regs_out);
 extern int ptrace_setregs(long pid, unsigned long *regs_in);
 
-/* syscall emulation path in ptrace */
-
-#ifndef PTRACE_SYSEMU
-#define PTRACE_SYSEMU 31
-#endif
-#ifndef PTRACE_SYSEMU_SINGLESTEP
-#define PTRACE_SYSEMU_SINGLESTEP 32
-#endif
-
-/* On architectures, that started to support PTRACE_O_TRACESYSGOOD
- * in linux 2.4, there are two different definitions of
- * PTRACE_SETOPTIONS: linux 2.4 uses 21 while linux 2.6 uses 0x4200.
- * For binary compatibility, 2.6 also supports the old "21", named
- * PTRACE_OLDSETOPTION. On these architectures, UML always must use
- * "21", to ensure the kernel runs on 2.4 and 2.6 host without
- * recompilation. So, we use PTRACE_OLDSETOPTIONS in UML.
- * We also want to be able to build the kernel on 2.4, which doesn't
- * have PTRACE_OLDSETOPTIONS. So, if it is missing, we declare
- * PTRACE_OLDSETOPTIONS to be the same as PTRACE_SETOPTIONS.
- *
- * On architectures, that start to support PTRACE_O_TRACESYSGOOD on
- * linux 2.6, PTRACE_OLDSETOPTIONS never is defined, and also isn't
- * supported by the host kernel. In that case, our trick lets us use
- * the new 0x4200 with the name PTRACE_OLDSETOPTIONS.
- */
-#ifndef PTRACE_OLDSETOPTIONS
-#define PTRACE_OLDSETOPTIONS PTRACE_SETOPTIONS
-#endif
-
-void set_using_sysemu(int value);
-int get_using_sysemu(void);
-extern int sysemu_supported;
-
-#define SELECT_PTRACE_OPERATION(sysemu_mode, singlestep_mode) \
-       (((int[3][3] ) { \
-               { PTRACE_SYSCALL, PTRACE_SYSCALL, PTRACE_SINGLESTEP }, \
-               { PTRACE_SYSEMU, PTRACE_SYSEMU, PTRACE_SINGLESTEP }, \
-               { PTRACE_SYSEMU, PTRACE_SYSEMU_SINGLESTEP, \
-                 PTRACE_SYSEMU_SINGLESTEP } }) \
-               [sysemu_mode][singlestep_mode])
-
 #endif
index 2f9c3ce5b45e8cfcc1eb4f67e14f5fbbd7615fe3..a0450326521cd55e0ec81678dbca1ba435d119ee 100644 (file)
@@ -14,8 +14,6 @@ extern int save_fp_registers(int pid, unsigned long *fp_regs);
 extern int restore_fp_registers(int pid, unsigned long *fp_regs);
 extern int save_fpx_registers(int pid, unsigned long *fp_regs);
 extern int restore_fpx_registers(int pid, unsigned long *fp_regs);
-extern int save_registers(int pid, struct uml_pt_regs *regs);
-extern int restore_pid_registers(int pid, struct uml_pt_regs *regs);
 extern int init_pid_registers(int pid);
 extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs);
 extern int get_fp_registers(int pid, unsigned long *regs);
index 106b7da2f8d6f787f29515b20a7cebd43d8db74b..ab95648e93e156496c9d239b55de2d7821e87303 100644 (file)
@@ -220,7 +220,7 @@ void arch_cpu_idle(void)
        um_idle_sleep();
 }
 
-int __cant_sleep(void) {
+int __uml_cant_sleep(void) {
        return in_atomic() || irqs_disabled() || in_interrupt();
        /* Is in_interrupt() really needed? */
 }
@@ -332,17 +332,9 @@ int __init make_proc_sysemu(void)
 
 late_initcall(make_proc_sysemu);
 
-int singlestepping(void * t)
+int singlestepping(void)
 {
-       struct task_struct *task = t ? t : current;
-
-       if (!test_thread_flag(TIF_SINGLESTEP))
-               return 0;
-
-       if (task->thread.singlestep_syscall)
-               return 1;
-
-       return 2;
+       return test_thread_flag(TIF_SINGLESTEP);
 }
 
 /*
index 5154b27de580f742a26d3f94ccdd9c980fb51193..6600a2782796740ce901a441dd629b3268b57f43 100644 (file)
@@ -12,7 +12,6 @@
 void user_enable_single_step(struct task_struct *child)
 {
        set_tsk_thread_flag(child, TIF_SINGLESTEP);
-       child->thread.singlestep_syscall = 0;
 
 #ifdef SUBARCH_SET_SINGLESTEPPING
        SUBARCH_SET_SINGLESTEPPING(child, 1);
@@ -22,7 +21,6 @@ void user_enable_single_step(struct task_struct *child)
 void user_disable_single_step(struct task_struct *child)
 {
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-       child->thread.singlestep_syscall = 0;
 
 #ifdef SUBARCH_SET_SINGLESTEPPING
        SUBARCH_SET_SINGLESTEPPING(child, 0);
index ae4658f576ab7ad26658187f130e4d4d57d48267..a56b44522766fec85a945ad8788b2b3019b4673b 100644 (file)
@@ -120,18 +120,6 @@ void do_signal(struct pt_regs *regs)
                }
        }
 
-       /*
-        * This closes a way to execute a system call on the host.  If
-        * you set a breakpoint on a system call instruction and singlestep
-        * from it, the tracing thread used to PTRACE_SINGLESTEP the process
-        * rather than PTRACE_SYSCALL it, allowing the system call to execute
-        * on the host.  The tracing thread will check this flag and
-        * PTRACE_SYSCALL if necessary.
-        */
-       if (test_thread_flag(TIF_SINGLESTEP))
-               current->thread.singlestep_syscall =
-                       is_syscall(PT_REGS_IP(&current->thread.regs));
-
        /*
         * if there's no signal to deliver, we just put the saved sigmask
         * back
index aaee96f07172da74017c2d2d35cdac0ab853a0c8..198269e384c43b174208d683ec4f18242d726fac 100644 (file)
@@ -236,7 +236,9 @@ EXPORT_SYMBOL(strnlen_user);
  *                       argument and comparison of the previous
  *                       futex value with another constant.
  *
- * @encoded_op:        encoded operation to execute
+ * @op:                operation to execute
+ * @oparg:     argument to operation
+ * @oval:      old value at uaddr
  * @uaddr:     pointer to user space address
  *
  * Return:
index fddd1dec27e6d328808ccd663a6bc8ec49a21aef..3e270da6b6f67e2745e2959abd62d60fa89b282b 100644 (file)
@@ -432,9 +432,29 @@ static void time_travel_update_time(unsigned long long next, bool idle)
        time_travel_del_event(&ne);
 }
 
+static void time_travel_update_time_rel(unsigned long long offs)
+{
+       unsigned long flags;
+
+       /*
+        * Disable interrupts before calculating the new time so
+        * that a real timer interrupt (signal) can't happen at
+        * a bad time e.g. after we read time_travel_time but
+        * before we've completed updating the time.
+        */
+       local_irq_save(flags);
+       time_travel_update_time(time_travel_time + offs, false);
+       local_irq_restore(flags);
+}
+
 void time_travel_ndelay(unsigned long nsec)
 {
-       time_travel_update_time(time_travel_time + nsec, false);
+       /*
+        * Not strictly needed to use _rel() version since this is
+        * only used in INFCPU/EXT modes, but it doesn't hurt and
+        * is more readable too.
+        */
+       time_travel_update_time_rel(nsec);
 }
 EXPORT_SYMBOL(time_travel_ndelay);
 
@@ -568,7 +588,11 @@ static void time_travel_set_start(void)
 #define time_travel_time 0
 #define time_travel_ext_waiting 0
 
-static inline void time_travel_update_time(unsigned long long ns, bool retearly)
+static inline void time_travel_update_time(unsigned long long ns, bool idle)
+{
+}
+
+static inline void time_travel_update_time_rel(unsigned long long offs)
 {
 }
 
@@ -720,9 +744,7 @@ static u64 timer_read(struct clocksource *cs)
                 */
                if (!irqs_disabled() && !in_interrupt() && !in_softirq() &&
                    !time_travel_ext_waiting)
-                       time_travel_update_time(time_travel_time +
-                                               TIMER_MULTIPLIER,
-                                               false);
+                       time_travel_update_time_rel(TIMER_MULTIPLIER);
                return time_travel_time / TIMER_MULTIPLIER;
        }
 
index b459745f52e248063e44909bb5705cbe05932da1..3cb8ac63be6ed90af0bc2a17f3905b257226ea4c 100644 (file)
@@ -46,7 +46,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
        unsigned long stack, sp;
        int pid, fds[2], ret, n;
 
-       stack = alloc_stack(0, __cant_sleep());
+       stack = alloc_stack(0, __uml_cant_sleep());
        if (stack == 0)
                return -ENOMEM;
 
@@ -70,7 +70,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
        data.pre_data = pre_data;
        data.argv = argv;
        data.fd = fds[1];
-       data.buf = __cant_sleep() ? uml_kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
+       data.buf = __uml_cant_sleep() ? uml_kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
                                        uml_kmalloc(PATH_MAX, UM_GFP_KERNEL);
        pid = clone(helper_child, (void *) sp, CLONE_VM, &data);
        if (pid < 0) {
@@ -121,7 +121,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
        unsigned long stack, sp;
        int pid, status, err;
 
-       stack = alloc_stack(0, __cant_sleep());
+       stack = alloc_stack(0, __uml_cant_sleep());
        if (stack == 0)
                return -ENOMEM;
 
index b123955be7accf01958684dbbd2fea0ef8130202..bd80b921add06ce5c5c244a5cd29cc1e52ac1b74 100644 (file)
 #include <sysdep/ptrace_user.h>
 #include <registers.h>
 
-int save_registers(int pid, struct uml_pt_regs *regs)
-{
-       int err;
-
-       err = ptrace(PTRACE_GETREGS, pid, 0, regs->gp);
-       if (err < 0)
-               return -errno;
-       return 0;
-}
-
-int restore_pid_registers(int pid, struct uml_pt_regs *regs)
-{
-       int err;
-
-       err = ptrace(PTRACE_SETREGS, pid, 0, regs->gp);
-       if (err < 0)
-               return -errno;
-       return 0;
-}
-
 /* This is set once at boot time and not changed thereafter */
 
 static unsigned long exec_regs[MAX_REG_NR];
index 9464833e741af3370d6d8910071e1b7e0905c8ac..1f5c3f2523d1e29782eb2edc04de3728ef6de8db 100644 (file)
@@ -177,48 +177,11 @@ static void handle_segv(int pid, struct uml_pt_regs *regs, unsigned long *aux_fp
        segv(regs->faultinfo, 0, 1, NULL);
 }
 
-/*
- * To use the same value of using_sysemu as the caller, ask it that value
- * (in local_using_sysemu
- */
-static void handle_trap(int pid, struct uml_pt_regs *regs,
-                       int local_using_sysemu)
+static void handle_trap(int pid, struct uml_pt_regs *regs)
 {
-       int err, status;
-
        if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
                fatal_sigsegv();
 
-       if (!local_using_sysemu)
-       {
-               err = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET,
-                            __NR_getpid);
-               if (err < 0) {
-                       printk(UM_KERN_ERR "%s - nullifying syscall failed, errno = %d\n",
-                              __func__, errno);
-                       fatal_sigsegv();
-               }
-
-               err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
-               if (err < 0) {
-                       printk(UM_KERN_ERR "%s - continuing to end of syscall failed, errno = %d\n",
-                              __func__, errno);
-                       fatal_sigsegv();
-               }
-
-               CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
-               if ((err < 0) || !WIFSTOPPED(status) ||
-                   (WSTOPSIG(status) != SIGTRAP + 0x80)) {
-                       err = ptrace_dump_regs(pid);
-                       if (err)
-                               printk(UM_KERN_ERR "Failed to get registers from process, errno = %d\n",
-                                      -err);
-                       printk(UM_KERN_ERR "%s - failed to wait at end of syscall, errno = %d, status = %d\n",
-                              __func__, errno, status);
-                       fatal_sigsegv();
-               }
-       }
-
        handle_syscall(regs);
 }
 
@@ -226,7 +189,7 @@ extern char __syscall_stub_start[];
 
 /**
  * userspace_tramp() - userspace trampoline
- * @stack:     pointer to the new userspace stack page, can be NULL, if? FIXME:
+ * @stack:     pointer to the new userspace stack page
  *
  * The userspace trampoline is used to setup a new userspace process in start_userspace() after it was clone()'ed.
  * This function will run on a temporary stack page.
@@ -241,9 +204,13 @@ extern char __syscall_stub_start[];
  */
 static int userspace_tramp(void *stack)
 {
+       struct sigaction sa;
        void *addr;
        int fd;
        unsigned long long offset;
+       unsigned long segv_handler = STUB_CODE +
+                                    (unsigned long) stub_segv_handler -
+                                    (unsigned long) __syscall_stub_start;
 
        ptrace(PTRACE_TRACEME, 0, 0, 0);
 
@@ -254,39 +221,30 @@ static int userspace_tramp(void *stack)
        addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
                      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
        if (addr == MAP_FAILED) {
-               printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, errno = %d\n",
-                      STUB_CODE, errno);
+               os_info("mapping mmap stub at 0x%lx failed, errno = %d\n",
+                       STUB_CODE, errno);
                exit(1);
        }
 
-       if (stack != NULL) {
-               fd = phys_mapping(uml_to_phys(stack), &offset);
-               addr = mmap((void *) STUB_DATA,
-                           STUB_DATA_PAGES * UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
-                           MAP_FIXED | MAP_SHARED, fd, offset);
-               if (addr == MAP_FAILED) {
-                       printk(UM_KERN_ERR "mapping segfault stack at 0x%lx failed, errno = %d\n",
-                              STUB_DATA, errno);
-                       exit(1);
-               }
+       fd = phys_mapping(uml_to_phys(stack), &offset);
+       addr = mmap((void *) STUB_DATA,
+                   STUB_DATA_PAGES * UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
+                   MAP_FIXED | MAP_SHARED, fd, offset);
+       if (addr == MAP_FAILED) {
+               os_info("mapping segfault stack at 0x%lx failed, errno = %d\n",
+                       STUB_DATA, errno);
+               exit(1);
        }
-       if (stack != NULL) {
-               struct sigaction sa;
-
-               unsigned long v = STUB_CODE +
-                                 (unsigned long) stub_segv_handler -
-                                 (unsigned long) __syscall_stub_start;
-
-               set_sigstack((void *) STUB_DATA, STUB_DATA_PAGES * UM_KERN_PAGE_SIZE);
-               sigemptyset(&sa.sa_mask);
-               sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
-               sa.sa_sigaction = (void *) v;
-               sa.sa_restorer = NULL;
-               if (sigaction(SIGSEGV, &sa, NULL) < 0) {
-                       printk(UM_KERN_ERR "%s - setting SIGSEGV handler failed - errno = %d\n",
-                              __func__, errno);
-                       exit(1);
-               }
+
+       set_sigstack((void *) STUB_DATA, STUB_DATA_PAGES * UM_KERN_PAGE_SIZE);
+       sigemptyset(&sa.sa_mask);
+       sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
+       sa.sa_sigaction = (void *) segv_handler;
+       sa.sa_restorer = NULL;
+       if (sigaction(SIGSEGV, &sa, NULL) < 0) {
+               os_info("%s - setting SIGSEGV handler failed - errno = %d\n",
+                       __func__, errno);
+               exit(1);
        }
 
        kill(os_getpid(), SIGSTOP);
@@ -298,7 +256,7 @@ int kill_userspace_mm[NR_CPUS];
 
 /**
  * start_userspace() - prepare a new userspace process
- * @stub_stack:        pointer to the stub stack. Can be NULL, if? FIXME:
+ * @stub_stack:        pointer to the stub stack.
  *
  * Setups a new temporary stack page that is used while userspace_tramp() runs
  * Clones the kernel process into a new userspace process, with FDs only.
@@ -355,10 +313,10 @@ int start_userspace(unsigned long stub_stack)
                goto out_kill;
        }
 
-       if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
+       if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
                   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
                err = -errno;
-               printk(UM_KERN_ERR "%s : PTRACE_OLDSETOPTIONS failed, errno = %d\n",
+               printk(UM_KERN_ERR "%s : PTRACE_SETOPTIONS failed, errno = %d\n",
                       __func__, errno);
                goto out_kill;
        }
@@ -380,8 +338,6 @@ int start_userspace(unsigned long stub_stack)
 void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
 {
        int err, status, op, pid = userspace_pid[0];
-       /* To prevent races if using_sysemu changes under us.*/
-       int local_using_sysemu;
        siginfo_t si;
 
        /* Handle any immediate reschedules or signals */
@@ -411,11 +367,10 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
                        fatal_sigsegv();
                }
 
-               /* Now we set local_using_sysemu to be used for one loop */
-               local_using_sysemu = get_using_sysemu();
-
-               op = SELECT_PTRACE_OPERATION(local_using_sysemu,
-                                            singlestepping(NULL));
+               if (singlestepping())
+                       op = PTRACE_SYSEMU_SINGLESTEP;
+               else
+                       op = PTRACE_SYSEMU;
 
                if (ptrace(op, pid, 0, 0)) {
                        printk(UM_KERN_ERR "%s - ptrace continue failed, op = %d, errno = %d\n",
@@ -474,7 +429,7 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
                                else handle_segv(pid, regs, aux_fp_regs);
                                break;
                        case SIGTRAP + 0x80:
-                               handle_trap(pid, regs, local_using_sysemu);
+                               handle_trap(pid, regs);
                                break;
                        case SIGTRAP:
                                relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
@@ -597,10 +552,10 @@ int copy_context_skas0(unsigned long new_stack, int pid)
                goto out_kill;
        }
 
-       if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
+       if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
                   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
                err = -errno;
-               printk(UM_KERN_ERR "%s : PTRACE_OLDSETOPTIONS failed, errno = %d\n",
+               printk(UM_KERN_ERR "%s : PTRACE_SETOPTIONS failed, errno = %d\n",
                       __func__, errno);
                goto out_kill;
        }
index e3ee4db58b40d0ee85149e86a8b8b28b52c0ac22..8b0e98ab842ccaabdb87e0f2e2c796be677d8587 100644 (file)
@@ -112,102 +112,32 @@ static int start_ptraced_child(void)
        return pid;
 }
 
-/* When testing for SYSEMU support, if it is one of the broken versions, we
- * must just avoid using sysemu, not panic, but only if SYSEMU features are
- * broken.
- * So only for SYSEMU features we test mustpanic, while normal host features
- * must work anyway!
- */
-static int stop_ptraced_child(int pid, int exitcode, int mustexit)
+static void stop_ptraced_child(int pid, int exitcode)
 {
-       int status, n, ret = 0;
+       int status, n;
+
+       if (ptrace(PTRACE_CONT, pid, 0, 0) < 0)
+               fatal_perror("stop_ptraced_child : ptrace failed");
 
-       if (ptrace(PTRACE_CONT, pid, 0, 0) < 0) {
-               perror("stop_ptraced_child : ptrace failed");
-               return -1;
-       }
        CATCH_EINTR(n = waitpid(pid, &status, 0));
        if (!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
                int exit_with = WEXITSTATUS(status);
-               if (exit_with == 2)
-                       non_fatal("check_ptrace : child exited with status 2. "
-                                 "\nDisabling SYSEMU support.\n");
-               non_fatal("check_ptrace : child exited with exitcode %d, while "
-                         "expecting %d; status 0x%x\n", exit_with,
-                         exitcode, status);
-               if (mustexit)
-                       exit(1);
-               ret = -1;
+               fatal("stop_ptraced_child : child exited with exitcode %d, "
+                     "while expecting %d; status 0x%x\n", exit_with,
+                     exitcode, status);
        }
-
-       return ret;
-}
-
-/* Changed only during early boot */
-static int force_sysemu_disabled = 0;
-
-static int __init nosysemu_cmd_param(char *str, int* add)
-{
-       force_sysemu_disabled = 1;
-       return 0;
 }
 
-__uml_setup("nosysemu", nosysemu_cmd_param,
-"nosysemu\n"
-"    Turns off syscall emulation patch for ptrace (SYSEMU).\n"
-"    SYSEMU is a performance-patch introduced by Laurent Vivier. It changes\n"
-"    behaviour of ptrace() and helps reduce host context switch rates.\n"
-"    To make it work, you need a kernel patch for your host, too.\n"
-"    See http://perso.wanadoo.fr/laurent.vivier/UML/ for further \n"
-"    information.\n\n");
-
 static void __init check_sysemu(void)
 {
-       unsigned long regs[MAX_REG_NR];
        int pid, n, status, count=0;
 
-       os_info("Checking syscall emulation patch for ptrace...");
-       sysemu_supported = 0;
-       pid = start_ptraced_child();
-
-       if (ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
-               goto fail;
-
-       CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
-       if (n < 0)
-               fatal_perror("check_sysemu : wait failed");
-       if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP))
-               fatal("check_sysemu : expected SIGTRAP, got status = %d\n",
-                     status);
-
-       if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
-               fatal_perror("check_sysemu : PTRACE_GETREGS failed");
-       if (PT_SYSCALL_NR(regs) != __NR_getpid) {
-               non_fatal("check_sysemu got system call number %d, "
-                         "expected %d...", PT_SYSCALL_NR(regs), __NR_getpid);
-               goto fail;
-       }
-
-       n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_RET_OFFSET, os_getpid());
-       if (n < 0) {
-               non_fatal("check_sysemu : failed to modify system call "
-                         "return");
-               goto fail;
-       }
-
-       if (stop_ptraced_child(pid, 0, 0) < 0)
-               goto fail_stopped;
-
-       sysemu_supported = 1;
-       os_info("OK\n");
-       set_using_sysemu(!force_sysemu_disabled);
-
-       os_info("Checking advanced syscall emulation patch for ptrace...");
+       os_info("Checking syscall emulation for ptrace...");
        pid = start_ptraced_child();
 
-       if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0,
+       if ((ptrace(PTRACE_SETOPTIONS, pid, 0,
                   (void *) PTRACE_O_TRACESYSGOOD) < 0))
-               fatal_perror("check_sysemu: PTRACE_OLDSETOPTIONS failed");
+               fatal_perror("check_sysemu: PTRACE_SETOPTIONS failed");
 
        while (1) {
                count++;
@@ -240,20 +170,15 @@ static void __init check_sysemu(void)
                        goto fail;
                }
        }
-       if (stop_ptraced_child(pid, 0, 0) < 0)
-               goto fail_stopped;
+       stop_ptraced_child(pid, 0);
 
-       sysemu_supported = 2;
        os_info("OK\n");
 
-       if (!force_sysemu_disabled)
-               set_using_sysemu(sysemu_supported);
        return;
 
 fail:
-       stop_ptraced_child(pid, 1, 0);
-fail_stopped:
-       non_fatal("missing\n");
+       stop_ptraced_child(pid, 1);
+       fatal("missing\n");
 }
 
 static void __init check_ptrace(void)
@@ -263,9 +188,9 @@ static void __init check_ptrace(void)
        os_info("Checking that ptrace can change system call numbers...");
        pid = start_ptraced_child();
 
-       if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0,
+       if ((ptrace(PTRACE_SETOPTIONS, pid, 0,
                   (void *) PTRACE_O_TRACESYSGOOD) < 0))
-               fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed");
+               fatal_perror("check_ptrace: PTRACE_SETOPTIONS failed");
 
        while (1) {
                if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0)
@@ -291,7 +216,7 @@ static void __init check_ptrace(void)
                        break;
                }
        }
-       stop_ptraced_child(pid, 0, 1);
+       stop_ptraced_child(pid, 0);
        os_info("OK\n");
        check_sysemu();
 }
@@ -370,7 +295,7 @@ void __init os_early_checks(void)
        pid = start_ptraced_child();
        if (init_pid_registers(pid))
                fatal("Failed to initialize default registers");
-       stop_ptraced_child(pid, 1, 1);
+       stop_ptraced_child(pid, 1);
 }
 
 int __init parse_iomem(char *str, int *add)
index fc0f2a9dee5af9665a5394b2b0607c0cf945082c..1dca4ffbd572f77ac0747feff11cdb9379ac5936 100644 (file)
@@ -173,23 +173,38 @@ __uml_setup("quiet", quiet_cmd_param,
 "quiet\n"
 "    Turns off information messages during boot.\n\n");
 
+/*
+ * The os_info/os_warn functions will be called by helper threads. These
+ * have a very limited stack size and using the libc formatting functions
+ * may overflow the stack.
+ * So pull in the kernel vscnprintf and use that instead with a fixed
+ * on-stack buffer.
+ */
+int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+
 void os_info(const char *fmt, ...)
 {
+       char buf[256];
        va_list list;
+       int len;
 
        if (quiet_info)
                return;
 
        va_start(list, fmt);
-       vfprintf(stderr, fmt, list);
+       len = vscnprintf(buf, sizeof(buf), fmt, list);
+       fwrite(buf, len, 1, stderr);
        va_end(list);
 }
 
 void os_warn(const char *fmt, ...)
 {
+       char buf[256];
        va_list list;
+       int len;
 
        va_start(list, fmt);
-       vfprintf(stderr, fmt, list);
+       len = vscnprintf(buf, sizeof(buf), fmt, list);
+       fwrite(buf, len, 1, stderr);
        va_end(list);
 }
index 53f2e7797b1df47e3029e4174ade82f1a8987779..5edec175b9bfc92dfac8832fc3600b843407828b 100644 (file)
@@ -59,6 +59,7 @@ config X86
        #
        select ACPI_LEGACY_TABLES_LOOKUP        if ACPI
        select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+       select ACPI_HOTPLUG_CPU                 if ACPI_PROCESSOR && HOTPLUG_CPU
        select ARCH_32BIT_OFF_T                 if X86_32
        select ARCH_CLOCKSOURCE_INIT
        select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
@@ -71,6 +72,7 @@ config X86
        select ARCH_HAS_CACHE_LINE_SIZE
        select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
        select ARCH_HAS_CPU_FINALIZE_INIT
+       select ARCH_HAS_CPU_PASID               if IOMMU_SVA
        select ARCH_HAS_CURRENT_STACK_POINTER
        select ARCH_HAS_DEBUG_VIRTUAL
        select ARCH_HAS_DEBUG_VM_PGTABLE        if !X86_PAE
@@ -148,6 +150,7 @@ config X86
        select GENERIC_CLOCKEVENTS_MIN_ADJUST
        select GENERIC_CMOS_UPDATE
        select GENERIC_CPU_AUTOPROBE
+       select GENERIC_CPU_DEVICES
        select GENERIC_CPU_VULNERABILITIES
        select GENERIC_EARLY_IOREMAP
        select GENERIC_ENTRY
@@ -1967,6 +1970,11 @@ config INTEL_TDX_HOST
        depends on CPU_SUP_INTEL
        depends on X86_64
        depends on KVM_INTEL
+       depends on X86_X2APIC
+       select ARCH_KEEP_MEMBLOCK
+       depends on CONTIG_ALLOC
+       depends on !KEXEC_CORE
+       depends on X86_MCE
        help
          Intel Trust Domain Extensions (TDX) protects guest VMs from malicious
          host and certain physical attacks.  This option enables necessary TDX
index b9224cf2ee4d6fcb234be76e072d37fa1cc7ad53..2a7279d80460a8adf0218a954646d9d8343ddf3e 100644 (file)
@@ -379,7 +379,7 @@ config X86_CMOV
 config X86_MINIMUM_CPU_FAMILY
        int
        default "64" if X86_64
-       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
+       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
        default "5" if X86_32 && X86_CMPXCHG64
        default "4"
 
index 1a068de12a564fe452cd5c003feb907fd3de42fd..2264db14a25d3b034ffa93685d6353564d4a9225 100644 (file)
@@ -112,13 +112,13 @@ ifeq ($(CONFIG_X86_32),y)
         # temporary until string.h is fixed
         KBUILD_CFLAGS += -ffreestanding
 
-       ifeq ($(CONFIG_STACKPROTECTOR),y)
-               ifeq ($(CONFIG_SMP),y)
+        ifeq ($(CONFIG_STACKPROTECTOR),y)
+                ifeq ($(CONFIG_SMP),y)
                        KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
-               else
+                else
                        KBUILD_CFLAGS += -mstack-protector-guard=global
-               endif
-       endif
+                endif
+        endif
 else
         BITS := 64
         UTS_MACHINE := x86_64
index b2771710ed989cc805e6310bd88a1743350a9dc5..a1bbedd989e42ed5f9e433f556613613094ae74d 100644 (file)
@@ -106,8 +106,7 @@ extra_header_fields:
        .word   0                               # MinorSubsystemVersion
        .long   0                               # Win32VersionValue
 
-       .long   setup_size + ZO__end + pecompat_vsize
-                                               # SizeOfImage
+       .long   setup_size + ZO__end            # SizeOfImage
 
        .long   salign                          # SizeOfHeaders
        .long   0                               # CheckSum
@@ -143,7 +142,7 @@ section_table:
        .ascii  ".setup"
        .byte   0
        .byte   0
-       .long   setup_size - salign             # VirtualSize
+       .long   pecompat_fstart - salign        # VirtualSize
        .long   salign                          # VirtualAddress
        .long   pecompat_fstart - salign        # SizeOfRawData
        .long   salign                          # PointerToRawData
@@ -156,8 +155,8 @@ section_table:
 #ifdef CONFIG_EFI_MIXED
        .asciz  ".compat"
 
-       .long   8                               # VirtualSize
-       .long   setup_size + ZO__end            # VirtualAddress
+       .long   pecompat_fsize                  # VirtualSize
+       .long   pecompat_fstart                 # VirtualAddress
        .long   pecompat_fsize                  # SizeOfRawData
        .long   pecompat_fstart                 # PointerToRawData
 
@@ -172,17 +171,16 @@ section_table:
         * modes this image supports.
         */
        .pushsection ".pecompat", "a", @progbits
-       .balign falign
-       .set    pecompat_vsize, salign
+       .balign salign
        .globl  pecompat_fstart
 pecompat_fstart:
        .byte   0x1                             # Version
        .byte   8                               # Size
        .word   IMAGE_FILE_MACHINE_I386         # PE machine type
        .long   setup_size + ZO_efi32_pe_entry  # Entrypoint
+       .byte   0x0                             # Sentinel
        .popsection
 #else
-       .set    pecompat_vsize, 0
        .set    pecompat_fstart, setup_size
 #endif
        .ascii  ".text"
index 83bb7efad8ae7139ca66f850d7bb21b4859bd3e0..3a2d1360abb016902495f5879632335d883b8c03 100644 (file)
@@ -24,6 +24,9 @@ SECTIONS
        .text           : { *(.text .text.*) }
        .text32         : { *(.text32) }
 
+       .pecompat       : { *(.pecompat) }
+       PROVIDE(pecompat_fsize = setup_size - pecompat_fstart);
+
        . = ALIGN(16);
        .rodata         : { *(.rodata*) }
 
@@ -36,9 +39,6 @@ SECTIONS
        . = ALIGN(16);
        .data           : { *(.data*) }
 
-       .pecompat       : { *(.pecompat) }
-       PROVIDE(pecompat_fsize = setup_size - pecompat_fstart);
-
        .signature      : {
                setup_sig = .;
                LONG(0x5a5aaa55)
index 78e413269791ee6e967873b6f417c418b771a820..1655aa56a0a5126ab6dcc3ec8221f9fbf8822eb5 100644 (file)
@@ -22,13 +22,13 @@ static unsigned long try_accept_one(phys_addr_t start, unsigned long len,
         */
        switch (pg_level) {
        case PG_LEVEL_4K:
-               page_size = 0;
+               page_size = TDX_PS_4K;
                break;
        case PG_LEVEL_2M:
-               page_size = 1;
+               page_size = TDX_PS_2M;
                break;
        case PG_LEVEL_1G:
-               page_size = 2;
+               page_size = TDX_PS_1G;
                break;
        default:
                return 0;
index 6ae2d16a7613b714cb58283dafa600db5829ba6f..76c310b19b11d898db11cf498d7c82449bbf7dc2 100644 (file)
@@ -10,13 +10,14 @@ enum cc_vendor {
        CC_VENDOR_INTEL,
 };
 
-extern enum cc_vendor cc_vendor;
-
 #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+extern enum cc_vendor cc_vendor;
 void cc_set_mask(u64 mask);
 u64 cc_mkenc(u64 val);
 u64 cc_mkdec(u64 val);
 #else
+#define cc_vendor (CC_VENDOR_NONE)
+
 static inline u64 cc_mkenc(u64 val)
 {
        return val;
index fecc4fe1d68aff799c7b91b363d69e961cefbab1..f8f9a9b7939587b2b8f6e00794e94ea69e6a338a 100644 (file)
@@ -23,10 +23,6 @@ static inline void prefill_possible_map(void) {}
 
 #endif /* CONFIG_SMP */
 
-struct x86_cpu {
-       struct cpu cpu;
-};
-
 #ifdef CONFIG_HOTPLUG_CPU
 extern void soft_restart_cpu(void);
 #endif
index a26bebbdff87ed20c45bdb98dcc4a8873f5c30f5..a1273698fc430b41951c241b6b76dfa9b7887692 100644 (file)
@@ -168,7 +168,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline bool _static_cpu_has(u16 bit)
 {
-       asm_volatile_goto(
+       asm goto(
                ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
                ".pushsection .altinstr_aux,\"ax\"\n"
                "6:\n"
index 632c26cdeeda5ddc6d71e599acd65a278dd8dc01..fdf723b6f6d0ce9f6742ef3c67adce3c8d57c002 100644 (file)
 #define X86_FEATURE_K6_MTRR            ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
 #define X86_FEATURE_CYRIX_ARR          ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
 #define X86_FEATURE_CENTAUR_MCR                ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-
-/* CPU types for specific tunings: */
 #define X86_FEATURE_K8                 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-/* FREE, was #define X86_FEATURE_K7                    ( 3*32+ 5) "" Athlon */
+#define X86_FEATURE_ZEN5               ( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */
 #define X86_FEATURE_P3                 ( 3*32+ 6) /* "" P3 */
 #define X86_FEATURE_P4                 ( 3*32+ 7) /* "" P4 */
 #define X86_FEATURE_CONSTANT_TSC       ( 3*32+ 8) /* TSC ticks at a constant rate */
 #define X86_FEATURE_CAT_L3             ( 7*32+ 4) /* Cache Allocation Technology L3 */
 #define X86_FEATURE_CAT_L2             ( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3             ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_TDX_HOST_PLATFORM  ( 7*32+ 7) /* Platform supports being a TDX host */
 #define X86_FEATURE_HW_PSTATE          ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK      ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_XCOMPACTED         ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
 #define X86_BUG_EIBRS_PBRSB            X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
 #define X86_BUG_SMT_RSB                        X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
 #define X86_BUG_GDS                    X86_BUG(30) /* CPU is affected by Gather Data Sampling */
+#define X86_BUG_TDX_PW_MCE             X86_BUG(31) /* CPU may incur #MC if non-TD software does partial write to TDX private memory */
 
 /* BUG word 2 */
 #define X86_BUG_SRSO                   X86_BUG(1*32 + 0) /* AMD SRSO bug */
index 197316121f04e154dad9ba4d9a7169674c623dd5..b65e9c46b92210293d767ab01434593c2aad27a0 100644 (file)
 #define INTEL_FAM6_ATOM_CRESTMONT_X    0xAF /* Sierra Forest */
 #define INTEL_FAM6_ATOM_CRESTMONT      0xB6 /* Grand Ridge */
 
+#define INTEL_FAM6_ATOM_DARKMONT_X     0xDD /* Clearwater Forest */
+
 /* Xeon Phi */
 
 #define INTEL_FAM6_XEON_PHI_KNL                0x57 /* Knights Landing */
index 071572e23d3a06783e3a1f63e11bb47e99af9daa..cbbef32517f0049a3df51842162032ff1946e901 100644 (file)
@@ -24,7 +24,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:"
+       asm goto("1:"
                "jmp %l[l_yes] # objtool NOPs this \n\t"
                JUMP_TABLE_ENTRY
                : :  "i" (key), "i" (2 | branch) : : l_yes);
@@ -38,7 +38,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
 {
-       asm_volatile_goto("1:"
+       asm goto("1:"
                ".byte " __stringify(BYTES_NOP5) "\n\t"
                JUMP_TABLE_ENTRY
                : :  "i" (key), "i" (branch) : : l_yes);
@@ -52,7 +52,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
 {
-       asm_volatile_goto("1:"
+       asm goto("1:"
                "jmp %l[l_yes]\n\t"
                JUMP_TABLE_ENTRY
                : :  "i" (key), "i" (branch) : : l_yes);
index 8fa6ac0e2d7665f936756748c0e1b4ab08a2c5a7..d91b37f5b4bb45106ee927fcd98b66f1b82a54c1 100644 (file)
@@ -64,6 +64,7 @@ static inline bool kmsan_virt_addr_valid(void *addr)
 {
        unsigned long x = (unsigned long)addr;
        unsigned long y = x - __START_KERNEL_map;
+       bool ret;
 
        /* use the carry flag to determine if x was < __START_KERNEL_map */
        if (unlikely(x > y)) {
@@ -79,7 +80,21 @@ static inline bool kmsan_virt_addr_valid(void *addr)
                        return false;
        }
 
-       return pfn_valid(x >> PAGE_SHIFT);
+       /*
+        * pfn_valid() relies on RCU, and may call into the scheduler on exiting
+        * the critical section. However, this would result in recursion with
+        * KMSAN. Therefore, disable preemption here, and re-enable preemption
+        * below while suppressing reschedules to avoid recursion.
+        *
+        * Note, this sacrifices occasionally breaking scheduling guarantees.
+        * Although, a kernel compiled with KMSAN has already given up on any
+        * performance guarantees due to being heavily instrumented.
+        */
+       preempt_disable();
+       ret = pfn_valid(x >> PAGE_SHIFT);
+       preempt_enable_no_resched();
+
+       return ret;
 }
 
 #endif /* !MODULE */
index 26b628d84594b93fea349b0a738768cfb9a9a15b..378ed944b849fb0448a13bd7f12a7a542ab7e388 100644 (file)
@@ -55,8 +55,10 @@ KVM_X86_OP(set_rflags)
 KVM_X86_OP(get_if_flag)
 KVM_X86_OP(flush_tlb_all)
 KVM_X86_OP(flush_tlb_current)
+#if IS_ENABLED(CONFIG_HYPERV)
 KVM_X86_OP_OPTIONAL(flush_remote_tlbs)
 KVM_X86_OP_OPTIONAL(flush_remote_tlbs_range)
+#endif
 KVM_X86_OP(flush_tlb_gva)
 KVM_X86_OP(flush_tlb_guest)
 KVM_X86_OP(vcpu_pre_run)
@@ -135,6 +137,7 @@ KVM_X86_OP(msr_filter_changed)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP_OPTIONAL(get_untagged_addr)
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_OPTIONAL
index 6c98f4bb4228ba5a05acb55f30fa965020c206c4..058bc636356a1133ad151457d8bf0b56528e7f39 100644 (file)
@@ -22,7 +22,7 @@ KVM_X86_PMU_OP(get_msr)
 KVM_X86_PMU_OP(set_msr)
 KVM_X86_PMU_OP(refresh)
 KVM_X86_PMU_OP(init)
-KVM_X86_PMU_OP(reset)
+KVM_X86_PMU_OP_OPTIONAL(reset)
 KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
 KVM_X86_PMU_OP_OPTIONAL(cleanup)
 
index 6711da000bb7e97b041098dbb3a7a93b7696f0c0..d271ba20a0b214104a1f11832a1007f5bb35190e 100644 (file)
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
                          | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
-                         | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))
+                         | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \
+                         | X86_CR4_LAM_SUP))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
 
@@ -500,8 +501,23 @@ struct kvm_pmc {
        u8 idx;
        bool is_paused;
        bool intr;
+       /*
+        * Base value of the PMC counter, relative to the *consumed* count in
+        * the associated perf_event.  This value includes counter updates from
+        * the perf_event and emulated_count since the last time the counter
+        * was reprogrammed, but it is *not* the current value as seen by the
+        * guest or userspace.
+        *
+        * The count is relative to the associated perf_event so that KVM
+        * doesn't need to reprogram the perf_event every time the guest writes
+        * to the counter.
+        */
        u64 counter;
-       u64 prev_counter;
+       /*
+        * PMC events triggered by KVM emulation that haven't been fully
+        * processed, i.e. haven't undergone overflow detection.
+        */
+       u64 emulated_counter;
        u64 eventsel;
        struct perf_event *perf_event;
        struct kvm_vcpu *vcpu;
@@ -937,8 +953,10 @@ struct kvm_vcpu_arch {
        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;
 
+#ifdef CONFIG_KVM_HYPERV
        bool hyperv_enabled;
        struct kvm_vcpu_hv *hyperv;
+#endif
 #ifdef CONFIG_KVM_XEN
        struct kvm_vcpu_xen xen;
 #endif
@@ -1095,6 +1113,7 @@ enum hv_tsc_page_status {
        HV_TSC_PAGE_BROKEN,
 };
 
+#ifdef CONFIG_KVM_HYPERV
 /* Hyper-V emulation context */
 struct kvm_hv {
        struct mutex hv_lock;
@@ -1125,9 +1144,11 @@ struct kvm_hv {
         */
        unsigned int synic_auto_eoi_used;
 
-       struct hv_partition_assist_pg *hv_pa_pg;
        struct kvm_hv_syndbg hv_syndbg;
+
+       bool xsaves_xsavec_checked;
 };
+#endif
 
 struct msr_bitmap_range {
        u32 flags;
@@ -1136,6 +1157,7 @@ struct msr_bitmap_range {
        unsigned long *bitmap;
 };
 
+#ifdef CONFIG_KVM_XEN
 /* Xen emulation context */
 struct kvm_xen {
        struct mutex xen_lock;
@@ -1147,6 +1169,7 @@ struct kvm_xen {
        struct idr evtchn_ports;
        unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
 };
+#endif
 
 enum kvm_irqchip_mode {
        KVM_IRQCHIP_NONE,
@@ -1255,6 +1278,7 @@ enum kvm_apicv_inhibit {
 };
 
 struct kvm_arch {
+       unsigned long vm_type;
        unsigned long n_used_mmu_pages;
        unsigned long n_requested_mmu_pages;
        unsigned long n_max_mmu_pages;
@@ -1347,8 +1371,13 @@ struct kvm_arch {
        /* reads protected by irq_srcu, writes by irq_lock */
        struct hlist_head mask_notifier_list;
 
+#ifdef CONFIG_KVM_HYPERV
        struct kvm_hv hyperv;
+#endif
+
+#ifdef CONFIG_KVM_XEN
        struct kvm_xen xen;
+#endif
 
        bool backwards_tsc_observed;
        bool boot_vcpu_runs_old_kvmclock;
@@ -1406,9 +1435,8 @@ struct kvm_arch {
         *      the MMU lock in read mode + RCU or
         *      the MMU lock in write mode
         *
-        * For writes, this list is protected by:
-        *      the MMU lock in read mode + the tdp_mmu_pages_lock or
-        *      the MMU lock in write mode
+        * For writes, this list is protected by tdp_mmu_pages_lock; see
+        * below for the details.
         *
         * Roots will remain in the list until their tdp_mmu_root_count
         * drops to zero, at which point the thread that decremented the
@@ -1425,8 +1453,10 @@ struct kvm_arch {
         *  - possible_nx_huge_pages;
         *  - the possible_nx_huge_page_link field of kvm_mmu_page structs used
         *    by the TDP MMU
-        * It is acceptable, but not necessary, to acquire this lock when
-        * the thread holds the MMU lock in write mode.
+        * Because the lock is only taken within the MMU lock, strictly
+        * speaking it is redundant to acquire this lock when the thread
+        * holds the MMU lock in write mode.  However it often simplifies
+        * the code to do so.
         */
        spinlock_t tdp_mmu_pages_lock;
 #endif /* CONFIG_X86_64 */
@@ -1441,6 +1471,7 @@ struct kvm_arch {
 #if IS_ENABLED(CONFIG_HYPERV)
        hpa_t   hv_root_tdp;
        spinlock_t hv_root_tdp_lock;
+       struct hv_partition_assist_pg *hv_pa_pg;
 #endif
        /*
         * VM-scope maximum vCPU ID. Used to determine the size of structures
@@ -1613,9 +1644,11 @@ struct kvm_x86_ops {
 
        void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
        void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
+#if IS_ENABLED(CONFIG_HYPERV)
        int  (*flush_remote_tlbs)(struct kvm *kvm);
        int  (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
                                        gfn_t nr_pages);
+#endif
 
        /*
         * Flush any TLB entries associated with the given GVA.
@@ -1761,6 +1794,8 @@ struct kvm_x86_ops {
         * Returns vCPU specific APICv inhibit reasons
         */
        unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
+
+       gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
 };
 
 struct kvm_x86_nested_ops {
@@ -1824,6 +1859,7 @@ static inline struct kvm *kvm_arch_alloc_vm(void)
 #define __KVM_HAVE_ARCH_VM_FREE
 void kvm_arch_free_vm(struct kvm *kvm);
 
+#if IS_ENABLED(CONFIG_HYPERV)
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
 static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 {
@@ -1835,6 +1871,15 @@ static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 }
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
+                                                  u64 nr_pages)
+{
+       if (!kvm_x86_ops.flush_remote_tlbs_range)
+               return -EOPNOTSUPP;
+
+       return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages);
+}
+#endif /* CONFIG_HYPERV */
 
 #define kvm_arch_pmi_in_guest(vcpu) \
        ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
@@ -1848,6 +1893,9 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
 void kvm_mmu_init_vm(struct kvm *kvm);
 void kvm_mmu_uninit_vm(struct kvm *kvm);
 
+void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
+                                           struct kvm_memory_slot *slot);
+
 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
@@ -2086,6 +2134,12 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
 void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
                       int tdp_max_root_level, int tdp_huge_page_level);
 
+#ifdef CONFIG_KVM_PRIVATE_MEM
+#define kvm_arch_has_private_mem(kvm) ((kvm)->arch.vm_type != KVM_X86_DEFAULT_VM)
+#else
+#define kvm_arch_has_private_mem(kvm) false
+#endif
+
 static inline u16 kvm_read_ldt(void)
 {
        u16 ldt;
@@ -2133,16 +2187,15 @@ enum {
 #define HF_SMM_MASK            (1 << 1)
 #define HF_SMM_INSIDE_NMI_MASK (1 << 2)
 
-# define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
-# define KVM_ADDRESS_SPACE_NUM 2
+# define KVM_MAX_NR_ADDRESS_SPACES     2
+/* SMM is currently unsupported for guests with private memory. */
+# define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_has_private_mem(kvm) ? 1 : 2)
 # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
 # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
 #else
 # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0)
 #endif
 
-#define KVM_ARCH_WANT_MMU_NOTIFIER
-
 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_cpu_has_extint(struct kvm_vcpu *v);
index 737a52b89e64c11b22a7b902f308ee155eb6be33..f1bd7b91b3c63735738825f15cd3c82fca7579ce 100644 (file)
 #define MSR_RELOAD_PMC0                        0x000014c1
 #define MSR_RELOAD_FIXED_CTR0          0x00001309
 
+/* KeyID partitioning between MKTME and TDX */
+#define MSR_IA32_MKTME_KEYID_PARTITIONING      0x00000087
+
 /*
  * AMD64 MSRs. Not complete. See the architecture manual for a more
  * complete list.
index 4b081e0d3306b79cca3dc222bb7406cde371d517..363266cbcadaf29e5bdeba4b0bfd5ab0ccb7355f 100644 (file)
@@ -13,7 +13,7 @@
 #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...)                   \
 ({                                                                     \
        bool c = false;                                                 \
-       asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"             \
+       asm goto (fullop "; j" #cc " %l[cc_label]"              \
                        : : [var] "m" (_var), ## __VA_ARGS__            \
                        : clobbers : cc_label);                         \
        if (0) {                                                        \
index ccce7ebd8677287359c4de9271fe29ecf6793d31..fdfd41511b02118faa14bab179dbab9d2f588d18 100644 (file)
        (TDX_RDX | TDX_RBX | TDX_RSI | TDX_RDI | TDX_R8  | TDX_R9  | \
         TDX_R10 | TDX_R11 | TDX_R12 | TDX_R13 | TDX_R14 | TDX_R15)
 
+/* TDX supported page sizes from the TDX module ABI. */
+#define TDX_PS_4K      0
+#define TDX_PS_2M      1
+#define TDX_PS_1G      2
+#define TDX_PS_NR      (TDX_PS_1G + 1)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler_attributes.h>
index d6cd9344f6c78e5555486e5d9f231fd27de9da6a..48f8dd47cf6882ac9e3920d6e7105c0eff430528 100644 (file)
@@ -205,7 +205,7 @@ static inline void clwb(volatile void *__p)
 #ifdef CONFIG_X86_USER_SHADOW_STACK
 static inline int write_user_shstk_64(u64 __user *addr, u64 val)
 {
-       asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
+       asm goto("1: wrussq %[val], (%[addr])\n"
                          _ASM_EXTABLE(1b, %l[fail])
                          :: [addr] "r" (addr), [val] "r" (val)
                          :: fail);
index 21f9407be5d357a8f4204addc66841dc50d9bf51..7e88705e907f411b416d25e533e06623997555ea 100644 (file)
@@ -58,12 +58,29 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
                ,,regs->di,,regs->si,,regs->dx                          \
                ,,regs->r10,,regs->r8,,regs->r9)                        \
 
+
+/* SYSCALL_PT_ARGS is Adapted from s390x */
+#define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6)                     \
+       SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
+#define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5)                         \
+       SYSCALL_PT_ARG4(m, t1, t2, t3, t4),  m(t5, (regs->di))
+#define SYSCALL_PT_ARG4(m, t1, t2, t3, t4)                             \
+       SYSCALL_PT_ARG3(m, t1, t2, t3),  m(t4, (regs->si))
+#define SYSCALL_PT_ARG3(m, t1, t2, t3)                                 \
+       SYSCALL_PT_ARG2(m, t1, t2), m(t3, (regs->dx))
+#define SYSCALL_PT_ARG2(m, t1, t2)                                     \
+       SYSCALL_PT_ARG1(m, t1), m(t2, (regs->cx))
+#define SYSCALL_PT_ARG1(m, t1) m(t1, (regs->bx))
+#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)
+
+#define __SC_COMPAT_CAST(t, a)                                         \
+       (__typeof(__builtin_choose_expr(__TYPE_IS_L(t), 0, 0U)))        \
+       (unsigned int)a
+
 /* Mapping of registers to parameters for syscalls on i386 */
 #define SC_IA32_REGS_TO_ARGS(x, ...)                                   \
-       __MAP(x,__SC_ARGS                                               \
-             ,,(unsigned int)regs->bx,,(unsigned int)regs->cx          \
-             ,,(unsigned int)regs->dx,,(unsigned int)regs->si          \
-             ,,(unsigned int)regs->di,,(unsigned int)regs->bp)
+       SYSCALL_PT_ARGS(x, __SC_COMPAT_CAST,                            \
+                       __MAP(x, __SC_TYPE, __VA_ARGS__))               \
 
 #define __SYS_STUB0(abi, name)                                         \
        long __##abi##_##name(const struct pt_regs *regs);              \
index f3d5305a60fc50b13708d80ffecab0a529239ee0..eba178996d8459b2ead1800e83157346f9c50c04 100644 (file)
 #define TDX_SEAMCALL_GP                        (TDX_SW_ERROR | X86_TRAP_GP)
 #define TDX_SEAMCALL_UD                        (TDX_SW_ERROR | X86_TRAP_UD)
 
+/*
+ * TDX module SEAMCALL leaf function error codes
+ */
+#define TDX_SUCCESS            0ULL
+#define TDX_RND_NO_ENTROPY     0x8000020300000000ULL
+
 #ifndef __ASSEMBLY__
 
+#include <uapi/asm/mce.h>
+
 /*
  * Used by the #VE exception handler to gather the #VE exception
  * info from the TDX module. This is a software only structure
@@ -83,6 +91,36 @@ static inline long tdx_kvm_hypercall(unsigned int nr, unsigned long p1,
 u64 __seamcall(u64 fn, struct tdx_module_args *args);
 u64 __seamcall_ret(u64 fn, struct tdx_module_args *args);
 u64 __seamcall_saved_ret(u64 fn, struct tdx_module_args *args);
+void tdx_init(void);
+
+#include <asm/archrandom.h>
+
+typedef u64 (*sc_func_t)(u64 fn, struct tdx_module_args *args);
+
+static inline u64 sc_retry(sc_func_t func, u64 fn,
+                          struct tdx_module_args *args)
+{
+       int retry = RDRAND_RETRY_LOOPS;
+       u64 ret;
+
+       do {
+               ret = func(fn, args);
+       } while (ret == TDX_RND_NO_ENTROPY && --retry);
+
+       return ret;
+}
+
+#define seamcall(_fn, _args)           sc_retry(__seamcall, (_fn), (_args))
+#define seamcall_ret(_fn, _args)       sc_retry(__seamcall_ret, (_fn), (_args))
+#define seamcall_saved_ret(_fn, _args) sc_retry(__seamcall_saved_ret, (_fn), (_args))
+int tdx_cpu_enable(void);
+int tdx_enable(void);
+const char *tdx_dump_mce_info(struct mce *m);
+#else
+static inline void tdx_init(void) { }
+static inline int tdx_cpu_enable(void) { return -ENODEV; }
+static inline int tdx_enable(void)  { return -ENODEV; }
+static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
 #endif /* CONFIG_INTEL_TDX_HOST */
 
 #endif /* !__ASSEMBLY__ */
index 5c367c1290c355fb3849800a38c88c3553175903..237dc8cdd12b9482f38f8543b85dbde88fb98d65 100644 (file)
@@ -133,7 +133,7 @@ extern int __get_user_bad(void);
 
 #ifdef CONFIG_X86_32
 #define __put_user_goto_u64(x, addr, label)                    \
-       asm_volatile_goto("\n"                                  \
+       asm goto("\n"                                   \
                     "1:        movl %%eax,0(%1)\n"             \
                     "2:        movl %%edx,4(%1)\n"             \
                     _ASM_EXTABLE_UA(1b, %l2)                   \
@@ -295,7 +295,7 @@ do {                                                                        \
 } while (0)
 
 #define __get_user_asm(x, addr, itype, ltype, label)                   \
-       asm_volatile_goto("\n"                                          \
+       asm_goto_output("\n"                                            \
                     "1:        mov"itype" %[umem],%[output]\n"         \
                     _ASM_EXTABLE_UA(1b, %l2)                           \
                     : [output] ltype(x)                                \
@@ -375,7 +375,7 @@ do {                                                                        \
        __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);              \
        __typeof__(*(_ptr)) __old = *_old;                              \
        __typeof__(*(_ptr)) __new = (_new);                             \
-       asm_volatile_goto("\n"                                          \
+       asm_goto_output("\n"                                            \
                     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
                     _ASM_EXTABLE_UA(1b, %l[label])                     \
                     : CC_OUT(z) (success),                             \
@@ -394,7 +394,7 @@ do {                                                                        \
        __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);              \
        __typeof__(*(_ptr)) __old = *_old;                              \
        __typeof__(*(_ptr)) __new = (_new);                             \
-       asm_volatile_goto("\n"                                          \
+       asm_goto_output("\n"                                            \
                     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"             \
                     _ASM_EXTABLE_UA(1b, %l[label])                     \
                     : CC_OUT(z) (success),                             \
@@ -477,7 +477,7 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_goto(x, addr, itype, ltype, label)                  \
-       asm_volatile_goto("\n"                                          \
+       asm goto("\n"                                                   \
                "1:     mov"itype" %0,%1\n"                             \
                _ASM_EXTABLE_UA(1b, %l2)                                \
                : : ltype(x), "m" (__m(addr))                           \
index 1a6a1f98794967d260e2898b0dbb62f830d45664..a448d0964fc06ebd0c15cd0b550e3c2cefbf57bf 100644 (file)
@@ -562,4 +562,7 @@ struct kvm_pmu_event_filter {
 /* x86-specific KVM_EXIT_HYPERCALL flags. */
 #define KVM_EXIT_HYPERCALL_LONG_MODE   BIT(0)
 
+#define KVM_X86_DEFAULT_VM     0
+#define KVM_X86_SW_PROTECTED_VM        1
+
 #endif /* _ASM_X86_KVM_H */
index cc130b57542ac4033c1c653809f429306eb3e460..1d85cb7071cb21c84899477ec4a150d2fcc4da43 100644 (file)
@@ -403,7 +403,7 @@ noinstr void BUG_func(void)
 {
        BUG();
 }
-EXPORT_SYMBOL_GPL(BUG_func);
+EXPORT_SYMBOL(BUG_func);
 
 #define CALL_RIP_REL_OPCODE    0xff
 #define CALL_RIP_REL_MODRM     0x15
index 4feaa670d5783b8fe9bb226d0635c09bb15939da..89c0c8a3fc7e6c0c84ce98541d6cd1430ab91b41 100644 (file)
@@ -259,10 +259,9 @@ static u32 __init search_agp_bridge(u32 *order, int *valid_agp)
                                                        order);
                                }
 
-                               /* No multi-function device? */
                                type = read_pci_config_byte(bus, slot, func,
                                                               PCI_HEADER_TYPE);
-                               if (!(type & 0x80))
+                               if (!(type & PCI_HEADER_TYPE_MFD))
                                        break;
                        }
                }
index 9f42d1c59e095ee6923a78cb2ecb04fbe375a438..f3abca334199d8eae235f1560f99448eb9675a27 100644 (file)
@@ -538,7 +538,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 
        /* Figure out Zen generations: */
        switch (c->x86) {
-       case 0x17: {
+       case 0x17:
                switch (c->x86_model) {
                case 0x00 ... 0x2f:
                case 0x50 ... 0x5f:
@@ -554,8 +554,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                        goto warn;
                }
                break;
-       }
-       case 0x19: {
+
+       case 0x19:
                switch (c->x86_model) {
                case 0x00 ... 0x0f:
                case 0x20 ... 0x5f:
@@ -569,7 +569,20 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                        goto warn;
                }
                break;
-       }
+
+       case 0x1a:
+               switch (c->x86_model) {
+               case 0x00 ... 0x0f:
+               case 0x20 ... 0x2f:
+               case 0x40 ... 0x4f:
+               case 0x70 ... 0x7f:
+                       setup_force_cpu_cap(X86_FEATURE_ZEN5);
+                       break;
+               default:
+                       goto warn;
+               }
+               break;
+
        default:
                break;
        }
@@ -1039,6 +1052,11 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
                msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
 }
 
+static void init_amd_zen5(struct cpuinfo_x86 *c)
+{
+       init_amd_zen_common();
+}
+
 static void init_amd(struct cpuinfo_x86 *c)
 {
        u64 vm_cr;
@@ -1084,6 +1102,8 @@ static void init_amd(struct cpuinfo_x86 *c)
                init_amd_zen3(c);
        else if (boot_cpu_has(X86_FEATURE_ZEN4))
                init_amd_zen4(c);
+       else if (boot_cpu_has(X86_FEATURE_ZEN5))
+               init_amd_zen5(c);
 
        /*
         * Enable workaround for FXSAVE leak on CPUs
index 94bff381ef20298f0f31a4753a085daca40bf03a..0b97bcde70c6102a4b82b561c3256ec53b614770 100644 (file)
@@ -66,6 +66,7 @@
 #include <asm/set_memory.h>
 #include <asm/traps.h>
 #include <asm/sev.h>
+#include <asm/tdx.h>
 
 #include "cpu.h"
 
@@ -1986,6 +1987,7 @@ static __init void identify_boot_cpu(void)
        setup_cr_pinning();
 
        tsx_init();
+       tdx_init();
        lkgs_init();
 }
 
index fd5ce12c4f9aa502c200a78dda3741ba70c068dc..bc39252bc54f2ec8a834961639d180b3f84c13ac 100644 (file)
@@ -53,6 +53,7 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/reboot.h>
+#include <asm/tdx.h>
 
 #include "internal.h"
 
@@ -229,12 +230,20 @@ static void wait_for_panic(void)
        panic("Panicing machine check CPU died");
 }
 
+static const char *mce_dump_aux_info(struct mce *m)
+{
+       if (boot_cpu_has_bug(X86_BUG_TDX_PW_MCE))
+               return tdx_dump_mce_info(m);
+
+       return NULL;
+}
+
 static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
 {
        struct llist_node *pending;
        struct mce_evt_llist *l;
        int apei_err = 0;
-       struct page *p;
+       const char *memmsg;
 
        /*
         * Allow instrumentation around external facilities usage. Not that it
@@ -285,6 +294,11 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
        }
        if (exp)
                pr_emerg(HW_ERR "Machine check: %s\n", exp);
+
+       memmsg = mce_dump_aux_info(final);
+       if (memmsg)
+               pr_emerg(HW_ERR "Machine check: %s\n", memmsg);
+
        if (!fake_panic) {
                if (panic_timeout == 0)
                        panic_timeout = mca_cfg.panic_timeout;
@@ -297,6 +311,7 @@ static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
                 */
                if (kexec_crash_loaded()) {
                        if (final && (final->status & MCI_STATUS_ADDRV)) {
+                               struct page *p;
                                p = pfn_to_online_page(final->addr >> PAGE_SHIFT);
                                if (p)
                                        SetPageHWPoison(p);
index a6c1867fc7aa3ea0eb9b24fc1992c90119d2febb..59f4aefc6bc1625fe0a8e3cb43f8f62ad5a89eab 100644 (file)
@@ -779,13 +779,13 @@ static int __init check_dev_quirk(int num, int slot, int func)
        type = read_pci_config_byte(num, slot, func,
                                    PCI_HEADER_TYPE);
 
-       if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
+       if ((type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
                sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
                if (sec > num)
                        early_pci_scan_bus(sec);
        }
 
-       if (!(type & 0x80))
+       if (!(type & PCI_HEADER_TYPE_MFD))
                return -1;
 
        return 0;
index 558076dbde5bfca582139f8de63bd9ffa1050d6f..247f2225aa9f36f0a0fef0a22ed921b4748a7de5 100644 (file)
@@ -274,12 +274,13 @@ static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures,
  * Attempt to restore the FPU registers directly from user memory.
  * Pagefaults are handled and any errors returned are fatal.
  */
-static bool restore_fpregs_from_user(void __user *buf, u64 xrestore,
-                                    bool fx_only, unsigned int size)
+static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only)
 {
        struct fpu *fpu = &current->thread.fpu;
        int ret;
 
+       /* Restore enabled features only. */
+       xrestore &= fpu->fpstate->user_xfeatures;
 retry:
        fpregs_lock();
        /* Ensure that XFD is up to date */
@@ -309,7 +310,7 @@ retry:
                if (ret != X86_TRAP_PF)
                        return false;
 
-               if (!fault_in_readable(buf, size))
+               if (!fault_in_readable(buf, fpu->fpstate->user_size))
                        goto retry;
                return false;
        }
@@ -339,7 +340,6 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
        struct user_i387_ia32_struct env;
        bool success, fx_only = false;
        union fpregs_state *fpregs;
-       unsigned int state_size;
        u64 user_xfeatures = 0;
 
        if (use_xsave()) {
@@ -349,17 +349,14 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
                        return false;
 
                fx_only = !fx_sw_user.magic1;
-               state_size = fx_sw_user.xstate_size;
                user_xfeatures = fx_sw_user.xfeatures;
        } else {
                user_xfeatures = XFEATURE_MASK_FPSSE;
-               state_size = fpu->fpstate->user_size;
        }
 
        if (likely(!ia32_fxstate)) {
                /* Restore the FPU registers directly from user memory. */
-               return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
-                                               state_size);
+               return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only);
        }
 
        /*
index 8ff2bf921519dd4da308636c121bb0e16968abe7..a38d0c93a66e825a38987a65fdd09ad97bbae2ec 100644 (file)
@@ -1438,7 +1438,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
        memset(&curr_time, 0, sizeof(struct rtc_time));
 
        if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) {
-               if (unlikely(mc146818_get_time(&curr_time) < 0)) {
+               if (unlikely(mc146818_get_time(&curr_time, 10) < 0)) {
                        pr_err_ratelimited("unable to read current time from RTC\n");
                        return IRQ_HANDLED;
                }
index dfe9945b9becee7f6d0ca89d00fd3c1eb4e496c5..428ee74002e1eac63d0e269510f536ba2644c3f7 100644 (file)
@@ -434,7 +434,8 @@ static void __init sev_map_percpu_data(void)
 {
        int cpu;
 
-       if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+       if (cc_vendor != CC_VENDOR_AMD ||
+           !cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return;
 
        for_each_possible_cpu(cpu) {
index a95d0900e8c6fddfe1a2c5c1cc98bd3b8ed49f75..5bb395551c441ebc71c1e40ec355eac3f34dac69 100644 (file)
@@ -24,8 +24,8 @@
 
 static int kvmclock __initdata = 1;
 static int kvmclock_vsyscall __initdata = 1;
-static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME;
-static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK;
+static int msr_kvm_system_time __ro_after_init;
+static int msr_kvm_wall_clock __ro_after_init;
 static u64 kvm_sched_clock_offset __ro_after_init;
 
 static int __init parse_no_kvmclock(char *arg)
@@ -195,7 +195,8 @@ static void kvm_setup_secondary_clock(void)
 
 void kvmclock_disable(void)
 {
-       native_write_msr(msr_kvm_system_time, 0, 0);
+       if (msr_kvm_system_time)
+               native_write_msr(msr_kvm_system_time, 0, 0);
 }
 
 static void __init kvmclock_init_mem(void)
@@ -294,7 +295,10 @@ void __init kvmclock_init(void)
        if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
                msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
                msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
-       } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+       } else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+               msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
+               msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
+       } else {
                return;
        }
 
index 1309b9b053386b8470619511d9c435b9d7ab68d0..2e7066980f3e8bf665613ab4fd2f9ccb03ad7627 100644 (file)
@@ -67,7 +67,7 @@ void mach_get_cmos_time(struct timespec64 *now)
                return;
        }
 
-       if (mc146818_get_time(&tm)) {
+       if (mc146818_get_time(&tm, 1000)) {
                pr_err("Unable to read current time from RTC\n");
                now->tv_sec = now->tv_nsec = 0;
                return;
index ec2c21a1844e3e8e128ead89213588ec6828bca9..84201071dfacd186da34cdca12cbda41a39eedf1 100644 (file)
@@ -1031,6 +1031,8 @@ void __init setup_arch(char **cmdline_p)
         *
         * Moreover, on machines with SandyBridge graphics or in setups that use
         * crashkernel the entire 1M is reserved anyway.
+        *
+        * Note the host kernel TDX also requires the first 1MB being reserved.
         */
        x86_platform.realmode_reserve();
 
index 0bab0313003362ef7549bb820bb8c2b6b59259f9..d42c28b8bfd80c15c8b1814f60143cf72b79b541 100644 (file)
 #include <asm/io_apic.h>
 #include <asm/cpu.h>
 
-static DEFINE_PER_CPU(struct x86_cpu, cpu_devices);
-
 #ifdef CONFIG_HOTPLUG_CPU
-int arch_register_cpu(int cpu)
+bool arch_cpu_is_hotpluggable(int cpu)
 {
-       struct x86_cpu *xc = per_cpu_ptr(&cpu_devices, cpu);
-
-       xc->cpu.hotpluggable = cpu > 0;
-       return register_cpu(&xc->cpu, cpu);
-}
-EXPORT_SYMBOL(arch_register_cpu);
-
-void arch_unregister_cpu(int num)
-{
-       unregister_cpu(&per_cpu(cpu_devices, num).cpu);
-}
-EXPORT_SYMBOL(arch_unregister_cpu);
-#else /* CONFIG_HOTPLUG_CPU */
-
-int __init arch_register_cpu(int num)
-{
-       return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
+       return cpu > 0;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
-
-static int __init topology_init(void)
-{
-       int i;
-
-       for_each_present_cpu(i)
-               arch_register_cpu(i);
-
-       return 0;
-}
-subsys_initcall(topology_init);
index b0737a15c470251b1222a52afe28e0d0a82b74d2..c3b2f863acf0f3f28c7402c86de8cbaa47eb930c 100644 (file)
@@ -566,7 +566,7 @@ static bool fixup_iopl_exception(struct pt_regs *regs)
  */
 static bool try_fixup_enqcmd_gp(void)
 {
-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
        u32 pasid;
 
        /*
@@ -592,7 +592,7 @@ static bool try_fixup_enqcmd_gp(void)
        if (!mm_valid_pasid(current->mm))
                return false;
 
-       pasid = current->mm->pasid;
+       pasid = mm_get_enqcmd_pasid(current->mm);
 
        /*
         * Did this thread already have its PASID activated?
index 950c12868d304004ff56e7bc95f7c5f395766a33..87e3da7b0439790dac6b35aa4f95e8e7573284d7 100644 (file)
@@ -23,17 +23,15 @@ config KVM
        depends on HAVE_KVM
        depends on HIGH_RES_TIMERS
        depends on X86_LOCAL_APIC
-       select PREEMPT_NOTIFIERS
-       select MMU_NOTIFIER
+       select KVM_COMMON
+       select KVM_GENERIC_MMU_NOTIFIER
        select HAVE_KVM_IRQCHIP
        select HAVE_KVM_PFNCACHE
-       select HAVE_KVM_IRQFD
        select HAVE_KVM_DIRTY_RING_TSO
        select HAVE_KVM_DIRTY_RING_ACQ_REL
        select IRQ_BYPASS_MANAGER
        select HAVE_KVM_IRQ_BYPASS
        select HAVE_KVM_IRQ_ROUTING
-       select HAVE_KVM_EVENTFD
        select KVM_ASYNC_PF
        select USER_RETURN_NOTIFIER
        select KVM_MMIO
@@ -46,7 +44,6 @@ config KVM
        select KVM_XFER_TO_GUEST_WORK
        select KVM_GENERIC_DIRTYLOG_READ_PROTECT
        select KVM_VFIO
-       select INTERVAL_TREE
        select HAVE_KVM_PM_NOTIFIER if PM
        select KVM_GENERIC_HARDWARE_ENABLING
        help
@@ -65,18 +62,30 @@ config KVM
 
 config KVM_WERROR
        bool "Compile KVM with -Werror"
-       # KASAN may cause the build to fail due to larger frames
-       default y if X86_64 && !KASAN
-       # We use the dependency on !COMPILE_TEST to not be enabled
-       # blindly in allmodconfig or allyesconfig configurations
-       depends on KVM
-       depends on (X86_64 && !KASAN) || !COMPILE_TEST
-       depends on EXPERT
+       # Disallow KVM's -Werror if KASAN is enabled, e.g. to guard against
+       # randomized configs from selecting KVM_WERROR=y, which doesn't play
+       # nice with KASAN.  KASAN builds generates warnings for the default
+       # FRAME_WARN, i.e. KVM_WERROR=y with KASAN=y requires special tuning.
+       # Building KVM with -Werror and KASAN is still doable via enabling
+       # the kernel-wide WERROR=y.
+       depends on KVM && EXPERT && !KASAN
        help
          Add -Werror to the build flags for KVM.
 
          If in doubt, say "N".
 
+config KVM_SW_PROTECTED_VM
+       bool "Enable support for KVM software-protected VMs"
+       depends on EXPERT
+       depends on KVM && X86_64
+       select KVM_GENERIC_PRIVATE_MEM
+       help
+         Enable support for KVM software-protected VMs.  Currently "protected"
+         means the VM can be backed with memory provided by
+         KVM_CREATE_GUEST_MEMFD.
+
+         If unsure, say "N".
+
 config KVM_INTEL
        tristate "KVM for Intel (and compatible) processors support"
        depends on KVM && IA32_FEAT_CTL
@@ -129,6 +138,20 @@ config KVM_SMM
 
          If unsure, say Y.
 
+config KVM_HYPERV
+       bool "Support for Microsoft Hyper-V emulation"
+       depends on KVM
+       default y
+       help
+         Provides KVM support for emulating Microsoft Hyper-V.  This allows KVM
+         to expose a subset of the paravirtualized interfaces defined in the
+         Hyper-V Hypervisor Top-Level Functional Specification (TLFS):
+         https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
+         These interfaces are required for the correct and performant functioning
+         of Windows and Hyper-V guests on KVM.
+
+         If unsure, say "Y".
+
 config KVM_XEN
        bool "Support for Xen hypercall interface"
        depends on KVM
index 80e3fe184d17e64984d2f2d4adba00ec9d5ad57e..475b5fa917a62d03d1a255234833140473b9c737 100644 (file)
@@ -11,25 +11,27 @@ include $(srctree)/virt/kvm/Makefile.kvm
 
 kvm-y                  += x86.o emulate.o i8259.o irq.o lapic.o \
                           i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
-                          hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
+                          debugfs.o mmu/mmu.o mmu/page_track.o \
                           mmu/spte.o
 
-ifdef CONFIG_HYPERV
-kvm-y                  += kvm_onhyperv.o
-endif
-
 kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
+kvm-$(CONFIG_KVM_HYPERV) += hyperv.o
 kvm-$(CONFIG_KVM_XEN)  += xen.o
 kvm-$(CONFIG_KVM_SMM)  += smm.o
 
 kvm-intel-y            += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
-                          vmx/hyperv.o vmx/nested.o vmx/posted_intr.o
+                          vmx/nested.o vmx/posted_intr.o
+
 kvm-intel-$(CONFIG_X86_SGX_KVM)        += vmx/sgx.o
+kvm-intel-$(CONFIG_KVM_HYPERV) += vmx/hyperv.o vmx/hyperv_evmcs.o
 
 kvm-amd-y              += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
-                          svm/sev.o svm/hyperv.o
+                          svm/sev.o
+kvm-amd-$(CONFIG_KVM_HYPERV) += svm/hyperv.o
 
 ifdef CONFIG_HYPERV
+kvm-y                  += kvm_onhyperv.o
+kvm-intel-y            += vmx/vmx_onhyperv.o vmx/hyperv_evmcs.o
 kvm-amd-y              += svm/svm_onhyperv.o
 endif
 
index 42d3f47f4c07d20e5698151d70d5baa3faa11156..adba49afb5fe63b1de9345579615284593e00468 100644 (file)
@@ -314,11 +314,15 @@ EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
 
 static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct kvm_cpuid_entry2 *entry;
 
        entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
                                  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
        return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
+#else
+       return false;
+#endif
 }
 
 static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
@@ -433,11 +437,13 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
                return 0;
        }
 
+#ifdef CONFIG_KVM_HYPERV
        if (kvm_cpuid_has_hyperv(e2, nent)) {
                r = kvm_hv_vcpu_init(vcpu);
                if (r)
                        return r;
        }
+#endif
 
        r = kvm_check_cpuid(vcpu, e2, nent);
        if (r)
@@ -469,7 +475,7 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                return -E2BIG;
 
        if (cpuid->nent) {
-               e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent));
+               e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e));
                if (IS_ERR(e))
                        return PTR_ERR(e);
 
@@ -513,7 +519,7 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                return -E2BIG;
 
        if (cpuid->nent) {
-               e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent));
+               e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2));
                if (IS_ERR(e2))
                        return PTR_ERR(e2);
        }
@@ -671,7 +677,7 @@ void kvm_set_cpu_caps(void)
        kvm_cpu_cap_mask(CPUID_7_1_EAX,
                F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) |
                F(FZRM) | F(FSRS) | F(FSRC) |
-               F(AMX_FP16) | F(AVX_IFMA)
+               F(AMX_FP16) | F(AVX_IFMA) | F(LAM)
        );
 
        kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
@@ -679,6 +685,11 @@ void kvm_set_cpu_caps(void)
                F(AMX_COMPLEX)
        );
 
+       kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
+               F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
+               F(BHI_CTRL) | F(MCDT_NO)
+       );
+
        kvm_cpu_cap_mask(CPUID_D_1_EAX,
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
        );
@@ -960,13 +971,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                break;
        /* function 7 has additional index. */
        case 7:
-               entry->eax = min(entry->eax, 1u);
+               max_idx = entry->eax = min(entry->eax, 2u);
                cpuid_entry_override(entry, CPUID_7_0_EBX);
                cpuid_entry_override(entry, CPUID_7_ECX);
                cpuid_entry_override(entry, CPUID_7_EDX);
 
-               /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
-               if (entry->eax == 1) {
+               /* KVM only supports up to 0x7.2, capped above via min(). */
+               if (max_idx >= 1) {
                        entry = do_host_cpuid(array, function, 1);
                        if (!entry)
                                goto out;
@@ -976,6 +987,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                        entry->ebx = 0;
                        entry->ecx = 0;
                }
+               if (max_idx >= 2) {
+                       entry = do_host_cpuid(array, function, 2);
+                       if (!entry)
+                               goto out;
+
+                       cpuid_entry_override(entry, CPUID_7_2_EDX);
+                       entry->ecx = 0;
+                       entry->ebx = 0;
+                       entry->eax = 0;
+               }
                break;
        case 0xa: { /* Architectural Performance Monitoring */
                union cpuid10_eax eax;
index 0b90532b6e261430c7997e933f59f5531312d627..856e3037e74f3ffc7fdeb72f2067812080d71910 100644 (file)
@@ -47,11 +47,6 @@ static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
        return !(gpa & vcpu->arch.reserved_gpa_bits);
 }
 
-static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
-{
-       return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
-}
-
 static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
                                                 gpa_t gpa, gpa_t alignment)
 {
@@ -279,4 +274,12 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
                        vcpu->arch.governed_features.enabled);
 }
 
+static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+       if (guest_can_use(vcpu, X86_FEATURE_LAM))
+               cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
+
+       return kvm_vcpu_is_legal_gpa(vcpu, cr3);
+}
+
 #endif
index eea6ea7f14af98b76661978eea17ccfea5b2f19d..95ea1a1f7403ea8cd3da1051ac1632d046d8081b 100644 (file)
@@ -111,7 +111,7 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v)
        mutex_lock(&kvm->slots_lock);
        write_lock(&kvm->mmu_lock);
 
-       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+       for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
                int bkt;
 
                slots = __kvm_memslots(kvm, i);
index 2673cd5c46cb486b9e78a20c589462552f0b4747..e223043ef5b26f23be5b2f0606641f66c5cd18aa 100644 (file)
@@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                                       struct segmented_address addr,
                                       unsigned *max_size, unsigned size,
-                                      bool write, bool fetch,
-                                      enum x86emul_mode mode, ulong *linear)
+                                      enum x86emul_mode mode, ulong *linear,
+                                      unsigned int flags)
 {
        struct desc_struct desc;
        bool usable;
@@ -701,7 +701,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
        *max_size = 0;
        switch (mode) {
        case X86EMUL_MODE_PROT64:
-               *linear = la;
+               *linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags);
                va_bits = ctxt_virt_addr_bits(ctxt);
                if (!__is_canonical_address(la, va_bits))
                        goto bad;
@@ -717,11 +717,11 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                if (!usable)
                        goto bad;
                /* code segment in protected mode or read-only data segment */
-               if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
-                                       || !(desc.type & 2)) && write)
+               if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) &&
+                   (flags & X86EMUL_F_WRITE))
                        goto bad;
                /* unreadable code segment */
-               if (!fetch && (desc.type & 8) && !(desc.type & 2))
+               if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if (!(desc.type & 8) && (desc.type & 4)) {
@@ -757,8 +757,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
                     ulong *linear)
 {
        unsigned max_size;
-       return __linearize(ctxt, addr, &max_size, size, write, false,
-                          ctxt->mode, linear);
+       return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
+                          write ? X86EMUL_F_WRITE : 0);
 }
 
 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
@@ -771,7 +771,8 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 
        if (ctxt->op_bytes != sizeof(unsigned long))
                addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-       rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+       rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
+                        X86EMUL_F_FETCH);
        if (rc == X86EMUL_CONTINUE)
                ctxt->_eip = addr.ea;
        return rc;
@@ -907,8 +908,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
         * boundary check itself.  Instead, we use max_size to check
         * against op_size.
         */
-       rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
-                        &linear);
+       rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
+                        X86EMUL_F_FETCH);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
 
@@ -3439,8 +3440,10 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt)
 {
        int rc;
        ulong linear;
+       unsigned int max_size;
 
-       rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
+       rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
+                        &linear, X86EMUL_F_INVLPG);
        if (rc == X86EMUL_CONTINUE)
                ctxt->ops->invlpg(ctxt, linear);
        /* Disable writeback. */
index 423a73395c102ca908453016e416dbdfb7fc2b5b..ad463b1ed4e4a87c29aa9d5af3842fbd4e039e41 100644 (file)
@@ -16,6 +16,7 @@ KVM_GOVERNED_X86_FEATURE(PAUSEFILTER)
 KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD)
 KVM_GOVERNED_X86_FEATURE(VGIF)
 KVM_GOVERNED_X86_FEATURE(VNMI)
+KVM_GOVERNED_X86_FEATURE(LAM)
 
 #undef KVM_GOVERNED_X86_FEATURE
 #undef KVM_GOVERNED_FEATURE
index 4943f6b2bbee491651bdacf288e4cdbda2e49dec..8a47f8541eab7098c991837c2b4e03c4822c445a 100644 (file)
@@ -1322,6 +1322,56 @@ static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
        return false;
 }
 
+#define KVM_HV_WIN2016_GUEST_ID 0x1040a00003839
+#define KVM_HV_WIN2016_GUEST_ID_MASK (~GENMASK_ULL(23, 16)) /* mask out the service version */
+
+/*
+ * Hyper-V enabled Windows Server 2016 SMP VMs fail to boot in !XSAVES && XSAVEC
+ * configuration.
+ * Such configuration can result from, for example, AMD Erratum 1386 workaround.
+ *
+ * Print a notice so users aren't left wondering what's suddenly gone wrong.
+ */
+static void __kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_hv *hv = to_kvm_hv(kvm);
+
+       /* Check again under the hv_lock.  */
+       if (hv->xsaves_xsavec_checked)
+               return;
+
+       if ((hv->hv_guest_os_id & KVM_HV_WIN2016_GUEST_ID_MASK) !=
+           KVM_HV_WIN2016_GUEST_ID)
+               return;
+
+       hv->xsaves_xsavec_checked = true;
+
+       /* UP configurations aren't affected */
+       if (atomic_read(&kvm->online_vcpus) < 2)
+               return;
+
+       if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) ||
+           !guest_cpuid_has(vcpu, X86_FEATURE_XSAVEC))
+               return;
+
+       pr_notice_ratelimited("Booting SMP Windows KVM VM with !XSAVES && XSAVEC. "
+                             "If it fails to boot try disabling XSAVEC in the VM config.\n");
+}
+
+void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
+{
+       struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
+
+       if (!vcpu->arch.hyperv_enabled ||
+           hv->xsaves_xsavec_checked)
+               return;
+
+       mutex_lock(&hv->hv_lock);
+       __kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
+       mutex_unlock(&hv->hv_lock);
+}
+
 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
 {
index f83b8db72b118cabc6278195d2f6fb99048af34a..923e64903da9afeeff80f76062c45bd5ab076717 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/kvm_host.h>
 #include "x86.h"
 
+#ifdef CONFIG_KVM_HYPERV
+
 /* "Hv#1" signature */
 #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
 
@@ -105,6 +107,17 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
 
+static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
+{
+       return to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->vec_bitmap);
+}
+
+static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
+{
+       return to_hv_vcpu(vcpu) &&
+              test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap);
+}
+
 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
 
 bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
@@ -169,6 +182,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock);
 void kvm_hv_request_tsc_page_update(struct kvm *kvm);
 
+void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu);
+
 void kvm_hv_init_vm(struct kvm *kvm);
 void kvm_hv_destroy_vm(struct kvm *kvm);
 int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
@@ -236,6 +251,77 @@ static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
        return kvm_hv_get_assist_page(vcpu);
 }
 
+static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu,
+                                                    bool tdp_enabled)
+{
+       /*
+        * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
+        * L2's VP_ID upon request from the guest. Make sure we check for
+        * pending entries in the right FIFO upon L1/L2 transition as these
+        * requests are put by other vCPUs asynchronously.
+        */
+       if (to_hv_vcpu(vcpu) && tdp_enabled)
+               kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+}
+
 int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
+#else /* CONFIG_KVM_HYPERV */
+static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
+                                        struct pvclock_vcpu_time_info *hv_clock) {}
+static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
+static inline void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu) {}
+static inline void kvm_hv_init_vm(struct kvm *kvm) {}
+static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
+static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+static inline void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
+{
+       return HV_STATUS_ACCESS_DENIED;
+}
+static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
+static inline void kvm_hv_free_pa_page(struct kvm *kvm) {}
+static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
+{
+       return false;
+}
+static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
+{
+       return false;
+}
+static inline void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) {}
+static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) {}
+static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
+{
+       return vcpu->vcpu_idx;
+}
+static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) {}
+#endif /* CONFIG_KVM_HYPERV */
 
-#endif
+#endif /* __ARCH_X86_KVM_HYPERV_H__ */
index b2c397dd2bc6620c54f045e7d503a326fedf81b4..ad9ca8a60144c773dd2ab7de10af67a436f790cb 100644 (file)
@@ -118,8 +118,10 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
        if (!lapic_in_kernel(v))
                return v->arch.interrupt.nr;
 
+#ifdef CONFIG_KVM_XEN
        if (kvm_xen_has_interrupt(v))
                return v->kvm->arch.xen.upcall_vector;
+#endif
 
        if (irqchip_split(v->kvm)) {
                int vector = v->arch.pending_external_vector;
index 16d076a1b91acc65aed41eed3536bad1744a8a1f..68f3f6c26046936601cb0e0a9383c8946ccad8ce 100644 (file)
@@ -144,7 +144,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
        return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
 }
 
-
+#ifdef CONFIG_KVM_HYPERV
 static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
                    struct kvm *kvm, int irq_source_id, int level,
                    bool line_status)
@@ -154,6 +154,7 @@ static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
 
        return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
 }
+#endif
 
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id, int level,
@@ -163,9 +164,11 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
        int r;
 
        switch (e->type) {
+#ifdef CONFIG_KVM_HYPERV
        case KVM_IRQ_ROUTING_HV_SINT:
                return kvm_hv_set_sint(e, kvm, irq_source_id, level,
                                       line_status);
+#endif
 
        case KVM_IRQ_ROUTING_MSI:
                if (kvm_msi_route_invalid(kvm, e))
@@ -314,11 +317,13 @@ int kvm_set_routing_entry(struct kvm *kvm,
                if (kvm_msi_route_invalid(kvm, e))
                        return -EINVAL;
                break;
+#ifdef CONFIG_KVM_HYPERV
        case KVM_IRQ_ROUTING_HV_SINT:
                e->set = kvm_hv_set_sint;
                e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
                e->hv_sint.sint = ue->u.hv_sint.sint;
                break;
+#endif
 #ifdef CONFIG_KVM_XEN
        case KVM_IRQ_ROUTING_XEN_EVTCHN:
                return kvm_xen_setup_evtchn(kvm, e, ue);
@@ -438,5 +443,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
 
 void kvm_arch_irq_routing_update(struct kvm *kvm)
 {
+#ifdef CONFIG_KVM_HYPERV
        kvm_hv_irq_routing_update(kvm);
+#endif
 }
index be7aeb9b8ea3b152b269870e5a737642a492e192..e6d149825169dda3ace396ca979923c4a2d108e8 100644 (file)
@@ -88,6 +88,12 @@ struct x86_instruction_info {
 #define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
 #define X86EMUL_INTERCEPTED     6 /* Intercepted by nested VMCB/VMCS */
 
+/* x86-specific emulation flags */
+#define X86EMUL_F_WRITE                        BIT(0)
+#define X86EMUL_F_FETCH                        BIT(1)
+#define X86EMUL_F_IMPLICIT             BIT(2)
+#define X86EMUL_F_INVLPG               BIT(3)
+
 struct x86_emulate_ops {
        void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
        /*
@@ -224,6 +230,9 @@ struct x86_emulate_ops {
        int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
        void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
        int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
+
+       gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr,
+                                  unsigned int flags);
 };
 
 /* Type, address-of, and value of an instruction's operand. */
index f9ca3e7432b2e8fc983dd61bbc2887e38f4a93a2..eefab3dc8498b79cb083b827ec8c415315aaae14 100644 (file)
 int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages);
 int hv_flush_remote_tlbs(struct kvm *kvm);
 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
+static inline hpa_t hv_get_partition_assist_page(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Partition assist page is something which Hyper-V running in L0
+        * requires from KVM running in L1 before direct TLB flush for L2
+        * guests can be enabled. KVM doesn't currently use the page but to
+        * comply with TLFS it still needs to be allocated. For now, this
+        * is a single page shared among all vCPUs.
+        */
+       struct hv_partition_assist_pg **p_hv_pa_pg =
+               &vcpu->kvm->arch.hv_pa_pg;
+
+       if (!*p_hv_pa_pg)
+               *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+
+       if (!*p_hv_pa_pg)
+               return INVALID_PAGE;
+
+       return __pa(*p_hv_pa_pg);
+}
 #else /* !CONFIG_HYPERV */
 static inline int hv_flush_remote_tlbs(struct kvm *kvm)
 {
index 245b20973caee481055da37cd1ae01a78bbb335b..3242f3da2457671bafde8d5ad7823c9a3d3a07be 100644 (file)
@@ -1475,8 +1475,7 @@ static int apic_set_eoi(struct kvm_lapic *apic)
        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);
 
-       if (to_hv_vcpu(apic->vcpu) &&
-           test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
+       if (kvm_hv_synic_has_vector(apic->vcpu, vector))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);
 
        kvm_ioapic_send_eoi(apic, vector);
@@ -2905,7 +2904,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
         */
 
        apic_clear_irr(vector, apic);
-       if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
+       if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
                /*
                 * For auto-EOI interrupts, there might be another pending
                 * interrupt above PPR, so check whether to raise another
index bb8c86eefac047de05f25c937fafd66f7e2ebe6a..60f21bb4c27b196d7da820a304771b31b700899f 100644 (file)
@@ -146,6 +146,14 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
        return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
 }
 
+static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
+{
+       if (!guest_can_use(vcpu, X86_FEATURE_LAM))
+               return 0;
+
+       return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
+}
+
 static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 {
        u64 root_hpa = vcpu->arch.mmu->root.hpa;
index 0b1f991b9a312a5177b2efa75177835b57f115e6..2d6cdeab1f8a3e78306148d44a4665a1d51d8b1e 100644 (file)
@@ -271,15 +271,11 @@ static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
 
 static inline bool kvm_available_flush_remote_tlbs_range(void)
 {
+#if IS_ENABLED(CONFIG_HYPERV)
        return kvm_x86_ops.flush_remote_tlbs_range;
-}
-
-int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
-{
-       if (!kvm_x86_ops.flush_remote_tlbs_range)
-               return -EOPNOTSUPP;
-
-       return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages);
+#else
+       return false;
+#endif
 }
 
 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
@@ -795,16 +791,26 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
        return &slot->arch.lpage_info[level - 2][idx];
 }
 
+/*
+ * The most significant bit in disallow_lpage tracks whether or not memory
+ * attributes are mixed, i.e. not identical for all gfns at the current level.
+ * The lower order bits are used to refcount other cases where a hugepage is
+ * disallowed, e.g. if KVM has shadowed a page table at the gfn.
+ */
+#define KVM_LPAGE_MIXED_FLAG   BIT(31)
+
 static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
                                            gfn_t gfn, int count)
 {
        struct kvm_lpage_info *linfo;
-       int i;
+       int old, i;
 
        for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
                linfo = lpage_info_slot(gfn, slot, i);
+
+               old = linfo->disallow_lpage;
                linfo->disallow_lpage += count;
-               WARN_ON_ONCE(linfo->disallow_lpage < 0);
+               WARN_ON_ONCE((old ^ linfo->disallow_lpage) & KVM_LPAGE_MIXED_FLAG);
        }
 }
 
@@ -1382,7 +1388,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
 
                if (READ_ONCE(eager_page_split))
-                       kvm_mmu_try_split_huge_pages(kvm, slot, start, end, PG_LEVEL_4K);
+                       kvm_mmu_try_split_huge_pages(kvm, slot, start, end + 1, PG_LEVEL_4K);
 
                kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
 
@@ -2840,9 +2846,9 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
                        /*
                         * Recheck after taking the spinlock, a different vCPU
                         * may have since marked the page unsync.  A false
-                        * positive on the unprotected check above is not
+                        * negative on the unprotected check above is not
                         * possible as clearing sp->unsync _must_ hold mmu_lock
-                        * for write, i.e. unsync cannot transition from 0->1
+                        * for write, i.e. unsync cannot transition from 1->0
                         * while this CPU holds mmu_lock for read (or write).
                         */
                        if (READ_ONCE(sp->unsync))
@@ -3056,7 +3062,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
  *
  * There are several ways to safely use this helper:
  *
- * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
  *   consuming it.  In this case, mmu_lock doesn't need to be held during the
  *   lookup, but it does need to be held while checking the MMU notifier.
  *
@@ -3137,9 +3143,9 @@ out:
        return level;
 }
 
-int kvm_mmu_max_mapping_level(struct kvm *kvm,
-                             const struct kvm_memory_slot *slot, gfn_t gfn,
-                             int max_level)
+static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
+                                      const struct kvm_memory_slot *slot,
+                                      gfn_t gfn, int max_level, bool is_private)
 {
        struct kvm_lpage_info *linfo;
        int host_level;
@@ -3151,6 +3157,9 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
                        break;
        }
 
+       if (is_private)
+               return max_level;
+
        if (max_level == PG_LEVEL_4K)
                return PG_LEVEL_4K;
 
@@ -3158,6 +3167,16 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
        return min(host_level, max_level);
 }
 
+int kvm_mmu_max_mapping_level(struct kvm *kvm,
+                             const struct kvm_memory_slot *slot, gfn_t gfn,
+                             int max_level)
+{
+       bool is_private = kvm_slot_can_be_private(slot) &&
+                         kvm_mem_is_private(kvm, gfn);
+
+       return __kvm_mmu_max_mapping_level(kvm, slot, gfn, max_level, is_private);
+}
+
 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
        struct kvm_memory_slot *slot = fault->slot;
@@ -3178,8 +3197,9 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
         * Enforce the iTLB multihit workaround after capturing the requested
         * level, which will be used to do precise, accurate accounting.
         */
-       fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
-                                                    fault->gfn, fault->max_level);
+       fault->req_level = __kvm_mmu_max_mapping_level(vcpu->kvm, slot,
+                                                      fault->gfn, fault->max_level,
+                                                      fault->is_private);
        if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
                return;
 
@@ -3556,7 +3576,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
                return;
 
        if (is_tdp_mmu_page(sp))
-               kvm_tdp_mmu_put_root(kvm, sp, false);
+               kvm_tdp_mmu_put_root(kvm, sp);
        else if (!--sp->root_count && sp->role.invalid)
                kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 
@@ -3739,7 +3759,7 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm)
            kvm_page_track_write_tracking_enabled(kvm))
                goto out_success;
 
-       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+       for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
                slots = __kvm_memslots(kvm, i);
                kvm_for_each_memslot(slot, bkt, slots) {
                        /*
@@ -3782,7 +3802,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        hpa_t root;
 
        root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
-       root_gfn = root_pgd >> PAGE_SHIFT;
+       root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
        if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
                mmu->root.hpa = kvm_mmu_get_dummy_root();
@@ -4259,6 +4279,55 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
        kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL);
 }
 
+static inline u8 kvm_max_level_for_order(int order)
+{
+       BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
+
+       KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
+                       order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
+                       order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
+
+       if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
+               return PG_LEVEL_1G;
+
+       if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
+               return PG_LEVEL_2M;
+
+       return PG_LEVEL_4K;
+}
+
+static void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
+                                             struct kvm_page_fault *fault)
+{
+       kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
+                                     PAGE_SIZE, fault->write, fault->exec,
+                                     fault->is_private);
+}
+
+static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
+                                  struct kvm_page_fault *fault)
+{
+       int max_order, r;
+
+       if (!kvm_slot_can_be_private(fault->slot)) {
+               kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
+               return -EFAULT;
+       }
+
+       r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
+                            &max_order);
+       if (r) {
+               kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
+               return r;
+       }
+
+       fault->max_level = min(kvm_max_level_for_order(max_order),
+                              fault->max_level);
+       fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
+
+       return RET_PF_CONTINUE;
+}
+
 static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
        struct kvm_memory_slot *slot = fault->slot;
@@ -4291,6 +4360,14 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
                        return RET_PF_EMULATE;
        }
 
+       if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
+               kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
+               return -EFAULT;
+       }
+
+       if (fault->is_private)
+               return kvm_faultin_pfn_private(vcpu, fault);
+
        async = false;
        fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async,
                                          fault->write, &fault->map_writable,
@@ -4366,7 +4443,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
                return true;
 
        return fault->slot &&
-              mmu_invalidate_retry_hva(vcpu->kvm, fault->mmu_seq, fault->hva);
+              mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
 }
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
@@ -6228,7 +6305,7 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
        if (!kvm_memslots_have_rmaps(kvm))
                return flush;
 
-       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+       for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
                slots = __kvm_memslots(kvm, i);
 
                kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
@@ -6260,7 +6337,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 
        write_lock(&kvm->mmu_lock);
 
-       kvm_mmu_invalidate_begin(kvm, 0, -1ul);
+       kvm_mmu_invalidate_begin(kvm);
+
+       kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end);
 
        flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
 
@@ -6270,7 +6349,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
        if (flush)
                kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
 
-       kvm_mmu_invalidate_end(kvm, 0, -1ul);
+       kvm_mmu_invalidate_end(kvm);
 
        write_unlock(&kvm->mmu_lock);
 }
@@ -6723,7 +6802,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
         * modifier prior to checking for a wrap of the MMIO generation so
         * that a wrap in any address space is detected.
         */
-       gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
+       gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1);
 
        /*
         * The very rare case: if the MMIO generation number has wrapped,
@@ -7176,3 +7255,163 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
        if (kvm->arch.nx_huge_page_recovery_thread)
                kthread_stop(kvm->arch.nx_huge_page_recovery_thread);
 }
+
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+                                       struct kvm_gfn_range *range)
+{
+       /*
+        * Zap SPTEs even if the slot can't be mapped PRIVATE.  KVM x86 only
+        * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
+        * can simply ignore such slots.  But if userspace is making memory
+        * PRIVATE, then KVM must prevent the guest from accessing the memory
+        * as shared.  And if userspace is making memory SHARED and this point
+        * is reached, then at least one page within the range was previously
+        * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
+        * Zapping SPTEs in this case ensures KVM will reassess whether or not
+        * a hugepage can be used for affected ranges.
+        */
+       if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
+               return false;
+
+       return kvm_unmap_gfn_range(kvm, range);
+}
+
+static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+                               int level)
+{
+       return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+                                int level)
+{
+       lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
+}
+
+static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+                              int level)
+{
+       lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
+}
+
+static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
+                              gfn_t gfn, int level, unsigned long attrs)
+{
+       const unsigned long start = gfn;
+       const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
+
+       if (level == PG_LEVEL_2M)
+               return kvm_range_has_memory_attributes(kvm, start, end, attrs);
+
+       for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
+               if (hugepage_test_mixed(slot, gfn, level - 1) ||
+                   attrs != kvm_get_memory_attributes(kvm, gfn))
+                       return false;
+       }
+       return true;
+}
+
+bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
+                                        struct kvm_gfn_range *range)
+{
+       unsigned long attrs = range->arg.attributes;
+       struct kvm_memory_slot *slot = range->slot;
+       int level;
+
+       lockdep_assert_held_write(&kvm->mmu_lock);
+       lockdep_assert_held(&kvm->slots_lock);
+
+       /*
+        * Calculate which ranges can be mapped with hugepages even if the slot
+        * can't map memory PRIVATE.  KVM mustn't create a SHARED hugepage over
+        * a range that has PRIVATE GFNs, and conversely converting a range to
+        * SHARED may now allow hugepages.
+        */
+       if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
+               return false;
+
+       /*
+        * The sequence matters here: upper levels consume the result of lower
+        * level's scanning.
+        */
+       for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+               gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+               gfn_t gfn = gfn_round_for_level(range->start, level);
+
+               /* Process the head page if it straddles the range. */
+               if (gfn != range->start || gfn + nr_pages > range->end) {
+                       /*
+                        * Skip mixed tracking if the aligned gfn isn't covered
+                        * by the memslot, KVM can't use a hugepage due to the
+                        * misaligned address regardless of memory attributes.
+                        */
+                       if (gfn >= slot->base_gfn) {
+                               if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
+                                       hugepage_clear_mixed(slot, gfn, level);
+                               else
+                                       hugepage_set_mixed(slot, gfn, level);
+                       }
+                       gfn += nr_pages;
+               }
+
+               /*
+                * Pages entirely covered by the range are guaranteed to have
+                * only the attributes which were just set.
+                */
+               for ( ; gfn + nr_pages <= range->end; gfn += nr_pages)
+                       hugepage_clear_mixed(slot, gfn, level);
+
+               /*
+                * Process the last tail page if it straddles the range and is
+                * contained by the memslot.  Like the head page, KVM can't
+                * create a hugepage if the slot size is misaligned.
+                */
+               if (gfn < range->end &&
+                   (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
+                       if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
+                               hugepage_clear_mixed(slot, gfn, level);
+                       else
+                               hugepage_set_mixed(slot, gfn, level);
+               }
+       }
+       return false;
+}
+
+void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
+                                           struct kvm_memory_slot *slot)
+{
+       int level;
+
+       if (!kvm_arch_has_private_mem(kvm))
+               return;
+
+       for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+               /*
+                * Don't bother tracking mixed attributes for pages that can't
+                * be huge due to alignment, i.e. process only pages that are
+                * entirely contained by the memslot.
+                */
+               gfn_t end = gfn_round_for_level(slot->base_gfn + slot->npages, level);
+               gfn_t start = gfn_round_for_level(slot->base_gfn, level);
+               gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+               gfn_t gfn;
+
+               if (start < slot->base_gfn)
+                       start += nr_pages;
+
+               /*
+                * Unlike setting attributes, every potential hugepage needs to
+                * be manually checked as the attributes may already be mixed.
+                */
+               for (gfn = start; gfn < end; gfn += nr_pages) {
+                       unsigned long attrs = kvm_get_memory_attributes(kvm, gfn);
+
+                       if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
+                               hugepage_clear_mixed(slot, gfn, level);
+                       else
+                               hugepage_set_mixed(slot, gfn, level);
+               }
+       }
+}
+#endif
index decc1f1536694f31529f3d8c2c7cf67d1185a8c0..0669a8a668cacd4d0be68affbecbb686524c5213 100644 (file)
@@ -13,6 +13,7 @@
 #endif
 
 /* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
+#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12)
 #define __PT_LEVEL_SHIFT(level, bits_per_level)        \
        (PAGE_SHIFT + ((level) - 1) * (bits_per_level))
 #define __PT_INDEX(address, level, bits_per_level) \
@@ -201,6 +202,7 @@ struct kvm_page_fault {
 
        /* Derived from mmu and global state.  */
        const bool is_tdp;
+       const bool is_private;
        const bool nx_huge_page_workaround_enabled;
 
        /*
@@ -296,6 +298,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                .max_level = KVM_MAX_HUGEPAGE_LEVEL,
                .req_level = PG_LEVEL_4K,
                .goal_level = PG_LEVEL_4K,
+               .is_private = kvm_mem_is_private(vcpu->kvm, cr2_or_gpa >> PAGE_SHIFT),
        };
        int r;
 
index c85255073f67231f0d25417232c99c9eeceb8f7b..4d4e98fe4f3548baf9156f3a9e0fd67003df7fdf 100644 (file)
@@ -62,7 +62,7 @@
 #endif
 
 /* Common logic, but per-type values.  These also need to be undefined. */
-#define PT_BASE_ADDR_MASK      ((pt_element_t)(((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
+#define PT_BASE_ADDR_MASK      ((pt_element_t)__PT_BASE_ADDR_MASK)
 #define PT_LVL_ADDR_MASK(lvl)  __PT_LVL_ADDR_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
 #define PT_LVL_OFFSET_MASK(lvl)        __PT_LVL_OFFSET_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
 #define PT_INDEX(addr, lvl)    __PT_INDEX(addr, lvl, PT_LEVEL_BITS)
index 6cd4dd631a2fac815be473c62333c940a074c925..6ae19b4ee5b1cb17d4ddda85197379cde425b03e 100644 (file)
@@ -73,11 +73,8 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
        tdp_mmu_free_sp(sp);
 }
 
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
-                         bool shared)
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
-       kvm_lockdep_assert_mmu_lock_held(kvm, shared);
-
        if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
                return;
 
@@ -106,10 +103,16 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
  */
 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                                              struct kvm_mmu_page *prev_root,
-                                             bool shared, bool only_valid)
+                                             bool only_valid)
 {
        struct kvm_mmu_page *next_root;
 
+       /*
+        * While the roots themselves are RCU-protected, fields such as
+        * role.invalid are protected by mmu_lock.
+        */
+       lockdep_assert_held(&kvm->mmu_lock);
+
        rcu_read_lock();
 
        if (prev_root)
@@ -132,7 +135,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
        rcu_read_unlock();
 
        if (prev_root)
-               kvm_tdp_mmu_put_root(kvm, prev_root, shared);
+               kvm_tdp_mmu_put_root(kvm, prev_root);
 
        return next_root;
 }
@@ -144,26 +147,22 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * recent root. (Unless keeping a live reference is desirable.)
  *
  * If shared is set, this function is operating under the MMU lock in read
- * mode. In the unlikely event that this thread must free a root, the lock
- * will be temporarily dropped and reacquired in write mode.
+ * mode.
  */
-#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
-       for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid);       \
-            _root;                                                             \
-            _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid))      \
-               if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) &&          \
-                   kvm_mmu_page_as_id(_root) != _as_id) {                      \
+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)\
+       for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);        \
+            ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;      \
+            _root = tdp_mmu_next_root(_kvm, _root, _only_valid))       \
+               if (kvm_mmu_page_as_id(_root) != _as_id) {              \
                } else
 
-#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared)   \
-       __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
+#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)  \
+       __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
 
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared)                 \
-       for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false);             \
-            _root;                                                             \
-            _root = tdp_mmu_next_root(_kvm, _root, _shared, false))            \
-               if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) {         \
-               } else
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)                  \
+       for (_root = tdp_mmu_next_root(_kvm, NULL, false);              \
+            ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;      \
+            _root = tdp_mmu_next_root(_kvm, _root, false))
 
 /*
  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
@@ -276,28 +275,18 @@ static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
  *
  * @kvm: kvm instance
  * @sp: the page to be removed
- * @shared: This operation may not be running under the exclusive use of
- *         the MMU lock and the operation must synchronize with other
- *         threads that might be adding or removing pages.
  */
-static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
-                             bool shared)
+static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        tdp_unaccount_mmu_page(kvm, sp);
 
        if (!sp->nx_huge_page_disallowed)
                return;
 
-       if (shared)
-               spin_lock(&kvm->arch.tdp_mmu_pages_lock);
-       else
-               lockdep_assert_held_write(&kvm->mmu_lock);
-
+       spin_lock(&kvm->arch.tdp_mmu_pages_lock);
        sp->nx_huge_page_disallowed = false;
        untrack_possible_nx_huge_page(kvm, sp);
-
-       if (shared)
-               spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+       spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
 }
 
 /**
@@ -326,7 +315,7 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 
        trace_kvm_mmu_prepare_zap_page(sp);
 
-       tdp_mmu_unlink_sp(kvm, sp, shared);
+       tdp_mmu_unlink_sp(kvm, sp);
 
        for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
                tdp_ptep_t sptep = pt + i;
@@ -832,7 +821,8 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
 {
        struct kvm_mmu_page *root;
 
-       for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+       lockdep_assert_held_write(&kvm->mmu_lock);
+       for_each_tdp_mmu_root_yield_safe(kvm, root)
                flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
 
        return flush;
@@ -854,7 +844,8 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
         * is being destroyed or the userspace VMM has exited.  In both cases,
         * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
         */
-       for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+       lockdep_assert_held_write(&kvm->mmu_lock);
+       for_each_tdp_mmu_root_yield_safe(kvm, root)
                tdp_mmu_zap_root(kvm, root, false);
 }
 
@@ -868,7 +859,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 
        read_lock(&kvm->mmu_lock);
 
-       for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                if (!root->tdp_mmu_scheduled_root_to_zap)
                        continue;
 
@@ -891,7 +882,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
                 * the root must be reachable by mmu_notifiers while it's being
                 * zapped
                 */
-               kvm_tdp_mmu_put_root(kvm, root, true);
+               kvm_tdp_mmu_put_root(kvm, root);
        }
 
        read_unlock(&kvm->mmu_lock);
@@ -1125,7 +1116,7 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
        struct kvm_mmu_page *root;
 
-       __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
+       __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
                flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
                                          range->may_block, flush);
 
@@ -1314,7 +1305,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
 
        lockdep_assert_held_read(&kvm->mmu_lock);
 
-       for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+       for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
                spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
                             slot->base_gfn + slot->npages, min_level);
 
@@ -1346,6 +1337,8 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
 {
        struct kvm_mmu_page *sp;
 
+       kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+
        /*
         * Since we are allocating while under the MMU lock we have to be
         * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
@@ -1496,11 +1489,10 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
        int r = 0;
 
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);
-
-       for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
+       for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
                r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
                if (r) {
-                       kvm_tdp_mmu_put_root(kvm, root, shared);
+                       kvm_tdp_mmu_put_root(kvm, root);
                        break;
                }
        }
@@ -1522,12 +1514,13 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
        rcu_read_lock();
 
-       tdp_root_for_each_leaf_pte(iter, root, start, end) {
+       tdp_root_for_each_pte(iter, root, start, end) {
 retry:
-               if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
+               if (!is_shadow_present_pte(iter.old_spte) ||
+                   !is_last_spte(iter.old_spte, iter.level))
                        continue;
 
-               if (!is_shadow_present_pte(iter.old_spte))
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
                        continue;
 
                KVM_MMU_WARN_ON(kvm_ad_enabled() &&
@@ -1560,8 +1553,7 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
        bool spte_set = false;
 
        lockdep_assert_held_read(&kvm->mmu_lock);
-
-       for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+       for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
                spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);
 
@@ -1695,8 +1687,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
        struct kvm_mmu_page *root;
 
        lockdep_assert_held_read(&kvm->mmu_lock);
-
-       for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+       for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
                zap_collapsible_spte_range(kvm, root, slot);
 }
 
index 733a3aef3a96eaa32964e8fc042c26fae0a0ee9c..20d97aa46c490fff98f9d3a6cbc116935d71a726 100644 (file)
@@ -17,8 +17,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
        return refcount_inc_not_zero(&root->tdp_mmu_root_count);
 }
 
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
-                         bool shared);
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
index 9ae07db6f0f6481e060c260834050e5c5abfe14a..87cc6c8809ad88898894bd0ea6199ab70e2a91ac 100644 (file)
@@ -127,9 +127,9 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 
        /*
-        * Ignore overflow events for counters that are scheduled to be
-        * reprogrammed, e.g. if a PMI for the previous event races with KVM's
-        * handling of a related guest WRMSR.
+        * Ignore asynchronous overflow events for counters that are scheduled
+        * to be reprogrammed, e.g. if a PMI for the previous event races with
+        * KVM's handling of a related guest WRMSR.
         */
        if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
                return;
@@ -161,6 +161,15 @@ static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
        return 1;
 }
 
+static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
+{
+       u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
+
+       if (!sample_period)
+               sample_period = pmc_bitmask(pmc) + 1;
+       return sample_period;
+}
+
 static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
                                 bool exclude_user, bool exclude_kernel,
                                 bool intr)
@@ -215,17 +224,30 @@ static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
        return 0;
 }
 
-static void pmc_pause_counter(struct kvm_pmc *pmc)
+static bool pmc_pause_counter(struct kvm_pmc *pmc)
 {
        u64 counter = pmc->counter;
-
-       if (!pmc->perf_event || pmc->is_paused)
-               return;
+       u64 prev_counter;
 
        /* update counter, reset event value to avoid redundant accumulation */
-       counter += perf_event_pause(pmc->perf_event, true);
+       if (pmc->perf_event && !pmc->is_paused)
+               counter += perf_event_pause(pmc->perf_event, true);
+
+       /*
+        * Snapshot the previous counter *after* accumulating state from perf.
+        * If overflow already happened, hardware (via perf) is responsible for
+        * generating a PMI.  KVM just needs to detect overflow on emulated
+        * counter events that haven't yet been processed.
+        */
+       prev_counter = counter & pmc_bitmask(pmc);
+
+       counter += pmc->emulated_counter;
        pmc->counter = counter & pmc_bitmask(pmc);
+
+       pmc->emulated_counter = 0;
        pmc->is_paused = true;
+
+       return pmc->counter < prev_counter;
 }
 
 static bool pmc_resume_counter(struct kvm_pmc *pmc)
@@ -250,6 +272,51 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
        return true;
 }
 
+static void pmc_release_perf_event(struct kvm_pmc *pmc)
+{
+       if (pmc->perf_event) {
+               perf_event_release_kernel(pmc->perf_event);
+               pmc->perf_event = NULL;
+               pmc->current_config = 0;
+               pmc_to_pmu(pmc)->event_count--;
+       }
+}
+
+static void pmc_stop_counter(struct kvm_pmc *pmc)
+{
+       if (pmc->perf_event) {
+               pmc->counter = pmc_read_counter(pmc);
+               pmc_release_perf_event(pmc);
+       }
+}
+
+static void pmc_update_sample_period(struct kvm_pmc *pmc)
+{
+       if (!pmc->perf_event || pmc->is_paused ||
+           !is_sampling_event(pmc->perf_event))
+               return;
+
+       perf_event_period(pmc->perf_event,
+                         get_sample_period(pmc, pmc->counter));
+}
+
+void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
+{
+       /*
+        * Drop any unconsumed accumulated counts, the WRMSR is a write, not a
+        * read-modify-write.  Adjust the counter value so that its value is
+        * relative to the current count, as reading the current count from
+        * perf is faster than pausing and reprogramming the event in order to
+        * reset it to '0'.  Note, this very sneakily offsets the accumulated
+        * emulated count too, by using pmc_read_counter()!
+        */
+       pmc->emulated_counter = 0;
+       pmc->counter += val - pmc_read_counter(pmc);
+       pmc->counter &= pmc_bitmask(pmc);
+       pmc_update_sample_period(pmc);
+}
+EXPORT_SYMBOL_GPL(pmc_write_counter);
+
 static int filter_cmp(const void *pa, const void *pb, u64 mask)
 {
        u64 a = *(u64 *)pa & mask;
@@ -383,14 +450,15 @@ static void reprogram_counter(struct kvm_pmc *pmc)
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        u64 eventsel = pmc->eventsel;
        u64 new_config = eventsel;
+       bool emulate_overflow;
        u8 fixed_ctr_ctrl;
 
-       pmc_pause_counter(pmc);
+       emulate_overflow = pmc_pause_counter(pmc);
 
        if (!pmc_event_is_allowed(pmc))
                goto reprogram_complete;
 
-       if (pmc->counter < pmc->prev_counter)
+       if (emulate_overflow)
                __kvm_perf_overflow(pmc, false);
 
        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
@@ -430,7 +498,6 @@ static void reprogram_counter(struct kvm_pmc *pmc)
 
 reprogram_complete:
        clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
-       pmc->prev_counter = 0;
 }
 
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
@@ -639,32 +706,60 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        return 0;
 }
 
-/* refresh PMU settings. This function generally is called when underlying
- * settings are changed (such as changes of PMU CPUID by guest VMs), which
- * should rarely happen.
+static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       struct kvm_pmc *pmc;
+       int i;
+
+       pmu->need_cleanup = false;
+
+       bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
+
+       for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
+               pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
+               if (!pmc)
+                       continue;
+
+               pmc_stop_counter(pmc);
+               pmc->counter = 0;
+               pmc->emulated_counter = 0;
+
+               if (pmc_is_gp(pmc))
+                       pmc->eventsel = 0;
+       }
+
+       pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
+
+       static_call_cond(kvm_x86_pmu_reset)(vcpu);
+}
+
+
+/*
+ * Refresh the PMU configuration for the vCPU, e.g. if userspace changes CPUID
+ * and/or PERF_CAPABILITIES.
  */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
        if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
                return;
 
+       /*
+        * Stop/release all existing counters/events before realizing the new
+        * vPMU model.
+        */
+       kvm_pmu_reset(vcpu);
+
        bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
        static_call(kvm_x86_pmu_refresh)(vcpu);
 }
 
-void kvm_pmu_reset(struct kvm_vcpu *vcpu)
-{
-       static_call(kvm_x86_pmu_reset)(vcpu);
-}
-
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
 {
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
        memset(pmu, 0, sizeof(*pmu));
        static_call(kvm_x86_pmu_init)(vcpu);
-       pmu->event_count = 0;
-       pmu->need_cleanup = false;
        kvm_pmu_refresh(vcpu);
 }
 
@@ -700,8 +795,7 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
 
 static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
 {
-       pmc->prev_counter = pmc->counter;
-       pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
+       pmc->emulated_counter++;
        kvm_pmu_request_counter_reprogram(pmc);
 }
 
index 1d64113de4883ec77d4aa8c7c83f27a8c01a667d..7caeb3d8d4fd1739bba12b0d133185fda8a041df 100644 (file)
@@ -66,7 +66,8 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
 {
        u64 counter, enabled, running;
 
-       counter = pmc->counter;
+       counter = pmc->counter + pmc->emulated_counter;
+
        if (pmc->perf_event && !pmc->is_paused)
                counter += perf_event_read_value(pmc->perf_event,
                                                 &enabled, &running);
@@ -74,29 +75,7 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
        return counter & pmc_bitmask(pmc);
 }
 
-static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
-{
-       pmc->counter += val - pmc_read_counter(pmc);
-       pmc->counter &= pmc_bitmask(pmc);
-}
-
-static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
-{
-       if (pmc->perf_event) {
-               perf_event_release_kernel(pmc->perf_event);
-               pmc->perf_event = NULL;
-               pmc->current_config = 0;
-               pmc_to_pmu(pmc)->event_count--;
-       }
-}
-
-static inline void pmc_stop_counter(struct kvm_pmc *pmc)
-{
-       if (pmc->perf_event) {
-               pmc->counter = pmc_read_counter(pmc);
-               pmc_release_perf_event(pmc);
-       }
-}
+void pmc_write_counter(struct kvm_pmc *pmc, u64 val);
 
 static inline bool pmc_is_gp(struct kvm_pmc *pmc)
 {
@@ -146,25 +125,6 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
        return NULL;
 }
 
-static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
-{
-       u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
-
-       if (!sample_period)
-               sample_period = pmc_bitmask(pmc) + 1;
-       return sample_period;
-}
-
-static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
-{
-       if (!pmc->perf_event || pmc->is_paused ||
-           !is_sampling_event(pmc->perf_event))
-               return;
-
-       perf_event_period(pmc->perf_event,
-                         get_sample_period(pmc, pmc->counter));
-}
-
 static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -261,7 +221,6 @@ bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
-void kvm_pmu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
 void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
index b816506783755a1ce87d511014c9903fe3e25798..aadefcaa9561d0a31e589784da7e871e4a0de2e0 100644 (file)
@@ -16,6 +16,7 @@ enum kvm_only_cpuid_leafs {
        CPUID_7_1_EDX,
        CPUID_8000_0007_EDX,
        CPUID_8000_0022_EAX,
+       CPUID_7_2_EDX,
        NR_KVM_CPU_CAPS,
 
        NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
@@ -46,6 +47,14 @@ enum kvm_only_cpuid_leafs {
 #define X86_FEATURE_AMX_COMPLEX         KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
 #define X86_FEATURE_PREFETCHITI         KVM_X86_FEATURE(CPUID_7_1_EDX, 14)
 
+/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
+#define X86_FEATURE_INTEL_PSFD         KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
+#define X86_FEATURE_IPRED_CTRL         KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
+#define KVM_X86_FEATURE_RRSBA_CTRL     KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
+#define X86_FEATURE_DDPD_U             KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
+#define X86_FEATURE_BHI_CTRL           KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
+#define X86_FEATURE_MCDT_NO            KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
+
 /* CPUID level 0x80000007 (EDX). */
 #define KVM_X86_FEATURE_CONSTANT_TSC   KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)
 
@@ -80,6 +89,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
        [CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX},
        [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
        [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
+       [CPUID_7_2_EDX]       = {         7, 2, CPUID_EDX},
 };
 
 /*
@@ -106,18 +116,19 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
  */
 static __always_inline u32 __feature_translate(int x86_feature)
 {
-       if (x86_feature == X86_FEATURE_SGX1)
-               return KVM_X86_FEATURE_SGX1;
-       else if (x86_feature == X86_FEATURE_SGX2)
-               return KVM_X86_FEATURE_SGX2;
-       else if (x86_feature == X86_FEATURE_SGX_EDECCSSA)
-               return KVM_X86_FEATURE_SGX_EDECCSSA;
-       else if (x86_feature == X86_FEATURE_CONSTANT_TSC)
-               return KVM_X86_FEATURE_CONSTANT_TSC;
-       else if (x86_feature == X86_FEATURE_PERFMON_V2)
-               return KVM_X86_FEATURE_PERFMON_V2;
-
-       return x86_feature;
+#define KVM_X86_TRANSLATE_FEATURE(f)   \
+       case X86_FEATURE_##f: return KVM_X86_FEATURE_##f
+
+       switch (x86_feature) {
+       KVM_X86_TRANSLATE_FEATURE(SGX1);
+       KVM_X86_TRANSLATE_FEATURE(SGX2);
+       KVM_X86_TRANSLATE_FEATURE(SGX_EDECCSSA);
+       KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
+       KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
+       KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
+       default:
+               return x86_feature;
+       }
 }
 
 static __always_inline u32 __feature_leaf(int x86_feature)
index 02f4784b5d446b2f0a86604b589c85981b13e5d1..d3f8bfc05832ee0a2249cbaeed22f081027a9a2e 100644 (file)
@@ -11,6 +11,7 @@
 #include "../hyperv.h"
 #include "svm.h"
 
+#ifdef CONFIG_KVM_HYPERV
 static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -41,5 +42,13 @@ static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
 }
 
 void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
+#else /* CONFIG_KVM_HYPERV */
+static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu) {}
+static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu) {}
+#endif /* CONFIG_KVM_HYPERV */
 
 #endif /* __ARCH_X86_KVM_SVM_HYPERV_H__ */
index 3fea8c47679e6899742c6f5aa08046da041439a2..dee62362a360ade493e0ca1d6ec19972ab70b72c 100644 (file)
@@ -187,7 +187,6 @@ void recalc_intercepts(struct vcpu_svm *svm)
  */
 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 {
-       struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
        int i;
 
        /*
@@ -198,11 +197,16 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
         * - Nested hypervisor (L1) is using Hyper-V emulation interface and
         * tells KVM (L0) there were no changes in MSR bitmap for L2.
         */
-       if (!svm->nested.force_msr_bitmap_recalc &&
-           kvm_hv_hypercall_enabled(&svm->vcpu) &&
-           hve->hv_enlightenments_control.msr_bitmap &&
-           (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
-               goto set_msrpm_base_pa;
+#ifdef CONFIG_KVM_HYPERV
+       if (!svm->nested.force_msr_bitmap_recalc) {
+               struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
+
+               if (kvm_hv_hypercall_enabled(&svm->vcpu) &&
+                   hve->hv_enlightenments_control.msr_bitmap &&
+                   (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
+                       goto set_msrpm_base_pa;
+       }
+#endif
 
        if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return true;
@@ -230,7 +234,9 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 
        svm->nested.force_msr_bitmap_recalc = false;
 
+#ifdef CONFIG_KVM_HYPERV
 set_msrpm_base_pa:
+#endif
        svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
 
        return true;
@@ -247,18 +253,6 @@ static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
            kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
 }
 
-static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
-{
-       /* Nested FLUSHBYASID is not supported yet.  */
-       switch(tlb_ctl) {
-               case TLB_CONTROL_DO_NOTHING:
-               case TLB_CONTROL_FLUSH_ALL_ASID:
-                       return true;
-               default:
-                       return false;
-       }
-}
-
 static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
                                         struct vmcb_ctrl_area_cached *control)
 {
@@ -278,9 +272,6 @@ static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
                                           IOPM_SIZE)))
                return false;
 
-       if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
-               return false;
-
        if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
               !vmcb12_is_intercept(control, INTERCEPT_NMI))) {
                return false;
@@ -311,7 +302,7 @@ static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
        if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
                if (CC(!(save->cr4 & X86_CR4_PAE)) ||
                    CC(!(save->cr0 & X86_CR0_PE)) ||
-                   CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
+                   CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
                        return false;
        }
 
@@ -378,12 +369,14 @@ void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
        to->msrpm_base_pa &= ~0x0fffULL;
        to->iopm_base_pa  &= ~0x0fffULL;
 
+#ifdef CONFIG_KVM_HYPERV
        /* Hyper-V extensions (Enlightened VMCB) */
        if (kvm_hv_hypercall_enabled(vcpu)) {
                to->clean = from->clean;
                memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
                       sizeof(to->hv_enlightenments));
        }
+#endif
 }
 
 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
@@ -487,14 +480,8 @@ static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
 
 static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
 {
-       /*
-        * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
-        * L2's VP_ID upon request from the guest. Make sure we check for
-        * pending entries in the right FIFO upon L1/L2 transition as these
-        * requests are put by other vCPUs asynchronously.
-        */
-       if (to_hv_vcpu(vcpu) && npt_enabled)
-               kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+       /* Handle pending Hyper-V TLB flush requests */
+       kvm_hv_nested_transtion_tlb_flush(vcpu, npt_enabled);
 
        /*
         * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
@@ -520,7 +507,7 @@ static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
 static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                               bool nested_npt, bool reload_pdptrs)
 {
-       if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
+       if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3)))
                return -EINVAL;
 
        if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
index 373ff6a6687b3a7fcb82cc75e1d8e96994161a4b..b6a7ad4d69145096d55e610ef8d789b87c2a5fb0 100644 (file)
@@ -161,7 +161,6 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc_write_counter(pmc, data);
-               pmc_update_sample_period(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
@@ -233,21 +232,6 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
        }
 }
 
-static void amd_pmu_reset(struct kvm_vcpu *vcpu)
-{
-       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-       int i;
-
-       for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
-               struct kvm_pmc *pmc = &pmu->gp_counters[i];
-
-               pmc_stop_counter(pmc);
-               pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
-       }
-
-       pmu->global_ctrl = pmu->global_status = 0;
-}
-
 struct kvm_pmu_ops amd_pmu_ops __initdata = {
        .hw_event_available = amd_hw_event_available,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
@@ -259,7 +243,6 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
        .set_msr = amd_pmu_set_msr,
        .refresh = amd_pmu_refresh,
        .init = amd_pmu_init,
-       .reset = amd_pmu_reset,
        .EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
        .MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
        .MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
index 6ee925d666484689d0279b1aa6503dc403fc9063..f760106c31f8a58d2941dbabd82531b9779089fa 100644 (file)
@@ -2191,10 +2191,13 @@ void __init sev_hardware_setup(void)
        /*
         * SEV must obviously be supported in hardware.  Sanity check that the
         * CPU supports decode assists, which is mandatory for SEV guests to
-        * support instruction emulation.
+        * support instruction emulation.  Ditto for flushing by ASID, as SEV
+        * guests are bound to a single ASID, i.e. KVM can't rotate to a new
+        * ASID to effect a TLB flush.
         */
        if (!boot_cpu_has(X86_FEATURE_SEV) ||
-           WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)))
+           WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) ||
+           WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_FLUSHBYASID)))
                goto out;
 
        /* Retrieve SEV CPUID information */
index 7fb51424fc745ebd5a4ce8399eaf780e86494542..e90b429c84f158bdd8d4348172d56eac1e80763b 100644 (file)
@@ -3563,8 +3563,15 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
        if (svm->nmi_l1_to_l2)
                return;
 
-       svm->nmi_masked = true;
-       svm_set_iret_intercept(svm);
+       /*
+        * No need to manually track NMI masking when vNMI is enabled, hardware
+        * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the
+        * case where software directly injects an NMI.
+        */
+       if (!is_vnmi_enabled(svm)) {
+               svm->nmi_masked = true;
+               svm_set_iret_intercept(svm);
+       }
        ++vcpu->stat.nmi_injections;
 }
 
@@ -5079,6 +5086,13 @@ static __init void svm_set_cpu_caps(void)
                kvm_cpu_cap_set(X86_FEATURE_SVM);
                kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
 
+               /*
+                * KVM currently flushes TLBs on *every* nested SVM transition,
+                * and so for all intents and purposes KVM supports flushing by
+                * ASID, i.e. KVM is guaranteed to honor every L1 ASID flush.
+                */
+               kvm_cpu_cap_set(X86_FEATURE_FLUSHBYASID);
+
                if (nrips)
                        kvm_cpu_cap_set(X86_FEATURE_NRIPS);
 
index c409f934c377fc15d17d6d04f167027afc1e5189..8ef95139cd245572a530b83e68756834a619cf41 100644 (file)
@@ -148,7 +148,9 @@ struct vmcb_ctrl_area_cached {
        u64 virt_ext;
        u32 clean;
        union {
+#if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
                struct hv_vmcb_enlightenments hv_enlightenments;
+#endif
                u8 reserved_sw[32];
        };
 };
index 7af8422d3382101321cc88c4402dbfa83cb24990..3971b3ea5d04b31daa2079156fcf8873b6cc487a 100644 (file)
 int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
 {
        struct hv_vmcb_enlightenments *hve;
-       struct hv_partition_assist_pg **p_hv_pa_pg =
-                       &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
+       hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
 
-       if (!*p_hv_pa_pg)
-               *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);
-
-       if (!*p_hv_pa_pg)
+       if (partition_assist_page == INVALID_PAGE)
                return -ENOMEM;
 
        hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments;
 
-       hve->partition_assist_page = __pa(*p_hv_pa_pg);
+       hve->partition_assist_page = partition_assist_page;
        hve->hv_vm_id = (unsigned long)vcpu->kvm;
        if (!hve->hv_enlightenments_control.nested_flush_hypercall) {
                hve->hv_enlightenments_control.nested_flush_hypercall = 1;
index 36c8af87a707ac0556fb1e50157e70c6305df798..4e725854c63a10c8645fa3b875a7a718020e96fe 100644 (file)
@@ -8,7 +8,7 @@
 
 #define svm_asm(insn, clobber...)                              \
 do {                                                           \
-       asm_volatile_goto("1: " __stringify(insn) "\n\t"        \
+       asm goto("1: " __stringify(insn) "\n\t" \
                          _ASM_EXTABLE(1b, %l[fault])           \
                          ::: clobber : fault);                 \
        return;                                                 \
@@ -18,7 +18,7 @@ fault:                                                                \
 
 #define svm_asm1(insn, op1, clobber...)                                \
 do {                                                           \
-       asm_volatile_goto("1: "  __stringify(insn) " %0\n\t"    \
+       asm goto("1: "  __stringify(insn) " %0\n\t"     \
                          _ASM_EXTABLE(1b, %l[fault])           \
                          :: op1 : clobber : fault);            \
        return;                                                 \
@@ -28,7 +28,7 @@ fault:                                                                \
 
 #define svm_asm2(insn, op1, op2, clobber...)                           \
 do {                                                                   \
-       asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"        \
+       asm goto("1: "  __stringify(insn) " %1, %0\n\t" \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          :: op1, op2 : clobber : fault);               \
        return;                                                         \
index ef2ebabb059c8cdf2f88eb7b98682e29940e4dd8..9499f9c6b07711bb1254ce574584ebc166d293fc 100644 (file)
@@ -270,16 +270,16 @@ SYM_FUNC_START(__svm_vcpu_run)
        RESTORE_GUEST_SPEC_CTRL_BODY
        RESTORE_HOST_SPEC_CTRL_BODY
 
-10:    cmpb $0, kvm_rebooting
+10:    cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 2b
        ud2
-30:    cmpb $0, kvm_rebooting
+30:    cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 4b
        ud2
-50:    cmpb $0, kvm_rebooting
+50:    cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 6b
        ud2
-70:    cmpb $0, kvm_rebooting
+70:    cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 8b
        ud2
 
@@ -381,7 +381,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
        RESTORE_GUEST_SPEC_CTRL_BODY
        RESTORE_HOST_SPEC_CTRL_BODY
 
-3:     cmpb $0, kvm_rebooting
+3:     cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 2b
        ud2
 
index 313b8bb5b8a7cb0befdb65cafcb9752c966dd81a..fab6a1ad98dc188c2b0f662bef9c4fd5e4bdbb8d 100644 (file)
 
 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
 
-/*
- * Enlightened VMCSv1 doesn't support these:
- *
- *     POSTED_INTR_NV                  = 0x00000002,
- *     GUEST_INTR_STATUS               = 0x00000810,
- *     APIC_ACCESS_ADDR                = 0x00002014,
- *     POSTED_INTR_DESC_ADDR           = 0x00002016,
- *     EOI_EXIT_BITMAP0                = 0x0000201c,
- *     EOI_EXIT_BITMAP1                = 0x0000201e,
- *     EOI_EXIT_BITMAP2                = 0x00002020,
- *     EOI_EXIT_BITMAP3                = 0x00002022,
- *     GUEST_PML_INDEX                 = 0x00000812,
- *     PML_ADDRESS                     = 0x0000200e,
- *     VM_FUNCTION_CONTROL             = 0x00002018,
- *     EPTP_LIST_ADDRESS               = 0x00002024,
- *     VMREAD_BITMAP                   = 0x00002026,
- *     VMWRITE_BITMAP                  = 0x00002028,
- *
- *     TSC_MULTIPLIER                  = 0x00002032,
- *     PLE_GAP                         = 0x00004020,
- *     PLE_WINDOW                      = 0x00004022,
- *     VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
- *
- * Currently unsupported in KVM:
- *     GUEST_IA32_RTIT_CTL             = 0x00002814,
- */
-#define EVMCS1_SUPPORTED_PINCTRL                                       \
-       (PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |                          \
-        PIN_BASED_EXT_INTR_MASK |                                      \
-        PIN_BASED_NMI_EXITING |                                        \
-        PIN_BASED_VIRTUAL_NMIS)
-
-#define EVMCS1_SUPPORTED_EXEC_CTRL                                     \
-       (CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |                          \
-        CPU_BASED_HLT_EXITING |                                        \
-        CPU_BASED_CR3_LOAD_EXITING |                                   \
-        CPU_BASED_CR3_STORE_EXITING |                                  \
-        CPU_BASED_UNCOND_IO_EXITING |                                  \
-        CPU_BASED_MOV_DR_EXITING |                                     \
-        CPU_BASED_USE_TSC_OFFSETTING |                                 \
-        CPU_BASED_MWAIT_EXITING |                                      \
-        CPU_BASED_MONITOR_EXITING |                                    \
-        CPU_BASED_INVLPG_EXITING |                                     \
-        CPU_BASED_RDPMC_EXITING |                                      \
-        CPU_BASED_INTR_WINDOW_EXITING |                                \
-        CPU_BASED_CR8_LOAD_EXITING |                                   \
-        CPU_BASED_CR8_STORE_EXITING |                                  \
-        CPU_BASED_RDTSC_EXITING |                                      \
-        CPU_BASED_TPR_SHADOW |                                         \
-        CPU_BASED_USE_IO_BITMAPS |                                     \
-        CPU_BASED_MONITOR_TRAP_FLAG |                                  \
-        CPU_BASED_USE_MSR_BITMAPS |                                    \
-        CPU_BASED_NMI_WINDOW_EXITING |                                 \
-        CPU_BASED_PAUSE_EXITING |                                      \
-        CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
-
-#define EVMCS1_SUPPORTED_2NDEXEC                                       \
-       (SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |                        \
-        SECONDARY_EXEC_WBINVD_EXITING |                                \
-        SECONDARY_EXEC_ENABLE_VPID |                                   \
-        SECONDARY_EXEC_ENABLE_EPT |                                    \
-        SECONDARY_EXEC_UNRESTRICTED_GUEST |                            \
-        SECONDARY_EXEC_DESC |                                          \
-        SECONDARY_EXEC_ENABLE_RDTSCP |                                 \
-        SECONDARY_EXEC_ENABLE_INVPCID |                                \
-        SECONDARY_EXEC_ENABLE_XSAVES |                                 \
-        SECONDARY_EXEC_RDSEED_EXITING |                                \
-        SECONDARY_EXEC_RDRAND_EXITING |                                \
-        SECONDARY_EXEC_TSC_SCALING |                                   \
-        SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |                         \
-        SECONDARY_EXEC_PT_USE_GPA |                                    \
-        SECONDARY_EXEC_PT_CONCEAL_VMX |                                \
-        SECONDARY_EXEC_BUS_LOCK_DETECTION |                            \
-        SECONDARY_EXEC_NOTIFY_VM_EXITING |                             \
-        SECONDARY_EXEC_ENCLS_EXITING)
-
-#define EVMCS1_SUPPORTED_3RDEXEC (0ULL)
-
-#define EVMCS1_SUPPORTED_VMEXIT_CTRL                                   \
-       (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |                            \
-        VM_EXIT_SAVE_DEBUG_CONTROLS |                                  \
-        VM_EXIT_ACK_INTR_ON_EXIT |                                     \
-        VM_EXIT_HOST_ADDR_SPACE_SIZE |                                 \
-        VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |                           \
-        VM_EXIT_SAVE_IA32_PAT |                                        \
-        VM_EXIT_LOAD_IA32_PAT |                                        \
-        VM_EXIT_SAVE_IA32_EFER |                                       \
-        VM_EXIT_LOAD_IA32_EFER |                                       \
-        VM_EXIT_CLEAR_BNDCFGS |                                        \
-        VM_EXIT_PT_CONCEAL_PIP |                                       \
-        VM_EXIT_CLEAR_IA32_RTIT_CTL)
-
-#define EVMCS1_SUPPORTED_VMENTRY_CTRL                                  \
-       (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |                           \
-        VM_ENTRY_LOAD_DEBUG_CONTROLS |                                 \
-        VM_ENTRY_IA32E_MODE |                                          \
-        VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |                          \
-        VM_ENTRY_LOAD_IA32_PAT |                                       \
-        VM_ENTRY_LOAD_IA32_EFER |                                      \
-        VM_ENTRY_LOAD_BNDCFGS |                                        \
-        VM_ENTRY_PT_CONCEAL_PIP |                                      \
-        VM_ENTRY_LOAD_IA32_RTIT_CTL)
-
-#define EVMCS1_SUPPORTED_VMFUNC (0)
-
-#define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
-#define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
-               {EVMCS1_OFFSET(name), clean_field}
-
-const struct evmcs_field vmcs_field_to_evmcs_1[] = {
-       /* 64 bit rw */
-       EVMCS1_FIELD(GUEST_RIP, guest_rip,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       EVMCS1_FIELD(GUEST_RSP, guest_rsp,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
-       EVMCS1_FIELD(GUEST_RFLAGS, guest_rflags,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
-       EVMCS1_FIELD(HOST_IA32_PAT, host_ia32_pat,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_IA32_EFER, host_ia32_efer,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_CR0, host_cr0,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_CR3, host_cr3,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_CR4, host_cr4,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_RIP, host_rip,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(IO_BITMAP_A, io_bitmap_a,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP),
-       EVMCS1_FIELD(IO_BITMAP_B, io_bitmap_b,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP),
-       EVMCS1_FIELD(MSR_BITMAP, msr_bitmap,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP),
-       EVMCS1_FIELD(GUEST_ES_BASE, guest_es_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_CS_BASE, guest_cs_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_SS_BASE, guest_ss_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_DS_BASE, guest_ds_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_FS_BASE, guest_fs_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_GS_BASE, guest_gs_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_LDTR_BASE, guest_ldtr_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_TR_BASE, guest_tr_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_GDTR_BASE, guest_gdtr_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_IDTR_BASE, guest_idtr_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(TSC_OFFSET, tsc_offset,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
-       EVMCS1_FIELD(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
-       EVMCS1_FIELD(VMCS_LINK_POINTER, vmcs_link_pointer,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_IA32_PAT, guest_ia32_pat,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_IA32_EFER, guest_ia32_efer,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_PDPTR0, guest_pdptr0,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_PDPTR1, guest_pdptr1,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_PDPTR2, guest_pdptr2,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_PDPTR3, guest_pdptr3,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
-       EVMCS1_FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
-       EVMCS1_FIELD(CR0_READ_SHADOW, cr0_read_shadow,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
-       EVMCS1_FIELD(CR4_READ_SHADOW, cr4_read_shadow,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
-       EVMCS1_FIELD(GUEST_CR0, guest_cr0,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
-       EVMCS1_FIELD(GUEST_CR3, guest_cr3,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
-       EVMCS1_FIELD(GUEST_CR4, guest_cr4,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
-       EVMCS1_FIELD(GUEST_DR7, guest_dr7,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
-       EVMCS1_FIELD(HOST_FS_BASE, host_fs_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
-       EVMCS1_FIELD(HOST_GS_BASE, host_gs_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
-       EVMCS1_FIELD(HOST_TR_BASE, host_tr_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
-       EVMCS1_FIELD(HOST_GDTR_BASE, host_gdtr_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
-       EVMCS1_FIELD(HOST_IDTR_BASE, host_idtr_base,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
-       EVMCS1_FIELD(HOST_RSP, host_rsp,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
-       EVMCS1_FIELD(EPT_POINTER, ept_pointer,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT),
-       EVMCS1_FIELD(GUEST_BNDCFGS, guest_bndcfgs,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(XSS_EXIT_BITMAP, xss_exit_bitmap,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
-       EVMCS1_FIELD(ENCLS_EXITING_BITMAP, encls_exiting_bitmap,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
-       EVMCS1_FIELD(TSC_MULTIPLIER, tsc_multiplier,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
-       /*
-        * Not used by KVM:
-        *
-        * EVMCS1_FIELD(0x00006828, guest_ia32_s_cet,
-        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-        * EVMCS1_FIELD(0x0000682A, guest_ssp,
-        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
-        * EVMCS1_FIELD(0x0000682C, guest_ia32_int_ssp_table_addr,
-        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-        * EVMCS1_FIELD(0x00002816, guest_ia32_lbr_ctl,
-        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-        * EVMCS1_FIELD(0x00006C18, host_ia32_s_cet,
-        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-        * EVMCS1_FIELD(0x00006C1A, host_ssp,
-        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-        * EVMCS1_FIELD(0x00006C1C, host_ia32_int_ssp_table_addr,
-        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-        */
-
-       /* 64 bit read only */
-       EVMCS1_FIELD(GUEST_PHYSICAL_ADDRESS, guest_physical_address,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       EVMCS1_FIELD(EXIT_QUALIFICATION, exit_qualification,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       /*
-        * Not defined in KVM:
-        *
-        * EVMCS1_FIELD(0x00006402, exit_io_instruction_ecx,
-        *              HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
-        * EVMCS1_FIELD(0x00006404, exit_io_instruction_esi,
-        *              HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
-        * EVMCS1_FIELD(0x00006406, exit_io_instruction_esi,
-        *              HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
-        * EVMCS1_FIELD(0x00006408, exit_io_instruction_eip,
-        *              HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
-        */
-       EVMCS1_FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-
-       /*
-        * No mask defined in the spec as Hyper-V doesn't currently support
-        * these. Future proof by resetting the whole clean field mask on
-        * access.
-        */
-       EVMCS1_FIELD(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
-       EVMCS1_FIELD(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
-       EVMCS1_FIELD(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
-
-       /* 32 bit rw */
-       EVMCS1_FIELD(TPR_THRESHOLD, tpr_threshold,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       EVMCS1_FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
-       EVMCS1_FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC),
-       EVMCS1_FIELD(EXCEPTION_BITMAP, exception_bitmap,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN),
-       EVMCS1_FIELD(VM_ENTRY_CONTROLS, vm_entry_controls,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY),
-       EVMCS1_FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
-       EVMCS1_FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE,
-                    vm_entry_exception_error_code,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
-       EVMCS1_FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
-       EVMCS1_FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
-       EVMCS1_FIELD(VM_EXIT_CONTROLS, vm_exit_controls,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
-       EVMCS1_FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
-       EVMCS1_FIELD(GUEST_ES_LIMIT, guest_es_limit,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_CS_LIMIT, guest_cs_limit,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_SS_LIMIT, guest_ss_limit,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_DS_LIMIT, guest_ds_limit,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_FS_LIMIT, guest_fs_limit,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_GS_LIMIT, guest_gs_limit,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_TR_LIMIT, guest_tr_limit,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_ACTIVITY_STATE, guest_activity_state,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-       EVMCS1_FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
-
-       /* 32 bit read only */
-       EVMCS1_FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       EVMCS1_FIELD(VM_EXIT_REASON, vm_exit_reason,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       EVMCS1_FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       EVMCS1_FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       EVMCS1_FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       EVMCS1_FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       EVMCS1_FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-       EVMCS1_FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
-
-       /* No mask defined in the spec (not used) */
-       EVMCS1_FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
-       EVMCS1_FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
-       EVMCS1_FIELD(CR3_TARGET_COUNT, cr3_target_count,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
-       EVMCS1_FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
-       EVMCS1_FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
-       EVMCS1_FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
-
-       /* 16 bit rw */
-       EVMCS1_FIELD(HOST_ES_SELECTOR, host_es_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_CS_SELECTOR, host_cs_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_SS_SELECTOR, host_ss_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_DS_SELECTOR, host_ds_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_FS_SELECTOR, host_fs_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_GS_SELECTOR, host_gs_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(HOST_TR_SELECTOR, host_tr_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
-       EVMCS1_FIELD(GUEST_ES_SELECTOR, guest_es_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_CS_SELECTOR, guest_cs_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_SS_SELECTOR, guest_ss_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_DS_SELECTOR, guest_ds_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_FS_SELECTOR, guest_fs_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_GS_SELECTOR, guest_gs_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(GUEST_TR_SELECTOR, guest_tr_selector,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
-       EVMCS1_FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id,
-                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT),
-};
-const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);
-
 u64 nested_get_evmptr(struct kvm_vcpu *vcpu)
 {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
@@ -608,40 +195,6 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
        return 0;
 }
 
-#if IS_ENABLED(CONFIG_HYPERV)
-DEFINE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
-
-/*
- * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption
- * is: in case a feature has corresponding fields in eVMCS described and it was
- * exposed in VMX feature MSRs, KVM is free to use it. Warn if KVM meets a
- * feature which has no corresponding eVMCS field, this likely means that KVM
- * needs to be updated.
- */
-#define evmcs_check_vmcs_conf(field, ctrl)                                     \
-       do {                                                                    \
-               typeof(vmcs_conf->field) unsupported;                           \
-                                                                               \
-               unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl;    \
-               if (unsupported) {                                              \
-                       pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\
-                                    (u64)unsupported);                         \
-                       vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl;          \
-               }                                                               \
-       }                                                                       \
-       while (0)
-
-void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
-{
-       evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL);
-       evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL);
-       evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC);
-       evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC);
-       evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL);
-       evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL);
-}
-#endif
-
 int nested_enable_evmcs(struct kvm_vcpu *vcpu,
                        uint16_t *vmcs_version)
 {
index 9623fe1651c48be2ae1f53affa96af14f3f8971e..a87407412615c2b5a7658eaff4a01130219dd36f 100644 (file)
 #ifndef __KVM_X86_VMX_HYPERV_H
 #define __KVM_X86_VMX_HYPERV_H
 
-#include <linux/jump_label.h>
-
-#include <asm/hyperv-tlfs.h>
-#include <asm/mshyperv.h>
-#include <asm/vmx.h>
-
-#include "../hyperv.h"
-
-#include "capabilities.h"
-#include "vmcs.h"
+#include <linux/kvm_host.h>
 #include "vmcs12.h"
+#include "vmx.h"
 
-struct vmcs_config;
-
-#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
-
-#define KVM_EVMCS_VERSION 1
+#define EVMPTR_INVALID (-1ULL)
+#define EVMPTR_MAP_PENDING (-2ULL)
 
-struct evmcs_field {
-       u16 offset;
-       u16 clean_field;
+enum nested_evmptrld_status {
+       EVMPTRLD_DISABLED,
+       EVMPTRLD_SUCCEEDED,
+       EVMPTRLD_VMFAIL,
+       EVMPTRLD_ERROR,
 };
 
-extern const struct evmcs_field vmcs_field_to_evmcs_1[];
-extern const unsigned int nr_evmcs_1_fields;
-
-static __always_inline int evmcs_field_offset(unsigned long field,
-                                             u16 *clean_field)
-{
-       unsigned int index = ROL16(field, 6);
-       const struct evmcs_field *evmcs_field;
-
-       if (unlikely(index >= nr_evmcs_1_fields))
-               return -ENOENT;
-
-       evmcs_field = &vmcs_field_to_evmcs_1[index];
-
-       /*
-        * Use offset=0 to detect holes in eVMCS. This offset belongs to
-        * 'revision_id' but this field has no encoding and is supposed to
-        * be accessed directly.
-        */
-       if (unlikely(!evmcs_field->offset))
-               return -ENOENT;
-
-       if (clean_field)
-               *clean_field = evmcs_field->clean_field;
-
-       return evmcs_field->offset;
-}
-
-static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
-                                unsigned long field, u16 offset)
+#ifdef CONFIG_KVM_HYPERV
+static inline bool evmptr_is_valid(u64 evmptr)
 {
-       /*
-        * vmcs12_read_any() doesn't care whether the supplied structure
-        * is 'struct vmcs12' or 'struct hv_enlightened_vmcs' as it takes
-        * the exact offset of the required field, use it for convenience
-        * here.
-        */
-       return vmcs12_read_any((void *)evmcs, field, offset);
+       return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
 }
 
-#if IS_ENABLED(CONFIG_HYPERV)
-
-DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
-
-static __always_inline bool kvm_is_using_evmcs(void)
+static inline bool nested_vmx_is_evmptr12_valid(struct vcpu_vmx *vmx)
 {
-       return static_branch_unlikely(&__kvm_is_using_evmcs);
+       return evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
 }
 
-static __always_inline int get_evmcs_offset(unsigned long field,
-                                           u16 *clean_field)
+static inline bool evmptr_is_set(u64 evmptr)
 {
-       int offset = evmcs_field_offset(field, clean_field);
-
-       WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
-       return offset;
+       return evmptr != EVMPTR_INVALID;
 }
 
-static __always_inline void evmcs_write64(unsigned long field, u64 value)
+static inline bool nested_vmx_is_evmptr12_set(struct vcpu_vmx *vmx)
 {
-       u16 clean_field;
-       int offset = get_evmcs_offset(field, &clean_field);
-
-       if (offset < 0)
-               return;
-
-       *(u64 *)((char *)current_evmcs + offset) = value;
-
-       current_evmcs->hv_clean_fields &= ~clean_field;
+       return evmptr_is_set(vmx->nested.hv_evmcs_vmptr);
 }
 
-static __always_inline void evmcs_write32(unsigned long field, u32 value)
+static inline struct hv_enlightened_vmcs *nested_vmx_evmcs(struct vcpu_vmx *vmx)
 {
-       u16 clean_field;
-       int offset = get_evmcs_offset(field, &clean_field);
-
-       if (offset < 0)
-               return;
-
-       *(u32 *)((char *)current_evmcs + offset) = value;
-       current_evmcs->hv_clean_fields &= ~clean_field;
+       return vmx->nested.hv_evmcs;
 }
 
-static __always_inline void evmcs_write16(unsigned long field, u16 value)
+static inline bool guest_cpuid_has_evmcs(struct kvm_vcpu *vcpu)
 {
-       u16 clean_field;
-       int offset = get_evmcs_offset(field, &clean_field);
-
-       if (offset < 0)
-               return;
-
-       *(u16 *)((char *)current_evmcs + offset) = value;
-       current_evmcs->hv_clean_fields &= ~clean_field;
+       /*
+        * eVMCS is exposed to the guest if Hyper-V is enabled in CPUID and
+        * eVMCS has been explicitly enabled by userspace.
+        */
+       return vcpu->arch.hyperv_enabled &&
+              to_vmx(vcpu)->nested.enlightened_vmcs_enabled;
 }
 
-static __always_inline u64 evmcs_read64(unsigned long field)
+u64 nested_get_evmptr(struct kvm_vcpu *vcpu);
+uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
+int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+                       uint16_t *vmcs_version);
+void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
+int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
+bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu);
+void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
+#else
+static inline bool evmptr_is_valid(u64 evmptr)
 {
-       int offset = get_evmcs_offset(field, NULL);
-
-       if (offset < 0)
-               return 0;
-
-       return *(u64 *)((char *)current_evmcs + offset);
+       return false;
 }
 
-static __always_inline u32 evmcs_read32(unsigned long field)
+static inline bool nested_vmx_is_evmptr12_valid(struct vcpu_vmx *vmx)
 {
-       int offset = get_evmcs_offset(field, NULL);
-
-       if (offset < 0)
-               return 0;
-
-       return *(u32 *)((char *)current_evmcs + offset);
+       return false;
 }
 
-static __always_inline u16 evmcs_read16(unsigned long field)
+static inline bool evmptr_is_set(u64 evmptr)
 {
-       int offset = get_evmcs_offset(field, NULL);
-
-       if (offset < 0)
-               return 0;
-
-       return *(u16 *)((char *)current_evmcs + offset);
+       return false;
 }
 
-static inline void evmcs_load(u64 phys_addr)
+static inline bool nested_vmx_is_evmptr12_set(struct vcpu_vmx *vmx)
 {
-       struct hv_vp_assist_page *vp_ap =
-               hv_get_vp_assist_page(smp_processor_id());
-
-       if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
-               vp_ap->nested_control.features.directhypercall = 1;
-       vp_ap->current_nested_vmcs = phys_addr;
-       vp_ap->enlighten_vmentry = 1;
+       return false;
 }
 
-void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
-#else /* !IS_ENABLED(CONFIG_HYPERV) */
-static __always_inline bool kvm_is_using_evmcs(void) { return false; }
-static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
-static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
-static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
-static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
-static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
-static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
-static inline void evmcs_load(u64 phys_addr) {}
-#endif /* IS_ENABLED(CONFIG_HYPERV) */
-
-#define EVMPTR_INVALID (-1ULL)
-#define EVMPTR_MAP_PENDING (-2ULL)
-
-static inline bool evmptr_is_valid(u64 evmptr)
+static inline struct hv_enlightened_vmcs *nested_vmx_evmcs(struct vcpu_vmx *vmx)
 {
-       return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
+       return NULL;
 }
-
-enum nested_evmptrld_status {
-       EVMPTRLD_DISABLED,
-       EVMPTRLD_SUCCEEDED,
-       EVMPTRLD_VMFAIL,
-       EVMPTRLD_ERROR,
-};
-
-u64 nested_get_evmptr(struct kvm_vcpu *vcpu);
-uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
-int nested_enable_evmcs(struct kvm_vcpu *vcpu,
-                       uint16_t *vmcs_version);
-void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
-int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
-bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu);
-void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
+#endif
 
 #endif /* __KVM_X86_VMX_HYPERV_H */
diff --git a/arch/x86/kvm/vmx/hyperv_evmcs.c b/arch/x86/kvm/vmx/hyperv_evmcs.c
new file mode 100644 (file)
index 0000000..904bfcd
--- /dev/null
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains common code for working with Enlightened VMCS which is
+ * used both by Hyper-V on KVM and KVM on Hyper-V.
+ */
+
+#include "hyperv_evmcs.h"
+
+#define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
+#define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
+               {EVMCS1_OFFSET(name), clean_field}
+
+const struct evmcs_field vmcs_field_to_evmcs_1[] = {
+       /* 64 bit rw */
+       EVMCS1_FIELD(GUEST_RIP, guest_rip,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       EVMCS1_FIELD(GUEST_RSP, guest_rsp,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
+       EVMCS1_FIELD(GUEST_RFLAGS, guest_rflags,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
+       EVMCS1_FIELD(HOST_IA32_PAT, host_ia32_pat,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_IA32_EFER, host_ia32_efer,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_CR0, host_cr0,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_CR3, host_cr3,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_CR4, host_cr4,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_RIP, host_rip,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(IO_BITMAP_A, io_bitmap_a,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP),
+       EVMCS1_FIELD(IO_BITMAP_B, io_bitmap_b,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP),
+       EVMCS1_FIELD(MSR_BITMAP, msr_bitmap,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP),
+       EVMCS1_FIELD(GUEST_ES_BASE, guest_es_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_CS_BASE, guest_cs_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_SS_BASE, guest_ss_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_DS_BASE, guest_ds_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_FS_BASE, guest_fs_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_GS_BASE, guest_gs_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_LDTR_BASE, guest_ldtr_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_TR_BASE, guest_tr_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_GDTR_BASE, guest_gdtr_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_IDTR_BASE, guest_idtr_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(TSC_OFFSET, tsc_offset,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
+       EVMCS1_FIELD(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
+       EVMCS1_FIELD(VMCS_LINK_POINTER, vmcs_link_pointer,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_IA32_PAT, guest_ia32_pat,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_IA32_EFER, guest_ia32_efer,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_PDPTR0, guest_pdptr0,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_PDPTR1, guest_pdptr1,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_PDPTR2, guest_pdptr2,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_PDPTR3, guest_pdptr3,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
+       EVMCS1_FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
+       EVMCS1_FIELD(CR0_READ_SHADOW, cr0_read_shadow,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
+       EVMCS1_FIELD(CR4_READ_SHADOW, cr4_read_shadow,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
+       EVMCS1_FIELD(GUEST_CR0, guest_cr0,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
+       EVMCS1_FIELD(GUEST_CR3, guest_cr3,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
+       EVMCS1_FIELD(GUEST_CR4, guest_cr4,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
+       EVMCS1_FIELD(GUEST_DR7, guest_dr7,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR),
+       EVMCS1_FIELD(HOST_FS_BASE, host_fs_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
+       EVMCS1_FIELD(HOST_GS_BASE, host_gs_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
+       EVMCS1_FIELD(HOST_TR_BASE, host_tr_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
+       EVMCS1_FIELD(HOST_GDTR_BASE, host_gdtr_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
+       EVMCS1_FIELD(HOST_IDTR_BASE, host_idtr_base,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
+       EVMCS1_FIELD(HOST_RSP, host_rsp,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER),
+       EVMCS1_FIELD(EPT_POINTER, ept_pointer,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT),
+       EVMCS1_FIELD(GUEST_BNDCFGS, guest_bndcfgs,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(XSS_EXIT_BITMAP, xss_exit_bitmap,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
+       EVMCS1_FIELD(ENCLS_EXITING_BITMAP, encls_exiting_bitmap,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
+       EVMCS1_FIELD(TSC_MULTIPLIER, tsc_multiplier,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
+       /*
+        * Not used by KVM:
+        *
+        * EVMCS1_FIELD(0x00006828, guest_ia32_s_cet,
+        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+        * EVMCS1_FIELD(0x0000682A, guest_ssp,
+        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
+        * EVMCS1_FIELD(0x0000682C, guest_ia32_int_ssp_table_addr,
+        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+        * EVMCS1_FIELD(0x00002816, guest_ia32_lbr_ctl,
+        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+        * EVMCS1_FIELD(0x00006C18, host_ia32_s_cet,
+        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+        * EVMCS1_FIELD(0x00006C1A, host_ssp,
+        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+        * EVMCS1_FIELD(0x00006C1C, host_ia32_int_ssp_table_addr,
+        *           HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+        */
+
+       /* 64 bit read only */
+       EVMCS1_FIELD(GUEST_PHYSICAL_ADDRESS, guest_physical_address,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       EVMCS1_FIELD(EXIT_QUALIFICATION, exit_qualification,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       /*
+        * Not defined in KVM:
+        *
+        * EVMCS1_FIELD(0x00006402, exit_io_instruction_ecx,
+        *              HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
+        * EVMCS1_FIELD(0x00006404, exit_io_instruction_esi,
+        *              HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
+        * EVMCS1_FIELD(0x00006406, exit_io_instruction_esi,
+        *              HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
+        * EVMCS1_FIELD(0x00006408, exit_io_instruction_eip,
+        *              HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE);
+        */
+       EVMCS1_FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+
+       /*
+        * No mask defined in the spec as Hyper-V doesn't currently support
+        * these. Future proof by resetting the whole clean field mask on
+        * access.
+        */
+       EVMCS1_FIELD(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
+       EVMCS1_FIELD(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
+       EVMCS1_FIELD(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
+
+       /* 32 bit rw */
+       EVMCS1_FIELD(TPR_THRESHOLD, tpr_threshold,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       EVMCS1_FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
+       EVMCS1_FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC),
+       EVMCS1_FIELD(EXCEPTION_BITMAP, exception_bitmap,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN),
+       EVMCS1_FIELD(VM_ENTRY_CONTROLS, vm_entry_controls,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY),
+       EVMCS1_FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
+       EVMCS1_FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE,
+                    vm_entry_exception_error_code,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
+       EVMCS1_FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT),
+       EVMCS1_FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
+       EVMCS1_FIELD(VM_EXIT_CONTROLS, vm_exit_controls,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
+       EVMCS1_FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1),
+       EVMCS1_FIELD(GUEST_ES_LIMIT, guest_es_limit,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_CS_LIMIT, guest_cs_limit,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_SS_LIMIT, guest_ss_limit,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_DS_LIMIT, guest_ds_limit,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_FS_LIMIT, guest_fs_limit,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_GS_LIMIT, guest_gs_limit,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_TR_LIMIT, guest_tr_limit,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_ACTIVITY_STATE, guest_activity_state,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+       EVMCS1_FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+
+       /* 32 bit read only */
+       EVMCS1_FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       EVMCS1_FIELD(VM_EXIT_REASON, vm_exit_reason,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       EVMCS1_FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       EVMCS1_FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       EVMCS1_FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       EVMCS1_FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       EVMCS1_FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+       EVMCS1_FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
+
+       /* No mask defined in the spec (not used) */
+       EVMCS1_FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
+       EVMCS1_FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
+       EVMCS1_FIELD(CR3_TARGET_COUNT, cr3_target_count,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
+       EVMCS1_FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
+       EVMCS1_FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
+       EVMCS1_FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL),
+
+       /* 16 bit rw */
+       EVMCS1_FIELD(HOST_ES_SELECTOR, host_es_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_CS_SELECTOR, host_cs_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_SS_SELECTOR, host_ss_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_DS_SELECTOR, host_ds_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_FS_SELECTOR, host_fs_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_GS_SELECTOR, host_gs_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(HOST_TR_SELECTOR, host_tr_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+       EVMCS1_FIELD(GUEST_ES_SELECTOR, guest_es_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_CS_SELECTOR, guest_cs_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_SS_SELECTOR, guest_ss_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_DS_SELECTOR, guest_ds_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_FS_SELECTOR, guest_fs_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_GS_SELECTOR, guest_gs_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(GUEST_TR_SELECTOR, guest_tr_selector,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2),
+       EVMCS1_FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id,
+                    HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT),
+};
+const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);
diff --git a/arch/x86/kvm/vmx/hyperv_evmcs.h b/arch/x86/kvm/vmx/hyperv_evmcs.h
new file mode 100644 (file)
index 0000000..a543fcc
--- /dev/null
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains common definitions for working with Enlightened VMCS which
+ * are used both by Hyper-V on KVM and KVM on Hyper-V.
+ */
+#ifndef __KVM_X86_VMX_HYPERV_EVMCS_H
+#define __KVM_X86_VMX_HYPERV_EVMCS_H
+
+#include <asm/hyperv-tlfs.h>
+
+#include "capabilities.h"
+#include "vmcs12.h"
+
+#define KVM_EVMCS_VERSION 1
+
+/*
+ * Enlightened VMCSv1 doesn't support these:
+ *
+ *     POSTED_INTR_NV                  = 0x00000002,
+ *     GUEST_INTR_STATUS               = 0x00000810,
+ *     APIC_ACCESS_ADDR                = 0x00002014,
+ *     POSTED_INTR_DESC_ADDR           = 0x00002016,
+ *     EOI_EXIT_BITMAP0                = 0x0000201c,
+ *     EOI_EXIT_BITMAP1                = 0x0000201e,
+ *     EOI_EXIT_BITMAP2                = 0x00002020,
+ *     EOI_EXIT_BITMAP3                = 0x00002022,
+ *     GUEST_PML_INDEX                 = 0x00000812,
+ *     PML_ADDRESS                     = 0x0000200e,
+ *     VM_FUNCTION_CONTROL             = 0x00002018,
+ *     EPTP_LIST_ADDRESS               = 0x00002024,
+ *     VMREAD_BITMAP                   = 0x00002026,
+ *     VMWRITE_BITMAP                  = 0x00002028,
+ *
+ *     TSC_MULTIPLIER                  = 0x00002032,
+ *     PLE_GAP                         = 0x00004020,
+ *     PLE_WINDOW                      = 0x00004022,
+ *     VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
+ *
+ * Currently unsupported in KVM:
+ *     GUEST_IA32_RTIT_CTL             = 0x00002814,
+ */
+#define EVMCS1_SUPPORTED_PINCTRL                                       \
+       (PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |                          \
+        PIN_BASED_EXT_INTR_MASK |                                      \
+        PIN_BASED_NMI_EXITING |                                        \
+        PIN_BASED_VIRTUAL_NMIS)
+
+#define EVMCS1_SUPPORTED_EXEC_CTRL                                     \
+       (CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |                          \
+        CPU_BASED_HLT_EXITING |                                        \
+        CPU_BASED_CR3_LOAD_EXITING |                                   \
+        CPU_BASED_CR3_STORE_EXITING |                                  \
+        CPU_BASED_UNCOND_IO_EXITING |                                  \
+        CPU_BASED_MOV_DR_EXITING |                                     \
+        CPU_BASED_USE_TSC_OFFSETTING |                                 \
+        CPU_BASED_MWAIT_EXITING |                                      \
+        CPU_BASED_MONITOR_EXITING |                                    \
+        CPU_BASED_INVLPG_EXITING |                                     \
+        CPU_BASED_RDPMC_EXITING |                                      \
+        CPU_BASED_INTR_WINDOW_EXITING |                                \
+        CPU_BASED_CR8_LOAD_EXITING |                                   \
+        CPU_BASED_CR8_STORE_EXITING |                                  \
+        CPU_BASED_RDTSC_EXITING |                                      \
+        CPU_BASED_TPR_SHADOW |                                         \
+        CPU_BASED_USE_IO_BITMAPS |                                     \
+        CPU_BASED_MONITOR_TRAP_FLAG |                                  \
+        CPU_BASED_USE_MSR_BITMAPS |                                    \
+        CPU_BASED_NMI_WINDOW_EXITING |                                 \
+        CPU_BASED_PAUSE_EXITING |                                      \
+        CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
+
+#define EVMCS1_SUPPORTED_2NDEXEC                                       \
+       (SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |                        \
+        SECONDARY_EXEC_WBINVD_EXITING |                                \
+        SECONDARY_EXEC_ENABLE_VPID |                                   \
+        SECONDARY_EXEC_ENABLE_EPT |                                    \
+        SECONDARY_EXEC_UNRESTRICTED_GUEST |                            \
+        SECONDARY_EXEC_DESC |                                          \
+        SECONDARY_EXEC_ENABLE_RDTSCP |                                 \
+        SECONDARY_EXEC_ENABLE_INVPCID |                                \
+        SECONDARY_EXEC_ENABLE_XSAVES |                                 \
+        SECONDARY_EXEC_RDSEED_EXITING |                                \
+        SECONDARY_EXEC_RDRAND_EXITING |                                \
+        SECONDARY_EXEC_TSC_SCALING |                                   \
+        SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |                         \
+        SECONDARY_EXEC_PT_USE_GPA |                                    \
+        SECONDARY_EXEC_PT_CONCEAL_VMX |                                \
+        SECONDARY_EXEC_BUS_LOCK_DETECTION |                            \
+        SECONDARY_EXEC_NOTIFY_VM_EXITING |                             \
+        SECONDARY_EXEC_ENCLS_EXITING)
+
+#define EVMCS1_SUPPORTED_3RDEXEC (0ULL)
+
+#define EVMCS1_SUPPORTED_VMEXIT_CTRL                                   \
+       (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |                            \
+        VM_EXIT_SAVE_DEBUG_CONTROLS |                                  \
+        VM_EXIT_ACK_INTR_ON_EXIT |                                     \
+        VM_EXIT_HOST_ADDR_SPACE_SIZE |                                 \
+        VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |                           \
+        VM_EXIT_SAVE_IA32_PAT |                                        \
+        VM_EXIT_LOAD_IA32_PAT |                                        \
+        VM_EXIT_SAVE_IA32_EFER |                                       \
+        VM_EXIT_LOAD_IA32_EFER |                                       \
+        VM_EXIT_CLEAR_BNDCFGS |                                        \
+        VM_EXIT_PT_CONCEAL_PIP |                                       \
+        VM_EXIT_CLEAR_IA32_RTIT_CTL)
+
+#define EVMCS1_SUPPORTED_VMENTRY_CTRL                                  \
+       (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |                           \
+        VM_ENTRY_LOAD_DEBUG_CONTROLS |                                 \
+        VM_ENTRY_IA32E_MODE |                                          \
+        VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |                          \
+        VM_ENTRY_LOAD_IA32_PAT |                                       \
+        VM_ENTRY_LOAD_IA32_EFER |                                      \
+        VM_ENTRY_LOAD_BNDCFGS |                                        \
+        VM_ENTRY_PT_CONCEAL_PIP |                                      \
+        VM_ENTRY_LOAD_IA32_RTIT_CTL)
+
+#define EVMCS1_SUPPORTED_VMFUNC (0)
+
+struct evmcs_field {
+       u16 offset;
+       u16 clean_field;
+};
+
+extern const struct evmcs_field vmcs_field_to_evmcs_1[];
+extern const unsigned int nr_evmcs_1_fields;
+
+static __always_inline int evmcs_field_offset(unsigned long field,
+                                             u16 *clean_field)
+{
+       const struct evmcs_field *evmcs_field;
+       unsigned int index = ROL16(field, 6);
+
+       if (unlikely(index >= nr_evmcs_1_fields))
+               return -ENOENT;
+
+       evmcs_field = &vmcs_field_to_evmcs_1[index];
+
+       /*
+        * Use offset=0 to detect holes in eVMCS. This offset belongs to
+        * 'revision_id' but this field has no encoding and is supposed to
+        * be accessed directly.
+        */
+       if (unlikely(!evmcs_field->offset))
+               return -ENOENT;
+
+       if (clean_field)
+               *clean_field = evmcs_field->clean_field;
+
+       return evmcs_field->offset;
+}
+
+static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
+                                unsigned long field, u16 offset)
+{
+       /*
+        * vmcs12_read_any() doesn't care whether the supplied structure
+        * is 'struct vmcs12' or 'struct hv_enlightened_vmcs' as it takes
+        * the exact offset of the required field, use it for convenience
+        * here.
+        */
+       return vmcs12_read_any((void *)evmcs, field, offset);
+}
+
+#endif /* __KVM_X86_VMX_HYPERV_EVMCS_H */
index 65826fe23f3385f63075b6f551b832602ed7e3d4..6329a306856b28972ca32af5f708bb9408c60896 100644 (file)
@@ -179,7 +179,7 @@ static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
         * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
         * fields and thus must be synced.
         */
-       if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
+       if (nested_vmx_is_evmptr12_set(to_vmx(vcpu)))
                to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
 
        return kvm_skip_emulated_instruction(vcpu);
@@ -194,7 +194,7 @@ static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
         * can't be done if there isn't a current VMCS.
         */
        if (vmx->nested.current_vmptr == INVALID_GPA &&
-           !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+           !nested_vmx_is_evmptr12_valid(vmx))
                return nested_vmx_failInvalid(vcpu);
 
        return nested_vmx_failValid(vcpu, vm_instruction_error);
@@ -226,10 +226,11 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
 
 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+       if (nested_vmx_is_evmptr12_valid(vmx)) {
                kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
                vmx->nested.hv_evmcs = NULL;
        }
@@ -241,6 +242,34 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
                hv_vcpu->nested.vm_id = 0;
                hv_vcpu->nested.vp_id = 0;
        }
+#endif
+}
+
+static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
+{
+#ifdef CONFIG_KVM_HYPERV
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       /*
+        * When Enlightened VMEntry is enabled on the calling CPU we treat
+        * memory area pointed to by vmptr as Enlightened VMCS (as there's no good
+        * way to distinguish it from VMCS12) and we must not corrupt it by
+        * writing to the non-existent 'launch_state' field. The area doesn't
+        * have to be the currently active EVMCS on the calling CPU and there's
+        * nothing KVM has to do to transition it from 'active' to 'non-active'
+        * state. It is possible that the area will stay mapped as
+        * vmx->nested.hv_evmcs but this shouldn't be a problem.
+        */
+       if (!guest_cpuid_has_evmcs(vcpu) ||
+           !evmptr_is_valid(nested_get_evmptr(vcpu)))
+               return false;
+
+       if (nested_vmx_evmcs(vmx) && vmptr == vmx->nested.hv_evmcs_vmptr)
+               nested_release_evmcs(vcpu);
+
+       return true;
+#else
+       return false;
+#endif
 }
 
 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
@@ -572,7 +601,6 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
        int msr;
        unsigned long *msr_bitmap_l1;
        unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
-       struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
        struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
 
        /* Nothing to do if the MSR bitmap is not in use.  */
@@ -588,10 +616,13 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
         * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature
         *   and tells KVM (L0) there were no changes in MSR bitmap for L2.
         */
-       if (!vmx->nested.force_msr_bitmap_recalc && evmcs &&
-           evmcs->hv_enlightenments_control.msr_bitmap &&
-           evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
-               return true;
+       if (!vmx->nested.force_msr_bitmap_recalc) {
+               struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
+
+               if (evmcs && evmcs->hv_enlightenments_control.msr_bitmap &&
+                   evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
+                       return true;
+       }
 
        if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
                return false;
@@ -1085,7 +1116,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                               bool nested_ept, bool reload_pdptrs,
                               enum vm_entry_failure_code *entry_failure_code)
 {
-       if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
+       if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) {
                *entry_failure_code = ENTRY_FAIL_DEFAULT;
                return -EINVAL;
        }
@@ -1139,14 +1170,8 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       /*
-        * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
-        * L2's VP_ID upon request from the guest. Make sure we check for
-        * pending entries in the right FIFO upon L1/L2 transition as these
-        * requests are put by other vCPUs asynchronously.
-        */
-       if (to_hv_vcpu(vcpu) && enable_ept)
-               kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+       /* Handle pending Hyper-V TLB flush requests */
+       kvm_hv_nested_transtion_tlb_flush(vcpu, enable_ept);
 
        /*
         * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
@@ -1578,8 +1603,9 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 
 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
-       struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+       struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu);
 
        /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
@@ -1818,12 +1844,16 @@ static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields
         */
 
        return;
+#else /* CONFIG_KVM_HYPERV */
+       KVM_BUG_ON(1, vmx->vcpu.kvm);
+#endif /* CONFIG_KVM_HYPERV */
 }
 
 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
-       struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+       struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
 
        /*
         * Should not be changed by KVM:
@@ -1992,6 +2022,9 @@ static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
        evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
 
        return;
+#else /* CONFIG_KVM_HYPERV */
+       KVM_BUG_ON(1, vmx->vcpu.kvm);
+#endif /* CONFIG_KVM_HYPERV */
 }
 
 /*
@@ -2001,6 +2034,7 @@ static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
        struct kvm_vcpu *vcpu, bool from_launch)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        bool evmcs_gpa_changed = false;
        u64 evmcs_gpa;
@@ -2082,13 +2116,16 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
        }
 
        return EVMPTRLD_SUCCEEDED;
+#else
+       return EVMPTRLD_DISABLED;
+#endif
 }
 
 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+       if (nested_vmx_is_evmptr12_valid(vmx))
                copy_vmcs12_to_enlightened(vmx);
        else
                copy_vmcs12_to_shadow(vmx);
@@ -2242,7 +2279,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
        u32 exec_control;
        u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
 
-       if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+       if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx))
                prepare_vmcs02_early_rare(vmx, vmcs12);
 
        /*
@@ -2403,7 +2440,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
 
 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 {
-       struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
+       struct hv_enlightened_vmcs *hv_evmcs = nested_vmx_evmcs(vmx);
 
        if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
                           HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
@@ -2535,15 +2572,15 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                          enum vm_entry_failure_code *entry_failure_code)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
        bool load_guest_pdptrs_vmcs12 = false;
 
-       if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+       if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) {
                prepare_vmcs02_rare(vmx, vmcs12);
                vmx->nested.dirty_vmcs12 = false;
 
-               load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
-                       !(vmx->nested.hv_evmcs->hv_clean_fields &
-                         HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
+               load_guest_pdptrs_vmcs12 = !nested_vmx_is_evmptr12_valid(vmx) ||
+                       !(evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
        }
 
        if (vmx->nested.nested_run_pending &&
@@ -2664,9 +2701,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         * bits when it changes a field in eVMCS. Mark all fields as clean
         * here.
         */
-       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
-               vmx->nested.hv_evmcs->hv_clean_fields |=
-                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+       if (nested_vmx_is_evmptr12_valid(vmx))
+               evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
 
        return 0;
 }
@@ -2717,7 +2753,7 @@ static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
        }
 
        /* Reserved bits should not be set */
-       if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
+       if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
                return false;
 
        /* AD, if set, should be supported */
@@ -2888,8 +2924,10 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
            nested_check_vm_entry_controls(vcpu, vmcs12))
                return -EINVAL;
 
+#ifdef CONFIG_KVM_HYPERV
        if (guest_cpuid_has_evmcs(vcpu))
                return nested_evmcs_check_controls(vmcs12);
+#endif
 
        return 0;
 }
@@ -2912,7 +2950,7 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
 
        if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
            CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
-           CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
+           CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3)))
                return -EINVAL;
 
        if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
@@ -3161,6 +3199,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+#ifdef CONFIG_KVM_HYPERV
 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3188,6 +3227,7 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 
        return true;
 }
+#endif
 
 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
@@ -3279,6 +3319,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 
 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
+#ifdef CONFIG_KVM_HYPERV
        /*
         * Note: nested_get_evmcs_page() also updates 'vp_assist_page' copy
         * in 'struct kvm_vcpu_hv' in case eVMCS is in use, this is mandatory
@@ -3295,6 +3336,7 @@ static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 
                return false;
        }
+#endif
 
        if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
                return false;
@@ -3538,7 +3580,7 @@ vmentry_fail_vmexit:
 
        load_vmcs12_host_state(vcpu, vmcs12);
        vmcs12->vm_exit_reason = exit_reason.full;
-       if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+       if (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx))
                vmx->nested.need_vmcs12_to_shadow_sync = true;
        return NVMX_VMENTRY_VMEXIT;
 }
@@ -3569,7 +3611,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
                return nested_vmx_failInvalid(vcpu);
 
-       if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
+       if (CC(!nested_vmx_is_evmptr12_valid(vmx) &&
               vmx->nested.current_vmptr == INVALID_GPA))
                return nested_vmx_failInvalid(vcpu);
 
@@ -3584,8 +3626,10 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (CC(vmcs12->hdr.shadow_vmcs))
                return nested_vmx_failInvalid(vcpu);
 
-       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
-               copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
+       if (nested_vmx_is_evmptr12_valid(vmx)) {
+               struct hv_enlightened_vmcs *evmcs = nested_vmx_evmcs(vmx);
+
+               copy_enlightened_to_vmcs12(vmx, evmcs->hv_clean_fields);
                /* Enlightened VMCS doesn't have launch state */
                vmcs12->launch_state = !launch;
        } else if (enable_shadow_vmcs) {
@@ -4329,11 +4373,11 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+       if (nested_vmx_is_evmptr12_valid(vmx))
                sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
        vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
-               !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
+               !nested_vmx_is_evmptr12_valid(vmx);
 
        vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
        vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
@@ -4732,6 +4776,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
        /* trying to cancel vmlaunch/vmresume is a bug */
        WARN_ON_ONCE(vmx->nested.nested_run_pending);
 
+#ifdef CONFIG_KVM_HYPERV
        if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
                /*
                 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
@@ -4741,6 +4786,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
                 */
                (void)nested_get_evmcs_page(vcpu);
        }
+#endif
 
        /* Service pending TLB flush requests for L2 before switching to L1. */
        kvm_service_local_tlb_flush_requests(vcpu);
@@ -4854,7 +4900,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
        }
 
        if ((vm_exit_reason != -1) &&
-           (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
+           (enable_shadow_vmcs || nested_vmx_is_evmptr12_valid(vmx)))
                vmx->nested.need_vmcs12_to_shadow_sync = true;
 
        /* in case we halted in L2 */
@@ -4980,6 +5026,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
                else
                        *ret = off;
 
+               *ret = vmx_get_untagged_addr(vcpu, *ret, 0);
                /* Long mode: #GP(0)/#SS(0) if the memory address is in a
                 * non-canonical form. This is the only check on the memory
                 * destination for long mode!
@@ -5292,18 +5339,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
        if (vmptr == vmx->nested.vmxon_ptr)
                return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
 
-       /*
-        * When Enlightened VMEntry is enabled on the calling CPU we treat
-        * memory area pointer by vmptr as Enlightened VMCS (as there's no good
-        * way to distinguish it from VMCS12) and we must not corrupt it by
-        * writing to the non-existent 'launch_state' field. The area doesn't
-        * have to be the currently active EVMCS on the calling CPU and there's
-        * nothing KVM has to do to transition it from 'active' to 'non-active'
-        * state. It is possible that the area will stay mapped as
-        * vmx->nested.hv_evmcs but this shouldn't be a problem.
-        */
-       if (likely(!guest_cpuid_has_evmcs(vcpu) ||
-                  !evmptr_is_valid(nested_get_evmptr(vcpu)))) {
+       if (likely(!nested_evmcs_handle_vmclear(vcpu, vmptr))) {
                if (vmptr == vmx->nested.current_vmptr)
                        nested_release_vmcs12(vcpu);
 
@@ -5320,8 +5356,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
                                           vmptr + offsetof(struct vmcs12,
                                                            launch_state),
                                           &zero, sizeof(zero));
-       } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
-               nested_release_evmcs(vcpu);
        }
 
        return nested_vmx_succeed(vcpu);
@@ -5360,7 +5394,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
        /* Decode instruction info and find the field to read */
        field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
 
-       if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+       if (!nested_vmx_is_evmptr12_valid(vmx)) {
                /*
                 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
                 * any VMREAD sets the ALU flags for VMfailInvalid.
@@ -5398,7 +5432,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                        return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 
                /* Read the field, zero-extended to a u64 value */
-               value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
+               value = evmcs_read_any(nested_vmx_evmcs(vmx), field, offset);
        }
 
        /*
@@ -5586,7 +5620,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
 
        /* Forbid normal VMPTRLD if Enlightened version was used */
-       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+       if (nested_vmx_is_evmptr12_valid(vmx))
                return 1;
 
        if (vmx->nested.current_vmptr != vmptr) {
@@ -5649,7 +5683,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
-       if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
+       if (unlikely(nested_vmx_is_evmptr12_valid(to_vmx(vcpu))))
                return 1;
 
        if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
@@ -5797,6 +5831,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
        vpid02 = nested_get_vpid02(vcpu);
        switch (type) {
        case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+               /*
+                * LAM doesn't apply to addresses that are inputs to TLB
+                * invalidation.
+                */
                if (!operand.vpid ||
                    is_noncanonical_address(operand.gla, vcpu))
                        return nested_vmx_fail(vcpu,
@@ -6208,11 +6246,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
                 * Handle L2's bus locks in L0 directly.
                 */
                return true;
+#ifdef CONFIG_KVM_HYPERV
        case EXIT_REASON_VMCALL:
                /* Hyper-V L2 TLB flush hypercall is handled by L0 */
                return guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
                        nested_evmcs_l2_tlb_flush_enabled(vcpu) &&
                        kvm_hv_is_tlb_flush_hcall(vcpu);
+#endif
        default:
                break;
        }
@@ -6435,7 +6475,7 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
                        kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
 
                        /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
-                       if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
+                       if (nested_vmx_is_evmptr12_set(vmx))
                                kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
 
                        if (is_guest_mode(vcpu) &&
@@ -6491,7 +6531,7 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
        } else  {
                copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
                if (!vmx->nested.need_vmcs12_to_shadow_sync) {
-                       if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+                       if (nested_vmx_is_evmptr12_valid(vmx))
                                /*
                                 * L1 hypervisor is not obliged to keep eVMCS
                                 * clean fields data always up-to-date while
@@ -6632,6 +6672,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                        return -EINVAL;
 
                set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
+#ifdef CONFIG_KVM_HYPERV
        } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
                /*
                 * nested_vmx_handle_enlightened_vmptrld() cannot be called
@@ -6641,6 +6682,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                 */
                vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
                kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+#endif
        } else {
                return -EINVAL;
        }
@@ -7096,7 +7138,9 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
        .set_state = vmx_set_nested_state,
        .get_nested_state_pages = vmx_get_nested_state_pages,
        .write_log_dirty = nested_vmx_write_pml_buffer,
+#ifdef CONFIG_KVM_HYPERV
        .enable_evmcs = nested_enable_evmcs,
        .get_evmcs_version = nested_get_evmcs_version,
        .hv_inject_synthetic_vmexit_post_tlb_flush = vmx_hv_inject_synthetic_vmexit_post_tlb_flush,
+#endif
 };
index b4b9d51438c6b3e263ce80722ae69eadc26f1fb9..cce4e2aa30fbf8b98b114a069ef8445cb2b9aaf4 100644 (file)
@@ -3,6 +3,7 @@
 #define __KVM_X86_VMX_NESTED_H
 
 #include "kvm_cache_regs.h"
+#include "hyperv.h"
 #include "vmcs12.h"
 #include "vmx.h"
 
@@ -57,7 +58,7 @@ static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
 
        /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
        return vmx->nested.current_vmptr != -1ull ||
-               vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID;
+               nested_vmx_is_evmptr12_set(vmx);
 }
 
 static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
index 820d3e1f6b4f825fc653e745a4bb839ca2f57416..a6216c8747291f4c8aeed534117fad1f3808acb8 100644 (file)
@@ -437,11 +437,9 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                            !(msr & MSR_PMC_FULL_WIDTH_BIT))
                                data = (s64)(s32)data;
                        pmc_write_counter(pmc, data);
-                       pmc_update_sample_period(pmc);
                        break;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        pmc_write_counter(pmc, data);
-                       pmc_update_sample_period(pmc);
                        break;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        reserved_bits = pmu->reserved_bits;
@@ -632,26 +630,6 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 
 static void intel_pmu_reset(struct kvm_vcpu *vcpu)
 {
-       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-       struct kvm_pmc *pmc = NULL;
-       int i;
-
-       for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
-               pmc = &pmu->gp_counters[i];
-
-               pmc_stop_counter(pmc);
-               pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
-       }
-
-       for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
-               pmc = &pmu->fixed_counters[i];
-
-               pmc_stop_counter(pmc);
-               pmc->counter = pmc->prev_counter = 0;
-       }
-
-       pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
-
        intel_pmu_release_guest_lbr_event(vcpu);
 }
 
index 3e822e58249753c5ff267b5ce4f69e2ede9a00c9..6fef01e0536e5079c34e6efc6b64716119c10c4e 100644 (file)
@@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
        if (!IS_ALIGNED(*gva, alignment)) {
                fault = true;
        } else if (likely(is_64_bit_mode(vcpu))) {
+               *gva = vmx_get_untagged_addr(vcpu, *gva, 0);
                fault = is_noncanonical_address(*gva, vcpu);
        } else {
                *gva &= 0xffffffff;
index be275a0410a89957e79ffe57747e50ed36876e2f..906ecd001511355d0939e4e90a3994a7bd9809e3 100644 (file)
@@ -289,7 +289,7 @@ SYM_INNER_LABEL_ALIGN(vmx_vmexit, SYM_L_GLOBAL)
        RET
 
 .Lfixup:
-       cmpb $0, kvm_rebooting
+       cmpb $0, _ASM_RIP(kvm_rebooting)
        jne .Lvmfail
        ud2
 .Lvmfail:
index e0f86f11c345416d85951b16edb24cb586b8efdc..1111d9d089038b2f17b372891a235222b74f87bf 100644 (file)
@@ -66,6 +66,7 @@
 #include "vmx.h"
 #include "x86.h"
 #include "smm.h"
+#include "vmx_onhyperv.h"
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
@@ -523,22 +524,14 @@ module_param(enlightened_vmcs, bool, 0444);
 static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
 {
        struct hv_enlightened_vmcs *evmcs;
-       struct hv_partition_assist_pg **p_hv_pa_pg =
-                       &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
-       /*
-        * Synthetic VM-Exit is not enabled in current code and so All
-        * evmcs in singe VM shares same assist page.
-        */
-       if (!*p_hv_pa_pg)
-               *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+       hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
 
-       if (!*p_hv_pa_pg)
+       if (partition_assist_page == INVALID_PAGE)
                return -ENOMEM;
 
        evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
 
-       evmcs->partition_assist_page =
-               __pa(*p_hv_pa_pg);
+       evmcs->partition_assist_page = partition_assist_page;
        evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
        evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
 
@@ -745,7 +738,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
  */
 static int kvm_cpu_vmxoff(void)
 {
-       asm_volatile_goto("1: vmxoff\n\t"
+       asm goto("1: vmxoff\n\t"
                          _ASM_EXTABLE(1b, %l[fault])
                          ::: "cc", "memory" : fault);
 
@@ -2055,6 +2048,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
                                    &msr_info->data))
                        return 1;
+#ifdef CONFIG_KVM_HYPERV
                /*
                 * Enlightened VMCS v1 doesn't have certain VMCS fields but
                 * instead of just ignoring the features, different Hyper-V
@@ -2065,6 +2059,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!msr_info->host_initiated && guest_cpuid_has_evmcs(vcpu))
                        nested_evmcs_filter_control_msr(vcpu, msr_info->index,
                                                        &msr_info->data);
+#endif
                break;
        case MSR_IA32_RTIT_CTL:
                if (!vmx_pt_mode_is_host_guest())
@@ -2789,7 +2784,7 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
 
        cr4_set_bits(X86_CR4_VMXE);
 
-       asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
+       asm goto("1: vmxon %[vmxon_pointer]\n\t"
                          _ASM_EXTABLE(1b, %l[fault])
                          : : [vmxon_pointer] "m"(vmxon_pointer)
                          : : fault);
@@ -3400,7 +3395,8 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
                        update_guest_cr3 = false;
                vmx_ept_load_pdptrs(vcpu);
        } else {
-               guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu);
+               guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) |
+                           kvm_get_active_cr3_lam_bits(vcpu);
        }
 
        if (update_guest_cr3)
@@ -4833,7 +4829,10 @@ static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.vmxon_ptr = INVALID_GPA;
        vmx->nested.current_vmptr = INVALID_GPA;
+
+#ifdef CONFIG_KVM_HYPERV
        vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
+#endif
 
        vcpu->arch.microcode_version = 0x100000000ULL;
        vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
@@ -5782,7 +5781,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
         * would also use advanced VM-exit information for EPT violations to
         * reconstruct the page fault error code.
         */
-       if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
+       if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
                return kvm_emulate_instruction(vcpu, 0);
 
        return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
@@ -6757,10 +6756,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
                return;
 
        /*
-        * Grab the memslot so that the hva lookup for the mmu_notifier retry
-        * is guaranteed to use the same memslot as the pfn lookup, i.e. rely
-        * on the pfn lookup's validation of the memslot to ensure a valid hva
-        * is used for the retry check.
+        * Explicitly grab the memslot using KVM's internal slot ID to ensure
+        * KVM doesn't unintentionally grab a userspace memslot.  It _should_
+        * be impossible for userspace to create a memslot for the APIC when
+        * APICv is enabled, but paranoia won't hurt in this case.
         */
        slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
        if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
@@ -6785,8 +6784,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
                return;
 
        read_lock(&vcpu->kvm->mmu_lock);
-       if (mmu_invalidate_retry_hva(kvm, mmu_seq,
-                                    gfn_to_hva_memslot(slot, gfn))) {
+       if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
                read_unlock(&vcpu->kvm->mmu_lock);
                goto out;
@@ -7674,6 +7672,9 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
        cr4_fixed1_update(X86_CR4_UMIP,       ecx, feature_bit(UMIP));
        cr4_fixed1_update(X86_CR4_LA57,       ecx, feature_bit(LA57));
 
+       entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1);
+       cr4_fixed1_update(X86_CR4_LAM_SUP,    eax, feature_bit(LAM));
+
 #undef cr4_fixed1_update
 }
 
@@ -7760,6 +7761,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
 
        kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX);
+       kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM);
 
        vmx_setup_uret_msrs(vmx);
 
@@ -8206,6 +8208,50 @@ static void vmx_vm_destroy(struct kvm *kvm)
        free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
 }
 
+/*
+ * Note, the SDM states that the linear address is masked *after* the modified
+ * canonicality check, whereas KVM masks (untags) the address and then performs
+ * a "normal" canonicality check.  Functionally, the two methods are identical,
+ * and when the masking occurs relative to the canonicality check isn't visible
+ * to software, i.e. KVM's behavior doesn't violate the SDM.
+ */
+gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
+{
+       int lam_bit;
+       unsigned long cr3_bits;
+
+       if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG))
+               return gva;
+
+       if (!is_64_bit_mode(vcpu))
+               return gva;
+
+       /*
+        * Bit 63 determines if the address should be treated as user address
+        * or a supervisor address.
+        */
+       if (!(gva & BIT_ULL(63))) {
+               cr3_bits = kvm_get_active_cr3_lam_bits(vcpu);
+               if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48)))
+                       return gva;
+
+               /* LAM_U48 is ignored if LAM_U57 is set. */
+               lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47;
+       } else {
+               if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP))
+                       return gva;
+
+               lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47;
+       }
+
+       /*
+        * Untag the address by sign-extending the lam_bit, but NOT to bit 63.
+        * Bit 63 is retained from the raw virtual address so that untagging
+        * doesn't change a user access to a supervisor access, and vice versa.
+        */
+       return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
+}
+
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .name = KBUILD_MODNAME,
 
@@ -8346,6 +8392,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .complete_emulated_msr = kvm_complete_insn_gp,
 
        .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
+
+       .get_untagged_addr = vmx_get_untagged_addr,
 };
 
 static unsigned int vmx_handle_intel_pt_intr(void)
index c2130d2c8e24bb5ff3a529a4bde67f875376adda..e3b0985bb74a1f4d57be41cbb0d283abbc476625 100644 (file)
@@ -241,9 +241,11 @@ struct nested_vmx {
                bool guest_mode;
        } smm;
 
+#ifdef CONFIG_KVM_HYPERV
        gpa_t hv_evmcs_vmptr;
        struct kvm_host_map hv_evmcs_map;
        struct hv_enlightened_vmcs *hv_evmcs;
+#endif
 };
 
 struct vcpu_vmx {
@@ -420,6 +422,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
 
+gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
+
 static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
                                             int type, bool value)
 {
@@ -745,14 +749,4 @@ static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
        return  lapic_in_kernel(vcpu) && enable_ipiv;
 }
 
-static inline bool guest_cpuid_has_evmcs(struct kvm_vcpu *vcpu)
-{
-       /*
-        * eVMCS is exposed to the guest if Hyper-V is enabled in CPUID and
-        * eVMCS has been explicitly enabled by userspace.
-        */
-       return vcpu->arch.hyperv_enabled &&
-              to_vmx(vcpu)->nested.enlightened_vmcs_enabled;
-}
-
 #endif /* __KVM_X86_VMX_H */
diff --git a/arch/x86/kvm/vmx/vmx_onhyperv.c b/arch/x86/kvm/vmx/vmx_onhyperv.c
new file mode 100644 (file)
index 0000000..b9a8b91
--- /dev/null
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "capabilities.h"
+#include "vmx_onhyperv.h"
+
+DEFINE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
+
+/*
+ * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption
+ * is: in case a feature has corresponding fields in eVMCS described and it was
+ * exposed in VMX feature MSRs, KVM is free to use it. Warn if KVM meets a
+ * feature which has no corresponding eVMCS field, this likely means that KVM
+ * needs to be updated.
+ */
+#define evmcs_check_vmcs_conf(field, ctrl)                                     \
+       do {                                                                    \
+               typeof(vmcs_conf->field) unsupported;                           \
+                                                                               \
+               unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl;    \
+               if (unsupported) {                                              \
+                       pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\
+                                    (u64)unsupported);                         \
+                       vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl;          \
+               }                                                               \
+       }                                                                       \
+       while (0)
+
+void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
+{
+       evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL);
+       evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL);
+       evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC);
+       evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC);
+       evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL);
+       evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL);
+}
diff --git a/arch/x86/kvm/vmx/vmx_onhyperv.h b/arch/x86/kvm/vmx/vmx_onhyperv.h
new file mode 100644 (file)
index 0000000..eb48153
--- /dev/null
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ARCH_X86_KVM_VMX_ONHYPERV_H__
+#define __ARCH_X86_KVM_VMX_ONHYPERV_H__
+
+#include <asm/hyperv-tlfs.h>
+#include <asm/mshyperv.h>
+
+#include <linux/jump_label.h>
+
+#include "capabilities.h"
+#include "hyperv_evmcs.h"
+#include "vmcs12.h"
+
+#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
+
+#if IS_ENABLED(CONFIG_HYPERV)
+
+DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
+
+static __always_inline bool kvm_is_using_evmcs(void)
+{
+       return static_branch_unlikely(&__kvm_is_using_evmcs);
+}
+
+static __always_inline int get_evmcs_offset(unsigned long field,
+                                           u16 *clean_field)
+{
+       int offset = evmcs_field_offset(field, clean_field);
+
+       WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
+       return offset;
+}
+
+static __always_inline void evmcs_write64(unsigned long field, u64 value)
+{
+       u16 clean_field;
+       int offset = get_evmcs_offset(field, &clean_field);
+
+       if (offset < 0)
+               return;
+
+       *(u64 *)((char *)current_evmcs + offset) = value;
+
+       current_evmcs->hv_clean_fields &= ~clean_field;
+}
+
+static __always_inline void evmcs_write32(unsigned long field, u32 value)
+{
+       u16 clean_field;
+       int offset = get_evmcs_offset(field, &clean_field);
+
+       if (offset < 0)
+               return;
+
+       *(u32 *)((char *)current_evmcs + offset) = value;
+       current_evmcs->hv_clean_fields &= ~clean_field;
+}
+
+static __always_inline void evmcs_write16(unsigned long field, u16 value)
+{
+       u16 clean_field;
+       int offset = get_evmcs_offset(field, &clean_field);
+
+       if (offset < 0)
+               return;
+
+       *(u16 *)((char *)current_evmcs + offset) = value;
+       current_evmcs->hv_clean_fields &= ~clean_field;
+}
+
+static __always_inline u64 evmcs_read64(unsigned long field)
+{
+       int offset = get_evmcs_offset(field, NULL);
+
+       if (offset < 0)
+               return 0;
+
+       return *(u64 *)((char *)current_evmcs + offset);
+}
+
+static __always_inline u32 evmcs_read32(unsigned long field)
+{
+       int offset = get_evmcs_offset(field, NULL);
+
+       if (offset < 0)
+               return 0;
+
+       return *(u32 *)((char *)current_evmcs + offset);
+}
+
+static __always_inline u16 evmcs_read16(unsigned long field)
+{
+       int offset = get_evmcs_offset(field, NULL);
+
+       if (offset < 0)
+               return 0;
+
+       return *(u16 *)((char *)current_evmcs + offset);
+}
+
+static inline void evmcs_load(u64 phys_addr)
+{
+       struct hv_vp_assist_page *vp_ap =
+               hv_get_vp_assist_page(smp_processor_id());
+
+       if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
+               vp_ap->nested_control.features.directhypercall = 1;
+       vp_ap->current_nested_vmcs = phys_addr;
+       vp_ap->enlighten_vmentry = 1;
+}
+
+void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
+#else /* !IS_ENABLED(CONFIG_HYPERV) */
+static __always_inline bool kvm_is_using_evmcs(void) { return false; }
+static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
+static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
+static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
+static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
+static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
+static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
+static inline void evmcs_load(u64 phys_addr) {}
+#endif /* IS_ENABLED(CONFIG_HYPERV) */
+
+#endif /* __ARCH_X86_KVM_VMX_ONHYPERV_H__ */
index 33af7b4c6eb4a6ed3925942302dffdd1f1af333b..8060e5fc6dbd83e145f6c08fca370e4a42d9861b 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <asm/vmx.h>
 
-#include "hyperv.h"
+#include "vmx_onhyperv.h"
 #include "vmcs.h"
 #include "../x86.h"
 
@@ -94,7 +94,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
 
 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 
-       asm_volatile_goto("1: vmread %[field], %[output]\n\t"
+       asm_goto_output("1: vmread %[field], %[output]\n\t"
                          "jna %l[do_fail]\n\t"
 
                          _ASM_EXTABLE(1b, %l[do_exception])
@@ -188,7 +188,7 @@ static __always_inline unsigned long vmcs_readl(unsigned long field)
 
 #define vmx_asm1(insn, op1, error_args...)                             \
 do {                                                                   \
-       asm_volatile_goto("1: " __stringify(insn) " %0\n\t"             \
+       asm goto("1: " __stringify(insn) " %0\n\t"                      \
                          ".byte 0x2e\n\t" /* branch not taken hint */  \
                          "jna %l[error]\n\t"                           \
                          _ASM_EXTABLE(1b, %l[fault])                   \
@@ -205,7 +205,7 @@ fault:                                                                      \
 
 #define vmx_asm2(insn, op1, op2, error_args...)                                \
 do {                                                                   \
-       asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"        \
+       asm goto("1: "  __stringify(insn) " %1, %0\n\t"                 \
                          ".byte 0x2e\n\t" /* branch not taken hint */  \
                          "jna %l[error]\n\t"                           \
                          _ASM_EXTABLE(1b, %l[fault])                   \
index cec0fc2a4b1cb8fea7222810c42b25da49246fe1..bf10a9073a0928aaccc9d929f78d48ffcbbc06b2 100644 (file)
@@ -1284,7 +1284,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
         * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
         * the current vCPU mode is accurate.
         */
-       if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
+       if (!kvm_vcpu_is_legal_cr3(vcpu, cr3))
                return 1;
 
        if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
@@ -1504,6 +1504,8 @@ static unsigned num_msrs_to_save;
 static const u32 emulated_msrs_all[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
+
+#ifdef CONFIG_KVM_HYPERV
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
        HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
        HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
@@ -1521,6 +1523,7 @@ static const u32 emulated_msrs_all[] = {
        HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
        HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
        HV_X64_MSR_SYNDBG_PENDING_BUFFER,
+#endif
 
        MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
        MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
@@ -1779,6 +1782,10 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
                kvm_mmu_reset_context(vcpu);
 
+       if (!static_cpu_has(X86_FEATURE_XSAVES) &&
+           (efer & EFER_SVME))
+               kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
+
        return 0;
 }
 
@@ -2510,26 +2517,29 @@ static inline int gtod_is_based_on_tsc(int mode)
 }
 #endif
 
-static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
+static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu, bool new_generation)
 {
 #ifdef CONFIG_X86_64
-       bool vcpus_matched;
        struct kvm_arch *ka = &vcpu->kvm->arch;
        struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
 
-       vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
-                        atomic_read(&vcpu->kvm->online_vcpus));
+       /*
+        * To use the masterclock, the host clocksource must be based on TSC
+        * and all vCPUs must have matching TSCs.  Note, the count for matching
+        * vCPUs doesn't include the reference vCPU, hence "+1".
+        */
+       bool use_master_clock = (ka->nr_vcpus_matched_tsc + 1 ==
+                                atomic_read(&vcpu->kvm->online_vcpus)) &&
+                               gtod_is_based_on_tsc(gtod->clock.vclock_mode);
 
        /*
-        * Once the masterclock is enabled, always perform request in
-        * order to update it.
-        *
-        * In order to enable masterclock, the host clocksource must be TSC
-        * and the vcpus need to have matched TSCs.  When that happens,
-        * perform request to enable masterclock.
+        * Request a masterclock update if the masterclock needs to be toggled
+        * on/off, or when starting a new generation and the masterclock is
+        * enabled (compute_guest_tsc() requires the masterclock snapshot to be
+        * taken _after_ the new generation is created).
         */
-       if (ka->use_master_clock ||
-           (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
+       if ((ka->use_master_clock && new_generation) ||
+           (ka->use_master_clock != use_master_clock))
                kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 
        trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
@@ -2706,7 +2716,7 @@ static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
        vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
        vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
 
-       kvm_track_tsc_matching(vcpu);
+       kvm_track_tsc_matching(vcpu, !matched);
 }
 
 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
@@ -3104,7 +3114,8 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 
 static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
                                    struct gfn_to_pfn_cache *gpc,
-                                   unsigned int offset)
+                                   unsigned int offset,
+                                   bool force_tsc_unstable)
 {
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct pvclock_vcpu_time_info *guest_hv_clock;
@@ -3141,6 +3152,10 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
        }
 
        memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock));
+
+       if (force_tsc_unstable)
+               guest_hv_clock->flags &= ~PVCLOCK_TSC_STABLE_BIT;
+
        smp_wmb();
 
        guest_hv_clock->version = ++vcpu->hv_clock.version;
@@ -3161,6 +3176,16 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        u64 tsc_timestamp, host_tsc;
        u8 pvclock_flags;
        bool use_master_clock;
+#ifdef CONFIG_KVM_XEN
+       /*
+        * For Xen guests we may need to override PVCLOCK_TSC_STABLE_BIT as unless
+        * explicitly told to use TSC as its clocksource Xen will not set this bit.
+        * This default behaviour led to bugs in some guest kernels which cause
+        * problems if they observe PVCLOCK_TSC_STABLE_BIT in the pvclock flags.
+        */
+       bool xen_pvclock_tsc_unstable =
+               ka->xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
+#endif
 
        kernel_ns = 0;
        host_tsc = 0;
@@ -3239,13 +3264,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        vcpu->hv_clock.flags = pvclock_flags;
 
        if (vcpu->pv_time.active)
-               kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0);
+               kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0, false);
 #ifdef CONFIG_KVM_XEN
        if (vcpu->xen.vcpu_info_cache.active)
                kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
-                                       offsetof(struct compat_vcpu_info, time));
+                                       offsetof(struct compat_vcpu_info, time),
+                                       xen_pvclock_tsc_unstable);
        if (vcpu->xen.vcpu_time_info_cache.active)
-               kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0);
+               kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0,
+                                       xen_pvclock_tsc_unstable);
 #endif
        kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
@@ -4020,6 +4047,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 * the need to ignore the workaround.
                 */
                break;
+#ifdef CONFIG_KVM_HYPERV
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
        case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
        case HV_X64_MSR_SYNDBG_OPTIONS:
@@ -4032,6 +4060,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case HV_X64_MSR_TSC_INVARIANT_CONTROL:
                return kvm_hv_set_msr_common(vcpu, msr, data,
                                             msr_info->host_initiated);
+#endif
        case MSR_IA32_BBL_CR_CTL3:
                /* Drop writes to this legacy MSR -- see rdmsr
                 * counterpart for further detail.
@@ -4377,6 +4406,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 */
                msr_info->data = 0x20000000;
                break;
+#ifdef CONFIG_KVM_HYPERV
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
        case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
        case HV_X64_MSR_SYNDBG_OPTIONS:
@@ -4390,6 +4420,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                return kvm_hv_get_msr_common(vcpu,
                                             msr_info->index, &msr_info->data,
                                             msr_info->host_initiated);
+#endif
        case MSR_IA32_BBL_CR_CTL3:
                /* This legacy MSR exists but isn't fully documented in current
                 * silicon.  It is however accessed by winxp in very narrow
@@ -4527,6 +4558,7 @@ static inline bool kvm_can_mwait_in_guest(void)
                boot_cpu_has(X86_FEATURE_ARAT);
 }
 
+#ifdef CONFIG_KVM_HYPERV
 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
                                            struct kvm_cpuid2 __user *cpuid_arg)
 {
@@ -4547,6 +4579,14 @@ static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
 
        return 0;
 }
+#endif
+
+static bool kvm_is_vm_type_supported(unsigned long type)
+{
+       return type == KVM_X86_DEFAULT_VM ||
+              (type == KVM_X86_SW_PROTECTED_VM &&
+               IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_enabled);
+}
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
@@ -4573,9 +4613,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_PIT_STATE2:
        case KVM_CAP_SET_IDENTITY_MAP_ADDR:
        case KVM_CAP_VCPU_EVENTS:
+#ifdef CONFIG_KVM_HYPERV
        case KVM_CAP_HYPERV:
        case KVM_CAP_HYPERV_VAPIC:
        case KVM_CAP_HYPERV_SPIN:
+       case KVM_CAP_HYPERV_TIME:
        case KVM_CAP_HYPERV_SYNIC:
        case KVM_CAP_HYPERV_SYNIC2:
        case KVM_CAP_HYPERV_VP_INDEX:
@@ -4585,6 +4627,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_HYPERV_CPUID:
        case KVM_CAP_HYPERV_ENFORCE_CPUID:
        case KVM_CAP_SYS_HYPERV_CPUID:
+#endif
        case KVM_CAP_PCI_SEGMENT:
        case KVM_CAP_DEBUGREGS:
        case KVM_CAP_X86_ROBUST_SINGLESTEP:
@@ -4594,7 +4637,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_GET_TSC_KHZ:
        case KVM_CAP_KVMCLOCK_CTRL:
        case KVM_CAP_READONLY_MEM:
-       case KVM_CAP_HYPERV_TIME:
        case KVM_CAP_IOAPIC_POLARITY_IGNORED:
        case KVM_CAP_TSC_DEADLINE_TIMER:
        case KVM_CAP_DISABLE_QUIRKS:
@@ -4625,6 +4667,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
        case KVM_CAP_IRQFD_RESAMPLE:
+       case KVM_CAP_MEMORY_FAULT_INFO:
                r = 1;
                break;
        case KVM_CAP_EXIT_HYPERCALL:
@@ -4638,7 +4681,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                    KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
                    KVM_XEN_HVM_CONFIG_SHARED_INFO |
                    KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL |
-                   KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
+                   KVM_XEN_HVM_CONFIG_EVTCHN_SEND |
+                   KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
                if (sched_info_on())
                        r |= KVM_XEN_HVM_CONFIG_RUNSTATE |
                             KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG;
@@ -4704,12 +4748,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = kvm_x86_ops.nested_ops->get_state ?
                        kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
                break;
+#ifdef CONFIG_KVM_HYPERV
        case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
                r = kvm_x86_ops.enable_l2_tlb_flush != NULL;
                break;
        case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
                r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
                break;
+#endif
        case KVM_CAP_SMALLER_MAXPHYADDR:
                r = (int) allow_smaller_maxphyaddr;
                break;
@@ -4738,6 +4784,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_X86_NOTIFY_VMEXIT:
                r = kvm_caps.has_notify_vmexit;
                break;
+       case KVM_CAP_VM_TYPES:
+               r = BIT(KVM_X86_DEFAULT_VM);
+               if (kvm_is_vm_type_supported(KVM_X86_SW_PROTECTED_VM))
+                       r |= BIT(KVM_X86_SW_PROTECTED_VM);
+               break;
        default:
                break;
        }
@@ -4871,9 +4922,11 @@ long kvm_arch_dev_ioctl(struct file *filp,
        case KVM_GET_MSRS:
                r = msr_io(NULL, argp, do_get_msr_feature, 1);
                break;
+#ifdef CONFIG_KVM_HYPERV
        case KVM_GET_SUPPORTED_HV_CPUID:
                r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
                break;
+#endif
        case KVM_GET_DEVICE_ATTR: {
                struct kvm_device_attr attr;
                r = -EFAULT;
@@ -5699,14 +5752,11 @@ static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
 {
-       int r;
-       uint16_t vmcs_version;
-       void __user *user_ptr;
-
        if (cap->flags)
                return -EINVAL;
 
        switch (cap->cap) {
+#ifdef CONFIG_KVM_HYPERV
        case KVM_CAP_HYPERV_SYNIC2:
                if (cap->args[0])
                        return -EINVAL;
@@ -5718,16 +5768,22 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                return kvm_hv_activate_synic(vcpu, cap->cap ==
                                             KVM_CAP_HYPERV_SYNIC2);
        case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
-               if (!kvm_x86_ops.nested_ops->enable_evmcs)
-                       return -ENOTTY;
-               r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
-               if (!r) {
-                       user_ptr = (void __user *)(uintptr_t)cap->args[0];
-                       if (copy_to_user(user_ptr, &vmcs_version,
-                                        sizeof(vmcs_version)))
-                               r = -EFAULT;
+               {
+                       int r;
+                       uint16_t vmcs_version;
+                       void __user *user_ptr;
+
+                       if (!kvm_x86_ops.nested_ops->enable_evmcs)
+                               return -ENOTTY;
+                       r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
+                       if (!r) {
+                               user_ptr = (void __user *)(uintptr_t)cap->args[0];
+                               if (copy_to_user(user_ptr, &vmcs_version,
+                                                sizeof(vmcs_version)))
+                                       r = -EFAULT;
+                       }
+                       return r;
                }
-               return r;
        case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
                if (!kvm_x86_ops.enable_l2_tlb_flush)
                        return -ENOTTY;
@@ -5736,6 +5792,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 
        case KVM_CAP_HYPERV_ENFORCE_CPUID:
                return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
+#endif
 
        case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
                vcpu->arch.pv_cpuid.enforce = cap->args[0];
@@ -6128,9 +6185,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }
+#ifdef CONFIG_KVM_HYPERV
        case KVM_GET_SUPPORTED_HV_CPUID:
                r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
                break;
+#endif
 #ifdef CONFIG_KVM_XEN
        case KVM_XEN_VCPU_GET_ATTR: {
                struct kvm_xen_vcpu_attr xva;
@@ -6961,6 +7020,9 @@ set_identity_unlock:
                r = -EEXIST;
                if (kvm->arch.vpit)
                        goto create_pit_unlock;
+               r = -ENOENT;
+               if (!pic_in_kernel(kvm))
+                       goto create_pit_unlock;
                r = -ENOMEM;
                kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
                if (kvm->arch.vpit)
@@ -7188,6 +7250,7 @@ set_pit2_out:
                r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region);
                break;
        }
+#ifdef CONFIG_KVM_HYPERV
        case KVM_HYPERV_EVENTFD: {
                struct kvm_hyperv_eventfd hvevfd;
 
@@ -7197,6 +7260,7 @@ set_pit2_out:
                r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
                break;
        }
+#endif
        case KVM_SET_PMU_EVENT_FILTER:
                r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
                break;
@@ -8432,6 +8496,15 @@ static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
                kvm_vm_bugged(kvm);
 }
 
+static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
+                                       gva_t addr, unsigned int flags)
+{
+       if (!kvm_x86_ops.get_untagged_addr)
+               return addr;
+
+       return static_call(kvm_x86_get_untagged_addr)(emul_to_vcpu(ctxt), addr, flags);
+}
+
 static const struct x86_emulate_ops emulate_ops = {
        .vm_bugged           = emulator_vm_bugged,
        .read_gpr            = emulator_read_gpr,
@@ -8476,6 +8549,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .leave_smm           = emulator_leave_smm,
        .triple_fault        = emulator_triple_fault,
        .set_xcr             = emulator_set_xcr,
+       .get_untagged_addr   = emulator_get_untagged_addr,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -10575,19 +10649,20 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 
 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 {
-       u64 eoi_exit_bitmap[4];
-
        if (!kvm_apic_hw_enabled(vcpu->arch.apic))
                return;
 
+#ifdef CONFIG_KVM_HYPERV
        if (to_hv_vcpu(vcpu)) {
+               u64 eoi_exit_bitmap[4];
+
                bitmap_or((ulong *)eoi_exit_bitmap,
                          vcpu->arch.ioapic_handled_vectors,
                          to_hv_synic(vcpu)->vec_bitmap, 256);
                static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
                return;
        }
-
+#endif
        static_call_cond(kvm_x86_load_eoi_exitmap)(
                vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }
@@ -10678,9 +10753,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 * the flushes are considered "remote" and not "local" because
                 * the requests can be initiated from other vCPUs.
                 */
+#ifdef CONFIG_KVM_HYPERV
                if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) &&
                    kvm_hv_vcpu_flush_tlb(vcpu))
                        kvm_vcpu_flush_tlb_guest(vcpu);
+#endif
 
                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                        vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -10733,6 +10810,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        vcpu_load_eoi_exitmap(vcpu);
                if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
                        kvm_vcpu_reload_apic_access_page(vcpu);
+#ifdef CONFIG_KVM_HYPERV
                if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
                        vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
                        vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
@@ -10763,6 +10841,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 */
                if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
                        kvm_hv_process_stimers(vcpu);
+#endif
                if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
                        kvm_vcpu_update_apicv(vcpu);
                if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
@@ -11081,6 +11160,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
 
+       vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
        vcpu->arch.l1tf_flush_l1d = true;
 
        for (;;) {
@@ -11598,7 +11678,7 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
                 */
                if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
                        return false;
-               if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3))
+               if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3))
                        return false;
        } else {
                /*
@@ -12207,7 +12287,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        }
 
        if (!init_event) {
-               kvm_pmu_reset(vcpu);
                vcpu->arch.smbase = 0x30000;
 
                vcpu->arch.msr_misc_features_enables = 0;
@@ -12424,7 +12503,9 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 
 void kvm_arch_free_vm(struct kvm *kvm)
 {
-       kfree(to_kvm_hv(kvm)->hv_pa_pg);
+#if IS_ENABLED(CONFIG_HYPERV)
+       kfree(kvm->arch.hv_pa_pg);
+#endif
        __kvm_arch_free_vm(kvm);
 }
 
@@ -12434,9 +12515,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        int ret;
        unsigned long flags;
 
-       if (type)
+       if (!kvm_is_vm_type_supported(type))
                return -EINVAL;
 
+       kvm->arch.vm_type = type;
+
        ret = kvm_page_track_init(kvm);
        if (ret)
                goto out;
@@ -12575,8 +12658,8 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
                hva = slot->userspace_addr;
        }
 
-       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-               struct kvm_userspace_memory_region m;
+       for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
+               struct kvm_userspace_memory_region2 m;
 
                m.slot = id | (i << 16);
                m.flags = 0;
@@ -12726,6 +12809,10 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
                }
        }
 
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+       kvm_mmu_init_memslot_memory_attributes(kvm, slot);
+#endif
+
        if (kvm_page_track_create_memslot(kvm, slot, npages))
                goto out_free;
 
@@ -13536,6 +13623,10 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
 
        switch (type) {
        case INVPCID_TYPE_INDIV_ADDR:
+               /*
+                * LAM doesn't apply to addresses that are inputs to TLB
+                * invalidation.
+                */
                if ((!pcid_enabled && (operand.pcid != 0)) ||
                    is_noncanonical_address(operand.gla, vcpu)) {
                        kvm_inject_gp(vcpu, 0);
index 5184fde1dc541a90ca150f16d71eb2bd9506b4af..2f7e191666580085c85785ada86789fb9d1842b1 100644 (file)
@@ -530,6 +530,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
                __reserved_bits |= X86_CR4_VMXE;        \
        if (!__cpu_has(__c, X86_FEATURE_PCID))          \
                __reserved_bits |= X86_CR4_PCIDE;       \
+       if (!__cpu_has(__c, X86_FEATURE_LAM))           \
+               __reserved_bits |= X86_CR4_LAM_SUP;     \
        __reserved_bits;                                \
 })
 
index 523bb6df5ac9858a052b7da5fbb568f0488436ec..4b4e738c6f1b79e474d18519a51e72f3d53286cc 100644 (file)
@@ -1162,7 +1162,9 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
 {
        /* Only some feature flags need to be *enabled* by userspace */
        u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
-               KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
+               KVM_XEN_HVM_CONFIG_EVTCHN_SEND |
+               KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
+       u32 old_flags;
 
        if (xhc->flags & ~permitted_flags)
                return -EINVAL;
@@ -1183,9 +1185,14 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
        else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
                static_branch_slow_dec_deferred(&kvm_xen_enabled);
 
+       old_flags = kvm->arch.xen_hvm_config.flags;
        memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
 
        mutex_unlock(&kvm->arch.xen.xen_lock);
+
+       if ((old_flags ^ xhc->flags) & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE)
+               kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
+
        return 0;
 }
 
index 20ef350a60fbb59a4b183bc3e54b1d517e6bba9b..10d5ed8b5990f4d2f64436b71905a9d817df11a1 100644 (file)
@@ -163,23 +163,23 @@ SYM_CODE_END(__get_user_8_handle_exception)
 #endif
 
 /* get_user */
-       _ASM_EXTABLE(1b, __get_user_handle_exception)
-       _ASM_EXTABLE(2b, __get_user_handle_exception)
-       _ASM_EXTABLE(3b, __get_user_handle_exception)
+       _ASM_EXTABLE_UA(1b, __get_user_handle_exception)
+       _ASM_EXTABLE_UA(2b, __get_user_handle_exception)
+       _ASM_EXTABLE_UA(3b, __get_user_handle_exception)
 #ifdef CONFIG_X86_64
-       _ASM_EXTABLE(4b, __get_user_handle_exception)
+       _ASM_EXTABLE_UA(4b, __get_user_handle_exception)
 #else
-       _ASM_EXTABLE(4b, __get_user_8_handle_exception)
-       _ASM_EXTABLE(5b, __get_user_8_handle_exception)
+       _ASM_EXTABLE_UA(4b, __get_user_8_handle_exception)
+       _ASM_EXTABLE_UA(5b, __get_user_8_handle_exception)
 #endif
 
 /* __get_user */
-       _ASM_EXTABLE(6b, __get_user_handle_exception)
-       _ASM_EXTABLE(7b, __get_user_handle_exception)
-       _ASM_EXTABLE(8b, __get_user_handle_exception)
+       _ASM_EXTABLE_UA(6b, __get_user_handle_exception)
+       _ASM_EXTABLE_UA(7b, __get_user_handle_exception)
+       _ASM_EXTABLE_UA(8b, __get_user_handle_exception)
 #ifdef CONFIG_X86_64
-       _ASM_EXTABLE(9b, __get_user_handle_exception)
+       _ASM_EXTABLE_UA(9b, __get_user_handle_exception)
 #else
-       _ASM_EXTABLE(9b, __get_user_8_handle_exception)
-       _ASM_EXTABLE(10b, __get_user_8_handle_exception)
+       _ASM_EXTABLE_UA(9b, __get_user_8_handle_exception)
+       _ASM_EXTABLE_UA(10b, __get_user_8_handle_exception)
 #endif
index 2877f59341775aa38a68d152f72e0d55606c1cac..975c9c18263d2afd926c12a8bfffe0c2d72d43cd 100644 (file)
@@ -133,15 +133,15 @@ SYM_CODE_START_LOCAL(__put_user_handle_exception)
        RET
 SYM_CODE_END(__put_user_handle_exception)
 
-       _ASM_EXTABLE(1b, __put_user_handle_exception)
-       _ASM_EXTABLE(2b, __put_user_handle_exception)
-       _ASM_EXTABLE(3b, __put_user_handle_exception)
-       _ASM_EXTABLE(4b, __put_user_handle_exception)
-       _ASM_EXTABLE(5b, __put_user_handle_exception)
-       _ASM_EXTABLE(6b, __put_user_handle_exception)
-       _ASM_EXTABLE(7b, __put_user_handle_exception)
-       _ASM_EXTABLE(9b, __put_user_handle_exception)
+       _ASM_EXTABLE_UA(1b, __put_user_handle_exception)
+       _ASM_EXTABLE_UA(2b, __put_user_handle_exception)
+       _ASM_EXTABLE_UA(3b, __put_user_handle_exception)
+       _ASM_EXTABLE_UA(4b, __put_user_handle_exception)
+       _ASM_EXTABLE_UA(5b, __put_user_handle_exception)
+       _ASM_EXTABLE_UA(6b, __put_user_handle_exception)
+       _ASM_EXTABLE_UA(7b, __put_user_handle_exception)
+       _ASM_EXTABLE_UA(9b, __put_user_handle_exception)
 #ifdef CONFIG_X86_32
-       _ASM_EXTABLE(8b, __put_user_handle_exception)
-       _ASM_EXTABLE(10b, __put_user_handle_exception)
+       _ASM_EXTABLE_UA(8b, __put_user_handle_exception)
+       _ASM_EXTABLE_UA(10b, __put_user_handle_exception)
 #endif
index ea2eb2ec90e2bc96368090bb0478a2c8f591df0e..55c4b07ec1f6319bb781071453d034c9efa9ed65 100644 (file)
@@ -283,6 +283,9 @@ static int setup_mcfg_map(struct acpi_pci_root_info *ci)
        info->mcfg_added = false;
        seg = info->sd.domain;
 
+       dev_dbg(dev, "%s(%04x %pR ECAM %pa)\n", __func__, seg,
+               &root->secondary, &root->mcfg_addr);
+
        /* return success if MMCFG is not in use */
        if (raw_pci_ext_ops && raw_pci_ext_ops != &pci_mmcfg)
                return 0;
index 4b3efaa82ab7c14a747cea8dbe50ad6645cf1981..0cc9520666efbbe0c61a53aff2467ac5afb433f6 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * mmconfig-shared.c - Low-level direct PCI config space access via
- *                     MMCONFIG - common code between i386 and x86-64.
+ * Low-level direct PCI config space access via ECAM - common code between
+ * i386 and x86-64.
  *
  * This code does:
  * - known chipset handling
@@ -11,6 +11,8 @@
  * themselves.
  */
 
+#define pr_fmt(fmt) "PCI: " fmt
+
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/pci.h>
@@ -24,9 +26,7 @@
 #include <asm/pci_x86.h>
 #include <asm/acpi.h>
 
-#define PREFIX "PCI: "
-
-/* Indicate if the mmcfg resources have been placed into the resource table. */
+/* Indicate if the ECAM resources have been placed into the resource table */
 static bool pci_mmcfg_running_state;
 static bool pci_mmcfg_arch_init_failed;
 static DEFINE_MUTEX(pci_mmcfg_lock);
@@ -90,7 +90,7 @@ static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
        res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
-                "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
+                "PCI ECAM %04x [bus %02x-%02x]", segment, start, end);
        res->name = new->name;
 
        return new;
@@ -102,16 +102,15 @@ struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
        struct pci_mmcfg_region *new;
 
        new = pci_mmconfig_alloc(segment, start, end, addr);
-       if (new) {
-               mutex_lock(&pci_mmcfg_lock);
-               list_add_sorted(new);
-               mutex_unlock(&pci_mmcfg_lock);
+       if (!new)
+               return NULL;
 
-               pr_info(PREFIX
-                      "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
-                      "(base %#lx)\n",
-                      segment, start, end, &new->res, (unsigned long)addr);
-       }
+       mutex_lock(&pci_mmcfg_lock);
+       list_add_sorted(new);
+       mutex_unlock(&pci_mmcfg_lock);
+
+       pr_info("ECAM %pR (base %#lx) for domain %04x [bus %02x-%02x]\n",
+               &new->res, (unsigned long)addr, segment, start, end);
 
        return new;
 }
@@ -205,7 +204,7 @@ static const char *__init pci_mmcfg_amd_fam10h(void)
        msr <<= 32;
        msr |= low;
 
-       /* mmconfig is not enable */
+       /* ECAM is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;
 
@@ -367,7 +366,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
                        name = pci_mmcfg_probes[i].probe();
 
                if (name)
-                       pr_info(PREFIX "%s with MMCONFIG support\n", name);
+                       pr_info("%s with ECAM support\n", name);
        }
 
        /* some end_bus_number is crazy, fix it */
@@ -443,9 +442,11 @@ static bool is_acpi_reserved(u64 start, u64 end, enum e820_type not_used)
        return mcfg_res.flags;
 }
 
-static bool is_efi_mmio(u64 start, u64 end, enum e820_type not_used)
+static bool is_efi_mmio(struct resource *res)
 {
 #ifdef CONFIG_EFI
+       u64 start = res->start;
+       u64 end = res->start + resource_size(res);
        efi_memory_desc_t *md;
        u64 size, mmio_start, mmio_end;
 
@@ -455,11 +456,6 @@ static bool is_efi_mmio(u64 start, u64 end, enum e820_type not_used)
                        mmio_start = md->phys_addr;
                        mmio_end = mmio_start + size;
 
-                       /*
-                        * N.B. Caller supplies (start, start + size),
-                        * so to match, mmio_end is the first address
-                        * *past* the EFI_MEMORY_MAPPED_IO area.
-                        */
                        if (mmio_start <= start && end <= mmio_end)
                                return true;
                }
@@ -490,11 +486,10 @@ static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
                return false;
 
        if (dev)
-               dev_info(dev, "MMCONFIG at %pR reserved as %s\n",
+               dev_info(dev, "ECAM %pR reserved as %s\n",
                         &cfg->res, method);
        else
-               pr_info(PREFIX "MMCONFIG at %pR reserved as %s\n",
-                      &cfg->res, method);
+               pr_info("ECAM %pR reserved as %s\n", &cfg->res, method);
 
        if (old_size != size) {
                /* update end_bus */
@@ -503,47 +498,51 @@ static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
                cfg->res.end = cfg->res.start +
                    PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
                snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
-                        "PCI MMCONFIG %04x [bus %02x-%02x]",
+                        "PCI ECAM %04x [bus %02x-%02x]",
                         cfg->segment, cfg->start_bus, cfg->end_bus);
 
                if (dev)
-                       dev_info(dev,
-                               "MMCONFIG "
-                               "at %pR (base %#lx) (size reduced!)\n",
-                               &cfg->res, (unsigned long) cfg->address);
+                       dev_info(dev, "ECAM %pR (base %#lx) (size reduced!)\n",
+                                &cfg->res, (unsigned long) cfg->address);
                else
-                       pr_info(PREFIX
-                               "MMCONFIG for %04x [bus%02x-%02x] "
-                               "at %pR (base %#lx) (size reduced!)\n",
-                               cfg->segment, cfg->start_bus, cfg->end_bus,
-                               &cfg->res, (unsigned long) cfg->address);
+                       pr_info("ECAM %pR (base %#lx) for %04x [bus%02x-%02x] (size reduced!)\n",
+                               &cfg->res, (unsigned long) cfg->address,
+                               cfg->segment, cfg->start_bus, cfg->end_bus);
        }
 
        return true;
 }
 
-static bool __ref
-pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
+static bool __ref pci_mmcfg_reserved(struct device *dev,
+                                    struct pci_mmcfg_region *cfg, int early)
 {
+       struct resource *conflict;
+
        if (!early && !acpi_disabled) {
                if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
                                       "ACPI motherboard resource"))
                        return true;
 
                if (dev)
-                       dev_info(dev, FW_INFO
-                                "MMCONFIG at %pR not reserved in "
-                                "ACPI motherboard resources\n",
+                       dev_info(dev, FW_INFO "ECAM %pR not reserved in ACPI motherboard resources\n",
                                 &cfg->res);
                else
-                       pr_info(FW_INFO PREFIX
-                              "MMCONFIG at %pR not reserved in "
-                              "ACPI motherboard resources\n",
-                              &cfg->res);
-
-               if (is_mmconf_reserved(is_efi_mmio, cfg, dev,
-                                      "EfiMemoryMappedIO"))
+                       pr_info(FW_INFO "ECAM %pR not reserved in ACPI motherboard resources\n",
+                               &cfg->res);
+
+               if (is_efi_mmio(&cfg->res)) {
+                       pr_info("ECAM %pR is EfiMemoryMappedIO; assuming valid\n",
+                               &cfg->res);
+                       conflict = insert_resource_conflict(&iomem_resource,
+                                                           &cfg->res);
+                       if (conflict)
+                               pr_warn("ECAM %pR conflicts with %s %pR\n",
+                                       &cfg->res, conflict->name, conflict);
+                       else
+                               pr_info("ECAM %pR reserved to work around lack of ACPI motherboard _CRS\n",
+                                       &cfg->res);
                        return true;
+               }
        }
 
        /*
@@ -569,30 +568,31 @@ static void __init pci_mmcfg_reject_broken(int early)
        struct pci_mmcfg_region *cfg;
 
        list_for_each_entry(cfg, &pci_mmcfg_list, list) {
-               if (pci_mmcfg_check_reserved(NULL, cfg, early) == 0) {
-                       pr_info(PREFIX "not using MMCONFIG\n");
+               if (!pci_mmcfg_reserved(NULL, cfg, early)) {
+                       pr_info("not using ECAM (%pR not reserved)\n",
+                               &cfg->res);
                        free_all_mmcfg();
                        return;
                }
        }
 }
 
-static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
-                                       struct acpi_mcfg_allocation *cfg)
+static bool __init acpi_mcfg_valid_entry(struct acpi_table_mcfg *mcfg,
+                                        struct acpi_mcfg_allocation *cfg)
 {
        if (cfg->address < 0xFFFFFFFF)
-               return 0;
+               return true;
 
        if (!strncmp(mcfg->header.oem_id, "SGI", 3))
-               return 0;
+               return true;
 
        if ((mcfg->header.revision >= 1) && (dmi_get_bios_year() >= 2010))
-               return 0;
+               return true;
 
-       pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
-              "is above 4GB, ignored\n", cfg->pci_segment,
-              cfg->start_bus_number, cfg->end_bus_number, cfg->address);
-       return -EINVAL;
+       pr_err("ECAM at %#llx for %04x [bus %02x-%02x] is above 4GB, ignored\n",
+              cfg->address, cfg->pci_segment, cfg->start_bus_number,
+              cfg->end_bus_number);
+       return false;
 }
 
 static int __init pci_parse_mcfg(struct acpi_table_header *header)
@@ -616,21 +616,21 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
                i -= sizeof(struct acpi_mcfg_allocation);
        }
        if (entries == 0) {
-               pr_err(PREFIX "MMCONFIG has no entries\n");
+               pr_err("MCFG has no entries\n");
                return -ENODEV;
        }
 
        cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
        for (i = 0; i < entries; i++) {
                cfg = &cfg_table[i];
-               if (acpi_mcfg_check_entry(mcfg, cfg)) {
+               if (!acpi_mcfg_valid_entry(mcfg, cfg)) {
                        free_all_mmcfg();
                        return -ENODEV;
                }
 
                if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
                                   cfg->end_bus_number, cfg->address) == NULL) {
-                       pr_warn(PREFIX "no memory for MCFG entries\n");
+                       pr_warn("no memory for MCFG entries\n");
                        free_all_mmcfg();
                        return -ENOMEM;
                }
@@ -667,6 +667,8 @@ static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
 
 static void __init __pci_mmcfg_init(int early)
 {
+       pr_debug("%s(%s)\n", __func__, early ? "early" : "late");
+
        pci_mmcfg_reject_broken(early);
        if (list_empty(&pci_mmcfg_list))
                return;
@@ -693,6 +695,8 @@ static int __initdata known_bridge;
 
 void __init pci_mmcfg_early_init(void)
 {
+       pr_debug("%s() pci_probe %#x\n", __func__, pci_probe);
+
        if (pci_probe & PCI_PROBE_MMCONF) {
                if (pci_mmcfg_check_hostbridge())
                        known_bridge = 1;
@@ -706,14 +710,16 @@ void __init pci_mmcfg_early_init(void)
 
 void __init pci_mmcfg_late_init(void)
 {
-       /* MMCONFIG disabled */
+       pr_debug("%s() pci_probe %#x\n", __func__, pci_probe);
+
+       /* ECAM disabled */
        if ((pci_probe & PCI_PROBE_MMCONF) == 0)
                return;
 
        if (known_bridge)
                return;
 
-       /* MMCONFIG hasn't been enabled yet, try again */
+       /* ECAM hasn't been enabled yet, try again */
        if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) {
                acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
                __pci_mmcfg_init(0);
@@ -726,7 +732,9 @@ static int __init pci_mmcfg_late_insert_resources(void)
 
        pci_mmcfg_running_state = true;
 
-       /* If we are not using MMCONFIG, don't insert the resources. */
+       pr_debug("%s() pci_probe %#x\n", __func__, pci_probe);
+
+       /* If we are not using ECAM, don't insert the resources. */
        if ((pci_probe & PCI_PROBE_MMCONF) == 0)
                return 1;
 
@@ -735,21 +743,24 @@ static int __init pci_mmcfg_late_insert_resources(void)
         * marked so it won't cause request errors when __request_region is
         * called.
         */
-       list_for_each_entry(cfg, &pci_mmcfg_list, list)
-               if (!cfg->res.parent)
+       list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+               if (!cfg->res.parent) {
+                       pr_debug("%s() insert %pR\n", __func__, &cfg->res);
                        insert_resource(&iomem_resource, &cfg->res);
+               }
+       }
 
        return 0;
 }
 
 /*
- * Perform MMCONFIG resource insertion after PCI initialization to allow for
+ * Perform ECAM resource insertion after PCI initialization to allow for
  * misprogrammed MCFG tables that state larger sizes but actually conflict
  * with other system resources.
  */
 late_initcall(pci_mmcfg_late_insert_resources);
 
-/* Add MMCFG information for host bridges */
+/* Add ECAM information for host bridges */
 int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
                        phys_addr_t addr)
 {
@@ -757,6 +768,8 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
        struct resource *tmp = NULL;
        struct pci_mmcfg_region *cfg;
 
+       dev_dbg(dev, "%s(%04x [bus %02x-%02x])\n", __func__, seg, start, end);
+
        if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
                return -ENODEV;
 
@@ -767,15 +780,17 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
        cfg = pci_mmconfig_lookup(seg, start);
        if (cfg) {
                if (cfg->end_bus < end)
-                       dev_info(dev, FW_INFO
-                                "MMCONFIG for "
-                                "domain %04x [bus %02x-%02x] "
-                                "only partially covers this bridge\n",
-                                 cfg->segment, cfg->start_bus, cfg->end_bus);
+                       dev_info(dev, FW_INFO "ECAM %pR for domain %04x [bus %02x-%02x] only partially covers this bridge\n",
+                                &cfg->res, cfg->segment, cfg->start_bus,
+                                cfg->end_bus);
                mutex_unlock(&pci_mmcfg_lock);
                return -EEXIST;
        }
 
+       /*
+        * Don't move earlier; we must return -EEXIST, not -EINVAL, if
+        * pci_mmconfig_lookup() finds something
+        */
        if (!addr) {
                mutex_unlock(&pci_mmcfg_lock);
                return -EINVAL;
@@ -784,10 +799,10 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
        rc = -EBUSY;
        cfg = pci_mmconfig_alloc(seg, start, end, addr);
        if (cfg == NULL) {
-               dev_warn(dev, "fail to add MMCONFIG (out of memory)\n");
+               dev_warn(dev, "fail to add ECAM (out of memory)\n");
                rc = -ENOMEM;
-       } else if (!pci_mmcfg_check_reserved(dev, cfg, 0)) {
-               dev_warn(dev, FW_BUG "MMCONFIG %pR isn't reserved\n",
+       } else if (!pci_mmcfg_reserved(dev, cfg, 0)) {
+               dev_warn(dev, FW_BUG "ECAM %pR isn't reserved\n",
                         &cfg->res);
        } else {
                /* Insert resource if it's not in boot stage */
@@ -796,16 +811,13 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
                                                       &cfg->res);
 
                if (tmp) {
-                       dev_warn(dev,
-                                "MMCONFIG %pR conflicts with "
-                                "%s %pR\n",
+                       dev_warn(dev, "ECAM %pR conflicts with %s %pR\n",
                                 &cfg->res, tmp->name, tmp);
                } else if (pci_mmcfg_arch_map(cfg)) {
-                       dev_warn(dev, "fail to map MMCONFIG %pR.\n",
-                                &cfg->res);
+                       dev_warn(dev, "fail to map ECAM %pR\n", &cfg->res);
                } else {
                        list_add_sorted(cfg);
-                       dev_info(dev, "MMCONFIG at %pR (base %#lx)\n",
+                       dev_info(dev, "ECAM %pR (base %#lx)\n",
                                 &cfg->res, (unsigned long)addr);
                        cfg = NULL;
                        rc = 0;
@@ -823,7 +835,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
        return rc;
 }
 
-/* Delete MMCFG information for host bridges */
+/* Delete ECAM information for host bridges */
 int pci_mmconfig_delete(u16 seg, u8 start, u8 end)
 {
        struct pci_mmcfg_region *cfg;
index bfa7898753220b9548cd87eba7afaf565ae76a3e..f9ef97c593cf1ad13babeeab6ee4a9db0b614ba0 100644 (file)
@@ -131,7 +131,7 @@ const struct pci_raw_ops pci_mmcfg = {
 
 int __init pci_mmcfg_arch_init(void)
 {
-       printk(KERN_INFO "PCI: Using MMCONFIG for extended config space\n");
+       printk(KERN_INFO "PCI: Using ECAM for extended config space\n");
        raw_pci_ext_ops = &pci_mmcfg;
        return 1;
 }
index 0c7b6e66c64484d2bfa402033e70a3f5c901a485..cb5aa79a759e1ce9e5c1e5b77c3463afd5a05d30 100644 (file)
@@ -6,6 +6,8 @@
  * space mapped. This allows lockless config space operation.
  */
 
+#define pr_fmt(fmt) "PCI: " fmt
+
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
@@ -14,8 +16,6 @@
 #include <asm/e820/api.h>
 #include <asm/pci_x86.h>
 
-#define PREFIX "PCI: "
-
 static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
 {
        struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
@@ -111,6 +111,25 @@ static void __iomem *mcfg_ioremap(struct pci_mmcfg_region *cfg)
        return addr;
 }
 
+int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
+{
+       cfg->virt = mcfg_ioremap(cfg);
+       if (!cfg->virt) {
+               pr_err("can't map ECAM at %pR\n", &cfg->res);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
+{
+       if (cfg && cfg->virt) {
+               iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
+               cfg->virt = NULL;
+       }
+}
+
 int __init pci_mmcfg_arch_init(void)
 {
        struct pci_mmcfg_region *cfg;
@@ -133,22 +152,3 @@ void __init pci_mmcfg_arch_free(void)
        list_for_each_entry(cfg, &pci_mmcfg_list, list)
                pci_mmcfg_arch_unmap(cfg);
 }
-
-int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
-{
-       cfg->virt = mcfg_ioremap(cfg);
-       if (!cfg->virt) {
-               pr_err(PREFIX "can't map MMCONFIG at %pR\n", &cfg->res);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
-{
-       if (cfg && cfg->virt) {
-               iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
-               cfg->virt = NULL;
-       }
-}
index 4f15280732edb9983422e0b4a8ce5ede8fa138bb..244c643bb0b5f8bbbbc85c0ff1fe1874d23fce07 100644 (file)
@@ -3,6 +3,8 @@
  * BIOS32 and PCI BIOS handling.
  */
 
+#include <linux/bits.h>
+#include <linux/bitfield.h>
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #define PCIBIOS_HW_TYPE1_SPEC          0x10
 #define PCIBIOS_HW_TYPE2_SPEC          0x20
 
+/*
+ * Returned in EAX:
+ * - AH: return code
+ */
+#define PCIBIOS_RETURN_CODE                    GENMASK(15, 8)
+
 int pcibios_enabled;
 
+static u8 pcibios_get_return_code(u32 eax)
+{
+       return FIELD_GET(PCIBIOS_RETURN_CODE, eax);
+}
+
 /* According to the BIOS specification at:
  * http://members.datafast.net.au/dft0802/specs/bios21.pdf, we could
  * restrict the x zone to some pages and make it ro. But this may be
@@ -154,7 +167,7 @@ static int __init check_pcibios(void)
                        : "memory");
                local_irq_restore(flags);
 
-               status = (eax >> 8) & 0xff;
+               status = pcibios_get_return_code(eax);
                hw_mech = eax & 0xff;
                major_ver = (ebx >> 8) & 0xff;
                minor_ver = ebx & 0xff;
@@ -227,7 +240,7 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
 
        raw_spin_unlock_irqrestore(&pci_config_lock, flags);
 
-       return (int)((result & 0xff00) >> 8);
+       return pcibios_get_return_code(result);
 }
 
 static int pci_bios_write(unsigned int seg, unsigned int bus,
@@ -269,7 +282,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
 
        raw_spin_unlock_irqrestore(&pci_config_lock, flags);
 
-       return (int)((result & 0xff00) >> 8);
+       return pcibios_get_return_code(result);
 }
 
 
@@ -385,9 +398,10 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
                  "m" (opt)
                : "memory");
        DBG("OK  ret=%d, size=%d, map=%x\n", ret, opt.size, map);
-       if (ret & 0xff00)
-               printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
-       else if (opt.size) {
+       ret = pcibios_get_return_code(ret);
+       if (ret) {
+               printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", ret);
+       } else if (opt.size) {
                rt = kmalloc(sizeof(struct irq_routing_table) + opt.size, GFP_KERNEL);
                if (rt) {
                        memset(rt, 0, sizeof(struct irq_routing_table));
@@ -415,7 +429,7 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
                  "b" ((dev->bus->number << 8) | dev->devfn),
                  "c" ((irq << 8) | (pin + 10)),
                  "S" (&pci_indirect));
-       return !(ret & 0xff00);
+       return pcibios_get_return_code(ret) == PCIBIOS_SUCCESSFUL;
 }
 EXPORT_SYMBOL(pcibios_set_irq_routing);
 
index 6523eb7c3bd177af8b9136cf0d946fba9e2919fa..6052200fe9256882859dcd0f19eef8b70f1245f5 100644 (file)
@@ -168,8 +168,8 @@ do {                                                                \
        (pr_reg)[18] = (_regs)->regs.gp[18];                    \
        (pr_reg)[19] = (_regs)->regs.gp[19];                    \
        (pr_reg)[20] = (_regs)->regs.gp[20];                    \
-       (pr_reg)[21] = current->thread.arch.fs;                 \
-       (pr_reg)[22] = 0;                                       \
+       (pr_reg)[21] = (_regs)->regs.gp[21];                    \
+       (pr_reg)[22] = (_regs)->regs.gp[22];                    \
        (pr_reg)[23] = 0;                                       \
        (pr_reg)[24] = 0;                                       \
        (pr_reg)[25] = 0;                                       \
index 1ef9c21877bc8580b93dac4800b31cfb405edfc7..f901595089365f7214d9ae8b39f763417ae71901 100644 (file)
 struct arch_thread {
         unsigned long debugregs[8];
         int debugregs_seq;
-        unsigned long fs;
         struct faultinfo faultinfo;
 };
 
 #define INIT_ARCH_THREAD { .debugregs                  = { [ 0 ... 7 ] = 0 }, \
                           .debugregs_seq       = 0, \
-                          .fs                  = 0, \
                           .faultinfo           = { 0, 0, 0 } }
 
 #define STACKSLOTS_PER_LINE 4
@@ -28,7 +26,6 @@ static inline void arch_flush_thread(struct arch_thread *thread)
 static inline void arch_copy_thread(struct arch_thread *from,
                                     struct arch_thread *to)
 {
-       to->fs = from->fs;
 }
 
 #define current_sp() ({ void *sp; __asm__("movq %%rsp, %0" : "=r" (sp) : ); sp; })
index ae169125d03fcc41ebd159dca6d58ee9ed964f55..5249bbc30dcdb107ed45d7f0d76328f650308f28 100644 (file)
@@ -6,7 +6,6 @@
 obj-y = registers.o task_size.o mcontext.o
 
 obj-$(CONFIG_X86_32) += tls.o
-obj-$(CONFIG_64BIT) += prctl.o
 
 USER_OBJS := $(obj-y)
 
diff --git a/arch/x86/um/os-Linux/prctl.c b/arch/x86/um/os-Linux/prctl.c
deleted file mode 100644 (file)
index 8431e87..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (C) 2007 Jeff Dike (jdike@{addtoit.com,linux.intel.com})
- * Licensed under the GPL
- */
-
-#include <sys/ptrace.h>
-#include <asm/ptrace.h>
-
-int os_arch_prctl(int pid, int option, unsigned long *arg2)
-{
-       return ptrace(PTRACE_ARCH_PRCTL, pid, (unsigned long) arg2, option);
-}
index 0bc4b73a9cdeab719e72026cdf823346cbdc985e..7f1abde2c84b5bf72c39d9beea2d672eded3145c 100644 (file)
@@ -25,30 +25,6 @@ void arch_switch_to(struct task_struct *to)
                printk(KERN_WARNING "arch_switch_tls failed, errno = EINVAL\n");
 }
 
-int is_syscall(unsigned long addr)
-{
-       unsigned short instr;
-       int n;
-
-       n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
-       if (n) {
-               /* access_process_vm() grants access to vsyscall and stub,
-                * while copy_from_user doesn't. Maybe access_process_vm is
-                * slow, but that doesn't matter, since it will be called only
-                * in case of singlestepping, if copy_from_user failed.
-                */
-               n = access_process_vm(current, addr, &instr, sizeof(instr),
-                               FOLL_FORCE);
-               if (n != sizeof(instr)) {
-                       printk(KERN_ERR "is_syscall : failed to read "
-                              "instruction from 0x%lx\n", addr);
-                       return 1;
-               }
-       }
-       /* int 0x80 or sysenter */
-       return (instr == 0x80cd) || (instr == 0x340f);
-}
-
 /* determines which flags the user has access to. */
 /* 1 = access 0 = no access */
 #define FLAG_MASK 0x00044dd5
index 289d0159b041e246660d16b65e545aa0f9d85f66..aa68d83d3f441b746e0b11a9ffa8bec3ca51a996 100644 (file)
@@ -188,32 +188,6 @@ int peek_user(struct task_struct *child, long addr, long data)
        return put_user(tmp, (unsigned long *) data);
 }
 
-/* XXX Mostly copied from sys-i386 */
-int is_syscall(unsigned long addr)
-{
-       unsigned short instr;
-       int n;
-
-       n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
-       if (n) {
-               /*
-                * access_process_vm() grants access to vsyscall and stub,
-                * while copy_from_user doesn't. Maybe access_process_vm is
-                * slow, but that doesn't matter, since it will be called only
-                * in case of singlestepping, if copy_from_user failed.
-                */
-               n = access_process_vm(current, addr, &instr, sizeof(instr),
-                               FOLL_FORCE);
-               if (n != sizeof(instr)) {
-                       printk("is_syscall : failed to read instruction from "
-                              "0x%lx\n", addr);
-                       return 1;
-               }
-       }
-       /* sysenter */
-       return instr == 0x050f;
-}
-
 static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 {
        int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
index db8478a83a09700065b62e1795c2e796c04de608..0c4989842fbe6caaefb47e7f43acdc13bf58ed60 100644 (file)
@@ -8,10 +8,6 @@
 
 #define MAX_FP_NR HOST_FPX_SIZE
 
-void set_using_sysemu(int value);
-int get_using_sysemu(void);
-extern int sysemu_supported;
-
 #define UPT_SYSCALL_ARG1(r) UPT_BX(r)
 #define UPT_SYSCALL_ARG2(r) UPT_CX(r)
 #define UPT_SYSCALL_ARG3(r) UPT_DX(r)
index 44782bbad41ea90658dfc8b19a619f4bd344789e..1d1a824fa65282a2b111a9677dc60243f7c2c0c5 100644 (file)
 #define FP_SIZE ((HOST_FPX_SIZE > HOST_FP_SIZE) ? HOST_FPX_SIZE : HOST_FP_SIZE)
 #else
 #define FP_SIZE HOST_FP_SIZE
+#endif
 
 /*
- * x86_64 FC3 doesn't define this in /usr/include/linux/ptrace.h even though
- * it's defined in the kernel's include/linux/ptrace.h. Additionally, use the
- * 2.4 name and value for 2.4 host compatibility.
+ * glibc before 2.27 does not include PTRACE_SYSEMU_SINGLESTEP in its enum,
+ * ensure we have a definition by (re-)defining it here.
  */
-#ifndef PTRACE_OLDSETOPTIONS
-#define PTRACE_OLDSETOPTIONS 21
-#endif
-
+#ifndef PTRACE_SYSEMU_SINGLESTEP
+#define PTRACE_SYSEMU_SINGLESTEP 32
 #endif
index 38fa894b65d088970f99a9ca7ad6bfeb7895462c..ea8b5a2d67afb95cb2deb76efa4059f7b602771d 100644 (file)
 #define STUB_MMAP_NR __NR_mmap2
 #define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)
 
-static inline long stub_syscall0(long syscall)
+static __always_inline long stub_syscall0(long syscall)
 {
        long ret;
 
-       __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall));
+       __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall)
+                       : "memory");
 
        return ret;
 }
 
-static inline long stub_syscall1(long syscall, long arg1)
+static __always_inline long stub_syscall1(long syscall, long arg1)
 {
        long ret;
 
-       __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1));
+       __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1)
+                       : "memory");
 
        return ret;
 }
 
-static inline long stub_syscall2(long syscall, long arg1, long arg2)
+static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
 {
        long ret;
 
        __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
-                       "c" (arg2));
+                       "c" (arg2)
+                       : "memory");
 
        return ret;
 }
 
-static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
+static __always_inline long stub_syscall3(long syscall, long arg1, long arg2,
+                                         long arg3)
 {
        long ret;
 
        __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
-                       "c" (arg2), "d" (arg3));
+                       "c" (arg2), "d" (arg3)
+                       : "memory");
 
        return ret;
 }
 
-static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
-                                long arg4)
+static __always_inline long stub_syscall4(long syscall, long arg1, long arg2,
+                                         long arg3, long arg4)
 {
        long ret;
 
        __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
-                       "c" (arg2), "d" (arg3), "S" (arg4));
+                       "c" (arg2), "d" (arg3), "S" (arg4)
+                       : "memory");
 
        return ret;
 }
 
-static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
-                                long arg4, long arg5)
+static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
+                                         long arg3, long arg4, long arg5)
 {
        long ret;
 
        __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
-                       "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5));
+                       "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
+                       : "memory");
 
        return ret;
 }
 
-static inline void trap_myself(void)
+static __always_inline void trap_myself(void)
 {
        __asm("int3");
 }
 
-static inline void remap_stack_and_trap(void)
+static __always_inline void remap_stack_and_trap(void)
 {
        __asm__ volatile (
                "movl %%esp,%%ebx ;"
index 2de1c8f8817341f7447ed72ccc5010f5faed5686..b24168ef0ac49f7b0935688cbb6b7e3fad47ebe7 100644 (file)
@@ -16,7 +16,7 @@
 #define __syscall_clobber "r11","rcx","memory"
 #define __syscall "syscall"
 
-static inline long stub_syscall0(long syscall)
+static __always_inline long stub_syscall0(long syscall)
 {
        long ret;
 
@@ -27,7 +27,7 @@ static inline long stub_syscall0(long syscall)
        return ret;
 }
 
-static inline long stub_syscall2(long syscall, long arg1, long arg2)
+static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
 {
        long ret;
 
@@ -38,7 +38,8 @@ static inline long stub_syscall2(long syscall, long arg1, long arg2)
        return ret;
 }
 
-static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
+static __always_inline long stub_syscall3(long syscall, long arg1, long arg2,
+                                         long arg3)
 {
        long ret;
 
@@ -50,7 +51,7 @@ static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
        return ret;
 }
 
-static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
+static __always_inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
                                 long arg4)
 {
        long ret;
@@ -64,8 +65,8 @@ static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
        return ret;
 }
 
-static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
-                                long arg4, long arg5)
+static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
+                                         long arg3, long arg4, long arg5)
 {
        long ret;
 
@@ -78,12 +79,12 @@ static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
        return ret;
 }
 
-static inline void trap_myself(void)
+static __always_inline void trap_myself(void)
 {
        __asm("int3");
 }
 
-static inline void remap_stack_and_trap(void)
+static __always_inline void remap_stack_and_trap(void)
 {
        __asm__ volatile (
                "movq %0,%%rax ;"
index 27b29ae6c471b893dc2d92d7562eaa188cd5b9c2..6a00a28c9cca77ff2124c8718dcae84b8dae24b5 100644 (file)
 long arch_prctl(struct task_struct *task, int option,
                unsigned long __user *arg2)
 {
-       unsigned long *ptr = arg2, tmp;
-       long ret;
-       int pid = task->mm->context.id.u.pid;
-
-       /*
-        * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
-        * be safe), we need to call arch_prctl on the host because
-        * setting %fs may result in something else happening (like a
-        * GDT or thread.fs being set instead).  So, we let the host
-        * fiddle the registers and thread struct and restore the
-        * registers afterwards.
-        *
-        * So, the saved registers are stored to the process (this
-        * needed because a stub may have been the last thing to run),
-        * arch_prctl is run on the host, then the registers are read
-        * back.
-        */
-       switch (option) {
-       case ARCH_SET_FS:
-       case ARCH_SET_GS:
-               ret = restore_pid_registers(pid, &current->thread.regs.regs);
-               if (ret)
-                       return ret;
-               break;
-       case ARCH_GET_FS:
-       case ARCH_GET_GS:
-               /*
-                * With these two, we read to a local pointer and
-                * put_user it to the userspace pointer that we were
-                * given.  If addr isn't valid (because it hasn't been
-                * faulted in or is just bogus), we want put_user to
-                * fault it in (or return -EFAULT) instead of having
-                * the host return -EFAULT.
-                */
-               ptr = &tmp;
-       }
-
-       ret = os_arch_prctl(pid, option, ptr);
-       if (ret)
-               return ret;
+       long ret = -EINVAL;
 
        switch (option) {
        case ARCH_SET_FS:
-               current->thread.arch.fs = (unsigned long) ptr;
-               ret = save_registers(pid, &current->thread.regs.regs);
+               current->thread.regs.regs.gp[FS_BASE / sizeof(unsigned long)] =
+                       (unsigned long) arg2;
+               ret = 0;
                break;
        case ARCH_SET_GS:
-               ret = save_registers(pid, &current->thread.regs.regs);
+               current->thread.regs.regs.gp[GS_BASE / sizeof(unsigned long)] =
+                       (unsigned long) arg2;
+               ret = 0;
                break;
        case ARCH_GET_FS:
-               ret = put_user(tmp, arg2);
+               ret = put_user(current->thread.regs.regs.gp[FS_BASE / sizeof(unsigned long)], arg2);
                break;
        case ARCH_GET_GS:
-               ret = put_user(tmp, arg2);
+               ret = put_user(current->thread.regs.regs.gp[GS_BASE / sizeof(unsigned long)], arg2);
                break;
        }
 
@@ -83,10 +47,10 @@ SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
 
 void arch_switch_to(struct task_struct *to)
 {
-       if ((to->thread.arch.fs == 0) || (to->mm == NULL))
-               return;
-
-       arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs);
+       /*
+        * Nothing needs to be done on x86_64.
+        * The FS_BASE/GS_BASE registers are saved in the ptrace register set.
+        */
 }
 
 SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
index ebd3855d9b132379b7065222316a948e06b2b72e..c51a613f6f5c413f8198830de4c3126d6ed930c6 100644 (file)
@@ -12,7 +12,7 @@ int arch_set_tls(struct task_struct *t, unsigned long tls)
         * If CLONE_SETTLS is set, we need to save the thread id
         * so it can be set during context switches.
         */
-       t->thread.arch.fs = tls;
+       t->thread.regs.regs.gp[FS_BASE / sizeof(unsigned long)] = tls;
 
        return 0;
 }
index 46ef8f73aebbb1520b12233ac208db33355b0678..90da47eb85eec3a0f4d64b2ffa796f6d0c71013f 100644 (file)
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-y += seamcall.o
+obj-y += seamcall.o tdx.o
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
new file mode 100644 (file)
index 0000000..4d6826a
--- /dev/null
@@ -0,0 +1,1492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2023 Intel Corporation.
+ *
+ * Intel Trusted Domain Extensions (TDX) support
+ */
+
+#define pr_fmt(fmt)    "virt/tdx: " fmt
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/cpu.h>
+#include <linux/spinlock.h>
+#include <linux/percpu-defs.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/memory.h>
+#include <linux/minmax.h>
+#include <linux/sizes.h>
+#include <linux/pfn.h>
+#include <linux/align.h>
+#include <linux/sort.h>
+#include <linux/log2.h>
+#include <linux/acpi.h>
+#include <linux/suspend.h>
+#include <linux/acpi.h>
+#include <asm/page.h>
+#include <asm/special_insns.h>
+#include <asm/msr-index.h>
+#include <asm/msr.h>
+#include <asm/cpufeature.h>
+#include <asm/tdx.h>
+#include <asm/intel-family.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+#include "tdx.h"
+
+static u32 tdx_global_keyid __ro_after_init;
+static u32 tdx_guest_keyid_start __ro_after_init;
+static u32 tdx_nr_guest_keyids __ro_after_init;
+
+static DEFINE_PER_CPU(bool, tdx_lp_initialized);
+
+static struct tdmr_info_list tdx_tdmr_list;
+
+static enum tdx_module_status_t tdx_module_status;
+static DEFINE_MUTEX(tdx_module_lock);
+
+/* All TDX-usable memory regions.  Protected by mem_hotplug_lock. */
+static LIST_HEAD(tdx_memlist);
+
+typedef void (*sc_err_func_t)(u64 fn, u64 err, struct tdx_module_args *args);
+
+static inline void seamcall_err(u64 fn, u64 err, struct tdx_module_args *args)
+{
+       pr_err("SEAMCALL (0x%016llx) failed: 0x%016llx\n", fn, err);
+}
+
+static inline void seamcall_err_ret(u64 fn, u64 err,
+                                   struct tdx_module_args *args)
+{
+       seamcall_err(fn, err, args);
+       pr_err("RCX 0x%016llx RDX 0x%016llx R08 0x%016llx\n",
+                       args->rcx, args->rdx, args->r8);
+       pr_err("R09 0x%016llx R10 0x%016llx R11 0x%016llx\n",
+                       args->r9, args->r10, args->r11);
+}
+
+static inline int sc_retry_prerr(sc_func_t func, sc_err_func_t err_func,
+                                u64 fn, struct tdx_module_args *args)
+{
+       u64 sret = sc_retry(func, fn, args);
+
+       if (sret == TDX_SUCCESS)
+               return 0;
+
+       if (sret == TDX_SEAMCALL_VMFAILINVALID)
+               return -ENODEV;
+
+       if (sret == TDX_SEAMCALL_GP)
+               return -EOPNOTSUPP;
+
+       if (sret == TDX_SEAMCALL_UD)
+               return -EACCES;
+
+       err_func(fn, sret, args);
+       return -EIO;
+}
+
+#define seamcall_prerr(__fn, __args)                                           \
+       sc_retry_prerr(__seamcall, seamcall_err, (__fn), (__args))
+
+#define seamcall_prerr_ret(__fn, __args)                                       \
+       sc_retry_prerr(__seamcall_ret, seamcall_err_ret, (__fn), (__args))
+
+/*
+ * Do the module global initialization once and return its result.
+ * It can be done on any cpu.  It's always called with interrupts
+ * disabled.
+ */
+static int try_init_module_global(void)
+{
+       struct tdx_module_args args = {};
+       static DEFINE_RAW_SPINLOCK(sysinit_lock);
+       static bool sysinit_done;
+       static int sysinit_ret;
+
+       lockdep_assert_irqs_disabled();
+
+       raw_spin_lock(&sysinit_lock);
+
+       if (sysinit_done)
+               goto out;
+
+       /* RCX is module attributes and all bits are reserved */
+       args.rcx = 0;
+       sysinit_ret = seamcall_prerr(TDH_SYS_INIT, &args);
+
+       /*
+        * The first SEAMCALL also detects the TDX module, thus
+        * it can fail due to the TDX module is not loaded.
+        * Dump message to let the user know.
+        */
+       if (sysinit_ret == -ENODEV)
+               pr_err("module not loaded\n");
+
+       sysinit_done = true;
+out:
+       raw_spin_unlock(&sysinit_lock);
+       return sysinit_ret;
+}
+
+/**
+ * tdx_cpu_enable - Enable TDX on local cpu
+ *
+ * Do one-time TDX module per-cpu initialization SEAMCALL (and TDX module
+ * global initialization SEAMCALL if not done) on local cpu to make this
+ * cpu be ready to run any other SEAMCALLs.
+ *
+ * Always call this function via IPI function calls.
+ *
+ * Return 0 on success, otherwise errors.
+ */
+int tdx_cpu_enable(void)
+{
+       struct tdx_module_args args = {};
+       int ret;
+
+       if (!boot_cpu_has(X86_FEATURE_TDX_HOST_PLATFORM))
+               return -ENODEV;
+
+       lockdep_assert_irqs_disabled();
+
+       if (__this_cpu_read(tdx_lp_initialized))
+               return 0;
+
+       /*
+        * The TDX module global initialization is the very first step
+        * to enable TDX.  Need to do it first (if hasn't been done)
+        * before the per-cpu initialization.
+        */
+       ret = try_init_module_global();
+       if (ret)
+               return ret;
+
+       ret = seamcall_prerr(TDH_SYS_LP_INIT, &args);
+       if (ret)
+               return ret;
+
+       __this_cpu_write(tdx_lp_initialized, true);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(tdx_cpu_enable);
+
+/*
+ * Add a memory region as a TDX memory block.  The caller must make sure
+ * all memory regions are added in address ascending order and don't
+ * overlap.
+ */
+static int add_tdx_memblock(struct list_head *tmb_list, unsigned long start_pfn,
+                           unsigned long end_pfn, int nid)
+{
+       struct tdx_memblock *tmb;
+
+       tmb = kmalloc(sizeof(*tmb), GFP_KERNEL);
+       if (!tmb)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&tmb->list);
+       tmb->start_pfn = start_pfn;
+       tmb->end_pfn = end_pfn;
+       tmb->nid = nid;
+
+       /* @tmb_list is protected by mem_hotplug_lock */
+       list_add_tail(&tmb->list, tmb_list);
+       return 0;
+}
+
+static void free_tdx_memlist(struct list_head *tmb_list)
+{
+       /* @tmb_list is protected by mem_hotplug_lock */
+       while (!list_empty(tmb_list)) {
+               struct tdx_memblock *tmb = list_first_entry(tmb_list,
+                               struct tdx_memblock, list);
+
+               list_del(&tmb->list);
+               kfree(tmb);
+       }
+}
+
+/*
+ * Ensure that all memblock memory regions are convertible to TDX
+ * memory.  Once this has been established, stash the memblock
+ * ranges off in a secondary structure because memblock is modified
+ * in memory hotplug while TDX memory regions are fixed.
+ */
+static int build_tdx_memlist(struct list_head *tmb_list)
+{
+       unsigned long start_pfn, end_pfn;
+       int i, nid, ret;
+
+       for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+               /*
+                * The first 1MB is not reported as TDX convertible memory.
+                * Although the first 1MB is always reserved and won't end up
+                * to the page allocator, it is still in memblock's memory
+                * regions.  Skip them manually to exclude them as TDX memory.
+                */
+               start_pfn = max(start_pfn, PHYS_PFN(SZ_1M));
+               if (start_pfn >= end_pfn)
+                       continue;
+
+               /*
+                * Add the memory regions as TDX memory.  The regions in
+                * memblock has already guaranteed they are in address
+                * ascending order and don't overlap.
+                */
+               ret = add_tdx_memblock(tmb_list, start_pfn, end_pfn, nid);
+               if (ret)
+                       goto err;
+       }
+
+       return 0;
+err:
+       free_tdx_memlist(tmb_list);
+       return ret;
+}
+
+static int read_sys_metadata_field(u64 field_id, u64 *data)
+{
+       struct tdx_module_args args = {};
+       int ret;
+
+       /*
+        * TDH.SYS.RD -- reads one global metadata field
+        *  - RDX (in): the field to read
+        *  - R8 (out): the field data
+        */
+       args.rdx = field_id;
+       ret = seamcall_prerr_ret(TDH_SYS_RD, &args);
+       if (ret)
+               return ret;
+
+       *data = args.r8;
+
+       return 0;
+}
+
+static int read_sys_metadata_field16(u64 field_id,
+                                    int offset,
+                                    struct tdx_tdmr_sysinfo *ts)
+{
+       u16 *ts_member = ((void *)ts) + offset;
+       u64 tmp;
+       int ret;
+
+       if (WARN_ON_ONCE(MD_FIELD_ID_ELE_SIZE_CODE(field_id) !=
+                       MD_FIELD_ID_ELE_SIZE_16BIT))
+               return -EINVAL;
+
+       ret = read_sys_metadata_field(field_id, &tmp);
+       if (ret)
+               return ret;
+
+       *ts_member = tmp;
+
+       return 0;
+}
+
+struct field_mapping {
+       u64 field_id;
+       int offset;
+};
+
+#define TD_SYSINFO_MAP(_field_id, _offset) \
+       { .field_id = MD_FIELD_ID_##_field_id,     \
+         .offset   = offsetof(struct tdx_tdmr_sysinfo, _offset) }
+
+/* Map TD_SYSINFO fields into 'struct tdx_tdmr_sysinfo': */
+static const struct field_mapping fields[] = {
+       TD_SYSINFO_MAP(MAX_TDMRS,             max_tdmrs),
+       TD_SYSINFO_MAP(MAX_RESERVED_PER_TDMR, max_reserved_per_tdmr),
+       TD_SYSINFO_MAP(PAMT_4K_ENTRY_SIZE,    pamt_entry_size[TDX_PS_4K]),
+       TD_SYSINFO_MAP(PAMT_2M_ENTRY_SIZE,    pamt_entry_size[TDX_PS_2M]),
+       TD_SYSINFO_MAP(PAMT_1G_ENTRY_SIZE,    pamt_entry_size[TDX_PS_1G]),
+};
+
+static int get_tdx_tdmr_sysinfo(struct tdx_tdmr_sysinfo *tdmr_sysinfo)
+{
+       int ret;
+       int i;
+
+       /* Populate 'tdmr_sysinfo' fields using the mapping structure above: */
+       for (i = 0; i < ARRAY_SIZE(fields); i++) {
+               ret = read_sys_metadata_field16(fields[i].field_id,
+                                               fields[i].offset,
+                                               tdmr_sysinfo);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Calculate the actual TDMR size */
+static int tdmr_size_single(u16 max_reserved_per_tdmr)
+{
+       int tdmr_sz;
+
+       /*
+        * The actual size of TDMR depends on the maximum
+        * number of reserved areas.
+        */
+       tdmr_sz = sizeof(struct tdmr_info);
+       tdmr_sz += sizeof(struct tdmr_reserved_area) * max_reserved_per_tdmr;
+
+       return ALIGN(tdmr_sz, TDMR_INFO_ALIGNMENT);
+}
+
+/*
+ * Allocate one physically contiguous buffer big enough to hold
+ * @tdmr_sysinfo->max_tdmrs TDMRs and record it, along with the
+ * per-TDMR stride, in @tdmr_list.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int alloc_tdmr_list(struct tdmr_info_list *tdmr_list,
+                          struct tdx_tdmr_sysinfo *tdmr_sysinfo)
+{
+       size_t tdmr_sz, tdmr_array_sz;
+       void *tdmr_array;
+
+       tdmr_sz = tdmr_size_single(tdmr_sysinfo->max_reserved_per_tdmr);
+       tdmr_array_sz = tdmr_sz * tdmr_sysinfo->max_tdmrs;
+
+       /*
+        * To keep things simple, allocate all TDMRs together.
+        * The buffer needs to be physically contiguous to make
+        * sure each TDMR is physically contiguous.
+        */
+       tdmr_array = alloc_pages_exact(tdmr_array_sz,
+                       GFP_KERNEL | __GFP_ZERO);
+       if (!tdmr_array)
+               return -ENOMEM;
+
+       tdmr_list->tdmrs = tdmr_array;
+
+       /*
+        * Keep the size of TDMR to find the target TDMR
+        * at a given index in the TDMR list.
+        */
+       tdmr_list->tdmr_sz = tdmr_sz;
+       tdmr_list->max_tdmrs = tdmr_sysinfo->max_tdmrs;
+       tdmr_list->nr_consumed_tdmrs = 0;
+
+       return 0;
+}
+
+/* Free the TDMR buffer allocated by alloc_tdmr_list() */
+static void free_tdmr_list(struct tdmr_info_list *tdmr_list)
+{
+       size_t array_sz = tdmr_list->max_tdmrs * tdmr_list->tdmr_sz;
+
+       free_pages_exact(tdmr_list->tdmrs, array_sz);
+}
+
+/* Get the TDMR from the list at the given index. */
+static struct tdmr_info *tdmr_entry(struct tdmr_info_list *tdmr_list,
+                                   int idx)
+{
+       /* TDMRs are laid out back-to-back with a fixed stride. */
+       return (void *)tdmr_list->tdmrs + tdmr_list->tdmr_sz * idx;
+}
+
+/* TDMR base and size must both be 1GB-aligned: */
+#define TDMR_ALIGNMENT         SZ_1G
+#define TDMR_ALIGN_DOWN(_addr) ALIGN_DOWN((_addr), TDMR_ALIGNMENT)
+#define TDMR_ALIGN_UP(_addr)   ALIGN((_addr), TDMR_ALIGNMENT)
+
+/* First physical address past the end of @tdmr */
+static inline u64 tdmr_end(struct tdmr_info *tdmr)
+{
+       return tdmr->base + tdmr->size;
+}
+
+/*
+ * Take the memory referenced in @tmb_list and populate the
+ * preallocated @tdmr_list, following all the special alignment
+ * and size rules for TDMR.
+ *
+ * Returns 0 on success, -ENOSPC when @tmb_list needs more TDMRs
+ * than @tdmr_list can hold.
+ */
+static int fill_out_tdmrs(struct list_head *tmb_list,
+                         struct tdmr_info_list *tdmr_list)
+{
+       struct tdx_memblock *tmb;
+       int tdmr_idx = 0;
+
+       /*
+        * Loop over TDX memory regions and fill out TDMRs to cover them.
+        * To keep it simple, always try to use one TDMR to cover one
+        * memory region.
+        *
+        * In practice TDX supports at least 64 TDMRs.  A 2-socket system
+        * typically only consumes less than 10 of those.  This code is
+        * dumb and simple and may use more TMDRs than is strictly
+        * required.
+        */
+       list_for_each_entry(tmb, tmb_list, list) {
+               struct tdmr_info *tdmr = tdmr_entry(tdmr_list, tdmr_idx);
+               u64 start, end;
+
+               /* Expand the region to the 1GB TDMR granularity: */
+               start = TDMR_ALIGN_DOWN(PFN_PHYS(tmb->start_pfn));
+               end   = TDMR_ALIGN_UP(PFN_PHYS(tmb->end_pfn));
+
+               /*
+                * A valid size indicates the current TDMR has already
+                * been filled out to cover the previous memory region(s).
+                */
+               if (tdmr->size) {
+                       /*
+                        * Loop to the next if the current memory region
+                        * has already been fully covered.
+                        */
+                       if (end <= tdmr_end(tdmr))
+                               continue;
+
+                       /* Otherwise, skip the already covered part. */
+                       if (start < tdmr_end(tdmr))
+                               start = tdmr_end(tdmr);
+
+                       /*
+                        * Create a new TDMR to cover the current memory
+                        * region, or the remaining part of it.
+                        */
+                       tdmr_idx++;
+                       if (tdmr_idx >= tdmr_list->max_tdmrs) {
+                               pr_warn("initialization failed: TDMRs exhausted.\n");
+                               return -ENOSPC;
+                       }
+
+                       tdmr = tdmr_entry(tdmr_list, tdmr_idx);
+               }
+
+               tdmr->base = start;
+               tdmr->size = end - start;
+       }
+
+       /* @tdmr_idx is always the index of the last valid TDMR. */
+       tdmr_list->nr_consumed_tdmrs = tdmr_idx + 1;
+
+       /*
+        * Warn early that kernel is about to run out of TDMRs.
+        *
+        * This is an indication that TDMR allocation has to be
+        * reworked to be smarter to not run into an issue.
+        */
+       if (tdmr_list->max_tdmrs - tdmr_list->nr_consumed_tdmrs < TDMR_NR_WARN)
+               pr_warn("consumed TDMRs reaching limit: %d used out of %d\n",
+                               tdmr_list->nr_consumed_tdmrs,
+                               tdmr_list->max_tdmrs);
+
+       return 0;
+}
+
+/*
+ * Calculate PAMT size given a TDMR and a page size.  The returned
+ * PAMT size is always aligned up to 4K page boundary.
+ *
+ * @pamt_entry_size is the per-entry size the TDX module reported for
+ * this page size.  Returns 0 (and warns) on an unknown @pgsz.
+ */
+static unsigned long tdmr_get_pamt_sz(struct tdmr_info *tdmr, int pgsz,
+                                     u16 pamt_entry_size)
+{
+       unsigned long pamt_sz, nr_pamt_entries;
+
+       /* One PAMT entry covers one page of the given size: */
+       switch (pgsz) {
+       case TDX_PS_4K:
+               nr_pamt_entries = tdmr->size >> PAGE_SHIFT;
+               break;
+       case TDX_PS_2M:
+               nr_pamt_entries = tdmr->size >> PMD_SHIFT;
+               break;
+       case TDX_PS_1G:
+               nr_pamt_entries = tdmr->size >> PUD_SHIFT;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               return 0;
+       }
+
+       pamt_sz = nr_pamt_entries * pamt_entry_size;
+       /* TDX requires PAMT size must be 4K aligned */
+       pamt_sz = ALIGN(pamt_sz, PAGE_SIZE);
+
+       return pamt_sz;
+}
+
+/*
+ * Locate a NUMA node which should hold the allocation of the @tdmr
+ * PAMT.  This node will have some memory covered by the TDMR.  The
+ * relative amount of memory covered is not considered.
+ *
+ * Returns a NUMA node id; falls back to node 0 (with a warning) if
+ * no matching TDX memory block is found.
+ */
+static int tdmr_get_nid(struct tdmr_info *tdmr, struct list_head *tmb_list)
+{
+       struct tdx_memblock *tmb;
+
+       /*
+        * A TDMR must cover at least part of one TMB.  That TMB will end
+        * after the TDMR begins.  But, that TMB may have started before
+        * the TDMR.  Find the next 'tmb' that _ends_ after this TDMR
+        * begins.  Ignore 'tmb' start addresses.  They are irrelevant.
+        */
+       list_for_each_entry(tmb, tmb_list, list) {
+               if (tmb->end_pfn > PHYS_PFN(tdmr->base))
+                       return tmb->nid;
+       }
+
+       /*
+        * Fall back to allocating the TDMR's metadata from node 0 when
+        * no TDX memory block can be found.  This should never happen
+        * since TDMRs originate from TDX memory blocks.
+        */
+       pr_warn("TDMR [0x%llx, 0x%llx): unable to find local NUMA node for PAMT allocation, fallback to use node 0.\n",
+                       tdmr->base, tdmr_end(tdmr));
+       return 0;
+}
+
+/*
+ * Allocate PAMTs from the local NUMA node of some memory in @tmb_list
+ * within @tdmr, and set up PAMTs for @tdmr.
+ *
+ * Returns 0 on success, -ENOMEM if the contiguous allocation fails.
+ * On success @tdmr's pamt_{4k,2m,1g}_{base,size} fields are filled in.
+ */
+static int tdmr_set_up_pamt(struct tdmr_info *tdmr,
+                           struct list_head *tmb_list,
+                           u16 pamt_entry_size[])
+{
+       unsigned long pamt_base[TDX_PS_NR];
+       unsigned long pamt_size[TDX_PS_NR];
+       unsigned long tdmr_pamt_base;
+       unsigned long tdmr_pamt_size;
+       struct page *pamt;
+       int pgsz, nid;
+
+       nid = tdmr_get_nid(tdmr, tmb_list);
+
+       /*
+        * Calculate the PAMT size for each TDX supported page size
+        * and the total PAMT size.
+        */
+       tdmr_pamt_size = 0;
+       for (pgsz = TDX_PS_4K; pgsz < TDX_PS_NR; pgsz++) {
+               pamt_size[pgsz] = tdmr_get_pamt_sz(tdmr, pgsz,
+                                       pamt_entry_size[pgsz]);
+               tdmr_pamt_size += pamt_size[pgsz];
+       }
+
+       /*
+        * Allocate one chunk of physically contiguous memory for all
+        * PAMTs.  This helps minimize the PAMT's use of reserved areas
+        * in overlapped TDMRs.
+        */
+       pamt = alloc_contig_pages(tdmr_pamt_size >> PAGE_SHIFT, GFP_KERNEL,
+                       nid, &node_online_map);
+       if (!pamt)
+               return -ENOMEM;
+
+       /*
+        * Break the contiguous allocation back up into the
+        * individual PAMTs for each page size.
+        */
+       tdmr_pamt_base = page_to_pfn(pamt) << PAGE_SHIFT;
+       for (pgsz = TDX_PS_4K; pgsz < TDX_PS_NR; pgsz++) {
+               pamt_base[pgsz] = tdmr_pamt_base;
+               tdmr_pamt_base += pamt_size[pgsz];
+       }
+
+       tdmr->pamt_4k_base = pamt_base[TDX_PS_4K];
+       tdmr->pamt_4k_size = pamt_size[TDX_PS_4K];
+       tdmr->pamt_2m_base = pamt_base[TDX_PS_2M];
+       tdmr->pamt_2m_size = pamt_size[TDX_PS_2M];
+       tdmr->pamt_1g_base = pamt_base[TDX_PS_1G];
+       tdmr->pamt_1g_size = pamt_size[TDX_PS_1G];
+
+       return 0;
+}
+
+/*
+ * Return the base and total size of @tdmr's single contiguous PAMT
+ * allocation via @pamt_base/@pamt_size.  Both are 0 if no PAMT has
+ * been set up for this TDMR yet.
+ */
+static void tdmr_get_pamt(struct tdmr_info *tdmr, unsigned long *pamt_base,
+                         unsigned long *pamt_size)
+{
+       unsigned long pamt_bs, pamt_sz;
+
+       /*
+        * The PAMT was allocated in one contiguous unit.  The 4K PAMT
+        * should always point to the beginning of that allocation.
+        */
+       pamt_bs = tdmr->pamt_4k_base;
+       pamt_sz = tdmr->pamt_4k_size + tdmr->pamt_2m_size + tdmr->pamt_1g_size;
+
+       WARN_ON_ONCE((pamt_bs & ~PAGE_MASK) || (pamt_sz & ~PAGE_MASK));
+
+       *pamt_base = pamt_bs;
+       *pamt_size = pamt_sz;
+}
+
+/*
+ * Invoke @pamt_func on @tdmr's PAMT range, but only if a PAMT has
+ * actually been allocated for this TDMR.
+ */
+static void tdmr_do_pamt_func(struct tdmr_info *tdmr,
+               void (*pamt_func)(unsigned long base, unsigned long size))
+{
+       unsigned long pamt_base, pamt_size;
+
+       tdmr_get_pamt(tdmr, &pamt_base, &pamt_size);
+
+       /* Do nothing if PAMT hasn't been allocated for this TDMR */
+       if (!pamt_size)
+               return;
+
+       /* A non-zero size with a zero base is inconsistent state: */
+       if (WARN_ON_ONCE(!pamt_base))
+               return;
+
+       pamt_func(pamt_base, pamt_size);
+}
+
+/* Hand a PAMT's page range back to the page allocator */
+static void free_pamt(unsigned long pamt_base, unsigned long pamt_size)
+{
+       unsigned long pfn = pamt_base >> PAGE_SHIFT;
+       unsigned long nr_pages = pamt_size >> PAGE_SHIFT;
+
+       free_contig_range(pfn, nr_pages);
+}
+
+/* Free @tdmr's PAMT, if one was allocated */
+static void tdmr_free_pamt(struct tdmr_info *tdmr)
+{
+       tdmr_do_pamt_func(tdmr, free_pamt);
+}
+
+/* Free the PAMTs of every consumed TDMR in @tdmr_list */
+static void tdmrs_free_pamt_all(struct tdmr_info_list *tdmr_list)
+{
+       int idx;
+
+       for (idx = 0; idx < tdmr_list->nr_consumed_tdmrs; idx++)
+               tdmr_free_pamt(tdmr_entry(tdmr_list, idx));
+}
+
+/* Allocate and set up PAMTs for all TDMRs */
+static int tdmrs_set_up_pamt_all(struct tdmr_info_list *tdmr_list,
+                                struct list_head *tmb_list,
+                                u16 pamt_entry_size[])
+{
+       int i;
+
+       for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
+               int ret = tdmr_set_up_pamt(tdmr_entry(tdmr_list, i), tmb_list,
+                               pamt_entry_size);
+
+               /*
+                * Unwind every previously set up PAMT on failure.  The
+                * failed TDMR has no PAMT recorded, so freeing it is a
+                * no-op inside tdmr_free_pamt().
+                */
+               if (ret) {
+                       tdmrs_free_pamt_all(tdmr_list);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Convert TDX private pages back to normal by using MOVDIR64B to
+ * clear these pages.  Note this function doesn't flush cache of
+ * these TDX private pages.  The caller should make sure of that.
+ */
+static void reset_tdx_pages(unsigned long base, unsigned long size)
+{
+       const void *zero_page = (const void *)page_address(ZERO_PAGE(0));
+       unsigned long phys, end;
+
+       /* MOVDIR64B writes one 64-byte chunk per iteration: */
+       end = base + size;
+       for (phys = base; phys < end; phys += 64)
+               movdir64b(__va(phys), zero_page);
+
+       /*
+        * MOVDIR64B uses WC protocol.  Use memory barrier to
+        * make sure any later user of these pages sees the
+        * updated data.
+        */
+       mb();
+}
+
+/* Zero @tdmr's PAMT pages via reset_tdx_pages(), if a PAMT exists */
+static void tdmr_reset_pamt(struct tdmr_info *tdmr)
+{
+       tdmr_do_pamt_func(tdmr, reset_tdx_pages);
+}
+
+/* Reset the PAMTs of every consumed TDMR in @tdmr_list */
+static void tdmrs_reset_pamt_all(struct tdmr_info_list *tdmr_list)
+{
+       int idx;
+
+       for (idx = 0; idx < tdmr_list->nr_consumed_tdmrs; idx++)
+               tdmr_reset_pamt(tdmr_entry(tdmr_list, idx));
+}
+
+/* Sum the PAMT sizes of all consumed TDMRs, reported in kilobytes */
+static unsigned long tdmrs_count_pamt_kb(struct tdmr_info_list *tdmr_list)
+{
+       unsigned long total = 0;
+       int i;
+
+       for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
+               unsigned long pamt_base, pamt_sz;
+
+               tdmr_get_pamt(tdmr_entry(tdmr_list, i), &pamt_base, &pamt_sz);
+               total += pamt_sz;
+       }
+
+       return total / 1024;
+}
+
+/*
+ * Record [addr, addr + size) as reserved area @*p_idx of @tdmr and
+ * advance the index on success.
+ *
+ * Returns 0 on success, -EINVAL if @addr/@size are not 4K aligned,
+ * -ENOSPC when all @max_reserved_per_tdmr slots are consumed.
+ */
+static int tdmr_add_rsvd_area(struct tdmr_info *tdmr, int *p_idx, u64 addr,
+                             u64 size, u16 max_reserved_per_tdmr)
+{
+       struct tdmr_reserved_area *rsvd_areas = tdmr->reserved_areas;
+       int idx = *p_idx;
+
+       /* Reserved area must be 4K aligned in offset and size */
+       if (WARN_ON(addr & ~PAGE_MASK || size & ~PAGE_MASK))
+               return -EINVAL;
+
+       if (idx >= max_reserved_per_tdmr) {
+               pr_warn("initialization failed: TDMR [0x%llx, 0x%llx): reserved areas exhausted.\n",
+                               tdmr->base, tdmr_end(tdmr));
+               return -ENOSPC;
+       }
+
+       /*
+        * Consume one reserved area per call.  Make no effort to
+        * optimize or reduce the number of reserved areas which are
+        * consumed by contiguous reserved areas, for instance.
+        */
+       rsvd_areas[idx].offset = addr - tdmr->base;
+       rsvd_areas[idx].size = size;
+
+       *p_idx = idx + 1;
+
+       return 0;
+}
+
+/*
+ * Go through @tmb_list to find holes between memory areas.  If any of
+ * those holes fall within @tdmr, set up a TDMR reserved area to cover
+ * the hole.
+ *
+ * @rsvd_idx is the next free reserved-area slot and is advanced for
+ * each area added.  Returns 0 on success or the error from
+ * tdmr_add_rsvd_area().
+ */
+static int tdmr_populate_rsvd_holes(struct list_head *tmb_list,
+                                   struct tdmr_info *tdmr,
+                                   int *rsvd_idx,
+                                   u16 max_reserved_per_tdmr)
+{
+       struct tdx_memblock *tmb;
+       u64 prev_end;
+       int ret;
+
+       /*
+        * Start looking for reserved blocks at the
+        * beginning of the TDMR.
+        */
+       prev_end = tdmr->base;
+       list_for_each_entry(tmb, tmb_list, list) {
+               u64 start, end;
+
+               start = PFN_PHYS(tmb->start_pfn);
+               end   = PFN_PHYS(tmb->end_pfn);
+
+               /* Break if this region is after the TDMR */
+               if (start >= tdmr_end(tdmr))
+                       break;
+
+               /* Exclude regions before this TDMR */
+               if (end < tdmr->base)
+                       continue;
+
+               /*
+                * Skip over memory areas that
+                * have already been dealt with.
+                */
+               if (start <= prev_end) {
+                       prev_end = end;
+                       continue;
+               }
+
+               /* Add the hole before this region */
+               ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, prev_end,
+                               start - prev_end,
+                               max_reserved_per_tdmr);
+               if (ret)
+                       return ret;
+
+               prev_end = end;
+       }
+
+       /* Add the hole after the last region if it exists. */
+       if (prev_end < tdmr_end(tdmr)) {
+               ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, prev_end,
+                               tdmr_end(tdmr) - prev_end,
+                               max_reserved_per_tdmr);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Go through @tdmr_list to find all PAMTs.  If any of those PAMTs
+ * overlaps with @tdmr, set up a TDMR reserved area to cover the
+ * overlapping part.
+ *
+ * @rsvd_idx is the next free reserved-area slot and is advanced for
+ * each area added.  Returns 0 on success or the error from
+ * tdmr_add_rsvd_area().
+ */
+static int tdmr_populate_rsvd_pamts(struct tdmr_info_list *tdmr_list,
+                                   struct tdmr_info *tdmr,
+                                   int *rsvd_idx,
+                                   u16 max_reserved_per_tdmr)
+{
+       int i, ret;
+
+       for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
+               struct tdmr_info *tmp = tdmr_entry(tdmr_list, i);
+               unsigned long pamt_base, pamt_size, pamt_end;
+
+               tdmr_get_pamt(tmp, &pamt_base, &pamt_size);
+               /* Each TDMR must already have PAMT allocated */
+               WARN_ON_ONCE(!pamt_size || !pamt_base);
+
+               pamt_end = pamt_base + pamt_size;
+               /* Skip PAMTs outside of the given TDMR */
+               if ((pamt_end <= tdmr->base) ||
+                               (pamt_base >= tdmr_end(tdmr)))
+                       continue;
+
+               /* Only mark the part within the TDMR as reserved */
+               if (pamt_base < tdmr->base)
+                       pamt_base = tdmr->base;
+               if (pamt_end > tdmr_end(tdmr))
+                       pamt_end = tdmr_end(tdmr);
+
+               ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, pamt_base,
+                               pamt_end - pamt_base,
+                               max_reserved_per_tdmr);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Compare function called by sort() for TDMR reserved areas.
+ *
+ * Orders non-overlapping areas by address.  Use const-qualified
+ * pointers rather than casting away the const from sort()'s
+ * 'const void *' arguments -- the areas are only read here.
+ */
+static int rsvd_area_cmp_func(const void *a, const void *b)
+{
+       const struct tdmr_reserved_area *r1 = a;
+       const struct tdmr_reserved_area *r2 = b;
+
+       if (r1->offset + r1->size <= r2->offset)
+               return -1;
+       if (r1->offset >= r2->offset + r2->size)
+               return 1;
+
+       /* Reserved areas cannot overlap.  The caller must guarantee. */
+       WARN_ON_ONCE(1);
+       return -1;
+}
+
+/*
+ * Populate reserved areas for the given @tdmr, including memory holes
+ * (via @tmb_list) and PAMTs (via @tdmr_list).
+ */
+static int tdmr_populate_rsvd_areas(struct tdmr_info *tdmr,
+                                   struct list_head *tmb_list,
+                                   struct tdmr_info_list *tdmr_list,
+                                   u16 max_reserved_per_tdmr)
+{
+       int rsvd_idx = 0;
+       int ret;
+
+       /* First, reserve the holes between TDX memory regions: */
+       ret = tdmr_populate_rsvd_holes(tmb_list, tdmr, &rsvd_idx,
+                       max_reserved_per_tdmr);
+       if (ret)
+               return ret;
+
+       /* Then, reserve any PAMT ranges overlapping this TDMR: */
+       ret = tdmr_populate_rsvd_pamts(tdmr_list, tdmr, &rsvd_idx,
+                       max_reserved_per_tdmr);
+       if (ret)
+               return ret;
+
+       /* TDX requires reserved areas listed in address ascending order */
+       sort(tdmr->reserved_areas, rsvd_idx, sizeof(struct tdmr_reserved_area),
+                       rsvd_area_cmp_func, NULL);
+
+       return 0;
+}
+
+/*
+ * Populate reserved areas for all TDMRs in @tdmr_list, including memory
+ * holes (via @tmb_list) and PAMTs.
+ */
+static int tdmrs_populate_rsvd_areas_all(struct tdmr_info_list *tdmr_list,
+                                        struct list_head *tmb_list,
+                                        u16 max_reserved_per_tdmr)
+{
+       int i, ret;
+
+       for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
+               ret = tdmr_populate_rsvd_areas(tdmr_entry(tdmr_list, i),
+                               tmb_list, tdmr_list, max_reserved_per_tdmr);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Construct a list of TDMRs on the preallocated space in @tdmr_list
+ * to cover all TDX memory regions in @tmb_list based on the TDX module
+ * TDMR global information in @tdmr_sysinfo.
+ *
+ * Returns 0 on success.  On failure after PAMT setup, all allocated
+ * PAMTs are freed before returning.
+ */
+static int construct_tdmrs(struct list_head *tmb_list,
+                          struct tdmr_info_list *tdmr_list,
+                          struct tdx_tdmr_sysinfo *tdmr_sysinfo)
+{
+       int ret;
+
+       ret = fill_out_tdmrs(tmb_list, tdmr_list);
+       if (ret)
+               return ret;
+
+       ret = tdmrs_set_up_pamt_all(tdmr_list, tmb_list,
+                       tdmr_sysinfo->pamt_entry_size);
+       if (ret)
+               return ret;
+
+       ret = tdmrs_populate_rsvd_areas_all(tdmr_list, tmb_list,
+                       tdmr_sysinfo->max_reserved_per_tdmr);
+       if (ret)
+               tdmrs_free_pamt_all(tdmr_list);
+
+       /*
+        * The tdmr_info_list is read-only from here on out.
+        * Ensure that these writes are seen by other CPUs.
+        * Pairs with a smp_rmb() in is_pamt_page().
+        */
+       smp_wmb();
+
+       return ret;
+}
+
+/*
+ * Hand the constructed TDMRs and the global KeyID to the TDX module
+ * via the TDH.SYS.CONFIG SEAMCALL.
+ *
+ * Returns 0 on success, -ENOMEM if the temporary PA array cannot be
+ * allocated, or the SEAMCALL error from seamcall_prerr().
+ */
+static int config_tdx_module(struct tdmr_info_list *tdmr_list, u64 global_keyid)
+{
+       struct tdx_module_args args = {};
+       u64 *tdmr_pa_array;
+       size_t array_sz;
+       int i, ret;
+
+       /*
+        * TDMRs are passed to the TDX module via an array of physical
+        * addresses of each TDMR.  The array itself also has certain
+        * alignment requirement.
+        */
+       array_sz = tdmr_list->nr_consumed_tdmrs * sizeof(u64);
+       array_sz = roundup_pow_of_two(array_sz);
+       if (array_sz < TDMR_INFO_PA_ARRAY_ALIGNMENT)
+               array_sz = TDMR_INFO_PA_ARRAY_ALIGNMENT;
+
+       tdmr_pa_array = kzalloc(array_sz, GFP_KERNEL);
+       if (!tdmr_pa_array)
+               return -ENOMEM;
+
+       for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++)
+               tdmr_pa_array[i] = __pa(tdmr_entry(tdmr_list, i));
+
+       args.rcx = __pa(tdmr_pa_array);
+       args.rdx = tdmr_list->nr_consumed_tdmrs;
+       args.r8 = global_keyid;
+       ret = seamcall_prerr(TDH_SYS_CONFIG, &args);
+
+       /* Free the array as it is not required anymore. */
+       kfree(tdmr_pa_array);
+
+       return ret;
+}
+
+/*
+ * Run TDH.SYS.KEY.CONFIG on the local CPU.  Invoked per-package via
+ * smp_call_on_cpu() from config_global_keyid().
+ */
+static int do_global_key_config(void *unused)
+{
+       struct tdx_module_args args = {};
+
+       return seamcall_prerr(TDH_SYS_KEY_CONFIG, &args);
+}
+
+/*
+ * Attempt to configure the global KeyID on all physical packages.
+ *
+ * This requires running code on at least one CPU in each package.
+ * TDMR initialization will fail if any package in the
+ * system has no online CPUs.
+ *
+ * This code takes no affirmative steps to online CPUs.  Callers (aka.
+ * KVM) can ensure success by ensuring sufficient CPUs are online and
+ * can run SEAMCALLs.
+ */
+static int config_global_keyid(void)
+{
+       cpumask_var_t packages;
+       int cpu, ret = -EINVAL;
+
+       if (!zalloc_cpumask_var(&packages, GFP_KERNEL))
+               return -ENOMEM;
+
+       /*
+        * Hardware doesn't guarantee cache coherency across different
+        * KeyIDs.  The kernel needs to flush PAMT's dirty cachelines
+        * (associated with KeyID 0) before the TDX module can use the
+        * global KeyID to access the PAMT.  Given PAMTs are potentially
+        * large (~1/256th of system RAM), just use WBINVD.
+        */
+       wbinvd_on_all_cpus();
+
+       for_each_online_cpu(cpu) {
+               /*
+                * The key configuration only needs to be done once per
+                * package and will return an error if configured more
+                * than once.  Avoid doing it multiple times per package.
+                */
+               if (cpumask_test_and_set_cpu(topology_physical_package_id(cpu),
+                                       packages))
+                       continue;
+
+               /*
+                * TDH.SYS.KEY.CONFIG cannot run concurrently on
+                * different cpus.  Do it one by one.
+                */
+               ret = smp_call_on_cpu(cpu, do_global_key_config, NULL, true);
+               if (ret)
+                       break;
+       }
+
+       free_cpumask_var(packages);
+       return ret;
+}
+
+/*
+ * Initialize one TDMR via (possibly repeated) TDH.SYS.TDMR.INIT
+ * SEAMCALLs.  Returns 0 on success or a SEAMCALL error.
+ */
+static int init_tdmr(struct tdmr_info *tdmr)
+{
+       u64 next;
+
+       /*
+        * Initializing a TDMR can be time consuming.  To avoid long
+        * SEAMCALLs, the TDX module may only initialize a part of the
+        * TDMR in each call.
+        */
+       do {
+               struct tdx_module_args args = {
+                       .rcx = tdmr->base,
+               };
+               int ret;
+
+               ret = seamcall_prerr_ret(TDH_SYS_TDMR_INIT, &args);
+               if (ret)
+                       return ret;
+               /*
+                * RDX contains 'next-to-initialize' address if
+                * TDH.SYS.TDMR.INIT did not fully complete and
+                * should be retried.
+                */
+               next = args.rdx;
+               cond_resched();
+               /* Keep making SEAMCALLs until the TDMR is done */
+       } while (next < tdmr->base + tdmr->size);
+
+       return 0;
+}
+
+/* Initialize every consumed TDMR in @tdmr_list, one at a time */
+static int init_tdmrs(struct tdmr_info_list *tdmr_list)
+{
+       int i;
+
+       /*
+        * This operation is costly.  It can be parallelized,
+        * but keep it simple for now.
+        */
+       for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
+               int ret = init_tdmr(tdmr_entry(tdmr_list, i));
+
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Do the one-time module-wide TDX initialization: build the list of
+ * TDX-usable memory, construct TDMRs (with PAMTs) to cover it, hand
+ * them to the TDX module and initialize them.  On error, everything
+ * set up so far is torn down via the goto-unwind chain at the bottom.
+ */
+static int init_tdx_module(void)
+{
+       struct tdx_tdmr_sysinfo tdmr_sysinfo;
+       int ret;
+
+       /*
+        * To keep things simple, assume that all TDX-protected memory
+        * will come from the page allocator.  Make sure all pages in the
+        * page allocator are TDX-usable memory.
+        *
+        * Build the list of "TDX-usable" memory regions which cover all
+        * pages in the page allocator to guarantee that.  Do it while
+        * holding mem_hotplug_lock read-lock as the memory hotplug code
+        * path reads the @tdx_memlist to reject any new memory.
+        */
+       get_online_mems();
+
+       ret = build_tdx_memlist(&tdx_memlist);
+       if (ret)
+               goto out_put_tdxmem;
+
+       ret = get_tdx_tdmr_sysinfo(&tdmr_sysinfo);
+       if (ret)
+               goto err_free_tdxmem;
+
+       /* Allocate enough space for constructing TDMRs */
+       ret = alloc_tdmr_list(&tdx_tdmr_list, &tdmr_sysinfo);
+       if (ret)
+               goto err_free_tdxmem;
+
+       /* Cover all TDX-usable memory regions in TDMRs */
+       ret = construct_tdmrs(&tdx_memlist, &tdx_tdmr_list, &tdmr_sysinfo);
+       if (ret)
+               goto err_free_tdmrs;
+
+       /* Pass the TDMRs and the global KeyID to the TDX module */
+       ret = config_tdx_module(&tdx_tdmr_list, tdx_global_keyid);
+       if (ret)
+               goto err_free_pamts;
+
+       /* Config the key of global KeyID on all packages */
+       ret = config_global_keyid();
+       if (ret)
+               goto err_reset_pamts;
+
+       /* Initialize TDMRs to complete the TDX module initialization */
+       ret = init_tdmrs(&tdx_tdmr_list);
+       if (ret)
+               goto err_reset_pamts;
+
+       pr_info("%lu KB allocated for PAMT\n", tdmrs_count_pamt_kb(&tdx_tdmr_list));
+
+out_put_tdxmem:
+       /*
+        * Drop the mem_hotplug_lock read-lock taken by get_online_mems()
+        * at the top.  Both the success and all error paths funnel
+        * through here.
+        */
+       put_online_mems();
+       return ret;
+
+err_reset_pamts:
+       /*
+        * Part of PAMTs may already have been initialized by the
+        * TDX module.  Flush cache before returning PAMTs back
+        * to the kernel.
+        */
+       wbinvd_on_all_cpus();
+       /*
+        * According to the TDX hardware spec, if the platform
+        * doesn't have the "partial write machine check"
+        * erratum, any kernel read/write will never cause #MC
+        * in kernel space, thus it's OK to not convert PAMTs
+        * back to normal.  But do the conversion anyway here
+        * as suggested by the TDX spec.
+        */
+       tdmrs_reset_pamt_all(&tdx_tdmr_list);
+err_free_pamts:
+       tdmrs_free_pamt_all(&tdx_tdmr_list);
+err_free_tdmrs:
+       free_tdmr_list(&tdx_tdmr_list);
+err_free_tdxmem:
+       free_tdx_memlist(&tdx_memlist);
+       goto out_put_tdxmem;
+}
+
+/*
+ * One-shot TDX module initialization and status bookkeeping.
+ * Called with tdx_module_lock held (see tdx_enable()).
+ */
+static int __tdx_enable(void)
+{
+       int ret;
+
+       ret = init_tdx_module();
+       if (ret) {
+               pr_err("module initialization failed (%d)\n", ret);
+               tdx_module_status = TDX_MODULE_ERROR;
+               return ret;
+       }
+
+       pr_info("module initialized\n");
+       tdx_module_status = TDX_MODULE_INITIALIZED;
+
+       return 0;
+}
+
+/**
+ * tdx_enable - Enable TDX module to make it ready to run TDX guests
+ *
+ * This function assumes the caller has: 1) held read lock of CPU hotplug
+ * lock to prevent any new cpu from becoming online; 2) done both VMXON
+ * and tdx_cpu_enable() on all online cpus.
+ *
+ * This function requires there's at least one online cpu for each CPU
+ * package to succeed.
+ *
+ * This function can be called in parallel by multiple callers.
+ *
+ * Return: %0 if TDX is enabled successfully, otherwise error.
+ */
+int tdx_enable(void)
+{
+       int ret;
+
+       if (!boot_cpu_has(X86_FEATURE_TDX_HOST_PLATFORM))
+               return -ENODEV;
+
+       /* Enforce assumption 1) from the kernel-doc above: */
+       lockdep_assert_cpus_held();
+
+       mutex_lock(&tdx_module_lock);
+
+       switch (tdx_module_status) {
+       case TDX_MODULE_UNINITIALIZED:
+               ret = __tdx_enable();
+               break;
+       case TDX_MODULE_INITIALIZED:
+               /* Already initialized, great, tell the caller. */
+               ret = 0;
+               break;
+       default:
+               /* Failed to initialize in the previous attempts */
+               ret = -EINVAL;
+               break;
+       }
+
+       mutex_unlock(&tdx_module_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(tdx_enable);
+
+/* Return true if @phys falls within any consumed TDMR's PAMT range */
+static bool is_pamt_page(unsigned long phys)
+{
+       struct tdmr_info_list *tdmr_list = &tdx_tdmr_list;
+       int i;
+
+       /*
+        * Ensure that all remote 'tdmr_list' writes are visible:
+        * pairs with the smp_wmb() in construct_tdmrs().
+        */
+       smp_rmb();
+
+       /*
+        * The TDX module is no longer returning TDX_SYS_NOT_READY and
+        * is initialized.  The 'tdmr_list' was initialized long ago
+        * and is now read-only.
+        */
+       for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
+               unsigned long base, size;
+
+               tdmr_get_pamt(tdmr_entry(tdmr_list, i), &base, &size);
+
+               if (phys >= base && phys < (base + size))
+                       return true;
+       }
+
+       return false;
+}
+
+/*
+ * Return whether the memory page at the given physical address is TDX
+ * private memory or not.
+ *
+ * This can be imprecise for two known reasons:
+ * 1. PAMTs are private memory and exist before the TDX module is
+ *    ready and TDH_PHYMEM_PAGE_RDMD works.  This is a relatively
+ *    short window that occurs once per boot.
+ * 2. TDH_PHYMEM_PAGE_RDMD reflects the TDX module's knowledge of the
+ *    page.  However, the page can still cause #MC until it has been
+ *    fully converted to shared using 64-byte writes like MOVDIR64B.
+ *    Buggy hosts might still leave #MC-causing memory in place which
+ *    this function can not detect.
+ */
+static bool paddr_is_tdx_private(unsigned long phys)
+{
+       struct tdx_module_args args = {
+               .rcx = phys & PAGE_MASK,
+       };
+       u64 sret;
+
+       if (!boot_cpu_has(X86_FEATURE_TDX_HOST_PLATFORM))
+               return false;
+
+       /* Get page type from the TDX module */
+       sret = __seamcall_ret(TDH_PHYMEM_PAGE_RDMD, &args);
+
+       /*
+        * The SEAMCALL will not return success unless there is a
+        * working, "ready" TDX module.  Assume an absence of TDX
+        * private pages until SEAMCALL is working.
+        */
+       if (sret)
+               return false;
+
+       /*
+        * SEAMCALL was successful -- read page type (via RCX):
+        *
+        *  - PT_NDA:   Page is not used by the TDX module
+        *  - PT_RSVD:  Reserved for Non-TDX use
+        *  - Others:   Page is used by the TDX module
+        *
+        * Note PAMT pages are marked as PT_RSVD but they are also TDX
+        * private memory.
+        */
+       switch (args.rcx) {
+       case PT_NDA:
+               return false;
+       case PT_RSVD:
+               /* Distinguish PAMT pages from other reserved pages: */
+               return is_pamt_page(phys);
+       default:
+               return true;
+       }
+}
+
+/*
+ * Some TDX-capable CPUs have an erratum.  A write to TDX private
+ * memory poisons that memory, and a subsequent read of that memory
+ * triggers #MC.
+ *
+ * Help distinguish erratum-triggered #MCs from a normal hardware one.
+ * Just print additional message to show such #MC may be result of the
+ * erratum.
+ *
+ * Returns a message string when the #MC hit TDX private memory,
+ * NULL otherwise (or when @m is not a usable memory error).
+ */
+const char *tdx_dump_mce_info(struct mce *m)
+{
+       if (!m || !mce_is_memory_error(m) || !mce_usable_address(m))
+               return NULL;
+
+       if (!paddr_is_tdx_private(m->addr))
+               return NULL;
+
+       return "TDX private memory error. Possible kernel bug.";
+}
+
+static __init int record_keyid_partitioning(u32 *tdx_keyid_start,
+                                           u32 *nr_tdx_keyids)
+{
+       u32 _nr_mktme_keyids, _tdx_keyid_start, _nr_tdx_keyids;
+       int ret;
+
+       /*
+        * IA32_MKTME_KEYID_PARTITIONING:
+        *   Bit [31:0]:        Number of MKTME KeyIDs.
+        *   Bit [63:32]:       Number of TDX private KeyIDs.
+        */
+       ret = rdmsr_safe(MSR_IA32_MKTME_KEYID_PARTITIONING, &_nr_mktme_keyids,
+                       &_nr_tdx_keyids);
+       if (ret || !_nr_tdx_keyids)
+               return -EINVAL;
+
+       /* TDX KeyIDs start after the last MKTME KeyID. */
+       _tdx_keyid_start = _nr_mktme_keyids + 1;
+
+       *tdx_keyid_start = _tdx_keyid_start;
+       *nr_tdx_keyids = _nr_tdx_keyids;
+
+       return 0;
+}
+
+static bool is_tdx_memory(unsigned long start_pfn, unsigned long end_pfn)
+{
+       struct tdx_memblock *tmb;
+
+       /*
+        * This check assumes that the start_pfn<->end_pfn range does not
+        * cross multiple @tdx_memlist entries.  A single memory online
+        * event across multiple memblocks (from which @tdx_memlist
+        * entries are derived at the time of module initialization) is
+        * not possible.  This is because memory offline/online is done
+        * on granularity of 'struct memory_block', and the hotpluggable
+        * memory region (one memblock) must be multiple of memory_block.
+        */
+       list_for_each_entry(tmb, &tdx_memlist, list) {
+               if (start_pfn >= tmb->start_pfn && end_pfn <= tmb->end_pfn)
+                       return true;
+       }
+       return false;
+}
+
+static int tdx_memory_notifier(struct notifier_block *nb, unsigned long action,
+                              void *v)
+{
+       struct memory_notify *mn = v;
+
+       if (action != MEM_GOING_ONLINE)
+               return NOTIFY_OK;
+
+       /*
+        * Empty list means TDX isn't enabled.  Allow any memory
+        * to go online.
+        */
+       if (list_empty(&tdx_memlist))
+               return NOTIFY_OK;
+
+       /*
+        * The TDX memory configuration is static and can not be
+        * changed.  Reject onlining any memory which is outside of
+        * the static configuration whether it supports TDX or not.
+        */
+       if (is_tdx_memory(mn->start_pfn, mn->start_pfn + mn->nr_pages))
+               return NOTIFY_OK;
+
+       return NOTIFY_BAD;
+}
+
+static struct notifier_block tdx_memory_nb = {
+       .notifier_call = tdx_memory_notifier,
+};
+
+static void __init check_tdx_erratum(void)
+{
+       /*
+        * These CPUs have an erratum.  A partial write from non-TD
+        * software (e.g. via MOVNTI variants or UC/WC mapping) to TDX
+        * private memory poisons that memory, and a subsequent read of
+        * that memory triggers #MC.
+        */
+       switch (boot_cpu_data.x86_model) {
+       case INTEL_FAM6_SAPPHIRERAPIDS_X:
+       case INTEL_FAM6_EMERALDRAPIDS_X:
+               setup_force_cpu_bug(X86_BUG_TDX_PW_MCE);
+       }
+}
+
+void __init tdx_init(void)
+{
+       u32 tdx_keyid_start, nr_tdx_keyids;
+       int err;
+
+       err = record_keyid_partitioning(&tdx_keyid_start, &nr_tdx_keyids);
+       if (err)
+               return;
+
+       pr_info("BIOS enabled: private KeyID range [%u, %u)\n",
+                       tdx_keyid_start, tdx_keyid_start + nr_tdx_keyids);
+
+       /*
+        * The TDX module itself requires one 'global KeyID' to protect
+        * its metadata.  If there's only one TDX KeyID, there won't be
+        * any left for TDX guests thus there's no point to enable TDX
+        * at all.
+        */
+       if (nr_tdx_keyids < 2) {
+               pr_err("initialization failed: too few private KeyIDs available.\n");
+               return;
+       }
+
+       /*
+        * At this point, hibernation_available() indicates whether or
+        * not hibernation support has been permanently disabled.
+        */
+       if (hibernation_available()) {
+               pr_err("initialization failed: Hibernation support is enabled\n");
+               return;
+       }
+
+       err = register_memory_notifier(&tdx_memory_nb);
+       if (err) {
+               pr_err("initialization failed: register_memory_notifier() failed (%d)\n",
+                               err);
+               return;
+       }
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+       pr_info("Disable ACPI S3. Turn off TDX in the BIOS to use ACPI S3.\n");
+       acpi_suspend_lowlevel = NULL;
+#endif
+
+       /*
+        * Just use the first TDX KeyID as the 'global KeyID' and
+        * leave the rest for TDX guests.
+        */
+       tdx_global_keyid = tdx_keyid_start;
+       tdx_guest_keyid_start = tdx_keyid_start + 1;
+       tdx_nr_guest_keyids = nr_tdx_keyids - 1;
+
+       setup_force_cpu_cap(X86_FEATURE_TDX_HOST_PLATFORM);
+
+       check_tdx_erratum();
+}
diff --git a/arch/x86/virt/vmx/tdx/tdx.h b/arch/x86/virt/vmx/tdx/tdx.h
new file mode 100644 (file)
index 0000000..b701f69
--- /dev/null
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _X86_VIRT_TDX_H
+#define _X86_VIRT_TDX_H
+
+#include <linux/bits.h>
+
+/*
+ * This file contains both macros and data structures defined by the TDX
+ * architecture and Linux defined software data structures and functions.
+ * The two should not be mixed together for better readability.  The
+ * architectural definitions come first.
+ */
+
+/*
+ * TDX module SEAMCALL leaf functions
+ */
+#define TDH_PHYMEM_PAGE_RDMD   24
+#define TDH_SYS_KEY_CONFIG     31
+#define TDH_SYS_INIT           33
+#define TDH_SYS_RD             34
+#define TDH_SYS_LP_INIT                35
+#define TDH_SYS_TDMR_INIT      36
+#define TDH_SYS_CONFIG         45
+
+/* TDX page types */
+#define        PT_NDA          0x0
+#define        PT_RSVD         0x1
+
+/*
+ * Global scope metadata field ID.
+ *
+ * See Table "Global Scope Metadata", TDX module 1.5 ABI spec.
+ */
+#define MD_FIELD_ID_MAX_TDMRS                  0x9100000100000008ULL
+#define MD_FIELD_ID_MAX_RESERVED_PER_TDMR      0x9100000100000009ULL
+#define MD_FIELD_ID_PAMT_4K_ENTRY_SIZE         0x9100000100000010ULL
+#define MD_FIELD_ID_PAMT_2M_ENTRY_SIZE         0x9100000100000011ULL
+#define MD_FIELD_ID_PAMT_1G_ENTRY_SIZE         0x9100000100000012ULL
+
+/*
+ * Sub-field definition of metadata field ID.
+ *
+ * See Table "MD_FIELD_ID (Metadata Field Identifier / Sequence Header)
+ * Definition", TDX module 1.5 ABI spec.
+ *
+ *  - Bit 33:32: ELEMENT_SIZE_CODE -- size of a single element of metadata
+ *
+ *     0: 8 bits
+ *     1: 16 bits
+ *     2: 32 bits
+ *     3: 64 bits
+ */
+#define MD_FIELD_ID_ELE_SIZE_CODE(_field_id)   \
+               (((_field_id) & GENMASK_ULL(33, 32)) >> 32)
+
+#define MD_FIELD_ID_ELE_SIZE_16BIT     1
+
+struct tdmr_reserved_area {
+       u64 offset;
+       u64 size;
+} __packed;
+
+#define TDMR_INFO_ALIGNMENT    512
+#define TDMR_INFO_PA_ARRAY_ALIGNMENT   512
+
+struct tdmr_info {
+       u64 base;
+       u64 size;
+       u64 pamt_1g_base;
+       u64 pamt_1g_size;
+       u64 pamt_2m_base;
+       u64 pamt_2m_size;
+       u64 pamt_4k_base;
+       u64 pamt_4k_size;
+       /*
+        * The actual number of reserved areas depends on the value of
+        * field MD_FIELD_ID_MAX_RESERVED_PER_TDMR in the TDX module
+        * global metadata.
+        */
+       DECLARE_FLEX_ARRAY(struct tdmr_reserved_area, reserved_areas);
+} __packed __aligned(TDMR_INFO_ALIGNMENT);
+
+/*
+ * Do not put any hardware-defined TDX structure representations below
+ * this comment!
+ */
+
+/* Kernel defined TDX module status during module initialization. */
+enum tdx_module_status_t {
+       TDX_MODULE_UNINITIALIZED,
+       TDX_MODULE_INITIALIZED,
+       TDX_MODULE_ERROR
+};
+
+struct tdx_memblock {
+       struct list_head list;
+       unsigned long start_pfn;
+       unsigned long end_pfn;
+       int nid;
+};
+
+/* "TDMR info" part of "Global Scope Metadata" for constructing TDMRs */
+struct tdx_tdmr_sysinfo {
+       u16 max_tdmrs;
+       u16 max_reserved_per_tdmr;
+       u16 pamt_entry_size[TDX_PS_NR];
+};
+
+/* Warn if kernel has less than TDMR_NR_WARN TDMRs after allocation */
+#define TDMR_NR_WARN 4
+
+struct tdmr_info_list {
+       void *tdmrs;    /* Flexible array to hold 'tdmr_info's */
+       int nr_consumed_tdmrs;  /* How many 'tdmr_info's are in use */
+
+       /* Metadata for finding target 'tdmr_info' and freeing @tdmrs */
+       int tdmr_sz;    /* Size of one 'tdmr_info' */
+       int max_tdmrs;  /* How many 'tdmr_info's are allocated */
+};
+
+#endif
index e031eaf36c991317f8801bdffdc3cf919cb2684b..6f248d87e496aad36a83bbc4b64ee9bbe5050728 100644 (file)
@@ -144,7 +144,7 @@ config XTENSA_VARIANT_CUSTOM_NAME
        depends on XTENSA_VARIANT_CUSTOM
        help
          Provide the name of a custom Xtensa processor variant.
-         This CORENAME selects arch/xtensa/variant/CORENAME.
+         This CORENAME selects arch/xtensa/variants/CORENAME.
          Don't forget you have to select MMU if you have one.
 
 config XTENSA_VARIANT_NAME
index bfd8e433ed621e89366ddaabb80bb9d0672d2b89..4c14a02179eba5726c65fbcd94517c32596e25ad 100644 (file)
@@ -35,15 +35,19 @@ KBUILD_CFLAGS += -ffreestanding -D__linux__
 KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals
 KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
 KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,)
+KBUILD_CFLAGS += $(call cc-option,-mno-fdpic,)
 ifneq ($(CONFIG_KERNEL_ABI_CALL0),)
 KBUILD_CFLAGS += -mabi=call0
 KBUILD_AFLAGS += -mabi=call0
 endif
 
 KBUILD_AFLAGS += -mlongcalls -mtext-section-literals
+KBUILD_AFLAGS += $(call cc-option,-mno-fdpic,)
+
+KBUILD_LDFLAGS += -m elf32xtensa
 
 ifneq ($(CONFIG_LD_NO_RELAX),)
-KBUILD_LDFLAGS := --no-relax
+KBUILD_LDFLAGS += --no-relax
 endif
 
 CHECKFLAGS += -D$(if $(CONFIG_CPU_BIG_ENDIAN),__XTENSA_EB__,__XTENSA_EL__)
index 01bf7d9dbb1910a291316cd713b0014045793933..a52d49a16ce7d0ed5ba351cf61f8a6b50dc76a03 100644 (file)
@@ -11,7 +11,7 @@
 #ifndef _XTENSA_ASMMACRO_H
 #define _XTENSA_ASMMACRO_H
 
-#include <asm-generic/export.h>
+#include <linux/export.h>
 #include <asm/core.h>
 
 /*
index 785a00ce83c11e8bbfa8e02b131315606060c35c..38bcecb0e457d9741c142cada4d38ec65ff0f88b 100644 (file)
@@ -116,8 +116,9 @@ void flush_cache_page(struct vm_area_struct*,
 #define flush_cache_mm(mm)             flush_cache_all()
 #define flush_cache_dup_mm(mm)         flush_cache_mm(mm)
 
-#define flush_cache_vmap(start,end)    flush_cache_all()
-#define flush_cache_vunmap(start,end)  flush_cache_all()
+#define flush_cache_vmap(start,end)            flush_cache_all()
+#define flush_cache_vmap_early(start,end)      do { } while (0)
+#define flush_cache_vunmap(start,end)          flush_cache_all()
 
 void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_folio flush_dcache_folio
@@ -140,6 +141,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 #define flush_cache_dup_mm(mm)                         do { } while (0)
 
 #define flush_cache_vmap(start,end)                    do { } while (0)
+#define flush_cache_vmap_early(start,end)              do { } while (0)
 #define flush_cache_vunmap(start,end)                  do { } while (0)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
index c812bf85021c02db59d107fda752dba2872e3b60..46c8596259d2d921fc0395a9eaf8dd444032029c 100644 (file)
@@ -13,7 +13,7 @@
 static __always_inline bool arch_static_branch(struct static_key *key,
                                               bool branch)
 {
-       asm_volatile_goto("1:\n\t"
+       asm goto("1:\n\t"
                          "_nop\n\t"
                          ".pushsection __jump_table,  \"aw\"\n\t"
                          ".word 1b, %l[l_yes], %c0\n\t"
@@ -38,7 +38,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key,
         * make it reachable and wrap both into a no-transform block
         * to avoid any assembler interference with this.
         */
-       asm_volatile_goto("1:\n\t"
+       asm goto("1:\n\t"
                          ".begin no-transform\n\t"
                          "_j %l[l_yes]\n\t"
                          "2:\n\t"
index aa6752237985afb13a6ee15ccac5f4b32ef3eda1..05fc02f9e1c78b553ae7fab2cf2164ea60f9ef41 100644 (file)
@@ -11,6 +11,7 @@
  * Based on work from Matt Porter <mporter@mvista.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/pci.h>
@@ -222,10 +223,11 @@ pciauto_postscan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus,
 
 int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
 {
-       int sub_bus, pci_devfn, pci_class, cmdstat, found_multi=0;
+       int sub_bus, pci_devfn, pci_class, cmdstat;
        unsigned short vid;
        unsigned char header_type;
        struct pci_dev *dev = &pciauto_dev;
+       bool found_multi = false;
 
        pciauto_dev.bus = &pciauto_bus;
        pciauto_dev.sysdata = pci_ctrl;
@@ -261,11 +263,11 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
                        continue;
 
                if (!PCI_FUNC(pci_devfn))
-                       found_multi = header_type & 0x80;
+                       found_multi = FIELD_GET(PCI_HEADER_TYPE_MFD, header_type);
                pci_read_config_word(dev, PCI_VENDOR_ID, &vid);
 
                if (vid == 0xffff || vid == 0x0000) {
-                       found_multi = 0;
+                       found_multi = false;
                        continue;
                }
 
index 7d1f8b398a464d8909a3cb3af8920d494c7cf044..8896e691c051eab87ba88ffcbac01bd4860df0de 100644 (file)
@@ -65,7 +65,7 @@ static void rs_poll(struct timer_list *unused)
        struct tty_port *port = &serial_port;
        int i = 0;
        int rd = 1;
-       unsigned char c;
+       u8 c;
 
        while (simc_poll(0)) {
                rd = simc_read(0, &c, 1);
index feef615e2c9c8935e2cd11a4b1e09f115171d328..c9a16fba58b9c47f5424be9a8c7c6681d176b986 100644 (file)
@@ -336,7 +336,7 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
        if (nr_vecs > BIO_MAX_VECS)
                return -E2BIG;
        if (nr_vecs > UIO_FASTIOV) {
-               bvec = kcalloc(sizeof(*bvec), nr_vecs, GFP_KERNEL);
+               bvec = kcalloc(nr_vecs, sizeof(*bvec), GFP_KERNEL);
                if (!bvec)
                        return -ENOMEM;
                pages = NULL;
index e303fd31731377cae25738cd82beea6b96a4d1b9..ff93c385ba5afb6920b53fdbcf96bd5d3970d17a 100644 (file)
@@ -300,7 +300,7 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
  * @disk: gendisk the new blkg is associated with
  * @gfp_mask: allocation mask to use
  *
- * Allocate a new blkg assocating @blkcg and @q.
+ * Allocate a new blkg associating @blkcg and @disk.
  */
 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
                                   gfp_t gfp_mask)
index 11342af420d0c41d1c98a729471dcd6bfb46da05..de771093b52687ae2431af36bf75b73ccaa1bbf0 100644 (file)
@@ -49,6 +49,7 @@
 #include "blk-pm.h"
 #include "blk-cgroup.h"
 #include "blk-throttle.h"
+#include "blk-ioprio.h"
 
 struct dentry *blk_debugfs_root;
 
@@ -833,6 +834,14 @@ end_io:
 }
 EXPORT_SYMBOL(submit_bio_noacct);
 
+static void bio_set_ioprio(struct bio *bio)
+{
+       /* Nobody set ioprio so far? Initialize it based on task's nice value */
+       if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
+               bio->bi_ioprio = get_current_ioprio();
+       blkcg_set_ioprio(bio);
+}
+
 /**
  * submit_bio - submit a bio to the block device layer for I/O
  * @bio: The &struct bio which describes the I/O
@@ -855,6 +864,7 @@ void submit_bio(struct bio *bio)
                count_vm_events(PGPGOUT, bio_sectors(bio));
        }
 
+       bio_set_ioprio(bio);
        submit_bio_noacct(bio);
 }
 EXPORT_SYMBOL(submit_bio);
index 089fcb9cfce37011f4cf6ec1f86ba36853fed381..04d44f0bcbc85d4898df728c8ceefb3f1b5bea39 100644 (file)
@@ -1261,7 +1261,7 @@ static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 {
        struct ioc *ioc = iocg->ioc;
-       u64 last_period, cur_period;
+       u64 __maybe_unused last_period, cur_period;
        u64 vtime, vtarget;
        int i;
 
@@ -1353,6 +1353,13 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
 
        lockdep_assert_held(&iocg->waitq.lock);
 
+       /*
+        * If the delay is set by another CPU, we may be in the past. No need to
+        * change anything if so. This avoids decay calculation underflow.
+        */
+       if (time_before64(now->now, iocg->delay_at))
+               return false;
+
        /* calculate the current delay in effect - 1/2 every second */
        tdelta = now->now - iocg->delay_at;
        if (iocg->delay)
index 8584babf3ea0ca2590f30383b9594231266e9437..71210cdb34426d967b5632667cb7579b11e97a2d 100644 (file)
@@ -205,12 +205,19 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
        /*
         * success
         */
-       if ((iov_iter_rw(iter) == WRITE &&
-            (!map_data || !map_data->null_mapped)) ||
-           (map_data && map_data->from_user)) {
+       if (iov_iter_rw(iter) == WRITE &&
+            (!map_data || !map_data->null_mapped)) {
                ret = bio_copy_from_iter(bio, iter);
                if (ret)
                        goto cleanup;
+       } else if (map_data && map_data->from_user) {
+               struct iov_iter iter2 = *iter;
+
+               /* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
+               iter2.data_source = ITER_SOURCE;
+               ret = bio_copy_from_iter(bio, &iter2);
+               if (ret)
+                       goto cleanup;
        } else {
                if (bmd->is_our_pages)
                        zero_fill_bio(bio);
index 5cbeb9344f2f5cea3121197892a412deab777838..94668e72ab09bf0922c8d79846a87d91fdbeb1ba 100644 (file)
@@ -479,23 +479,6 @@ out:
        return res;
 }
 
-static int hctx_run_show(void *data, struct seq_file *m)
-{
-       struct blk_mq_hw_ctx *hctx = data;
-
-       seq_printf(m, "%lu\n", hctx->run);
-       return 0;
-}
-
-static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
-                             loff_t *ppos)
-{
-       struct blk_mq_hw_ctx *hctx = data;
-
-       hctx->run = 0;
-       return count;
-}
-
 static int hctx_active_show(void *data, struct seq_file *m)
 {
        struct blk_mq_hw_ctx *hctx = data;
@@ -624,7 +607,6 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"tags_bitmap", 0400, hctx_tags_bitmap_show},
        {"sched_tags", 0400, hctx_sched_tags_show},
        {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
-       {"run", 0600, hctx_run_show, hctx_run_write},
        {"active", 0400, hctx_active_show},
        {"dispatch_busy", 0400, hctx_dispatch_busy_show},
        {"type", 0400, hctx_type_show},
index 67c95f31b15bb1e16c022efe644dcb0900ef12f6..451a2c1f1f32186989160ed6e77e87cb8d14f4f1 100644 (file)
@@ -324,8 +324,6 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
        if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                return;
 
-       hctx->run++;
-
        /*
         * A return of -EAGAIN is an indication that hctx->dispatch is not
         * empty and we must run again in order to avoid starving flushes.
index c11c97afa0bc1de400a68cff21e48d7f420e6bab..2dc01551e27c7d1e50266e554fe4bb6d378a1482 100644 (file)
@@ -40,7 +40,6 @@
 #include "blk-stat.h"
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
-#include "blk-ioprio.h"
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
@@ -772,11 +771,16 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
                /*
                 * Partial zone append completions cannot be supported as the
                 * BIO fragments may end up not being written sequentially.
+                * For such case, force the completed nbytes to be equal to
+                * the BIO size so that bio_advance() sets the BIO remaining
+                * size to 0 and we end up calling bio_endio() before returning.
                 */
-               if (bio->bi_iter.bi_size != nbytes)
+               if (bio->bi_iter.bi_size != nbytes) {
                        bio->bi_status = BLK_STS_IOERR;
-               else
+                       nbytes = bio->bi_iter.bi_size;
+               } else {
                        bio->bi_iter.bi_sector = rq->__sector;
+               }
        }
 
        bio_advance(bio, nbytes);
@@ -1859,6 +1863,22 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(wq, wait);
 
+       /*
+        * Add one explicit barrier since blk_mq_get_driver_tag() may
+        * not imply barrier in case of failure.
+        *
+        * Order adding us to wait queue and allocating driver tag.
+        *
+        * The pair is the one implied in sbitmap_queue_wake_up() which
+        * orders clearing sbitmap tag bits and waitqueue_active() in
+        * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless
+        *
+        * Otherwise, re-order of adding wait queue and getting driver tag
+        * may cause __sbitmap_queue_wake_up() to wake up nothing because
+        * the waitqueue_active() may not observe us in wait queue.
+        */
+       smp_mb();
+
        /*
         * It's possible that a tag was freed in the window between the
         * allocation failure and adding the hardware queue to the wait
@@ -2891,8 +2911,11 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        return NULL;
 }
 
-/* return true if this @rq can be used for @bio */
-static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
+/*
+ * Check if we can use the passed on request for submitting the passed in bio,
+ * and remove it from the request list if it can be used.
+ */
+static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
                struct bio *bio)
 {
        enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
@@ -2920,14 +2943,6 @@ static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
        return true;
 }
 
-static void bio_set_ioprio(struct bio *bio)
-{
-       /* Nobody set ioprio so far? Initialize it based on task's nice value */
-       if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
-               bio->bi_ioprio = get_current_ioprio();
-       blkcg_set_ioprio(bio);
-}
-
 /**
  * blk_mq_submit_bio - Create and send a request to block device.
  * @bio: Bio pointer.
@@ -2952,13 +2967,6 @@ void blk_mq_submit_bio(struct bio *bio)
        blk_status_t ret;
 
        bio = blk_queue_bounce(bio, q);
-       if (bio_may_exceed_limits(bio, &q->limits)) {
-               bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
-               if (!bio)
-                       return;
-       }
-
-       bio_set_ioprio(bio);
 
        if (plug) {
                rq = rq_list_peek(&plug->cached_rq);
@@ -2966,16 +2974,26 @@ void blk_mq_submit_bio(struct bio *bio)
                        rq = NULL;
        }
        if (rq) {
+               if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+                       bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+                       if (!bio)
+                               return;
+               }
                if (!bio_integrity_prep(bio))
                        return;
                if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
                        return;
-               if (blk_mq_can_use_cached_rq(rq, plug, bio))
+               if (blk_mq_use_cached_rq(rq, plug, bio))
                        goto done;
                percpu_ref_get(&q->q_usage_counter);
        } else {
                if (unlikely(bio_queue_enter(bio)))
                        return;
+               if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+                       bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+                       if (!bio)
+                               goto fail;
+               }
                if (!bio_integrity_prep(bio))
                        goto fail;
        }
index 5ba3cd574eacbddc1b92bbaec3d79d81fb66ae7a..0c0e270a82650d9a0c6977931cd8a833b467f520 100644 (file)
@@ -163,9 +163,9 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
  */
 static bool wb_recent_wait(struct rq_wb *rwb)
 {
-       struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb;
+       struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
 
-       return time_before(jiffies, wb->dirty_sleep + HZ);
+       return time_before(jiffies, bdi->last_bdp_sleep + HZ);
 }
 
 static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
index 9c73a763ef8838953bd1050b505621c39b8d4cdb..438f79c564cfc05d6f525550417eeee93c7b82bb 100644 (file)
@@ -20,8 +20,6 @@ static int blkpg_do_ioctl(struct block_device *bdev,
        struct blkpg_partition p;
        sector_t start, length;
 
-       if (disk->flags & GENHD_FL_NO_PART)
-               return -EINVAL;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
index b5a942519a797ceab4729231dd7c673e50ab0613..73301a261429ff9e04a166aefa5c72df55eaee57 100644 (file)
@@ -139,32 +139,6 @@ out:
        return ret;
 }
 
-/*
- * If the task has set an I/O priority, use that. Otherwise, return
- * the default I/O priority.
- *
- * Expected to be called for current task or with task_lock() held to keep
- * io_context stable.
- */
-int __get_task_ioprio(struct task_struct *p)
-{
-       struct io_context *ioc = p->io_context;
-       int prio;
-
-       if (p != current)
-               lockdep_assert_held(&p->alloc_lock);
-       if (ioc)
-               prio = ioc->ioprio;
-       else
-               prio = IOPRIO_DEFAULT;
-
-       if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
-               prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
-                                        task_nice_ioprio(p));
-       return prio;
-}
-EXPORT_SYMBOL_GPL(__get_task_ioprio);
-
 static int get_task_ioprio(struct task_struct *p)
 {
        int ret;
index e6ac73617f3e12db18d7bc9f4ade561494739cd6..5f5ed5c75f04d91d7bc8bf87ff4c9fa685c62318 100644 (file)
@@ -439,6 +439,11 @@ int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
                goto out;
        }
 
+       if (disk->flags & GENHD_FL_NO_PART) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        if (partition_overlaps(disk, start, length, -1)) {
                ret = -EBUSY;
                goto out;
@@ -562,8 +567,8 @@ static bool blk_add_partition(struct gendisk *disk,
        part = add_partition(disk, p, from, size, state->parts[p].flags,
                             &state->parts[p].info);
        if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
-               printk(KERN_ERR " %s: p%d could not be added: %ld\n",
-                      disk->disk_name, p, -PTR_ERR(part));
+               printk(KERN_ERR " %s: p%d could not be added: %pe\n",
+                      disk->disk_name, p, part);
                return true;
        }
 
index 82c44d4899b9676d4d43c2f2af7fd9f95758b894..e24c829d7a0154f0ff016152e6913bff105cd93f 100644 (file)
@@ -91,13 +91,13 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
                if (!(msg->msg_flags & MSG_MORE)) {
                        err = hash_alloc_result(sk, ctx);
                        if (err)
-                               goto unlock_free;
+                               goto unlock_free_result;
                        ahash_request_set_crypt(&ctx->req, NULL,
                                                ctx->result, 0);
                        err = crypto_wait_req(crypto_ahash_final(&ctx->req),
                                              &ctx->wait);
                        if (err)
-                               goto unlock_free;
+                               goto unlock_free_result;
                }
                goto done_more;
        }
@@ -170,6 +170,7 @@ unlock:
 
 unlock_free:
        af_alg_free_sg(&ctx->sgl);
+unlock_free_result:
        hash_free_result(sk, ctx);
        ctx->more = false;
        goto unlock;
index eedddef9ce40cc40fa7a3c2cd3bcca7607be491b..e81918ca68b782c881bf6f868b441281e249e7f4 100644 (file)
@@ -148,6 +148,9 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
        if (!is_power_of_2(inst->alg.co.base.cra_blocksize))
                goto out_free_inst;
 
+       if (inst->alg.co.statesize)
+               goto out_free_inst;
+
        inst->alg.encrypt = crypto_cbc_encrypt;
        inst->alg.decrypt = crypto_cbc_decrypt;
 
index 19035230563d7bf8ca2625a06c241d0eb010c3b7..7cb962e2145349670e4a506c879822a6bb3c6c23 100644 (file)
@@ -102,7 +102,7 @@ static int reset_pending_show(struct seq_file *s, void *v)
 {
        struct ivpu_device *vdev = seq_to_ivpu(s);
 
-       seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset));
+       seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_pending));
        return 0;
 }
 
@@ -130,7 +130,9 @@ dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size
 
        fw->dvfs_mode = dvfs_mode;
 
-       ivpu_pm_schedule_recovery(vdev);
+       ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+       if (ret)
+               return ret;
 
        return size;
 }
@@ -190,7 +192,10 @@ fw_profiling_freq_fops_write(struct file *file, const char __user *user_buf,
                return ret;
 
        ivpu_hw_profiling_freq_drive(vdev, enable);
-       ivpu_pm_schedule_recovery(vdev);
+
+       ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+       if (ret)
+               return ret;
 
        return size;
 }
@@ -301,11 +306,18 @@ static ssize_t
 ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
 {
        struct ivpu_device *vdev = file->private_data;
+       int ret;
 
        if (!size)
                return -EINVAL;
 
-       ivpu_pm_schedule_recovery(vdev);
+       ret = ivpu_rpm_get(vdev);
+       if (ret)
+               return ret;
+
+       ivpu_pm_trigger_recovery(vdev, "debugfs");
+       flush_work(&vdev->pm->recovery_work);
+       ivpu_rpm_put(vdev);
        return size;
 }
 
index 64927682161b282e739ef024a0ccff29c49de2cd..4b06402269869335c324770fc572a57bea7316f7 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drm_accel.h>
 #include <drm/drm_file.h>
@@ -17,6 +18,7 @@
 #include "ivpu_debugfs.h"
 #include "ivpu_drv.h"
 #include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
 #include "ivpu_gem.h"
 #include "ivpu_hw.h"
 #include "ivpu_ipc.h"
@@ -65,22 +67,20 @@ struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
        return file_priv;
 }
 
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
+static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
 {
-       struct ivpu_file_priv *file_priv;
-
-       xa_lock_irq(&vdev->context_xa);
-       file_priv = xa_load(&vdev->context_xa, id);
-       /* file_priv may still be in context_xa during file_priv_release() */
-       if (file_priv && !kref_get_unless_zero(&file_priv->ref))
-               file_priv = NULL;
-       xa_unlock_irq(&vdev->context_xa);
-
-       if (file_priv)
-               ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
-                        file_priv->ctx.id, kref_read(&file_priv->ref));
-
-       return file_priv;
+       mutex_lock(&file_priv->lock);
+       if (file_priv->bound) {
+               ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
+
+               ivpu_cmdq_release_all_locked(file_priv);
+               ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+               ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
+               ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+               file_priv->bound = false;
+               drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
+       }
+       mutex_unlock(&file_priv->lock);
 }
 
 static void file_priv_release(struct kref *ref)
@@ -88,13 +88,15 @@ static void file_priv_release(struct kref *ref)
        struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
        struct ivpu_device *vdev = file_priv->vdev;
 
-       ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
+       ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
+                file_priv->ctx.id, (bool)file_priv->bound);
+
+       pm_runtime_get_sync(vdev->drm.dev);
+       mutex_lock(&vdev->context_list_lock);
+       file_priv_unbind(vdev, file_priv);
+       mutex_unlock(&vdev->context_list_lock);
+       pm_runtime_put_autosuspend(vdev->drm.dev);
 
-       ivpu_cmdq_release_all(file_priv);
-       ivpu_jsm_context_release(vdev, file_priv->ctx.id);
-       ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx);
-       ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-       drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
        mutex_destroy(&file_priv->lock);
        kfree(file_priv);
 }
@@ -176,9 +178,6 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
        case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
                args->value = vdev->hw->ranges.user.start;
                break;
-       case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
-               args->value = file_priv->priority;
-               break;
        case DRM_IVPU_PARAM_CONTEXT_ID:
                args->value = file_priv->ctx.id;
                break;
@@ -218,17 +217,10 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 
 static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-       struct ivpu_file_priv *file_priv = file->driver_priv;
        struct drm_ivpu_param *args = data;
        int ret = 0;
 
        switch (args->param) {
-       case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
-               if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
-                       file_priv->priority = args->value;
-               else
-                       ret = -EINVAL;
-               break;
        default:
                ret = -EINVAL;
        }
@@ -241,50 +233,53 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
        struct ivpu_device *vdev = to_ivpu_device(dev);
        struct ivpu_file_priv *file_priv;
        u32 ctx_id;
-       void *old;
-       int ret;
+       int idx, ret;
 
-       ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
-       if (ret) {
-               ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
-               return ret;
-       }
+       if (!drm_dev_enter(dev, &idx))
+               return -ENODEV;
 
        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv) {
                ret = -ENOMEM;
-               goto err_xa_erase;
+               goto err_dev_exit;
        }
 
        file_priv->vdev = vdev;
-       file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
+       file_priv->bound = true;
        kref_init(&file_priv->ref);
        mutex_init(&file_priv->lock);
 
+       mutex_lock(&vdev->context_list_lock);
+
+       ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
+                          vdev->context_xa_limit, GFP_KERNEL);
+       if (ret) {
+               ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
+               goto err_unlock;
+       }
+
        ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
        if (ret)
-               goto err_mutex_destroy;
+               goto err_xa_erase;
 
-       old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
-       if (xa_is_err(old)) {
-               ret = xa_err(old);
-               ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
-               goto err_ctx_fini;
-       }
+       mutex_unlock(&vdev->context_list_lock);
+       drm_dev_exit(idx);
+
+       file->driver_priv = file_priv;
 
        ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
                 ctx_id, current->comm, task_pid_nr(current));
 
-       file->driver_priv = file_priv;
        return 0;
 
-err_ctx_fini:
-       ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-err_mutex_destroy:
-       mutex_destroy(&file_priv->lock);
-       kfree(file_priv);
 err_xa_erase:
        xa_erase_irq(&vdev->context_xa, ctx_id);
+err_unlock:
+       mutex_unlock(&vdev->context_list_lock);
+       mutex_destroy(&file_priv->lock);
+       kfree(file_priv);
+err_dev_exit:
+       drm_dev_exit(idx);
        return ret;
 }
 
@@ -340,8 +335,6 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
 
        if (!ret)
                ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
-       else
-               ivpu_hw_diagnose_failure(vdev);
 
        return ret;
 }
@@ -369,6 +362,9 @@ int ivpu_boot(struct ivpu_device *vdev)
        ret = ivpu_wait_for_ready(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
+               ivpu_hw_diagnose_failure(vdev);
+               ivpu_mmu_evtq_dump(vdev);
+               ivpu_fw_log_dump(vdev);
                return ret;
        }
 
@@ -484,9 +480,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
        /* Clear any pending errors */
        pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
 
-       /* VPU 37XX does not require 10m D3hot delay */
-       if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
-               pdev->d3hot_delay = 0;
+       /* NPU does not require 10m D3hot delay */
+       pdev->d3hot_delay = 0;
 
        ret = pcim_enable_device(pdev);
        if (ret) {
@@ -540,6 +535,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
        lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
        INIT_LIST_HEAD(&vdev->bo_list);
 
+       ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
+       if (ret)
+               goto err_xa_destroy;
+
        ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
        if (ret)
                goto err_xa_destroy;
@@ -611,14 +610,30 @@ err_xa_destroy:
        return ret;
 }
 
+static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
+{
+       struct ivpu_file_priv *file_priv;
+       unsigned long ctx_id;
+
+       mutex_lock(&vdev->context_list_lock);
+
+       xa_for_each(&vdev->context_xa, ctx_id, file_priv)
+               file_priv_unbind(vdev, file_priv);
+
+       mutex_unlock(&vdev->context_list_lock);
+}
+
 static void ivpu_dev_fini(struct ivpu_device *vdev)
 {
        ivpu_pm_disable(vdev);
        ivpu_shutdown(vdev);
        if (IVPU_WA(d3hot_after_power_off))
                pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
+       ivpu_jobs_abort_all(vdev);
        ivpu_job_done_consumer_fini(vdev);
        ivpu_pm_cancel_recovery(vdev);
+       ivpu_bo_unbind_all_user_contexts(vdev);
 
        ivpu_ipc_fini(vdev);
        ivpu_fw_fini(vdev);
index ebc4b84f27b209df9d653747772e756702cf4601..069ace4adb2d19c1a0544333d0da65632c524ea7 100644 (file)
@@ -56,6 +56,7 @@
 #define IVPU_DBG_JSM    BIT(10)
 #define IVPU_DBG_KREF   BIT(11)
 #define IVPU_DBG_RPM    BIT(12)
+#define IVPU_DBG_MMU_MAP BIT(13)
 
 #define ivpu_err(vdev, fmt, ...) \
        drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
@@ -114,6 +115,7 @@ struct ivpu_device {
 
        struct ivpu_mmu_context gctx;
        struct ivpu_mmu_context rctx;
+       struct mutex context_list_lock; /* Protects user context addition/removal */
        struct xarray context_xa;
        struct xa_limit context_xa_limit;
 
@@ -145,8 +147,8 @@ struct ivpu_file_priv {
        struct mutex lock; /* Protects cmdq */
        struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
        struct ivpu_mmu_context ctx;
-       u32 priority;
        bool has_mmu_faults;
+       bool bound;
 };
 
 extern int ivpu_dbg_mask;
@@ -162,7 +164,6 @@ extern bool ivpu_disable_mmu_cont_pages;
 extern int ivpu_test_mode;
 
 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id);
 void ivpu_file_priv_put(struct ivpu_file_priv **link);
 
 int ivpu_boot(struct ivpu_device *vdev);
index 6576232f3e678ee7c2532b07c830b74733c06960..5fa8bd4603d5be6f1fba8c43ba058e6a9b4f3676 100644 (file)
@@ -222,7 +222,6 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
        const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data;
 
        if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) ||
-           (ivpu_hw_gen(vdev) > IVPU_HW_37XX) ||
            (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE))
                vdev->wa.disable_d0i3_msg = true;
 
index 1dda4f38ea25cd356cc9efadcaa8d35394c6b19f..e9ddbe9f50ebeffaa3a1617431b864ceec73e2e7 100644 (file)
@@ -24,14 +24,11 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs;
 
 static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
 {
-       if (bo->ctx)
-               ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u ctx %d vpu_addr 0x%llx mmu_mapped %d\n",
-                        action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
-                        bo->handle, bo->ctx->id, bo->vpu_addr, bo->mmu_mapped);
-       else
-               ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u (not added to context)\n",
-                        action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
-                        bo->handle);
+       ivpu_dbg(vdev, BO,
+                "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
+                action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+                (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
+                (bool)bo->base.base.import_attach);
 }
 
 /*
@@ -49,12 +46,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
        mutex_lock(&bo->lock);
 
        ivpu_dbg_bo(vdev, bo, "pin");
-
-       if (!bo->ctx) {
-               ivpu_err(vdev, "vpu_addr not allocated for BO %d\n", bo->handle);
-               ret = -EINVAL;
-               goto unlock;
-       }
+       drm_WARN_ON(&vdev->drm, !bo->ctx);
 
        if (!bo->mmu_mapped) {
                struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
@@ -85,7 +77,10 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
                       const struct ivpu_addr_range *range)
 {
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
-       int ret;
+       int idx, ret;
+
+       if (!drm_dev_enter(&vdev->drm, &idx))
+               return -ENODEV;
 
        mutex_lock(&bo->lock);
 
@@ -101,6 +96,8 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 
        mutex_unlock(&bo->lock);
 
+       drm_dev_exit(idx);
+
        return ret;
 }
 
@@ -108,11 +105,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 {
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
 
-       lockdep_assert_held(&bo->lock);
-
-       ivpu_dbg_bo(vdev, bo, "unbind");
-
-       /* TODO: dma_unmap */
+       lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
 
        if (bo->mmu_mapped) {
                drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -124,19 +117,23 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 
        if (bo->ctx) {
                ivpu_mmu_context_remove_node(bo->ctx, &bo->mm_node);
-               bo->vpu_addr = 0;
                bo->ctx = NULL;
        }
-}
 
-static void ivpu_bo_unbind(struct ivpu_bo *bo)
-{
-       mutex_lock(&bo->lock);
-       ivpu_bo_unbind_locked(bo);
-       mutex_unlock(&bo->lock);
+       if (bo->base.base.import_attach)
+               return;
+
+       dma_resv_lock(bo->base.base.resv, NULL);
+       if (bo->base.sgt) {
+               dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+               sg_free_table(bo->base.sgt);
+               kfree(bo->base.sgt);
+               bo->base.sgt = NULL;
+       }
+       dma_resv_unlock(bo->base.base.resv);
 }
 
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
 {
        struct ivpu_bo *bo;
 
@@ -146,8 +143,10 @@ void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
        mutex_lock(&vdev->bo_list_lock);
        list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
                mutex_lock(&bo->lock);
-               if (bo->ctx == ctx)
+               if (bo->ctx == ctx) {
+                       ivpu_dbg_bo(vdev, bo, "unbind");
                        ivpu_bo_unbind_locked(bo);
+               }
                mutex_unlock(&bo->lock);
        }
        mutex_unlock(&vdev->bo_list_lock);
@@ -199,9 +198,6 @@ ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
        list_add_tail(&bo->bo_list_node, &vdev->bo_list);
        mutex_unlock(&vdev->bo_list_lock);
 
-       ivpu_dbg(vdev, BO, "create: vpu_addr 0x%llx size %zu flags 0x%x\n",
-                bo->vpu_addr, bo->base.base.size, flags);
-
        return bo;
 }
 
@@ -212,6 +208,12 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
        struct ivpu_bo *bo = to_ivpu_bo(obj);
        struct ivpu_addr_range *range;
 
+       if (bo->ctx) {
+               ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
+                         file_priv->ctx.id, bo->ctx->id);
+               return -EALREADY;
+       }
+
        if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
                range = &vdev->hw->ranges.shave;
        else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
@@ -227,62 +229,24 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
        struct ivpu_device *vdev = to_ivpu_device(obj->dev);
        struct ivpu_bo *bo = to_ivpu_bo(obj);
 
+       ivpu_dbg_bo(vdev, bo, "free");
+
        mutex_lock(&vdev->bo_list_lock);
        list_del(&bo->bo_list_node);
        mutex_unlock(&vdev->bo_list_lock);
 
        drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
 
-       ivpu_dbg_bo(vdev, bo, "free");
-
-       ivpu_bo_unbind(bo);
+       ivpu_bo_unbind_locked(bo);
        mutex_destroy(&bo->lock);
 
        drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
        drm_gem_shmem_free(&bo->base);
 }
 
-static const struct dma_buf_ops ivpu_bo_dmabuf_ops =  {
-       .cache_sgt_mapping = true,
-       .attach = drm_gem_map_attach,
-       .detach = drm_gem_map_detach,
-       .map_dma_buf = drm_gem_map_dma_buf,
-       .unmap_dma_buf = drm_gem_unmap_dma_buf,
-       .release = drm_gem_dmabuf_release,
-       .mmap = drm_gem_dmabuf_mmap,
-       .vmap = drm_gem_dmabuf_vmap,
-       .vunmap = drm_gem_dmabuf_vunmap,
-};
-
-static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags)
-{
-       struct drm_device *dev = obj->dev;
-       struct dma_buf_export_info exp_info = {
-               .exp_name = KBUILD_MODNAME,
-               .owner = dev->driver->fops->owner,
-               .ops = &ivpu_bo_dmabuf_ops,
-               .size = obj->size,
-               .flags = flags,
-               .priv = obj,
-               .resv = obj->resv,
-       };
-       void *sgt;
-
-       /*
-        * Make sure that pages are allocated and dma-mapped before exporting the bo.
-        * DMA-mapping is required if the bo will be imported to the same device.
-        */
-       sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj));
-       if (IS_ERR(sgt))
-               return sgt;
-
-       return drm_gem_dmabuf_export(dev, &exp_info);
-}
-
 static const struct drm_gem_object_funcs ivpu_gem_funcs = {
        .free = ivpu_bo_free,
        .open = ivpu_bo_open,
-       .export = ivpu_bo_export,
        .print_info = drm_gem_shmem_object_print_info,
        .pin = drm_gem_shmem_object_pin,
        .unpin = drm_gem_shmem_object_unpin,
@@ -315,11 +279,9 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
                return PTR_ERR(bo);
        }
 
-       ret = drm_gem_handle_create(file, &bo->base.base, &bo->handle);
-       if (!ret) {
+       ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+       if (!ret)
                args->vpu_addr = bo->vpu_addr;
-               args->handle = bo->handle;
-       }
 
        drm_gem_object_put(&bo->base.base);
 
@@ -361,7 +323,9 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
        if (ret)
                goto err_put;
 
+       dma_resv_lock(bo->base.base.resv, NULL);
        ret = drm_gem_shmem_vmap(&bo->base, &map);
+       dma_resv_unlock(bo->base.base.resv);
        if (ret)
                goto err_put;
 
@@ -376,7 +340,10 @@ void ivpu_bo_free_internal(struct ivpu_bo *bo)
 {
        struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
 
+       dma_resv_lock(bo->base.base.resv, NULL);
        drm_gem_shmem_vunmap(&bo->base, &map);
+       dma_resv_unlock(bo->base.base.resv);
+
        drm_gem_object_put(&bo->base.base);
 }
 
@@ -432,19 +399,11 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 {
-       unsigned long dma_refcount = 0;
-
        mutex_lock(&bo->lock);
 
-       if (bo->base.base.dma_buf && bo->base.base.dma_buf->file)
-               dma_refcount = atomic_long_read(&bo->base.base.dma_buf->file->f_count);
-
-       drm_printf(p, "%-3u %-6d 0x%-12llx %-10lu 0x%-8x %-4u %-8lu",
-                  bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.base.size,
-                  bo->flags, kref_read(&bo->base.base.refcount), dma_refcount);
-
-       if (bo->base.base.import_attach)
-               drm_printf(p, " imported");
+       drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
+                  bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
+                  bo->flags, kref_read(&bo->base.base.refcount));
 
        if (bo->base.pages)
                drm_printf(p, " has_pages");
@@ -452,6 +411,9 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
        if (bo->mmu_mapped)
                drm_printf(p, " mmu_mapped");
 
+       if (bo->base.base.import_attach)
+               drm_printf(p, " imported");
+
        drm_printf(p, "\n");
 
        mutex_unlock(&bo->lock);
@@ -462,8 +424,8 @@ void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
        struct ivpu_device *vdev = to_ivpu_device(dev);
        struct ivpu_bo *bo;
 
-       drm_printf(p, "%-3s %-6s %-14s %-10s %-10s %-4s %-8s %s\n",
-                  "ctx", "handle", "vpu_addr", "size", "flags", "refs", "dma_refs", "attribs");
+       drm_printf(p, "%-9s %-3s %-14s %-10s %-10s %-4s %s\n",
+                  "bo", "ctx", "vpu_addr", "size", "flags", "refs", "attribs");
 
        mutex_lock(&vdev->bo_list_lock);
        list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
index d75cad0d3c742db703dbe0812a0df6eaaba24d53..a8559211c70d41ac20bae1da57d846ffd4d7e3b3 100644 (file)
@@ -19,14 +19,13 @@ struct ivpu_bo {
 
        struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
        u64 vpu_addr;
-       u32 handle;
        u32 flags;
        u32 job_status; /* Valid only for command buffer */
        bool mmu_mapped;
 };
 
 int ivpu_bo_pin(struct ivpu_bo *bo);
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
 
 struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
 struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
index 574cdeefb66b39af45beda6534a6ac3eb0e57c7b..77accd029c4a71399de1729323405d7a3c262cc2 100644 (file)
@@ -525,7 +525,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
        u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
 
        val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
-       val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+       val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
        val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
 
        REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@@ -875,24 +875,18 @@ static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
 
 static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
 {
-       ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
-
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
 }
 
 static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
 {
-       ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
-
        ivpu_hw_wdt_disable(vdev);
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
 }
 
 static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
 {
-       ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
-
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
 }
 
 /* Handler for IRQs from VPU core (irqV) */
@@ -970,7 +964,7 @@ static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
                REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
 
        if (schedule_recovery)
-               ivpu_pm_schedule_recovery(vdev);
+               ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
 
        return true;
 }
index eba2fdef2ace1384c93c1cbb30a8e3d9633abba8..1c995307c1138885dc9cacd7ad73ca16633f2158 100644 (file)
@@ -530,7 +530,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
        u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
 
        val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
-       val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+       val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
        val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
 
        REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@@ -704,7 +704,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
 {
        struct ivpu_hw_info *hw = vdev->hw;
        u32 tile_disable;
-       u32 tile_enable;
        u32 fuse;
 
        fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
@@ -725,10 +724,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
        else
                ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
 
-       tile_enable = (~tile_disable) & TILE_MAX_MASK;
-
-       hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
-       hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
        hw->tile_fuse = tile_disable;
        hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
 
@@ -746,7 +741,7 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
        return 0;
 }
 
-static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+static int ivpu_hw_40xx_ip_reset(struct ivpu_device *vdev)
 {
        int ret;
        u32 val;
@@ -768,6 +763,23 @@ static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
        return ret;
 }
 
+static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+{
+       int ret = 0;
+
+       if (ivpu_hw_40xx_ip_reset(vdev)) {
+               ivpu_err(vdev, "Failed to reset VPU IP\n");
+               ret = -EIO;
+       }
+
+       if (ivpu_pll_disable(vdev)) {
+               ivpu_err(vdev, "Failed to disable PLL\n");
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
 static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
 {
        int ret;
@@ -913,7 +925,7 @@ static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
 
        ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
 
-       if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
+       if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev))
                ivpu_warn(vdev, "Failed to reset the VPU\n");
 
        if (ivpu_pll_disable(vdev)) {
@@ -1032,18 +1044,18 @@ static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
 static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
 {
        /* TODO: For LNN hang consider engine reset instead of full recovery */
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
 }
 
 static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
 {
        ivpu_hw_wdt_disable(vdev);
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
 }
 
 static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
 {
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
 }
 
 /* Handler for IRQs from VPU core (irqV) */
@@ -1137,7 +1149,7 @@ static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
        REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
 
        if (schedule_recovery)
-               ivpu_pm_schedule_recovery(vdev);
+               ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
 
        return true;
 }
index e86621f16f85a8d5d41f0d5dcf1ef99d1e45541d..fa66c39b57ecaaecae036d17f79c3e7683fc5279 100644 (file)
@@ -343,10 +343,8 @@ int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *r
        hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
                                                &hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
                                                vdev->timeout.jsm);
-       if (hb_ret == -ETIMEDOUT) {
-               ivpu_hw_diagnose_failure(vdev);
-               ivpu_pm_schedule_recovery(vdev);
-       }
+       if (hb_ret == -ETIMEDOUT)
+               ivpu_pm_trigger_recovery(vdev, "IPC timeout");
 
        return ret;
 }
index 7206cf9cdb4a45335b220796621fcd2c55a8ddf0..e70cfb8593390e489e9f9868fb6c2420733ae241 100644 (file)
@@ -112,22 +112,20 @@ static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engin
        }
 }
 
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
 {
        int i;
 
-       mutex_lock(&file_priv->lock);
+       lockdep_assert_held(&file_priv->lock);
 
        for (i = 0; i < IVPU_NUM_ENGINES; i++)
                ivpu_cmdq_release_locked(file_priv, i);
-
-       mutex_unlock(&file_priv->lock);
 }
 
 /*
  * Mark the doorbell as unregistered and reset job queue pointers.
  * This function needs to be called when the VPU hardware is restarted
- * and FW looses job queue state. The next time job queue is used it
+ * and FW loses job queue state. The next time job queue is used it
  * will be registered again.
  */
 static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
@@ -161,15 +159,13 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
        struct ivpu_file_priv *file_priv;
        unsigned long ctx_id;
 
-       xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
-               file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
-               if (!file_priv)
-                       continue;
+       mutex_lock(&vdev->context_list_lock);
 
+       xa_for_each(&vdev->context_xa, ctx_id, file_priv)
                ivpu_cmdq_reset_all(file_priv);
 
-               ivpu_file_priv_put(&file_priv);
-       }
+       mutex_unlock(&vdev->context_list_lock);
+
 }
 
 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -243,60 +239,32 @@ static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
        return &fence->base;
 }
 
-static void job_get(struct ivpu_job *job, struct ivpu_job **link)
+static void ivpu_job_destroy(struct ivpu_job *job)
 {
        struct ivpu_device *vdev = job->vdev;
-
-       kref_get(&job->ref);
-       *link = job;
-
-       ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-}
-
-static void job_release(struct kref *ref)
-{
-       struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
-       struct ivpu_device *vdev = job->vdev;
        u32 i;
 
+       ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
+                job->job_id, job->file_priv->ctx.id, job->engine_idx);
+
        for (i = 0; i < job->bo_count; i++)
                if (job->bos[i])
                        drm_gem_object_put(&job->bos[i]->base.base);
 
        dma_fence_put(job->done_fence);
        ivpu_file_priv_put(&job->file_priv);
-
-       ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
        kfree(job);
-
-       /* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
-       ivpu_rpm_put(vdev);
-}
-
-static void job_put(struct ivpu_job *job)
-{
-       struct ivpu_device *vdev = job->vdev;
-
-       ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-       kref_put(&job->ref, job_release);
 }
 
 static struct ivpu_job *
-ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
+ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
 {
        struct ivpu_device *vdev = file_priv->vdev;
        struct ivpu_job *job;
-       int ret;
-
-       ret = ivpu_rpm_get(vdev);
-       if (ret < 0)
-               return NULL;
 
        job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
        if (!job)
-               goto err_rpm_put;
-
-       kref_init(&job->ref);
+               return NULL;
 
        job->vdev = vdev;
        job->engine_idx = engine_idx;
@@ -310,17 +278,14 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
        job->file_priv = ivpu_file_priv_get(file_priv);
 
        ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
-
        return job;
 
 err_free_job:
        kfree(job);
-err_rpm_put:
-       ivpu_rpm_put(vdev);
        return NULL;
 }
 
-static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 {
        struct ivpu_job *job;
 
@@ -329,7 +294,7 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
                return -ENOENT;
 
        if (job->file_priv->has_mmu_faults)
-               job_status = VPU_JSM_STATUS_ABORTED;
+               job_status = DRM_IVPU_JOB_STATUS_ABORTED;
 
        job->bos[CMD_BUF_IDX]->job_status = job_status;
        dma_fence_signal(job->done_fence);
@@ -337,9 +302,10 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
        ivpu_dbg(vdev, JOB, "Job complete:  id %3u ctx %2d engine %d status 0x%x\n",
                 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
 
+       ivpu_job_destroy(job);
        ivpu_stop_job_timeout_detection(vdev);
 
-       job_put(job);
+       ivpu_rpm_put(vdev);
        return 0;
 }
 
@@ -349,10 +315,10 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
        unsigned long id;
 
        xa_for_each(&vdev->submitted_jobs_xa, id, job)
-               ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
+               ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
 }
 
-static int ivpu_direct_job_submission(struct ivpu_job *job)
+static int ivpu_job_submit(struct ivpu_job *job)
 {
        struct ivpu_file_priv *file_priv = job->file_priv;
        struct ivpu_device *vdev = job->vdev;
@@ -360,53 +326,65 @@ static int ivpu_direct_job_submission(struct ivpu_job *job)
        struct ivpu_cmdq *cmdq;
        int ret;
 
+       ret = ivpu_rpm_get(vdev);
+       if (ret < 0)
+               return ret;
+
        mutex_lock(&file_priv->lock);
 
        cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
        if (!cmdq) {
-               ivpu_warn(vdev, "Failed get job queue, ctx %d engine %d\n",
-                         file_priv->ctx.id, job->engine_idx);
+               ivpu_warn_ratelimited(vdev, "Failed get job queue, ctx %d engine %d\n",
+                                     file_priv->ctx.id, job->engine_idx);
                ret = -EINVAL;
-               goto err_unlock;
+               goto err_unlock_file_priv;
        }
 
        job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
        job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
 
-       job_get(job, &job);
-       ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
+       xa_lock(&vdev->submitted_jobs_xa);
+       ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
        if (ret) {
-               ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
-               goto err_job_put;
+               ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
+                        file_priv->ctx.id);
+               ret = -EBUSY;
+               goto err_unlock_submitted_jobs_xa;
        }
 
        ret = ivpu_cmdq_push_job(cmdq, job);
        if (ret)
-               goto err_xa_erase;
+               goto err_erase_xa;
 
        ivpu_start_job_timeout_detection(vdev);
 
-       ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
-                job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
-                job->engine_idx, cmdq->jobq->header.tail);
-
-       if (ivpu_test_mode & IVPU_TEST_MODE_NULL_HW) {
-               ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+       if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
                cmdq->jobq->header.head = cmdq->jobq->header.tail;
                wmb(); /* Flush WC buffer for jobq header */
        } else {
                ivpu_cmdq_ring_db(vdev, cmdq);
        }
 
+       ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
+                job->job_id, file_priv->ctx.id, job->engine_idx,
+                job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
+
+       xa_unlock(&vdev->submitted_jobs_xa);
+
        mutex_unlock(&file_priv->lock);
+
+       if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+               ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+
        return 0;
 
-err_xa_erase:
-       xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_job_put:
-       job_put(job);
-err_unlock:
+err_erase_xa:
+       __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock_submitted_jobs_xa:
+       xa_unlock(&vdev->submitted_jobs_xa);
+err_unlock_file_priv:
        mutex_unlock(&file_priv->lock);
+       ivpu_rpm_put(vdev);
        return ret;
 }
 
@@ -488,6 +466,9 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (params->engine > DRM_IVPU_ENGINE_COPY)
                return -EINVAL;
 
+       if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+               return -EINVAL;
+
        if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
                return -EINVAL;
 
@@ -509,44 +490,49 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                             params->buffer_count * sizeof(u32));
        if (ret) {
                ret = -EFAULT;
-               goto free_handles;
+               goto err_free_handles;
        }
 
        if (!drm_dev_enter(&vdev->drm, &idx)) {
                ret = -ENODEV;
-               goto free_handles;
+               goto err_free_handles;
        }
 
        ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
                 file_priv->ctx.id, params->buffer_count);
 
-       job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
+       job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
        if (!job) {
                ivpu_err(vdev, "Failed to create job\n");
                ret = -ENOMEM;
-               goto dev_exit;
+               goto err_exit_dev;
        }
 
        ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
                                              params->commands_offset);
        if (ret) {
-               ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
-               goto job_put;
+               ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
+               goto err_destroy_job;
        }
 
-       ret = ivpu_direct_job_submission(job);
-       if (ret) {
-               dma_fence_signal(job->done_fence);
-               ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
-       }
+       down_read(&vdev->pm->reset_lock);
+       ret = ivpu_job_submit(job);
+       up_read(&vdev->pm->reset_lock);
+       if (ret)
+               goto err_signal_fence;
 
-job_put:
-       job_put(job);
-dev_exit:
        drm_dev_exit(idx);
-free_handles:
        kfree(buf_handles);
+       return ret;
 
+err_signal_fence:
+       dma_fence_signal(job->done_fence);
+err_destroy_job:
+       ivpu_job_destroy(job);
+err_exit_dev:
+       drm_dev_exit(idx);
+err_free_handles:
+       kfree(buf_handles);
        return ret;
 }
 
@@ -568,7 +554,7 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
        }
 
        payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
-       ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
+       ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
        if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
                ivpu_start_job_timeout_detection(vdev);
 }
index 45a2f2ec82e5ba69110d737e154c38bf6765174a..ca4984071cc76b17d858ae86929a48a9cea39c88 100644 (file)
@@ -43,7 +43,6 @@ struct ivpu_cmdq {
                          will update the job status
  */
 struct ivpu_job {
-       struct kref ref;
        struct ivpu_device *vdev;
        struct ivpu_file_priv *file_priv;
        struct dma_fence *done_fence;
@@ -56,7 +55,7 @@ struct ivpu_job {
 
 int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
 
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
 void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
 
 void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
index 2228c44b115fa0e4d48f36c115e2fdc7b434a8c0..91bd640655ab363b51df17a25cb9589293adc804 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/highmem.h>
 
 #include "ivpu_drv.h"
+#include "ivpu_hw.h"
 #include "ivpu_hw_reg_io.h"
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
 
 #define IVPU_MMU_Q_COUNT_LOG2          4 /* 16 entries */
 #define IVPU_MMU_Q_COUNT               ((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
-#define IVPU_MMU_Q_WRAP_BIT            (IVPU_MMU_Q_COUNT << 1)
-#define IVPU_MMU_Q_WRAP_MASK           (IVPU_MMU_Q_WRAP_BIT - 1)
-#define IVPU_MMU_Q_IDX_MASK            (IVPU_MMU_Q_COUNT - 1)
+#define IVPU_MMU_Q_WRAP_MASK            GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
+#define IVPU_MMU_Q_IDX_MASK             (IVPU_MMU_Q_COUNT - 1)
 #define IVPU_MMU_Q_IDX(val)            ((val) & IVPU_MMU_Q_IDX_MASK)
+#define IVPU_MMU_Q_WRP(val)             ((val) & IVPU_MMU_Q_COUNT)
 
 #define IVPU_MMU_CMDQ_CMD_SIZE         16
 #define IVPU_MMU_CMDQ_SIZE             (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)
@@ -474,20 +475,32 @@ static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
        return 0;
 }
 
+static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
+{
+       return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
+               (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
+}
+
+static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
+{
+       return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
+               (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
+}
+
 static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
 {
-       struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
-       u64 *queue_buffer = q->base;
-       int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
+       struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
+       u64 *queue_buffer = cmdq->base;
+       int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
 
-       if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) {
+       if (ivpu_mmu_queue_is_full(cmdq)) {
                ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
                return -EBUSY;
        }
 
        queue_buffer[idx] = data0;
        queue_buffer[idx + 1] = data1;
-       q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
+       cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
 
        ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
 
@@ -518,6 +531,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
 
                ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
                         ivpu_mmu_cmdq_err_to_str(err));
+               ivpu_hw_diagnose_failure(vdev);
        }
 
        return ret;
@@ -558,7 +572,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
        mmu->cmdq.cons = 0;
 
        memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
-       clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
        mmu->evtq.prod = 0;
        mmu->evtq.cons = 0;
 
@@ -872,20 +885,15 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
        u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
 
        evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
-       if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
+       if (ivpu_mmu_queue_is_empty(evtq))
                return NULL;
 
-       clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
-
        evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
-       REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);
-
        return evt;
 }
 
 void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
 {
-       bool schedule_recovery = false;
        u32 *event;
        u32 ssid;
 
@@ -895,14 +903,22 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
                ivpu_mmu_dump_event(vdev, event);
 
                ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
-               if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
-                       schedule_recovery = true;
-               else
-                       ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+               if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+                       ivpu_pm_trigger_recovery(vdev, "MMU event");
+                       return;
+               }
+
+               ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+               REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
        }
+}
 
-       if (schedule_recovery)
-               ivpu_pm_schedule_recovery(vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
+{
+       u32 *event;
+
+       while ((event = ivpu_mmu_get_event(vdev)) != NULL)
+               ivpu_mmu_dump_event(vdev, event);
 }
 
 void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
index cb551126806baa9bb47a967c7bff916b444c2427..6fa35c240710625670b6879098833c6cd680fb40 100644 (file)
@@ -46,5 +46,6 @@ int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
 
 void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
 void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
 
 #endif /* __IVPU_MMU_H__ */
index 12a8c09d4547d7d9b81cd91d93307e59e648e14f..fe61612992364c65d184eef9c3a3ad3ebb60ce6a 100644 (file)
@@ -355,6 +355,9 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;
 
+               ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+                        ctx->id, dma_addr, vpu_addr, size);
+
                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
@@ -366,6 +369,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 
        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();
+
        mutex_unlock(&ctx->lock);
 
        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -388,14 +392,19 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
        mutex_lock(&ctx->lock);
 
        for_each_sgtable_dma_sg(sgt, sg, i) {
+               dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;
 
+               ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+                        ctx->id, dma_addr, vpu_addr, size);
+
                ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
                vpu_addr += size;
        }
 
        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();
+
        mutex_unlock(&ctx->lock);
 
        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
index 0af8864cb3b55f636bc7418a03dc3c07360e7ac8..f501f27ebafdf6687b5a46ca7e2387faa931af3e 100644 (file)
@@ -13,6 +13,7 @@
 #include "ivpu_drv.h"
 #include "ivpu_hw.h"
 #include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
 #include "ivpu_ipc.h"
 #include "ivpu_job.h"
 #include "ivpu_jsm_msg.h"
@@ -111,6 +112,14 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
        char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
        int ret;
 
+       ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+
+       ret = pm_runtime_resume_and_get(vdev->drm.dev);
+       if (ret)
+               ivpu_err(vdev, "Failed to resume VPU: %d\n", ret);
+
+       ivpu_fw_log_dump(vdev);
+
 retry:
        ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
        if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
@@ -122,11 +131,13 @@ retry:
                ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
 
        kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+       pm_runtime_mark_last_busy(vdev->drm.dev);
+       pm_runtime_put_autosuspend(vdev->drm.dev);
 }
 
-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
 {
-       struct ivpu_pm_info *pm = vdev->pm;
+       ivpu_err(vdev, "Recovery triggered by %s\n", reason);
 
        if (ivpu_disable_recovery) {
                ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
@@ -138,10 +149,11 @@ void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
                return;
        }
 
-       /* Schedule recovery if it's not in progress */
-       if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) {
-               ivpu_hw_irq_disable(vdev);
-               queue_work(system_long_wq, &pm->recovery_work);
+       /* Trigger recovery if it's not in progress */
+       if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
+               ivpu_hw_diagnose_failure(vdev);
+               ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
+               queue_work(system_long_wq, &vdev->pm->recovery_work);
        }
 }
 
@@ -149,12 +161,8 @@ static void ivpu_job_timeout_work(struct work_struct *work)
 {
        struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
        struct ivpu_device *vdev = pm->vdev;
-       unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
 
-       ivpu_err(vdev, "TDR detected, timeout %lu ms", timeout_ms);
-       ivpu_hw_diagnose_failure(vdev);
-
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "TDR");
 }
 
 void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
@@ -227,6 +235,9 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
        bool hw_is_idle = true;
        int ret;
 
+       drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+       drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
+
        ivpu_dbg(vdev, PM, "Runtime suspend..\n");
 
        if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
@@ -247,7 +258,8 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
                ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
 
        if (!hw_is_idle) {
-               ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n");
+               ivpu_err(vdev, "VPU failed to enter idle, force suspended.\n");
+               ivpu_fw_log_dump(vdev);
                ivpu_pm_prepare_cold_boot(vdev);
        } else {
                ivpu_pm_prepare_warm_boot(vdev);
@@ -308,11 +320,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
 {
        struct ivpu_device *vdev = pci_get_drvdata(pdev);
 
-       pm_runtime_get_sync(vdev->drm.dev);
-
        ivpu_dbg(vdev, PM, "Pre-reset..\n");
        atomic_inc(&vdev->pm->reset_counter);
-       atomic_set(&vdev->pm->in_reset, 1);
+       atomic_set(&vdev->pm->reset_pending, 1);
+
+       pm_runtime_get_sync(vdev->drm.dev);
+       down_write(&vdev->pm->reset_lock);
        ivpu_prepare_for_reset(vdev);
        ivpu_hw_reset(vdev);
        ivpu_pm_prepare_cold_boot(vdev);
@@ -329,9 +342,11 @@ void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
        ret = ivpu_resume(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
-       atomic_set(&vdev->pm->in_reset, 0);
+       up_write(&vdev->pm->reset_lock);
+       atomic_set(&vdev->pm->reset_pending, 0);
        ivpu_dbg(vdev, PM, "Post-reset done.\n");
 
+       pm_runtime_mark_last_busy(vdev->drm.dev);
        pm_runtime_put_autosuspend(vdev->drm.dev);
 }
 
@@ -344,7 +359,10 @@ void ivpu_pm_init(struct ivpu_device *vdev)
        pm->vdev = vdev;
        pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
 
-       atomic_set(&pm->in_reset, 0);
+       init_rwsem(&pm->reset_lock);
+       atomic_set(&pm->reset_pending, 0);
+       atomic_set(&pm->reset_counter, 0);
+
        INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
        INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);
 
index 97c6e0b0aa42d0a5a071940c5b54a052f99a748c..ec60fbeefefc65bbca4ed619d7265aabffd1bb61 100644 (file)
@@ -6,6 +6,7 @@
 #ifndef __IVPU_PM_H__
 #define __IVPU_PM_H__
 
+#include <linux/rwsem.h>
 #include <linux/types.h>
 
 struct ivpu_device;
@@ -14,8 +15,9 @@ struct ivpu_pm_info {
        struct ivpu_device *vdev;
        struct delayed_work job_timeout_work;
        struct work_struct recovery_work;
-       atomic_t in_reset;
+       struct rw_semaphore reset_lock;
        atomic_t reset_counter;
+       atomic_t reset_pending;
        bool is_warmboot;
        u32 suspend_reschedule_counter;
 };
@@ -37,7 +39,7 @@ int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
 int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev);
 void ivpu_rpm_put(struct ivpu_device *vdev);
 
-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev);
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason);
 void ivpu_start_job_timeout_detection(struct ivpu_device *vdev);
 void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
 
index 6f2bfcf7645ce6c94732bb0b43512feac90762e3..3c3f8037ebedddbbfa756f52c98fd209053559c7 100644 (file)
@@ -314,7 +314,6 @@ config ACPI_HOTPLUG_CPU
        bool
        depends on ACPI_PROCESSOR && HOTPLUG_CPU
        select ACPI_CONTAINER
-       default y
 
 config ACPI_PROCESSOR_AGGREGATOR
        tristate "Processor Aggregator"
index 0f5218e361df5c2d8e05d9a5e6d7fa205c1e6376..4fe2ef54088c65234ace0f111a0ffe93f40f8fb0 100644 (file)
@@ -184,24 +184,6 @@ static void __init acpi_pcc_cpufreq_init(void) {}
 
 /* Initialization */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
-int __weak acpi_map_cpu(acpi_handle handle,
-               phys_cpuid_t physid, u32 acpi_id, int *pcpu)
-{
-       return -ENODEV;
-}
-
-int __weak acpi_unmap_cpu(int cpu)
-{
-       return -ENODEV;
-}
-
-int __weak arch_register_cpu(int cpu)
-{
-       return -ENODEV;
-}
-
-void __weak arch_unregister_cpu(int cpu) {}
-
 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
        unsigned long long sta;
index ab2a82cb1b0b48ab21682bdb87c052707f19d282..fe825a432c5bfcce4776d83e0f072c9675507dae 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/timer.h>
 #include <linux/cper.h>
+#include <linux/cxl-event.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
 #include <linux/ratelimit.h>
@@ -673,6 +674,52 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
        schedule_work(&entry->work);
 }
 
+/*
+ * Only a single callback can be registered for CXL CPER events.
+ */
+static DECLARE_RWSEM(cxl_cper_rw_sem);
+static cxl_cper_callback cper_callback;
+
+static void cxl_cper_post_event(enum cxl_event_type event_type,
+                               struct cxl_cper_event_rec *rec)
+{
+       if (rec->hdr.length <= sizeof(rec->hdr) ||
+           rec->hdr.length > sizeof(*rec)) {
+               pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
+                      rec->hdr.length);
+               return;
+       }
+
+       if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
+               pr_err(FW_WARN "CXL CPER invalid event\n");
+               return;
+       }
+
+       guard(rwsem_read)(&cxl_cper_rw_sem);
+       if (cper_callback)
+               cper_callback(event_type, rec);
+}
+
+int cxl_cper_register_callback(cxl_cper_callback callback)
+{
+       guard(rwsem_write)(&cxl_cper_rw_sem);
+       if (cper_callback)
+               return -EINVAL;
+       cper_callback = callback;
+       return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL);
+
+int cxl_cper_unregister_callback(cxl_cper_callback callback)
+{
+       guard(rwsem_write)(&cxl_cper_rw_sem);
+       if (callback != cper_callback)
+               return -EINVAL;
+       cper_callback = NULL;
+       return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL);
+
 static bool ghes_do_proc(struct ghes *ghes,
                         const struct acpi_hest_generic_status *estatus)
 {
@@ -707,6 +754,22 @@ static bool ghes_do_proc(struct ghes *ghes,
                }
                else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
                        queued = ghes_handle_arm_hw_error(gdata, sev, sync);
+               } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
+                       struct cxl_cper_event_rec *rec =
+                               acpi_hest_get_payload(gdata);
+
+                       cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
+               } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
+                       struct cxl_cper_event_rec *rec =
+                               acpi_hest_get_payload(gdata);
+
+                       cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
+               } else if (guid_equal(sec_type,
+                                     &CPER_SEC_CXL_MEM_MODULE_GUID)) {
+                       struct cxl_cper_event_rec *rec =
+                               acpi_hest_get_payload(gdata);
+
+                       cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
                } else {
                        void *err = acpi_hest_get_payload(gdata);
 
index 9ef5f1bdcfdbcf5d5f09827f4e5fae7e56f6e10b..d6b85f0f6082f72421168b26fec4b2fff09b4e13 100644 (file)
@@ -58,14 +58,22 @@ struct target_cache {
        struct node_cache_attrs cache_attrs;
 };
 
+enum {
+       NODE_ACCESS_CLASS_0 = 0,
+       NODE_ACCESS_CLASS_1,
+       NODE_ACCESS_CLASS_GENPORT_SINK,
+       NODE_ACCESS_CLASS_MAX,
+};
+
 struct memory_target {
        struct list_head node;
        unsigned int memory_pxm;
        unsigned int processor_pxm;
        struct resource memregions;
-       struct node_hmem_attrs hmem_attrs[2];
+       struct access_coordinate coord[NODE_ACCESS_CLASS_MAX];
        struct list_head caches;
        struct node_cache_attrs cache_attrs;
+       u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
        bool registered;
 };
 
@@ -100,6 +108,47 @@ static struct memory_target *find_mem_target(unsigned int mem_pxm)
        return NULL;
 }
 
+static struct memory_target *acpi_find_genport_target(u32 uid)
+{
+       struct memory_target *target;
+       u32 target_uid;
+       u8 *uid_ptr;
+
+       list_for_each_entry(target, &targets, node) {
+               uid_ptr = target->gen_port_device_handle + 8;
+               target_uid = *(u32 *)uid_ptr;
+               if (uid == target_uid)
+                       return target;
+       }
+
+       return NULL;
+}
+
+/**
+ * acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port
+ * @uid: ACPI unique id
+ * @coord: The access coordinates written back out for the generic port
+ *
+ * Return: 0 on success. Errno on failure.
+ *
+ * Only supports device handles that are ACPI. Assume ACPI0016 HID for CXL.
+ */
+int acpi_get_genport_coordinates(u32 uid,
+                                struct access_coordinate *coord)
+{
+       struct memory_target *target;
+
+       guard(mutex)(&target_lock);
+       target = acpi_find_genport_target(uid);
+       if (!target)
+               return -ENOENT;
+
+       *coord = target->coord[NODE_ACCESS_CLASS_GENPORT_SINK];
+
+       return 0;
+}
+EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, CXL);
+
 static __init void alloc_memory_initiator(unsigned int cpu_pxm)
 {
        struct memory_initiator *initiator;
@@ -120,8 +169,7 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
        list_add_tail(&initiator->node, &initiators);
 }
 
-static __init void alloc_memory_target(unsigned int mem_pxm,
-               resource_size_t start, resource_size_t len)
+static __init struct memory_target *alloc_target(unsigned int mem_pxm)
 {
        struct memory_target *target;
 
@@ -129,7 +177,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm,
        if (!target) {
                target = kzalloc(sizeof(*target), GFP_KERNEL);
                if (!target)
-                       return;
+                       return NULL;
                target->memory_pxm = mem_pxm;
                target->processor_pxm = PXM_INVAL;
                target->memregions = (struct resource) {
@@ -142,6 +190,19 @@ static __init void alloc_memory_target(unsigned int mem_pxm,
                INIT_LIST_HEAD(&target->caches);
        }
 
+       return target;
+}
+
+static __init void alloc_memory_target(unsigned int mem_pxm,
+                                      resource_size_t start,
+                                      resource_size_t len)
+{
+       struct memory_target *target;
+
+       target = alloc_target(mem_pxm);
+       if (!target)
+               return;
+
        /*
         * There are potentially multiple ranges per PXM, so record each
         * in the per-target memregions resource tree.
@@ -152,6 +213,18 @@ static __init void alloc_memory_target(unsigned int mem_pxm,
                                start, start + len, mem_pxm);
 }
 
+static __init void alloc_genport_target(unsigned int mem_pxm, u8 *handle)
+{
+       struct memory_target *target;
+
+       target = alloc_target(mem_pxm);
+       if (!target)
+               return;
+
+       memcpy(target->gen_port_device_handle, handle,
+              ACPI_SRAT_DEVICE_HANDLE_SIZE);
+}
+
 static __init const char *hmat_data_type(u8 type)
 {
        switch (type) {
@@ -228,24 +301,24 @@ static void hmat_update_target_access(struct memory_target *target,
 {
        switch (type) {
        case ACPI_HMAT_ACCESS_LATENCY:
-               target->hmem_attrs[access].read_latency = value;
-               target->hmem_attrs[access].write_latency = value;
+               target->coord[access].read_latency = value;
+               target->coord[access].write_latency = value;
                break;
        case ACPI_HMAT_READ_LATENCY:
-               target->hmem_attrs[access].read_latency = value;
+               target->coord[access].read_latency = value;
                break;
        case ACPI_HMAT_WRITE_LATENCY:
-               target->hmem_attrs[access].write_latency = value;
+               target->coord[access].write_latency = value;
                break;
        case ACPI_HMAT_ACCESS_BANDWIDTH:
-               target->hmem_attrs[access].read_bandwidth = value;
-               target->hmem_attrs[access].write_bandwidth = value;
+               target->coord[access].read_bandwidth = value;
+               target->coord[access].write_bandwidth = value;
                break;
        case ACPI_HMAT_READ_BANDWIDTH:
-               target->hmem_attrs[access].read_bandwidth = value;
+               target->coord[access].read_bandwidth = value;
                break;
        case ACPI_HMAT_WRITE_BANDWIDTH:
-               target->hmem_attrs[access].write_bandwidth = value;
+               target->coord[access].write_bandwidth = value;
                break;
        default:
                break;
@@ -291,11 +364,28 @@ static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
        }
 }
 
+static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_pxm,
+                                     u8 mem_hier, u8 type, u32 value)
+{
+       struct memory_target *target = find_mem_target(tgt_pxm);
+
+       if (mem_hier != ACPI_HMAT_MEMORY)
+               return;
+
+       if (target && target->processor_pxm == init_pxm) {
+               hmat_update_target_access(target, type, value,
+                                         NODE_ACCESS_CLASS_0);
+               /* If the node has a CPU, update access 1 */
+               if (node_state(pxm_to_node(init_pxm), N_CPU))
+                       hmat_update_target_access(target, type, value,
+                                                 NODE_ACCESS_CLASS_1);
+       }
+}
+
 static __init int hmat_parse_locality(union acpi_subtable_headers *header,
                                      const unsigned long end)
 {
        struct acpi_hmat_locality *hmat_loc = (void *)header;
-       struct memory_target *target;
        unsigned int init, targ, total_size, ipds, tpds;
        u32 *inits, *targs, value;
        u16 *entries;
@@ -336,15 +426,8 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
                                inits[init], targs[targ], value,
                                hmat_data_type_suffix(type));
 
-                       if (mem_hier == ACPI_HMAT_MEMORY) {
-                               target = find_mem_target(targs[targ]);
-                               if (target && target->processor_pxm == inits[init]) {
-                                       hmat_update_target_access(target, type, value, 0);
-                                       /* If the node has a CPU, update access 1 */
-                                       if (node_state(pxm_to_node(inits[init]), N_CPU))
-                                               hmat_update_target_access(target, type, value, 1);
-                               }
-                       }
+                       hmat_update_target(targs[targ], inits[init],
+                                          mem_hier, type, value);
                }
        }
 
@@ -491,6 +574,27 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
        return 0;
 }
 
+static __init int srat_parse_genport_affinity(union acpi_subtable_headers *header,
+                                             const unsigned long end)
+{
+       struct acpi_srat_generic_affinity *ga = (void *)header;
+
+       if (!ga)
+               return -EINVAL;
+
+       if (!(ga->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
+               return 0;
+
+       /* Skip PCI device_handle for now */
+       if (ga->device_handle_type != 0)
+               return 0;
+
+       alloc_genport_target(ga->proximity_domain,
+                            (u8 *)ga->device_handle);
+
+       return 0;
+}
+
 static u32 hmat_initiator_perf(struct memory_target *target,
                               struct memory_initiator *initiator,
                               struct acpi_hmat_locality *hmat_loc)
@@ -592,6 +696,11 @@ static void hmat_update_target_attrs(struct memory_target *target,
        u32 best = 0;
        int i;
 
+       /* Don't update for generic port if there's no device handle */
+       if (access == NODE_ACCESS_CLASS_GENPORT_SINK &&
+           !(*(u16 *)target->gen_port_device_handle))
+               return;
+
        bitmap_zero(p_nodes, MAX_NUMNODES);
        /*
         * If the Address Range Structure provides a local processor pxm, set
@@ -661,6 +770,14 @@ static void __hmat_register_target_initiators(struct memory_target *target,
        }
 }
 
+static void hmat_register_generic_target_initiators(struct memory_target *target)
+{
+       static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+
+       __hmat_register_target_initiators(target, p_nodes,
+                                         NODE_ACCESS_CLASS_GENPORT_SINK);
+}
+
 static void hmat_register_target_initiators(struct memory_target *target)
 {
        static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
@@ -681,7 +798,7 @@ static void hmat_register_target_cache(struct memory_target *target)
 static void hmat_register_target_perf(struct memory_target *target, int access)
 {
        unsigned mem_nid = pxm_to_node(target->memory_pxm);
-       node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
+       node_set_perf_attrs(mem_nid, &target->coord[access], access);
 }
 
 static void hmat_register_target_devices(struct memory_target *target)
@@ -712,6 +829,17 @@ static void hmat_register_target(struct memory_target *target)
         */
        hmat_register_target_devices(target);
 
+       /*
+        * Register generic port perf numbers. The nid may not be
+        * initialized and is still NUMA_NO_NODE.
+        */
+       mutex_lock(&target_lock);
+       if (*(u16 *)target->gen_port_device_handle) {
+               hmat_register_generic_target_initiators(target);
+               target->registered = true;
+       }
+       mutex_unlock(&target_lock);
+
        /*
         * Skip offline nodes. This can happen when memory
         * marked EFI_MEMORY_SP, "specific purpose", is applied
@@ -726,8 +854,8 @@ static void hmat_register_target(struct memory_target *target)
        if (!target->registered) {
                hmat_register_target_initiators(target);
                hmat_register_target_cache(target);
-               hmat_register_target_perf(target, 0);
-               hmat_register_target_perf(target, 1);
+               hmat_register_target_perf(target, NODE_ACCESS_CLASS_0);
+               hmat_register_target_perf(target, NODE_ACCESS_CLASS_1);
                target->registered = true;
        }
        mutex_unlock(&target_lock);
@@ -765,7 +893,7 @@ static int hmat_set_default_dram_perf(void)
        int rc;
        int nid, pxm;
        struct memory_target *target;
-       struct node_hmem_attrs *attrs;
+       struct access_coordinate *attrs;
 
        if (!default_dram_type)
                return -EIO;
@@ -775,7 +903,7 @@ static int hmat_set_default_dram_perf(void)
                target = find_mem_target(pxm);
                if (!target)
                        continue;
-               attrs = &target->hmem_attrs[1];
+               attrs = &target->coord[1];
                rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT");
                if (rc)
                        return rc;
@@ -789,7 +917,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
 {
        static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
        struct memory_target *target;
-       struct node_hmem_attrs *perf;
+       struct access_coordinate *perf;
        int *adist = data;
        int pxm;
 
@@ -802,7 +930,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
        hmat_update_target_attrs(target, p_nodes, 1);
        mutex_unlock(&target_lock);
 
-       perf = &target->hmem_attrs[1];
+       perf = &target->coord[1];
 
        if (mt_perf_to_adistance(perf, adist))
                return NOTIFY_OK;
@@ -870,6 +998,13 @@ static __init int hmat_init(void)
                                ACPI_SRAT_TYPE_MEMORY_AFFINITY,
                                srat_parse_mem_affinity, 0) < 0)
                goto out_put;
+
+       if (acpi_table_parse_entries(ACPI_SIG_SRAT,
+                                    sizeof(struct acpi_table_srat),
+                                    ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY,
+                                    srat_parse_genport_affinity, 0) < 0)
+               goto out_put;
+
        acpi_put_table(tbl);
 
        status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
index 07d76fb740b6d6e73e119d54c35089cf8b4cb173..a6ead5204046b14dd9d8bf0aba07bb3eac27c51c 100644 (file)
@@ -881,6 +881,7 @@ static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *f
  * @index: Index of the reference to return
  * @num_args: Maximum number of arguments after each reference
  * @args: Location to store the returned reference with optional arguments
+ *       (may be NULL)
  *
  * Find property with @name, verifify that it is a package containing at least
  * one object reference and if so, store the ACPI device object pointer to the
@@ -938,6 +939,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
                if (!device)
                        return -EINVAL;
 
+               if (!args)
+                       return 0;
+
                args->fwnode = acpi_fwnode_handle(device);
                args->nargs = 0;
 
index 0e2c397b139959c8928f9f11b0cf67cbbe6ea21b..dacad1d846c0dbd0bf3765395a2a35ef80c6c390 100644 (file)
@@ -461,6 +461,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
                },
        },
+       {
+               /* Asus ExpertBook B1502CGA */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "B1502CGA"),
+               },
+       },
        {
                /* Asus ExpertBook B2402CBA */
                .matches = {
@@ -482,6 +489,20 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
                },
        },
+       {
+               /* Asus Vivobook E1504GA */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "E1504GA"),
+               },
+       },
+       {
+               /* Asus Vivobook E1504GAB */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "E1504GAB"),
+               },
+       },
        {
                /* LG Electronics 17U70P */
                .matches = {
index 0ba008773b00079cad8df514e680ce35bf9960f1..e6ed1ba91e5c9152c7eeec242c302f91fda42d5d 100644 (file)
@@ -1561,8 +1561,7 @@ static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev)
        return fwspec ? fwspec->ops : NULL;
 }
 
-static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
-                                                      const u32 *id_in)
+static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
 {
        int err;
        const struct iommu_ops *ops;
@@ -1576,7 +1575,7 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
        ops = acpi_iommu_fwspec_ops(dev);
        if (ops) {
                mutex_unlock(&iommu_probe_device_lock);
-               return ops;
+               return 0;
        }
 
        err = iort_iommu_configure_id(dev, id_in);
@@ -1593,12 +1592,14 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
 
        /* Ignore all other errors apart from EPROBE_DEFER */
        if (err == -EPROBE_DEFER) {
-               return ERR_PTR(err);
+               return err;
        } else if (err) {
                dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
-               return NULL;
+               return -ENODEV;
        }
-       return acpi_iommu_fwspec_ops(dev);
+       if (!acpi_iommu_fwspec_ops(dev))
+               return -ENODEV;
+       return 0;
 }
 
 #else /* !CONFIG_IOMMU_API */
@@ -1610,10 +1611,9 @@ int acpi_iommu_fwspec_init(struct device *dev, u32 id,
        return -ENODEV;
 }
 
-static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
-                                                      const u32 *id_in)
+static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
 {
-       return NULL;
+       return -ENODEV;
 }
 
 #endif /* !CONFIG_IOMMU_API */
@@ -1627,7 +1627,7 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
 int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
                          const u32 *input_id)
 {
-       const struct iommu_ops *iommu;
+       int ret;
 
        if (attr == DEV_DMA_NOT_SUPPORTED) {
                set_dma_ops(dev, &dma_dummy_ops);
@@ -1636,12 +1636,16 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
 
        acpi_arch_dma_setup(dev);
 
-       iommu = acpi_iommu_configure_id(dev, input_id);
-       if (PTR_ERR(iommu) == -EPROBE_DEFER)
+       ret = acpi_iommu_configure_id(dev, input_id);
+       if (ret == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
-       arch_setup_dma_ops(dev, 0, U64_MAX,
-                               iommu, attr == DEV_DMA_COHERENT);
+       /*
+        * Historically this routine doesn't fail driver probing due to errors
+        * in acpi_iommu_configure_id()
+        */
+
+       arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
 
        return 0;
 }
index c1516337f6682840bdee196e50c1de0cc2472a12..b07f7d091d133c6ade25749ca746b2a64c6bb5e8 100644 (file)
@@ -251,8 +251,9 @@ int __init_or_acpilib acpi_table_parse_entries_array(
                return -ENODEV;
        }
 
-       count = acpi_parse_entries_array(id, table_size, table_header,
-                       proc, proc_num, max_entries);
+       count = acpi_parse_entries_array(id, table_size,
+                                        (union fw_table_header *)table_header,
+                                        proc, proc_num, max_entries);
 
        acpi_put_table(table_header);
        return count;
index 7658103ba760d688ae45a5f9b6b5a6db36e220fb..eca24f41556df04ac61747e05aace9622fbcc580 100644 (file)
@@ -478,6 +478,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
 {
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
+
+       /* (e)poll-based threads require an explicit wakeup signal when
+        * queuing their own work; they rely on these events to consume
+        * messages without I/O block. Without it, threads risk waiting
+        * indefinitely without handling the work.
+        */
+       if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+           thread->pid == current->pid && !thread->process_todo)
+               wake_up_interruptible_sync(&thread->wait);
+
        thread->process_todo = true;
 }
 
@@ -2077,9 +2087,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                         * Convert the address to an offset relative to
                         * the base of the transaction buffer.
                         */
-                       fda_offset =
-                           (parent->buffer - (uintptr_t)buffer->user_data) +
-                           fda->parent_offset;
+                       fda_offset = parent->buffer - buffer->user_data +
+                               fda->parent_offset;
                        for (fd_index = 0; fd_index < fda->num_fds;
                             fd_index++) {
                                u32 fd;
@@ -2597,7 +2606,7 @@ static int binder_translate_fd_array(struct list_head *pf_head,
         * Convert the address to an offset relative to
         * the base of the transaction buffer.
         */
-       fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
+       fda_offset = parent->buffer - t->buffer->user_data +
                fda->parent_offset;
        sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
                                fda->parent_offset;
@@ -2672,8 +2681,9 @@ static int binder_fixup_parent(struct list_head *pf_head,
                                  proc->pid, thread->pid);
                return -EINVAL;
        }
-       buffer_offset = bp->parent_offset +
-                       (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
+
+       buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
+
        return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
 }
 
@@ -3225,7 +3235,7 @@ static void binder_transaction(struct binder_proc *proc,
 
        t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
                tr->offsets_size, extra_buffers_size,
-               !reply && (t->flags & TF_ONE_WAY), current->tgid);
+               !reply && (t->flags & TF_ONE_WAY));
        if (IS_ERR(t->buffer)) {
                char *s;
 
@@ -3250,7 +3260,7 @@ static void binder_transaction(struct binder_proc *proc,
                                    ALIGN(extra_buffers_size, sizeof(void *)) -
                                    ALIGN(secctx_sz, sizeof(u64));
 
-               t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
+               t->security_ctx = t->buffer->user_data + buf_offset;
                err = binder_alloc_copy_to_buffer(&target_proc->alloc,
                                                  t->buffer, buf_offset,
                                                  secctx, secctx_sz);
@@ -3527,8 +3537,7 @@ static void binder_transaction(struct binder_proc *proc,
                                goto err_translate_failed;
                        }
                        /* Fixup buffer pointer to target proc address space */
-                       bp->buffer = (uintptr_t)
-                               t->buffer->user_data + sg_buf_offset;
+                       bp->buffer = t->buffer->user_data + sg_buf_offset;
                        sg_buf_offset += ALIGN(bp->length, sizeof(u64));
 
                        num_valid = (buffer_offset - off_start_offset) /
@@ -4698,7 +4707,7 @@ retry:
                }
                trd->data_size = t->buffer->data_size;
                trd->offsets_size = t->buffer->offsets_size;
-               trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
+               trd->data.ptr.buffer = t->buffer->user_data;
                trd->data.ptr.offsets = trd->data.ptr.buffer +
                                        ALIGN(t->buffer->data_size,
                                            sizeof(void *));
@@ -5030,7 +5039,7 @@ static __poll_t binder_poll(struct file *filp,
 
        thread = binder_get_thread(proc);
        if (!thread)
-               return POLLERR;
+               return EPOLLERR;
 
        binder_inner_proc_lock(thread->proc);
        thread->looper |= BINDER_LOOPER_STATE_POLL;
@@ -5981,9 +5990,9 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
        }
        if (buffer->target_node)
                seq_printf(m, " node %d", buffer->target_node->debug_id);
-       seq_printf(m, " size %zd:%zd data %pK\n",
+       seq_printf(m, " size %zd:%zd offset %lx\n",
                   buffer->data_size, buffer->offsets_size,
-                  buffer->user_data);
+                  proc->alloc.buffer - buffer->user_data);
 }
 
 static void print_binder_work_ilocked(struct seq_file *m,
index f69d30c9f50faebfa97c65e179bb8edb2eca1c2d..e0e4dc38b6920737c2d9ec432664d0b160834de8 100644 (file)
@@ -26,7 +26,7 @@
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
-struct list_lru binder_alloc_lru;
+struct list_lru binder_freelist;
 
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
@@ -125,23 +125,20 @@ static void binder_insert_allocated_buffer_locked(
 
 static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
-               uintptr_t user_ptr)
+               unsigned long user_ptr)
 {
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
-       void __user *uptr;
-
-       uptr = (void __user *)user_ptr;
 
        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);
 
-               if (uptr < buffer->user_data)
+               if (user_ptr < buffer->user_data) {
                        n = n->rb_left;
-               else if (uptr > buffer->user_data)
+               } else if (user_ptr > buffer->user_data) {
                        n = n->rb_right;
-               else {
+               } else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer when in use by kernel or
@@ -168,145 +165,168 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
  * Return:     Pointer to buffer or NULL
  */
 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
-                                                  uintptr_t user_ptr)
+                                                  unsigned long user_ptr)
 {
        struct binder_buffer *buffer;
 
-       mutex_lock(&alloc->mutex);
+       spin_lock(&alloc->lock);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
-       mutex_unlock(&alloc->mutex);
+       spin_unlock(&alloc->lock);
        return buffer;
 }
 
-static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
-                                   void __user *start, void __user *end)
+static inline void
+binder_set_installed_page(struct binder_lru_page *lru_page,
+                         struct page *page)
+{
+       /* Pairs with acquire in binder_get_installed_page() */
+       smp_store_release(&lru_page->page_ptr, page);
+}
+
+static inline struct page *
+binder_get_installed_page(struct binder_lru_page *lru_page)
+{
+       /* Pairs with release in binder_set_installed_page() */
+       return smp_load_acquire(&lru_page->page_ptr);
+}
+
+static void binder_lru_freelist_add(struct binder_alloc *alloc,
+                                   unsigned long start, unsigned long end)
 {
-       void __user *page_addr;
-       unsigned long user_page_addr;
        struct binder_lru_page *page;
-       struct vm_area_struct *vma = NULL;
-       struct mm_struct *mm = NULL;
-       bool need_mm = false;
+       unsigned long page_addr;
 
-       binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                    "%d: %s pages %pK-%pK\n", alloc->pid,
-                    allocate ? "allocate" : "free", start, end);
+       trace_binder_update_page_range(alloc, false, start, end);
 
-       if (end <= start)
-               return 0;
+       for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+               size_t index;
+               int ret;
 
-       trace_binder_update_page_range(alloc, allocate, start, end);
+               index = (page_addr - alloc->buffer) / PAGE_SIZE;
+               page = &alloc->pages[index];
 
-       if (allocate == 0)
-               goto free_range;
+               if (!binder_get_installed_page(page))
+                       continue;
 
-       for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
-               page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
-               if (!page->page_ptr) {
-                       need_mm = true;
-                       break;
-               }
+               trace_binder_free_lru_start(alloc, index);
+
+               ret = list_lru_add_obj(&binder_freelist, &page->lru);
+               WARN_ON(!ret);
+
+               trace_binder_free_lru_end(alloc, index);
        }
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+                                     struct binder_lru_page *lru_page,
+                                     unsigned long addr)
+{
+       struct page *page;
+       int ret = 0;
 
-       if (need_mm && mmget_not_zero(alloc->mm))
-               mm = alloc->mm;
+       if (!mmget_not_zero(alloc->mm))
+               return -ESRCH;
 
-       if (mm) {
-               mmap_write_lock(mm);
-               vma = alloc->vma;
+       /*
+        * Protected with mmap_sem in write mode as multiple tasks
+        * might race to install the same page.
+        */
+       mmap_write_lock(alloc->mm);
+       if (binder_get_installed_page(lru_page))
+               goto out;
+
+       if (!alloc->vma) {
+               pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
+               ret = -ESRCH;
+               goto out;
        }
 
-       if (!vma && need_mm) {
-               binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
-                                  "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
-                                  alloc->pid);
-               goto err_no_vma;
+       page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+       if (!page) {
+               pr_err("%d: failed to allocate page\n", alloc->pid);
+               ret = -ENOMEM;
+               goto out;
        }
 
-       for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+       ret = vm_insert_page(alloc->vma, addr, page);
+       if (ret) {
+               pr_err("%d: %s failed to insert page at offset %lx with %d\n",
+                      alloc->pid, __func__, addr - alloc->buffer, ret);
+               __free_page(page);
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* Mark page installation complete and safe to use */
+       binder_set_installed_page(lru_page, page);
+out:
+       mmap_write_unlock(alloc->mm);
+       mmput_async(alloc->mm);
+       return ret;
+}
+
+static int binder_install_buffer_pages(struct binder_alloc *alloc,
+                                      struct binder_buffer *buffer,
+                                      size_t size)
+{
+       struct binder_lru_page *page;
+       unsigned long start, final;
+       unsigned long page_addr;
+
+       start = buffer->user_data & PAGE_MASK;
+       final = PAGE_ALIGN(buffer->user_data + size);
+
+       for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
+               unsigned long index;
                int ret;
-               bool on_lru;
-               size_t index;
 
                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];
 
-               if (page->page_ptr) {
-                       trace_binder_alloc_lru_start(alloc, index);
-
-                       on_lru = list_lru_del_obj(&binder_alloc_lru, &page->lru);
-                       WARN_ON(!on_lru);
-
-                       trace_binder_alloc_lru_end(alloc, index);
+               if (binder_get_installed_page(page))
                        continue;
-               }
-
-               if (WARN_ON(!vma))
-                       goto err_page_ptr_cleared;
 
                trace_binder_alloc_page_start(alloc, index);
-               page->page_ptr = alloc_page(GFP_KERNEL |
-                                           __GFP_HIGHMEM |
-                                           __GFP_ZERO);
-               if (!page->page_ptr) {
-                       pr_err("%d: binder_alloc_buf failed for page at %pK\n",
-                               alloc->pid, page_addr);
-                       goto err_alloc_page_failed;
-               }
-               page->alloc = alloc;
-               INIT_LIST_HEAD(&page->lru);
-
-               user_page_addr = (uintptr_t)page_addr;
-               ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
-               if (ret) {
-                       pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
-                              alloc->pid, user_page_addr);
-                       goto err_vm_insert_page_failed;
-               }
 
-               if (index + 1 > alloc->pages_high)
-                       alloc->pages_high = index + 1;
+               ret = binder_install_single_page(alloc, page, page_addr);
+               if (ret)
+                       return ret;
 
                trace_binder_alloc_page_end(alloc, index);
        }
-       if (mm) {
-               mmap_write_unlock(mm);
-               mmput(mm);
-       }
+
        return 0;
+}
 
-free_range:
-       for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
-               bool ret;
-               size_t index;
+/* The range of pages should exclude those shared with other buffers */
+static void binder_lru_freelist_del(struct binder_alloc *alloc,
+                                   unsigned long start, unsigned long end)
+{
+       struct binder_lru_page *page;
+       unsigned long page_addr;
+
+       trace_binder_update_page_range(alloc, true, start, end);
+
+       for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+               unsigned long index;
+               bool on_lru;
 
                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];
 
-               trace_binder_free_lru_start(alloc, index);
+               if (page->page_ptr) {
+                       trace_binder_alloc_lru_start(alloc, index);
 
-               ret = list_lru_add_obj(&binder_alloc_lru, &page->lru);
-               WARN_ON(!ret);
+                       on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+                       WARN_ON(!on_lru);
 
-               trace_binder_free_lru_end(alloc, index);
-               if (page_addr == start)
-                       break;
-               continue;
-
-err_vm_insert_page_failed:
-               __free_page(page->page_ptr);
-               page->page_ptr = NULL;
-err_alloc_page_failed:
-err_page_ptr_cleared:
-               if (page_addr == start)
-                       break;
-       }
-err_no_vma:
-       if (mm) {
-               mmap_write_unlock(mm);
-               mmput(mm);
+                       trace_binder_alloc_lru_end(alloc, index);
+                       continue;
+               }
+
+               if (index + 1 > alloc->pages_high)
+                       alloc->pages_high = index + 1;
        }
-       return vma ? -ENOMEM : -ESRCH;
 }
 
 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
@@ -323,7 +343,44 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
        return smp_load_acquire(&alloc->vma);
 }
 
-static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static void debug_no_space_locked(struct binder_alloc *alloc)
+{
+       size_t largest_alloc_size = 0;
+       struct binder_buffer *buffer;
+       size_t allocated_buffers = 0;
+       size_t largest_free_size = 0;
+       size_t total_alloc_size = 0;
+       size_t total_free_size = 0;
+       size_t free_buffers = 0;
+       size_t buffer_size;
+       struct rb_node *n;
+
+       for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+               buffer_size = binder_alloc_buffer_size(alloc, buffer);
+               allocated_buffers++;
+               total_alloc_size += buffer_size;
+               if (buffer_size > largest_alloc_size)
+                       largest_alloc_size = buffer_size;
+       }
+
+       for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+               buffer_size = binder_alloc_buffer_size(alloc, buffer);
+               free_buffers++;
+               total_free_size += buffer_size;
+               if (buffer_size > largest_free_size)
+                       largest_free_size = buffer_size;
+       }
+
+       binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+                          "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+                          total_alloc_size, allocated_buffers,
+                          largest_alloc_size, total_free_size,
+                          free_buffers, largest_free_size);
+}
+
+static bool debug_low_async_space_locked(struct binder_alloc *alloc)
 {
        /*
         * Find the amount and size of buffers allocated by the current caller;
@@ -332,10 +389,20 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
         * and at some point we'll catch them in the act. This is more efficient
         * than keeping a map per pid.
         */
-       struct rb_node *n;
        struct binder_buffer *buffer;
        size_t total_alloc_size = 0;
+       int pid = current->tgid;
        size_t num_buffers = 0;
+       struct rb_node *n;
+
+       /*
+        * Only start detecting spammers once we have less than 20% of async
+        * space left (which is less than 10% of total buffer size).
+        */
+       if (alloc->free_async_space >= alloc->buffer_size / 10) {
+               alloc->oneway_spam_detected = false;
+               return false;
+       }
 
        for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                 n = rb_next(n)) {
@@ -344,8 +411,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
                        continue;
                if (!buffer->async_transaction)
                        continue;
-               total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
-                       + sizeof(struct binder_buffer);
+               total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
                num_buffers++;
        }
 
@@ -366,58 +432,28 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
        return false;
 }
 
+/* Callers preallocate @new_buffer, it is freed by this function if unused */
 static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
-                               size_t data_size,
-                               size_t offsets_size,
-                               size_t extra_buffers_size,
-                               int is_async,
-                               int pid)
+                               struct binder_buffer *new_buffer,
+                               size_t size,
+                               int is_async)
 {
        struct rb_node *n = alloc->free_buffers.rb_node;
+       struct rb_node *best_fit = NULL;
        struct binder_buffer *buffer;
+       unsigned long next_used_page;
+       unsigned long curr_last_page;
        size_t buffer_size;
-       struct rb_node *best_fit = NULL;
-       void __user *has_page_addr;
-       void __user *end_page_addr;
-       size_t size, data_offsets_size;
-       int ret;
 
-       /* Check binder_alloc is fully initialized */
-       if (!binder_alloc_get_vma(alloc)) {
-               binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
-                                  "%d: binder_alloc_buf, no vma\n",
-                                  alloc->pid);
-               return ERR_PTR(-ESRCH);
-       }
-
-       data_offsets_size = ALIGN(data_size, sizeof(void *)) +
-               ALIGN(offsets_size, sizeof(void *));
-
-       if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
-               binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                               "%d: got transaction with invalid size %zd-%zd\n",
-                               alloc->pid, data_size, offsets_size);
-               return ERR_PTR(-EINVAL);
-       }
-       size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
-       if (size < data_offsets_size || size < extra_buffers_size) {
-               binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                               "%d: got transaction with invalid extra_buffers_size %zd\n",
-                               alloc->pid, extra_buffers_size);
-               return ERR_PTR(-EINVAL);
-       }
-       if (is_async &&
-           alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+       if (is_async && alloc->free_async_space < size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                             "%d: binder_alloc_buf size %zd failed, no async space left\n",
                              alloc->pid, size);
-               return ERR_PTR(-ENOSPC);
+               buffer = ERR_PTR(-ENOSPC);
+               goto out;
        }
 
-       /* Pad 0-size buffers so they get assigned unique addresses */
-       size = max(size, sizeof(void *));
-
        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
@@ -426,121 +462,92 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
-               } else if (size > buffer_size)
+               } else if (size > buffer_size) {
                        n = n->rb_right;
-               else {
+               } else {
                        best_fit = n;
                        break;
                }
        }
-       if (best_fit == NULL) {
-               size_t allocated_buffers = 0;
-               size_t largest_alloc_size = 0;
-               size_t total_alloc_size = 0;
-               size_t free_buffers = 0;
-               size_t largest_free_size = 0;
-               size_t total_free_size = 0;
-
-               for (n = rb_first(&alloc->allocated_buffers); n != NULL;
-                    n = rb_next(n)) {
-                       buffer = rb_entry(n, struct binder_buffer, rb_node);
-                       buffer_size = binder_alloc_buffer_size(alloc, buffer);
-                       allocated_buffers++;
-                       total_alloc_size += buffer_size;
-                       if (buffer_size > largest_alloc_size)
-                               largest_alloc_size = buffer_size;
-               }
-               for (n = rb_first(&alloc->free_buffers); n != NULL;
-                    n = rb_next(n)) {
-                       buffer = rb_entry(n, struct binder_buffer, rb_node);
-                       buffer_size = binder_alloc_buffer_size(alloc, buffer);
-                       free_buffers++;
-                       total_free_size += buffer_size;
-                       if (buffer_size > largest_free_size)
-                               largest_free_size = buffer_size;
-               }
+
+       if (unlikely(!best_fit)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf size %zd failed, no address space\n",
                                   alloc->pid, size);
-               binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
-                                  "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
-                                  total_alloc_size, allocated_buffers,
-                                  largest_alloc_size, total_free_size,
-                                  free_buffers, largest_free_size);
-               return ERR_PTR(-ENOSPC);
+               debug_no_space_locked(alloc);
+               buffer = ERR_PTR(-ENOSPC);
+               goto out;
        }
-       if (n == NULL) {
+
+       if (buffer_size != size) {
+               /* Found an oversized buffer and needs to be split */
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+               WARN_ON(n || buffer_size == size);
+               new_buffer->user_data = buffer->user_data + size;
+               list_add(&new_buffer->entry, &buffer->entry);
+               new_buffer->free = 1;
+               binder_insert_free_buffer(alloc, new_buffer);
+               new_buffer = NULL;
        }
 
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                      alloc->pid, size, buffer, buffer_size);
 
-       has_page_addr = (void __user *)
-               (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
-       WARN_ON(n && buffer_size != size);
-       end_page_addr =
-               (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
-       if (end_page_addr > has_page_addr)
-               end_page_addr = has_page_addr;
-       ret = binder_update_page_range(alloc, 1, (void __user *)
-               PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
-       if (ret)
-               return ERR_PTR(ret);
-
-       if (buffer_size != size) {
-               struct binder_buffer *new_buffer;
-
-               new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-               if (!new_buffer) {
-                       pr_err("%s: %d failed to alloc new buffer struct\n",
-                              __func__, alloc->pid);
-                       goto err_alloc_buf_struct_failed;
-               }
-               new_buffer->user_data = (u8 __user *)buffer->user_data + size;
-               list_add(&new_buffer->entry, &buffer->entry);
-               new_buffer->free = 1;
-               binder_insert_free_buffer(alloc, new_buffer);
-       }
+       /*
+        * Now we remove the pages from the freelist. A clever calculation
+        * with buffer_size determines if the last page is shared with an
+        * adjacent in-use buffer. In such case, the page has been already
+        * removed from the freelist so we trim our range short.
+        */
+       next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
+       curr_last_page = PAGE_ALIGN(buffer->user_data + size);
+       binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
+                               min(next_used_page, curr_last_page));
 
-       rb_erase(best_fit, &alloc->free_buffers);
+       rb_erase(&buffer->rb_node, &alloc->free_buffers);
        buffer->free = 0;
        buffer->allow_user_free = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
-       binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                    "%d: binder_alloc_buf size %zd got %pK\n",
-                     alloc->pid, size, buffer);
-       buffer->data_size = data_size;
-       buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
-       buffer->extra_buffers_size = extra_buffers_size;
-       buffer->pid = pid;
        buffer->oneway_spam_suspect = false;
        if (is_async) {
-               alloc->free_async_space -= size + sizeof(struct binder_buffer);
+               alloc->free_async_space -= size;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_alloc_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
-               if (alloc->free_async_space < alloc->buffer_size / 10) {
-                       /*
-                        * Start detecting spammers once we have less than 20%
-                        * of async space left (which is less than 10% of total
-                        * buffer size).
-                        */
-                       buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
-               } else {
-                       alloc->oneway_spam_detected = false;
-               }
+               if (debug_low_async_space_locked(alloc))
+                       buffer->oneway_spam_suspect = true;
        }
+
+out:
+       /* Discard possibly unused new_buffer */
+       kfree(new_buffer);
        return buffer;
+}
 
-err_alloc_buf_struct_failed:
-       binder_update_page_range(alloc, 0, (void __user *)
-                                PAGE_ALIGN((uintptr_t)buffer->user_data),
-                                end_page_addr);
-       return ERR_PTR(-ENOMEM);
+/* Calculate the sanitized total size, returns 0 for invalid request */
+static inline size_t sanitized_size(size_t data_size,
+                                   size_t offsets_size,
+                                   size_t extra_buffers_size)
+{
+       size_t total, tmp;
+
+       /* Align to pointer size and check for overflows */
+       tmp = ALIGN(data_size, sizeof(void *)) +
+               ALIGN(offsets_size, sizeof(void *));
+       if (tmp < data_size || tmp < offsets_size)
+               return 0;
+       total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
+       if (total < tmp || total < extra_buffers_size)
+               return 0;
+
+       /* Pad 0-sized buffers so they get a unique address */
+       total = max(total, sizeof(void *));
+
+       return total;
 }
 
 /**
@@ -550,87 +557,101 @@ err_alloc_buf_struct_failed:
  * @offsets_size:       user specified buffer offset
  * @extra_buffers_size: size of extra space for meta-data (eg, security context)
  * @is_async:           buffer for async transaction
- * @pid:                               pid to attribute allocation to (used for debugging)
  *
  * Allocate a new buffer given the requested sizes. Returns
  * the kernel version of the buffer pointer. The size allocated
  * is the sum of the three given sizes (each rounded up to
  * pointer-sized boundary)
  *
- * Return:     The allocated buffer or %NULL if error
+ * Return:     The allocated buffer or %ERR_PTR(-errno) if error
  */
 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
-                                          int is_async,
-                                          int pid)
+                                          int is_async)
 {
-       struct binder_buffer *buffer;
+       struct binder_buffer *buffer, *next;
+       size_t size;
+       int ret;
+
+       /* Check binder_alloc is fully initialized */
+       if (!binder_alloc_get_vma(alloc)) {
+               binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+                                  "%d: binder_alloc_buf, no vma\n",
+                                  alloc->pid);
+               return ERR_PTR(-ESRCH);
+       }
+
+       size = sanitized_size(data_size, offsets_size, extra_buffers_size);
+       if (unlikely(!size)) {
+               binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                                  "%d: got transaction with invalid size %zd-%zd-%zd\n",
+                                  alloc->pid, data_size, offsets_size,
+                                  extra_buffers_size);
+               return ERR_PTR(-EINVAL);
+       }
 
-       mutex_lock(&alloc->mutex);
-       buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
-                                            extra_buffers_size, is_async, pid);
-       mutex_unlock(&alloc->mutex);
+       /* Preallocate the next buffer */
+       next = kzalloc(sizeof(*next), GFP_KERNEL);
+       if (!next)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock(&alloc->lock);
+       buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
+       if (IS_ERR(buffer)) {
+               spin_unlock(&alloc->lock);
+               goto out;
+       }
+
+       buffer->data_size = data_size;
+       buffer->offsets_size = offsets_size;
+       buffer->extra_buffers_size = extra_buffers_size;
+       buffer->pid = current->tgid;
+       spin_unlock(&alloc->lock);
+
+       ret = binder_install_buffer_pages(alloc, buffer, size);
+       if (ret) {
+               binder_alloc_free_buf(alloc, buffer);
+               buffer = ERR_PTR(ret);
+       }
+out:
        return buffer;
 }
 
-static void __user *buffer_start_page(struct binder_buffer *buffer)
+static unsigned long buffer_start_page(struct binder_buffer *buffer)
 {
-       return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
+       return buffer->user_data & PAGE_MASK;
 }
 
-static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
+static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
 {
-       return (void __user *)
-               (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
+       return (buffer->user_data - 1) & PAGE_MASK;
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
 {
-       struct binder_buffer *prev, *next = NULL;
-       bool to_free = true;
+       struct binder_buffer *prev, *next;
+
+       if (PAGE_ALIGNED(buffer->user_data))
+               goto skip_freelist;
 
        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
-       if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
-               to_free = false;
-               binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                  "%d: merge free, buffer %pK share page with %pK\n",
-                                  alloc->pid, buffer->user_data,
-                                  prev->user_data);
-       }
+       if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
+               goto skip_freelist;
 
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
-               if (buffer_start_page(next) == buffer_start_page(buffer)) {
-                       to_free = false;
-                       binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                          "%d: merge free, buffer %pK share page with %pK\n",
-                                          alloc->pid,
-                                          buffer->user_data,
-                                          next->user_data);
-               }
-       }
-
-       if (PAGE_ALIGNED(buffer->user_data)) {
-               binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                  "%d: merge free, buffer start %pK is page aligned\n",
-                                  alloc->pid, buffer->user_data);
-               to_free = false;
+               if (buffer_start_page(next) == buffer_start_page(buffer))
+                       goto skip_freelist;
        }
 
-       if (to_free) {
-               binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                  "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
-                                  alloc->pid, buffer->user_data,
-                                  prev->user_data,
-                                  next ? next->user_data : NULL);
-               binder_update_page_range(alloc, 0, buffer_start_page(buffer),
-                                        buffer_start_page(buffer) + PAGE_SIZE);
-       }
+       binder_lru_freelist_add(alloc, buffer_start_page(buffer),
+                               buffer_start_page(buffer) + PAGE_SIZE);
+skip_freelist:
        list_del(&buffer->entry);
        kfree(buffer);
 }
@@ -657,17 +678,14 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
        BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
 
        if (buffer->async_transaction) {
-               alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
-
+               alloc->free_async_space += buffer_size;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_free_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
        }
 
-       binder_update_page_range(alloc, 0,
-               (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
-               (void __user *)(((uintptr_t)
-                         buffer->user_data + buffer_size) & PAGE_MASK));
+       binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
+                               (buffer->user_data + buffer_size) & PAGE_MASK);
 
        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
@@ -691,8 +709,68 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
        binder_insert_free_buffer(alloc, buffer);
 }
 
+/**
+ * binder_alloc_get_page() - get kernel pointer for given buffer offset
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @pgoffp: address to copy final page offset to
+ *
+ * Lookup the struct page corresponding to the address
+ * at @buffer_offset into @buffer->user_data. If @pgoffp is not
+ * NULL, the byte-offset into the page is written there.
+ *
+ * The caller is responsible to ensure that the offset points
+ * to a valid address within the @buffer and that @buffer is
+ * not freeable by the user. Since it can't be freed, we are
+ * guaranteed that the corresponding elements of @alloc->pages[]
+ * cannot change.
+ *
+ * Return: struct page
+ */
+static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
+                                         struct binder_buffer *buffer,
+                                         binder_size_t buffer_offset,
+                                         pgoff_t *pgoffp)
+{
+       binder_size_t buffer_space_offset = buffer_offset +
+               (buffer->user_data - alloc->buffer);
+       pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
+       size_t index = buffer_space_offset >> PAGE_SHIFT;
+       struct binder_lru_page *lru_page;
+
+       lru_page = &alloc->pages[index];
+       *pgoffp = pgoff;
+       return lru_page->page_ptr;
+}
+
+/**
+ * binder_alloc_clear_buf() - zero out buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be cleared
+ *
+ * memset the given buffer to 0
+ */
 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
-                                  struct binder_buffer *buffer);
+                                  struct binder_buffer *buffer)
+{
+       size_t bytes = binder_alloc_buffer_size(alloc, buffer);
+       binder_size_t buffer_offset = 0;
+
+       while (bytes) {
+               unsigned long size;
+               struct page *page;
+               pgoff_t pgoff;
+
+               page = binder_alloc_get_page(alloc, buffer,
+                                            buffer_offset, &pgoff);
+               size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+               memset_page(page, pgoff, 0, size);
+               bytes -= size;
+               buffer_offset += size;
+       }
+}
+
 /**
  * binder_alloc_free_buf() - free a binder buffer
  * @alloc:     binder_alloc for this proc
@@ -706,18 +784,18 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
        /*
         * We could eliminate the call to binder_alloc_clear_buf()
         * from binder_alloc_deferred_release() by moving this to
-        * binder_alloc_free_buf_locked(). However, that could
-        * increase contention for the alloc mutex if clear_on_free
-        * is used frequently for large buffers. The mutex is not
+        * binder_free_buf_locked(). However, that could
+        * increase contention for the alloc->lock if clear_on_free
+        * is used frequently for large buffers. This lock is not
         * needed for correctness here.
         */
        if (buffer->clear_on_free) {
                binder_alloc_clear_buf(alloc, buffer);
                buffer->clear_on_free = false;
        }
-       mutex_lock(&alloc->mutex);
+       spin_lock(&alloc->lock);
        binder_free_buf_locked(alloc, buffer);
-       mutex_unlock(&alloc->mutex);
+       spin_unlock(&alloc->lock);
 }
 
 /**
@@ -736,9 +814,9 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
 {
-       int ret;
-       const char *failure_string;
        struct binder_buffer *buffer;
+       const char *failure_string;
+       int ret, i;
 
        if (unlikely(vma->vm_mm != alloc->mm)) {
                ret = -EINVAL;
@@ -756,7 +834,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                                   SZ_4M);
        mutex_unlock(&binder_alloc_mmap_lock);
 
-       alloc->buffer = (void __user *)vma->vm_start;
+       alloc->buffer = vma->vm_start;
 
        alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
@@ -767,6 +845,11 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                goto err_alloc_pages_failed;
        }
 
+       for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+               alloc->pages[i].alloc = alloc;
+               INIT_LIST_HEAD(&alloc->pages[i].lru);
+       }
+
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
@@ -789,7 +872,7 @@ err_alloc_buf_struct_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
 err_alloc_pages_failed:
-       alloc->buffer = NULL;
+       alloc->buffer = 0;
        mutex_lock(&binder_alloc_mmap_lock);
        alloc->buffer_size = 0;
 err_already_mapped:
@@ -810,7 +893,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
        struct binder_buffer *buffer;
 
        buffers = 0;
-       mutex_lock(&alloc->mutex);
+       spin_lock(&alloc->lock);
        BUG_ON(alloc->vma);
 
        while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -842,25 +925,25 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
                int i;
 
                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-                       void __user *page_addr;
+                       unsigned long page_addr;
                        bool on_lru;
 
                        if (!alloc->pages[i].page_ptr)
                                continue;
 
-                       on_lru = list_lru_del_obj(&binder_alloc_lru,
-                                             &alloc->pages[i].lru);
+                       on_lru = list_lru_del_obj(&binder_freelist,
+                                                 &alloc->pages[i].lru);
                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                    "%s: %d: page %d at %pK %s\n",
-                                    __func__, alloc->pid, i, page_addr,
+                                    "%s: %d: page %d %s\n",
+                                    __func__, alloc->pid, i,
                                     on_lru ? "on lru" : "active");
                        __free_page(alloc->pages[i].page_ptr);
                        page_count++;
                }
                kfree(alloc->pages);
        }
-       mutex_unlock(&alloc->mutex);
+       spin_unlock(&alloc->lock);
        if (alloc->mm)
                mmdrop(alloc->mm);
 
@@ -869,16 +952,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
                     __func__, alloc->pid, buffers, page_count);
 }
 
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
-                               struct binder_buffer *buffer)
-{
-       seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
-                  prefix, buffer->debug_id, buffer->user_data,
-                  buffer->data_size, buffer->offsets_size,
-                  buffer->extra_buffers_size,
-                  buffer->transaction ? "active" : "delivered");
-}
-
 /**
  * binder_alloc_print_allocated() - print buffer info
  * @m:     seq_file for output via seq_printf()
@@ -890,13 +963,20 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix,
 void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
 {
+       struct binder_buffer *buffer;
        struct rb_node *n;
 
-       mutex_lock(&alloc->mutex);
-       for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
-               print_binder_buffer(m, "  buffer",
-                                   rb_entry(n, struct binder_buffer, rb_node));
-       mutex_unlock(&alloc->mutex);
+       spin_lock(&alloc->lock);
+       for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+               seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
+                          buffer->debug_id,
+                          buffer->user_data - alloc->buffer,
+                          buffer->data_size, buffer->offsets_size,
+                          buffer->extra_buffers_size,
+                          buffer->transaction ? "active" : "delivered");
+       }
+       spin_unlock(&alloc->lock);
 }
 
 /**
@@ -913,7 +993,7 @@ void binder_alloc_print_pages(struct seq_file *m,
        int lru = 0;
        int free = 0;
 
-       mutex_lock(&alloc->mutex);
+       spin_lock(&alloc->lock);
        /*
         * Make sure the binder_alloc is fully initialized, otherwise we might
         * read inconsistent state.
@@ -929,7 +1009,7 @@ void binder_alloc_print_pages(struct seq_file *m,
                                lru++;
                }
        }
-       mutex_unlock(&alloc->mutex);
+       spin_unlock(&alloc->lock);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
 }
@@ -945,10 +1025,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
        struct rb_node *n;
        int count = 0;
 
-       mutex_lock(&alloc->mutex);
+       spin_lock(&alloc->lock);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
-       mutex_unlock(&alloc->mutex);
+       spin_unlock(&alloc->lock);
        return count;
 }
 
@@ -981,33 +1061,39 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                                       void *cb_arg)
        __must_hold(lock)
 {
-       struct mm_struct *mm = NULL;
-       struct binder_lru_page *page = container_of(item,
-                                                   struct binder_lru_page,
-                                                   lru);
-       struct binder_alloc *alloc;
-       uintptr_t page_addr;
-       size_t index;
+       struct binder_lru_page *page = container_of(item, typeof(*page), lru);
+       struct binder_alloc *alloc = page->alloc;
+       struct mm_struct *mm = alloc->mm;
        struct vm_area_struct *vma;
+       struct page *page_to_free;
+       unsigned long page_addr;
+       size_t index;
 
-       alloc = page->alloc;
-       if (!mutex_trylock(&alloc->mutex))
-               goto err_get_alloc_mutex_failed;
-
+       if (!mmget_not_zero(mm))
+               goto err_mmget;
+       if (!mmap_read_trylock(mm))
+               goto err_mmap_read_lock_failed;
+       if (!spin_trylock(&alloc->lock))
+               goto err_get_alloc_lock_failed;
        if (!page->page_ptr)
                goto err_page_already_freed;
 
        index = page - alloc->pages;
-       page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+       page_addr = alloc->buffer + index * PAGE_SIZE;
 
-       mm = alloc->mm;
-       if (!mmget_not_zero(mm))
-               goto err_mmget;
-       if (!mmap_read_trylock(mm))
-               goto err_mmap_read_lock_failed;
-       vma = binder_alloc_get_vma(alloc);
+       vma = vma_lookup(mm, page_addr);
+       if (vma && vma != binder_alloc_get_vma(alloc))
+               goto err_invalid_vma;
+
+       trace_binder_unmap_kernel_start(alloc, index);
+
+       page_to_free = page->page_ptr;
+       page->page_ptr = NULL;
+
+       trace_binder_unmap_kernel_end(alloc, index);
 
        list_lru_isolate(lru, item);
+       spin_unlock(&alloc->lock);
        spin_unlock(lock);
 
        if (vma) {
@@ -1017,39 +1103,35 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
                trace_binder_unmap_user_end(alloc, index);
        }
+
        mmap_read_unlock(mm);
        mmput_async(mm);
-
-       trace_binder_unmap_kernel_start(alloc, index);
-
-       __free_page(page->page_ptr);
-       page->page_ptr = NULL;
-
-       trace_binder_unmap_kernel_end(alloc, index);
+       __free_page(page_to_free);
 
        spin_lock(lock);
-       mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;
 
+err_invalid_vma:
+err_page_already_freed:
+       spin_unlock(&alloc->lock);
+err_get_alloc_lock_failed:
+       mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
        mmput_async(mm);
 err_mmget:
-err_page_already_freed:
-       mutex_unlock(&alloc->mutex);
-err_get_alloc_mutex_failed:
        return LRU_SKIP;
 }
 
 static unsigned long
 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-       return list_lru_count(&binder_alloc_lru);
+       return list_lru_count(&binder_freelist);
 }
 
 static unsigned long
 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-       return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+       return list_lru_walk(&binder_freelist, binder_alloc_free_page,
                            NULL, sc->nr_to_scan);
 }
 
@@ -1067,7 +1149,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
        alloc->pid = current->group_leader->pid;
        alloc->mm = current->mm;
        mmgrab(alloc->mm);
-       mutex_init(&alloc->mutex);
+       spin_lock_init(&alloc->lock);
        INIT_LIST_HEAD(&alloc->buffers);
 }
 
@@ -1075,13 +1157,13 @@ int binder_alloc_shrinker_init(void)
 {
        int ret;
 
-       ret = list_lru_init(&binder_alloc_lru);
+       ret = list_lru_init(&binder_freelist);
        if (ret)
                return ret;
 
        binder_shrinker = shrinker_alloc(0, "android-binder");
        if (!binder_shrinker) {
-               list_lru_destroy(&binder_alloc_lru);
+               list_lru_destroy(&binder_freelist);
                return -ENOMEM;
        }
 
@@ -1096,7 +1178,7 @@ int binder_alloc_shrinker_init(void)
 void binder_alloc_shrinker_exit(void)
 {
        shrinker_free(binder_shrinker);
-       list_lru_destroy(&binder_alloc_lru);
+       list_lru_destroy(&binder_freelist);
 }
 
 /**
@@ -1131,68 +1213,6 @@ static inline bool check_buffer(struct binder_alloc *alloc,
                (!buffer->allow_user_free || !buffer->transaction);
 }
 
-/**
- * binder_alloc_get_page() - get kernel pointer for given buffer offset
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be accessed
- * @buffer_offset: offset into @buffer data
- * @pgoffp: address to copy final page offset to
- *
- * Lookup the struct page corresponding to the address
- * at @buffer_offset into @buffer->user_data. If @pgoffp is not
- * NULL, the byte-offset into the page is written there.
- *
- * The caller is responsible to ensure that the offset points
- * to a valid address within the @buffer and that @buffer is
- * not freeable by the user. Since it can't be freed, we are
- * guaranteed that the corresponding elements of @alloc->pages[]
- * cannot change.
- *
- * Return: struct page
- */
-static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
-                                         struct binder_buffer *buffer,
-                                         binder_size_t buffer_offset,
-                                         pgoff_t *pgoffp)
-{
-       binder_size_t buffer_space_offset = buffer_offset +
-               (buffer->user_data - alloc->buffer);
-       pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
-       size_t index = buffer_space_offset >> PAGE_SHIFT;
-       struct binder_lru_page *lru_page;
-
-       lru_page = &alloc->pages[index];
-       *pgoffp = pgoff;
-       return lru_page->page_ptr;
-}
-
-/**
- * binder_alloc_clear_buf() - zero out buffer
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be cleared
- *
- * memset the given buffer to 0
- */
-static void binder_alloc_clear_buf(struct binder_alloc *alloc,
-                                  struct binder_buffer *buffer)
-{
-       size_t bytes = binder_alloc_buffer_size(alloc, buffer);
-       binder_size_t buffer_offset = 0;
-
-       while (bytes) {
-               unsigned long size;
-               struct page *page;
-               pgoff_t pgoff;
-
-               page = binder_alloc_get_page(alloc, buffer,
-                                            buffer_offset, &pgoff);
-               size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
-               memset_page(page, pgoff, 0, size);
-               bytes -= size;
-               buffer_offset += size;
-       }
-}
-
 /**
  * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
  * @alloc: binder_alloc for this proc
index dc1e2b01dd64dbe7c7fd7decaadb8313c5b20904..70387234477e0cc9e22317d705bc5206f90a9f94 100644 (file)
@@ -9,13 +9,13 @@
 #include <linux/rbtree.h>
 #include <linux/list.h>
 #include <linux/mm.h>
-#include <linux/rtmutex.h>
+#include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/list_lru.h>
 #include <uapi/linux/android/binder.h>
 
-extern struct list_lru binder_alloc_lru;
+extern struct list_lru binder_freelist;
 struct binder_transaction;
 
 /**
@@ -49,21 +49,19 @@ struct binder_buffer {
        unsigned async_transaction:1;
        unsigned oneway_spam_suspect:1;
        unsigned debug_id:27;
-
        struct binder_transaction *transaction;
-
        struct binder_node *target_node;
        size_t data_size;
        size_t offsets_size;
        size_t extra_buffers_size;
-       void __user *user_data;
-       int    pid;
+       unsigned long user_data;
+       int pid;
 };
 
 /**
  * struct binder_lru_page - page object used for binder shrinker
  * @page_ptr: pointer to physical page in mmap'd space
- * @lru:      entry in binder_alloc_lru
+ * @lru:      entry in binder_freelist
  * @alloc:    binder_alloc for a proc
  */
 struct binder_lru_page {
@@ -74,7 +72,7 @@ struct binder_lru_page {
 
 /**
  * struct binder_alloc - per-binder proc state for binder allocator
- * @mutex:              protects binder_alloc fields
+ * @lock:               protects binder_alloc fields
  * @vma:                vm_area_struct passed to mmap_handler
  *                      (invariant after mmap)
  * @mm:                 copy of task->mm (invariant after open)
@@ -98,10 +96,10 @@ struct binder_lru_page {
  * struct binder_buffer objects used to track the user buffers
  */
 struct binder_alloc {
-       struct mutex mutex;
+       spinlock_t lock;
        struct vm_area_struct *vma;
        struct mm_struct *mm;
-       void __user *buffer;
+       unsigned long buffer;
        struct list_head buffers;
        struct rb_root free_buffers;
        struct rb_root allocated_buffers;
@@ -121,27 +119,26 @@ static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
 enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock, void *cb_arg);
-extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
-                                                 size_t data_size,
-                                                 size_t offsets_size,
-                                                 size_t extra_buffers_size,
-                                                 int is_async,
-                                                 int pid);
-extern void binder_alloc_init(struct binder_alloc *alloc);
-extern int binder_alloc_shrinker_init(void);
-extern void binder_alloc_shrinker_exit(void);
-extern void binder_alloc_vma_close(struct binder_alloc *alloc);
-extern struct binder_buffer *
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+                                          size_t data_size,
+                                          size_t offsets_size,
+                                          size_t extra_buffers_size,
+                                          int is_async);
+void binder_alloc_init(struct binder_alloc *alloc);
+int binder_alloc_shrinker_init(void);
+void binder_alloc_shrinker_exit(void);
+void binder_alloc_vma_close(struct binder_alloc *alloc);
+struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,
-                            uintptr_t user_ptr);
-extern void binder_alloc_free_buf(struct binder_alloc *alloc,
-                                 struct binder_buffer *buffer);
-extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
-                                    struct vm_area_struct *vma);
-extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
-extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
-extern void binder_alloc_print_allocated(struct seq_file *m,
-                                        struct binder_alloc *alloc);
+                            unsigned long user_ptr);
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+                          struct binder_buffer *buffer);
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+                             struct vm_area_struct *vma);
+void binder_alloc_deferred_release(struct binder_alloc *alloc);
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+void binder_alloc_print_allocated(struct seq_file *m,
+                                 struct binder_alloc *alloc);
 void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc);
 
@@ -156,9 +153,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
 {
        size_t free_async_space;
 
-       mutex_lock(&alloc->mutex);
+       spin_lock(&alloc->lock);
        free_async_space = alloc->free_async_space;
-       mutex_unlock(&alloc->mutex);
+       spin_unlock(&alloc->lock);
        return free_async_space;
 }
 
index c2b323bc3b3a53043fa5783fb4f809de93b24644..81442fe20a695ffd1fd562f2e1e8b640a36fca5c 100644 (file)
@@ -72,6 +72,10 @@ enum buf_end_align_type {
         * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
         */
        NEXT_NEXT_UNALIGNED,
+       /**
+        * @LOOP_END: The number of enum values in &buf_end_align_type.
+        * It is used for controlling loop termination.
+        */
        LOOP_END,
 };
 
@@ -93,11 +97,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
                                         struct binder_buffer *buffer,
                                         size_t size)
 {
-       void __user *page_addr;
-       void __user *end;
+       unsigned long page_addr;
+       unsigned long end;
        int page_index;
 
-       end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+       end = PAGE_ALIGN(buffer->user_data + size);
        page_addr = buffer->user_data;
        for (; page_addr < end; page_addr += PAGE_SIZE) {
                page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
@@ -119,7 +123,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
        int i;
 
        for (i = 0; i < BUFFER_NUM; i++) {
-               buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
+               buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
                if (IS_ERR(buffers[i]) ||
                    !check_buffer_pages_allocated(alloc, buffers[i],
                                                  sizes[i])) {
@@ -158,8 +162,8 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
        int i;
        unsigned long count;
 
-       while ((count = list_lru_count(&binder_alloc_lru))) {
-               list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+       while ((count = list_lru_count(&binder_freelist))) {
+               list_lru_walk(&binder_freelist, binder_alloc_free_page,
                              NULL, count);
        }
 
@@ -183,7 +187,7 @@ static void binder_selftest_alloc_free(struct binder_alloc *alloc,
 
        /* Allocate from lru. */
        binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
-       if (list_lru_count(&binder_alloc_lru))
+       if (list_lru_count(&binder_freelist))
                pr_err("lru list should be empty but is not\n");
 
        binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
index 8cc07e6a4273e7f4ed888bd19798af0aa1aa2535..fe38c6fc65d0f89ab7ab6e709fd93e20114034cf 100644 (file)
@@ -317,7 +317,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_update_buffer_release,
 
 TRACE_EVENT(binder_update_page_range,
        TP_PROTO(struct binder_alloc *alloc, bool allocate,
-                void __user *start, void __user *end),
+                unsigned long start, unsigned long end),
        TP_ARGS(alloc, allocate, start, end),
        TP_STRUCT__entry(
                __field(int, proc)
index 1224ab7aa0708b336ec3c9c37b53b12e7ada6e95..3001d754ac369ebdff6d994c9dabe03eb60cccf5 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/uaccess.h>
 #include <linux/user_namespace.h>
 #include <linux/xarray.h>
-#include <uapi/asm-generic/errno-base.h>
 #include <uapi/linux/android/binder.h>
 #include <uapi/linux/android/binderfs.h>
 
index 3a5f3255f51b39cc4a5b65554e7d55eed8ea2c57..da2e74fce2d995a932914876b44b3fb5d4275d2e 100644 (file)
@@ -48,6 +48,7 @@ enum {
 enum board_ids {
        /* board IDs by feature in alphabetical order */
        board_ahci,
+       board_ahci_43bit_dma,
        board_ahci_ign_iferr,
        board_ahci_low_power,
        board_ahci_no_debounce_delay,
@@ -128,6 +129,13 @@ static const struct ata_port_info ahci_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
+       [board_ahci_43bit_dma] = {
+               AHCI_HFLAGS     (AHCI_HFLAG_43BIT_ONLY),
+               .flags          = AHCI_FLAG_COMMON,
+               .pio_mask       = ATA_PIO4,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_ops,
+       },
        [board_ahci_ign_iferr] = {
                AHCI_HFLAGS     (AHCI_HFLAG_IGN_IRQ_IF_ERR),
                .flags          = AHCI_FLAG_COMMON,
@@ -597,14 +605,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
        { PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
 
-       /* Asmedia */
-       { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },   /* ASM1060 */
-       { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci },   /* ASM1060 */
-       { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },   /* ASM1061 */
-       { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1062 */
-       { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci },   /* ASM1061R */
-       { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci },   /* ASM1062R */
-       { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci },   /* ASM1062+JMB575 */
+       /* ASMedia */
+       { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma }, /* ASM1060 */
+       { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma }, /* ASM1060 */
+       { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
+       { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
+       { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma }, /* ASM1061R */
+       { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma }, /* ASM1062R */
+       { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma }, /* ASM1062+JMB575 */
        { PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci },   /* ASM1062A */
        { PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci },   /* ASM1064 */
        { PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci },   /* ASM1164 */
@@ -663,6 +671,11 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
                                         struct ahci_host_priv *hpriv)
 {
+       if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
+               dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+               hpriv->saved_port_map = 0x3f;
+       }
+
        if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
                dev_info(&pdev->dev, "JMB361 has only one port\n");
                hpriv->saved_port_map = 1;
@@ -949,11 +962,20 @@ static int ahci_pci_device_resume(struct device *dev)
 
 #endif /* CONFIG_PM */
 
-static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+static int ahci_configure_dma_masks(struct pci_dev *pdev,
+                                   struct ahci_host_priv *hpriv)
 {
-       const int dma_bits = using_dac ? 64 : 32;
+       int dma_bits;
        int rc;
 
+       if (hpriv->cap & HOST_CAP_64) {
+               dma_bits = 64;
+               if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY)
+                       dma_bits = 43;
+       } else {
+               dma_bits = 32;
+       }
+
        /*
         * If the device fixup already set the dma_mask to some non-standard
         * value, don't extend it here. This happens on STA2X11, for example.
@@ -1926,7 +1948,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        ahci_gtf_filter_workaround(host);
 
        /* initialize adapter */
-       rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+       rc = ahci_configure_dma_masks(pdev, hpriv);
        if (rc)
                return rc;
 
index 4bae95b06ae3c953de7a567c35f9b46dd9e3083f..df8f8a1a3a34c3ee26d0d2b899522a82d220b6c2 100644 (file)
@@ -247,6 +247,7 @@ enum {
        AHCI_HFLAG_SUSPEND_PHYS         = BIT(26), /* handle PHYs during
                                                      suspend/resume */
        AHCI_HFLAG_NO_SXS               = BIT(28), /* SXS not supported */
+       AHCI_HFLAG_43BIT_ONLY           = BIT(29), /* 43bit DMA addr limit */
 
        /* ap->flags bits */
 
index b6656c287175c7653324758ccb5422dc36168e3d..0fb1934875f2084a753216cf54ff443aa601361b 100644 (file)
@@ -784,7 +784,7 @@ bool sata_lpm_ignore_phy_events(struct ata_link *link)
 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
 
 static const char *ata_lpm_policy_names[] = {
-       [ATA_LPM_UNKNOWN]               = "max_performance",
+       [ATA_LPM_UNKNOWN]               = "keep_firmware_settings",
        [ATA_LPM_MAX_POWER]             = "max_performance",
        [ATA_LPM_MED_POWER]             = "medium_power",
        [ATA_LPM_MED_POWER_WITH_DIPM]   = "med_power_with_dipm",
index e327a0229dc173442b2789a402a8ea0adb931cdd..e7f713cd70d3fd7a413c2568b8dca8f8cc8ba2c4 100644 (file)
@@ -2930,6 +2930,8 @@ open_card_ubr0(struct idt77252_dev *card)
        vc->scq = alloc_scq(card, vc->class);
        if (!vc->scq) {
                printk("%s: can't get SCQ.\n", card->name);
+               kfree(card->vcs[0]);
+               card->vcs[0] = NULL;
                return -ENOMEM;
        }
 
index 5aaa0865625d0d10fd02d79b6721c2f6b2863dfe..018ac202de345e9a97bc7198385c1d95d460eb28 100644 (file)
@@ -219,20 +219,34 @@ static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
 
 static DEVICE_ATTR_RO(cpu_capacity);
 
-static int register_cpu_capacity_sysctl(void)
+static int cpu_capacity_sysctl_add(unsigned int cpu)
 {
-       int i;
-       struct device *cpu;
+       struct device *cpu_dev = get_cpu_device(cpu);
 
-       for_each_possible_cpu(i) {
-               cpu = get_cpu_device(i);
-               if (!cpu) {
-                       pr_err("%s: too early to get CPU%d device!\n",
-                              __func__, i);
-                       continue;
-               }
-               device_create_file(cpu, &dev_attr_cpu_capacity);
-       }
+       if (!cpu_dev)
+               return -ENOENT;
+
+       device_create_file(cpu_dev, &dev_attr_cpu_capacity);
+
+       return 0;
+}
+
+static int cpu_capacity_sysctl_remove(unsigned int cpu)
+{
+       struct device *cpu_dev = get_cpu_device(cpu);
+
+       if (!cpu_dev)
+               return -ENOENT;
+
+       device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
+
+       return 0;
+}
+
+static int register_cpu_capacity_sysctl(void)
+{
+       cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
+                         cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
 
        return 0;
 }
index 4d4c2c8d26c4808aa61c13ef2c3357e2bebf8564..d3a2c40c2f1272ca59ef75aa978e843dc5305ccf 100644 (file)
@@ -244,7 +244,7 @@ static void auxiliary_bus_shutdown(struct device *dev)
                auxdrv->shutdown(auxdev);
 }
 
-static struct bus_type auxiliary_bus_type = {
+static const struct bus_type auxiliary_bus_type = {
        .name = "auxiliary",
        .probe = auxiliary_bus_probe,
        .remove = auxiliary_bus_remove,
index 84a21084d67d16ddfb73ede50715b3a881b5f96a..daee55c9b2d9e1476bf34b13de20b022eb680d03 100644 (file)
@@ -1030,7 +1030,7 @@ static void device_insertion_sort_klist(struct device *a, struct list_head *list
        list_move_tail(&a->p->knode_bus.n_node, list);
 }
 
-void bus_sort_breadthfirst(struct bus_type *bus,
+void bus_sort_breadthfirst(const struct bus_type *bus,
                           int (*compare)(const struct device *a,
                                          const struct device *b))
 {
@@ -1194,7 +1194,7 @@ static void system_root_device_release(struct device *dev)
        kfree(dev);
 }
 
-static int subsys_register(struct bus_type *subsys,
+static int subsys_register(const struct bus_type *subsys,
                           const struct attribute_group **groups,
                           struct kobject *parent_of_root)
 {
@@ -1264,7 +1264,7 @@ err_sp:
  * directory itself and not some create fake root-device placed in
  * /sys/devices/system/<name>.
  */
-int subsys_system_register(struct bus_type *subsys,
+int subsys_system_register(const struct bus_type *subsys,
                           const struct attribute_group **groups)
 {
        return subsys_register(subsys, groups, &system_kset->kobj);
@@ -1282,7 +1282,7 @@ EXPORT_SYMBOL_GPL(subsys_system_register);
  * There's no restriction on device naming.  This is for kernel software
  * constructs which need sysfs interface.
  */
-int subsys_virtual_register(struct bus_type *subsys,
+int subsys_virtual_register(const struct bus_type *subsys,
                            const struct attribute_group **groups)
 {
        struct kobject *virtual_dir;
index 7e78aee0fd6c397a5c121e8365735e35664925d2..7b38fdf8e1d78e95bf8f44ab4cf1cc48f317ec4f 100644 (file)
@@ -213,6 +213,7 @@ int class_register(const struct class *cls)
        return 0;
 
 err_out:
+       lockdep_unregister_key(key);
        kfree(cp);
        return error;
 }
index 1ba42d2d353223e683be57bad5154df07abda10e..f40588ebc3f557216055af8f26196405c471e8b1 100644 (file)
@@ -24,7 +24,7 @@ static int container_offline(struct device *dev)
        return cdev->offline ? cdev->offline(cdev) : 0;
 }
 
-struct bus_type container_subsys = {
+const struct bus_type container_subsys = {
        .name = CONTAINER_BUS_NAME,
        .dev_name = CONTAINER_BUS_NAME,
        .online = trivial_online,
index 67ba592afc7776420995c8730af8200ad12e3e90..14d46af40f9a15e185230eecf3bbac6ec94728ef 100644 (file)
@@ -298,7 +298,7 @@ static inline bool device_link_flag_is_sync_state_only(u32 flags)
  * Check if @target depends on @dev or any device dependent on it (its child or
  * its consumer etc).  Return 1 if that is the case or 0 otherwise.
  */
-int device_is_dependent(struct device *dev, void *target)
+static int device_is_dependent(struct device *dev, void *target)
 {
        struct device_link *link;
        int ret;
@@ -1641,7 +1641,7 @@ static void device_links_purge(struct device *dev)
 #define FW_DEVLINK_FLAGS_RPM           (FW_DEVLINK_FLAGS_ON | \
                                         DL_FLAG_PM_RUNTIME)
 
-static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
+static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
 static int __init fw_devlink_setup(char *arg)
 {
        if (!arg)
@@ -4944,13 +4944,14 @@ define_dev_printk_level(_dev_info, KERN_INFO);
  *
  *     return dev_err_probe(dev, err, ...);
  *
- * Note that it is deemed acceptable to use this function for error
- * prints during probe even if the @err is known to never be -EPROBE_DEFER.
+ * Using this helper in your probe function is totally fine even if @err is
+ * known to never be -EPROBE_DEFER.
  * The benefit compared to a normal dev_err() is the standardized format
- * of the error code and the fact that the error code is returned.
+ * of the error code, it being emitted symbolically (i.e. you get "EAGAIN"
+ * instead of "-35") and the fact that the error code is returned which allows
+ * more compact error paths.
  *
  * Returns @err.
- *
  */
 int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
 {
index 548491de818ef126f598eba06287bf7899418398..47de0f140ba65e0b9cfbff8574a8adeec69e471d 100644 (file)
@@ -525,19 +525,42 @@ bool cpu_is_hotpluggable(unsigned int cpu)
 EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
 
 #ifdef CONFIG_GENERIC_CPU_DEVICES
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-#endif
+DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+bool __weak arch_cpu_is_hotpluggable(int cpu)
+{
+       return false;
+}
+
+int __weak arch_register_cpu(int cpu)
+{
+       struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+       c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
+
+       return register_cpu(c, cpu);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __weak arch_unregister_cpu(int num)
+{
+       unregister_cpu(&per_cpu(cpu_devices, num));
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_GENERIC_CPU_DEVICES */
 
 static void __init cpu_dev_register_generic(void)
 {
-#ifdef CONFIG_GENERIC_CPU_DEVICES
-       int i;
+       int i, ret;
 
-       for_each_possible_cpu(i) {
-               if (register_cpu(&per_cpu(cpu_devices, i), i))
-                       panic("Failed to register CPU device");
+       if (!IS_ENABLED(CONFIG_GENERIC_CPU_DEVICES))
+               return;
+
+       for_each_present_cpu(i) {
+               ret = arch_register_cpu(i);
+               if (ret)
+                       pr_warn("register_cpu %d failed (%d)\n", i, ret);
        }
-#endif
 }
 
 #ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
index 0c3725c3eefa46d80140f5278f65405262731745..85152537dbf12d005236cf18c721c51f62a5ebd8 100644 (file)
@@ -313,7 +313,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
 
        mutex_lock(&deferred_probe_mutex);
        list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
-               dev_info(p->device, "deferred probe pending\n");
+               dev_info(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n");
        mutex_unlock(&deferred_probe_mutex);
 
        fw_devlink_probing_done();
index 397eb9880cecb8ed39914880bf64e879a8c94b32..c4954835128cfedaef237fd356ce371115673b5d 100644 (file)
@@ -35,8 +35,8 @@ void __init driver_init(void)
        of_core_init();
        platform_bus_init();
        auxiliary_bus_init();
-       cpu_dev_init();
        memory_dev_init();
        node_dev_init();
+       cpu_dev_init();
        container_dev_init();
 }
index 675ad3139224623bae18d17c7729b4e4b9c2d14e..e23d0b49a7934507850c5df37608559adee491a4 100644 (file)
@@ -82,7 +82,7 @@ static int isa_bus_resume(struct device *dev)
        return 0;
 }
 
-static struct bus_type isa_bus_type = {
+static const struct bus_type isa_bus_type = {
        .name           = "isa",
        .match          = isa_bus_match,
        .probe          = isa_bus_probe,
index 8a13babd826ce3c96a7f73bf7a7e723179e047b1..14f964a7719bd046999f28dcb4c28abc35d0b725 100644 (file)
@@ -68,7 +68,7 @@ static inline unsigned long phys_to_block_id(unsigned long phys)
 static int memory_subsys_online(struct device *dev);
 static int memory_subsys_offline(struct device *dev);
 
-static struct bus_type memory_subsys = {
+static const struct bus_type memory_subsys = {
        .name = MEMORY_CLASS_NAME,
        .dev_name = MEMORY_CLASS_NAME,
        .online = memory_subsys_online,
index 493d533f8375560a1503167363b42480b4d2b897..1c05640461dd1679755c3811b4677e152bb8d875 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/swap.h>
 #include <linux/slab.h>
 
-static struct bus_type node_subsys = {
+static const struct bus_type node_subsys = {
        .name = "node",
        .dev_name = "node",
 };
@@ -74,14 +74,14 @@ static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
  * @dev:       Device for this memory access class
  * @list_node: List element in the node's access list
  * @access:    The access class rank
- * @hmem_attrs: Heterogeneous memory performance attributes
+ * @coord:     Heterogeneous memory performance coordinates
  */
 struct node_access_nodes {
        struct device           dev;
        struct list_head        list_node;
        unsigned int            access;
 #ifdef CONFIG_HMEM_REPORTING
-       struct node_hmem_attrs  hmem_attrs;
+       struct access_coordinate        coord;
 #endif
 };
 #define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
@@ -167,7 +167,7 @@ static ssize_t property##_show(struct device *dev,                  \
                           char *buf)                                   \
 {                                                                      \
        return sysfs_emit(buf, "%u\n",                                  \
-                         to_access_nodes(dev)->hmem_attrs.property);   \
+                         to_access_nodes(dev)->coord.property);        \
 }                                                                      \
 static DEVICE_ATTR_RO(property)
 
@@ -187,10 +187,10 @@ static struct attribute *access_attrs[] = {
 /**
  * node_set_perf_attrs - Set the performance values for given access class
  * @nid: Node identifier to be set
- * @hmem_attrs: Heterogeneous memory performance attributes
+ * @coord: Heterogeneous memory performance coordinates
  * @access: The access class the for the given attributes
  */
-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
+void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
                         unsigned int access)
 {
        struct node_access_nodes *c;
@@ -205,7 +205,7 @@ void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
        if (!c)
                return;
 
-       c->hmem_attrs = *hmem_attrs;
+       c->coord = *coord;
        for (i = 0; access_attrs[i] != NULL; i++) {
                if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
                                            "initiators")) {
@@ -868,11 +868,15 @@ int __register_one_node(int nid)
 {
        int error;
        int cpu;
+       struct node *node;
 
-       node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
-       if (!node_devices[nid])
+       node = kzalloc(sizeof(struct node), GFP_KERNEL);
+       if (!node)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&node->access_list);
+       node_devices[nid] = node;
+
        error = register_node(node_devices[nid], nid);
 
        /* link cpu under this node */
@@ -881,7 +885,6 @@ int __register_one_node(int nid)
                        register_cpu_under_node(cpu, nid);
        }
 
-       INIT_LIST_HEAD(&node_devices[nid]->access_list);
        node_init_caches(nid);
 
        return error;
index 4110c19c08dcd7fc29fd0844afc50ae865b70d3d..e18ba676cdf645448477cbabe71605e3ceaa9142 100644 (file)
@@ -793,7 +793,7 @@ static int pm_clk_notify(struct notifier_block *nb,
  * the remaining members of @clknb should be populated prior to calling this
  * routine.
  */
-void pm_clk_add_notifier(struct bus_type *bus,
+void pm_clk_add_notifier(const struct bus_type *bus,
                                 struct pm_clk_notifier_block *clknb)
 {
        if (!bus || !clknb)
index 9c5a5f4dba5a6e1a71b3e310b5ac9f8bc7a509a6..fadcd0379dc2db7e46fe973ab4429dd659b31d63 100644 (file)
@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev)
 }
 
 /**
- * __device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        const char *info = NULL;
@@ -674,16 +674,22 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
 {
        reinit_completion(&dev->power.completion);
 
-       if (!is_async(dev))
-               return false;
-
-       get_device(dev);
+       if (is_async(dev)) {
+               dev->power.async_in_progress = true;
 
-       if (async_schedule_dev_nocall(func, dev))
-               return true;
+               get_device(dev);
 
-       put_device(dev);
+               if (async_schedule_dev_nocall(func, dev))
+                       return true;
 
+               put_device(dev);
+       }
+       /*
+        * Because async_schedule_dev_nocall() above has returned false or it
+        * has not been called at all, func() is not running and it is safe to
+        * update the async_in_progress flag without extra synchronization.
+        */
+       dev->power.async_in_progress = false;
        return false;
 }
 
@@ -691,18 +697,10 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
 {
        struct device *dev = data;
 
-       __device_resume_noirq(dev, pm_transition, true);
+       device_resume_noirq(dev, pm_transition, true);
        put_device(dev);
 }
 
-static void device_resume_noirq(struct device *dev)
-{
-       if (dpm_async_fn(dev, async_resume_noirq))
-               return;
-
-       __device_resume_noirq(dev, pm_transition, false);
-}
-
 static void dpm_noirq_resume_devices(pm_message_t state)
 {
        struct device *dev;
@@ -712,18 +710,28 @@ static void dpm_noirq_resume_devices(pm_message_t state)
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
 
+       /*
+        * Trigger the resume of "async" devices upfront so they don't have to
+        * wait for the "non-async" ones they don't depend on.
+        */
+       list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+               dpm_async_fn(dev, async_resume_noirq);
+
        while (!list_empty(&dpm_noirq_list)) {
                dev = to_device(dpm_noirq_list.next);
-               get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
 
-               mutex_unlock(&dpm_list_mtx);
+               if (!dev->power.async_in_progress) {
+                       get_device(dev);
 
-               device_resume_noirq(dev);
+                       mutex_unlock(&dpm_list_mtx);
 
-               put_device(dev);
+                       device_resume_noirq(dev, state, false);
 
-               mutex_lock(&dpm_list_mtx);
+                       put_device(dev);
+
+                       mutex_lock(&dpm_list_mtx);
+               }
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
@@ -747,14 +755,14 @@ void dpm_resume_noirq(pm_message_t state)
 }
 
 /**
- * __device_resume_early - Execute an "early resume" callback for given device.
+ * device_resume_early - Execute an "early resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        const char *info = NULL;
@@ -820,18 +828,10 @@ static void async_resume_early(void *data, async_cookie_t cookie)
 {
        struct device *dev = data;
 
-       __device_resume_early(dev, pm_transition, true);
+       device_resume_early(dev, pm_transition, true);
        put_device(dev);
 }
 
-static void device_resume_early(struct device *dev)
-{
-       if (dpm_async_fn(dev, async_resume_early))
-               return;
-
-       __device_resume_early(dev, pm_transition, false);
-}
-
 /**
  * dpm_resume_early - Execute "early resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
@@ -845,18 +845,28 @@ void dpm_resume_early(pm_message_t state)
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
 
+       /*
+        * Trigger the resume of "async" devices upfront so they don't have to
+        * wait for the "non-async" ones they don't depend on.
+        */
+       list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+               dpm_async_fn(dev, async_resume_early);
+
        while (!list_empty(&dpm_late_early_list)) {
                dev = to_device(dpm_late_early_list.next);
-               get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
 
-               mutex_unlock(&dpm_list_mtx);
+               if (!dev->power.async_in_progress) {
+                       get_device(dev);
 
-               device_resume_early(dev);
+                       mutex_unlock(&dpm_list_mtx);
 
-               put_device(dev);
+                       device_resume_early(dev, state, false);
 
-               mutex_lock(&dpm_list_mtx);
+                       put_device(dev);
+
+                       mutex_lock(&dpm_list_mtx);
+               }
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
@@ -876,12 +886,12 @@ void dpm_resume_start(pm_message_t state)
 EXPORT_SYMBOL_GPL(dpm_resume_start);
 
 /**
- * __device_resume - Execute "resume" callbacks for given device.
+ * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
  * @async: If true, the device is being resumed asynchronously.
  */
-static void __device_resume(struct device *dev, pm_message_t state, bool async)
+static void device_resume(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        const char *info = NULL;
@@ -975,18 +985,10 @@ static void async_resume(void *data, async_cookie_t cookie)
 {
        struct device *dev = data;
 
-       __device_resume(dev, pm_transition, true);
+       device_resume(dev, pm_transition, true);
        put_device(dev);
 }
 
-static void device_resume(struct device *dev)
-{
-       if (dpm_async_fn(dev, async_resume))
-               return;
-
-       __device_resume(dev, pm_transition, false);
-}
-
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -1006,16 +1008,25 @@ void dpm_resume(pm_message_t state)
        pm_transition = state;
        async_error = 0;
 
+       /*
+        * Trigger the resume of "async" devices upfront so they don't have to
+        * wait for the "non-async" ones they don't depend on.
+        */
+       list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+               dpm_async_fn(dev, async_resume);
+
        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
 
                get_device(dev);
 
-               mutex_unlock(&dpm_list_mtx);
+               if (!dev->power.async_in_progress) {
+                       mutex_unlock(&dpm_list_mtx);
 
-               device_resume(dev);
+                       device_resume(dev, state, false);
 
-               mutex_lock(&dpm_list_mtx);
+                       mutex_lock(&dpm_list_mtx);
+               }
 
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
index 8e93167f1783a476e71d042e59d52d6acef4fe94..bd77f6734f14cda9f45c26538ee7ea7c3a0bc136 100644 (file)
@@ -201,7 +201,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
        if (!qos)
                return -ENOMEM;
 
-       n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
+       n = kcalloc(3, sizeof(*n), GFP_KERNEL);
        if (!n) {
                kfree(qos);
                return -ENOMEM;
index 72b7a92337b1889a0a5868d47489d26a718290b5..cd6e559648b21bd3661caed21573238f61038401 100644 (file)
@@ -120,7 +120,7 @@ static unsigned int read_magic_time(void)
        struct rtc_time time;
        unsigned int val;
 
-       if (mc146818_get_time(&time) < 0) {
+       if (mc146818_get_time(&time, 1000) < 0) {
                pr_err("Unable to read current time from RTC\n");
                return 0;
        }
index b79608ee0b46be36bb74578de285797ad4da22e0..a1b01ab4205280df05759c5139e49b34f6f64a82 100644 (file)
@@ -473,7 +473,7 @@ int fwnode_property_match_string(const struct fwnode_handle *fwnode,
        const char **values;
        int nval, ret;
 
-       nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0);
+       nval = fwnode_property_string_array_count(fwnode, propname);
        if (nval < 0)
                return nval;
 
@@ -498,6 +498,41 @@ out_free:
 }
 EXPORT_SYMBOL_GPL(fwnode_property_match_string);
 
+/**
+ * fwnode_property_match_property_string - find a property string value in an array and return index
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property holding the string value
+ * @array: String array to search in
+ * @n: Size of the @array
+ *
+ * Find a property string value in a given @array and if it is found return
+ * the index back.
+ *
+ * Return: index, starting from %0, if the string value was found in the @array (success),
+ *        %-ENOENT when the string value was not found in the @array,
+ *        %-EINVAL if given arguments are not valid,
+ *        %-ENODATA if the property does not have a value,
+ *        %-EPROTO or %-EILSEQ if the property is not a string,
+ *        %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_match_property_string(const struct fwnode_handle *fwnode,
+       const char *propname, const char * const *array, size_t n)
+{
+       const char *string;
+       int ret;
+
+       ret = fwnode_property_read_string(fwnode, propname, &string);
+       if (ret)
+               return ret;
+
+       ret = match_string(array, n, string);
+       if (ret < 0)
+               ret = -ENOENT;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(fwnode_property_match_property_string);
+
 /**
  * fwnode_property_get_reference_args() - Find a reference with arguments
  * @fwnode:    Firmware node where to look for the reference
@@ -508,6 +543,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
  * @nargs:     Number of arguments. Ignored if @nargs_prop is non-NULL.
  * @index:     Index of the reference, from zero onwards.
  * @args:      Result structure with reference and integer arguments.
+ *             May be NULL.
  *
  * Obtain a reference based on a named property in an fwnode, with
  * integer arguments.
index 8dec5228fde3d43e6af5e82727623c383a203c34..282c38aece0de88049dc1e6e9bea00df52bed1ea 100644 (file)
@@ -28,7 +28,7 @@ struct soc_device {
        int soc_dev_num;
 };
 
-static struct bus_type soc_bus_type = {
+static const struct bus_type soc_bus_type = {
        .name  = "soc",
 };
 static bool soc_bus_registered;
@@ -106,7 +106,7 @@ static void soc_release(struct device *dev)
 {
        struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
 
-       ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
+       ida_free(&soc_ida, soc_dev->soc_dev_num);
        kfree(soc_dev->dev.groups);
        kfree(soc_dev);
 }
@@ -155,7 +155,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
        soc_attr_groups[1] = soc_dev_attr->custom_attr_group;
 
        /* Fetch a unique (reclaimable) SOC ID. */
-       ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
+       ret = ida_alloc(&soc_ida, GFP_KERNEL);
        if (ret < 0)
                goto out3;
        soc_dev->soc_dev_num = ret;
index 1886995a0b3a30e3067d3c7eb5895bcdbea99b22..36512fb75a201c1a1ab5aa68b694667b56b96036 100644 (file)
@@ -541,6 +541,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
        if (nargs > NR_FWNODE_REFERENCE_ARGS)
                return -EINVAL;
 
+       if (!args)
+               return 0;
+
        args->fwnode = software_node_get(refnode);
        args->nargs = nargs;
 
@@ -747,10 +750,10 @@ static void software_node_release(struct kobject *kobj)
        struct swnode *swnode = kobj_to_swnode(kobj);
 
        if (swnode->parent) {
-               ida_simple_remove(&swnode->parent->child_ids, swnode->id);
+               ida_free(&swnode->parent->child_ids, swnode->id);
                list_del(&swnode->entry);
        } else {
-               ida_simple_remove(&swnode_root_ids, swnode->id);
+               ida_free(&swnode_root_ids, swnode->id);
        }
 
        if (swnode->allocated)
@@ -776,8 +779,8 @@ swnode_register(const struct software_node *node, struct swnode *parent,
        if (!swnode)
                return ERR_PTR(-ENOMEM);
 
-       ret = ida_simple_get(parent ? &parent->child_ids : &swnode_root_ids,
-                            0, 0, GFP_KERNEL);
+       ret = ida_alloc(parent ? &parent->child_ids : &swnode_root_ids,
+                       GFP_KERNEL);
        if (ret < 0) {
                kfree(swnode);
                return ERR_PTR(ret);
index d2dbf8aaccb5b1320909964b3092ea47df90cc4a..b1b47d88f5db44dd9dcb8fc7468718ac36869130 100644 (file)
@@ -333,6 +333,7 @@ aoeblk_gdalloc(void *vp)
        struct gendisk *gd;
        mempool_t *mp;
        struct blk_mq_tag_set *set;
+       sector_t ssize;
        ulong flags;
        int late = 0;
        int err;
@@ -396,7 +397,7 @@ aoeblk_gdalloc(void *vp)
        gd->minors = AOE_PARTITIONS;
        gd->fops = &aoe_bdops;
        gd->private_data = d;
-       set_capacity(gd, d->ssize);
+       ssize = d->ssize;
        snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
                d->aoemajor, d->aoeminor);
 
@@ -405,6 +406,8 @@ aoeblk_gdalloc(void *vp)
 
        spin_unlock_irqrestore(&d->lock, flags);
 
+       set_capacity(gd, ssize);
+
        err = device_add_disk(NULL, gd, aoe_attr_groups);
        if (err)
                goto out_disk_cleanup;
index 146b32fa7b47ade338d0379e021aa224412ec657..f8145499da38c834225b8f2d2ee0448d19adc8e1 100644 (file)
@@ -165,39 +165,37 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
        return get_size(lo->lo_offset, lo->lo_sizelimit, file);
 }
 
+/*
+ * We support direct I/O only if lo_offset is aligned with the logical I/O size
+ * of backing device, and the logical block size of loop is bigger than that of
+ * the backing device.
+ */
+static bool lo_bdev_can_use_dio(struct loop_device *lo,
+               struct block_device *backing_bdev)
+{
+       unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
+
+       if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
+               return false;
+       if (lo->lo_offset & (sb_bsize - 1))
+               return false;
+       return true;
+}
+
 static void __loop_update_dio(struct loop_device *lo, bool dio)
 {
        struct file *file = lo->lo_backing_file;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
-       unsigned short sb_bsize = 0;
-       unsigned dio_align = 0;
+       struct inode *inode = file->f_mapping->host;
+       struct block_device *backing_bdev = NULL;
        bool use_dio;
 
-       if (inode->i_sb->s_bdev) {
-               sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
-               dio_align = sb_bsize - 1;
-       }
+       if (S_ISBLK(inode->i_mode))
+               backing_bdev = I_BDEV(inode);
+       else if (inode->i_sb->s_bdev)
+               backing_bdev = inode->i_sb->s_bdev;
 
-       /*
-        * We support direct I/O only if lo_offset is aligned with the
-        * logical I/O size of backing device, and the logical block
-        * size of loop is bigger than the backing device's.
-        *
-        * TODO: the above condition may be loosed in the future, and
-        * direct I/O may be switched runtime at that time because most
-        * of requests in sane applications should be PAGE_SIZE aligned
-        */
-       if (dio) {
-               if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
-                   !(lo->lo_offset & dio_align) &&
-                   (file->f_mode & FMODE_CAN_ODIRECT))
-                       use_dio = true;
-               else
-                       use_dio = false;
-       } else {
-               use_dio = false;
-       }
+       use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
+               (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
 
        if (lo->use_dio == use_dio)
                return;
index 4e72ec4e25ac5a0f41bca299e7efaecf6503c451..33a8f37bb6a1f504060f783c6d727e4c76026a2e 100644 (file)
@@ -508,7 +508,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
                       struct iov_iter *iter, int msg_flags, int *sent)
 {
        int result;
-       struct msghdr msg;
+       struct msghdr msg = {} ;
        unsigned int noreclaim_flag;
 
        if (unlikely(!sock)) {
@@ -524,10 +524,6 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
        do {
                sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                sock->sk->sk_use_task_frag = false;
-               msg.msg_name = NULL;
-               msg.msg_namelen = 0;
-               msg.msg_control = NULL;
-               msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;
 
                if (send)
index 9f7695f00c2db8494a0230f2337a64d2fb4d3a14..36755f263e8ec03b1828bf44a05cc3b54bb6a03f 100644 (file)
@@ -1840,7 +1840,7 @@ static void null_del_dev(struct nullb *nullb)
 
        dev = nullb->dev;
 
-       ida_simple_remove(&nullb_indexes, nullb->index);
+       ida_free(&nullb_indexes, nullb->index);
 
        list_del_init(&nullb->list);
 
@@ -2174,7 +2174,7 @@ static int null_add_dev(struct nullb_device *dev)
        blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
 
        mutex_lock(&lock);
-       rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+       rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
        if (rv < 0) {
                mutex_unlock(&lock);
                goto out_cleanup_zone;
index a999b698b131f7763916c3bd0de5c87478fd0df4..12b5d53ec85645fb22395d41adef81d13cdb7292 100644 (file)
@@ -3452,14 +3452,15 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
 static void rbd_lock_del_request(struct rbd_img_request *img_req)
 {
        struct rbd_device *rbd_dev = img_req->rbd_dev;
-       bool need_wakeup;
+       bool need_wakeup = false;
 
        lockdep_assert_held(&rbd_dev->lock_rwsem);
        spin_lock(&rbd_dev->lock_lists_lock);
-       rbd_assert(!list_empty(&img_req->lock_item));
-       list_del_init(&img_req->lock_item);
-       need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
-                      list_empty(&rbd_dev->running_list));
+       if (!list_empty(&img_req->lock_item)) {
+               list_del_init(&img_req->lock_item);
+               need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+                              list_empty(&rbd_dev->running_list));
+       }
        spin_unlock(&rbd_dev->lock_lists_lock);
        if (need_wakeup)
                complete(&rbd_dev->releasing_wait);
@@ -3842,14 +3843,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
                return;
        }
 
-       list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
+       while (!list_empty(&rbd_dev->acquiring_list)) {
+               img_req = list_first_entry(&rbd_dev->acquiring_list,
+                                          struct rbd_img_request, lock_item);
                mutex_lock(&img_req->state_mutex);
                rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
+               if (!result)
+                       list_move_tail(&img_req->lock_item,
+                                      &rbd_dev->running_list);
+               else
+                       list_del_init(&img_req->lock_item);
                rbd_img_schedule(img_req, result);
                mutex_unlock(&img_req->state_mutex);
        }
-
-       list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
 }
 
 static bool locker_equal(const struct ceph_locker *lhs,
@@ -5326,7 +5332,7 @@ static void rbd_dev_release(struct device *dev)
 
        if (need_put) {
                destroy_workqueue(rbd_dev->task_wq);
-               ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+               ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
        }
 
        rbd_dev_free(rbd_dev);
@@ -5402,9 +5408,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
                return NULL;
 
        /* get an id and fill in device name */
-       rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
-                                        minor_to_rbd_dev_id(1 << MINORBITS),
-                                        GFP_KERNEL);
+       rbd_dev->dev_id = ida_alloc_max(&rbd_dev_id_ida,
+                                       minor_to_rbd_dev_id(1 << MINORBITS) - 1,
+                                       GFP_KERNEL);
        if (rbd_dev->dev_id < 0)
                goto fail_rbd_dev;
 
@@ -5425,7 +5431,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
        return rbd_dev;
 
 fail_dev_id:
-       ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+       ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
 fail_rbd_dev:
        rbd_dev_free(rbd_dev);
        return NULL;
index 3b6b9abb8ce1f4d90f66b5ff94c1499b98208fd8..2bf14a0e2815f6292a02b3fa1e394489af780074 100644 (file)
@@ -367,8 +367,6 @@ static void virtblk_done(struct virtqueue *vq)
                                blk_mq_complete_request(req);
                        req_done = true;
                }
-               if (unlikely(virtqueue_is_broken(vq)))
-                       break;
        } while (!virtqueue_enable_cb(vq));
 
        /* In case queue is stopped waiting for more buffers. */
@@ -1595,14 +1593,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk = vdev->priv;
 
+       /* Ensure no requests in virtqueues before deleting vqs. */
+       blk_mq_freeze_queue(vblk->disk->queue);
+
        /* Ensure we don't receive any more interrupts */
        virtio_reset_device(vdev);
 
        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);
 
-       blk_mq_quiesce_queue(vblk->disk->queue);
-
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
 
@@ -1620,7 +1619,7 @@ static int virtblk_restore(struct virtio_device *vdev)
 
        virtio_device_ready(vdev);
 
-       blk_mq_unquiesce_queue(vblk->disk->queue);
+       blk_mq_unfreeze_queue(vblk->disk->queue);
        return 0;
 }
 #endif
index 203a000a84e341a9b4d3670df6f58ebb09114d8c..3c84fcbda01aa3791aac96c97a6fba9ea6d211ce 100644 (file)
@@ -383,8 +383,8 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
        }
 }
 
-static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
-                                size_t count)
+static ssize_t btmtkuart_receive_buf(struct serdev_device *serdev,
+                                    const u8 *data, size_t count)
 {
        struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
 
index b7c56be078f815e9cf59ae96b46a3ae2afe54c3a..1d592ac413d1ff6e9cdabe3d6b461b42c6c95dba 100644 (file)
@@ -1264,8 +1264,8 @@ static const struct h4_recv_pkt nxp_recv_pkts[] = {
        { NXP_RECV_FW_REQ_V3,   .recv = nxp_recv_fw_req_v3 },
 };
 
-static int btnxpuart_receive_buf(struct serdev_device *serdev, const u8 *data,
-                                size_t count)
+static ssize_t btnxpuart_receive_buf(struct serdev_device *serdev,
+                                    const u8 *data, size_t count)
 {
        struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
 
index 7835170b1d66182d6bd5c9dc433a7f1c4ed10c38..d31edad7a05607407c6dd4c9f1e14edc571d9385 100644 (file)
@@ -4796,10 +4796,8 @@ static struct usb_driver btusb_driver = {
        .disable_hub_initiated_lpm = 1,
 
 #ifdef CONFIG_DEV_COREDUMP
-       .drvwrap = {
-               .driver = {
-                       .coredump = btusb_coredump,
-               },
+       .driver = {
+               .coredump = btusb_coredump,
        },
 #endif
 };
index f16fd79bc02b8ab6c8e984faf6bf4c5bdb42bd8f..39c8b567da3c0e82d7ec3707db6fb909ea05f8ff 100644 (file)
@@ -271,8 +271,8 @@ static void hci_uart_write_wakeup(struct serdev_device *serdev)
  *
  * Return: number of processed bytes
  */
-static int hci_uart_receive_buf(struct serdev_device *serdev, const u8 *data,
-                                  size_t count)
+static ssize_t hci_uart_receive_buf(struct serdev_device *serdev,
+                                   const u8 *data, size_t count)
 {
        struct hci_uart *hu = serdev_device_get_drvdata(serdev);
 
index a2125fa5fe2f97e5240c62f9503b7fb39e2ad3a2..577965f95fda9e27e9c21adb98df441ac1ad26a7 100644 (file)
@@ -126,6 +126,7 @@ struct mhi_ep_ring {
        union mhi_ep_ring_ctx *ring_ctx;
        struct mhi_ring_element *ring_cache;
        enum mhi_ep_ring_type type;
+       struct delayed_work intmodt_work;
        u64 rbase;
        size_t rd_offset;
        size_t wr_offset;
@@ -135,7 +136,9 @@ struct mhi_ep_ring {
        u32 ch_id;
        u32 er_index;
        u32 irq_vector;
+       u32 intmodt;
        bool started;
+       bool irq_pending;
 };
 
 struct mhi_ep_cmd {
@@ -159,6 +162,7 @@ struct mhi_ep_chan {
        void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
        enum mhi_ch_state state;
        enum dma_data_direction dir;
+       size_t rd_offset;
        u64 tre_loc;
        u32 tre_size;
        u32 tre_bytes_left;
index 600881808982aa5686636ccd2bf9a19892a25177..65fc1d738bec2671b6f0d08fb30e714c81df1167 100644 (file)
@@ -54,11 +54,27 @@ static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
        mutex_unlock(&mhi_cntrl->event_lock);
 
        /*
-        * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
-        * set this flag for interrupt moderation as per MHI protocol.
+        * As per the MHI specification, section 4.3, Interrupt moderation:
+        *
+        * 1. If BEI flag is not set, cancel any pending intmodt work if started
+        * for the event ring and raise IRQ immediately.
+        *
+        * 2. If both BEI and intmodt are set, and if no IRQ is pending for the
+        * same event ring, start the IRQ delayed work as per the value of
+        * intmodt. If previous IRQ is pending, then do nothing as the pending
+        * IRQ is enough for the host to process the current event ring element.
+        *
+        * 3. If BEI is set and intmodt is not set, no need to raise IRQ.
         */
-       if (!bei)
+       if (!bei) {
+               if (READ_ONCE(ring->irq_pending))
+                       cancel_delayed_work(&ring->intmodt_work);
+
                mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+       } else if (ring->intmodt && !READ_ONCE(ring->irq_pending)) {
+               WRITE_ONCE(ring->irq_pending, true);
+               schedule_delayed_work(&ring->intmodt_work, msecs_to_jiffies(ring->intmodt));
+       }
 
        return 0;
 
@@ -71,45 +87,77 @@ err_unlock:
 static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
                                        struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
 {
-       struct mhi_ring_element event = {};
+       struct mhi_ring_element *event;
+       int ret;
+
+       event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+       if (!event)
+               return -ENOMEM;
 
-       event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
-       event.dword[0] = MHI_TRE_EV_DWORD0(code, len);
-       event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+       event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
+       event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
+       event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
 
-       return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre));
+       ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
+       kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+       return ret;
 }
 
 int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
 {
-       struct mhi_ring_element event = {};
+       struct mhi_ring_element *event;
+       int ret;
 
-       event.dword[0] = MHI_SC_EV_DWORD0(state);
-       event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+       event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+       if (!event)
+               return -ENOMEM;
 
-       return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+       event->dword[0] = MHI_SC_EV_DWORD0(state);
+       event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+
+       ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+       kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+       return ret;
 }
 
 int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
 {
-       struct mhi_ring_element event = {};
+       struct mhi_ring_element *event;
+       int ret;
 
-       event.dword[0] = MHI_EE_EV_DWORD0(exec_env);
-       event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+       event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+       if (!event)
+               return -ENOMEM;
+
+       event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
+       event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
 
-       return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+       ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+       kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
+
+       return ret;
 }
 
 static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
 {
        struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
-       struct mhi_ring_element event = {};
+       struct mhi_ring_element *event;
+       int ret;
+
+       event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+       if (!event)
+               return -ENOMEM;
+
+       event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
+       event->dword[0] = MHI_CC_EV_DWORD0(code);
+       event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
 
-       event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
-       event.dword[0] = MHI_CC_EV_DWORD0(code);
-       event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+       ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
+       kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
 
-       return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
+       return ret;
 }
 
 static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
@@ -151,6 +199,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
 
                                goto err_unlock;
                        }
+
+                       mhi_chan->rd_offset = ch_ring->rd_offset;
                }
 
                /* Set channel state to RUNNING */
@@ -280,22 +330,85 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio
        struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
 
-       return !!(ring->rd_offset == ring->wr_offset);
+       return !!(mhi_chan->rd_offset == ring->wr_offset);
 }
 EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
 
+static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info)
+{
+       struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+       struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+       struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan;
+       struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+       struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+       struct mhi_result result = {};
+       int ret;
+
+       if (mhi_chan->xfer_cb) {
+               result.buf_addr = buf_info->cb_buf;
+               result.dir = mhi_chan->dir;
+               result.bytes_xferd = buf_info->size;
+
+               mhi_chan->xfer_cb(mhi_dev, &result);
+       }
+
+       /*
+        * The host will split the data packet into multiple TREs if it can't fit
+        * the packet in a single TRE. In that case, CHAIN flag will be set by the
+        * host for all TREs except the last one.
+        */
+       if (buf_info->code != MHI_EV_CC_OVERFLOW) {
+               if (MHI_TRE_DATA_GET_CHAIN(el)) {
+                       /*
+                        * IEOB (Interrupt on End of Block) flag will be set by the host if
+                        * it expects the completion event for all TREs of a TD.
+                        */
+                       if (MHI_TRE_DATA_GET_IEOB(el)) {
+                               ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+                                                            MHI_TRE_DATA_GET_LEN(el),
+                                                            MHI_EV_CC_EOB);
+                               if (ret < 0) {
+                                       dev_err(&mhi_chan->mhi_dev->dev,
+                                               "Error sending transfer compl. event\n");
+                                       goto err_free_tre_buf;
+                               }
+                       }
+               } else {
+                       /*
+                        * IEOT (Interrupt on End of Transfer) flag will be set by the host
+                        * for the last TRE of the TD and expects the completion event for
+                        * the same.
+                        */
+                       if (MHI_TRE_DATA_GET_IEOT(el)) {
+                               ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+                                                            MHI_TRE_DATA_GET_LEN(el),
+                                                            MHI_EV_CC_EOT);
+                               if (ret < 0) {
+                                       dev_err(&mhi_chan->mhi_dev->dev,
+                                               "Error sending transfer compl. event\n");
+                                       goto err_free_tre_buf;
+                               }
+                       }
+               }
+       }
+
+       mhi_ep_ring_inc_index(ring);
+
+err_free_tre_buf:
+       kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf);
+}
+
 static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
-                               struct mhi_ep_ring *ring,
-                               struct mhi_result *result,
-                               u32 len)
+                              struct mhi_ep_ring *ring)
 {
        struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        size_t tr_len, read_offset, write_offset;
+       struct mhi_ep_buf_info buf_info = {};
+       u32 len = MHI_EP_DEFAULT_MTU;
        struct mhi_ring_element *el;
        bool tr_done = false;
-       void *write_addr;
-       u64 read_addr;
+       void *buf_addr;
        u32 buf_left;
        int ret;
 
@@ -308,7 +421,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
                        return -ENODEV;
                }
 
-               el = &ring->ring_cache[ring->rd_offset];
+               el = &ring->ring_cache[mhi_chan->rd_offset];
 
                /* Check if there is data pending to be read from previous read operation */
                if (mhi_chan->tre_bytes_left) {
@@ -324,81 +437,51 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 
                read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
                write_offset = len - buf_left;
-               read_addr = mhi_chan->tre_loc + read_offset;
-               write_addr = result->buf_addr + write_offset;
+
+               buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+               if (!buf_addr)
+                       return -ENOMEM;
+
+               buf_info.host_addr = mhi_chan->tre_loc + read_offset;
+               buf_info.dev_addr = buf_addr + write_offset;
+               buf_info.size = tr_len;
+               buf_info.cb = mhi_ep_read_completion;
+               buf_info.cb_buf = buf_addr;
+               buf_info.mhi_dev = mhi_chan->mhi_dev;
+
+               if (mhi_chan->tre_bytes_left - tr_len)
+                       buf_info.code = MHI_EV_CC_OVERFLOW;
 
                dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
-               ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
+               ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
                if (ret < 0) {
                        dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
-                       return ret;
+                       goto err_free_buf_addr;
                }
 
                buf_left -= tr_len;
                mhi_chan->tre_bytes_left -= tr_len;
 
-               /*
-                * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
-                * read completely:
-                *
-                * 1. Send completion event to the host based on the flags set in TRE.
-                * 2. Increment the local read offset of the transfer ring.
-                */
                if (!mhi_chan->tre_bytes_left) {
-                       /*
-                        * The host will split the data packet into multiple TREs if it can't fit
-                        * the packet in a single TRE. In that case, CHAIN flag will be set by the
-                        * host for all TREs except the last one.
-                        */
-                       if (MHI_TRE_DATA_GET_CHAIN(el)) {
-                               /*
-                                * IEOB (Interrupt on End of Block) flag will be set by the host if
-                                * it expects the completion event for all TREs of a TD.
-                                */
-                               if (MHI_TRE_DATA_GET_IEOB(el)) {
-                                       ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
-                                                                    MHI_TRE_DATA_GET_LEN(el),
-                                                                    MHI_EV_CC_EOB);
-                                       if (ret < 0) {
-                                               dev_err(&mhi_chan->mhi_dev->dev,
-                                                       "Error sending transfer compl. event\n");
-                                               return ret;
-                                       }
-                               }
-                       } else {
-                               /*
-                                * IEOT (Interrupt on End of Transfer) flag will be set by the host
-                                * for the last TRE of the TD and expects the completion event for
-                                * the same.
-                                */
-                               if (MHI_TRE_DATA_GET_IEOT(el)) {
-                                       ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
-                                                                    MHI_TRE_DATA_GET_LEN(el),
-                                                                    MHI_EV_CC_EOT);
-                                       if (ret < 0) {
-                                               dev_err(&mhi_chan->mhi_dev->dev,
-                                                       "Error sending transfer compl. event\n");
-                                               return ret;
-                                       }
-                               }
-
+                       if (MHI_TRE_DATA_GET_IEOT(el))
                                tr_done = true;
-                       }
 
-                       mhi_ep_ring_inc_index(ring);
+                       mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
                }
-
-               result->bytes_xferd += tr_len;
        } while (buf_left && !tr_done);
 
        return 0;
+
+err_free_buf_addr:
+       kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);
+
+       return ret;
 }
 
-static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
 {
        struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
        struct mhi_result result = {};
-       u32 len = MHI_EP_DEFAULT_MTU;
        struct mhi_ep_chan *mhi_chan;
        int ret;
 
@@ -419,44 +502,59 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
                mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
        } else {
                /* UL channel */
-               result.buf_addr = kzalloc(len, GFP_KERNEL);
-               if (!result.buf_addr)
-                       return -ENOMEM;
-
                do {
-                       ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+                       ret = mhi_ep_read_channel(mhi_cntrl, ring);
                        if (ret < 0) {
                                dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
-                               kfree(result.buf_addr);
                                return ret;
                        }
 
-                       result.dir = mhi_chan->dir;
-                       mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
-                       result.bytes_xferd = 0;
-                       memset(result.buf_addr, 0, len);
-
                        /* Read until the ring becomes empty */
                } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
-
-               kfree(result.buf_addr);
        }
 
        return 0;
 }
 
+static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
+{
+       struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+       struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+       struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+       struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+       struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+       struct device *dev = &mhi_dev->dev;
+       struct mhi_result result = {};
+       int ret;
+
+       if (mhi_chan->xfer_cb) {
+               result.buf_addr = buf_info->cb_buf;
+               result.dir = mhi_chan->dir;
+               result.bytes_xferd = buf_info->size;
+
+               mhi_chan->xfer_cb(mhi_dev, &result);
+       }
+
+       ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
+                                          buf_info->code);
+       if (ret) {
+               dev_err(dev, "Error sending transfer completion event\n");
+               return;
+       }
+
+       mhi_ep_ring_inc_index(ring);
+}
+
 /* TODO: Handle partially formed TDs */
 int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
 {
        struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
        struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
        struct device *dev = &mhi_chan->mhi_dev->dev;
+       struct mhi_ep_buf_info buf_info = {};
        struct mhi_ring_element *el;
        u32 buf_left, read_offset;
        struct mhi_ep_ring *ring;
-       enum mhi_ev_ccs code;
-       void *read_addr;
-       u64 write_addr;
        size_t tr_len;
        u32 tre_len;
        int ret;
@@ -480,40 +578,44 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
                        goto err_exit;
                }
 
-               el = &ring->ring_cache[ring->rd_offset];
+               el = &ring->ring_cache[mhi_chan->rd_offset];
                tre_len = MHI_TRE_DATA_GET_LEN(el);
 
                tr_len = min(buf_left, tre_len);
                read_offset = skb->len - buf_left;
-               read_addr = skb->data + read_offset;
-               write_addr = MHI_TRE_DATA_GET_PTR(el);
 
-               dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
-               ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
-               if (ret < 0) {
-                       dev_err(dev, "Error writing to the channel\n");
-                       goto err_exit;
-               }
+               buf_info.dev_addr = skb->data + read_offset;
+               buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
+               buf_info.size = tr_len;
+               buf_info.cb = mhi_ep_skb_completion;
+               buf_info.cb_buf = skb;
+               buf_info.mhi_dev = mhi_dev;
 
-               buf_left -= tr_len;
                /*
                 * For all TREs queued by the host for DL channel, only the EOT flag will be set.
                 * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
                 * the host so that the host can adjust the packet boundary to next TREs. Else send
                 * the EOT event to the host indicating the packet boundary.
                 */
-               if (buf_left)
-                       code = MHI_EV_CC_OVERFLOW;
+               if (buf_left - tr_len)
+                       buf_info.code = MHI_EV_CC_OVERFLOW;
                else
-                       code = MHI_EV_CC_EOT;
+                       buf_info.code = MHI_EV_CC_EOT;
 
-               ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
-               if (ret) {
-                       dev_err(dev, "Error sending transfer completion event\n");
+               dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+               ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
+               if (ret < 0) {
+                       dev_err(dev, "Error writing to the channel\n");
                        goto err_exit;
                }
 
-               mhi_ep_ring_inc_index(ring);
+               buf_left -= tr_len;
+
+               /*
+                * Update the read offset cached in mhi_chan. Actual read offset
+                * will be updated by the completion handler.
+                */
+               mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
        } while (buf_left);
 
        mutex_unlock(&mhi_chan->lock);
@@ -714,7 +816,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
        struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        struct mhi_ep_ring_item *itr, *tmp;
-       struct mhi_ring_element *el;
        struct mhi_ep_ring *ring;
        struct mhi_ep_chan *chan;
        unsigned long flags;
@@ -748,31 +849,29 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
                if (ret) {
                        dev_err(dev, "Error updating write offset for ring\n");
                        mutex_unlock(&chan->lock);
-                       kfree(itr);
+                       kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
                        continue;
                }
 
                /* Sanity check to make sure there are elements in the ring */
-               if (ring->rd_offset == ring->wr_offset) {
+               if (chan->rd_offset == ring->wr_offset) {
                        mutex_unlock(&chan->lock);
-                       kfree(itr);
+                       kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
                        continue;
                }
 
-               el = &ring->ring_cache[ring->rd_offset];
-
                dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
-               ret = mhi_ep_process_ch_ring(ring, el);
+               ret = mhi_ep_process_ch_ring(ring);
                if (ret) {
                        dev_err(dev, "Error processing ring for channel (%u): %d\n",
                                ring->ch_id, ret);
                        mutex_unlock(&chan->lock);
-                       kfree(itr);
+                       kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
                        continue;
                }
 
                mutex_unlock(&chan->lock);
-               kfree(itr);
+               kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
        }
 }
 
@@ -828,7 +927,7 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon
                u32 ch_id = ch_idx + i;
 
                ring = &mhi_cntrl->mhi_chan[ch_id].ring;
-               item = kzalloc(sizeof(*item), GFP_ATOMIC);
+               item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
                if (!item)
                        return;
 
@@ -1365,6 +1464,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
        if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
                return -EINVAL;
 
+       if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync ||
+           !mhi_cntrl->read_async || !mhi_cntrl->write_async)
+               return -EINVAL;
+
        ret = mhi_ep_chan_init(mhi_cntrl, config);
        if (ret)
                return ret;
@@ -1375,6 +1478,29 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
                goto err_free_ch;
        }
 
+       mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
+                                                       sizeof(struct mhi_ring_element), 0,
+                                                       SLAB_CACHE_DMA, NULL);
+       if (!mhi_cntrl->ev_ring_el_cache) {
+               ret = -ENOMEM;
+               goto err_free_cmd;
+       }
+
+       mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
+                                                     SLAB_CACHE_DMA, NULL);
+       if (!mhi_cntrl->tre_buf_cache) {
+               ret = -ENOMEM;
+               goto err_destroy_ev_ring_el_cache;
+       }
+
+       mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
+                                                       sizeof(struct mhi_ep_ring_item), 0,
+                                                       0, NULL);
+       if (!mhi_cntrl->ev_ring_el_cache) {
+               ret = -ENOMEM;
+               goto err_destroy_tre_buf_cache;
+       }
+
        INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
        INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
        INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
@@ -1383,7 +1509,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
        mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
        if (!mhi_cntrl->wq) {
                ret = -ENOMEM;
-               goto err_free_cmd;
+               goto err_destroy_ring_item_cache;
        }
 
        INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
@@ -1442,6 +1568,12 @@ err_ida_free:
        ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
 err_destroy_wq:
        destroy_workqueue(mhi_cntrl->wq);
+err_destroy_ring_item_cache:
+       kmem_cache_destroy(mhi_cntrl->ring_item_cache);
+err_destroy_ev_ring_el_cache:
+       kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
+err_destroy_tre_buf_cache:
+       kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
 err_free_cmd:
        kfree(mhi_cntrl->mhi_cmd);
 err_free_ch:
@@ -1463,6 +1595,9 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
 
        free_irq(mhi_cntrl->irq, mhi_cntrl);
 
+       kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
+       kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
+       kmem_cache_destroy(mhi_cntrl->ring_item_cache);
        kfree(mhi_cntrl->mhi_cmd);
        kfree(mhi_cntrl->mhi_chan);
 
index 115518ec76a43a320e4bdbcc4b2978f0518efaf7..aeb53b2c34a8cd859393529d0c8860462bc687ed 100644 (file)
@@ -30,7 +30,8 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
 {
        struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
-       size_t start, copy_size;
+       struct mhi_ep_buf_info buf_info = {};
+       size_t start;
        int ret;
 
        /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
@@ -43,30 +44,34 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
 
        start = ring->wr_offset;
        if (start < end) {
-               copy_size = (end - start) * sizeof(struct mhi_ring_element);
-               ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
-                                               (start * sizeof(struct mhi_ring_element)),
-                                               &ring->ring_cache[start], copy_size);
+               buf_info.size = (end - start) * sizeof(struct mhi_ring_element);
+               buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+               buf_info.dev_addr = &ring->ring_cache[start];
+
+               ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
                if (ret < 0)
                        return ret;
        } else {
-               copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
-               ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
-                                               (start * sizeof(struct mhi_ring_element)),
-                                               &ring->ring_cache[start], copy_size);
+               buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+               buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+               buf_info.dev_addr = &ring->ring_cache[start];
+
+               ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
                if (ret < 0)
                        return ret;
 
                if (end) {
-                       ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
-                                                       &ring->ring_cache[0],
-                                                       end * sizeof(struct mhi_ring_element));
+                       buf_info.host_addr = ring->rbase;
+                       buf_info.dev_addr = &ring->ring_cache[0];
+                       buf_info.size = end * sizeof(struct mhi_ring_element);
+
+                       ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
                        if (ret < 0)
                                return ret;
                }
        }
 
-       dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
+       dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size);
 
        return 0;
 }
@@ -102,6 +107,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
 {
        struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
+       struct mhi_ep_buf_info buf_info = {};
        size_t old_offset = 0;
        u32 num_free_elem;
        __le64 rp;
@@ -133,12 +139,11 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
        rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
        memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
 
-       ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
-                                      sizeof(*el));
-       if (ret < 0)
-               return ret;
+       buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+       buf_info.dev_addr = el;
+       buf_info.size = sizeof(*el);
 
-       return 0;
+       return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
 }
 
 void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
@@ -157,6 +162,15 @@ void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32
        }
 }
 
+static void mhi_ep_raise_irq(struct work_struct *work)
+{
+       struct mhi_ep_ring *ring = container_of(work, struct mhi_ep_ring, intmodt_work.work);
+       struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+
+       mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+       WRITE_ONCE(ring->irq_pending, false);
+}
+
 int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
                        union mhi_ep_ring_ctx *ctx)
 {
@@ -173,8 +187,13 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
        if (ring->type == RING_TYPE_CH)
                ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
 
-       if (ring->type == RING_TYPE_ER)
+       if (ring->type == RING_TYPE_ER) {
                ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
+               ring->intmodt = FIELD_GET(EV_CTX_INTMODT_MASK,
+                                         le32_to_cpu(ring->ring_ctx->ev.intmod));
+
+               INIT_DELAYED_WORK(&ring->intmodt_work, mhi_ep_raise_irq);
+       }
 
        /* During ring init, both rp and wp are equal */
        memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
@@ -201,6 +220,9 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
 
 void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
 {
+       if (ring->type == RING_TYPE_ER)
+               cancel_delayed_work_sync(&ring->intmodt_work);
+
        ring->started = false;
        kfree(ring->ring_cache);
        ring->ring_cache = NULL;
index f78aefd2d7a3625361e98ec8f800aa748fd57f3d..65ceac1837f9a1a0a133715fafad3cf8d490d309 100644 (file)
@@ -881,6 +881,7 @@ static int parse_config(struct mhi_controller *mhi_cntrl,
        if (!mhi_cntrl->timeout_ms)
                mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
 
+       mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms;
        mhi_cntrl->bounce_buf = config->use_bounce_buf;
        mhi_cntrl->buffer_len = config->buf_len;
        if (!mhi_cntrl->buffer_len)
index 2e139e76de4c0375b3cf9d711097b3e97da50f24..30ac415a3000f687367689ece738ed48d70677e9 100644 (file)
@@ -321,7 +321,7 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
                                    u32 *out);
 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
                                    void __iomem *base, u32 offset, u32 mask,
-                                   u32 val, u32 delayus);
+                                   u32 val, u32 delayus, u32 timeout_ms);
 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
                   u32 offset, u32 val);
 int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
index dcf627b36e829e8554edebf64f9776c37fb8635c..abb561db9ae1d556be5f9e393e7af562f74754d0 100644 (file)
@@ -40,10 +40,11 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
 
 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
                                    void __iomem *base, u32 offset,
-                                   u32 mask, u32 val, u32 delayus)
+                                   u32 mask, u32 val, u32 delayus,
+                                   u32 timeout_ms)
 {
        int ret;
-       u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+       u32 out, retry = (timeout_ms * 1000) / delayus;
 
        while (retry--) {
                ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
@@ -268,7 +269,8 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
 
 static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
 {
-       return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
+       return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
+                       !(addr & (sizeof(struct mhi_ring_element) - 1));
 }
 
 int mhi_destroy_device(struct device *dev, void *data)
@@ -642,6 +644,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
                        mhi_del_ring_element(mhi_cntrl, tre_ring);
                        local_rp = tre_ring->rp;
 
+                       read_unlock_bh(&mhi_chan->lock);
+
                        /* notify client */
                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 
@@ -667,6 +671,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
                                        kfree(buf_info->cb_buf);
                                }
                        }
+
+                       read_lock_bh(&mhi_chan->lock);
                }
                break;
        } /* CC_EOT */
@@ -1122,17 +1128,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
        if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
                return -EIO;
 
-       read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
        ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
-       if (unlikely(ret)) {
-               ret = -EAGAIN;
-               goto exit_unlock;
-       }
+       if (unlikely(ret))
+               return -EAGAIN;
 
        ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
        if (unlikely(ret))
-               goto exit_unlock;
+               return ret;
+
+       read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 
        /* Packet is queued, take a usage ref to exit M3 if necessary
         * for host->device buffer, balanced put is done on buffer completion
@@ -1152,7 +1156,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
        if (dir == DMA_FROM_DEVICE)
                mhi_cntrl->runtime_put(mhi_cntrl);
 
-exit_unlock:
        read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
 
        return ret;
@@ -1204,6 +1207,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
        int eot, eob, chain, bei;
        int ret;
 
+       /* Protect accesses for reading and incrementing WP */
+       write_lock_bh(&mhi_chan->lock);
+
        buf_ring = &mhi_chan->buf_ring;
        tre_ring = &mhi_chan->tre_ring;
 
@@ -1221,8 +1227,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 
        if (!info->pre_mapped) {
                ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
-               if (ret)
+               if (ret) {
+                       write_unlock_bh(&mhi_chan->lock);
                        return ret;
+               }
        }
 
        eob = !!(flags & MHI_EOB);
@@ -1239,6 +1247,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
        mhi_add_ring_element(mhi_cntrl, tre_ring);
        mhi_add_ring_element(mhi_cntrl, buf_ring);
 
+       write_unlock_bh(&mhi_chan->lock);
+
        return 0;
 }
 
index 08f3f039dbddcf997c26daaaaa604b283d1096d2..cd6cd14b3d29b17c0a9ce78ef40a902686d26a48 100644 (file)
@@ -269,6 +269,16 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
        MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
 };
 
+static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
+       .max_channels = 128,
+       .timeout_ms = 8000,
+       .ready_timeout_ms = 50000,
+       .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
+       .ch_cfg = modem_qcom_v1_mhi_channels,
+       .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
+       .event_cfg = modem_qcom_v1_mhi_events,
+};
+
 static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
        .max_channels = 128,
        .timeout_ms = 8000,
@@ -278,6 +288,16 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
        .event_cfg = modem_qcom_v1_mhi_events,
 };
 
+static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
+       .name = "qcom-sdx75m",
+       .fw = "qcom/sdx75m/xbl.elf",
+       .edl = "qcom/sdx75m/edl.mbn",
+       .config = &modem_qcom_v2_mhiv_config,
+       .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+       .dma_data_width = 32,
+       .sideband_wake = false,
+};
+
 static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
        .name = "qcom-sdx65m",
        .fw = "qcom/sdx65m/xbl.elf",
@@ -600,6 +620,8 @@ static const struct pci_device_id mhi_pci_id_table[] = {
                .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
        { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
                .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+       { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
+               .driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
        { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
                .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
        { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
index 8a4362d75fc4375635be266406a3a364b58df363..a2f2feef14768a83ad2d0f6f2f7f5db1cd027311 100644 (file)
@@ -163,6 +163,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
        enum mhi_pm_state cur_state;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        u32 interval_us = 25000; /* poll register field every 25 milliseconds */
+       u32 timeout_ms;
        int ret, i;
 
        /* Check if device entered error state */
@@ -173,14 +174,18 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
 
        /* Wait for RESET to be cleared and READY bit to be set by the device */
        ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
-                                MHICTRL_RESET_MASK, 0, interval_us);
+                                MHICTRL_RESET_MASK, 0, interval_us,
+                                mhi_cntrl->timeout_ms);
        if (ret) {
                dev_err(dev, "Device failed to clear MHI Reset\n");
                return ret;
        }
 
+       timeout_ms = mhi_cntrl->ready_timeout_ms ?
+               mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
        ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
-                                MHISTATUS_READY_MASK, 1, interval_us);
+                                MHISTATUS_READY_MASK, 1, interval_us,
+                                timeout_ms);
        if (ret) {
                dev_err(dev, "Device failed to enter MHI Ready\n");
                return ret;
@@ -479,7 +484,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
 
                /* Wait for the reset bit to be cleared by the device */
                ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
-                                MHICTRL_RESET_MASK, 0, 25000);
+                                MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms);
                if (ret)
                        dev_err(dev, "Device failed to clear MHI Reset\n");
 
@@ -492,8 +497,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
                if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
                        /* wait for ready to be set */
                        ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
-                                                MHISTATUS,
-                                                MHISTATUS_READY_MASK, 1, 25000);
+                                                MHISTATUS, MHISTATUS_READY_MASK,
+                                                1, 25000, mhi_cntrl->timeout_ms);
                        if (ret)
                                dev_err(dev, "Device failed to enter READY state\n");
                }
@@ -1111,7 +1116,8 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
        if (state == MHI_STATE_SYS_ERR) {
                mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
                ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
-                                MHICTRL_RESET_MASK, 0, interval_us);
+                                MHICTRL_RESET_MASK, 0, interval_us,
+                                mhi_cntrl->timeout_ms);
                if (ret) {
                        dev_info(dev, "Failed to reset MHI due to syserr state\n");
                        goto error_exit;
@@ -1202,14 +1208,18 @@ EXPORT_SYMBOL_GPL(mhi_power_down);
 int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
 {
        int ret = mhi_async_power_up(mhi_cntrl);
+       u32 timeout_ms;
 
        if (ret)
                return ret;
 
+       /* Some devices need more time to set ready during power up */
+       timeout_ms = mhi_cntrl->ready_timeout_ms ?
+               mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
        wait_event_timeout(mhi_cntrl->state_event,
                           MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
                           MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
-                          msecs_to_jiffies(mhi_cntrl->timeout_ms));
+                          msecs_to_jiffies(timeout_ms));
 
        ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
        if (ret)
index e384fbc6c1d93152d03329724c0373d54d0059f0..641c1a6adc8ae5fed39237a5aab1639a1e8fdc29 100644 (file)
@@ -102,7 +102,7 @@ static int moxtet_match(struct device *dev, struct device_driver *drv)
        return 0;
 }
 
-static struct bus_type moxtet_bus_type = {
+static const struct bus_type moxtet_bus_type = {
        .name           = "moxtet",
        .dev_groups     = moxtet_dev_groups,
        .match          = moxtet_match,
index 4461c6c9313f03576dc1429759fb4876fee2ce95..b74d76afccb6345c0b1bdff08f825979510f6968 100644 (file)
 
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/idr.h>
 #include <linux/cdx/cdx_bus.h>
 #include <linux/iommu.h>
 #include <linux/dma-map-ops.h>
+#include <linux/debugfs.h>
 #include "cdx.h"
 
 /* Default DMA mask for devices on a CDX bus */
 static DEFINE_IDA(cdx_controller_ida);
 /* Lock to protect controller ops */
 static DEFINE_MUTEX(cdx_controller_lock);
+/* Debugfs dir for cdx bus */
+static struct dentry *cdx_debugfs_dir;
 
 static char *compat_node_name = "xlnx,versal-net-cdx";
 
+static void cdx_destroy_res_attr(struct cdx_device *cdx_dev, int num);
+
 /**
  * cdx_dev_reset - Reset a CDX device
  * @dev: CDX device
@@ -145,6 +153,8 @@ static int cdx_unregister_device(struct device *dev,
                if (cdx_dev->enabled && cdx->ops->bus_disable)
                        cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
        } else {
+               cdx_destroy_res_attr(cdx_dev, MAX_CDX_DEV_RESOURCES);
+               debugfs_remove_recursive(cdx_dev->debugfs_dir);
                kfree(cdx_dev->driver_override);
                cdx_dev->driver_override = NULL;
        }
@@ -548,6 +558,31 @@ static const struct attribute_group *cdx_dev_groups[] = {
        NULL,
 };
 
+static int cdx_debug_resource_show(struct seq_file *s, void *data)
+{
+       struct cdx_device *cdx_dev = s->private;
+       int i;
+
+       for (i = 0; i < MAX_CDX_DEV_RESOURCES; i++) {
+               struct resource *res =  &cdx_dev->res[i];
+
+               seq_printf(s, "%pr\n", res);
+       }
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cdx_debug_resource);
+
+static void cdx_device_debugfs_init(struct cdx_device *cdx_dev)
+{
+       cdx_dev->debugfs_dir = debugfs_create_dir(dev_name(&cdx_dev->dev), cdx_debugfs_dir);
+       if (IS_ERR(cdx_dev->debugfs_dir))
+               return;
+
+       debugfs_create_file("resource", 0444, cdx_dev->debugfs_dir, cdx_dev,
+                           &cdx_debug_resource_fops);
+}
+
 static ssize_t rescan_store(const struct bus_type *bus,
                            const char *buf, size_t count)
 {
@@ -569,12 +604,12 @@ static ssize_t rescan_store(const struct bus_type *bus,
 
        /* Rescan all the devices */
        for_each_compatible_node(np, NULL, compat_node_name) {
-               if (!np)
-                       return -EINVAL;
-
                pd = of_find_device_by_node(np);
-               if (!pd)
-                       return -EINVAL;
+               if (!pd) {
+                       of_node_put(np);
+                       count = -EINVAL;
+                       goto unlock;
+               }
 
                cdx = platform_get_drvdata(pd);
                if (cdx && cdx->controller_registered && cdx->ops->scan)
@@ -583,6 +618,7 @@ static ssize_t rescan_store(const struct bus_type *bus,
                put_device(&pd->dev);
        }
 
+unlock:
        mutex_unlock(&cdx_controller_lock);
 
        return count;
@@ -640,11 +676,105 @@ static void cdx_device_release(struct device *dev)
        kfree(cdx_dev);
 }
 
+static const struct vm_operations_struct cdx_phys_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+       .access = generic_access_phys,
+#endif
+};
+
+/**
+ * cdx_mmap_resource - map a CDX resource into user memory space
+ * @fp: File pointer. Not used in this function, but required where
+ *      this API is registered as a callback.
+ * @kobj: kobject for mapping
+ * @attr: struct bin_attribute for the file being mapped
+ * @vma: struct vm_area_struct passed into the mmap
+ *
+ * Use the regular CDX mapping routines to map a CDX resource into userspace.
+ *
+ * Return: true on success, false otherwise.
+ */
+static int cdx_mmap_resource(struct file *fp, struct kobject *kobj,
+                            struct bin_attribute *attr,
+                            struct vm_area_struct *vma)
+{
+       struct cdx_device *cdx_dev = to_cdx_device(kobj_to_dev(kobj));
+       int num = (unsigned long)attr->private;
+       struct resource *res;
+       unsigned long size;
+
+       res = &cdx_dev->res[num];
+       if (iomem_is_exclusive(res->start))
+               return -EINVAL;
+
+       /* Make sure the caller is mapping a valid resource for this device */
+       size = ((cdx_resource_len(cdx_dev, num) - 1) >> PAGE_SHIFT) + 1;
+       if (vma->vm_pgoff + vma_pages(vma) > size)
+               return -EINVAL;
+
+       /*
+        * Map memory region and vm->vm_pgoff is expected to be an
+        * offset within that region.
+        */
+       vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
+       vma->vm_pgoff += (cdx_resource_start(cdx_dev, num) >> PAGE_SHIFT);
+       vma->vm_ops = &cdx_phys_vm_ops;
+       return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+                                 vma->vm_end - vma->vm_start,
+                                 vma->vm_page_prot);
+}
+
+static void cdx_destroy_res_attr(struct cdx_device *cdx_dev, int num)
+{
+       int i;
+
+       /* removing the bin attributes */
+       for (i = 0; i < num; i++) {
+               struct bin_attribute *res_attr;
+
+               res_attr = cdx_dev->res_attr[i];
+               if (res_attr) {
+                       sysfs_remove_bin_file(&cdx_dev->dev.kobj, res_attr);
+                       kfree(res_attr);
+               }
+       }
+}
+
+#define CDX_RES_ATTR_NAME_LEN  10
+static int cdx_create_res_attr(struct cdx_device *cdx_dev, int num)
+{
+       struct bin_attribute *res_attr;
+       char *res_attr_name;
+       int ret;
+
+       res_attr = kzalloc(sizeof(*res_attr) + CDX_RES_ATTR_NAME_LEN, GFP_ATOMIC);
+       if (!res_attr)
+               return -ENOMEM;
+
+       res_attr_name = (char *)(res_attr + 1);
+
+       sysfs_bin_attr_init(res_attr);
+
+       cdx_dev->res_attr[num] = res_attr;
+       sprintf(res_attr_name, "resource%d", num);
+
+       res_attr->mmap = cdx_mmap_resource;
+       res_attr->attr.name = res_attr_name;
+       res_attr->attr.mode = 0600;
+       res_attr->size = cdx_resource_len(cdx_dev, num);
+       res_attr->private = (void *)(unsigned long)num;
+       ret = sysfs_create_bin_file(&cdx_dev->dev.kobj, res_attr);
+       if (ret)
+               kfree(res_attr);
+
+       return ret;
+}
+
 int cdx_device_add(struct cdx_dev_params *dev_params)
 {
        struct cdx_controller *cdx = dev_params->cdx;
        struct cdx_device *cdx_dev;
-       int ret;
+       int ret, i;
 
        cdx_dev = kzalloc(sizeof(*cdx_dev), GFP_KERNEL);
        if (!cdx_dev)
@@ -687,7 +817,28 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
                goto fail;
        }
 
+       /* Create resource<N> attributes */
+       for (i = 0; i < MAX_CDX_DEV_RESOURCES; i++) {
+               if (cdx_resource_flags(cdx_dev, i) & IORESOURCE_MEM) {
+                       /* skip empty resources */
+                       if (!cdx_resource_len(cdx_dev, i))
+                               continue;
+
+                       ret = cdx_create_res_attr(cdx_dev, i);
+                       if (ret != 0) {
+                               dev_err(&cdx_dev->dev,
+                                       "cdx device resource<%d> file creation failed: %d", i, ret);
+                               goto resource_create_fail;
+                       }
+               }
+       }
+
+       cdx_device_debugfs_init(cdx_dev);
+
        return 0;
+resource_create_fail:
+       cdx_destroy_res_attr(cdx_dev, i);
+       device_del(&cdx_dev->dev);
 fail:
        /*
         * Do not free cdx_dev here as it would be freed in
@@ -788,6 +939,12 @@ EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, CDX_BUS_CONTROLLER);
 
 static int __init cdx_bus_init(void)
 {
-       return bus_register(&cdx_bus_type);
+       int ret;
+
+       ret = bus_register(&cdx_bus_type);
+       if (!ret)
+               cdx_debugfs_dir = debugfs_create_dir(cdx_bus_type.name, NULL);
+
+       return ret;
 }
 postcore_initcall(cdx_bus_init);
index 4c188e9e477cdfebec5fa237e0c30fe6e1289ef9..ee951b265213fbb3bf5519ad4b1b02efd0e0fb5b 100644 (file)
@@ -299,7 +299,7 @@ static int register_device(int minor, struct pp_struct *pp)
                goto err;
        }
 
-       index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+       index = ida_alloc(&ida_index, GFP_KERNEL);
        memset(&ppdev_cb, 0, sizeof(ppdev_cb));
        ppdev_cb.irq_func = pp_irq;
        ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
@@ -310,7 +310,7 @@ static int register_device(int minor, struct pp_struct *pp)
        if (!pdev) {
                pr_warn("%s: failed to register device!\n", name);
                rc = -ENXIO;
-               ida_simple_remove(&ida_index, index);
+               ida_free(&ida_index, index);
                goto err;
        }
 
@@ -750,7 +750,7 @@ static int pp_release(struct inode *inode, struct file *file)
 
        if (pp->pdev) {
                parport_unregister_device(pp->pdev);
-               ida_simple_remove(&ida_index, pp->index);
+               ida_free(&ida_index, pp->index);
                pp->pdev = NULL;
                pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
        }
index 5af804c17a75349910cfe9b24d7170aa5dd9f129..4c806a189ee533e40160a02167dc876b1a1fd61c 100644 (file)
@@ -40,7 +40,7 @@ static struct ttyprintk_port tpk_port;
 
 static int tpk_curr;
 
-static char tpk_buffer[TPK_STR_SIZE + 4];
+static u8 tpk_buffer[TPK_STR_SIZE + 4];
 
 static void tpk_flush(void)
 {
@@ -51,9 +51,9 @@ static void tpk_flush(void)
        }
 }
 
-static int tpk_printk(const u8 *buf, int count)
+static int tpk_printk(const u8 *buf, size_t count)
 {
-       int i;
+       size_t i;
 
        for (i = 0; i < count; i++) {
                if (tpk_curr >= TPK_STR_SIZE) {
index 431e9e5bf9c15fa7b9ae3b5b598ff7a662c5433c..035f89f1a251e22a5a4fb5ee907cc2f8de7c4965 100644 (file)
@@ -230,9 +230,6 @@ struct port {
        bool guest_connected;
 };
 
-/* This is the very early arch-specified put chars function. */
-static int (*early_put_chars)(u32, const char *, int);
-
 static struct port *find_port_by_vtermno(u32 vtermno)
 {
        struct port *port;
@@ -653,7 +650,7 @@ done:
  * Give out the data that's requested from the buffer that we have
  * queued up.
  */
-static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
+static ssize_t fill_readbuf(struct port *port, u8 __user *out_buf,
                            size_t out_count, bool to_user)
 {
        struct port_buffer *buf;
@@ -672,7 +669,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
                if (ret)
                        return -EFAULT;
        } else {
-               memcpy((__force char *)out_buf, buf->buf + buf->offset,
+               memcpy((__force u8 *)out_buf, buf->buf + buf->offset,
                       out_count);
        }
 
@@ -1107,16 +1104,13 @@ static const struct file_operations port_fops = {
  * it to finish: inefficient in theory, but in practice
  * implementations will do it immediately.
  */
-static int put_chars(u32 vtermno, const char *buf, int count)
+static ssize_t put_chars(u32 vtermno, const u8 *buf, size_t count)
 {
        struct port *port;
        struct scatterlist sg[1];
        void *data;
        int ret;
 
-       if (unlikely(early_put_chars))
-               return early_put_chars(vtermno, buf, count);
-
        port = find_port_by_vtermno(vtermno);
        if (!port)
                return -EPIPE;
@@ -1138,14 +1132,10 @@ static int put_chars(u32 vtermno, const char *buf, int count)
  * We call out to fill_readbuf that gets us the required data from the
  * buffers that are queued up.
  */
-static int get_chars(u32 vtermno, char *buf, int count)
+static ssize_t get_chars(u32 vtermno, u8 *buf, size_t count)
 {
        struct port *port;
 
-       /* If we've not set up the port yet, we have no input to give. */
-       if (unlikely(early_put_chars))
-               return 0;
-
        port = find_port_by_vtermno(vtermno);
        if (!port)
                return -EPIPE;
@@ -1153,7 +1143,7 @@ static int get_chars(u32 vtermno, char *buf, int count)
        /* If we don't have an input queue yet, we can't get input. */
        BUG_ON(!port->in_vq);
 
-       return fill_readbuf(port, (__force char __user *)buf, count, false);
+       return fill_readbuf(port, (__force u8 __user *)buf, count, false);
 }
 
 static void resize_console(struct port *port)
@@ -1201,21 +1191,6 @@ static const struct hv_ops hv_ops = {
        .notifier_hangup = notifier_del_vio,
 };
 
-/*
- * Console drivers are initialized very early so boot messages can go
- * out, so we do things slightly differently from the generic virtio
- * initialization of the net and block drivers.
- *
- * At this stage, the console is output-only.  It's too early to set
- * up a virtqueue, so we let the drivers do some boutique early-output
- * thing.
- */
-int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
-{
-       early_put_chars = put_chars;
-       return hvc_instantiate(0, 0, &hv_ops);
-}
-
 static int init_port_console(struct port *port)
 {
        int ret;
@@ -1256,13 +1231,6 @@ static int init_port_console(struct port *port)
        spin_unlock_irq(&pdrvdata_lock);
        port->guest_connected = true;
 
-       /*
-        * Start using the new console output if this is the first
-        * console to come up.
-        */
-       if (early_put_chars)
-               early_put_chars = NULL;
-
        /* Notify host of port being opened */
        send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
 
@@ -1999,7 +1967,6 @@ static int virtcons_probe(struct virtio_device *vdev)
        struct ports_device *portdev;
        int err;
        bool multiport;
-       bool early = early_put_chars != NULL;
 
        /* We only need a config space if features are offered */
        if (!vdev->config->get &&
@@ -2010,9 +1977,6 @@ static int virtcons_probe(struct virtio_device *vdev)
                return -EINVAL;
        }
 
-       /* Ensure to read early_put_chars now */
-       barrier();
-
        portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
        if (!portdev) {
                err = -ENOMEM;
@@ -2100,18 +2064,6 @@ static int virtcons_probe(struct virtio_device *vdev)
        __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
                           VIRTIO_CONSOLE_DEVICE_READY, 1);
 
-       /*
-        * If there was an early virtio console, assume that there are no
-        * other consoles. We need to wait until the hvc_alloc matches the
-        * hvc_instantiate, otherwise tty_open will complain, resulting in
-        * a "Warning: unable to open an initial console" boot failure.
-        * Without multiport this is done in add_port above. With multiport
-        * this might take some host<->guest communication - thus we have to
-        * wait.
-        */
-       if (multiport && early)
-               wait_for_completion(&early_console_added);
-
        return 0;
 
 free_chrdev:
index 74db7fef237b4d3f11e758d6be2bf0744056aa86..d7182d6e978372ce467da5d0c7b4fd0613eb0d8b 100644 (file)
@@ -4,8 +4,9 @@
  */
 
 #include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 
 #include <dt-bindings/clock/qcom,x1e80100-gcc.h>
index 32daaac9b13208fc92604c89b625ec914ef3e357..ca7a06489c405f1d013e0fedd997413c43a19d65 100644 (file)
@@ -69,7 +69,7 @@
  * @base_addr: Base address of timer
  * @freq:      Timer input clock frequency
  * @clk:       Associated clock source
- * @clk_rate_change_nb Notifier block for clock rate changes
+ * @clk_rate_change_nb:        Notifier block for clock rate changes
  */
 struct ttc_timer {
        void __iomem *base_addr;
@@ -134,7 +134,7 @@ static void ttc_set_interval(struct ttc_timer *timer,
  * @irq:       IRQ number of the Timer
  * @dev_id:    void pointer to the ttc_timer instance
  *
- * returns: Always IRQ_HANDLED - success
+ * Returns: Always IRQ_HANDLED - success
  **/
 static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
 {
@@ -151,8 +151,9 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
 
 /**
  * __ttc_clocksource_read - Reads the timer counter register
+ * @cs: &clocksource to read from
  *
- * returns: Current timer counter register value
+ * Returns: Current timer counter register value
  **/
 static u64 __ttc_clocksource_read(struct clocksource *cs)
 {
@@ -173,7 +174,7 @@ static u64 notrace ttc_sched_clock_read(void)
  * @cycles:    Timer interval ticks
  * @evt:       Address of clock event instance
  *
- * returns: Always 0 - success
+ * Returns: Always %0 - success
  **/
 static int ttc_set_next_event(unsigned long cycles,
                                        struct clock_event_device *evt)
@@ -186,9 +187,12 @@ static int ttc_set_next_event(unsigned long cycles,
 }
 
 /**
- * ttc_set_{shutdown|oneshot|periodic} - Sets the state of timer
- *
+ * ttc_shutdown - Sets the state of timer
  * @evt:       Address of clock event instance
+ *
+ * Used for shutdown or oneshot.
+ *
+ * Returns: Always %0 - success
  **/
 static int ttc_shutdown(struct clock_event_device *evt)
 {
@@ -202,6 +206,12 @@ static int ttc_shutdown(struct clock_event_device *evt)
        return 0;
 }
 
+/**
+ * ttc_set_periodic - Sets the state of timer
+ * @evt:       Address of clock event instance
+ *
+ * Returns: Always %0 - success
+ */
 static int ttc_set_periodic(struct clock_event_device *evt)
 {
        struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
index bc0ca6e12334903dd8ac17364e96469a42bbf5ab..6981ff3ac8a940be37b8dc247e59e32c2604f992 100644 (file)
@@ -155,9 +155,8 @@ static int __init ep93xx_timer_of_init(struct device_node *np)
        ep93xx_tcu = tcu;
 
        irq = irq_of_parse_and_map(np, 0);
-       if (irq == 0)
-               irq = -EINVAL;
-       if (irq < 0) {
+       if (!irq) {
+               ret = -EINVAL;
                pr_err("EP93XX Timer Can't parse IRQ %d", irq);
                goto out_free;
        }
index 57857c0dfba97e0bfdcd5190e8aee31e11028667..e66dcbd6656658dd32f913189fceebda87e8ccbf 100644 (file)
@@ -61,12 +61,19 @@ static int riscv_clock_next_event(unsigned long delta,
        return 0;
 }
 
+static int riscv_clock_shutdown(struct clock_event_device *evt)
+{
+       riscv_clock_event_stop();
+       return 0;
+}
+
 static unsigned int riscv_clock_event_irq;
 static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
        .name                   = "riscv_timer_clockevent",
        .features               = CLOCK_EVT_FEAT_ONESHOT,
        .rating                 = 100,
        .set_next_event         = riscv_clock_next_event,
+       .set_state_shutdown     = riscv_clock_shutdown,
 };
 
 /*
index 5f60f6bd33866b4edc3ee2e248acb6d510f468e0..56acf26172621ffb96a14af47ccf92d31af15501 100644 (file)
@@ -183,7 +183,7 @@ static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
  * dmtimer_write - write timer registers in posted and non-posted mode
  * @timer:      timer pointer over which write operation is to perform
  * @reg:        lowest byte holds the register offset
- * @value:      data to write into the register
+ * @val:        data to write into the register
  *
  * The posted mode bit is encoded in reg. Note that in posted mode, the write
  * pending bit must be checked. Otherwise a write on a register which has a
@@ -949,7 +949,7 @@ static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
 
 /**
  * omap_dm_timer_set_int_disable - disable timer interrupts
- * @timer:     pointer to timer handle
+ * @cookie:    pointer to timer cookie
  * @mask:      bit mask of interrupts to be disabled
  *
  * Disables the specified timer interrupts for a timer.
index 1548dea15df140967d7c321834134d9ac43d0017..1b481731df964ed67e1e8909ee084ed7292941b7 100644 (file)
@@ -1714,8 +1714,8 @@ static int __comedi_get_user_chanlist(struct comedi_device *dev,
 
        lockdep_assert_held(&dev->mutex);
        cmd->chanlist = NULL;
-       chanlist = memdup_user(user_chanlist,
-                              cmd->chanlist_len * sizeof(unsigned int));
+       chanlist = memdup_array_user(user_chanlist,
+                                    cmd->chanlist_len, sizeof(unsigned int));
        if (IS_ERR(chanlist))
                return PTR_ERR(chanlist);
 
index 1f6186475715e0592df1028ade0a336703338b15..1791d37fbc53c57e0f13469934eee357c0de87cc 100644 (file)
@@ -1232,14 +1232,13 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
        max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
        min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
 
+       WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+       WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+
        max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
                        cpudata->max_limit_perf);
        min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
                        cpudata->max_limit_perf);
-
-       WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
-       WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
-
        value = READ_ONCE(cpudata->cppc_req_cached);
 
        if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
index 3c69040920b8f85c3f169911b50c91300ef73b2f..ca94e60e705a1df435b1dd75a13c0a50dc3f8c27 100644 (file)
@@ -302,7 +302,10 @@ static bool hwp_forced __read_mostly;
 
 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
 
-#define HYBRID_SCALING_FACTOR  78741
+#define HYBRID_SCALING_FACTOR          78741
+#define HYBRID_SCALING_FACTOR_MTL      80000
+
+static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
 
 static inline int core_get_scaling(void)
 {
@@ -422,7 +425,7 @@ static int intel_pstate_cppc_get_scaling(int cpu)
         */
        if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
            cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
-               return HYBRID_SCALING_FACTOR;
+               return hybrid_scaling_factor;
 
        return core_get_scaling();
 }
@@ -526,6 +529,30 @@ static int intel_pstate_cppc_get_scaling(int cpu)
 }
 #endif /* CONFIG_ACPI_CPPC_LIB */
 
+static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
+                                       unsigned int relation)
+{
+       if (freq == cpu->pstate.turbo_freq)
+               return cpu->pstate.turbo_pstate;
+
+       if (freq == cpu->pstate.max_freq)
+               return cpu->pstate.max_pstate;
+
+       switch (relation) {
+       case CPUFREQ_RELATION_H:
+               return freq / cpu->pstate.scaling;
+       case CPUFREQ_RELATION_C:
+               return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
+       }
+
+       return DIV_ROUND_UP(freq, cpu->pstate.scaling);
+}
+
+static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
+{
+       return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
+}
+
 /**
  * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
  * @cpu: Target CPU.
@@ -543,6 +570,7 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
        int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
        int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
        int scaling = cpu->pstate.scaling;
+       int freq;
 
        pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
        pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
@@ -556,16 +584,16 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
        cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
                                         perf_ctl_scaling);
 
-       cpu->pstate.max_pstate_physical =
-                       DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
-                                    scaling);
+       freq = perf_ctl_max_phys * perf_ctl_scaling;
+       cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
 
-       cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+       freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+       cpu->pstate.min_freq = freq;
        /*
         * Cast the min P-state value retrieved via pstate_funcs.get_min() to
         * the effective range of HWP performance levels.
         */
-       cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
+       cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
 }
 
 static inline void update_turbo_state(void)
@@ -1968,7 +1996,7 @@ static int hwp_get_cpu_scaling(int cpu)
        smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
        /* P-cores have a smaller perf level-to-freqency scaling factor. */
        if (cpu_type == 0x40)
-               return HYBRID_SCALING_FACTOR;
+               return hybrid_scaling_factor;
 
        /* Use default core scaling for E-cores */
        if (cpu_type == 0x20)
@@ -2525,13 +2553,12 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
         * abstract values to represent performance rather than pure ratios.
         */
        if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
-               int scaling = cpu->pstate.scaling;
                int freq;
 
                freq = max_policy_perf * perf_ctl_scaling;
-               max_policy_perf = DIV_ROUND_UP(freq, scaling);
+               max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
                freq = min_policy_perf * perf_ctl_scaling;
-               min_policy_perf = DIV_ROUND_UP(freq, scaling);
+               min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
        }
 
        pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -2905,18 +2932,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
 
        cpufreq_freq_transition_begin(policy, &freqs);
 
-       switch (relation) {
-       case CPUFREQ_RELATION_L:
-               target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
-               break;
-       case CPUFREQ_RELATION_H:
-               target_pstate = freqs.new / cpu->pstate.scaling;
-               break;
-       default:
-               target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
-               break;
-       }
-
+       target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
        target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
 
        freqs.new = target_pstate * cpu->pstate.scaling;
@@ -2934,7 +2950,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 
        update_turbo_state();
 
-       target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+       target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
 
        target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
 
@@ -3399,6 +3415,11 @@ static const struct x86_cpu_id intel_epp_balance_perf[] = {
        {}
 };
 
+static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
+       X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+       {}
+};
+
 static int __init intel_pstate_init(void)
 {
        static struct cpudata **_all_cpu_data;
@@ -3489,9 +3510,16 @@ hwp_cpu_matched:
 
        if (hwp_active) {
                const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+               const struct x86_cpu_id *hybrid_id = x86_match_cpu(intel_hybrid_scaling_factor);
 
                if (id)
                        epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+
+               if (hybrid_id) {
+                       hybrid_scaling_factor = hybrid_id->driver_data;
+                       pr_debug("hybrid scaling factor: %d\n", hybrid_scaling_factor);
+               }
+
        }
 
        mutex_lock(&intel_pstate_driver_lock);
index a148ff1f0872c419fc2198f64174d26e45342289..a4f6884416a0486181426c8a22d885f4f0534ea0 100644 (file)
@@ -4545,6 +4545,7 @@ struct caam_hash_alg {
        struct list_head entry;
        struct device *dev;
        int alg_type;
+       bool is_hmac;
        struct ahash_alg ahash_alg;
 };
 
@@ -4571,7 +4572,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 
        ctx->dev = caam_hash->dev;
 
-       if (alg->setkey) {
+       if (caam_hash->is_hmac) {
                ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
                                                          ARRAY_SIZE(ctx->key),
                                                          DMA_TO_DEVICE,
@@ -4611,7 +4612,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
         * For keyed hash algorithms shared descriptors
         * will be created later in setkey() callback
         */
-       return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
+       return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
 }
 
 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -4646,12 +4647,14 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
                         template->hmac_name);
                snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->hmac_driver_name);
+               t_alg->is_hmac = true;
        } else {
                snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->name);
                snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->driver_name);
                t_alg->ahash_alg.setkey = NULL;
+               t_alg->is_hmac = false;
        }
        alg->cra_module = THIS_MODULE;
        alg->cra_init = caam_hash_cra_init;
index 290c8500c247f9cbf20fb055e3715400a5f30646..fdd724228c2fa8accc7c7ebc1244c5ee92423247 100644 (file)
@@ -1753,6 +1753,7 @@ static struct caam_hash_template driver_hash[] = {
 struct caam_hash_alg {
        struct list_head entry;
        int alg_type;
+       bool is_hmac;
        struct ahash_engine_alg ahash_alg;
 };
 
@@ -1804,7 +1805,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
        } else {
                if (priv->era >= 6) {
                        ctx->dir = DMA_BIDIRECTIONAL;
-                       ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
+                       ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE;
                } else {
                        ctx->dir = DMA_TO_DEVICE;
                        ctx->key_dir = DMA_NONE;
@@ -1862,7 +1863,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
         * For keyed hash algorithms shared descriptors
         * will be created later in setkey() callback
         */
-       return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
+       return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
 }
 
 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -1915,12 +1916,14 @@ caam_hash_alloc(struct caam_hash_template *template,
                         template->hmac_name);
                snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->hmac_driver_name);
+               t_alg->is_hmac = true;
        } else {
                snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->name);
                snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->driver_name);
                halg->setkey = NULL;
+               t_alg->is_hmac = false;
        }
        alg->cra_module = THIS_MODULE;
        alg->cra_init = caam_hash_cra_init;
index e4d3f45242f63258ea0efc9f0a0a7ca9b411333c..b04bc1d3d627d447c2cfc10b9078b040800c8406 100644 (file)
@@ -534,10 +534,16 @@ EXPORT_SYMBOL_GPL(sev_platform_init);
 
 static int __sev_platform_shutdown_locked(int *error)
 {
-       struct sev_device *sev = psp_master->sev_data;
+       struct psp_device *psp = psp_master;
+       struct sev_device *sev;
        int ret;
 
-       if (!sev || sev->state == SEV_STATE_UNINIT)
+       if (!psp || !psp->sev_data)
+               return 0;
+
+       sev = psp->sev_data;
+
+       if (sev->state == SEV_STATE_UNINIT)
                return 0;
 
        ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
index 479062aa5e6b61c2706ff8b4f4fe912f52ded3dc..94a0ebb03d8c96804b455f73a8d8b3155baab866 100644 (file)
@@ -463,6 +463,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
                hw_data->fw_name = ADF_402XX_FW;
                hw_data->fw_mmp_name = ADF_402XX_MMP;
                hw_data->uof_get_name = uof_get_name_402xx;
+               hw_data->get_ena_thd_mask = get_ena_thd_mask;
                break;
        case ADF_401XX_PCI_DEVICE_ID:
                hw_data->fw_name = ADF_4XXX_FW;
index 8ea1d340e4385089b3b449c07a0d6e1e679ab60a..67998dbd1d46b49dc623a0929c262174965bb601 100644 (file)
@@ -5,6 +5,7 @@ menuconfig CXL_BUS
        select FW_LOADER
        select FW_UPLOAD
        select PCI_DOE
+       select FIRMWARE_TABLE
        help
          CXL is a bus that is electrically compatible with PCI Express, but
          layers three protocols on that signalling (CXL.io, CXL.cache, and
@@ -54,8 +55,10 @@ config CXL_MEM_RAW_COMMANDS
 config CXL_ACPI
        tristate "CXL ACPI: Platform Support"
        depends on ACPI
+       depends on ACPI_NUMA
        default CXL_BUS
        select ACPI_TABLE_LIB
+       select ACPI_HMAT
        help
          Enable support for host managed device memory (HDM) resources
          published by a platform's ACPI CXL memory layout description.  See
index 2034eb4ce83fb7531be4148e4db0679a39b4587b..dcf2b39e1048822ca90324667d85f68225c05fa4 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/acpi.h>
 #include <linux/pci.h>
+#include <linux/node.h>
 #include <asm/div64.h>
 #include "cxlpci.h"
 #include "cxl.h"
@@ -17,6 +18,10 @@ struct cxl_cxims_data {
        u64 xormaps[] __counted_by(nr_maps);
 };
 
+static const guid_t acpi_cxl_qtg_id_guid =
+       GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
+                 0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
+
 /*
  * Find a targets entry (n) in the host bridge interleave list.
  * CXL Specification 3.0 Table 9-22
@@ -194,6 +199,123 @@ struct cxl_cfmws_context {
        int id;
 };
 
+/**
+ * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
+ * @handle: ACPI handle
+ * @coord: performance access coordinates
+ * @entries: number of QTG IDs to return
+ * @qos_class: int array provided by caller to return QTG IDs
+ *
+ * Return: number of QTG IDs returned, or -errno for errors
+ *
+ * Issue QTG _DSM with accompanied bandwidth and latency data in order to get
+ * the QTG IDs that are suitable for the performance point in order of most
+ * suitable to least suitable. Write back array of QTG IDs and return the
+ * actual number of QTG IDs written back.
+ */
+static int
+cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
+                         int entries, int *qos_class)
+{
+       union acpi_object *out_obj, *out_buf, *obj;
+       union acpi_object in_array[4] = {
+               [0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
+               [1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
+               [2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
+               [3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
+       };
+       union acpi_object in_obj = {
+               .package = {
+                       .type = ACPI_TYPE_PACKAGE,
+                       .count = 4,
+                       .elements = in_array,
+               },
+       };
+       int count, pkg_entries, i;
+       u16 max_qtg;
+       int rc;
+
+       if (!entries)
+               return -EINVAL;
+
+       out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
+       if (!out_obj)
+               return -ENXIO;
+
+       if (out_obj->type != ACPI_TYPE_PACKAGE) {
+               rc = -ENXIO;
+               goto out;
+       }
+
+       /* Check Max QTG ID */
+       obj = &out_obj->package.elements[0];
+       if (obj->type != ACPI_TYPE_INTEGER) {
+               rc = -ENXIO;
+               goto out;
+       }
+
+       max_qtg = obj->integer.value;
+
+       /* It's legal to have 0 QTG entries */
+       pkg_entries = out_obj->package.count;
+       if (pkg_entries <= 1) {
+               rc = 0;
+               goto out;
+       }
+
+       /* Retrieve QTG IDs package */
+       obj = &out_obj->package.elements[1];
+       if (obj->type != ACPI_TYPE_PACKAGE) {
+               rc = -ENXIO;
+               goto out;
+       }
+
+       pkg_entries = obj->package.count;
+       count = min(entries, pkg_entries);
+       for (i = 0; i < count; i++) {
+               u16 qtg_id;
+
+               out_buf = &obj->package.elements[i];
+               if (out_buf->type != ACPI_TYPE_INTEGER) {
+                       rc = -ENXIO;
+                       goto out;
+               }
+
+               qtg_id = out_buf->integer.value;
+               if (qtg_id > max_qtg)
+                       pr_warn("QTG ID %u greater than MAX %u\n",
+                               qtg_id, max_qtg);
+
+               qos_class[i] = qtg_id;
+       }
+       rc = count;
+
+out:
+       ACPI_FREE(out_obj);
+       return rc;
+}
+
+static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
+                             struct access_coordinate *coord, int entries,
+                             int *qos_class)
+{
+       struct device *dev = cxl_root->port.uport_dev;
+       acpi_handle handle;
+
+       if (!dev_is_platform(dev))
+               return -ENODEV;
+
+       handle = ACPI_HANDLE(dev);
+       if (!handle)
+               return -ENODEV;
+
+       return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
+}
+
+static const struct cxl_root_ops acpi_root_ops = {
+       .qos_class = cxl_acpi_qos_class,
+};
+
 static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
                           const unsigned long end)
 {
@@ -389,8 +511,29 @@ static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
        return 0;
 }
 
+static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
+{
+       struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
+       u32 uid;
+       int rc;
+
+       if (kstrtou32(acpi_device_uid(hb), 0, &uid))
+               return -EINVAL;
+
+       rc = acpi_get_genport_coordinates(uid, &dport->hb_coord);
+       if (rc < 0)
+               return rc;
+
+       /* Adjust back to picoseconds from nanoseconds */
+       dport->hb_coord.read_latency *= 1000;
+       dport->hb_coord.write_latency *= 1000;
+
+       return 0;
+}
+
 static int add_host_bridge_dport(struct device *match, void *arg)
 {
+       int ret;
        acpi_status rc;
        struct device *bridge;
        struct cxl_dport *dport;
@@ -440,6 +583,10 @@ static int add_host_bridge_dport(struct device *match, void *arg)
        if (IS_ERR(dport))
                return PTR_ERR(dport);
 
+       ret = get_genport_coordinates(match, dport);
+       if (ret)
+               dev_dbg(match, "Failed to get generic port perf coordinates.\n");
+
        return 0;
 }
 
@@ -656,6 +803,7 @@ static int cxl_acpi_probe(struct platform_device *pdev)
 {
        int rc;
        struct resource *cxl_res;
+       struct cxl_root *cxl_root;
        struct cxl_port *root_port;
        struct device *host = &pdev->dev;
        struct acpi_device *adev = ACPI_COMPANION(host);
@@ -675,9 +823,10 @@ static int cxl_acpi_probe(struct platform_device *pdev)
        cxl_res->end = -1;
        cxl_res->flags = IORESOURCE_MEM;
 
-       root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
-       if (IS_ERR(root_port))
-               return PTR_ERR(root_port);
+       cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
+       if (IS_ERR(cxl_root))
+               return PTR_ERR(cxl_root);
+       root_port = &cxl_root->port;
 
        rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
                              add_host_bridge_dport);
index 1f66b5d4d93556868a5d413b520882f3d94b2f6f..9259bcc6773c804ccace2478c9f6f09267b48c9d 100644 (file)
@@ -13,5 +13,6 @@ cxl_core-y += mbox.o
 cxl_core-y += pci.o
 cxl_core-y += hdm.o
 cxl_core-y += pmu.o
+cxl_core-y += cdat.o
 cxl_core-$(CONFIG_TRACING) += trace.o
 cxl_core-$(CONFIG_CXL_REGION) += region.o
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
new file mode 100644 (file)
index 0000000..6fe1154
--- /dev/null
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
+#include <linux/acpi.h>
+#include <linux/xarray.h>
+#include <linux/fw_table.h>
+#include <linux/node.h>
+#include <linux/overflow.h>
+#include "cxlpci.h"
+#include "cxlmem.h"
+#include "core.h"
+#include "cxl.h"
+
+struct dsmas_entry {
+       struct range dpa_range;
+       u8 handle;
+       struct access_coordinate coord;
+
+       int entries;
+       int qos_class;
+};
+
+static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
+                             const unsigned long end)
+{
+       struct acpi_cdat_header *hdr = &header->cdat;
+       struct acpi_cdat_dsmas *dsmas;
+       int size = sizeof(*hdr) + sizeof(*dsmas);
+       struct xarray *dsmas_xa = arg;
+       struct dsmas_entry *dent;
+       u16 len;
+       int rc;
+
+       len = le16_to_cpu((__force __le16)hdr->length);
+       if (len != size || (unsigned long)hdr + len > end) {
+               pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
+               return -EINVAL;
+       }
+
+       /* Skip common header */
+       dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);
+
+       dent = kzalloc(sizeof(*dent), GFP_KERNEL);
+       if (!dent)
+               return -ENOMEM;
+
+       dent->handle = dsmas->dsmad_handle;
+       dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
+       dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
+                             le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;
+
+       rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
+       if (rc) {
+               kfree(dent);
+               return rc;
+       }
+
+       return 0;
+}
+
+static void cxl_access_coordinate_set(struct access_coordinate *coord,
+                                     int access, unsigned int val)
+{
+       switch (access) {
+       case ACPI_HMAT_ACCESS_LATENCY:
+               coord->read_latency = val;
+               coord->write_latency = val;
+               break;
+       case ACPI_HMAT_READ_LATENCY:
+               coord->read_latency = val;
+               break;
+       case ACPI_HMAT_WRITE_LATENCY:
+               coord->write_latency = val;
+               break;
+       case ACPI_HMAT_ACCESS_BANDWIDTH:
+               coord->read_bandwidth = val;
+               coord->write_bandwidth = val;
+               break;
+       case ACPI_HMAT_READ_BANDWIDTH:
+               coord->read_bandwidth = val;
+               break;
+       case ACPI_HMAT_WRITE_BANDWIDTH:
+               coord->write_bandwidth = val;
+               break;
+       }
+}
+
+static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
+                              const unsigned long end)
+{
+       struct acpi_cdat_header *hdr = &header->cdat;
+       struct acpi_cdat_dslbis *dslbis;
+       int size = sizeof(*hdr) + sizeof(*dslbis);
+       struct xarray *dsmas_xa = arg;
+       struct dsmas_entry *dent;
+       __le64 le_base;
+       __le16 le_val;
+       u64 val;
+       u16 len;
+       int rc;
+
+       len = le16_to_cpu((__force __le16)hdr->length);
+       if (len != size || (unsigned long)hdr + len > end) {
+               pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
+               return -EINVAL;
+       }
+
+       /* Skip common header */
+       dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);
+
+       /* Skip unrecognized data type */
+       if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
+               return 0;
+
+       /* Not a memory type, skip */
+       if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
+               return 0;
+
+       dent = xa_load(dsmas_xa, dslbis->handle);
+       if (!dent) {
+               pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
+               return 0;
+       }
+
+       le_base = (__force __le64)dslbis->entry_base_unit;
+       le_val = (__force __le16)dslbis->entry[0];
+       rc = check_mul_overflow(le64_to_cpu(le_base),
+                               le16_to_cpu(le_val), &val);
+       if (rc)
+               pr_warn("DSLBIS value overflowed.\n");
+
+       cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
+
+       return 0;
+}
+
+static int cdat_table_parse_output(int rc)
+{
+       if (rc < 0)
+               return rc;
+       if (rc == 0)
+               return -ENOENT;
+
+       return 0;
+}
+
+static int cxl_cdat_endpoint_process(struct cxl_port *port,
+                                    struct xarray *dsmas_xa)
+{
+       int rc;
+
+       rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
+                             dsmas_xa, port->cdat.table);
+       rc = cdat_table_parse_output(rc);
+       if (rc)
+               return rc;
+
+       rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
+                             dsmas_xa, port->cdat.table);
+       return cdat_table_parse_output(rc);
+}
+
+static int cxl_port_perf_data_calculate(struct cxl_port *port,
+                                       struct xarray *dsmas_xa)
+{
+       struct access_coordinate c;
+       struct dsmas_entry *dent;
+       int valid_entries = 0;
+       unsigned long index;
+       int rc;
+
+       rc = cxl_endpoint_get_perf_coordinates(port, &c);
+       if (rc) {
+               dev_dbg(&port->dev, "Failed to retrieve perf coordinates.\n");
+               return rc;
+       }
+
+       struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+
+       if (!cxl_root)
+               return -ENODEV;
+
+       if (!cxl_root->ops || !cxl_root->ops->qos_class)
+               return -EOPNOTSUPP;
+
+       xa_for_each(dsmas_xa, index, dent) {
+               int qos_class;
+
+               dent->coord.read_latency = dent->coord.read_latency +
+                                          c.read_latency;
+               dent->coord.write_latency = dent->coord.write_latency +
+                                           c.write_latency;
+               dent->coord.read_bandwidth = min_t(int, c.read_bandwidth,
+                                                  dent->coord.read_bandwidth);
+               dent->coord.write_bandwidth = min_t(int, c.write_bandwidth,
+                                                   dent->coord.write_bandwidth);
+
+               dent->entries = 1;
+               rc = cxl_root->ops->qos_class(cxl_root, &dent->coord, 1,
+                                             &qos_class);
+               if (rc != 1)
+                       continue;
+
+               valid_entries++;
+               dent->qos_class = qos_class;
+       }
+
+       if (!valid_entries)
+               return -ENOENT;
+
+       return 0;
+}
+
+static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
+                          struct list_head *list)
+{
+       struct cxl_dpa_perf *dpa_perf;
+
+       dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
+       if (!dpa_perf)
+               return;
+
+       dpa_perf->dpa_range = dent->dpa_range;
+       dpa_perf->coord = dent->coord;
+       dpa_perf->qos_class = dent->qos_class;
+       list_add_tail(&dpa_perf->list, list);
+       dev_dbg(dev,
+               "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
+               dent->dpa_range.start, dpa_perf->qos_class,
+               dent->coord.read_bandwidth, dent->coord.write_bandwidth,
+               dent->coord.read_latency, dent->coord.write_latency);
+}
+
+static void free_perf_ents(void *data)
+{
+       struct cxl_memdev_state *mds = data;
+       struct cxl_dpa_perf *dpa_perf, *n;
+       LIST_HEAD(discard);
+
+       list_splice_tail_init(&mds->ram_perf_list, &discard);
+       list_splice_tail_init(&mds->pmem_perf_list, &discard);
+       list_for_each_entry_safe(dpa_perf, n, &discard, list) {
+               list_del(&dpa_perf->list);
+               kfree(dpa_perf);
+       }
+}
+
+static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
+                                    struct xarray *dsmas_xa)
+{
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+       struct device *dev = cxlds->dev;
+       struct range pmem_range = {
+               .start = cxlds->pmem_res.start,
+               .end = cxlds->pmem_res.end,
+       };
+       struct range ram_range = {
+               .start = cxlds->ram_res.start,
+               .end = cxlds->ram_res.end,
+       };
+       struct dsmas_entry *dent;
+       unsigned long index;
+
+       xa_for_each(dsmas_xa, index, dent) {
+               if (resource_size(&cxlds->ram_res) &&
+                   range_contains(&ram_range, &dent->dpa_range))
+                       add_perf_entry(dev, dent, &mds->ram_perf_list);
+               else if (resource_size(&cxlds->pmem_res) &&
+                        range_contains(&pmem_range, &dent->dpa_range))
+                       add_perf_entry(dev, dent, &mds->pmem_perf_list);
+               else
+                       dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
+                               dent->dpa_range.start);
+       }
+
+       devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
+}
+
+static int match_cxlrd_qos_class(struct device *dev, void *data)
+{
+       int dev_qos_class = *(int *)data;
+       struct cxl_root_decoder *cxlrd;
+
+       if (!is_root_decoder(dev))
+               return 0;
+
+       cxlrd = to_cxl_root_decoder(dev);
+       if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
+               return 0;
+
+       if (cxlrd->qos_class == dev_qos_class)
+               return 1;
+
+       return 0;
+}
+
+static void cxl_qos_match(struct cxl_port *root_port,
+                         struct list_head *work_list,
+                         struct list_head *discard_list)
+{
+       struct cxl_dpa_perf *dpa_perf, *n;
+
+       list_for_each_entry_safe(dpa_perf, n, work_list, list) {
+               int rc;
+
+               if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
+                       return;
+
+               rc = device_for_each_child(&root_port->dev,
+                                          (void *)&dpa_perf->qos_class,
+                                          match_cxlrd_qos_class);
+               if (!rc)
+                       list_move_tail(&dpa_perf->list, discard_list);
+       }
+}
+
+static int match_cxlrd_hb(struct device *dev, void *data)
+{
+       struct device *host_bridge = data;
+       struct cxl_switch_decoder *cxlsd;
+       struct cxl_root_decoder *cxlrd;
+
+       if (!is_root_decoder(dev))
+               return 0;
+
+       cxlrd = to_cxl_root_decoder(dev);
+       cxlsd = &cxlrd->cxlsd;
+
+       guard(rwsem_read)(&cxl_region_rwsem);
+       for (int i = 0; i < cxlsd->nr_targets; i++) {
+               if (host_bridge == cxlsd->target[i]->dport_dev)
+                       return 1;
+       }
+
+       return 0;
+}
+
+static void discard_dpa_perf(struct list_head *list)
+{
+       struct cxl_dpa_perf *dpa_perf, *n;
+
+       list_for_each_entry_safe(dpa_perf, n, list, list) {
+               list_del(&dpa_perf->list);
+               kfree(dpa_perf);
+       }
+}
+DEFINE_FREE(dpa_perf, struct list_head *, if (!list_empty(_T)) discard_dpa_perf(_T))
+
+static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
+{
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+       LIST_HEAD(__discard);
+       struct list_head *discard __free(dpa_perf) = &__discard;
+       struct cxl_port *root_port;
+       int rc;
+
+       struct cxl_root *cxl_root __free(put_cxl_root) =
+               find_cxl_root(cxlmd->endpoint);
+
+       if (!cxl_root)
+               return -ENODEV;
+
+       root_port = &cxl_root->port;
+
+       /* Check that the QTG IDs are all sane between end device and root decoders */
+       cxl_qos_match(root_port, &mds->ram_perf_list, discard);
+       cxl_qos_match(root_port, &mds->pmem_perf_list, discard);
+
+       /* Check to make sure that the device's host bridge is under a root decoder */
+       rc = device_for_each_child(&root_port->dev,
+                                  (void *)cxlmd->endpoint->host_bridge,
+                                  match_cxlrd_hb);
+       if (!rc) {
+               list_splice_tail_init(&mds->ram_perf_list, discard);
+               list_splice_tail_init(&mds->pmem_perf_list, discard);
+       }
+
+       return rc;
+}
+
+static void discard_dsmas(struct xarray *xa)
+{
+       unsigned long index;
+       void *ent;
+
+       xa_for_each(xa, index, ent) {
+               xa_erase(xa, index);
+               kfree(ent);
+       }
+       xa_destroy(xa);
+}
+DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))
+
+void cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct xarray __dsmas_xa;
+       struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
+       int rc;
+
+       xa_init(&__dsmas_xa);
+       if (!port->cdat.table)
+               return;
+
+       rc = cxl_cdat_endpoint_process(port, dsmas_xa);
+       if (rc < 0) {
+               dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
+               return;
+       }
+
+       rc = cxl_port_perf_data_calculate(port, dsmas_xa);
+       if (rc) {
+               dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
+               return;
+       }
+
+       cxl_memdev_set_qos_class(cxlds, dsmas_xa);
+       cxl_qos_class_verify(cxlmd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
+
+static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
+                              const unsigned long end)
+{
+       struct acpi_cdat_sslbis *sslbis;
+       int size = sizeof(header->cdat) + sizeof(*sslbis);
+       struct cxl_port *port = arg;
+       struct device *dev = &port->dev;
+       struct acpi_cdat_sslbe *entry;
+       int remain, entries, i;
+       u16 len;
+
+       len = le16_to_cpu((__force __le16)header->cdat.length);
+       remain = len - size;
+       if (!remain || remain % sizeof(*entry) ||
+           (unsigned long)header + len > end) {
+               dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
+               return -EINVAL;
+       }
+
+       /* Skip common header */
+       sslbis = (struct acpi_cdat_sslbis *)((unsigned long)header +
+                                            sizeof(header->cdat));
+
+       /* Unrecognized data type, we can skip */
+       if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
+               return 0;
+
+       entries = remain / sizeof(*entry);
+       entry = (struct acpi_cdat_sslbe *)((unsigned long)header + sizeof(*sslbis));
+
+       for (i = 0; i < entries; i++) {
+               u16 x = le16_to_cpu((__force __le16)entry->portx_id);
+               u16 y = le16_to_cpu((__force __le16)entry->porty_id);
+               __le64 le_base;
+               __le16 le_val;
+               struct cxl_dport *dport;
+               unsigned long index;
+               u16 dsp_id;
+               u64 val;
+
+               switch (x) {
+               case ACPI_CDAT_SSLBIS_US_PORT:
+                       dsp_id = y;
+                       break;
+               case ACPI_CDAT_SSLBIS_ANY_PORT:
+                       switch (y) {
+                       case ACPI_CDAT_SSLBIS_US_PORT:
+                               dsp_id = x;
+                               break;
+                       case ACPI_CDAT_SSLBIS_ANY_PORT:
+                               dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
+                               break;
+                       default:
+                               dsp_id = y;
+                               break;
+                       }
+                       break;
+               default:
+                       dsp_id = x;
+                       break;
+               }
+
+               le_base = (__force __le64)sslbis->entry_base_unit;
+               le_val = (__force __le16)entry->latency_or_bandwidth;
+
+               if (check_mul_overflow(le64_to_cpu(le_base),
+                                      le16_to_cpu(le_val), &val))
+                       dev_warn(dev, "SSLBIS value overflowed!\n");
+
+               xa_for_each(&port->dports, index, dport) {
+                       if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
+                           dsp_id == dport->port_id)
+                               cxl_access_coordinate_set(&dport->sw_coord,
+                                                         sslbis->data_type,
+                                                         val);
+               }
+
+               entry++;
+       }
+
+       return 0;
+}
+
+void cxl_switch_parse_cdat(struct cxl_port *port)
+{
+       int rc;
+
+       if (!port->cdat.table)
+               return;
+
+       rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
+                             port, port->cdat.table);
+       rc = cdat_table_parse_output(rc);
+       if (rc)
+               dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
+
+MODULE_IMPORT_NS(CXL);
index 86d7ba23235e3bdefb567e3dc3cb656b4f9593c0..3b64fb1b9ed058055fa80220fc2b83b109cc6e17 100644 (file)
@@ -88,4 +88,6 @@ enum cxl_poison_trace_type {
        CXL_POISON_TRACE_CLEAR,
 };
 
+long cxl_pci_get_latency(struct pci_dev *pdev);
+
 #endif /* __CXL_CORE_H__ */
index 36270dcfb42ef2f6917ea9036c8903e44584e4a2..27166a41170579a9441a2f9bf3e2a915ed85d893 100644 (file)
@@ -63,6 +63,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
        CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
        CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
        CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
+       CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
 };
 
 /*
@@ -836,54 +837,37 @@ out:
 }
 EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
 
-/*
- * General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
- */
-static const uuid_t gen_media_event_uuid =
-       UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
-                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6);
-
-/*
- * DRAM Event Record
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
- */
-static const uuid_t dram_event_uuid =
-       UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
-                 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24);
-
-/*
- * Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
- */
-static const uuid_t mem_mod_event_uuid =
-       UUID_INIT(0xfe927475, 0xdd59, 0x4339,
-                 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74);
-
-static void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
-                                  enum cxl_event_log_type type,
-                                  struct cxl_event_record_raw *record)
+void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+                           enum cxl_event_log_type type,
+                           enum cxl_event_type event_type,
+                           const uuid_t *uuid, union cxl_event *evt)
 {
-       uuid_t *id = &record->hdr.id;
-
-       if (uuid_equal(id, &gen_media_event_uuid)) {
-               struct cxl_event_gen_media *rec =
-                               (struct cxl_event_gen_media *)record;
+       if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+               trace_cxl_general_media(cxlmd, type, &evt->gen_media);
+       else if (event_type == CXL_CPER_EVENT_DRAM)
+               trace_cxl_dram(cxlmd, type, &evt->dram);
+       else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
+               trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
+       else
+               trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
 
-               trace_cxl_general_media(cxlmd, type, rec);
-       } else if (uuid_equal(id, &dram_event_uuid)) {
-               struct cxl_event_dram *rec = (struct cxl_event_dram *)record;
+static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+                                    enum cxl_event_log_type type,
+                                    struct cxl_event_record_raw *record)
+{
+       enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
+       const uuid_t *uuid = &record->id;
 
-               trace_cxl_dram(cxlmd, type, rec);
-       } else if (uuid_equal(id, &mem_mod_event_uuid)) {
-               struct cxl_event_mem_module *rec =
-                               (struct cxl_event_mem_module *)record;
+       if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
+               ev_type = CXL_CPER_EVENT_GEN_MEDIA;
+       else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
+               ev_type = CXL_CPER_EVENT_DRAM;
+       else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
+               ev_type = CXL_CPER_EVENT_MEM_MODULE;
 
-               trace_cxl_memory_module(cxlmd, type, rec);
-       } else {
-               /* For unknown record types print just the header */
-               trace_cxl_generic_event(cxlmd, type, record);
-       }
+       cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
 }
 
 static int cxl_clear_event_record(struct cxl_memdev_state *mds,
@@ -926,7 +910,10 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
         */
        i = 0;
        for (cnt = 0; cnt < total; cnt++) {
-               payload->handles[i++] = get_pl->records[cnt].hdr.handle;
+               struct cxl_event_record_raw *raw = &get_pl->records[cnt];
+               struct cxl_event_generic *gen = &raw->event.generic;
+
+               payload->handles[i++] = gen->hdr.handle;
                dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
                        le16_to_cpu(payload->handles[i]));
 
@@ -991,8 +978,8 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
                        break;
 
                for (i = 0; i < nr_rec; i++)
-                       cxl_event_trace_record(cxlmd, type,
-                                              &payload->records[i]);
+                       __cxl_event_trace_record(cxlmd, type,
+                                                &payload->records[i]);
 
                if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
                        trace_cxl_overflow(cxlmd, type, payload);
@@ -1404,6 +1391,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
        mds->cxlds.reg_map.host = dev;
        mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
        mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
+       INIT_LIST_HEAD(&mds->ram_perf_list);
+       INIT_LIST_HEAD(&mds->pmem_perf_list);
 
        return mds;
 }
index 2f43d368ba07308c27a2aba69a3a3330f7413325..dae8802ecdb01ee748e3891120bc0011e9e8894e 100644 (file)
@@ -114,7 +114,7 @@ static DEVICE_ATTR_RO(serial);
 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
 {
-       return sprintf(buf, "%d\n", dev_to_node(dev));
+       return sysfs_emit(buf, "%d\n", dev_to_node(dev));
 }
 static DEVICE_ATTR_RO(numa_node);
 
index 37e1652afbc7eac56fffbb0a5692ea2a1cd82411..6c9c8d92f8f71401af70fec26be60e0339c18c64 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2021 Intel Corporation. All rights reserved. */
+#include <linux/units.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/device.h>
 #include <linux/delay.h>
@@ -979,3 +980,38 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
        return PCI_ERS_RESULT_NEED_RESET;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
+
+static int cxl_flit_size(struct pci_dev *pdev)
+{
+       if (cxl_pci_flit_256(pdev))
+               return 256;
+
+       return 68;
+}
+
+/**
+ * cxl_pci_get_latency - calculate the link latency for the PCIe link
+ * @pdev: PCI device
+ *
+ * Return: calculated latency or 0 for no latency
+ *
+ * CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation
+ * Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
+ * LinkPropagationLatency is negligible, so 0 will be used
+ * RetimerLatency is assumed to be negligible and 0 will be used
+ * FlitLatency = FlitSize / LinkBandwidth
+ * FlitSize is defined by spec. CXL rev3.0 4.2.1.
+ * 68B flit is used up to 32GT/s. >32GT/s, 256B flit size is used.
+ * The FlitLatency is converted to picoseconds.
+ */
+long cxl_pci_get_latency(struct pci_dev *pdev)
+{
+       long bw;
+
+       bw = pcie_link_speed_mbps(pdev);
+       if (bw < 0)
+               return 0;
+       bw /= BITS_PER_BYTE;
+
+       return cxl_flit_size(pdev) * MEGA / bw;
+}
index fc94f5240327127743336b47e9228f8871deb67d..e69625a8d6a1d7229b7be924d7b005f4e7f1f67a 100644 (file)
@@ -64,14 +64,14 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
 
 struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
 {
-       struct cxl_port *port = find_cxl_root(cxlmd->endpoint);
+       struct cxl_root *cxl_root __free(put_cxl_root) =
+               find_cxl_root(cxlmd->endpoint);
        struct device *dev;
 
-       if (!port)
+       if (!cxl_root)
                return NULL;
 
-       dev = device_find_child(&port->dev, NULL, match_nvdimm_bridge);
-       put_device(&port->dev);
+       dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);
 
        if (!dev)
                return NULL;
index b7c93bb18f6e75adfb129e175be5afcba98b10de..e59d9d37aa65009c97326fa93ec869dee709804d 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
+#include <linux/node.h>
 #include <cxlmem.h>
 #include <cxlpci.h>
 #include <cxl.h>
@@ -172,14 +173,10 @@ static ssize_t target_list_show(struct device *dev,
 {
        struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
        ssize_t offset;
-       unsigned int seq;
        int rc;
 
-       do {
-               seq = read_seqbegin(&cxlsd->target_lock);
-               rc = emit_target_list(cxlsd, buf);
-       } while (read_seqretry(&cxlsd->target_lock, seq));
-
+       guard(rwsem_read)(&cxl_region_rwsem);
+       rc = emit_target_list(cxlsd, buf);
        if (rc < 0)
                return rc;
        offset = rc;
@@ -541,7 +538,10 @@ static void cxl_port_release(struct device *dev)
        xa_destroy(&port->dports);
        xa_destroy(&port->regions);
        ida_free(&cxl_port_ida, port->id);
-       kfree(port);
+       if (is_cxl_root(port))
+               kfree(to_cxl_root(port));
+       else
+               kfree(port);
 }
 
 static ssize_t decoders_committed_show(struct device *dev,
@@ -669,17 +669,31 @@ static struct lock_class_key cxl_port_key;
 static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
                                       struct cxl_dport *parent_dport)
 {
-       struct cxl_port *port;
+       struct cxl_root *cxl_root __free(kfree) = NULL;
+       struct cxl_port *port, *_port __free(kfree) = NULL;
        struct device *dev;
        int rc;
 
-       port = kzalloc(sizeof(*port), GFP_KERNEL);
-       if (!port)
-               return ERR_PTR(-ENOMEM);
+       /* No parent_dport, root cxl_port */
+       if (!parent_dport) {
+               cxl_root = kzalloc(sizeof(*cxl_root), GFP_KERNEL);
+               if (!cxl_root)
+                       return ERR_PTR(-ENOMEM);
+       } else {
+               _port = kzalloc(sizeof(*port), GFP_KERNEL);
+               if (!_port)
+                       return ERR_PTR(-ENOMEM);
+       }
 
        rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
        if (rc < 0)
-               goto err;
+               return ERR_PTR(rc);
+
+       if (cxl_root)
+               port = &no_free_ptr(cxl_root)->port;
+       else
+               port = no_free_ptr(_port);
+
        port->id = rc;
        port->uport_dev = uport_dev;
 
@@ -731,10 +745,6 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
        dev->type = &cxl_port_type;
 
        return port;
-
-err:
-       kfree(port);
-       return ERR_PTR(rc);
 }
 
 static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
@@ -841,6 +851,9 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
        if (rc)
                return ERR_PTR(rc);
 
+       if (parent_dport && dev_is_pci(uport_dev))
+               port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev));
+
        return port;
 
 err:
@@ -884,6 +897,22 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
 }
 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
 
+struct cxl_root *devm_cxl_add_root(struct device *host,
+                                  const struct cxl_root_ops *ops)
+{
+       struct cxl_root *cxl_root;
+       struct cxl_port *port;
+
+       port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
+       if (IS_ERR(port))
+               return (struct cxl_root *)port;
+
+       cxl_root = to_cxl_root(port);
+       cxl_root->ops = ops;
+       return cxl_root;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, CXL);
+
 struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
 {
        /* There is no pci_bus associated with a CXL platform-root port */
@@ -939,7 +968,7 @@ static bool dev_is_cxl_root_child(struct device *dev)
        return false;
 }
 
-struct cxl_port *find_cxl_root(struct cxl_port *port)
+struct cxl_root *find_cxl_root(struct cxl_port *port)
 {
        struct cxl_port *iter = port;
 
@@ -949,10 +978,19 @@ struct cxl_port *find_cxl_root(struct cxl_port *port)
        if (!iter)
                return NULL;
        get_device(&iter->dev);
-       return iter;
+       return to_cxl_root(iter);
 }
 EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
 
+void put_cxl_root(struct cxl_root *cxl_root)
+{
+       if (!cxl_root)
+               return;
+
+       put_device(&cxl_root->port.dev);
+}
+EXPORT_SYMBOL_NS_GPL(put_cxl_root, CXL);
+
 static struct cxl_dport *find_dport(struct cxl_port *port, int id)
 {
        struct cxl_dport *dport;
@@ -1108,6 +1146,9 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
        if (rc)
                return ERR_PTR(rc);
 
+       if (dev_is_pci(dport_dev))
+               dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev));
+
        return dport;
 }
 
@@ -1633,7 +1674,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
 static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
                                    struct cxl_port *port, int *target_map)
 {
-       int i, rc = 0;
+       int i;
 
        if (!target_map)
                return 0;
@@ -1643,19 +1684,16 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
        if (xa_empty(&port->dports))
                return -EINVAL;
 
-       write_seqlock(&cxlsd->target_lock);
-       for (i = 0; i < cxlsd->nr_targets; i++) {
+       guard(rwsem_write)(&cxl_region_rwsem);
+       for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
                struct cxl_dport *dport = find_dport(port, target_map[i]);
 
-               if (!dport) {
-                       rc = -ENXIO;
-                       break;
-               }
+               if (!dport)
+                       return -ENXIO;
                cxlsd->target[i] = dport;
        }
-       write_sequnlock(&cxlsd->target_lock);
 
-       return rc;
+       return 0;
 }
 
 struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
@@ -1725,7 +1763,6 @@ static int cxl_switch_decoder_init(struct cxl_port *port,
                return -EINVAL;
 
        cxlsd->nr_targets = nr_targets;
-       seqlock_init(&cxlsd->target_lock);
        return cxl_decoder_init(port, &cxlsd->cxld);
 }
 
@@ -2059,6 +2096,80 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
 }
 EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
 
+static void combine_coordinates(struct access_coordinate *c1,
+                               struct access_coordinate *c2)
+{
+               if (c2->write_bandwidth)
+                       c1->write_bandwidth = min(c1->write_bandwidth,
+                                                 c2->write_bandwidth);
+               c1->write_latency += c2->write_latency;
+
+               if (c2->read_bandwidth)
+                       c1->read_bandwidth = min(c1->read_bandwidth,
+                                                c2->read_bandwidth);
+               c1->read_latency += c2->read_latency;
+}
+
+/**
+ * cxl_endpoint_get_perf_coordinates - Retrieve performance numbers stored in dports
+ *                                of CXL path
+ * @port: endpoint cxl_port
+ * @coord: output performance data
+ *
+ * Return: errno on failure, 0 on success.
+ */
+int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
+                                     struct access_coordinate *coord)
+{
+       struct access_coordinate c = {
+               .read_bandwidth = UINT_MAX,
+               .write_bandwidth = UINT_MAX,
+       };
+       struct cxl_port *iter = port;
+       struct cxl_dport *dport;
+       struct pci_dev *pdev;
+       unsigned int bw;
+
+       if (!is_cxl_endpoint(port))
+               return -EINVAL;
+
+       dport = iter->parent_dport;
+
+       /*
+        * Exit the loop when the parent port of the current port is cxl root.
+        * The iterative loop starts at the endpoint and gathers the
+        * latency of the CXL link from the current iter to the next downstream
+        * port each iteration. If the parent is cxl root then there is
+        * nothing to gather.
+        */
+       while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
+               combine_coordinates(&c, &dport->sw_coord);
+               c.write_latency += dport->link_latency;
+               c.read_latency += dport->link_latency;
+
+               iter = to_cxl_port(iter->dev.parent);
+               dport = iter->parent_dport;
+       }
+
+       /* Augment with the generic port (host bridge) perf data */
+       combine_coordinates(&c, &dport->hb_coord);
+
+       /* Get the calculated PCI paths bandwidth */
+       pdev = to_pci_dev(port->uport_dev->parent);
+       bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
+       if (bw == 0)
+               return -ENXIO;
+       bw /= BITS_PER_BYTE;
+
+       c.write_bandwidth = min(c.write_bandwidth, bw);
+       c.read_bandwidth = min(c.read_bandwidth, bw);
+
+       *coord = c;
+
+       return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);
+
 /* for user tooling to ensure port disable work has completed */
 static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
 {
index 3e817a6f94c6a4d2ac5113558a6c7633f7120821..ce0e2d82bb2b4cfdc61761d5e32a8c91cc121d82 100644 (file)
@@ -397,7 +397,7 @@ static ssize_t interleave_ways_store(struct device *dev,
                return rc;
 
        /*
-        * Even for x3, x9, and x12 interleaves the region interleave must be a
+        * Even for x3, x6, and x12 interleaves the region interleave must be a
         * power of 2 multiple of the host bridge interleave.
         */
        if (!is_power_of_2(val / cxld->interleave_ways) ||
@@ -525,7 +525,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
        struct cxl_region_params *p = &cxlr->params;
        struct resource *res;
-       u32 remainder = 0;
+       u64 remainder = 0;
 
        lockdep_assert_held_write(&cxl_region_rwsem);
 
@@ -545,15 +545,16 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
            (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
                return -ENXIO;
 
-       div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
+       div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
        if (remainder)
                return -EINVAL;
 
        res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
                                    dev_name(&cxlr->dev));
        if (IS_ERR(res)) {
-               dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
-                       PTR_ERR(res));
+               dev_dbg(&cxlr->dev,
+                       "HPA allocation error (%ld) for size:%pap in %s %pr\n",
+                       PTR_ERR(res), &size, cxlrd->res->name, cxlrd->res);
                return PTR_ERR(res);
        }
 
@@ -2083,13 +2084,13 @@ static struct cxl_region *to_cxl_region(struct device *dev)
        return container_of(dev, struct cxl_region, dev);
 }
 
-static void unregister_region(void *dev)
+static void unregister_region(void *_cxlr)
 {
-       struct cxl_region *cxlr = to_cxl_region(dev);
+       struct cxl_region *cxlr = _cxlr;
        struct cxl_region_params *p = &cxlr->params;
        int i;
 
-       device_del(dev);
+       device_del(&cxlr->dev);
 
        /*
         * Now that region sysfs is shutdown, the parameter block is now
@@ -2100,7 +2101,7 @@ static void unregister_region(void *dev)
                detach_target(cxlr, i);
 
        cxl_region_iomem_release(cxlr);
-       put_device(dev);
+       put_device(&cxlr->dev);
 }
 
 static struct lock_class_key cxl_region_key;
index a0b5819bc70b3075b37d5452da0ac53efbf382f8..bdf117a33744be2db0468e869226ac8d45ef7a16 100644 (file)
@@ -181,6 +181,7 @@ TRACE_EVENT(cxl_overflow,
  *     1) Add CXL_EVT_TP_entry to TP_STRUCT__entry
  *     2) Use CXL_EVT_TP_fast_assign within TP_fast_assign;
  *        pass the dev, log, and CXL event header
+ *        NOTE: The uuid must be assigned by the specific trace event
  *     3) Use CXL_EVT_TP_printk() instead of TP_printk()
  *
  * See the generic_event tracepoint as an example.
@@ -203,7 +204,6 @@ TRACE_EVENT(cxl_overflow,
        __assign_str(host, dev_name((cxlmd)->dev.parent));                      \
        __entry->log = (l);                                                     \
        __entry->serial = (cxlmd)->cxlds->serial;                               \
-       memcpy(&__entry->hdr_uuid, &(hdr).id, sizeof(uuid_t));                  \
        __entry->hdr_length = (hdr).length;                                     \
        __entry->hdr_flags = get_unaligned_le24((hdr).flags);                   \
        __entry->hdr_handle = le16_to_cpu((hdr).handle);                        \
@@ -225,9 +225,9 @@ TRACE_EVENT(cxl_overflow,
 TRACE_EVENT(cxl_generic_event,
 
        TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
-                struct cxl_event_record_raw *rec),
+                const uuid_t *uuid, struct cxl_event_generic *gen_rec),
 
-       TP_ARGS(cxlmd, log, rec),
+       TP_ARGS(cxlmd, log, uuid, gen_rec),
 
        TP_STRUCT__entry(
                CXL_EVT_TP_entry
@@ -235,8 +235,9 @@ TRACE_EVENT(cxl_generic_event,
        ),
 
        TP_fast_assign(
-               CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
-               memcpy(__entry->data, &rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
+               CXL_EVT_TP_fast_assign(cxlmd, log, gen_rec->hdr);
+               memcpy(&__entry->hdr_uuid, uuid, sizeof(uuid_t));
+               memcpy(__entry->data, gen_rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
        ),
 
        CXL_EVT_TP_printk("%s",
@@ -337,6 +338,7 @@ TRACE_EVENT(cxl_general_media,
 
        TP_fast_assign(
                CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+               __entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID;
 
                /* General Media */
                __entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -423,6 +425,7 @@ TRACE_EVENT(cxl_dram,
 
        TP_fast_assign(
                CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+               __entry->hdr_uuid = CXL_EVENT_DRAM_UUID;
 
                /* DRAM */
                __entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -570,6 +573,7 @@ TRACE_EVENT(cxl_memory_module,
 
        TP_fast_assign(
                CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+               __entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID;
 
                /* Memory Module Event */
                __entry->event_type = rec->event_type;
index 687043ece1018c41c256c02cd697749d7916a42f..b6017c0c57b4d5e69dfe45011b7a8b3f5bf0b913 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/log2.h>
+#include <linux/node.h>
 #include <linux/io.h>
 
 /**
@@ -412,7 +413,6 @@ struct cxl_endpoint_decoder {
 /**
  * struct cxl_switch_decoder - Switch specific CXL HDM Decoder
  * @cxld: base cxl_decoder object
- * @target_lock: coordinate coherent reads of the target list
  * @nr_targets: number of elements in @target
  * @target: active ordered target list in current decoder configuration
  *
@@ -424,7 +424,6 @@ struct cxl_endpoint_decoder {
  */
 struct cxl_switch_decoder {
        struct cxl_decoder cxld;
-       seqlock_t target_lock;
        int nr_targets;
        struct cxl_dport *target[];
 };
@@ -590,6 +589,7 @@ struct cxl_dax_region {
  * @depth: How deep this port is relative to the root. depth 0 is the root.
  * @cdat: Cached CDAT data
  * @cdat_available: Should a CDAT attribute be available in sysfs
+ * @pci_latency: Upstream latency in picoseconds
  */
 struct cxl_port {
        struct device dev;
@@ -612,6 +612,30 @@ struct cxl_port {
                size_t length;
        } cdat;
        bool cdat_available;
+       long pci_latency;
+};
+
+/**
+ * struct cxl_root - logical collection of root cxl_port items
+ *
+ * @port: cxl_port member
+ * @ops: cxl root operations
+ */
+struct cxl_root {
+       struct cxl_port port;
+       const struct cxl_root_ops *ops;
+};
+
+static inline struct cxl_root *
+to_cxl_root(const struct cxl_port *port)
+{
+       return container_of(port, struct cxl_root, port);
+}
+
+struct cxl_root_ops {
+       int (*qos_class)(struct cxl_root *cxl_root,
+                        struct access_coordinate *coord, int entries,
+                        int *qos_class);
 };
 
 static inline struct cxl_dport *
@@ -634,6 +658,9 @@ struct cxl_rcrb_info {
  * @rch: Indicate whether this dport was enumerated in RCH or VH mode
  * @port: reference to cxl_port that contains this downstream port
  * @regs: Dport parsed register blocks
+ * @sw_coord: access coordinates (performance) for switch from CDAT
+ * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
+ * @link_latency: calculated PCIe downstream latency
  */
 struct cxl_dport {
        struct device *dport_dev;
@@ -643,6 +670,9 @@ struct cxl_dport {
        bool rch;
        struct cxl_port *port;
        struct cxl_regs regs;
+       struct access_coordinate sw_coord;
+       struct access_coordinate hb_coord;
+       long link_latency;
 };
 
 /**
@@ -700,7 +730,12 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
                                   struct device *uport_dev,
                                   resource_size_t component_reg_phys,
                                   struct cxl_dport *parent_dport);
-struct cxl_port *find_cxl_root(struct cxl_port *port);
+struct cxl_root *devm_cxl_add_root(struct device *host,
+                                  const struct cxl_root_ops *ops);
+struct cxl_root *find_cxl_root(struct cxl_port *port);
+void put_cxl_root(struct cxl_root *cxl_root);
+DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
+
 int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
 void cxl_bus_rescan(void);
 void cxl_bus_drain(void);
@@ -839,6 +874,12 @@ static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
 }
 #endif
 
+void cxl_endpoint_parse_cdat(struct cxl_port *port);
+void cxl_switch_parse_cdat(struct cxl_port *port);
+
+int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
+                                     struct access_coordinate *coord);
+
 /*
  * Unit test builds overrides this to __weak, find the 'strong' version
  * of these symbols in tools/testing/cxl/.
index a2fcbca253f3983a6c4bfb1b2f964314e1b250d6..5303d6942b880af65dcf8e77b02d26626c2bb94d 100644 (file)
@@ -6,6 +6,8 @@
 #include <linux/cdev.h>
 #include <linux/uuid.h>
 #include <linux/rcuwait.h>
+#include <linux/cxl-event.h>
+#include <linux/node.h>
 #include "cxl.h"
 
 /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -391,6 +393,20 @@ enum cxl_devtype {
        CXL_DEVTYPE_CLASSMEM,
 };
 
+/**
+ * struct cxl_dpa_perf - DPA performance property entry
+ * @list: list entry
+ * @dpa_range: range for DPA address
+ * @coord: QoS performance data (i.e. latency, bandwidth)
+ * @qos_class: QoS Class cookies
+ */
+struct cxl_dpa_perf {
+       struct list_head list;
+       struct range dpa_range;
+       struct access_coordinate coord;
+       int qos_class;
+};
+
 /**
  * struct cxl_dev_state - The driver device state
  *
@@ -455,6 +471,8 @@ struct cxl_dev_state {
  * @security: security driver state info
  * @fw: firmware upload / activation state
  * @mbox_send: @dev specific transport for transmitting mailbox commands
+ * @ram_perf_list: performance data entries matched to RAM
+ * @pmem_perf_list: performance data entries matched to PMEM
  *
  * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
  * details on capacity parameters.
@@ -475,6 +493,10 @@ struct cxl_memdev_state {
        u64 active_persistent_bytes;
        u64 next_volatile_bytes;
        u64 next_persistent_bytes;
+
+       struct list_head ram_perf_list;
+       struct list_head pmem_perf_list;
+
        struct cxl_event_state event;
        struct cxl_poison_state poison;
        struct cxl_security_state security;
@@ -503,6 +525,7 @@ enum cxl_opcode {
        CXL_MBOX_OP_GET_FW_INFO         = 0x0200,
        CXL_MBOX_OP_TRANSFER_FW         = 0x0201,
        CXL_MBOX_OP_ACTIVATE_FW         = 0x0202,
+       CXL_MBOX_OP_GET_TIMESTAMP       = 0x0300,
        CXL_MBOX_OP_SET_TIMESTAMP       = 0x0301,
        CXL_MBOX_OP_GET_SUPPORTED_LOGS  = 0x0400,
        CXL_MBOX_OP_GET_LOG             = 0x0401,
@@ -580,25 +603,28 @@ struct cxl_mbox_identify {
 } __packed;
 
 /*
- * Common Event Record Format
- * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ * General Media Event Record UUID
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
  */
-struct cxl_event_record_hdr {
-       uuid_t id;
-       u8 length;
-       u8 flags[3];
-       __le16 handle;
-       __le16 related_handle;
-       __le64 timestamp;
-       u8 maint_op_class;
-       u8 reserved[15];
-} __packed;
+#define CXL_EVENT_GEN_MEDIA_UUID                                            \
+       UUID_INIT(0xfbcd0a77, 0xc260, 0x417f, 0x85, 0xa9, 0x08, 0x8b, 0x16, \
+                 0x21, 0xeb, 0xa6)
 
-#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
-struct cxl_event_record_raw {
-       struct cxl_event_record_hdr hdr;
-       u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
-} __packed;
+/*
+ * DRAM Event Record UUID
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CXL_EVENT_DRAM_UUID                                                 \
+       UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, \
+                 0x5c, 0x96, 0x24)
+
+/*
+ * Memory Module Event Record UUID
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CXL_EVENT_MEM_MODULE_UUID                                           \
+       UUID_INIT(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86, 0x79, 0xba, 0xb1, \
+                 0x13, 0xb7, 0x74)
 
 /*
  * Get Event Records output payload
@@ -641,74 +667,6 @@ struct cxl_mbox_clear_event_payload {
 } __packed;
 #define CXL_CLEAR_EVENT_MAX_HANDLES U8_MAX
 
-/*
- * General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
- */
-#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
-struct cxl_event_gen_media {
-       struct cxl_event_record_hdr hdr;
-       __le64 phys_addr;
-       u8 descriptor;
-       u8 type;
-       u8 transaction_type;
-       u8 validity_flags[2];
-       u8 channel;
-       u8 rank;
-       u8 device[3];
-       u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
-       u8 reserved[46];
-} __packed;
-
-/*
- * DRAM Event Record - DER
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 3-44
- */
-#define CXL_EVENT_DER_CORRECTION_MASK_SIZE     0x20
-struct cxl_event_dram {
-       struct cxl_event_record_hdr hdr;
-       __le64 phys_addr;
-       u8 descriptor;
-       u8 type;
-       u8 transaction_type;
-       u8 validity_flags[2];
-       u8 channel;
-       u8 rank;
-       u8 nibble_mask[3];
-       u8 bank_group;
-       u8 bank;
-       u8 row[3];
-       u8 column[2];
-       u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
-       u8 reserved[0x17];
-} __packed;
-
-/*
- * Get Health Info Record
- * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
- */
-struct cxl_get_health_info {
-       u8 health_status;
-       u8 media_status;
-       u8 add_status;
-       u8 life_used;
-       u8 device_temp[2];
-       u8 dirty_shutdown_cnt[4];
-       u8 cor_vol_err_cnt[4];
-       u8 cor_per_err_cnt[4];
-} __packed;
-
-/*
- * Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
- */
-struct cxl_event_mem_module {
-       struct cxl_event_record_hdr hdr;
-       u8 event_type;
-       struct cxl_get_health_info info;
-       u8 reserved[0x3d];
-} __packed;
-
 struct cxl_mbox_get_partition_info {
        __le64 active_volatile_cap;
        __le64 active_persistent_cap;
@@ -866,6 +824,10 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
 void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
                                  unsigned long *cmds);
 void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
+void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+                           enum cxl_event_log_type type,
+                           enum cxl_event_type event_type,
+                           const uuid_t *uuid, union cxl_event *evt);
 int cxl_set_timestamp(struct cxl_memdev_state *mds);
 int cxl_poison_state_init(struct cxl_memdev_state *mds);
 int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
index 0fa4799ea316cd802c97feda11fb6e3c88aaa138..711b05d9a370e91b49beffbc5494542d8861d31e 100644 (file)
@@ -85,6 +85,19 @@ struct cdat_entry_header {
        __le16 length;
 } __packed;
 
+/*
+ * CXL v3.0 6.2.3 Table 6-4
+ * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
+ * mode, otherwise it's 68B flits mode.
+ */
+static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
+{
+       u16 lnksta2;
+
+       pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &lnksta2);
+       return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
+}
+
 int devm_cxl_port_enumerate_dports(struct cxl_port *port);
 struct cxl_dev_state;
 int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
index e087febf9af047c81dfb11b06d91d92d442d586c..c5c9d8e0d88d69fcc9f031e1bd46ba7c44de4fd4 100644 (file)
@@ -215,23 +215,78 @@ static ssize_t trigger_poison_list_store(struct device *dev,
 }
 static DEVICE_ATTR_WO(trigger_poison_list);
 
+static ssize_t ram_qos_class_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+       struct cxl_dpa_perf *dpa_perf;
+
+       if (!dev->driver)
+               return -ENOENT;
+
+       if (list_empty(&mds->ram_perf_list))
+               return -ENOENT;
+
+       dpa_perf = list_first_entry(&mds->ram_perf_list, struct cxl_dpa_perf,
+                                   list);
+
+       return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
+}
+
+static struct device_attribute dev_attr_ram_qos_class =
+       __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
+
+static ssize_t pmem_qos_class_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+       struct cxl_dpa_perf *dpa_perf;
+
+       if (!dev->driver)
+               return -ENOENT;
+
+       if (list_empty(&mds->pmem_perf_list))
+               return -ENOENT;
+
+       dpa_perf = list_first_entry(&mds->pmem_perf_list, struct cxl_dpa_perf,
+                                   list);
+
+       return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
+}
+
+static struct device_attribute dev_attr_pmem_qos_class =
+       __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
+
 static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
 {
-       if (a == &dev_attr_trigger_poison_list.attr) {
-               struct device *dev = kobj_to_dev(kobj);
-               struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-               struct cxl_memdev_state *mds =
-                       to_cxl_memdev_state(cxlmd->cxlds);
+       struct device *dev = kobj_to_dev(kobj);
+       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 
+       if (a == &dev_attr_trigger_poison_list.attr)
                if (!test_bit(CXL_POISON_ENABLED_LIST,
                              mds->poison.enabled_cmds))
                        return 0;
-       }
+
+       if (a == &dev_attr_pmem_qos_class.attr)
+               if (list_empty(&mds->pmem_perf_list))
+                       return 0;
+
+       if (a == &dev_attr_ram_qos_class.attr)
+               if (list_empty(&mds->ram_perf_list))
+                       return 0;
+
        return a->mode;
 }
 
 static struct attribute *cxl_mem_attrs[] = {
        &dev_attr_trigger_poison_list.attr,
+       &dev_attr_ram_qos_class.attr,
+       &dev_attr_pmem_qos_class.attr,
        NULL
 };
 
index 0155fb66b580d7f939e3f2d92a34b5c3c8a89586..233e7c42c161d8e0b64424776d121f5d08176010 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <asm-generic/unaligned.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/moduleparam.h>
 #include <linux/module.h>
@@ -381,7 +382,7 @@ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
        return rc;
 }
 
-static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
 {
        struct cxl_dev_state *cxlds = &mds->cxlds;
        const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
@@ -440,7 +441,7 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
        INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
 
        /* background command interrupts are optional */
-       if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
+       if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) || !irq_avail)
                return 0;
 
        msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
@@ -587,7 +588,7 @@ static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
        return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
 }
 
-static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
+static bool cxl_alloc_irq_vectors(struct pci_dev *pdev)
 {
        int nvecs;
 
@@ -604,9 +605,9 @@ static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
                                      PCI_IRQ_MSIX | PCI_IRQ_MSI);
        if (nvecs < 1) {
                dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
-               return -ENXIO;
+               return false;
        }
-       return 0;
+       return true;
 }
 
 static irqreturn_t cxl_event_thread(int irq, void *id)
@@ -742,7 +743,7 @@ static bool cxl_event_int_is_fw(u8 setting)
 }
 
 static int cxl_event_config(struct pci_host_bridge *host_bridge,
-                           struct cxl_memdev_state *mds)
+                           struct cxl_memdev_state *mds, bool irq_avail)
 {
        struct cxl_event_interrupt_policy policy;
        int rc;
@@ -754,6 +755,11 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
        if (!host_bridge->native_cxl_error)
                return 0;
 
+       if (!irq_avail) {
+               dev_info(mds->cxlds.dev, "No interrupt support, disable event processing.\n");
+               return 0;
+       }
+
        rc = cxl_mem_alloc_event_buf(mds);
        if (rc)
                return rc;
@@ -788,6 +794,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct cxl_register_map map;
        struct cxl_memdev *cxlmd;
        int i, rc, pmu_count;
+       bool irq_avail;
 
        /*
         * Double check the anonymous union trickery in struct cxl_regs
@@ -845,11 +852,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        else
                dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
 
-       rc = cxl_alloc_irq_vectors(pdev);
-       if (rc)
-               return rc;
+       irq_avail = cxl_alloc_irq_vectors(pdev);
 
-       rc = cxl_pci_setup_mailbox(mds);
+       rc = cxl_pci_setup_mailbox(mds, irq_avail);
        if (rc)
                return rc;
 
@@ -908,7 +913,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                }
        }
 
-       rc = cxl_event_config(host_bridge, mds);
+       rc = cxl_event_config(host_bridge, mds, irq_avail);
        if (rc)
                return rc;
 
@@ -969,6 +974,61 @@ static struct pci_driver cxl_pci_driver = {
        },
 };
 
+#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
+static void cxl_cper_event_call(enum cxl_event_type ev_type,
+                               struct cxl_cper_event_rec *rec)
+{
+       struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
+       struct pci_dev *pdev __free(pci_dev_put) = NULL;
+       enum cxl_event_log_type log_type;
+       struct cxl_dev_state *cxlds;
+       unsigned int devfn;
+       u32 hdr_flags;
+
+       devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
+       pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
+                                          device_id->bus_num, devfn);
+       if (!pdev)
+               return;
+
+       guard(pci_dev)(pdev);
+       if (pdev->driver != &cxl_pci_driver)
+               return;
+
+       cxlds = pci_get_drvdata(pdev);
+       if (!cxlds)
+               return;
+
+       /* Fabricate a log type */
+       hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
+       log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
+
+       cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
+                              &uuid_null, &rec->event);
+}
+
+static int __init cxl_pci_driver_init(void)
+{
+       int rc;
+
+       rc = cxl_cper_register_callback(cxl_cper_event_call);
+       if (rc)
+               return rc;
+
+       rc = pci_register_driver(&cxl_pci_driver);
+       if (rc)
+               cxl_cper_unregister_callback(cxl_cper_event_call);
+
+       return rc;
+}
+
+static void __exit cxl_pci_driver_exit(void)
+{
+       pci_unregister_driver(&cxl_pci_driver);
+       cxl_cper_unregister_callback(cxl_cper_event_call);
+}
+
+module_init(cxl_pci_driver_init);
+module_exit(cxl_pci_driver_exit);
 MODULE_LICENSE("GPL v2");
-module_pci_driver(cxl_pci_driver);
 MODULE_IMPORT_NS(CXL);
index 47bc8e0b859077776c06fc1daee901ab49cbdd2d..97c21566677aa3b4dbdd84d7d656198d53af63fd 100644 (file)
@@ -69,6 +69,8 @@ static int cxl_switch_port_probe(struct cxl_port *port)
        if (rc < 0)
                return rc;
 
+       cxl_switch_parse_cdat(port);
+
        cxlhdm = devm_cxl_setup_hdm(port, NULL);
        if (!IS_ERR(cxlhdm))
                return devm_cxl_enumerate_decoders(cxlhdm, NULL);
@@ -109,6 +111,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
 
        /* Cache the data early to ensure is_visible() works */
        read_cdat_data(port);
+       cxl_endpoint_parse_cdat(port);
 
        get_device(&cxlmd->dev);
        rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
@@ -127,14 +130,15 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
         * This can't fail in practice as CXL root exit unregisters all
         * descendant ports and that in turn synchronizes with cxl_port_probe()
         */
-       root = find_cxl_root(port);
+       struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+
+       root = &cxl_root->port;
 
        /*
         * Now that all endpoint decoders are successfully enumerated, try to
         * assemble regions from committed decoders
         */
        device_for_each_child(&port->dev, root, discover_region);
-       put_device(&root->dev);
 
        return 0;
 }
index ee899f8e67215f6036734795cb5b90ab77a293a3..4a63567e93bae3dd2d5affabeedfd713aaa51460 100644 (file)
@@ -168,10 +168,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
        if (vmf->pgoff > buffer->pagecount)
                return VM_FAULT_SIGBUS;
 
-       vmf->page = buffer->pages[vmf->pgoff];
-       get_page(vmf->page);
-
-       return 0;
+       return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
 }
 
 static const struct vm_operations_struct dma_heap_vm_ops = {
@@ -185,6 +182,8 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;
 
+       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
        vma->vm_ops = &dma_heap_vm_ops;
        vma->vm_private_data = buffer;
 
index 70ba506dabab5f7aa9eb95741bd40636d1535c86..e928f2ca0f1e9adc58594d6b4552d64dae29df34 100644 (file)
@@ -378,6 +378,20 @@ config LPC18XX_DMAMUX
          Enable support for DMA on NXP LPC18xx/43xx platforms
          with PL080 and multiplexed DMA request lines.
 
+config LS2X_APB_DMA
+       tristate "Loongson LS2X APB DMA support"
+       depends on LOONGARCH || COMPILE_TEST
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Support for the Loongson LS2X APB DMA controller driver. The
+         DMA controller is having single DMA channel which can be
+         configured for different peripherals like audio, nand, sdio
+         etc which is in APB bus.
+
+         This DMA controller transfers data from memory to peripheral fifo.
+         It does not support memory to memory data transfer.
+
 config MCF_EDMA
        tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
        depends on M5441x || COMPILE_TEST
index 83553a97a010e157b285ad6c47557df31c8bc835..dfd40d14e4089d81ce489429f141a896bfc549e7 100644 (file)
@@ -48,6 +48,7 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-y += idxd/
 obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o
 obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
 obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
index 5b63996640d9d3a210f789a89b6e7ffa054d96e2..9588773dd2eb670a2f6115fdaef39a0e88248015 100644 (file)
@@ -57,6 +57,8 @@
 
 #define REG_BUS_WIDTH(ch)      (0x8040 + (ch) * 0x200)
 
+#define BUS_WIDTH_WORD_SIZE    GENMASK(3, 0)
+#define BUS_WIDTH_FRAME_SIZE   GENMASK(7, 4)
 #define BUS_WIDTH_8BIT         0x00
 #define BUS_WIDTH_16BIT                0x01
 #define BUS_WIDTH_32BIT                0x02
@@ -740,7 +742,8 @@ static int admac_device_config(struct dma_chan *chan,
        struct admac_data *ad = adchan->host;
        bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
        int wordsize = 0;
-       u32 bus_width = 0;
+       u32 bus_width = readl_relaxed(ad->base + REG_BUS_WIDTH(adchan->no)) &
+               ~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);
 
        switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
index fb89ecbf0cc5be8ca566eaac6e499f2e2336b625..40052d1bd0b5c161180eec477bedb0f871e91a55 100644 (file)
@@ -222,8 +222,14 @@ struct atdma_sg {
  * @vd: pointer to the virtual dma descriptor.
  * @atchan: pointer to the atmel dma channel.
  * @total_len: total transaction byte count
- * @sg_len: number of sg entries.
+ * @sglen: number of sg entries.
  * @sg: array of sgs.
+ * @boundary: number of transfers to perform before the automatic address increment operation
+ * @dst_hole: value to add to the destination address when the boundary has been reached
+ * @src_hole: value to add to the source address when the boundary has been reached
+ * @memset_buffer: buffer used for the memset operation
+ * @memset_paddr: physical address of the buffer used for the memset operation
+ * @memset_vaddr: virtual address of the buffer used for the memset operation
  */
 struct at_desc {
        struct                          virt_dma_desc vd;
@@ -245,7 +251,10 @@ struct at_desc {
 /*--  Channels  --------------------------------------------------------*/
 
 /**
- * atc_status - information bits stored in channel status flag
+ * enum atc_status - information bits stored in channel status flag
+ *
+ * @ATC_IS_PAUSED: If channel is pauses
+ * @ATC_IS_CYCLIC: If channel is cyclic
  *
  * Manipulated with atomic operations.
  */
@@ -282,7 +291,6 @@ struct at_dma_chan {
        u32                     save_cfg;
        u32                     save_dscr;
        struct dma_slave_config dma_sconfig;
-       bool                    cyclic;
        struct at_desc          *desc;
 };
 
@@ -328,12 +336,12 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
 /**
  * struct at_dma - internal representation of an Atmel HDMA Controller
  * @dma_device: dmaengine dma_device object members
- * @atdma_devtype: identifier of DMA controller compatibility
- * @ch_regs: memory mapped register base
+ * @regs: memory mapped register base
  * @clk: dma controller clock
  * @save_imr: interrupt mask register that is saved on suspend/resume cycle
  * @all_chan_mask: all channels availlable in a mask
  * @lli_pool: hw lli table
+ * @memset_pool: hw memset pool
  * @chan: channels table to store at_dma_chan structures
  */
 struct at_dma {
@@ -626,6 +634,9 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
 
 /**
  * atc_get_llis_residue - Get residue for a hardware linked list transfer
+ * @atchan: pointer to an atmel hdmac channel.
+ * @desc: pointer to the descriptor for which the residue is calculated.
+ * @residue: residue to be set to dma_tx_state.
  *
  * Calculate the residue by removing the length of the Linked List Item (LLI)
  * already transferred from the total length. To get the current LLI we can use
@@ -661,10 +672,8 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
  * two DSCR values are different, we read again the CTRLA then the DSCR till two
  * consecutive read values from DSCR are equal or till the maximum trials is
  * reach. This algorithm is very unlikely not to find a stable value for DSCR.
- * @atchan: pointer to an atmel hdmac channel.
- * @desc: pointer to the descriptor for which the residue is calculated.
- * @residue: residue to be set to dma_tx_state.
- * Returns 0 on success, -errno otherwise.
+ *
+ * Returns: %0 on success, -errno otherwise.
  */
 static int atc_get_llis_residue(struct at_dma_chan *atchan,
                                struct at_desc *desc, u32 *residue)
@@ -731,7 +740,8 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
  * @residue: residue to be updated.
- * Return 0 on success, -errono otherwise.
+ *
+ * Return: %0 on success, -errno otherwise.
  */
 static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
                           u32 *residue)
@@ -1710,7 +1720,7 @@ static void atc_issue_pending(struct dma_chan *chan)
  * atc_alloc_chan_resources - allocate resources for DMA channel
  * @chan: allocate descriptor resources for this channel
  *
- * return - the number of allocated descriptors
+ * Return: the number of allocated descriptors
  */
 static int atc_alloc_chan_resources(struct dma_chan *chan)
 {
index 2457a420c13d72cde2509f6a446269b166b06cdc..4e339c04fc1ea1e973a385250a144945984ae1fd 100644 (file)
 #define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
 #define AXI_DMAC_REG_PARTIAL_XFER_LEN  0x44c
 #define AXI_DMAC_REG_PARTIAL_XFER_ID   0x450
+#define AXI_DMAC_REG_CURRENT_SG_ID     0x454
+#define AXI_DMAC_REG_SG_ADDRESS                0x47c
+#define AXI_DMAC_REG_SG_ADDRESS_HIGH   0x4bc
 
 #define AXI_DMAC_CTRL_ENABLE           BIT(0)
 #define AXI_DMAC_CTRL_PAUSE            BIT(1)
+#define AXI_DMAC_CTRL_ENABLE_SG                BIT(2)
 
 #define AXI_DMAC_IRQ_SOT               BIT(0)
 #define AXI_DMAC_IRQ_EOT               BIT(1)
 /* The maximum ID allocated by the hardware is 31 */
 #define AXI_DMAC_SG_UNUSED 32U
 
+/* Flags for axi_dmac_hw_desc.flags */
+#define AXI_DMAC_HW_FLAG_LAST          BIT(0)
+#define AXI_DMAC_HW_FLAG_IRQ           BIT(1)
+
+struct axi_dmac_hw_desc {
+       u32 flags;
+       u32 id;
+       u64 dest_addr;
+       u64 src_addr;
+       u64 next_sg_addr;
+       u32 y_len;
+       u32 x_len;
+       u32 src_stride;
+       u32 dst_stride;
+       u64 __pad[2];
+};
+
 struct axi_dmac_sg {
-       dma_addr_t src_addr;
-       dma_addr_t dest_addr;
-       unsigned int x_len;
-       unsigned int y_len;
-       unsigned int dest_stride;
-       unsigned int src_stride;
-       unsigned int id;
        unsigned int partial_len;
        bool schedule_when_free;
+
+       struct axi_dmac_hw_desc *hw;
+       dma_addr_t hw_phys;
 };
 
 struct axi_dmac_desc {
        struct virt_dma_desc vdesc;
+       struct axi_dmac_chan *chan;
+
        bool cyclic;
        bool have_partial_xfer;
 
@@ -139,6 +158,7 @@ struct axi_dmac_chan {
        bool hw_partial_xfer;
        bool hw_cyclic;
        bool hw_2d;
+       bool hw_sg;
 };
 
 struct axi_dmac {
@@ -213,9 +233,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
        unsigned int flags = 0;
        unsigned int val;
 
-       val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
-       if (val) /* Queue is full, wait for the next SOT IRQ */
-               return;
+       if (!chan->hw_sg) {
+               val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
+               if (val) /* Queue is full, wait for the next SOT IRQ */
+                       return;
+       }
 
        desc = chan->next_desc;
 
@@ -229,14 +251,15 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
        sg = &desc->sg[desc->num_submitted];
 
        /* Already queued in cyclic mode. Wait for it to finish */
-       if (sg->id != AXI_DMAC_SG_UNUSED) {
+       if (sg->hw->id != AXI_DMAC_SG_UNUSED) {
                sg->schedule_when_free = true;
                return;
        }
 
-       desc->num_submitted++;
-       if (desc->num_submitted == desc->num_sgs ||
-           desc->have_partial_xfer) {
+       if (chan->hw_sg) {
+               chan->next_desc = NULL;
+       } else if (++desc->num_submitted == desc->num_sgs ||
+                  desc->have_partial_xfer) {
                if (desc->cyclic)
                        desc->num_submitted = 0; /* Start again */
                else
@@ -246,32 +269,42 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
                chan->next_desc = desc;
        }
 
-       sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
+       sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
 
-       if (axi_dmac_dest_is_mem(chan)) {
-               axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
-               axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
-       }
+       if (!chan->hw_sg) {
+               if (axi_dmac_dest_is_mem(chan)) {
+                       axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
+                       axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
+               }
 
-       if (axi_dmac_src_is_mem(chan)) {
-               axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
-               axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
+               if (axi_dmac_src_is_mem(chan)) {
+                       axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
+                       axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
+               }
        }
 
        /*
         * If the hardware supports cyclic transfers and there is no callback to
-        * call and only a single segment, enable hw cyclic mode to avoid
-        * unnecessary interrupts.
+        * call, enable hw cyclic mode to avoid unnecessary interrupts.
         */
-       if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
-               desc->num_sgs == 1)
-               flags |= AXI_DMAC_FLAG_CYCLIC;
+       if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
+               if (chan->hw_sg)
+                       desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
+               else if (desc->num_sgs == 1)
+                       flags |= AXI_DMAC_FLAG_CYCLIC;
+       }
 
        if (chan->hw_partial_xfer)
                flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
 
-       axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
-       axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
+       if (chan->hw_sg) {
+               axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, (u32)sg->hw_phys);
+               axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS_HIGH,
+                              (u64)sg->hw_phys >> 32);
+       } else {
+               axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
+               axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
+       }
        axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
        axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
 }
@@ -286,9 +319,9 @@ static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
        struct axi_dmac_sg *sg)
 {
        if (chan->hw_2d)
-               return sg->x_len * sg->y_len;
+               return (sg->hw->x_len + 1) * (sg->hw->y_len + 1);
        else
-               return sg->x_len;
+               return (sg->hw->x_len + 1);
 }
 
 static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
@@ -307,9 +340,9 @@ static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
                list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
                        for (i = 0; i < desc->num_sgs; i++) {
                                sg = &desc->sg[i];
-                               if (sg->id == AXI_DMAC_SG_UNUSED)
+                               if (sg->hw->id == AXI_DMAC_SG_UNUSED)
                                        continue;
-                               if (sg->id == id) {
+                               if (sg->hw->id == id) {
                                        desc->have_partial_xfer = true;
                                        sg->partial_len = len;
                                        found_sg = true;
@@ -348,6 +381,9 @@ static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
        rslt->result = DMA_TRANS_NOERROR;
        rslt->residue = 0;
 
+       if (chan->hw_sg)
+               return;
+
        /*
         * We get here if the last completed segment is partial, which
         * means we can compute the residue from that segment onwards
@@ -374,36 +410,47 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
            (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
                axi_dmac_dequeue_partial_xfers(chan);
 
-       do {
-               sg = &active->sg[active->num_completed];
-               if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
-                       break;
-               if (!(BIT(sg->id) & completed_transfers))
-                       break;
-               active->num_completed++;
-               sg->id = AXI_DMAC_SG_UNUSED;
-               if (sg->schedule_when_free) {
-                       sg->schedule_when_free = false;
-                       start_next = true;
+       if (chan->hw_sg) {
+               if (active->cyclic) {
+                       vchan_cyclic_callback(&active->vdesc);
+               } else {
+                       list_del(&active->vdesc.node);
+                       vchan_cookie_complete(&active->vdesc);
+                       active = axi_dmac_active_desc(chan);
+                       start_next = !!active;
                }
+       } else {
+               do {
+                       sg = &active->sg[active->num_completed];
+                       if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+                               break;
+                       if (!(BIT(sg->hw->id) & completed_transfers))
+                               break;
+                       active->num_completed++;
+                       sg->hw->id = AXI_DMAC_SG_UNUSED;
+                       if (sg->schedule_when_free) {
+                               sg->schedule_when_free = false;
+                               start_next = true;
+                       }
 
-               if (sg->partial_len)
-                       axi_dmac_compute_residue(chan, active);
+                       if (sg->partial_len)
+                               axi_dmac_compute_residue(chan, active);
 
-               if (active->cyclic)
-                       vchan_cyclic_callback(&active->vdesc);
+                       if (active->cyclic)
+                               vchan_cyclic_callback(&active->vdesc);
 
-               if (active->num_completed == active->num_sgs ||
-                   sg->partial_len) {
-                       if (active->cyclic) {
-                               active->num_completed = 0; /* wrap around */
-                       } else {
-                               list_del(&active->vdesc.node);
-                               vchan_cookie_complete(&active->vdesc);
-                               active = axi_dmac_active_desc(chan);
+                       if (active->num_completed == active->num_sgs ||
+                           sg->partial_len) {
+                               if (active->cyclic) {
+                                       active->num_completed = 0; /* wrap around */
+                               } else {
+                                       list_del(&active->vdesc.node);
+                                       vchan_cookie_complete(&active->vdesc);
+                                       active = axi_dmac_active_desc(chan);
+                               }
                        }
-               }
-       } while (active);
+               } while (active);
+       }
 
        return start_next;
 }
@@ -467,8 +514,12 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        unsigned long flags;
+       u32 ctrl = AXI_DMAC_CTRL_ENABLE;
 
-       axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
+       if (chan->hw_sg)
+               ctrl |= AXI_DMAC_CTRL_ENABLE_SG;
+
+       axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
 
        spin_lock_irqsave(&chan->vchan.lock, flags);
        if (vchan_issue_pending(&chan->vchan))
@@ -476,22 +527,58 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }
 
-static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
+static struct axi_dmac_desc *
+axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
 {
+       struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+       struct device *dev = dmac->dma_dev.dev;
+       struct axi_dmac_hw_desc *hws;
        struct axi_dmac_desc *desc;
+       dma_addr_t hw_phys;
        unsigned int i;
 
        desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
        if (!desc)
                return NULL;
        desc->num_sgs = num_sgs;
+       desc->chan = chan;
+
+       hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
+                               &hw_phys, GFP_ATOMIC);
+       if (!hws) {
+               kfree(desc);
+               return NULL;
+       }
 
-       for (i = 0; i < num_sgs; i++)
-               desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+       for (i = 0; i < num_sgs; i++) {
+               desc->sg[i].hw = &hws[i];
+               desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);
+
+               hws[i].id = AXI_DMAC_SG_UNUSED;
+               hws[i].flags = 0;
+
+               /* Link hardware descriptors */
+               hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
+       }
+
+       /* The last hardware descriptor will trigger an interrupt */
+       desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;
 
        return desc;
 }
 
+static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
+{
+       struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
+       struct device *dev = dmac->dma_dev.dev;
+       struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
+       dma_addr_t hw_phys = desc->sg[0].hw_phys;
+
+       dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
+                         hw, hw_phys);
+       kfree(desc);
+}
+
 static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
        enum dma_transfer_direction direction, dma_addr_t addr,
        unsigned int num_periods, unsigned int period_len,
@@ -508,26 +595,24 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
        segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
 
        for (i = 0; i < num_periods; i++) {
-               len = period_len;
-
-               while (len > segment_size) {
+               for (len = period_len; len > segment_size; sg++) {
                        if (direction == DMA_DEV_TO_MEM)
-                               sg->dest_addr = addr;
+                               sg->hw->dest_addr = addr;
                        else
-                               sg->src_addr = addr;
-                       sg->x_len = segment_size;
-                       sg->y_len = 1;
-                       sg++;
+                               sg->hw->src_addr = addr;
+                       sg->hw->x_len = segment_size - 1;
+                       sg->hw->y_len = 0;
+                       sg->hw->flags = 0;
                        addr += segment_size;
                        len -= segment_size;
                }
 
                if (direction == DMA_DEV_TO_MEM)
-                       sg->dest_addr = addr;
+                       sg->hw->dest_addr = addr;
                else
-                       sg->src_addr = addr;
-               sg->x_len = len;
-               sg->y_len = 1;
+                       sg->hw->src_addr = addr;
+               sg->hw->x_len = len - 1;
+               sg->hw->y_len = 0;
                sg++;
                addr += len;
        }
@@ -554,7 +639,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
        for_each_sg(sgl, sg, sg_len, i)
                num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
 
-       desc = axi_dmac_alloc_desc(num_sgs);
+       desc = axi_dmac_alloc_desc(chan, num_sgs);
        if (!desc)
                return NULL;
 
@@ -563,7 +648,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
        for_each_sg(sgl, sg, sg_len, i) {
                if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
                    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
-                       kfree(desc);
+                       axi_dmac_free_desc(desc);
                        return NULL;
                }
 
@@ -583,7 +668,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 {
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac_desc *desc;
-       unsigned int num_periods, num_segments;
+       unsigned int num_periods, num_segments, num_sgs;
 
        if (direction != chan->direction)
                return NULL;
@@ -597,11 +682,16 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 
        num_periods = buf_len / period_len;
        num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+       num_sgs = num_periods * num_segments;
 
-       desc = axi_dmac_alloc_desc(num_periods * num_segments);
+       desc = axi_dmac_alloc_desc(chan, num_sgs);
        if (!desc)
                return NULL;
 
+       /* Chain the last descriptor to the first, and remove its "last" flag */
+       desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
+       desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;
+
        axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
                period_len, desc->sg);
 
@@ -653,26 +743,26 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
                        return NULL;
        }
 
-       desc = axi_dmac_alloc_desc(1);
+       desc = axi_dmac_alloc_desc(chan, 1);
        if (!desc)
                return NULL;
 
        if (axi_dmac_src_is_mem(chan)) {
-               desc->sg[0].src_addr = xt->src_start;
-               desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
+               desc->sg[0].hw->src_addr = xt->src_start;
+               desc->sg[0].hw->src_stride = xt->sgl[0].size + src_icg;
        }
 
        if (axi_dmac_dest_is_mem(chan)) {
-               desc->sg[0].dest_addr = xt->dst_start;
-               desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
+               desc->sg[0].hw->dest_addr = xt->dst_start;
+               desc->sg[0].hw->dst_stride = xt->sgl[0].size + dst_icg;
        }
 
        if (chan->hw_2d) {
-               desc->sg[0].x_len = xt->sgl[0].size;
-               desc->sg[0].y_len = xt->numf;
+               desc->sg[0].hw->x_len = xt->sgl[0].size - 1;
+               desc->sg[0].hw->y_len = xt->numf - 1;
        } else {
-               desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
-               desc->sg[0].y_len = 1;
+               desc->sg[0].hw->x_len = xt->sgl[0].size * xt->numf - 1;
+               desc->sg[0].hw->y_len = 0;
        }
 
        if (flags & DMA_CYCLIC)
@@ -688,7 +778,7 @@ static void axi_dmac_free_chan_resources(struct dma_chan *c)
 
 static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
 {
-       kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+       axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
 }
 
 static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
@@ -714,6 +804,9 @@ static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
        case AXI_DMAC_REG_CURRENT_DEST_ADDR:
        case AXI_DMAC_REG_PARTIAL_XFER_LEN:
        case AXI_DMAC_REG_PARTIAL_XFER_ID:
+       case AXI_DMAC_REG_CURRENT_SG_ID:
+       case AXI_DMAC_REG_SG_ADDRESS:
+       case AXI_DMAC_REG_SG_ADDRESS_HIGH:
                return true;
        default:
                return false;
@@ -866,6 +959,10 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
        if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
                chan->hw_cyclic = true;
 
+       axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, 0xffffffff);
+       if (axi_dmac_read(dmac, AXI_DMAC_REG_SG_ADDRESS))
+               chan->hw_sg = true;
+
        axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
        if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
                chan->hw_2d = true;
@@ -911,6 +1008,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
        struct axi_dmac *dmac;
        struct regmap *regmap;
        unsigned int version;
+       u32 irq_mask = 0;
        int ret;
 
        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -966,6 +1064,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
        dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
        dma_dev->directions = BIT(dmac->chan.direction);
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+       dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
        INIT_LIST_HEAD(&dma_dev->channels);
 
        dmac->chan.vchan.desc_free = axi_dmac_desc_free;
@@ -977,7 +1076,10 @@ static int axi_dmac_probe(struct platform_device *pdev)
 
        dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
 
-       axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+       if (dmac->chan.hw_sg)
+               irq_mask |= AXI_DMAC_IRQ_SOT;
+
+       axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, irq_mask);
 
        if (of_dma_is_coherent(pdev->dev.of_node)) {
                ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);
index b7388ae62d7f1fde1b24118a38c1d454974b2bd9..491b222402216a4a7bee1627b3e559321af33f36 100644 (file)
@@ -1103,6 +1103,9 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
 static void __dma_async_device_channel_unregister(struct dma_device *device,
                                                  struct dma_chan *chan)
 {
+       if (chan->local == NULL)
+               return;
+
        WARN_ONCE(!device->device_release && chan->client_count,
                  "%s called while %d clients hold a reference\n",
                  __func__, chan->client_count);
index ffe621695e472b6b6d96e2a06a85ff514ee65651..a4f6088378492d0338fe65edc86ec51c4a9c0a10 100644 (file)
 #include <linux/slab.h>
 #include <linux/wait.h>
 
+static bool nobounce;
+module_param(nobounce, bool, 0644);
+MODULE_PARM_DESC(nobounce, "Prevent using swiotlb buffer (default: use swiotlb buffer)");
+
 static unsigned int test_buf_size = 16384;
 module_param(test_buf_size, uint, 0644);
 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
@@ -90,6 +94,7 @@ MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
 
 /**
  * struct dmatest_params - test parameters.
+ * @nobounce:          prevent using swiotlb buffer
  * @buf_size:          size of the memcpy test buffer
  * @channel:           bus ID of the channel to test
  * @device:            bus ID of the DMA Engine to test
@@ -106,6 +111,7 @@ MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
  * @polled:            use polling for completion instead of interrupts
  */
 struct dmatest_params {
+       bool            nobounce;
        unsigned int    buf_size;
        char            channel[20];
        char            device[32];
@@ -215,6 +221,7 @@ struct dmatest_done {
 struct dmatest_data {
        u8              **raw;
        u8              **aligned;
+       gfp_t           gfp_flags;
        unsigned int    cnt;
        unsigned int    off;
 };
@@ -533,7 +540,7 @@ static int dmatest_alloc_test_data(struct dmatest_data *d,
                goto err;
 
        for (i = 0; i < d->cnt; i++) {
-               d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
+               d->raw[i] = kmalloc(buf_size + align, d->gfp_flags);
                if (!d->raw[i])
                        goto err;
 
@@ -655,6 +662,13 @@ static int dmatest_func(void *data)
                goto err_free_coefs;
        }
 
+       src->gfp_flags = GFP_KERNEL;
+       dst->gfp_flags = GFP_KERNEL;
+       if (params->nobounce) {
+               src->gfp_flags = GFP_DMA;
+               dst->gfp_flags = GFP_DMA;
+       }
+
        if (dmatest_alloc_test_data(src, buf_size, align) < 0)
                goto err_free_coefs;
 
@@ -1093,6 +1107,7 @@ static void add_threaded_test(struct dmatest_info *info)
        struct dmatest_params *params = &info->params;
 
        /* Copy test parameters */
+       params->nobounce = nobounce;
        params->buf_size = test_buf_size;
        strscpy(params->channel, strim(test_channel), sizeof(params->channel));
        strscpy(params->device, strim(test_device), sizeof(params->device));
index 0745d9e7d259b1294e519744ced86dcdecbd7d7d..406f169b09a75a52197c6615c675b1eb61022169 100644 (file)
@@ -176,7 +176,7 @@ dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
        };
        struct dentry *regs_dent, *ch_dent;
        int nr_entries, i;
-       char name[16];
+       char name[32];
 
        regs_dent = debugfs_create_dir(WRITE_STR, dent);
 
@@ -239,7 +239,7 @@ static noinline_for_stack void dw_edma_debugfs_regs_rd(struct dw_edma *dw,
        };
        struct dentry *regs_dent, *ch_dent;
        int nr_entries, i;
-       char name[16];
+       char name[32];
 
        regs_dent = debugfs_create_dir(READ_STR, dent);
 
index 520c81978b085fb244311d041faf1786ec7b5750..dcdc57fe976c134f7825425627e782f3ad5b96de 100644 (file)
@@ -116,7 +116,7 @@ static void dw_hdma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
 static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
 {
        struct dentry *regs_dent, *ch_dent;
-       char name[16];
+       char name[32];
        int i;
 
        regs_dent = debugfs_create_dir(WRITE_STR, dent);
@@ -133,7 +133,7 @@ static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
 static void dw_hdma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent)
 {
        struct dentry *regs_dent, *ch_dent;
-       char name[16];
+       char name[32];
        int i;
 
        regs_dent = debugfs_create_dir(READ_STR, dent);
index 7958ac33e36ce3fab462d33161fea8dabe0ee215..5a8061a307cdafeb3a4db5ddd104ae6d7ec8d190 100644 (file)
@@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
        if (!dpaa2_chan->fd_pool)
                goto err;
 
-       dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
-                                             sizeof(struct dpaa2_fl_entry),
-                                             sizeof(struct dpaa2_fl_entry), 0);
+       dpaa2_chan->fl_pool =
+               dma_pool_create("fl_pool", dev,
+                                sizeof(struct dpaa2_fl_entry) * 3,
+                                sizeof(struct dpaa2_fl_entry), 0);
+
        if (!dpaa2_chan->fl_pool)
                goto err_fd;
 
        dpaa2_chan->sdd_pool =
                dma_pool_create("sdd_pool", dev,
-                               sizeof(struct dpaa2_qdma_sd_d),
+                               sizeof(struct dpaa2_qdma_sd_d) * 2,
                                sizeof(struct dpaa2_qdma_sd_d), 0);
        if (!dpaa2_chan->sdd_pool)
                goto err_fl;
index 238a69bd0d6f5d3ba6d8329543c49d3a750dba21..45cc419b1b4acbe87c12c3daaccafce73f8de1ba 100644 (file)
@@ -9,6 +9,7 @@
  * Vybrid and Layerscape SoCs.
  */
 
+#include <dt-bindings/dma/fsl-edma.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
 
 #include "fsl-edma-common.h"
 
-#define ARGS_RX                         BIT(0)
-#define ARGS_REMOTE                     BIT(1)
-#define ARGS_MULTI_FIFO                 BIT(2)
-
 static void fsl_edma_synchronize(struct dma_chan *chan)
 {
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
@@ -153,9 +150,15 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
                i = fsl_chan - fsl_edma->chans;
 
                fsl_chan->priority = dma_spec->args[1];
-               fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
-               fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
-               fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
+               fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
+               fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
+               fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;
+
+               if ((dma_spec->args[2] & FSL_EDMA_EVEN_CH) && (i & 0x1))
+                       continue;
+
+               if ((dma_spec->args[2] & FSL_EDMA_ODD_CH) && !(i & 0x1))
+                       continue;
 
                if (!b_chmux && i == dma_spec->args[0]) {
                        chan = dma_get_slave_channel(chan);
index 47cb284680494cc842141c3c680a617a424d5669..f405c77060ad8b3508e7c75bee09968e1ae9bc78 100644 (file)
@@ -514,11 +514,11 @@ static struct fsl_qdma_queue
                        queue_temp = queue_head + i + (j * queue_num);
 
                        queue_temp->cq =
-                       dma_alloc_coherent(&pdev->dev,
-                                          sizeof(struct fsl_qdma_format) *
-                                          queue_size[i],
-                                          &queue_temp->bus_addr,
-                                          GFP_KERNEL);
+                       dmam_alloc_coherent(&pdev->dev,
+                                           sizeof(struct fsl_qdma_format) *
+                                           queue_size[i],
+                                           &queue_temp->bus_addr,
+                                           GFP_KERNEL);
                        if (!queue_temp->cq)
                                return NULL;
                        queue_temp->block_base = fsl_qdma->block_base +
@@ -563,15 +563,14 @@ static struct fsl_qdma_queue
        /*
         * Buffer for queue command
         */
-       status_head->cq = dma_alloc_coherent(&pdev->dev,
-                                            sizeof(struct fsl_qdma_format) *
-                                            status_size,
-                                            &status_head->bus_addr,
-                                            GFP_KERNEL);
-       if (!status_head->cq) {
-               devm_kfree(&pdev->dev, status_head);
+       status_head->cq = dmam_alloc_coherent(&pdev->dev,
+                                             sizeof(struct fsl_qdma_format) *
+                                             status_size,
+                                             &status_head->bus_addr,
+                                             GFP_KERNEL);
+       if (!status_head->cq)
                return NULL;
-       }
+
        status_head->n_cq = status_size;
        status_head->virt_head = status_head->cq;
        status_head->virt_tail = status_head->cq;
@@ -805,7 +804,7 @@ fsl_qdma_irq_init(struct platform_device *pdev,
        int i;
        int cpu;
        int ret;
-       char irq_name[20];
+       char irq_name[32];
 
        fsl_qdma->error_irq =
                platform_get_irq_byname(pdev, "qdma-error");
@@ -1268,8 +1267,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
 
 static void fsl_qdma_remove(struct platform_device *pdev)
 {
-       int i;
-       struct fsl_qdma_queue *status;
        struct device_node *np = pdev->dev.of_node;
        struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
 
@@ -1277,12 +1274,6 @@ static void fsl_qdma_remove(struct platform_device *pdev)
        fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
        of_dma_controller_free(np);
        dma_async_device_unregister(&fsl_qdma->dma_dev);
-
-       for (i = 0; i < fsl_qdma->block_number; i++) {
-               status = fsl_qdma->status[i];
-               dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
-                               status->n_cq, status->cq, status->bus_addr);
-       }
 }
 
 static const struct of_device_id fsl_qdma_dt_ids[] = {
index 1d918d45d9f6d67453f8f662e08b6f430161f126..77f8885cf4075acfd3ff535b7e09519a8df41c70 100644 (file)
@@ -165,7 +165,7 @@ static void idxd_cdev_dev_release(struct device *dev)
        struct idxd_wq *wq = idxd_cdev->wq;
 
        cdev_ctx = &ictx[wq->idxd->data->type];
-       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+       ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
        kfree(idxd_cdev);
 }
 
@@ -463,7 +463,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
        cdev = &idxd_cdev->cdev;
        dev = cdev_dev(idxd_cdev);
        cdev_ctx = &ictx[wq->idxd->data->type];
-       minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+       minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL);
        if (minor < 0) {
                kfree(idxd_cdev);
                return minor;
index f43d81128b96b3672fab2ce2de0f54fea1660f2e..ecfdf4a8f1f838ea49f1dbe3f60b15574aa8be11 100644 (file)
@@ -807,6 +807,9 @@ err_bmap:
 
 static void idxd_device_evl_free(struct idxd_device *idxd)
 {
+       void *evl_log;
+       unsigned int evl_log_size;
+       dma_addr_t evl_dma;
        union gencfg_reg gencfg;
        union genctrl_reg genctrl;
        struct device *dev = &idxd->pdev->dev;
@@ -827,11 +830,15 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
        iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
        iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
 
-       dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
        bitmap_free(evl->bmap);
+       evl_log = evl->log;
+       evl_log_size = evl->log_size;
+       evl_dma = evl->dma;
        evl->log = NULL;
        evl->size = IDXD_EVL_SIZE_MIN;
        spin_unlock(&evl->lock);
+
+       dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
 }
 
 static void idxd_group_config_write(struct idxd_group *group)
index f81ecf5863e86ec00190f986e64f129cc8a3dac7..9b42f5e96b1e0a2b7d001fa406a7d98cd040a577 100644 (file)
@@ -421,9 +421,7 @@ struct sdma_desc {
  * @shp_addr:          value for gReg[6]
  * @per_addr:          value for gReg[2]
  * @status:            status of dma channel
- * @context_loaded:    ensure context is only loaded once
  * @data:              specific sdma interface structure
- * @bd_pool:           dma_pool for bd
  * @terminate_worker:  used to call back into terminate work function
  * @terminated:                terminated list
  * @is_ram_script:     flag for script in ram
@@ -486,8 +484,6 @@ struct sdma_channel {
  * @num_script_addrs:  Number of script addresses in this image
  * @ram_code_start:    offset of SDMA ram image in this firmware image
  * @ram_code_size:     size of SDMA ram image
- * @script_addrs:      Stores the start address of the SDMA scripts
- *                     (in SDMA memory space)
  */
 struct sdma_firmware_header {
        u32     magic;
diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c
new file mode 100644 (file)
index 0000000..a49913f
--- /dev/null
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for the Loongson LS2X APB DMA Controller
+ *
+ * Copyright (C) 2017-2023 Loongson Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* Global Configuration Register */
+#define LDMA_ORDER_ERG         0x0
+
+/* Bitfield definitions */
+
+/* Bitfields in Global Configuration Register */
+#define LDMA_64BIT_EN          BIT(0) /* 1: 64 bit support */
+#define LDMA_UNCOHERENT_EN     BIT(1) /* 0: cache, 1: uncache */
+#define LDMA_ASK_VALID         BIT(2)
+#define LDMA_START             BIT(3) /* DMA start operation */
+#define LDMA_STOP              BIT(4) /* DMA stop operation */
+#define LDMA_CONFIG_MASK       GENMASK(4, 0) /* DMA controller config bits mask */
+
+/* Bitfields in ndesc_addr field of HW descriptor */
+#define LDMA_DESC_EN           BIT(0) /* 1: The next descriptor is valid */
+#define LDMA_DESC_ADDR_LOW     GENMASK(31, 1)
+
+/* Bitfields in cmd field of HW descriptor */
+#define LDMA_INT               BIT(1) /* Enable DMA interrupts */
+#define LDMA_DATA_DIRECTION    BIT(12) /* 1: write to device, 0: read from device */
+
+#define LDMA_SLAVE_BUSWIDTHS   (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+#define LDMA_MAX_TRANS_LEN     U32_MAX
+
+/*--  descriptors  -----------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_hw_desc - DMA HW descriptor
+ * @ndesc_addr: the next descriptor low address.
+ * @mem_addr: memory low address.
+ * @apb_addr: device buffer address.
+ * @len: length of a piece of carried content, in words.
+ * @step_len: length between two moved memory data blocks.
+ * @step_times: number of blocks to be carried in a single DMA operation.
+ * @cmd: descriptor command or state.
+ * @stats: DMA status.
+ * @high_ndesc_addr: the next descriptor high address.
+ * @high_mem_addr: memory high address.
+ * @reserved: reserved
+ */
+struct ls2x_dma_hw_desc {
+       u32 ndesc_addr;
+       u32 mem_addr;
+       u32 apb_addr;
+       u32 len;
+       u32 step_len;
+       u32 step_times;
+       u32 cmd;
+       u32 stats;
+       u32 high_ndesc_addr;
+       u32 high_mem_addr;
+       u32 reserved[2];
+} __packed;
+
+/*
+ * struct ls2x_dma_sg - ls2x dma scatter gather entry
+ * @hw: the pointer to DMA HW descriptor.
+ * @llp: physical address of the DMA HW descriptor.
+ * @phys: destination or source address(mem).
+ * @len: number of Bytes to read.
+ */
+struct ls2x_dma_sg {
+       struct ls2x_dma_hw_desc *hw;
+       dma_addr_t              llp;
+       dma_addr_t              phys;
+       u32                     len;
+};
+
+/*
+ * struct ls2x_dma_desc - software descriptor
+ * @vdesc: pointer to the virtual dma descriptor.
+ * @cyclic: flag to dma cyclic
+ * @burst_size: burst size of transaction, in words.
+ * @desc_num: number of sg entries.
+ * @direction: transfer direction, to or from device.
+ * @status: dma controller status.
+ * @sg: array of sgs.
+ */
+struct ls2x_dma_desc {
+       struct virt_dma_desc            vdesc;
+       bool                            cyclic;
+       size_t                          burst_size;
+       u32                             desc_num;
+       enum dma_transfer_direction     direction;
+       enum dma_status                 status;
+       struct ls2x_dma_sg              sg[] __counted_by(desc_num);
+};
+
+/*--  Channels  --------------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_chan - internal representation of an LS2X APB DMA channel
+ * @vchan: virtual dma channel entry.
+ * @desc: pointer to the ls2x sw dma descriptor.
+ * @pool: hw desc table
+ * @irq: irq line
+ * @sconfig: configuration for slave transfers, passed via .device_config
+ */
+struct ls2x_dma_chan {
+       struct virt_dma_chan    vchan;
+       struct ls2x_dma_desc    *desc;
+       void                    *pool;
+       int                     irq;
+       struct dma_slave_config sconfig;
+};
+
+/*--  Controller  ------------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_priv - LS2X APB DMAC specific information
+ * @ddev: dmaengine dma_device object members
+ * @dma_clk: DMAC clock source
+ * @regs: memory mapped register base
+ * @lchan: channel to store ls2x_dma_chan structures
+ */
+struct ls2x_dma_priv {
+       struct dma_device       ddev;
+       struct clk              *dma_clk;
+       void __iomem            *regs;
+       struct ls2x_dma_chan    lchan;
+};
+
+/*--  Helper functions  ------------------------------------------------*/
+
+static inline struct ls2x_dma_desc *to_ldma_desc(struct virt_dma_desc *vdesc)
+{
+       return container_of(vdesc, struct ls2x_dma_desc, vdesc);
+}
+
+static inline struct ls2x_dma_chan *to_ldma_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct ls2x_dma_chan, vchan.chan);
+}
+
+static inline struct ls2x_dma_priv *to_ldma_priv(struct dma_device *ddev)
+{
+       return container_of(ddev, struct ls2x_dma_priv, ddev);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+       return &chan->dev->device;
+}
+
+static void ls2x_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(vdesc->tx.chan);
+       struct ls2x_dma_desc *desc = to_ldma_desc(vdesc);
+       int i;
+
+       for (i = 0; i < desc->desc_num; i++) {
+               if (desc->sg[i].hw)
+                       dma_pool_free(lchan->pool, desc->sg[i].hw,
+                                     desc->sg[i].llp);
+       }
+
+       kfree(desc);
+}
+
+static void ls2x_dma_write_cmd(struct ls2x_dma_chan *lchan, bool cmd)
+{
+       struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
+       u64 val;
+
+       val = lo_hi_readq(priv->regs + LDMA_ORDER_ERG) & ~LDMA_CONFIG_MASK;
+       val |= LDMA_64BIT_EN | cmd;
+       lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
+}
+
+static void ls2x_dma_start_transfer(struct ls2x_dma_chan *lchan)
+{
+       struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
+       struct ls2x_dma_sg *ldma_sg;
+       struct virt_dma_desc *vdesc;
+       u64 val;
+
+       /* Get the next descriptor */
+       vdesc = vchan_next_desc(&lchan->vchan);
+       if (!vdesc) {
+               lchan->desc = NULL;
+               return;
+       }
+
+       list_del(&vdesc->node);
+       lchan->desc = to_ldma_desc(vdesc);
+       ldma_sg = &lchan->desc->sg[0];
+
+       /* Start DMA */
+       lo_hi_writeq(0, priv->regs + LDMA_ORDER_ERG);
+       val = (ldma_sg->llp & ~LDMA_CONFIG_MASK) | LDMA_64BIT_EN | LDMA_START;
+       lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
+}
+
+static size_t ls2x_dmac_detect_burst(struct ls2x_dma_chan *lchan)
+{
+       u32 maxburst, buswidth;
+
+       /* Reject definitely invalid configurations */
+       if ((lchan->sconfig.src_addr_width & LDMA_SLAVE_BUSWIDTHS) &&
+           (lchan->sconfig.dst_addr_width & LDMA_SLAVE_BUSWIDTHS))
+               return 0;
+
+       if (lchan->sconfig.direction == DMA_MEM_TO_DEV) {
+               maxburst = lchan->sconfig.dst_maxburst;
+               buswidth = lchan->sconfig.dst_addr_width;
+       } else {
+               maxburst = lchan->sconfig.src_maxburst;
+               buswidth = lchan->sconfig.src_addr_width;
+       }
+
+       /* If maxburst is zero, fallback to LDMA_MAX_TRANS_LEN */
+       return maxburst ? (maxburst * buswidth) >> 2 : LDMA_MAX_TRANS_LEN;
+}
+
+static void ls2x_dma_fill_desc(struct ls2x_dma_chan *lchan, u32 sg_index,
+                              struct ls2x_dma_desc *desc)
+{
+       struct ls2x_dma_sg *ldma_sg = &desc->sg[sg_index];
+       u32 num_segments, segment_size;
+
+       if (desc->direction == DMA_MEM_TO_DEV) {
+               ldma_sg->hw->cmd = LDMA_INT | LDMA_DATA_DIRECTION;
+               ldma_sg->hw->apb_addr = lchan->sconfig.dst_addr;
+       } else {
+               ldma_sg->hw->cmd = LDMA_INT;
+               ldma_sg->hw->apb_addr = lchan->sconfig.src_addr;
+       }
+
+       ldma_sg->hw->mem_addr = lower_32_bits(ldma_sg->phys);
+       ldma_sg->hw->high_mem_addr = upper_32_bits(ldma_sg->phys);
+
+       /* Split into multiple equally sized segments if necessary */
+       num_segments = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, desc->burst_size);
+       segment_size = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, num_segments);
+
+       /* Word count register takes input in words */
+       ldma_sg->hw->len = segment_size;
+       ldma_sg->hw->step_times = num_segments;
+       ldma_sg->hw->step_len = 0;
+
+       /* lets make a link list */
+       if (sg_index) {
+               desc->sg[sg_index - 1].hw->ndesc_addr = ldma_sg->llp | LDMA_DESC_EN;
+               desc->sg[sg_index - 1].hw->high_ndesc_addr = upper_32_bits(ldma_sg->llp);
+       }
+}
+
+/*--  DMA Engine API  --------------------------------------------------*/
+
+/*
+ * ls2x_dma_alloc_chan_resources - allocate resources for DMA channel
+ * @chan: allocate descriptor resources for this channel
+ *
+ * return - the number of allocated descriptors
+ */
+static int ls2x_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+       /* Create a pool of consistent memory blocks for hardware descriptors */
+       lchan->pool = dma_pool_create(dev_name(chan2dev(chan)),
+                                     chan->device->dev, PAGE_SIZE,
+                                     __alignof__(struct ls2x_dma_hw_desc), 0);
+       if (!lchan->pool) {
+               dev_err(chan2dev(chan), "No memory for descriptors\n");
+               return -ENOMEM;
+       }
+
+       return 1;
+}
+
+/*
+ * ls2x_dma_free_chan_resources - free all channel resources
+ * @chan: DMA channel
+ */
+static void ls2x_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+       vchan_free_chan_resources(to_virt_chan(chan));
+       dma_pool_destroy(lchan->pool);
+       lchan->pool = NULL;
+}
+
+/*
+ * ls2x_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: tx descriptor status flags
+ * @context: transaction context (ignored)
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+ls2x_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+                      u32 sg_len, enum dma_transfer_direction direction,
+                      unsigned long flags, void *context)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+       struct ls2x_dma_desc *desc;
+       struct scatterlist *sg;
+       size_t burst_size;
+       int i;
+
+       if (unlikely(!sg_len || !is_slave_direction(direction)))
+               return NULL;
+
+       burst_size = ls2x_dmac_detect_burst(lchan);
+       if (!burst_size)
+               return NULL;
+
+       /* Software descriptor with a trailing array of sg_len hw-desc slots */
+       desc = kzalloc(struct_size(desc, sg, sg_len), GFP_NOWAIT);
+       if (!desc)
+               return NULL;
+
+       desc->desc_num = sg_len;
+       desc->direction = direction;
+       desc->burst_size = burst_size;
+
+       for_each_sg(sgl, sg, sg_len, i) {
+               struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
+
+               /* Allocate DMA capable memory for hardware descriptor */
+               ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
+               if (!ldma_sg->hw) {
+                       /*
+                        * Shrink desc_num to the count actually allocated so
+                        * the free path only returns valid pool entries.
+                        */
+                       desc->desc_num = i;
+                       ls2x_dma_desc_free(&desc->vdesc);
+                       return NULL;
+               }
+
+               ldma_sg->phys = sg_dma_address(sg);
+               ldma_sg->len = sg_dma_len(sg);
+
+               ls2x_dma_fill_desc(lchan, i, desc);
+       }
+
+       /* Clear the enable bit in the last descriptor to terminate the chain */
+       desc->sg[sg_len - 1].hw->ndesc_addr &= ~LDMA_DESC_EN;
+       desc->status = DMA_IN_PROGRESS;
+
+       return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
+}
+
+/*
+ * ls2x_dma_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ * @flags: tx descriptor status flags
+ *
+ * NOTE(review): buf_len is assumed to be a multiple of period_len; any
+ * remainder of the integer division below is silently dropped.
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+ls2x_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+                        size_t period_len, enum dma_transfer_direction direction,
+                        unsigned long flags)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+       struct ls2x_dma_desc *desc;
+       size_t burst_size;
+       u32 num_periods;
+       int i;
+
+       if (unlikely(!buf_len || !period_len))
+               return NULL;
+
+       if (unlikely(!is_slave_direction(direction)))
+               return NULL;
+
+       burst_size = ls2x_dmac_detect_burst(lchan);
+       if (!burst_size)
+               return NULL;
+
+       num_periods = buf_len / period_len;
+       desc = kzalloc(struct_size(desc, sg, num_periods), GFP_NOWAIT);
+       if (!desc)
+               return NULL;
+
+       desc->desc_num = num_periods;
+       desc->direction = direction;
+       desc->burst_size = burst_size;
+
+       /* Build cyclic linked list */
+       for (i = 0; i < num_periods; i++) {
+               struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
+
+               /* Allocate DMA capable memory for hardware descriptor */
+               ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
+               if (!ldma_sg->hw) {
+                       /* Only free the i descriptors allocated so far */
+                       desc->desc_num = i;
+                       ls2x_dma_desc_free(&desc->vdesc);
+                       return NULL;
+               }
+
+               ldma_sg->phys = buf_addr + period_len * i;
+               ldma_sg->len = period_len;
+
+               ls2x_dma_fill_desc(lchan, i, desc);
+       }
+
+       /* Close the ring: point the last descriptor back at the first */
+       desc->sg[num_periods - 1].hw->ndesc_addr = desc->sg[0].llp | LDMA_DESC_EN;
+       desc->sg[num_periods - 1].hw->high_ndesc_addr = upper_32_bits(desc->sg[0].llp);
+       desc->cyclic = true;
+       desc->status = DMA_IN_PROGRESS;
+
+       return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
+}
+
+/*
+ * ls2x_dma_slave_config - set slave configuration for channel
+ * @chan: dma channel
+ * @config: slave configuration
+ *
+ * Caches the slave configuration in the channel; it is consumed later when
+ * descriptors are prepared.
+ *
+ * Return: always 0
+ */
+static int ls2x_dma_slave_config(struct dma_chan *chan,
+                                struct dma_slave_config *config)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+       memcpy(&lchan->sconfig, config, sizeof(*config));
+       return 0;
+}
+
+/*
+ * ls2x_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ls2x_dma_issue_pending(struct dma_chan *chan)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&lchan->vchan.lock, flags);
+       /* Only kick the hardware if no transfer is already in flight */
+       if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
+               ls2x_dma_start_transfer(lchan);
+       spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+}
+
+/*
+ * ls2x_dma_terminate_all - terminate all transactions
+ * @chan: channel
+ *
+ * Stops all DMA transactions.
+ *
+ * Return: always 0
+ */
+static int ls2x_dma_terminate_all(struct dma_chan *chan)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&lchan->vchan.lock, flags);
+       /* Setting stop cmd */
+       ls2x_dma_write_cmd(lchan, LDMA_STOP);
+       if (lchan->desc) {
+               vchan_terminate_vdesc(&lchan->desc->vdesc);
+               lchan->desc = NULL;
+       }
+
+       vchan_get_all_descriptors(&lchan->vchan, &head);
+       spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+       /* Free the collected descriptors outside the channel lock */
+       vchan_dma_desc_free_list(&lchan->vchan, &head);
+       return 0;
+}
+
+/*
+ * ls2x_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ */
+static void ls2x_dma_synchronize(struct dma_chan *chan)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+       /* Wait for any in-flight vchan callbacks to complete */
+       vchan_synchronize(&lchan->vchan);
+}
+
+/*
+ * ls2x_dma_pause - pause the channel
+ * @chan: channel
+ *
+ * Issues a stop command and marks the active descriptor DMA_PAUSED so a
+ * later resume can restart it.
+ *
+ * Return: always 0
+ */
+static int ls2x_dma_pause(struct dma_chan *chan)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&lchan->vchan.lock, flags);
+       if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
+               ls2x_dma_write_cmd(lchan, LDMA_STOP);
+               lchan->desc->status = DMA_PAUSED;
+       }
+       spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+       return 0;
+}
+
+/*
+ * ls2x_dma_resume - resume a previously paused channel
+ * @chan: channel
+ *
+ * Restarts the active descriptor only if it was left in DMA_PAUSED state
+ * by ls2x_dma_pause().
+ *
+ * Return: always 0
+ */
+static int ls2x_dma_resume(struct dma_chan *chan)
+{
+       struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&lchan->vchan.lock, flags);
+       if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
+               lchan->desc->status = DMA_IN_PROGRESS;
+               ls2x_dma_write_cmd(lchan, LDMA_START);
+       }
+       spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+       return 0;
+}
+
+/*
+ * ls2x_dma_isr - LS2X DMA Interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to ls2x_dma_chan
+ *
+ * For cyclic transfers the callback fires once per interrupt and the
+ * descriptor stays active; for one-shot transfers the descriptor is
+ * completed and the next queued transfer (if any) is started.
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
+{
+       struct ls2x_dma_chan *lchan = dev_id;
+       struct ls2x_dma_desc *desc;
+
+       spin_lock(&lchan->vchan.lock);
+       desc = lchan->desc;
+       if (desc) {
+               if (desc->cyclic) {
+                       vchan_cyclic_callback(&desc->vdesc);
+               } else {
+                       desc->status = DMA_COMPLETE;
+                       vchan_cookie_complete(&desc->vdesc);
+                       ls2x_dma_start_transfer(lchan);
+               }
+
+               /* ls2x_dma_start_transfer() updates lchan->desc */
+               if (!lchan->desc)
+                       ls2x_dma_write_cmd(lchan, LDMA_STOP);
+       }
+       spin_unlock(&lchan->vchan.lock);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * ls2x_dma_chan_init - initialize the single DMA channel
+ * @pdev: Pointer to the platform_device structure
+ * @priv: driver private data
+ *
+ * Requests the channel interrupt and registers the virtual channel with
+ * the dmaengine device.
+ *
+ * Return: 0 on success, a negative errno from platform_get_irq() or
+ * devm_request_irq() on failure.
+ */
+static int ls2x_dma_chan_init(struct platform_device *pdev,
+                             struct ls2x_dma_priv *priv)
+{
+       struct ls2x_dma_chan *lchan = &priv->lchan;
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       lchan->irq = platform_get_irq(pdev, 0);
+       if (lchan->irq < 0)
+               return lchan->irq;
+
+       ret = devm_request_irq(dev, lchan->irq, ls2x_dma_isr, IRQF_TRIGGER_RISING,
+                              dev_name(&pdev->dev), lchan);
+       if (ret)
+               return ret;
+
+       /* Initialize channels related values */
+       INIT_LIST_HEAD(&priv->ddev.channels);
+       lchan->vchan.desc_free = ls2x_dma_desc_free;
+       vchan_init(&lchan->vchan, &priv->ddev);
+
+       return 0;
+}
+
+/*
+ * ls2x_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Maps the controller registers, enables the clock, initializes the
+ * channel, then registers with the dmaengine core and the OF DMA helper.
+ * Error unwinding releases resources in reverse order of acquisition.
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int ls2x_dma_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ls2x_dma_priv *priv;
+       struct dma_device *ddev;
+       int ret;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(priv->regs))
+               return dev_err_probe(dev, PTR_ERR(priv->regs),
+                                    "devm_platform_ioremap_resource failed.\n");
+
+       priv->dma_clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(priv->dma_clk))
+               return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "devm_clk_get failed.\n");
+
+       ret = clk_prepare_enable(priv->dma_clk);
+       if (ret)
+               return dev_err_probe(dev, ret, "clk_prepare_enable failed.\n");
+
+       ret = ls2x_dma_chan_init(pdev, priv);
+       if (ret)
+               goto disable_clk;
+
+       /* Advertise slave and cyclic capabilities to the dmaengine core */
+       ddev = &priv->ddev;
+       ddev->dev = dev;
+       dma_cap_zero(ddev->cap_mask);
+       dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+       dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
+
+       ddev->device_alloc_chan_resources = ls2x_dma_alloc_chan_resources;
+       ddev->device_free_chan_resources = ls2x_dma_free_chan_resources;
+       ddev->device_tx_status = dma_cookie_status;
+       ddev->device_issue_pending = ls2x_dma_issue_pending;
+       ddev->device_prep_slave_sg = ls2x_dma_prep_slave_sg;
+       ddev->device_prep_dma_cyclic = ls2x_dma_prep_dma_cyclic;
+       ddev->device_config = ls2x_dma_slave_config;
+       ddev->device_terminate_all = ls2x_dma_terminate_all;
+       ddev->device_synchronize = ls2x_dma_synchronize;
+       ddev->device_pause = ls2x_dma_pause;
+       ddev->device_resume = ls2x_dma_resume;
+
+       ddev->src_addr_widths = LDMA_SLAVE_BUSWIDTHS;
+       ddev->dst_addr_widths = LDMA_SLAVE_BUSWIDTHS;
+       ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+       ret = dma_async_device_register(&priv->ddev);
+       if (ret < 0)
+               goto disable_clk;
+
+       ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, priv);
+       if (ret < 0)
+               goto unregister_dmac;
+
+       platform_set_drvdata(pdev, priv);
+
+       dev_info(dev, "Loongson LS2X APB DMA driver registered successfully.\n");
+       return 0;
+
+unregister_dmac:
+       dma_async_device_unregister(&priv->ddev);
+disable_clk:
+       clk_disable_unprepare(priv->dma_clk);
+
+       return ret;
+}
+
+/*
+ * ls2x_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Tears down in reverse order of probe: OF DMA helper, dmaengine device,
+ * then the clock. devm-managed resources are released by the core.
+ */
+static void ls2x_dma_remove(struct platform_device *pdev)
+{
+       struct ls2x_dma_priv *priv = platform_get_drvdata(pdev);
+
+       of_dma_controller_free(pdev->dev.of_node);
+       dma_async_device_unregister(&priv->ddev);
+       clk_disable_unprepare(priv->dma_clk);
+}
+
+/* Device tree match table */
+static const struct of_device_id ls2x_dma_of_match_table[] = {
+       { .compatible = "loongson,ls2k1000-apbdma" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table);
+
+static struct platform_driver ls2x_dmac_driver = {
+       .probe          = ls2x_dma_probe,
+       .remove_new     = ls2x_dma_remove,
+       .driver = {
+               .name   = "ls2x-apbdma",
+               .of_match_table = ls2x_dma_of_match_table,
+       },
+};
+module_platform_driver(ls2x_dmac_driver);
+
+MODULE_DESCRIPTION("Loongson LS2X APB DMA Controller driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited");
+MODULE_LICENSE("GPL");
index 1b0a95892627d6dc31439b9112f1c76d9b86071d..7b41c670970a655267f470a945178c53c04602e9 100644 (file)
@@ -531,7 +531,7 @@ disable_clk:
        return ret;
 }
 
-static int milbeaut_hdmac_remove(struct platform_device *pdev)
+static void milbeaut_hdmac_remove(struct platform_device *pdev)
 {
        struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
        struct dma_chan *chan;
@@ -546,16 +546,21 @@ static int milbeaut_hdmac_remove(struct platform_device *pdev)
         */
        list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
                ret = dmaengine_terminate_sync(chan);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       /*
+                        * This results in resource leakage and maybe also
+                        * use-after-free errors as e.g. *mdev is kfreed.
+                        */
+                       dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+                                 chan->chan_id, ERR_PTR(ret));
+                       return;
+               }
                milbeaut_hdmac_free_chan_resources(chan);
        }
 
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&mdev->ddev);
        clk_disable_unprepare(mdev->clk);
-
-       return 0;
 }
 
 static const struct of_device_id milbeaut_hdmac_match[] = {
@@ -566,7 +571,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
 
 static struct platform_driver milbeaut_hdmac_driver = {
        .probe = milbeaut_hdmac_probe,
-       .remove = milbeaut_hdmac_remove,
+       .remove_new = milbeaut_hdmac_remove,
        .driver = {
                .name = "milbeaut-m10v-hdmac",
                .of_match_table = milbeaut_hdmac_match,
index d29d01e730aa09171eecc60cfd298943b9783c9a..2cce529b448eb732f90dbeee236ac92a8d599ced 100644 (file)
@@ -368,7 +368,7 @@ disable_xdmac:
        return ret;
 }
 
-static int milbeaut_xdmac_remove(struct platform_device *pdev)
+static void milbeaut_xdmac_remove(struct platform_device *pdev)
 {
        struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
        struct dma_chan *chan;
@@ -383,8 +383,15 @@ static int milbeaut_xdmac_remove(struct platform_device *pdev)
         */
        list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
                ret = dmaengine_terminate_sync(chan);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       /*
+                        * This results in resource leakage and maybe also
+                        * use-after-free errors as e.g. *mdev is kfreed.
+                        */
+                       dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+                                 chan->chan_id, ERR_PTR(ret));
+                       return;
+               }
                milbeaut_xdmac_free_chan_resources(chan);
        }
 
@@ -392,8 +399,6 @@ static int milbeaut_xdmac_remove(struct platform_device *pdev)
        dma_async_device_unregister(&mdev->ddev);
 
        disable_xdmac(mdev);
-
-       return 0;
 }
 
 static const struct of_device_id milbeaut_xdmac_match[] = {
@@ -404,7 +409,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
 
 static struct platform_driver milbeaut_xdmac_driver = {
        .probe = milbeaut_xdmac_probe,
-       .remove = milbeaut_xdmac_remove,
+       .remove_new = milbeaut_xdmac_remove,
        .driver = {
                .name = "milbeaut-m10v-xdmac",
                .of_match_table = milbeaut_xdmac_match,
index 3cf0b38387ae5604adf7fbf07574444171c21043..c29744bfdf2c2afc4ae6a7b6fdcd963a19db2977 100644 (file)
@@ -1053,6 +1053,9 @@ static bool _trigger(struct pl330_thread *thrd)
 
        thrd->req_running = idx;
 
+       if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
+               UNTIL(thrd, PL330_STATE_WFP);
+
        return true;
 }
 
index 3125a2f162b4788d3ffbf182265bff18532ba19c..428473611115d1007755f244a51ab52eeefe46a5 100644 (file)
 #include <linux/mod_devicetable.h>
 #include <linux/dma-mapping.h>
 #include <linux/of.h>
+#include <linux/of_dma.h>
 #include <linux/slab.h>
 
 #include "sf-pdma.h"
 
+#define PDMA_QUIRK_NO_STRICT_ORDERING   BIT(0)
+
 #ifndef readq
 static inline unsigned long long readq(void __iomem *addr)
 {
@@ -65,7 +68,7 @@ static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
 static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
                              u64 dst, u64 src, u64 size)
 {
-       desc->xfer_type = PDMA_FULL_SPEED;
+       desc->xfer_type =  desc->chan->pdma->transfer_type;
        desc->xfer_size = size;
        desc->dst_addr = dst;
        desc->src_addr = src;
@@ -492,6 +495,7 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
 
 static int sf_pdma_probe(struct platform_device *pdev)
 {
+       const struct sf_pdma_driver_platdata *ddata;
        struct sf_pdma *pdma;
        int ret, n_chans;
        const enum dma_slave_buswidth widths =
@@ -517,6 +521,14 @@ static int sf_pdma_probe(struct platform_device *pdev)
 
        pdma->n_chans = n_chans;
 
+       pdma->transfer_type = PDMA_FULL_SPEED | PDMA_STRICT_ORDERING;
+
+       ddata  = device_get_match_data(&pdev->dev);
+       if (ddata) {
+               if (ddata->quirks & PDMA_QUIRK_NO_STRICT_ORDERING)
+                       pdma->transfer_type &= ~PDMA_STRICT_ORDERING;
+       }
+
        pdma->membase = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(pdma->membase))
                return PTR_ERR(pdma->membase);
@@ -563,7 +575,20 @@ static int sf_pdma_probe(struct platform_device *pdev)
                return ret;
        }
 
+       ret = of_dma_controller_register(pdev->dev.of_node,
+                                        of_dma_xlate_by_chan_id, pdma);
+       if (ret < 0) {
+               dev_err(&pdev->dev,
+                       "Can't register SiFive Platform OF_DMA. (%d)\n", ret);
+               goto err_unregister;
+       }
+
        return 0;
+
+err_unregister:
+       dma_async_device_unregister(&pdma->dma_dev);
+
+       return ret;
 }
 
 static void sf_pdma_remove(struct platform_device *pdev)
@@ -583,12 +608,25 @@ static void sf_pdma_remove(struct platform_device *pdev)
                tasklet_kill(&ch->err_tasklet);
        }
 
+       if (pdev->dev.of_node)
+               of_dma_controller_free(pdev->dev.of_node);
+
        dma_async_device_unregister(&pdma->dma_dev);
 }
 
+static const struct sf_pdma_driver_platdata mpfs_pdma = {
+       .quirks = PDMA_QUIRK_NO_STRICT_ORDERING,
+};
+
 static const struct of_device_id sf_pdma_dt_ids[] = {
-       { .compatible = "sifive,fu540-c000-pdma" },
-       { .compatible = "sifive,pdma0" },
+       {
+               .compatible = "sifive,fu540-c000-pdma",
+       }, {
+               .compatible = "sifive,pdma0",
+       }, {
+               .compatible = "microchip,mpfs-pdma",
+               .data       = &mpfs_pdma,
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
index d05772b5d8d3fd3e0bee8d984eb41916c80e5214..215e07183d7e26b71f4238ab70e14c50bf72b5a2 100644 (file)
@@ -48,7 +48,8 @@
 #define PDMA_ERR_STATUS_MASK                           GENMASK(31, 31)
 
 /* Transfer Type */
-#define PDMA_FULL_SPEED                                        0xFF000008
+#define PDMA_FULL_SPEED                                        0xFF000000
+#define PDMA_STRICT_ORDERING                           BIT(3)
 
 /* Error Recovery */
 #define MAX_RETRY                                      1
@@ -112,8 +113,13 @@ struct sf_pdma {
        struct dma_device       dma_dev;
        void __iomem            *membase;
        void __iomem            *mappedbase;
+       u32                     transfer_type;
        u32                     n_chans;
        struct sf_pdma_chan     chans[] __counted_by(n_chans);
 };
 
+struct sf_pdma_driver_platdata {
+       u32 quirks;
+};
+
 #endif /* _SF_PDMA_H */
index fea5bda34bc20f679b987e0a2d4ffde4f28e1f7c..1f1e86ba5c66aa8c33556eabb34a94ba431e4cc8 100644 (file)
@@ -755,11 +755,11 @@ static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
 
 static int rz_dmac_chan_probe(struct rz_dmac *dmac,
                              struct rz_dmac_chan *channel,
-                             unsigned int index)
+                             u8 index)
 {
        struct platform_device *pdev = to_platform_device(dmac->dev);
        struct rz_lmdesc *lmdesc;
-       char pdev_irqname[5];
+       char pdev_irqname[6];
        char *irqname;
        int ret;
 
@@ -767,7 +767,7 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
        channel->mid_rid = -EINVAL;
 
        /* Request the channel interrupt. */
-       sprintf(pdev_irqname, "ch%u", index);
+       scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
        channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
        if (channel->irq < 0)
                return channel->irq;
@@ -845,9 +845,9 @@ static int rz_dmac_probe(struct platform_device *pdev)
        struct dma_device *engine;
        struct rz_dmac *dmac;
        int channel_num;
-       unsigned int i;
        int ret;
        int irq;
+       u8 i;
 
        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
        if (!dmac)
index 9c121a4b33ad829c77ae88c094fc80d942b9d709..f97d80343aea42fd399e93da3492e07a8fa86b35 100644 (file)
@@ -25,7 +25,7 @@ struct sh_dmae_chan {
        const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
        int xmit_shift;                 /* log_2(bytes_per_xfer) */
        void __iomem *base;
-       char dev_id[16];                /* unique name per DMAC of channel */
+       char dev_id[32];                /* unique name per DMAC of channel */
        int pm_error;
        dma_addr_t slave_addr;
 };
index a9b4302f6050144f2359927c4cd4cf41dc38e80f..f7cd0cad056c16ced372483d64b5d1032696dea4 100644 (file)
@@ -706,10 +706,10 @@ static const struct dev_pm_ops usb_dmac_pm = {
 
 static int usb_dmac_chan_probe(struct usb_dmac *dmac,
                               struct usb_dmac_chan *uchan,
-                              unsigned int index)
+                              u8 index)
 {
        struct platform_device *pdev = to_platform_device(dmac->dev);
-       char pdev_irqname[5];
+       char pdev_irqname[6];
        char *irqname;
        int ret;
 
@@ -717,7 +717,7 @@ static int usb_dmac_chan_probe(struct usb_dmac *dmac,
        uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
 
        /* Request the channel interrupt. */
-       sprintf(pdev_irqname, "ch%u", index);
+       scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
        uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
        if (uchan->irq < 0)
                return -ENODEV;
@@ -768,8 +768,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
        const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
        struct dma_device *engine;
        struct usb_dmac *dmac;
-       unsigned int i;
        int ret;
+       u8 i;
 
        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
        if (!dmac)
@@ -869,7 +869,7 @@ static void usb_dmac_chan_remove(struct usb_dmac *dmac,
 static void usb_dmac_remove(struct platform_device *pdev)
 {
        struct usb_dmac *dmac = platform_get_drvdata(pdev);
-       int i;
+       u8 i;
 
        for (i = 0; i < dmac->n_channels; ++i)
                usb_dmac_chan_remove(dmac, &dmac->channels[i]);
index 002833fb1fa04cabddb058fa6c575d7ffc42247e..2c489299148eeea268e83e1a74824c434d86cdee 100644 (file)
 /**
  * struct stedma40_platform_data - Configuration struct for the dma device.
  *
- * @dev_tx: mapping between destination event line and io address
- * @dev_rx: mapping between source event line and io address
  * @disabled_channels: A vector, ending with -1, that marks physical channels
  * that are for different reasons not available for the driver.
  * @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
  * which avoids HW bug that exists in some versions of the controller.
- * SoftLLI introduces relink overhead that could impact performace for
+ * SoftLLI introduces relink overhead that could impact performance for
  * certain use cases.
  * @num_of_soft_lli_chans: The number of channels that needs to be configured
  * to use SoftLLI.
@@ -184,7 +182,7 @@ static __maybe_unused u32 d40_backup_regs[] = {
 
 /*
  * since 9540 and 8540 has the same HW revision
- * use v4a for 9540 or ealier
+ * use v4a for 9540 or earlier
  * use v4b for 8540 or later
  * HW revision:
  * DB8500ed has revision 0
@@ -411,7 +409,7 @@ struct d40_desc {
  *
  * @base: The virtual address of LCLA. 18 bit aligned.
  * @dma_addr: DMA address, if mapped
- * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used.
+ * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
  * This pointer is only there for clean-up on error.
  * @pages: The number of pages needed for all physical channels.
  * Only used later for clean-up on error
@@ -1655,7 +1653,7 @@ static void dma_tasklet(struct tasklet_struct *t)
 
        return;
  check_pending_tx:
-       /* Rescue manouver if receiving double interrupts */
+       /* Rescue maneuver if receiving double interrupts */
        if (d40c->pending_tx > 0)
                d40c->pending_tx--;
        spin_unlock_irqrestore(&d40c->lock, flags);
@@ -3412,7 +3410,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
                base->lcla_pool.base = (void *)page_list[i];
        } else {
                /*
-                * After many attempts and no succees with finding the correct
+                * After many attempts and no success with finding the correct
                 * alignment, try with allocating a big buffer.
                 */
                dev_warn(base->dev,
index fa4d4142a68a2183744a46f8d36da5e51fe2b18c..88547a23825b18aece9f4eb00221549e02eaadd1 100644 (file)
@@ -1348,8 +1348,8 @@ static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
 static int tegra_dma_probe(struct platform_device *pdev)
 {
        const struct tegra_dma_chip_data *cdata = NULL;
-       struct iommu_fwspec *iommu_spec;
-       unsigned int stream_id, i;
+       unsigned int i;
+       u32 stream_id;
        struct tegra_dma *tdma;
        int ret;
 
@@ -1378,12 +1378,10 @@ static int tegra_dma_probe(struct platform_device *pdev)
 
        tdma->dma_dev.dev = &pdev->dev;
 
-       iommu_spec = dev_iommu_fwspec_get(&pdev->dev);
-       if (!iommu_spec) {
+       if (!tegra_dev_iommu_get_stream_id(&pdev->dev, &stream_id)) {
                dev_err(&pdev->dev, "Missing iommu stream-id\n");
                return -EINVAL;
        }
-       stream_id = iommu_spec->ids[0] & 0xffff;
 
        ret = device_property_read_u32(&pdev->dev, "dma-channel-mask",
                                       &tdma->chan_mask);
index 7a0586633bf32624b442cf5569bacf9361408b78..24ad7077c53ba8f87b7dfb53b1d922a4bec411d2 100644 (file)
@@ -153,6 +153,7 @@ struct tegra_adma {
        void __iomem                    *base_addr;
        struct clk                      *ahub_clk;
        unsigned int                    nr_channels;
+       unsigned long                   *dma_chan_mask;
        unsigned long                   rx_requests_reserved;
        unsigned long                   tx_requests_reserved;
 
@@ -741,6 +742,10 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
 
        for (i = 0; i < tdma->nr_channels; i++) {
                tdc = &tdma->channels[i];
+               /* skip for reserved channels */
+               if (!tdc->tdma)
+                       continue;
+
                ch_reg = &tdc->ch_regs;
                ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
                /* skip if channel is not active */
@@ -779,6 +784,9 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
 
        for (i = 0; i < tdma->nr_channels; i++) {
                tdc = &tdma->channels[i];
+               /* skip for reserved channels */
+               if (!tdc->tdma)
+                       continue;
                ch_reg = &tdc->ch_regs;
                /* skip if channel was not active earlier */
                if (!ch_reg->cmd)
@@ -867,10 +875,31 @@ static int tegra_adma_probe(struct platform_device *pdev)
                return PTR_ERR(tdma->ahub_clk);
        }
 
+       tdma->dma_chan_mask = devm_kzalloc(&pdev->dev,
+                                          BITS_TO_LONGS(tdma->nr_channels) * sizeof(unsigned long),
+                                          GFP_KERNEL);
+       if (!tdma->dma_chan_mask)
+               return -ENOMEM;
+
+       /* Enable all channels by default */
+       bitmap_fill(tdma->dma_chan_mask, tdma->nr_channels);
+
+       ret = of_property_read_u32_array(pdev->dev.of_node, "dma-channel-mask",
+                                        (u32 *)tdma->dma_chan_mask,
+                                        BITS_TO_U32(tdma->nr_channels));
+       if (ret < 0 && (ret != -EINVAL)) {
+               dev_err(&pdev->dev, "dma-channel-mask is not complete.\n");
+               return ret;
+       }
+
        INIT_LIST_HEAD(&tdma->dma_dev.channels);
        for (i = 0; i < tdma->nr_channels; i++) {
                struct tegra_adma_chan *tdc = &tdma->channels[i];
 
+               /* skip for reserved channels */
+               if (!test_bit(i, tdma->dma_chan_mask))
+                       continue;
+
                tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
                                 + (cdata->ch_reg_size * i);
 
@@ -957,8 +986,10 @@ static void tegra_adma_remove(struct platform_device *pdev)
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&tdma->dma_dev);
 
-       for (i = 0; i < tdma->nr_channels; ++i)
-               irq_dispose_mapping(tdma->channels[i].irq);
+       for (i = 0; i < tdma->nr_channels; ++i) {
+               if (tdma->channels[i].irq)
+                       irq_dispose_mapping(tdma->channels[i].irq);
+       }
 
        pm_runtime_disable(&pdev->dev);
 }
index acc950bf609c36de7c6382851631ed441abc3af3..d376c117cecf60d0d314cde97f73cb5d3532a013 100644 (file)
@@ -12,6 +12,7 @@ k3-psil-lib-objs := k3-psil.o \
                    k3-psil-j721s2.o \
                    k3-psil-am62.o \
                    k3-psil-am62a.o \
-                   k3-psil-j784s4.o
+                   k3-psil-j784s4.o \
+                   k3-psil-am62p.o
 obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
 obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
index f1f920861fa9d8937a34bc29906778720a8dbfd8..5f8d2e93ff3fb516ea6374e007110b057377f11e 100644 (file)
@@ -2404,6 +2404,11 @@ static int edma_probe(struct platform_device *pdev)
        if (irq > 0) {
                irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
                                          dev_name(dev));
+               if (!irq_name) {
+                       ret = -ENOMEM;
+                       goto err_disable_pm;
+               }
+
                ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
                                       ecc);
                if (ret) {
@@ -2420,6 +2425,11 @@ static int edma_probe(struct platform_device *pdev)
        if (irq > 0) {
                irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
                                          dev_name(dev));
+               if (!irq_name) {
+                       ret = -ENOMEM;
+                       goto err_disable_pm;
+               }
+
                ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
                                       ecc);
                if (ret) {
diff --git a/drivers/dma/ti/k3-psil-am62p.c b/drivers/dma/ti/k3-psil-am62p.c
new file mode 100644 (file)
index 0000000..0f338e1
--- /dev/null
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x)                                     \
+       {                                                       \
+               .thread_id = x,                                 \
+               .ep_config = {                                  \
+                       .ep_type = PSIL_EP_PDMA_XY,             \
+                       .mapped_channel_id = -1,                \
+                       .default_flow_id = -1,                  \
+               },                                              \
+       }
+
+#define PSIL_PDMA_XY_PKT(x)                                    \
+       {                                                       \
+               .thread_id = x,                                 \
+               .ep_config = {                                  \
+                       .ep_type = PSIL_EP_PDMA_XY,             \
+                       .mapped_channel_id = -1,                \
+                       .default_flow_id = -1,                  \
+                       .pkt_mode = 1,                          \
+               },                                              \
+       }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt)              \
+       {                                                       \
+               .thread_id = x,                                 \
+               .ep_config = {                                  \
+                       .ep_type = PSIL_EP_NATIVE,              \
+                       .pkt_mode = 1,                          \
+                       .needs_epib = 1,                        \
+                       .psd_size = 16,                         \
+                       .mapped_channel_id = ch,                \
+                       .flow_start = flow_base,                \
+                       .flow_num = flow_cnt,                   \
+                       .default_flow_id = flow_base,           \
+               },                                              \
+       }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx)        \
+       {                                                       \
+               .thread_id = x,                                 \
+               .ep_config = {                                  \
+                       .ep_type = PSIL_EP_NATIVE,              \
+                       .pkt_mode = 1,                          \
+                       .needs_epib = 1,                        \
+                       .psd_size = 64,                         \
+                       .mapped_channel_id = ch,                \
+                       .flow_start = flow_base,                \
+                       .flow_num = flow_cnt,                   \
+                       .default_flow_id = default_flow,        \
+                       .notdpkt = tx,                          \
+               },                                              \
+       }
+
+#define PSIL_PDMA_MCASP(x)                             \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_PDMA_XY,     \
+                       .pdma_acc32 = 1,                \
+                       .pdma_burst = 1,                \
+               },                                      \
+       }
+
+#define PSIL_CSI2RX(x)                                 \
+       {                                               \
+               .thread_id = x,                         \
+               .ep_config = {                          \
+                       .ep_type = PSIL_EP_NATIVE,      \
+               },                                      \
+       }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am62p_src_ep_map[] = {
+       /* SAUL */
+       PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+       PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+       PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+       PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
+       /* PDMA_MAIN0 - SPI0-2 */
+       PSIL_PDMA_XY_PKT(0x4300),
+       PSIL_PDMA_XY_PKT(0x4301),
+       PSIL_PDMA_XY_PKT(0x4302),
+       PSIL_PDMA_XY_PKT(0x4303),
+       PSIL_PDMA_XY_PKT(0x4304),
+       PSIL_PDMA_XY_PKT(0x4305),
+       PSIL_PDMA_XY_PKT(0x4306),
+       PSIL_PDMA_XY_PKT(0x4307),
+       PSIL_PDMA_XY_PKT(0x4308),
+       PSIL_PDMA_XY_PKT(0x4309),
+       PSIL_PDMA_XY_PKT(0x430a),
+       PSIL_PDMA_XY_PKT(0x430b),
+       /* PDMA_MAIN1 - UART0-6 */
+       PSIL_PDMA_XY_PKT(0x4400),
+       PSIL_PDMA_XY_PKT(0x4401),
+       PSIL_PDMA_XY_PKT(0x4402),
+       PSIL_PDMA_XY_PKT(0x4403),
+       PSIL_PDMA_XY_PKT(0x4404),
+       PSIL_PDMA_XY_PKT(0x4405),
+       PSIL_PDMA_XY_PKT(0x4406),
+       /* PDMA_MAIN2 - MCASP0-2 */
+       PSIL_PDMA_MCASP(0x4500),
+       PSIL_PDMA_MCASP(0x4501),
+       PSIL_PDMA_MCASP(0x4502),
+       /* CPSW3G */
+       PSIL_ETHERNET(0x4600, 19, 19, 16),
+       /* CSI2RX */
+       PSIL_CSI2RX(0x5000),
+       PSIL_CSI2RX(0x5001),
+       PSIL_CSI2RX(0x5002),
+       PSIL_CSI2RX(0x5003),
+       PSIL_CSI2RX(0x5004),
+       PSIL_CSI2RX(0x5005),
+       PSIL_CSI2RX(0x5006),
+       PSIL_CSI2RX(0x5007),
+       PSIL_CSI2RX(0x5008),
+       PSIL_CSI2RX(0x5009),
+       PSIL_CSI2RX(0x500a),
+       PSIL_CSI2RX(0x500b),
+       PSIL_CSI2RX(0x500c),
+       PSIL_CSI2RX(0x500d),
+       PSIL_CSI2RX(0x500e),
+       PSIL_CSI2RX(0x500f),
+       PSIL_CSI2RX(0x5010),
+       PSIL_CSI2RX(0x5011),
+       PSIL_CSI2RX(0x5012),
+       PSIL_CSI2RX(0x5013),
+       PSIL_CSI2RX(0x5014),
+       PSIL_CSI2RX(0x5015),
+       PSIL_CSI2RX(0x5016),
+       PSIL_CSI2RX(0x5017),
+       PSIL_CSI2RX(0x5018),
+       PSIL_CSI2RX(0x5019),
+       PSIL_CSI2RX(0x501a),
+       PSIL_CSI2RX(0x501b),
+       PSIL_CSI2RX(0x501c),
+       PSIL_CSI2RX(0x501d),
+       PSIL_CSI2RX(0x501e),
+       PSIL_CSI2RX(0x501f),
+       PSIL_CSI2RX(0x5000),
+       PSIL_CSI2RX(0x5001),
+       PSIL_CSI2RX(0x5002),
+       PSIL_CSI2RX(0x5003),
+       PSIL_CSI2RX(0x5004),
+       PSIL_CSI2RX(0x5005),
+       PSIL_CSI2RX(0x5006),
+       PSIL_CSI2RX(0x5007),
+       PSIL_CSI2RX(0x5008),
+       PSIL_CSI2RX(0x5009),
+       PSIL_CSI2RX(0x500a),
+       PSIL_CSI2RX(0x500b),
+       PSIL_CSI2RX(0x500c),
+       PSIL_CSI2RX(0x500d),
+       PSIL_CSI2RX(0x500e),
+       PSIL_CSI2RX(0x500f),
+       PSIL_CSI2RX(0x5010),
+       PSIL_CSI2RX(0x5011),
+       PSIL_CSI2RX(0x5012),
+       PSIL_CSI2RX(0x5013),
+       PSIL_CSI2RX(0x5014),
+       PSIL_CSI2RX(0x5015),
+       PSIL_CSI2RX(0x5016),
+       PSIL_CSI2RX(0x5017),
+       PSIL_CSI2RX(0x5018),
+       PSIL_CSI2RX(0x5019),
+       PSIL_CSI2RX(0x501a),
+       PSIL_CSI2RX(0x501b),
+       PSIL_CSI2RX(0x501c),
+       PSIL_CSI2RX(0x501d),
+       PSIL_CSI2RX(0x501e),
+       PSIL_CSI2RX(0x501f),
+       /* CSIRX 1-3 (only for J722S) */
+       PSIL_CSI2RX(0x5100),
+       PSIL_CSI2RX(0x5101),
+       PSIL_CSI2RX(0x5102),
+       PSIL_CSI2RX(0x5103),
+       PSIL_CSI2RX(0x5104),
+       PSIL_CSI2RX(0x5105),
+       PSIL_CSI2RX(0x5106),
+       PSIL_CSI2RX(0x5107),
+       PSIL_CSI2RX(0x5108),
+       PSIL_CSI2RX(0x5109),
+       PSIL_CSI2RX(0x510a),
+       PSIL_CSI2RX(0x510b),
+       PSIL_CSI2RX(0x510c),
+       PSIL_CSI2RX(0x510d),
+       PSIL_CSI2RX(0x510e),
+       PSIL_CSI2RX(0x510f),
+       PSIL_CSI2RX(0x5110),
+       PSIL_CSI2RX(0x5111),
+       PSIL_CSI2RX(0x5112),
+       PSIL_CSI2RX(0x5113),
+       PSIL_CSI2RX(0x5114),
+       PSIL_CSI2RX(0x5115),
+       PSIL_CSI2RX(0x5116),
+       PSIL_CSI2RX(0x5117),
+       PSIL_CSI2RX(0x5118),
+       PSIL_CSI2RX(0x5119),
+       PSIL_CSI2RX(0x511a),
+       PSIL_CSI2RX(0x511b),
+       PSIL_CSI2RX(0x511c),
+       PSIL_CSI2RX(0x511d),
+       PSIL_CSI2RX(0x511e),
+       PSIL_CSI2RX(0x511f),
+       PSIL_CSI2RX(0x5200),
+       PSIL_CSI2RX(0x5201),
+       PSIL_CSI2RX(0x5202),
+       PSIL_CSI2RX(0x5203),
+       PSIL_CSI2RX(0x5204),
+       PSIL_CSI2RX(0x5205),
+       PSIL_CSI2RX(0x5206),
+       PSIL_CSI2RX(0x5207),
+       PSIL_CSI2RX(0x5208),
+       PSIL_CSI2RX(0x5209),
+       PSIL_CSI2RX(0x520a),
+       PSIL_CSI2RX(0x520b),
+       PSIL_CSI2RX(0x520c),
+       PSIL_CSI2RX(0x520d),
+       PSIL_CSI2RX(0x520e),
+       PSIL_CSI2RX(0x520f),
+       PSIL_CSI2RX(0x5210),
+       PSIL_CSI2RX(0x5211),
+       PSIL_CSI2RX(0x5212),
+       PSIL_CSI2RX(0x5213),
+       PSIL_CSI2RX(0x5214),
+       PSIL_CSI2RX(0x5215),
+       PSIL_CSI2RX(0x5216),
+       PSIL_CSI2RX(0x5217),
+       PSIL_CSI2RX(0x5218),
+       PSIL_CSI2RX(0x5219),
+       PSIL_CSI2RX(0x521a),
+       PSIL_CSI2RX(0x521b),
+       PSIL_CSI2RX(0x521c),
+       PSIL_CSI2RX(0x521d),
+       PSIL_CSI2RX(0x521e),
+       PSIL_CSI2RX(0x521f),
+       PSIL_CSI2RX(0x5300),
+       PSIL_CSI2RX(0x5301),
+       PSIL_CSI2RX(0x5302),
+       PSIL_CSI2RX(0x5303),
+       PSIL_CSI2RX(0x5304),
+       PSIL_CSI2RX(0x5305),
+       PSIL_CSI2RX(0x5306),
+       PSIL_CSI2RX(0x5307),
+       PSIL_CSI2RX(0x5308),
+       PSIL_CSI2RX(0x5309),
+       PSIL_CSI2RX(0x530a),
+       PSIL_CSI2RX(0x530b),
+       PSIL_CSI2RX(0x530c),
+       PSIL_CSI2RX(0x530d),
+       PSIL_CSI2RX(0x530e),
+       PSIL_CSI2RX(0x530f),
+       PSIL_CSI2RX(0x5310),
+       PSIL_CSI2RX(0x5311),
+       PSIL_CSI2RX(0x5312),
+       PSIL_CSI2RX(0x5313),
+       PSIL_CSI2RX(0x5314),
+       PSIL_CSI2RX(0x5315),
+       PSIL_CSI2RX(0x5316),
+       PSIL_CSI2RX(0x5317),
+       PSIL_CSI2RX(0x5318),
+       PSIL_CSI2RX(0x5319),
+       PSIL_CSI2RX(0x531a),
+       PSIL_CSI2RX(0x531b),
+       PSIL_CSI2RX(0x531c),
+       PSIL_CSI2RX(0x531d),
+       PSIL_CSI2RX(0x531e),
+       PSIL_CSI2RX(0x531f),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am62p_dst_ep_map[] = {
+       /* SAUL */
+       PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+       PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+       /* PDMA_MAIN0 - SPI0-2 */
+       PSIL_PDMA_XY_PKT(0xc300),
+       PSIL_PDMA_XY_PKT(0xc301),
+       PSIL_PDMA_XY_PKT(0xc302),
+       PSIL_PDMA_XY_PKT(0xc303),
+       PSIL_PDMA_XY_PKT(0xc304),
+       PSIL_PDMA_XY_PKT(0xc305),
+       PSIL_PDMA_XY_PKT(0xc306),
+       PSIL_PDMA_XY_PKT(0xc307),
+       PSIL_PDMA_XY_PKT(0xc308),
+       PSIL_PDMA_XY_PKT(0xc309),
+       PSIL_PDMA_XY_PKT(0xc30a),
+       PSIL_PDMA_XY_PKT(0xc30b),
+       /* PDMA_MAIN1 - UART0-6 */
+       PSIL_PDMA_XY_PKT(0xc400),
+       PSIL_PDMA_XY_PKT(0xc401),
+       PSIL_PDMA_XY_PKT(0xc402),
+       PSIL_PDMA_XY_PKT(0xc403),
+       PSIL_PDMA_XY_PKT(0xc404),
+       PSIL_PDMA_XY_PKT(0xc405),
+       PSIL_PDMA_XY_PKT(0xc406),
+       /* PDMA_MAIN2 - MCASP0-2 */
+       PSIL_PDMA_MCASP(0xc500),
+       PSIL_PDMA_MCASP(0xc501),
+       PSIL_PDMA_MCASP(0xc502),
+       /* CPSW3G */
+       PSIL_ETHERNET(0xc600, 19, 19, 8),
+       PSIL_ETHERNET(0xc601, 20, 27, 8),
+       PSIL_ETHERNET(0xc602, 21, 35, 8),
+       PSIL_ETHERNET(0xc603, 22, 43, 8),
+       PSIL_ETHERNET(0xc604, 23, 51, 8),
+       PSIL_ETHERNET(0xc605, 24, 59, 8),
+       PSIL_ETHERNET(0xc606, 25, 67, 8),
+       PSIL_ETHERNET(0xc607, 26, 75, 8),
+};
+
+struct psil_ep_map am62p_ep_map = {
+       .name = "am62p",
+       .src = am62p_src_ep_map,
+       .src_count = ARRAY_SIZE(am62p_src_ep_map),
+       .dst = am62p_dst_ep_map,
+       .dst_count = ARRAY_SIZE(am62p_dst_ep_map),
+};
index c383723d1c8f662b9c0981d1b7b76e0ccf12196f..a577be97e3447148cfc941ca4afe6c6c17fc7697 100644 (file)
@@ -45,5 +45,6 @@ extern struct psil_ep_map j721s2_ep_map;
 extern struct psil_ep_map am62_ep_map;
 extern struct psil_ep_map am62a_ep_map;
 extern struct psil_ep_map j784s4_ep_map;
+extern struct psil_ep_map am62p_ep_map;
 
 #endif /* K3_PSIL_PRIV_H_ */
index c11389d67a3f0f2ff75f300c8f2dc914d27a1570..25148d9524720372ca8098f6cfe68cc59641d29a 100644 (file)
@@ -26,6 +26,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
        { .family = "AM62X", .data = &am62_ep_map },
        { .family = "AM62AX", .data = &am62a_ep_map },
        { .family = "J784S4", .data = &j784s4_ep_map },
+       { .family = "AM62PX", .data = &am62p_ep_map },
+       { .family = "J722S", .data = &am62p_ep_map },
        { /* sentinel */ }
 };
 
index 30fd2f386f36a1ada7fc5cbe6a4359eb7019a559..6400d06588a24d1aa54ceefc3e79ce7661e43f3a 100644 (file)
@@ -3968,6 +3968,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
 {
        struct udma_chan *uc = to_udma_chan(&vc->chan);
        struct udma_desc *d;
+       u8 status;
 
        if (!vd)
                return;
@@ -3977,12 +3978,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
        if (d->metadata_size)
                udma_fetch_epib(uc, d);
 
-       /* Provide residue information for the client */
        if (result) {
                void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
 
                if (cppi5_desc_get_type(desc_vaddr) ==
                    CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+                       /* Provide residue information for the client */
                        result->residue = d->residue -
                                          cppi5_hdesc_get_pktlen(desc_vaddr);
                        if (result->residue)
@@ -3991,7 +3992,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
                                result->result = DMA_TRANS_NOERROR;
                } else {
                        result->residue = 0;
-                       result->result = DMA_TRANS_NOERROR;
+                       /* Propagate TR Response errors to the client */
+                       status = d->hwdesc[0].tr_resp_base->status;
+                       if (status)
+                               result->result = DMA_TRANS_ABORTED;
+                       else
+                               result->result = DMA_TRANS_NOERROR;
                }
        }
 }
@@ -4441,6 +4447,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
        { .family = "AM62X", .data = &am64_soc_data },
        { .family = "AM62AX", .data = &am64_soc_data },
        { .family = "J784S4", .data = &j721e_soc_data },
+       { .family = "AM62PX", .data = &am64_soc_data },
+       { .family = "J722S", .data = &am64_soc_data },
        { /* sentinel */ }
 };
 
index 618839df074866c4d9a2feca56a9f4183baea3cb..ad7125f6e2ca8e4452195e3ad4cedf4ee154a5b3 100644 (file)
@@ -453,7 +453,7 @@ disable_clk:
        return ret;
 }
 
-static int uniphier_mdmac_remove(struct platform_device *pdev)
+static void uniphier_mdmac_remove(struct platform_device *pdev)
 {
        struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev);
        struct dma_chan *chan;
@@ -468,16 +468,21 @@ static int uniphier_mdmac_remove(struct platform_device *pdev)
         */
        list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
                ret = dmaengine_terminate_sync(chan);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       /*
+                        * This results in resource leakage and maybe also
+                        * use-after-free errors as e.g. *mdev is kfreed.
+                        */
+                       dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+                                 chan->chan_id, ERR_PTR(ret));
+                       return;
+               }
                uniphier_mdmac_free_chan_resources(chan);
        }
 
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&mdev->ddev);
        clk_disable_unprepare(mdev->clk);
-
-       return 0;
 }
 
 static const struct of_device_id uniphier_mdmac_match[] = {
@@ -488,7 +493,7 @@ MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);
 
 static struct platform_driver uniphier_mdmac_driver = {
        .probe = uniphier_mdmac_probe,
-       .remove = uniphier_mdmac_remove,
+       .remove_new = uniphier_mdmac_remove,
        .driver = {
                .name = "uniphier-mio-dmac",
                .of_match_table = uniphier_mdmac_match,
index 3a8ee2b173b52e83b775bbe27d5f715a205aef87..3ce2dc2ad9de4290887e6c5344206d0e747b0c7f 100644 (file)
@@ -563,7 +563,7 @@ out_unregister_dmac:
        return ret;
 }
 
-static int uniphier_xdmac_remove(struct platform_device *pdev)
+static void uniphier_xdmac_remove(struct platform_device *pdev)
 {
        struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
        struct dma_device *ddev = &xdev->ddev;
@@ -579,15 +579,20 @@ static int uniphier_xdmac_remove(struct platform_device *pdev)
         */
        list_for_each_entry(chan, &ddev->channels, device_node) {
                ret = dmaengine_terminate_sync(chan);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       /*
+                        * This results in resource leakage and maybe also
+                        * use-after-free errors as e.g. *xdev is kfreed.
+                        */
+                       dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+                                 chan->chan_id, ERR_PTR(ret));
+                       return;
+               }
                uniphier_xdmac_free_chan_resources(chan);
        }
 
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(ddev);
-
-       return 0;
 }
 
 static const struct of_device_id uniphier_xdmac_match[] = {
@@ -598,7 +603,7 @@ MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
 
 static struct platform_driver uniphier_xdmac_driver = {
        .probe = uniphier_xdmac_probe,
-       .remove = uniphier_xdmac_remove,
+       .remove_new = uniphier_xdmac_remove,
        .driver = {
                .name = "uniphier-xdmac",
                .of_match_table = uniphier_xdmac_match,
index e641a5083e14b081a7871b3a19bda0c57601d6b5..98f5f6fb9ff9c771270c64c364265f72fd7b216d 100644 (file)
@@ -64,9 +64,10 @@ struct xdma_hw_desc {
        __le64          next_desc;
 };
 
-#define XDMA_DESC_SIZE         sizeof(struct xdma_hw_desc)
-#define XDMA_DESC_BLOCK_SIZE   (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
-#define XDMA_DESC_BLOCK_ALIGN  4096
+#define XDMA_DESC_SIZE                 sizeof(struct xdma_hw_desc)
+#define XDMA_DESC_BLOCK_SIZE           (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
+#define XDMA_DESC_BLOCK_ALIGN          32
+#define XDMA_DESC_BLOCK_BOUNDARY       4096
 
 /*
  * Channel registers
@@ -76,6 +77,7 @@ struct xdma_hw_desc {
 #define XDMA_CHAN_CONTROL_W1S          0x8
 #define XDMA_CHAN_CONTROL_W1C          0xc
 #define XDMA_CHAN_STATUS               0x40
+#define XDMA_CHAN_STATUS_RC            0x44
 #define XDMA_CHAN_COMPLETED_DESC       0x48
 #define XDMA_CHAN_ALIGNMENTS           0x4c
 #define XDMA_CHAN_INTR_ENABLE          0x90
@@ -101,6 +103,7 @@ struct xdma_hw_desc {
 #define CHAN_CTRL_IE_MAGIC_STOPPED             BIT(4)
 #define CHAN_CTRL_IE_IDLE_STOPPED              BIT(6)
 #define CHAN_CTRL_IE_READ_ERROR                        GENMASK(13, 9)
+#define CHAN_CTRL_IE_WRITE_ERROR               GENMASK(18, 14)
 #define CHAN_CTRL_IE_DESC_ERROR                        GENMASK(23, 19)
 #define CHAN_CTRL_NON_INCR_ADDR                        BIT(25)
 #define CHAN_CTRL_POLL_MODE_WB                 BIT(26)
@@ -111,8 +114,17 @@ struct xdma_hw_desc {
                         CHAN_CTRL_IE_DESC_ALIGN_MISMATCH |             \
                         CHAN_CTRL_IE_MAGIC_STOPPED |                   \
                         CHAN_CTRL_IE_READ_ERROR |                      \
+                        CHAN_CTRL_IE_WRITE_ERROR |                     \
                         CHAN_CTRL_IE_DESC_ERROR)
 
+#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
+
+#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH |       \
+                             CHAN_CTRL_IE_MAGIC_STOPPED |              \
+                             CHAN_CTRL_IE_READ_ERROR |                 \
+                             CHAN_CTRL_IE_WRITE_ERROR |                \
+                             CHAN_CTRL_IE_DESC_ERROR)
+
 /* bits of the channel interrupt enable mask */
 #define CHAN_IM_DESC_ERROR                     BIT(19)
 #define CHAN_IM_READ_ERROR                     BIT(9)
@@ -134,18 +146,6 @@ struct xdma_hw_desc {
 #define XDMA_SGDMA_DESC_ADJ    0x4088
 #define XDMA_SGDMA_DESC_CREDIT 0x408c
 
-/* bits of the SG DMA control register */
-#define XDMA_CTRL_RUN_STOP                     BIT(0)
-#define XDMA_CTRL_IE_DESC_STOPPED              BIT(1)
-#define XDMA_CTRL_IE_DESC_COMPLETED            BIT(2)
-#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH       BIT(3)
-#define XDMA_CTRL_IE_MAGIC_STOPPED             BIT(4)
-#define XDMA_CTRL_IE_IDLE_STOPPED              BIT(6)
-#define XDMA_CTRL_IE_READ_ERROR                        GENMASK(13, 9)
-#define XDMA_CTRL_IE_DESC_ERROR                        GENMASK(23, 19)
-#define XDMA_CTRL_NON_INCR_ADDR                        BIT(25)
-#define XDMA_CTRL_POLL_MODE_WB                 BIT(26)
-
 /*
  * interrupt registers
  */
index 84a88029226fdc16e423d8162408eea40cc22d00..170017ff2aad6e58c8d0ee4ea6e7d42c15c8202c 100644 (file)
@@ -78,27 +78,31 @@ struct xdma_chan {
  * @vdesc: Virtual DMA descriptor
  * @chan: DMA channel pointer
  * @dir: Transferring direction of the request
- * @dev_addr: Physical address on DMA device side
  * @desc_blocks: Hardware descriptor blocks
  * @dblk_num: Number of hardware descriptor blocks
  * @desc_num: Number of hardware descriptors
  * @completed_desc_num: Completed hardware descriptors
  * @cyclic: Cyclic transfer vs. scatter-gather
+ * @interleaved_dma: Interleaved DMA transfer
  * @periods: Number of periods in the cyclic transfer
  * @period_size: Size of a period in bytes in cyclic transfers
+ * @frames_left: Number of frames left in interleaved DMA transfer
+ * @error: tx error flag
  */
 struct xdma_desc {
        struct virt_dma_desc            vdesc;
        struct xdma_chan                *chan;
        enum dma_transfer_direction     dir;
-       u64                             dev_addr;
        struct xdma_desc_block          *desc_blocks;
        u32                             dblk_num;
        u32                             desc_num;
        u32                             completed_desc_num;
        bool                            cyclic;
+       bool                            interleaved_dma;
        u32                             periods;
        u32                             period_size;
+       u32                             frames_left;
+       bool                            error;
 };
 
 #define XDMA_DEV_STATUS_REG_DMA                BIT(0)
@@ -276,6 +280,7 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
        sw_desc->chan = chan;
        sw_desc->desc_num = desc_num;
        sw_desc->cyclic = cyclic;
+       sw_desc->error = false;
        dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
        sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
                                       GFP_NOWAIT);
@@ -371,6 +376,31 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
                return ret;
 
        xchan->busy = true;
+
+       return 0;
+}
+
+/**
+ * xdma_xfer_stop - Stop DMA transfer
+ * @xchan: DMA channel pointer
+ */
+static int xdma_xfer_stop(struct xdma_chan *xchan)
+{
+       int ret;
+       u32 val;
+       struct xdma_device *xdev = xchan->xdev_hdl;
+
+       /* clear run stop bit to prevent any further auto-triggering */
+       ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+                          CHAN_CTRL_RUN_STOP);
+       if (ret)
+               return ret;
+
+       /* Clear the channel status register */
+       ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
@@ -475,6 +505,84 @@ static void xdma_issue_pending(struct dma_chan *chan)
        spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
 }
 
+/**
+ * xdma_terminate_all - Terminate all transactions
+ * @chan: DMA channel pointer
+ */
+static int xdma_terminate_all(struct dma_chan *chan)
+{
+       struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+       struct virt_dma_desc *vd;
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       xdma_xfer_stop(xdma_chan);
+
+       spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+       xdma_chan->busy = false;
+       vd = vchan_next_desc(&xdma_chan->vchan);
+       if (vd) {
+               list_del(&vd->node);
+               dma_cookie_complete(&vd->tx);
+               vchan_terminate_vdesc(vd);
+       }
+       vchan_get_all_descriptors(&xdma_chan->vchan, &head);
+       list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);
+
+       spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+       return 0;
+}
+
+/**
+ * xdma_synchronize - Synchronize terminated transactions
+ * @chan: DMA channel pointer
+ */
+static void xdma_synchronize(struct dma_chan *chan)
+{
+       struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+       vchan_synchronize(&xdma_chan->vchan);
+}
+
+/**
+ * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
+ * @sw_desc: tx descriptor state container
+ * @src_addr: Value for a ->src_addr field of a first descriptor
+ * @dst_addr: Value for a ->dst_addr field of a first descriptor
+ * @size: Total size of a contiguous memory block
+ * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ */
+static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
+                                 u64 dst_addr, u32 size, u32 filled_descs_num)
+{
+       u32 left = size, len, desc_num = filled_descs_num;
+       struct xdma_desc_block *dblk;
+       struct xdma_hw_desc *desc;
+
+       dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
+       desc = dblk->virt_addr;
+       desc += desc_num & XDMA_DESC_ADJACENT_MASK;
+       do {
+               len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
+               /* set hardware descriptor */
+               desc->bytes = cpu_to_le32(len);
+               desc->src_addr = cpu_to_le64(src_addr);
+               desc->dst_addr = cpu_to_le64(dst_addr);
+               if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
+                       desc = (++dblk)->virt_addr;
+               else
+                       desc++;
+
+               src_addr += len;
+               dst_addr += len;
+               left -= len;
+       } while (left);
+
+       return desc_num - filled_descs_num;
+}
+
 /**
  * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
  * @chan: DMA channel pointer
@@ -491,13 +599,10 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
 {
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        struct dma_async_tx_descriptor *tx_desc;
-       u32 desc_num = 0, i, len, rest;
-       struct xdma_desc_block *dblk;
-       struct xdma_hw_desc *desc;
        struct xdma_desc *sw_desc;
-       u64 dev_addr, *src, *dst;
+       u32 desc_num = 0, i;
+       u64 addr, dev_addr, *src, *dst;
        struct scatterlist *sg;
-       u64 addr;
 
        for_each_sg(sgl, sg, sg_len, i)
                desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
@@ -506,6 +611,8 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
        if (!sw_desc)
                return NULL;
        sw_desc->dir = dir;
+       sw_desc->cyclic = false;
+       sw_desc->interleaved_dma = false;
 
        if (dir == DMA_MEM_TO_DEV) {
                dev_addr = xdma_chan->cfg.dst_addr;
@@ -517,32 +624,11 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
                dst = &addr;
        }
 
-       dblk = sw_desc->desc_blocks;
-       desc = dblk->virt_addr;
-       desc_num = 1;
+       desc_num = 0;
        for_each_sg(sgl, sg, sg_len, i) {
                addr = sg_dma_address(sg);
-               rest = sg_dma_len(sg);
-
-               do {
-                       len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
-                       /* set hardware descriptor */
-                       desc->bytes = cpu_to_le32(len);
-                       desc->src_addr = cpu_to_le64(*src);
-                       desc->dst_addr = cpu_to_le64(*dst);
-
-                       if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
-                               dblk++;
-                               desc = dblk->virt_addr;
-                       } else {
-                               desc++;
-                       }
-
-                       desc_num++;
-                       dev_addr += len;
-                       addr += len;
-                       rest -= len;
-               } while (rest);
+               desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
+               dev_addr += sg_dma_len(sg);
        }
 
        tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -576,9 +662,9 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
        struct xdma_device *xdev = xdma_chan->xdev_hdl;
        unsigned int periods = size / period_size;
        struct dma_async_tx_descriptor *tx_desc;
-       struct xdma_desc_block *dblk;
-       struct xdma_hw_desc *desc;
        struct xdma_desc *sw_desc;
+       u64 addr, dev_addr, *src, *dst;
+       u32 desc_num;
        unsigned int i;
 
        /*
@@ -602,22 +688,23 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
        sw_desc->periods = periods;
        sw_desc->period_size = period_size;
        sw_desc->dir = dir;
+       sw_desc->interleaved_dma = false;
 
-       dblk = sw_desc->desc_blocks;
-       desc = dblk->virt_addr;
+       addr = address;
+       if (dir == DMA_MEM_TO_DEV) {
+               dev_addr = xdma_chan->cfg.dst_addr;
+               src = &addr;
+               dst = &dev_addr;
+       } else {
+               dev_addr = xdma_chan->cfg.src_addr;
+               src = &dev_addr;
+               dst = &addr;
+       }
 
-       /* fill hardware descriptor */
+       desc_num = 0;
        for (i = 0; i < periods; i++) {
-               desc->bytes = cpu_to_le32(period_size);
-               if (dir == DMA_MEM_TO_DEV) {
-                       desc->src_addr = cpu_to_le64(address + i * period_size);
-                       desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
-               } else {
-                       desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
-                       desc->dst_addr = cpu_to_le64(address + i * period_size);
-               }
-
-               desc++;
+               desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
+               addr += i * period_size;
        }
 
        tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -632,6 +719,57 @@ failed:
        return NULL;
 }
 
+/**
+ * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
+ * @chan: DMA channel
+ * @xt: DMA transfer template
+ * @flags: tx flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_interleaved_dma(struct dma_chan *chan,
+                         struct dma_interleaved_template *xt,
+                         unsigned long flags)
+{
+       int i;
+       u32 desc_num = 0, period_size = 0;
+       struct dma_async_tx_descriptor *tx_desc;
+       struct xdma_chan *xchan = to_xdma_chan(chan);
+       struct xdma_desc *sw_desc;
+       u64 src_addr, dst_addr;
+
+       for (i = 0; i < xt->frame_size; ++i)
+               desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);
+
+       sw_desc = xdma_alloc_desc(xchan, desc_num, false);
+       if (!sw_desc)
+               return NULL;
+       sw_desc->dir = xt->dir;
+       sw_desc->interleaved_dma = true;
+       sw_desc->cyclic = flags & DMA_PREP_REPEAT;
+       sw_desc->frames_left = xt->numf;
+       sw_desc->periods = xt->numf;
+
+       desc_num = 0;
+       src_addr = xt->src_start;
+       dst_addr = xt->dst_start;
+       for (i = 0; i < xt->frame_size; ++i) {
+               desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
+               src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
+                                                             xt->sgl[i].size : 0);
+               dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
+                                                             xt->sgl[i].size : 0);
+               period_size += xt->sgl[i].size;
+       }
+       sw_desc->period_size = period_size;
+
+       tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
+       if (tx_desc)
+               return tx_desc;
+
+       xdma_free_desc(&sw_desc->vdesc);
+       return NULL;
+}
+
 /**
  * xdma_device_config - Configure the DMA channel
  * @chan: DMA channel
@@ -677,9 +815,8 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
                return -EINVAL;
        }
 
-       xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
-                                              dev, XDMA_DESC_BLOCK_SIZE,
-                                              XDMA_DESC_BLOCK_ALIGN, 0);
+       xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
+                                              XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
        if (!xdma_chan->desc_pool) {
                xdma_err(xdev, "unable to allocate descriptor pool");
                return -ENOMEM;
@@ -706,20 +843,20 @@ static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie
        spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
 
        vd = vchan_find_desc(&xdma_chan->vchan, cookie);
-       if (vd)
-               desc = to_xdma_desc(vd);
-       if (!desc || !desc->cyclic) {
-               spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
-               return ret;
-       }
-
-       period_idx = desc->completed_desc_num % desc->periods;
-       residue = (desc->periods - period_idx) * desc->period_size;
+       if (!vd)
+               goto out;
 
+       desc = to_xdma_desc(vd);
+       if (desc->error) {
+               ret = DMA_ERROR;
+       } else if (desc->cyclic) {
+               period_idx = desc->completed_desc_num % desc->periods;
+               residue = (desc->periods - period_idx) * desc->period_size;
+               dma_set_residue(state, residue);
+       }
+out:
        spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
 
-       dma_set_residue(state, residue);
-
        return ret;
 }
 
@@ -732,11 +869,12 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 {
        struct xdma_chan *xchan = dev_id;
        u32 complete_desc_num = 0;
-       struct xdma_device *xdev;
-       struct virt_dma_desc *vd;
+       struct xdma_device *xdev = xchan->xdev_hdl;
+       struct virt_dma_desc *vd, *next_vd;
        struct xdma_desc *desc;
        int ret;
        u32 st;
+       bool repeat_tx;
 
        spin_lock(&xchan->vchan.lock);
 
@@ -745,45 +883,76 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
        if (!vd)
                goto out;
 
-       xchan->busy = false;
+       /* Clear-on-read the status register */
+       ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
+       if (ret)
+               goto out;
+
        desc = to_xdma_desc(vd);
-       xdev = xchan->xdev_hdl;
+
+       st &= XDMA_CHAN_STATUS_MASK;
+       if ((st & XDMA_CHAN_ERROR_MASK) ||
+           !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
+               desc->error = true;
+               xdma_err(xdev, "channel error, status register value: 0x%x", st);
+               goto out;
+       }
 
        ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
                          &complete_desc_num);
        if (ret)
                goto out;
 
-       desc->completed_desc_num += complete_desc_num;
+       if (desc->interleaved_dma) {
+               xchan->busy = false;
+               desc->completed_desc_num += complete_desc_num;
+               if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
+                       xdma_xfer_start(xchan);
+                       goto out;
+               }
 
-       if (desc->cyclic) {
-               ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
-                                 &st);
-               if (ret)
+               /* last desc of any frame */
+               desc->frames_left--;
+               if (desc->frames_left)
+                       goto out;
+
+               /* last desc of the last frame  */
+               repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
+               next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
+               if (next_vd)
+                       repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
+               if (repeat_tx) {
+                       desc->frames_left = desc->periods;
+                       desc->completed_desc_num = 0;
+                       vchan_cyclic_callback(vd);
+               } else {
+                       list_del(&vd->node);
+                       vchan_cookie_complete(vd);
+               }
+               /* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
+               xdma_xfer_start(xchan);
+       } else if (!desc->cyclic) {
+               xchan->busy = false;
+               desc->completed_desc_num += complete_desc_num;
+
+               /* if all data blocks are transferred, remove and complete the request */
+               if (desc->completed_desc_num == desc->desc_num) {
+                       list_del(&vd->node);
+                       vchan_cookie_complete(vd);
                        goto out;
+               }
 
-               regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);
+               if (desc->completed_desc_num > desc->desc_num ||
+                   complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
+                       goto out;
 
+               /* transfer the rest of data */
+               xdma_xfer_start(xchan);
+       } else {
+               desc->completed_desc_num = complete_desc_num;
                vchan_cyclic_callback(vd);
-               goto out;
-       }
-
-       /*
-        * if all data blocks are transferred, remove and complete the request
-        */
-       if (desc->completed_desc_num == desc->desc_num) {
-               list_del(&vd->node);
-               vchan_cookie_complete(vd);
-               goto out;
        }
 
-       if (desc->completed_desc_num > desc->desc_num ||
-           complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
-               goto out;
-
-       /* transfer the rest of data (SG only) */
-       xdma_xfer_start(xchan);
-
 out:
        spin_unlock(&xchan->vchan.lock);
        return IRQ_HANDLED;
@@ -1080,6 +1249,9 @@ static int xdma_probe(struct platform_device *pdev)
        dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
        dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
        dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
+       dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
+       dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
+       dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
 
        xdev->dma_dev.dev = &pdev->dev;
        xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
@@ -1089,10 +1261,13 @@ static int xdma_probe(struct platform_device *pdev)
        xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
        xdev->dma_dev.device_config = xdma_device_config;
        xdev->dma_dev.device_issue_pending = xdma_issue_pending;
+       xdev->dma_dev.device_terminate_all = xdma_terminate_all;
+       xdev->dma_dev.device_synchronize = xdma_synchronize;
        xdev->dma_dev.filter.map = pdata->device_map;
        xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
        xdev->dma_dev.filter.fn = xdma_filter_fn;
        xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
+       xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
 
        ret = dma_async_device_register(&xdev->dma_dev);
        if (ret) {
index 69587d85a7cd20417b83be157aa4134784eae389..b82815e64d24e8352ac025bc1bd52de497530d66 100644 (file)
@@ -309,7 +309,7 @@ static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
 
        out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
        out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
-                           out_str_len);
+                           out_str_len + 1);
        snprintf(buf, out_str_len, "%d",
                 dpdma_debugfs.xilinx_dpdma_irq_done_count);
 
index 1eca8cc271f841e7b15967b2c33394169065b4ab..5152bd1b0daf599869195e81805fbb2709dbe6b4 100644 (file)
@@ -29,8 +29,6 @@ static u32 dpll_pin_xa_id;
        WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
 #define ASSERT_DPLL_NOT_REGISTERED(d)  \
        WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
-#define ASSERT_PIN_REGISTERED(p)       \
-       WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
 
 struct dpll_device_registration {
        struct list_head list;
@@ -425,6 +423,53 @@ void dpll_device_unregister(struct dpll_device *dpll,
 }
 EXPORT_SYMBOL_GPL(dpll_device_unregister);
 
+static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
+{
+       kfree(prop->package_label);
+       kfree(prop->panel_label);
+       kfree(prop->board_label);
+       kfree(prop->freq_supported);
+}
+
+static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
+                            struct dpll_pin_properties *dst)
+{
+       memcpy(dst, src, sizeof(*dst));
+       if (src->freq_supported && src->freq_supported_num) {
+               size_t freq_size = src->freq_supported_num *
+                                  sizeof(*src->freq_supported);
+               dst->freq_supported = kmemdup(src->freq_supported,
+                                             freq_size, GFP_KERNEL);
+               if (!dst->freq_supported)
+                       return -ENOMEM;
+       }
+       if (src->board_label) {
+               dst->board_label = kstrdup(src->board_label, GFP_KERNEL);
+               if (!dst->board_label)
+                       goto err_board_label;
+       }
+       if (src->panel_label) {
+               dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL);
+               if (!dst->panel_label)
+                       goto err_panel_label;
+       }
+       if (src->package_label) {
+               dst->package_label = kstrdup(src->package_label, GFP_KERNEL);
+               if (!dst->package_label)
+                       goto err_package_label;
+       }
+
+       return 0;
+
+err_package_label:
+       kfree(dst->panel_label);
+err_panel_label:
+       kfree(dst->board_label);
+err_board_label:
+       kfree(dst->freq_supported);
+       return -ENOMEM;
+}
+
 static struct dpll_pin *
 dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
               const struct dpll_pin_properties *prop)
@@ -441,20 +486,24 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
        if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX ||
                    prop->type > DPLL_PIN_TYPE_MAX)) {
                ret = -EINVAL;
-               goto err;
+               goto err_pin_prop;
        }
-       pin->prop = prop;
+       ret = dpll_pin_prop_dup(prop, &pin->prop);
+       if (ret)
+               goto err_pin_prop;
        refcount_set(&pin->refcount, 1);
        xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
        xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
        ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
                              &dpll_pin_xa_id, GFP_KERNEL);
        if (ret)
-               goto err;
+               goto err_xa_alloc;
        return pin;
-err:
+err_xa_alloc:
        xa_destroy(&pin->dpll_refs);
        xa_destroy(&pin->parent_refs);
+       dpll_pin_prop_free(&pin->prop);
+err_pin_prop:
        kfree(pin);
        return ERR_PTR(ret);
 }
@@ -514,6 +563,7 @@ void dpll_pin_put(struct dpll_pin *pin)
                xa_destroy(&pin->dpll_refs);
                xa_destroy(&pin->parent_refs);
                xa_erase(&dpll_pin_xa, pin->id);
+               dpll_pin_prop_free(&pin->prop);
                kfree(pin);
        }
        mutex_unlock(&dpll_lock);
@@ -564,8 +614,6 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
            WARN_ON(!ops->state_on_dpll_get) ||
            WARN_ON(!ops->direction_get))
                return -EINVAL;
-       if (ASSERT_DPLL_REGISTERED(dpll))
-               return -EINVAL;
 
        mutex_lock(&dpll_lock);
        if (WARN_ON(!(dpll->module == pin->module &&
@@ -636,15 +684,13 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
        unsigned long i, stop;
        int ret;
 
-       if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX))
+       if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX))
                return -EINVAL;
 
        if (WARN_ON(!ops) ||
            WARN_ON(!ops->state_on_pin_get) ||
            WARN_ON(!ops->direction_get))
                return -EINVAL;
-       if (ASSERT_PIN_REGISTERED(parent))
-               return -EINVAL;
 
        mutex_lock(&dpll_lock);
        ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
index 5585873c5c1b020e5618896aaa43ff1656ff53dd..717f715015c742238d5585fddc5cd267fbb0db9f 100644 (file)
@@ -44,7 +44,7 @@ struct dpll_device {
  * @module:            module of creator
  * @dpll_refs:         hold referencees to dplls pin was registered with
  * @parent_refs:       hold references to parent pins pin was registered with
- * @prop:              pointer to pin properties given by registerer
+ * @prop:              pin properties copied from the registerer
  * @rclk_dev_name:     holds name of device when pin can recover clock from it
  * @refcount:          refcount
  **/
@@ -55,7 +55,7 @@ struct dpll_pin {
        struct module *module;
        struct xarray dpll_refs;
        struct xarray parent_refs;
-       const struct dpll_pin_properties *prop;
+       struct dpll_pin_properties prop;
        refcount_t refcount;
 };
 
index 3370dbddb86bdeb6b627fdf741357eeb15ee3676..314bb377546519ef25987b2e6f77827f590fe5fe 100644 (file)
@@ -303,17 +303,17 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
        if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq,
                          DPLL_A_PIN_PAD))
                return -EMSGSIZE;
-       for (fs = 0; fs < pin->prop->freq_supported_num; fs++) {
+       for (fs = 0; fs < pin->prop.freq_supported_num; fs++) {
                nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED);
                if (!nest)
                        return -EMSGSIZE;
-               freq = pin->prop->freq_supported[fs].min;
+               freq = pin->prop.freq_supported[fs].min;
                if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq),
                                  &freq, DPLL_A_PIN_PAD)) {
                        nla_nest_cancel(msg, nest);
                        return -EMSGSIZE;
                }
-               freq = pin->prop->freq_supported[fs].max;
+               freq = pin->prop.freq_supported[fs].max;
                if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq),
                                  &freq, DPLL_A_PIN_PAD)) {
                        nla_nest_cancel(msg, nest);
@@ -329,9 +329,9 @@ static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
 {
        int fs;
 
-       for (fs = 0; fs < pin->prop->freq_supported_num; fs++)
-               if (freq >= pin->prop->freq_supported[fs].min &&
-                   freq <= pin->prop->freq_supported[fs].max)
+       for (fs = 0; fs < pin->prop.freq_supported_num; fs++)
+               if (freq >= pin->prop.freq_supported[fs].min &&
+                   freq <= pin->prop.freq_supported[fs].max)
                        return true;
        return false;
 }
@@ -421,7 +421,7 @@ static int
 dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
                     struct netlink_ext_ack *extack)
 {
-       const struct dpll_pin_properties *prop = pin->prop;
+       const struct dpll_pin_properties *prop = &pin->prop;
        struct dpll_pin_ref *ref;
        int ret;
 
@@ -553,6 +553,24 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
        return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
 }
 
+static bool dpll_pin_available(struct dpll_pin *pin)
+{
+       struct dpll_pin_ref *par_ref;
+       unsigned long i;
+
+       if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
+               return false;
+       xa_for_each(&pin->parent_refs, i, par_ref)
+               if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
+                               DPLL_REGISTERED))
+                       return true;
+       xa_for_each(&pin->dpll_refs, i, par_ref)
+               if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
+                               DPLL_REGISTERED))
+                       return true;
+       return false;
+}
+
 /**
  * dpll_device_change_ntf - notify that the dpll device has been changed
  * @dpll: registered dpll pointer
@@ -579,7 +597,7 @@ dpll_pin_event_send(enum dpll_cmd event, struct dpll_pin *pin)
        int ret = -ENOMEM;
        void *hdr;
 
-       if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)))
+       if (!dpll_pin_available(pin))
                return -ENODEV;
 
        msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
@@ -717,7 +735,7 @@ dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "state changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -753,7 +771,7 @@ dpll_pin_state_set(struct dpll_device *dpll, struct dpll_pin *pin,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "state changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -780,7 +798,7 @@ dpll_pin_prio_set(struct dpll_device *dpll, struct dpll_pin *pin,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "prio changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -808,7 +826,7 @@ dpll_pin_direction_set(struct dpll_pin *pin, struct dpll_device *dpll,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "direction changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -838,8 +856,8 @@ dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
        int ret;
 
        phase_adj = nla_get_s32(phase_adj_attr);
-       if (phase_adj > pin->prop->phase_range.max ||
-           phase_adj < pin->prop->phase_range.min) {
+       if (phase_adj > pin->prop.phase_range.max ||
+           phase_adj < pin->prop.phase_range.min) {
                NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
                                    "phase adjust value not supported");
                return -EINVAL;
@@ -1023,7 +1041,7 @@ dpll_pin_find(u64 clock_id, struct nlattr *mod_name_attr,
        unsigned long i;
 
        xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) {
-               prop = pin->prop;
+               prop = &pin->prop;
                cid_match = clock_id ? pin->clock_id == clock_id : true;
                mod_match = mod_name_attr && module_name(pin->module) ?
                        !nla_strcmp(mod_name_attr,
@@ -1130,6 +1148,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
        }
        pin = dpll_pin_find_from_nlattr(info);
        if (!IS_ERR(pin)) {
+               if (!dpll_pin_available(pin)) {
+                       nlmsg_free(msg);
+                       return -ENODEV;
+               }
                ret = dpll_msg_add_pin_handle(msg, pin);
                if (ret) {
                        nlmsg_free(msg);
@@ -1179,6 +1201,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 
        xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
                                 ctx->idx) {
+               if (!dpll_pin_available(pin))
+                       continue;
                hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq,
                                  &dpll_nl_family, NLM_F_MULTI,
@@ -1441,7 +1465,8 @@ int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
        }
        info->user_ptr[0] = xa_load(&dpll_pin_xa,
                                    nla_get_u32(info->attrs[DPLL_A_PIN_ID]));
-       if (!info->user_ptr[0]) {
+       if (!info->user_ptr[0] ||
+           !dpll_pin_available(info->user_ptr[0])) {
                NL_SET_ERR_MSG(info->extack, "pin not found");
                ret = -ENODEV;
                goto unlock_dev;
index 3f44e6b9d387f961d615db4a435edcfdc2faef11..7db22a4c83ef3f521aa5447759182b227171b668 100644 (file)
@@ -176,7 +176,7 @@ struct edac_device_ctl_info {
        struct edac_dev_sysfs_attribute *sysfs_attributes;
 
        /* pointer to main 'edac' subsys in sysfs */
-       struct bus_type *edac_subsys;
+       const struct bus_type *edac_subsys;
 
        /* the internal state of this controller instance */
        int op_state;
index 010c26be58464d141da85ea5dfea0039cb942827..237a542e045a3f2af6402e674aba0e603a0f5773 100644 (file)
@@ -229,7 +229,7 @@ static struct kobj_type ktype_device_ctrl = {
 int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
 {
        struct device *dev_root;
-       struct bus_type *edac_subsys;
+       const struct bus_type *edac_subsys;
        int err = -ENODEV;
 
        edac_dbg(1, "\n");
index 32a931d0cb71ff1c262c97eb323c8894768ee423..1c9f623826665be9e790f40fcd12d564835cad39 100644 (file)
@@ -67,7 +67,7 @@ char *edac_op_state_to_string(int opstate)
  * sysfs object: /sys/devices/system/edac
  *     need to export to other files
  */
-static struct bus_type edac_subsys = {
+static const struct bus_type edac_subsys = {
        .name = "edac",
        .dev_name = "edac",
 };
@@ -90,7 +90,7 @@ static void edac_subsys_exit(void)
 }
 
 /* return pointer to the 'edac' node in sysfs */
-struct bus_type *edac_get_sysfs_subsys(void)
+const struct bus_type *edac_get_sysfs_subsys(void)
 {
        return &edac_subsys;
 }
index 901d4cd3ca38769c4f1561e9c3a6153e1f70ea4a..7b44afcf48db06dc7254259244984eabdb11028c 100644 (file)
@@ -338,7 +338,7 @@ static struct kobj_type ktype_edac_pci_main_kobj = {
 static int edac_pci_main_kobj_setup(void)
 {
        int err = -ENODEV;
-       struct bus_type *edac_subsys;
+       const struct bus_type *edac_subsys;
        struct device *dev_root;
 
        edac_dbg(0, "\n");
index 8625de20fc71752018c261445f250d4abc492f1e..62caf454b567079e0962e327c5f9eae6ec85f3d5 100644 (file)
@@ -1005,7 +1005,7 @@ static int mc_probe(struct platform_device *pdev)
                goto free_edac_mc;
        }
 
-       rc = xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
+       rc = xlnx_register_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
                                 XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR |
                                 XPM_EVENT_ERROR_MASK_NOC_CR | XPM_EVENT_ERROR_MASK_NOC_NCR,
                                 false, err_callback, mci);
@@ -1042,7 +1042,7 @@ static int mc_remove(struct platform_device *pdev)
        debugfs_remove_recursive(priv->debugfs);
 #endif
 
-       xlnx_unregister_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
+       xlnx_unregister_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
                              XPM_EVENT_ERROR_MASK_DDRMC_CR |
                              XPM_EVENT_ERROR_MASK_NOC_CR |
                              XPM_EVENT_ERROR_MASK_NOC_NCR |
index f72e90ceca53d5909a5c01e0ade3f5005d2d2323..53de581a393a35bb48f637ea73e910b4c8340ace 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * extcon-qcom-spmi-misc.c - Qualcomm USB extcon driver to support USB ID
  *                     and VBUS detection based on extcon-usb-gpio.c.
  *
index 4d08c2123e5980e27cfe262607ee8320fb004242..2eab341de6b76f3b3ca3d89b008b8800d5260ae1 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/usb/typec.h>
 #include <linux/usb/typec_altmode.h>
 #include <linux/usb/role.h>
+#include <linux/irq.h>
 
 #define TUSB320_REG8                           0x8
 #define TUSB320_REG8_CURRENT_MODE_ADVERTISE    GENMASK(7, 6)
@@ -515,6 +516,8 @@ static int tusb320_probe(struct i2c_client *client)
        const void *match_data;
        unsigned int revision;
        int ret;
+       u32 irq_trigger_type = IRQF_TRIGGER_FALLING;
+       struct irq_data *irq_d;
 
        priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -568,9 +571,13 @@ static int tusb320_probe(struct i2c_client *client)
                 */
                tusb320_state_update_handler(priv, true);
 
+       irq_d = irq_get_irq_data(client->irq);
+       if (irq_d)
+               irq_trigger_type = irqd_get_trigger_type(irq_d);
+
        ret = devm_request_threaded_irq(priv->dev, client->irq, NULL,
                                        tusb320_irq_handler,
-                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                       IRQF_ONESHOT | irq_trigger_type,
                                        client->name, priv);
        if (ret)
                tusb320_typec_remove(priv);
index 6f7a60d2ed9161b21625a5949c9a24868cf4de33..e7f55c021e562fd455d4f77bafd75fd8564b6b43 100644 (file)
@@ -1280,8 +1280,6 @@ int extcon_dev_register(struct extcon_dev *edev)
 
        edev->id = ret;
 
-       dev_set_name(&edev->dev, "extcon%d", edev->id);
-
        ret = extcon_alloc_cables(edev);
        if (ret < 0)
                goto err_alloc_cables;
@@ -1310,6 +1308,7 @@ int extcon_dev_register(struct extcon_dev *edev)
        RAW_INIT_NOTIFIER_HEAD(&edev->nh_all);
 
        dev_set_drvdata(&edev->dev, edev);
+       dev_set_name(&edev->dev, "extcon%d", edev->id);
        edev->state = 0;
 
        ret = device_register(&edev->dev);
index 6ac5ff20a2fe22f1c3c5a7010ca0af87caf4c446..8aaa7fcb2630dcf47a5325982cbed1037f685b3b 100644 (file)
@@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
         */
        card->bm_generation = generation;
 
-       if (root_device == NULL) {
+       if (card->gap_count == 0) {
+               /*
+                * If self IDs have inconsistent gap counts, do a
+                * bus reset ASAP. The config rom read might never
+                * complete, so don't wait for it. However, still
+                * send a PHY configuration packet prior to the
+                * bus reset. The PHY configuration packet might
+                * fail, but 1394-2008 8.4.5.2 explicitly permits
+                * it in this case, so it should be safe to try.
+                */
+               new_root_id = local_id;
+               /*
+                * We must always send a bus reset if the gap count
+                * is inconsistent, so bypass the 5-reset limit.
+                */
+               card->bm_retries = 0;
+       } else if (root_device == NULL) {
                /*
                 * Either link_on is false, or we failed to read the
                 * config rom.  In either case, pick another root.
index 0547253d16fe5dc5f606a34035473de7f271acf6..7d3346b3a2bf320910c72783ab857415d331ed14 100644 (file)
@@ -118,10 +118,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
  * @buf:       where to put the string
  * @size:      size of @buf, in bytes
  *
- * The string is taken from a minimal ASCII text descriptor leaf after
- * the immediate entry with @key.  The string is zero-terminated.
- * An overlong string is silently truncated such that it and the
- * zero byte fit into @size.
+ * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the
+ * @key. The string is zero-terminated. An overlong string is silently truncated such that it
+ * and the zero byte fit into @size.
  *
  * Returns strlen(buf) or a negative error code.
  */
@@ -368,8 +367,17 @@ static ssize_t show_text_leaf(struct device *dev,
        for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) {
                int result = fw_csr_string(directories[i], attr->key, buf, bufsize);
                // Detected.
-               if (result >= 0)
+               if (result >= 0) {
                        ret = result;
+               } else if (i == 0 && attr->key == CSR_VENDOR) {
+                       // Sony DVMC-DA1 has configuration ROM such that the descriptor leaf entry
+                       // in the root directory follows to the directory entry for vendor ID
+                       // instead of the immediate value for vendor ID.
+                       result = fw_csr_string(directories[i], CSR_DIRECTORY | attr->key, buf,
+                                              bufsize);
+                       if (result >= 0)
+                               ret = result;
+               }
        }
 
        if (ret >= 0) {
index 6146b2927d5c56af6bc3b9722c1789f29a4498fe..f2556a8e940156bc4f9d34ae5dc92aac837b688a 100644 (file)
@@ -107,12 +107,12 @@ struct ffa_drv_info {
        struct work_struct notif_pcpu_work;
        struct work_struct irq_work;
        struct xarray partition_info;
-       unsigned int partition_count;
        DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
        struct mutex notify_lock; /* lock to protect notifier hashtable  */
 };
 
 static struct ffa_drv_info *drv_info;
+static void ffa_partitions_cleanup(void);
 
 /*
  * The driver must be able to support all the versions from the earliest
@@ -733,6 +733,11 @@ static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
        void *cb_data;
 
        partition = xa_load(&drv_info->partition_info, part_id);
+       if (!partition) {
+               pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+               return;
+       }
+
        read_lock(&partition->rw_lock);
        callback = partition->callback;
        cb_data = partition->cb_data;
@@ -915,6 +920,11 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
                return -EOPNOTSUPP;
 
        partition = xa_load(&drv_info->partition_info, part_id);
+       if (!partition) {
+               pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+               return -EINVAL;
+       }
+
        write_lock(&partition->rw_lock);
 
        cb_valid = !!partition->callback;
@@ -1186,9 +1196,9 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
        kfree(pbuf);
 }
 
-static void ffa_setup_partitions(void)
+static int ffa_setup_partitions(void)
 {
-       int count, idx;
+       int count, idx, ret;
        uuid_t uuid;
        struct ffa_device *ffa_dev;
        struct ffa_dev_part_info *info;
@@ -1197,7 +1207,7 @@ static void ffa_setup_partitions(void)
        count = ffa_partition_probe(&uuid_null, &pbuf);
        if (count <= 0) {
                pr_info("%s: No partitions found, error %d\n", __func__, count);
-               return;
+               return -EINVAL;
        }
 
        xa_init(&drv_info->partition_info);
@@ -1226,40 +1236,53 @@ static void ffa_setup_partitions(void)
                        ffa_device_unregister(ffa_dev);
                        continue;
                }
-               xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL);
+               rwlock_init(&info->rw_lock);
+               ret = xa_insert(&drv_info->partition_info, tpbuf->id,
+                               info, GFP_KERNEL);
+               if (ret) {
+                       pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
+                              __func__, tpbuf->id, ret);
+                       ffa_device_unregister(ffa_dev);
+                       kfree(info);
+               }
        }
-       drv_info->partition_count = count;
 
        kfree(pbuf);
 
        /* Allocate for the host */
        info = kzalloc(sizeof(*info), GFP_KERNEL);
-       if (!info)
-               return;
-       xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL);
-       drv_info->partition_count++;
+       if (!info) {
+               pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n",
+                      __func__, drv_info->vm_id);
+               /* Already registered devices are freed on bus_exit */
+               ffa_partitions_cleanup();
+               return -ENOMEM;
+       }
+
+       rwlock_init(&info->rw_lock);
+       ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
+                       info, GFP_KERNEL);
+       if (ret) {
+               pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
+                      __func__, drv_info->vm_id, ret);
+               kfree(info);
+               /* Already registered devices are freed on bus_exit */
+               ffa_partitions_cleanup();
+       }
+
+       return ret;
 }
 
 static void ffa_partitions_cleanup(void)
 {
-       struct ffa_dev_part_info **info;
-       int idx, count = drv_info->partition_count;
-
-       if (!count)
-               return;
-
-       info = kcalloc(count, sizeof(*info), GFP_KERNEL);
-       if (!info)
-               return;
-
-       xa_extract(&drv_info->partition_info, (void **)info, 0, VM_ID_MASK,
-                  count, XA_PRESENT);
+       struct ffa_dev_part_info *info;
+       unsigned long idx;
 
-       for (idx = 0; idx < count; idx++)
-               kfree(info[idx]);
-       kfree(info);
+       xa_for_each(&drv_info->partition_info, idx, info) {
+               xa_erase(&drv_info->partition_info, idx);
+               kfree(info);
+       }
 
-       drv_info->partition_count = 0;
        xa_destroy(&drv_info->partition_info);
 }
 
@@ -1508,7 +1531,11 @@ static int __init ffa_init(void)
 
        ffa_notifications_setup();
 
-       ffa_setup_partitions();
+       ret = ffa_setup_partitions();
+       if (ret) {
+               pr_err("failed to setup partitions\n");
+               goto cleanup_notifs;
+       }
 
        ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
                                       drv_info, true);
@@ -1516,6 +1543,9 @@ static int __init ffa_init(void)
                pr_info("Failed to register driver sched callback %d\n", ret);
 
        return 0;
+
+cleanup_notifs:
+       ffa_notifications_cleanup();
 free_pages:
        if (drv_info->tx_buffer)
                free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
@@ -1535,7 +1565,6 @@ static void __exit ffa_exit(void)
        ffa_rxtx_unmap(drv_info->vm_id);
        free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
        free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
-       xa_destroy(&drv_info->partition_info);
        kfree(drv_info);
        arm_ffa_bus_exit();
 }
index c0644558042a06dc1f0c087af4f22264abeee52a..e2050adbf85c6a125fc5ba241fb0c6b133466bfe 100644 (file)
@@ -13,7 +13,7 @@
 #include "notify.h"
 
 /* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION                0x20001
+#define SCMI_PROTOCOL_SUPPORTED_VERSION                0x20000
 
 enum scmi_clock_protocol_cmd {
        CLOCK_ATTRIBUTES = 0x3,
@@ -954,8 +954,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
                        scmi_clock_describe_rates_get(ph, clkid, clk);
        }
 
-       if (PROTOCOL_REV_MAJOR(version) >= 0x2 &&
-           PROTOCOL_REV_MINOR(version) >= 0x1) {
+       if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
                cinfo->clock_config_set = scmi_clock_config_set_v2;
                cinfo->clock_config_get = scmi_clock_config_get_v2;
        } else {
index c46dc5215af7a7c8a78e0fe26c12fac51c8080b7..00b165d1f502df7816527298996f196585d10f5a 100644 (file)
@@ -314,6 +314,7 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
 void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
 bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
                     struct scmi_xfer *xfer);
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
 
 /* declarations for message passing transports */
 struct scmi_msg_payld;
index a9f70e6e58ac39912ba2ccc7814b918684741115..3ea64b22cf0dfd4493c8179de90b81b822759342 100644 (file)
@@ -2834,7 +2834,7 @@ clear_ida:
        return ret;
 }
 
-static int scmi_remove(struct platform_device *pdev)
+static void scmi_remove(struct platform_device *pdev)
 {
        int id;
        struct scmi_info *info = platform_get_drvdata(pdev);
@@ -2868,8 +2868,6 @@ static int scmi_remove(struct platform_device *pdev)
        scmi_cleanup_txrx_channels(info);
 
        ida_free(&scmi_id, info->id);
-
-       return 0;
 }
 
 static ssize_t protocol_version_show(struct device *dev,
@@ -2947,7 +2945,7 @@ static struct platform_driver scmi_driver = {
                   .dev_groups = versions_groups,
                   },
        .probe = scmi_probe,
-       .remove = scmi_remove,
+       .remove_new = scmi_remove,
 };
 
 /**
index 19246ed1f01ff7cc3ea7346402c32e02b57b336a..b8d470417e8f99bb6408aba541bc4b89541ddf7c 100644 (file)
@@ -45,6 +45,20 @@ static void rx_callback(struct mbox_client *cl, void *m)
 {
        struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
 
+       /*
+        * An A2P IRQ is NOT valid when received while the platform still has
+        * the ownership of the channel, because the platform at first releases
+        * the SMT channel and then sends the completion interrupt.
+        *
+        * This addresses a possible race condition in which a spurious IRQ from
+        * a previous timed-out reply which arrived late could be wrongly
+        * associated with the next pending transaction.
+        */
+       if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
+               dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
+               return;
+       }
+
        scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
 }
 
index 8ea2a7b3d35d2029f9731ef3031d575609093d6e..211e8e0aef2c2b4fade048990249c2444afb946a 100644 (file)
@@ -350,8 +350,8 @@ process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
 }
 
 static inline void
-process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
-                       unsigned int loop_idx,
+process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
+                       struct scmi_opp *opp, unsigned int loop_idx,
                        const struct scmi_msg_resp_perf_describe_levels_v4 *r)
 {
        opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
@@ -362,10 +362,23 @@ process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
        /* Note that PERF v4 reports always five 32-bit words */
        opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
        if (dom->level_indexing_mode) {
+               int ret;
+
                opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
 
-               xa_store(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL);
-               xa_store(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+               ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
+                               GFP_KERNEL);
+               if (ret)
+                       dev_warn(dev,
+                                "Failed to add opps_by_idx at %d - ret:%d\n",
+                                opp->level_index, ret);
+
+               ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+               if (ret)
+                       dev_warn(dev,
+                                "Failed to add opps_by_lvl at %d - ret:%d\n",
+                                opp->perf, ret);
+
                hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
        }
 }
@@ -382,7 +395,7 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
        if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
                process_response_opp(opp, st->loop_idx, response);
        else
-               process_response_opp_v4(p->perf_dom, opp, st->loop_idx,
+               process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
                                        response);
        p->perf_dom->opp_count++;
 
index 0493aa3c12bf5363e02c1ecc9b2520d1bd8b3d67..350573518503355f6abaa4d24cbcac6368e8930c 100644 (file)
@@ -1111,7 +1111,6 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
                int i;
 
                for (i = 0; i < num_chans; i++) {
-                       void *xret;
                        struct scmi_raw_queue *q;
 
                        q = scmi_raw_queue_init(raw);
@@ -1120,13 +1119,12 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
                                goto err_xa;
                        }
 
-                       xret = xa_store(&raw->chans_q, channels[i], q,
+                       ret = xa_insert(&raw->chans_q, channels[i], q,
                                        GFP_KERNEL);
-                       if (xa_err(xret)) {
+                       if (ret) {
                                dev_err(dev,
                                        "Fail to allocate Raw queue 0x%02X\n",
                                        channels[i]);
-                               ret = xa_err(xret);
                                goto err_xa;
                        }
                }
@@ -1322,6 +1320,12 @@ void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
        dev = raw->handle->dev;
        q = scmi_raw_queue_select(raw, idx,
                                  SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
+       if (!q) {
+               dev_warn(dev,
+                        "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
+                        idx, chan_id);
+               return;
+       }
 
        /*
         * Grab the msg_q_lock upfront to avoid a possible race between
index 87b4f4d35f06230bc161fc4205c7b199e03c0015..8bf495bcad09b7ba8246c05b4e76086fa1bdaf90 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/processor.h>
 #include <linux/types.h>
 
-#include <asm-generic/bug.h>
+#include <linux/bug.h>
 
 #include "common.h"
 
@@ -122,3 +122,9 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
                (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
                 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
 }
+
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
+{
+       return (ioread32(&shmem->channel_status) &
+                       SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
index 3f123f592cb4c21b86ef2f77b9c31ff6c5aade00..94a6b4e667de140d7dc72245c346df2ae43c4bd2 100644 (file)
@@ -863,7 +863,7 @@ static void scpi_free_channels(void *data)
                mbox_free_channel(info->channels[i].chan);
 }
 
-static int scpi_remove(struct platform_device *pdev)
+static void scpi_remove(struct platform_device *pdev)
 {
        int i;
        struct scpi_drvinfo *info = platform_get_drvdata(pdev);
@@ -874,8 +874,6 @@ static int scpi_remove(struct platform_device *pdev)
                kfree(info->dvfs[i]->opps);
                kfree(info->dvfs[i]);
        }
-
-       return 0;
 }
 
 #define MAX_SCPI_XFERS         10
@@ -1048,7 +1046,7 @@ static struct platform_driver scpi_driver = {
                .dev_groups = versions_groups,
        },
        .probe = scpi_probe,
-       .remove = scpi_remove,
+       .remove_new = scpi_remove,
 };
 module_platform_driver(scpi_driver);
 
index 83f5bb57fa4c466334a90c2195c06ce7443d1b6a..83092d93f36a63087ffbd8b6460d38a824e9cbb1 100644 (file)
@@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void)
                efi_memory_desc_t *md;
 
                for_each_efi_memory_desc(md) {
-                       int md_size = md->num_pages << EFI_PAGE_SHIFT;
+                       u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
                        struct resource *res;
 
                        if (!(md->attribute & EFI_MEMORY_SP))
index 35c37f667781c7071c714aef274e68dbddca026b..9b3884ff81e699f2308a3cf618e774ad9a67e6a3 100644 (file)
@@ -523,6 +523,17 @@ static void cper_print_tstamp(const char *pfx,
        }
 }
 
+struct ignore_section {
+       guid_t guid;
+       const char *name;
+};
+
+static const struct ignore_section ignore_sections[] = {
+       { .guid = CPER_SEC_CXL_GEN_MEDIA_GUID, .name = "CXL General Media Event" },
+       { .guid = CPER_SEC_CXL_DRAM_GUID, .name = "CXL DRAM Event" },
+       { .guid = CPER_SEC_CXL_MEM_MODULE_GUID, .name = "CXL Memory Module Event" },
+};
+
 static void
 cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
                           int sec_no)
@@ -543,6 +554,14 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
                printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
 
        snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+       for (int i = 0; i < ARRAY_SIZE(ignore_sections); i++) {
+               if (guid_equal(sec_type, &ignore_sections[i].guid)) {
+                       printk("%ssection_type: %s\n", newpfx, ignore_sections[i].name);
+                       return;
+               }
+       }
+
        if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
                struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
 
index d4987d013080174bda0e462f029c03192897bebb..a00e07b853f221721e1bcd2f801cadcc5bcb67cf 100644 (file)
@@ -143,15 +143,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
        case EFI_BOOT_SERVICES_DATA:
        case EFI_CONVENTIONAL_MEMORY:
        case EFI_PERSISTENT_MEMORY:
-               /*
-                * Special purpose memory is 'soft reserved', which means it
-                * is set aside initially, but can be hotplugged back in or
-                * be assigned to the dax driver after boot.
-                */
-               if (efi_soft_reserve_enabled() &&
-                   (md->attribute & EFI_MEMORY_SP))
-                       return false;
-
                /*
                 * According to the spec, these regions are no longer reserved
                 * after calling ExitBootServices(). However, we can only use
@@ -196,6 +187,16 @@ static __init void reserve_regions(void)
                size = npages << PAGE_SHIFT;
 
                if (is_memory(md)) {
+                       /*
+                        * Special purpose memory is 'soft reserved', which
+                        * means it is set aside initially. Don't add a memblock
+                        * for it now so that it can be hotplugged back in or
+                        * be assigned to the dax driver after boot.
+                        */
+                       if (efi_soft_reserve_enabled() &&
+                           (md->attribute & EFI_MEMORY_SP))
+                               continue;
+
                        early_init_dt_add_memory_arch(paddr, size);
 
                        if (!is_usable_memory(md))
index 06964a3c130f6addeed20eca1ed26153a2260854..73f4810f6db38ecc933f9a6ac2bed5ae57709148 100644 (file)
@@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM)          += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
                                   -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
                                   -DEFI_HAVE_STRCMP -fno-builtin -fpic \
                                   $(call cc-option,-mno-single-pic-base)
-cflags-$(CONFIG_RISCV)         += -fpic -DNO_ALTERNATIVE
+cflags-$(CONFIG_RISCV)         += -fpic -DNO_ALTERNATIVE -mno-relax
 cflags-$(CONFIG_LOONGARCH)     += -fpie
 
 cflags-$(CONFIG_EFI_PARAMS_FROM_FDT)   += -I$(srctree)/scripts/dtc/libfdt
@@ -143,7 +143,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM64)      := R_AARCH64_ABS
 # exist.
 STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
                                   --prefix-symbols=__efistub_
-STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
+STUBCOPY_RELOC-$(CONFIG_RISCV) := -E R_RISCV_HI20\|R_RISCV_$(BITS)\|R_RISCV_RELAX
 
 # For LoongArch, keep all the symbols in .init section and make sure that no
 # absolute symbols references exist.
index 6b83c492c3b8260d52e16bb73a1d5abaa4cb943d..31928bd87e0fff5a0666234ef8328cf5d4f564df 100644 (file)
@@ -14,6 +14,7 @@
  * @max:       the address that the last allocated memory page shall not
  *             exceed
  * @align:     minimum alignment of the base of the allocation
+ * @memory_type: the type of memory to allocate
  *
  * Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
  * to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will
index 212687c30d79c4b0b307af0b8d3c7b52502e6a95..c04b82ea40f2169b6764ff69a14ff3acc5a8795d 100644 (file)
@@ -956,7 +956,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
 
 efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
                              unsigned long *addr, unsigned long random_seed,
-                             int memory_type, unsigned long alloc_limit);
+                             int memory_type, unsigned long alloc_min,
+                             unsigned long alloc_max);
 
 efi_status_t efi_random_get_seed(void);
 
index 62d63f7a2645bf82525d79b5d8825e9bea023404..1a9808012abd36ee7f58ad0baf818cbae6df1b0b 100644 (file)
@@ -119,7 +119,7 @@ efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
                 */
                status = efi_random_alloc(*reserve_size, min_kimg_align,
                                          reserve_addr, phys_seed,
-                                         EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+                                         EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
                if (status != EFI_SUCCESS)
                        efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
        } else {
index 674a064b8f7adc68edf2412bb8e012250077c717..4e96a855fdf47b5b064b63b729d7dc989cd2b949 100644 (file)
@@ -17,7 +17,7 @@
 static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
                                         unsigned long size,
                                         unsigned long align_shift,
-                                        u64 alloc_limit)
+                                        u64 alloc_min, u64 alloc_max)
 {
        unsigned long align = 1UL << align_shift;
        u64 first_slot, last_slot, region_end;
@@ -30,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
                return 0;
 
        region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
-                        alloc_limit);
+                        alloc_max);
        if (region_end < size)
                return 0;
 
-       first_slot = round_up(md->phys_addr, align);
+       first_slot = round_up(max(md->phys_addr, alloc_min), align);
        last_slot = round_down(region_end - size + 1, align);
 
        if (first_slot > last_slot)
@@ -56,7 +56,8 @@ efi_status_t efi_random_alloc(unsigned long size,
                              unsigned long *addr,
                              unsigned long random_seed,
                              int memory_type,
-                             unsigned long alloc_limit)
+                             unsigned long alloc_min,
+                             unsigned long alloc_max)
 {
        unsigned long total_slots = 0, target_slot;
        unsigned long total_mirrored_slots = 0;
@@ -78,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size,
                efi_memory_desc_t *md = (void *)map->map + map_offset;
                unsigned long slots;
 
-               slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit);
+               slots = get_entry_num_slots(md, size, ilog2(align), alloc_min,
+                                           alloc_max);
                MD_NUM_SLOTS(md) = slots;
                total_slots += slots;
                if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
index 0d510c9a06a45925922595f1e44c7ee3b2a170a6..99429bc4b0c7eb0c639b84934fe614f8f8cb5721 100644 (file)
@@ -223,8 +223,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
        }
 }
 
-void efi_adjust_memory_range_protection(unsigned long start,
-                                       unsigned long size)
+efi_status_t efi_adjust_memory_range_protection(unsigned long start,
+                                               unsigned long size)
 {
        efi_status_t status;
        efi_gcd_memory_space_desc_t desc;
@@ -236,13 +236,17 @@ void efi_adjust_memory_range_protection(unsigned long start,
        rounded_end = roundup(start + size, EFI_PAGE_SIZE);
 
        if (memattr != NULL) {
-               efi_call_proto(memattr, clear_memory_attributes, rounded_start,
-                              rounded_end - rounded_start, EFI_MEMORY_XP);
-               return;
+               status = efi_call_proto(memattr, clear_memory_attributes,
+                                       rounded_start,
+                                       rounded_end - rounded_start,
+                                       EFI_MEMORY_XP);
+               if (status != EFI_SUCCESS)
+                       efi_warn("Failed to clear EFI_MEMORY_XP attribute\n");
+               return status;
        }
 
        if (efi_dxe_table == NULL)
-               return;
+               return EFI_SUCCESS;
 
        /*
         * Don't modify memory region attributes, they are
@@ -255,7 +259,7 @@ void efi_adjust_memory_range_protection(unsigned long start,
                status = efi_dxe_call(get_memory_space_descriptor, start, &desc);
 
                if (status != EFI_SUCCESS)
-                       return;
+                       break;
 
                next = desc.base_address + desc.length;
 
@@ -280,8 +284,10 @@ void efi_adjust_memory_range_protection(unsigned long start,
                                 unprotect_start,
                                 unprotect_start + unprotect_size,
                                 status);
+                       break;
                }
        }
+       return EFI_SUCCESS;
 }
 
 static void setup_unaccepted_memory(void)
@@ -793,6 +799,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
 
        status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
                                  seed[0], EFI_LOADER_CODE,
+                                 LOAD_PHYSICAL_ADDR,
                                  EFI_X86_KERNEL_ALLOC_LIMIT);
        if (status != EFI_SUCCESS)
                return status;
@@ -805,9 +812,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
 
        *kernel_entry = addr + entry;
 
-       efi_adjust_memory_range_protection(addr, kernel_total_size);
-
-       return EFI_SUCCESS;
+       return efi_adjust_memory_range_protection(addr, kernel_total_size);
 }
 
 static void __noreturn enter_kernel(unsigned long kernel_addr,
index 37c5a36b9d8cf9b2cad93f228502fd336d142908..1c20e99a6494423787ef1dd091739ed9cbc89a24 100644 (file)
@@ -5,8 +5,8 @@
 extern void trampoline_32bit_src(void *, bool);
 extern const u16 trampoline_ljmp_imm_offset;
 
-void efi_adjust_memory_range_protection(unsigned long start,
-                                       unsigned long size);
+efi_status_t efi_adjust_memory_range_protection(unsigned long start,
+                                               unsigned long size);
 
 #ifdef CONFIG_X86_64
 efi_status_t efi_setup_5level_paging(void);
index bdb17eac0cb401befbcc8b13820f9a3b416b6f19..1ceace956758682f592f6fe3f280b7260f7ca562 100644 (file)
@@ -119,7 +119,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
                }
 
                status = efi_random_alloc(alloc_size, min_kimg_align, &image_base,
-                                         seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+                                         seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
                if (status != EFI_SUCCESS) {
                        efi_err("Failed to allocate memory\n");
                        goto free_cmdline;
index 09525fb5c240e6686ff5588c55998d5815e20ff7..01f0f90ea4183119b0a4eedf82a3fe81f1b2f480 100644 (file)
@@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void)
                efi_memory_desc_t *md;
 
                for_each_efi_memory_desc(md) {
-                       int md_size = md->num_pages << EFI_PAGE_SHIFT;
+                       u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
                        struct resource *res;
 
                        if (!(md->attribute & EFI_MEMORY_SP))
index a48a58e0c61f8f1de483a368453025a48cd693ae..01c8ef14eaec3fa54ebd6ba6e032b37727228db7 100644 (file)
@@ -160,7 +160,7 @@ static int imx_dsp_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int imx_dsp_remove(struct platform_device *pdev)
+static void imx_dsp_remove(struct platform_device *pdev)
 {
        struct imx_dsp_chan *dsp_chan;
        struct imx_dsp_ipc *dsp_ipc;
@@ -173,8 +173,6 @@ static int imx_dsp_remove(struct platform_device *pdev)
                mbox_free_channel(dsp_chan->ch);
                kfree(dsp_chan->name);
        }
-
-       return 0;
 }
 
 static struct platform_driver imx_dsp_driver = {
@@ -182,7 +180,7 @@ static struct platform_driver imx_dsp_driver = {
                .name = "imx-dsp",
        },
        .probe = imx_dsp_probe,
-       .remove = imx_dsp_remove,
+       .remove_new = imx_dsp_remove,
 };
 builtin_platform_driver(imx_dsp_driver);
 
index 85e94ddc7204d80dfd3d5c8439e57adb2c52a978..a762302978de0f9fa3bdea869e1a99dcdc79cc45 100644 (file)
@@ -116,7 +116,7 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int mtk_adsp_ipc_remove(struct platform_device *pdev)
+static void mtk_adsp_ipc_remove(struct platform_device *pdev)
 {
        struct mtk_adsp_ipc *adsp_ipc = dev_get_drvdata(&pdev->dev);
        struct mtk_adsp_chan *adsp_chan;
@@ -126,8 +126,6 @@ static int mtk_adsp_ipc_remove(struct platform_device *pdev)
                adsp_chan = &adsp_ipc->chans[i];
                mbox_free_channel(adsp_chan->ch);
        }
-
-       return 0;
 }
 
 static struct platform_driver mtk_adsp_ipc_driver = {
@@ -135,7 +133,7 @@ static struct platform_driver mtk_adsp_ipc_driver = {
                .name = "mtk-adsp-ipc",
        },
        .probe = mtk_adsp_ipc_probe,
-       .remove = mtk_adsp_ipc_remove,
+       .remove_new = mtk_adsp_ipc_remove,
 };
 builtin_platform_driver(mtk_adsp_ipc_driver);
 
index 1448f61173b357f90802c905f6c5076895491626..03da9a4354f8864492e92e5c60a676b3629b9913 100644 (file)
@@ -731,7 +731,7 @@ err_sel:
        return err;
 }
 
-static int fw_cfg_sysfs_remove(struct platform_device *pdev)
+static void fw_cfg_sysfs_remove(struct platform_device *pdev)
 {
        pr_debug("fw_cfg: unloading.\n");
        fw_cfg_sysfs_cache_cleanup();
@@ -739,7 +739,6 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev)
        fw_cfg_io_cleanup();
        fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
        fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
-       return 0;
 }
 
 static const struct of_device_id fw_cfg_sysfs_mmio_match[] = {
@@ -758,7 +757,7 @@ MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
 
 static struct platform_driver fw_cfg_sysfs_driver = {
        .probe = fw_cfg_sysfs_probe,
-       .remove = fw_cfg_sysfs_remove,
+       .remove_new = fw_cfg_sysfs_remove,
        .driver = {
                .name = "fw_cfg",
                .of_match_table = fw_cfg_sysfs_mmio_match,
index 4cd290a60fbaaad10dc3d3ce9ced2146e2e6227d..322aada20f7421fea3a8d4ca6d95d81a2e897619 100644 (file)
@@ -317,7 +317,7 @@ static void rpi_firmware_shutdown(struct platform_device *pdev)
        rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0);
 }
 
-static int rpi_firmware_remove(struct platform_device *pdev)
+static void rpi_firmware_remove(struct platform_device *pdev)
 {
        struct rpi_firmware *fw = platform_get_drvdata(pdev);
 
@@ -327,8 +327,6 @@ static int rpi_firmware_remove(struct platform_device *pdev)
        rpi_clk = NULL;
 
        rpi_firmware_put(fw);
-
-       return 0;
 }
 
 static const struct of_device_id rpi_firmware_of_match[] = {
@@ -406,7 +404,7 @@ static struct platform_driver rpi_firmware_driver = {
        },
        .probe          = rpi_firmware_probe,
        .shutdown       = rpi_firmware_shutdown,
-       .remove         = rpi_firmware_remove,
+       .remove_new     = rpi_firmware_remove,
 };
 module_platform_driver(rpi_firmware_driver);
 
index 4f7a7abada48a6b5deadd226ed8d60738c20b6cd..e20cee9c2d320abaa81be38d6eb0c257ebaf3486 100644 (file)
@@ -793,17 +793,16 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int stratix10_rsu_remove(struct platform_device *pdev)
+static void stratix10_rsu_remove(struct platform_device *pdev)
 {
        struct stratix10_rsu_priv *priv = platform_get_drvdata(pdev);
 
        stratix10_svc_free_channel(priv->chan);
-       return 0;
 }
 
 static struct platform_driver stratix10_rsu_driver = {
        .probe = stratix10_rsu_probe,
-       .remove = stratix10_rsu_remove,
+       .remove_new = stratix10_rsu_remove,
        .driver = {
                .name = "stratix10-rsu",
                .dev_groups = rsu_groups,
index c693da60e9a97770f83dc7d91febcec687d18650..528f37417aea48532003125cb8eacebff312e160 100644 (file)
@@ -1251,7 +1251,7 @@ err_destroy_pool:
        return ret;
 }
 
-static int stratix10_svc_drv_remove(struct platform_device *pdev)
+static void stratix10_svc_drv_remove(struct platform_device *pdev)
 {
        struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
        struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
@@ -1267,13 +1267,11 @@ static int stratix10_svc_drv_remove(struct platform_device *pdev)
        if (ctrl->genpool)
                gen_pool_destroy(ctrl->genpool);
        list_del(&ctrl->node);
-
-       return 0;
 }
 
 static struct platform_driver stratix10_svc_driver = {
        .probe = stratix10_svc_drv_probe,
-       .remove = stratix10_svc_drv_remove,
+       .remove_new = stratix10_svc_drv_remove,
        .driver = {
                .name = "stratix10-svc",
                .of_match_table = stratix10_svc_drv_match,
index 19706bd2642adbec408cbba3bf8cf8bda4b51a6f..3c197db42c9d936866f9ff68cf7561e4735cfe1e 100644 (file)
@@ -71,7 +71,7 @@ EXPORT_SYMBOL_GPL(sysfb_disable);
 
 static __init int sysfb_init(void)
 {
-       const struct screen_info *si = &screen_info;
+       struct screen_info *si = &screen_info;
        struct simplefb_platform_data mode;
        const char *name;
        bool compatible;
@@ -119,18 +119,6 @@ static __init int sysfb_init(void)
        if (ret)
                goto err;
 
-       /*
-        * The firmware framebuffer is now maintained by the created
-        * device. Disable screen_info after we've consumed it. Prevents
-        * invalid access during kexec reboots.
-        *
-        * TODO: Vgacon still relies on the global screen_info. Make
-        *       vgacon work with the platform device, so we can clear
-        *       the screen_info unconditionally.
-        */
-       if (strcmp(name, "platform-framebuffer"))
-               screen_info.orig_video_isVGA = 0;
-
        goto unlock_mutex;
 err:
        platform_device_put(pd);
@@ -140,4 +128,4 @@ unlock_mutex:
 }
 
 /* must execute after PCI subsystem for EFI quirks */
-subsys_initcall_sync(sysfb_init);
+device_initcall(sysfb_init);
index 2de0fb139ce1762164d3e64b56793558c6917b4a..31d962cdd6eb2e0c6ba1cb3ea704093a4e95d325 100644 (file)
@@ -554,7 +554,7 @@ put_kobj:
        return ret;
 }
 
-static int turris_mox_rwtm_remove(struct platform_device *pdev)
+static void turris_mox_rwtm_remove(struct platform_device *pdev)
 {
        struct mox_rwtm *rwtm = platform_get_drvdata(pdev);
 
@@ -562,8 +562,6 @@ static int turris_mox_rwtm_remove(struct platform_device *pdev)
        sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
        kobject_put(rwtm_to_kobj(rwtm));
        mbox_free_channel(rwtm->mbox);
-
-       return 0;
 }
 
 static const struct of_device_id turris_mox_rwtm_match[] = {
@@ -576,7 +574,7 @@ MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match);
 
 static struct platform_driver turris_mox_rwtm_driver = {
        .probe  = turris_mox_rwtm_probe,
-       .remove = turris_mox_rwtm_remove,
+       .remove_new = turris_mox_rwtm_remove,
        .driver = {
                .name           = DRIVER_NAME,
                .of_match_table = turris_mox_rwtm_match,
index b0d22d4455d9500828920b7223168ec31cf099ac..79789f0563f6a3df89f396e12299bd1a2a3452ce 100644 (file)
@@ -92,6 +92,8 @@ static int zynqmp_pm_ret_code(u32 ret_status)
                return 0;
        case XST_PM_NO_FEATURE:
                return -ENOTSUPP;
+       case XST_PM_INVALID_VERSION:
+               return -EOPNOTSUPP;
        case XST_PM_NO_ACCESS:
                return -EACCES;
        case XST_PM_ABORT_SUSPEND:
@@ -101,13 +103,13 @@ static int zynqmp_pm_ret_code(u32 ret_status)
        case XST_PM_INTERNAL:
        case XST_PM_CONFLICT:
        case XST_PM_INVALID_NODE:
+       case XST_PM_INVALID_CRC:
        default:
                return -EINVAL;
        }
 }
 
-static noinline int do_fw_call_fail(u64 arg0, u64 arg1, u64 arg2,
-                                   u32 *ret_payload)
+static noinline int do_fw_call_fail(u32 *ret_payload, u32 num_args, ...)
 {
        return -ENODEV;
 }
@@ -116,25 +118,35 @@ static noinline int do_fw_call_fail(u64 arg0, u64 arg1, u64 arg2,
  * PM function call wrapper
  * Invoke do_fw_call_smc or do_fw_call_hvc, depending on the configuration
  */
-static int (*do_fw_call)(u64, u64, u64, u32 *ret_payload) = do_fw_call_fail;
+static int (*do_fw_call)(u32 *ret_payload, u32, ...) = do_fw_call_fail;
 
 /**
  * do_fw_call_smc() - Call system-level platform management layer (SMC)
- * @arg0:              Argument 0 to SMC call
- * @arg1:              Argument 1 to SMC call
- * @arg2:              Argument 2 to SMC call
+ * @num_args:          Number of variable arguments should be <= 8
  * @ret_payload:       Returned value array
  *
  * Invoke platform management function via SMC call (no hypervisor present).
  *
  * Return: Returns status, either success or error+reason
  */
-static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2,
-                                  u32 *ret_payload)
+static noinline int do_fw_call_smc(u32 *ret_payload, u32 num_args, ...)
 {
        struct arm_smccc_res res;
+       u64 args[8] = {0};
+       va_list arg_list;
+       u8 i;
 
-       arm_smccc_smc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
+       if (num_args > 8)
+               return -EINVAL;
+
+       va_start(arg_list, num_args);
+
+       for (i = 0; i < num_args; i++)
+               args[i] = va_arg(arg_list, u64);
+
+       va_end(arg_list);
+
+       arm_smccc_smc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res);
 
        if (ret_payload) {
                ret_payload[0] = lower_32_bits(res.a0);
@@ -148,9 +160,7 @@ static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2,
 
 /**
  * do_fw_call_hvc() - Call system-level platform management layer (HVC)
- * @arg0:              Argument 0 to HVC call
- * @arg1:              Argument 1 to HVC call
- * @arg2:              Argument 2 to HVC call
+ * @num_args:          Number of variable arguments should be <= 8
  * @ret_payload:       Returned value array
  *
  * Invoke platform management function via HVC
@@ -159,12 +169,24 @@ static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2,
  *
  * Return: Returns status, either success or error+reason
  */
-static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
-                                  u32 *ret_payload)
+static noinline int do_fw_call_hvc(u32 *ret_payload, u32 num_args, ...)
 {
        struct arm_smccc_res res;
+       u64 args[8] = {0};
+       va_list arg_list;
+       u8 i;
+
+       if (num_args > 8)
+               return -EINVAL;
+
+       va_start(arg_list, num_args);
+
+       for (i = 0; i < num_args; i++)
+               args[i] = va_arg(arg_list, u64);
+
+       va_end(arg_list);
 
-       arm_smccc_hvc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
+       arm_smccc_hvc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res);
 
        if (ret_payload) {
                ret_payload[0] = lower_32_bits(res.a0);
@@ -180,11 +202,31 @@ static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
 {
        int ret;
        u64 smc_arg[2];
+       u32 module_id;
+       u32 feature_check_api_id;
 
-       smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
-       smc_arg[1] = api_id;
+       module_id = FIELD_GET(MODULE_ID_MASK, api_id);
 
-       ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+       /*
+        * Feature check of APIs belonging to PM, XSEM, and TF-A are handled by calling
+        * PM_FEATURE_CHECK API. For other modules, call PM_API_FEATURES API.
+        */
+       if (module_id == PM_MODULE_ID || module_id == XSEM_MODULE_ID || module_id == TF_A_MODULE_ID)
+               feature_check_api_id = PM_FEATURE_CHECK;
+       else
+               feature_check_api_id = PM_API_FEATURES;
+
+       /*
+        * Feature check of TF-A APIs is done in the TF-A layer and it expects for
+        * MODULE_ID_MASK bits of SMC's arg[0] to be the same as PM_MODULE_ID.
+        */
+       if (module_id == TF_A_MODULE_ID)
+               module_id = PM_MODULE_ID;
+
+       smc_arg[0] = PM_SIP_SVC | FIELD_PREP(MODULE_ID_MASK, module_id) | feature_check_api_id;
+       smc_arg[1] = (api_id & API_ID_MASK);
+
+       ret = do_fw_call(ret_payload, 2, smc_arg[0], smc_arg[1]);
        if (ret)
                ret = -EOPNOTSUPP;
        else
@@ -295,11 +337,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
  * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
  *                        caller function depending on the configuration
  * @pm_api_id:         Requested PM-API call
- * @arg0:              Argument 0 to requested PM-API call
- * @arg1:              Argument 1 to requested PM-API call
- * @arg2:              Argument 2 to requested PM-API call
- * @arg3:              Argument 3 to requested PM-API call
  * @ret_payload:       Returned value array
+ * @num_args:          Number of arguments to requested PM-API call
  *
  * Invoke platform management function for SMC or HVC call, depending on
  * configuration.
@@ -316,26 +355,38 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
  *
  * Return: Returns status, either success or error+reason
  */
-int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
-                       u32 arg2, u32 arg3, u32 *ret_payload)
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...)
 {
        /*
         * Added SIP service call Function Identifier
         * Make sure to stay in x0 register
         */
-       u64 smc_arg[4];
-       int ret;
+       u64 smc_arg[8];
+       int ret, i;
+       va_list arg_list;
+       u32 args[14] = {0};
+
+       if (num_args > 14)
+               return -EINVAL;
+
+       va_start(arg_list, num_args);
 
        /* Check if feature is supported or not */
        ret = zynqmp_pm_feature(pm_api_id);
        if (ret < 0)
                return ret;
 
+       for (i = 0; i < num_args; i++)
+               args[i] = va_arg(arg_list, u32);
+
+       va_end(arg_list);
+
        smc_arg[0] = PM_SIP_SVC | pm_api_id;
-       smc_arg[1] = ((u64)arg1 << 32) | arg0;
-       smc_arg[2] = ((u64)arg3 << 32) | arg2;
+       for (i = 0; i < 7; i++)
+               smc_arg[i + 1] = ((u64)args[(i * 2) + 1] << 32) | args[i * 2];
 
-       return do_fw_call(smc_arg[0], smc_arg[1], smc_arg[2], ret_payload);
+       return do_fw_call(ret_payload, 8, smc_arg[0], smc_arg[1], smc_arg[2], smc_arg[3],
+                         smc_arg[4], smc_arg[5], smc_arg[6], smc_arg[7]);
 }
 
 static u32 pm_api_version;
@@ -347,14 +398,12 @@ int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
 {
        int ret;
 
-       ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, sgi_num, reset, 0, 0,
-                                 NULL);
-       if (!ret)
+       ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, NULL, 2, sgi_num, reset);
+       if (ret != -EOPNOTSUPP && !ret)
                return ret;
 
        /* try old implementation as fallback strategy if above fails */
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
-                                  reset, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, IOCTL_REGISTER_SGI, sgi_num, reset);
 }
 
 /**
@@ -376,7 +425,7 @@ int zynqmp_pm_get_api_version(u32 *version)
                *version = pm_api_version;
                return 0;
        }
-       ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, 0, 0, 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, ret_payload, 0);
        *version = ret_payload[1];
 
        return ret;
@@ -399,7 +448,7 @@ int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
        if (!idcode || !version)
                return -EINVAL;
 
-       ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0);
        *idcode = ret_payload[1];
        *version = ret_payload[2];
 
@@ -414,7 +463,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
  *
  * Return: Returns status, either success or error+reason
  */
-static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
 {
        u32 ret_payload[PAYLOAD_ARG_CNT];
        u32 idcode;
@@ -427,7 +476,7 @@ static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
                return 0;
        }
 
-       ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0);
        if (ret < 0)
                return ret;
 
@@ -439,6 +488,7 @@ static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_family_info);
 
 /**
  * zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
@@ -459,8 +509,7 @@ static int zynqmp_pm_get_trustzone_version(u32 *version)
                *version = pm_tz_version;
                return 0;
        }
-       ret = zynqmp_pm_invoke_fn(PM_GET_TRUSTZONE_VERSION, 0, 0,
-                                 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_GET_TRUSTZONE_VERSION, ret_payload, 0);
        *version = ret_payload[1];
 
        return ret;
@@ -507,8 +556,8 @@ int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
 {
        int ret;
 
-       ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, qdata.qid, qdata.arg1,
-                                 qdata.arg2, qdata.arg3, out);
+       ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid, qdata.arg1, qdata.arg2,
+                                 qdata.arg3);
 
        /*
         * For clock name query, all bytes in SMC response are clock name
@@ -530,7 +579,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_query_data);
  */
 int zynqmp_pm_clock_enable(u32 clock_id)
 {
-       return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, NULL, 1, clock_id);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
 
@@ -545,7 +594,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
  */
 int zynqmp_pm_clock_disable(u32 clock_id)
 {
-       return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, NULL, 1, clock_id);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_clock_disable);
 
@@ -564,8 +613,7 @@ int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
        u32 ret_payload[PAYLOAD_ARG_CNT];
        int ret;
 
-       ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETSTATE, clock_id, 0,
-                                 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETSTATE, ret_payload, 1, clock_id);
        *state = ret_payload[1];
 
        return ret;
@@ -584,8 +632,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getstate);
  */
 int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
 {
-       return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider,
-                                  0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, NULL, 2, clock_id, divider);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setdivider);
 
@@ -604,55 +651,13 @@ int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
        u32 ret_payload[PAYLOAD_ARG_CNT];
        int ret;
 
-       ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETDIVIDER, clock_id, 0,
-                                 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETDIVIDER, ret_payload, 1, clock_id);
        *divider = ret_payload[1];
 
        return ret;
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getdivider);
 
-/**
- * zynqmp_pm_clock_setrate() - Set the clock rate for given id
- * @clock_id:  ID of the clock
- * @rate:      rate value in hz
- *
- * This function is used by master to set rate for any clock.
- *
- * Return: Returns status, either success or error+reason
- */
-int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
-{
-       return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id,
-                                  lower_32_bits(rate),
-                                  upper_32_bits(rate),
-                                  0, NULL);
-}
-EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setrate);
-
-/**
- * zynqmp_pm_clock_getrate() - Get the clock rate for given id
- * @clock_id:  ID of the clock
- * @rate:      rate value in hz
- *
- * This function is used by master to get rate
- * for any clock.
- *
- * Return: Returns status, either success or error+reason
- */
-int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
-{
-       u32 ret_payload[PAYLOAD_ARG_CNT];
-       int ret;
-
-       ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETRATE, clock_id, 0,
-                                 0, 0, ret_payload);
-       *rate = ((u64)ret_payload[2] << 32) | ret_payload[1];
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
-
 /**
  * zynqmp_pm_clock_setparent() - Set the clock parent for given id
  * @clock_id:  ID of the clock
@@ -664,8 +669,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
  */
 int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
 {
-       return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id,
-                                  parent_id, 0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, NULL, 2, clock_id, parent_id);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setparent);
 
@@ -684,8 +688,7 @@ int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
        u32 ret_payload[PAYLOAD_ARG_CNT];
        int ret;
 
-       ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETPARENT, clock_id, 0,
-                                 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETPARENT, ret_payload, 1, clock_id);
        *parent_id = ret_payload[1];
 
        return ret;
@@ -704,8 +707,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getparent);
  */
 int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_MODE,
-                                  clk_id, mode, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_PLL_FRAC_MODE, clk_id, mode);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
 
@@ -721,8 +723,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
  */
 int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_MODE,
-                                  clk_id, 0, mode);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, mode, 3, 0, IOCTL_GET_PLL_FRAC_MODE, clk_id);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
 
@@ -739,8 +740,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
  */
 int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_DATA,
-                                  clk_id, data, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_PLL_FRAC_DATA, clk_id, data);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
 
@@ -756,8 +756,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
  */
 int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_DATA,
-                                  clk_id, 0, data);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, data, 3, 0, IOCTL_GET_PLL_FRAC_DATA, clk_id);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
 
@@ -778,9 +777,8 @@ int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
        u32 mask = (node_id == NODE_SD_0) ? GENMASK(15, 0) : GENMASK(31, 16);
 
        if (value) {
-               return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
-                                          IOCTL_SET_SD_TAPDELAY,
-                                          type, value, NULL);
+               return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node_id, IOCTL_SET_SD_TAPDELAY, type,
+                                          value);
        }
 
        /*
@@ -798,7 +796,7 @@ int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
         * Use PM_MMIO_READ/PM_MMIO_WRITE to re-implement the missing counter
         * part of IOCTL_SET_SD_TAPDELAY which clears SDx_ITAPDLYENA bits.
         */
-       return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, reg, mask, 0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, NULL, 2, reg, mask);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
 
@@ -814,8 +812,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
  */
 int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
-                                  type, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, node_id, IOCTL_SD_DLL_RESET, type);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
 
@@ -831,8 +828,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
  */
 int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, dev_id, IOCTL_OSPI_MUX_SELECT,
-                                  select, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, dev_id, IOCTL_OSPI_MUX_SELECT, select);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select);
 
@@ -847,8 +843,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select);
  */
 int zynqmp_pm_write_ggs(u32 index, u32 value)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_GGS,
-                                  index, value, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_WRITE_GGS, index, value);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
 
@@ -863,8 +858,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
  */
 int zynqmp_pm_read_ggs(u32 index, u32 *value)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_GGS,
-                                  index, 0, value);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, value, 3, 0, IOCTL_READ_GGS, index);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
 
@@ -880,8 +874,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
  */
 int zynqmp_pm_write_pggs(u32 index, u32 value)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_PGGS, index, value,
-                                  NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_WRITE_PGGS, index, value);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
 
@@ -897,15 +890,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
  */
 int zynqmp_pm_read_pggs(u32 index, u32 *value)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_PGGS, index, 0,
-                                  value);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, value, 3, 0, IOCTL_READ_PGGS, index);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
 
 int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_TAPDELAY_BYPASS,
-                                  index, value, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_TAPDELAY_BYPASS, index, value);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_set_tapdelay_bypass);
 
@@ -920,8 +911,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_tapdelay_bypass);
  */
 int zynqmp_pm_set_boot_health_status(u32 value)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_BOOT_HEALTH_STATUS,
-                                  value, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, 0, IOCTL_SET_BOOT_HEALTH_STATUS, value);
 }
 
 /**
@@ -935,8 +925,7 @@ int zynqmp_pm_set_boot_health_status(u32 value)
 int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
                           const enum zynqmp_pm_reset_action assert_flag)
 {
-       return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
-                                  0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, NULL, 2, reset, assert_flag);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
 
@@ -955,8 +944,7 @@ int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
        if (!status)
                return -EINVAL;
 
-       ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0,
-                                 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, ret_payload, 1, reset);
        *status = ret_payload[1];
 
        return ret;
@@ -981,9 +969,8 @@ int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags)
        u32 ret_payload[PAYLOAD_ARG_CNT];
        int ret;
 
-       ret = zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
-                                 upper_32_bits(address), size, flags,
-                                 ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_FPGA_LOAD, ret_payload, 4, lower_32_bits(address),
+                                 upper_32_bits(address), size, flags);
        if (ret_payload[0])
                return -ret_payload[0];
 
@@ -1008,7 +995,7 @@ int zynqmp_pm_fpga_get_status(u32 *value)
        if (!value)
                return -EINVAL;
 
-       ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, ret_payload, 0);
        *value = ret_payload[1];
 
        return ret;
@@ -1036,11 +1023,9 @@ int zynqmp_pm_fpga_get_config_status(u32 *value)
        lower_addr = lower_32_bits((u64)&buf);
        upper_addr = upper_32_bits((u64)&buf);
 
-       ret = zynqmp_pm_invoke_fn(PM_FPGA_READ,
-                                 XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET,
-                                 lower_addr, upper_addr,
-                                 XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG,
-                                 ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4,
+                                 XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr,
+                                 XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
 
        *value = ret_payload[1];
 
@@ -1058,7 +1043,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_config_status);
  */
 int zynqmp_pm_pinctrl_request(const u32 pin)
 {
-       return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, NULL, 1, pin);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request);
 
@@ -1072,35 +1057,10 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request);
  */
 int zynqmp_pm_pinctrl_release(const u32 pin)
 {
-       return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, NULL, 1, pin);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_release);
 
-/**
- * zynqmp_pm_pinctrl_get_function - Read function id set for the given pin
- * @pin: Pin number
- * @id: Buffer to store function ID
- *
- * This function provides the function currently set for the given pin.
- *
- * Return: Returns status, either success or error+reason
- */
-int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
-{
-       u32 ret_payload[PAYLOAD_ARG_CNT];
-       int ret;
-
-       if (!id)
-               return -EINVAL;
-
-       ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0,
-                                 0, 0, ret_payload);
-       *id = ret_payload[1];
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function);
-
 /**
  * zynqmp_pm_pinctrl_set_function - Set requested function for the pin
  * @pin: Pin number
@@ -1112,8 +1072,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function);
  */
 int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
 {
-       return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id,
-                                  0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, NULL, 2, pin, id);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_function);
 
@@ -1136,8 +1095,7 @@ int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
        if (!value)
                return -EINVAL;
 
-       ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param,
-                                 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, ret_payload, 2, pin, param);
        *value = ret_payload[1];
 
        return ret;
@@ -1166,8 +1124,7 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
                        return -EOPNOTSUPP;
        }
 
-       return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin,
-                                  param, value, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, NULL, 3, pin, param, value);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_config);
 
@@ -1185,8 +1142,7 @@ unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode)
        unsigned int ret;
        u32 ret_payload[PAYLOAD_ARG_CNT];
 
-       ret = zynqmp_pm_invoke_fn(PM_MMIO_READ, CRL_APB_BOOT_PIN_CTRL, 0,
-                                 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_MMIO_READ, ret_payload, 1, CRL_APB_BOOT_PIN_CTRL);
 
        *ps_mode = ret_payload[1];
 
@@ -1205,8 +1161,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_read);
  */
 int zynqmp_pm_bootmode_write(u32 ps_mode)
 {
-       return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, CRL_APB_BOOT_PIN_CTRL,
-                                  CRL_APB_BOOTPIN_CTRL_MASK, ps_mode, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, NULL, 3, CRL_APB_BOOT_PIN_CTRL,
+                                  CRL_APB_BOOTPIN_CTRL_MASK, ps_mode);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_write);
 
@@ -1221,7 +1177,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_write);
  */
 int zynqmp_pm_init_finalize(void)
 {
-       return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
 
@@ -1235,7 +1191,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
  */
 int zynqmp_pm_set_suspend_mode(u32 mode)
 {
-       return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, NULL, 1, mode);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
 
@@ -1254,8 +1210,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
 int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
                           const u32 qos, const enum zynqmp_pm_request_ack ack)
 {
-       return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
-                                  qos, ack, NULL);
+       return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, NULL, 4, node, capabilities, qos, ack);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
 
@@ -1271,7 +1226,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
  */
 int zynqmp_pm_release_node(const u32 node)
 {
-       return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, NULL, 1, node);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_release_node);
 
@@ -1290,8 +1245,7 @@ int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
        u32 ret_payload[PAYLOAD_ARG_CNT];
        int ret;
 
-       ret = zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
-                                 IOCTL_GET_RPU_OPER_MODE, 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 2, node_id, IOCTL_GET_RPU_OPER_MODE);
 
        /* only set rpu_mode if no error */
        if (ret == XST_PM_SUCCESS)
@@ -1313,9 +1267,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_rpu_mode);
  */
 int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
-                                  IOCTL_SET_RPU_OPER_MODE, (u32)rpu_mode,
-                                  0, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, node_id, IOCTL_SET_RPU_OPER_MODE,
+                                  (u32)rpu_mode);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_set_rpu_mode);
 
@@ -1331,9 +1284,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_rpu_mode);
  */
 int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
-                                  IOCTL_TCM_COMB_CONFIG, (u32)tcm_mode, 0,
-                                  NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, node_id, IOCTL_TCM_COMB_CONFIG,
+                                  (u32)tcm_mode);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);
 
@@ -1348,7 +1300,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);
 int zynqmp_pm_force_pwrdwn(const u32 node,
                           const enum zynqmp_pm_request_ack ack)
 {
-       return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, node, ack, 0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, NULL, 2, node, ack);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_force_pwrdwn);
 
@@ -1367,8 +1319,8 @@ int zynqmp_pm_request_wake(const u32 node,
                           const enum zynqmp_pm_request_ack ack)
 {
        /* set_addr flag is encoded into 1st bit of address */
-       return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, node, address | set_addr,
-                                  address >> 32, ack, NULL);
+       return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, NULL, 4, node, address | set_addr,
+                                  address >> 32, ack);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_request_wake);
 
@@ -1388,15 +1340,14 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
                              const u32 qos,
                              const enum zynqmp_pm_request_ack ack)
 {
-       return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
-                                  qos, ack, NULL);
+       return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, NULL, 4, node, capabilities, qos, ack);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
 
 /**
  * zynqmp_pm_load_pdi - Load and process PDI
- * @src:       Source device where PDI is located
- * @address:   PDI src address
+ * @src:       Source device where PDI is located
+ * @address:   PDI src address
  *
  * This function provides support to load PDI from linux
  *
@@ -1404,9 +1355,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
  */
 int zynqmp_pm_load_pdi(const u32 src, const u64 address)
 {
-       return zynqmp_pm_invoke_fn(PM_LOAD_PDI, src,
-                                  lower_32_bits(address),
-                                  upper_32_bits(address), 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_LOAD_PDI, NULL, 3, src, lower_32_bits(address),
+                                  upper_32_bits(address));
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_load_pdi);
 
@@ -1426,9 +1376,8 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out)
        if (!out)
                return -EINVAL;
 
-       ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, upper_32_bits(address),
-                                 lower_32_bits(address),
-                                 0, 0, ret_payload);
+       ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, ret_payload, 2, upper_32_bits(address),
+                                 lower_32_bits(address));
        *out = ret_payload[1];
 
        return ret;
@@ -1456,8 +1405,7 @@ int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags)
        u32 lower_addr = lower_32_bits(address);
        u32 upper_addr = upper_32_bits(address);
 
-       return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr,
-                                  size, flags, NULL);
+       return zynqmp_pm_invoke_fn(PM_SECURE_SHA, NULL, 4, upper_addr, lower_addr, size, flags);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
 
@@ -1479,8 +1427,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
 int zynqmp_pm_register_notifier(const u32 node, const u32 event,
                                const u32 wake, const u32 enable)
 {
-       return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event,
-                                  wake, enable, NULL);
+       return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, NULL, 4, node, event, wake, enable);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier);
 
@@ -1493,8 +1440,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier);
  */
 int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
 {
-       return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
-                                  0, 0, NULL);
+       return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, NULL, 2, type, subtype);
 }
 
 /**
@@ -1506,8 +1452,7 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
  */
 int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_FEATURE_CONFIG,
-                                  id, value, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_FEATURE_CONFIG, id, value);
 }
 
 /**
@@ -1520,8 +1465,7 @@ int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value)
 int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
                                 u32 *payload)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_FEATURE_CONFIG,
-                                  id, 0, payload);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, payload, 3, 0, IOCTL_GET_FEATURE_CONFIG, id);
 }
 
 /**
@@ -1534,8 +1478,7 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
  */
 int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_SD_CONFIG,
-                                  config, value, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node, IOCTL_SET_SD_CONFIG, config, value);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config);
 
@@ -1550,8 +1493,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config);
 int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
                             u32 value)
 {
-       return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_GEM_CONFIG,
-                                  config, value, NULL);
+       return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node, IOCTL_SET_GEM_CONFIG, config, value);
 }
 EXPORT_SYMBOL_GPL(zynqmp_pm_set_gem_config);
 
@@ -1916,7 +1858,6 @@ ATTRIBUTE_GROUPS(zynqmp_firmware);
 static int zynqmp_firmware_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct device_node *np;
        struct zynqmp_devinfo *devinfo;
        int ret;
 
@@ -1924,22 +1865,9 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
-       if (!np) {
-               np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
-               if (!np)
-                       return 0;
-
+       ret = do_feature_check_call(PM_FEATURE_CHECK);
+       if (ret >= 0 && ((ret & FIRMWARE_VERSION_MASK) >= PM_API_VERSION_1))
                feature_check_enabled = true;
-       }
-
-       if (!feature_check_enabled) {
-               ret = do_feature_check_call(PM_FEATURE_CHECK);
-               if (ret >= 0)
-                       feature_check_enabled = true;
-       }
-
-       of_node_put(np);
 
        devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL);
        if (!devinfo)
@@ -1992,19 +1920,17 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 
        zynqmp_pm_api_debugfs_init();
 
-       np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
-       if (np) {
+       if (pm_family_code == VERSAL_FAMILY_CODE) {
                em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
                                                       -1, NULL, 0);
                if (IS_ERR(em_dev))
                        dev_err_probe(&pdev->dev, PTR_ERR(em_dev), "EM register fail with error\n");
        }
-       of_node_put(np);
 
        return of_platform_populate(dev->of_node, NULL, NULL, dev);
 }
 
-static int zynqmp_firmware_remove(struct platform_device *pdev)
+static void zynqmp_firmware_remove(struct platform_device *pdev)
 {
        struct pm_api_feature_data *feature_data;
        struct hlist_node *tmp;
@@ -2019,8 +1945,6 @@ static int zynqmp_firmware_remove(struct platform_device *pdev)
        }
 
        platform_device_unregister(em_dev);
-
-       return 0;
 }
 
 static const struct of_device_id zynqmp_firmware_of_match[] = {
@@ -2037,6 +1961,6 @@ static struct platform_driver zynqmp_firmware_driver = {
                .dev_groups = zynqmp_firmware_groups,
        },
        .probe = zynqmp_firmware_probe,
-       .remove = zynqmp_firmware_remove,
+       .remove_new = zynqmp_firmware_remove,
 };
 module_platform_driver(zynqmp_firmware_driver);
index 1fa2ccc321abfc14ab6482377953037907836548..6b60ca004345deced18963c9feb6d6803653454c 100644 (file)
@@ -147,20 +147,18 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int alt_fpga_bridge_remove(struct platform_device *pdev)
+static void alt_fpga_bridge_remove(struct platform_device *pdev)
 {
        struct fpga_bridge *br = platform_get_drvdata(pdev);
 
        fpga_bridge_unregister(br);
-
-       return 0;
 }
 
 MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
 
 static struct platform_driver altera_fpga_driver = {
        .probe = alt_fpga_bridge_probe,
-       .remove = alt_fpga_bridge_remove,
+       .remove_new = alt_fpga_bridge_remove,
        .driver = {
                .name   = "altera_fpga2sdram_bridge",
                .of_match_table = of_match_ptr(altera_fpga_of_match),
index 0c3fb8226908907ac05edaa0a807814798288f1f..44061cb16f8770fe87369f5be188460987a0c50a 100644 (file)
@@ -253,18 +253,16 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int altera_freeze_br_remove(struct platform_device *pdev)
+static void altera_freeze_br_remove(struct platform_device *pdev)
 {
        struct fpga_bridge *br = platform_get_drvdata(pdev);
 
        fpga_bridge_unregister(br);
-
-       return 0;
 }
 
 static struct platform_driver altera_freeze_br_driver = {
        .probe = altera_freeze_br_probe,
-       .remove = altera_freeze_br_remove,
+       .remove_new = altera_freeze_br_remove,
        .driver = {
                .name   = "altera_freeze_br",
                .of_match_table = altera_freeze_br_of_match,
index 5786635032972ba60f7f4d78313e1212f298f59d..6f8e24be19c6df7a653bbe990f2441d07b3f14e3 100644 (file)
@@ -191,7 +191,7 @@ err:
        return ret;
 }
 
-static int alt_fpga_bridge_remove(struct platform_device *pdev)
+static void alt_fpga_bridge_remove(struct platform_device *pdev)
 {
        struct fpga_bridge *bridge = platform_get_drvdata(pdev);
        struct altera_hps2fpga_data *priv = bridge->priv;
@@ -199,15 +199,13 @@ static int alt_fpga_bridge_remove(struct platform_device *pdev)
        fpga_bridge_unregister(bridge);
 
        clk_disable_unprepare(priv->clk);
-
-       return 0;
 }
 
 MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
 
 static struct platform_driver alt_fpga_bridge_driver = {
        .probe = alt_fpga_bridge_probe,
-       .remove = alt_fpga_bridge_remove,
+       .remove_new = alt_fpga_bridge_remove,
        .driver = {
                .name   = "altera_hps2fpga_bridge",
                .of_match_table = of_match_ptr(altera_fpga_of_match),
index 7f621e96d3b8d8fb0dce599a07a4394b3a702895..c0a75ca360d697a9f736eb19a9f183f2b5143b90 100644 (file)
@@ -932,15 +932,13 @@ exit:
        return ret;
 }
 
-static int afu_remove(struct platform_device *pdev)
+static void afu_remove(struct platform_device *pdev)
 {
        dev_dbg(&pdev->dev, "%s\n", __func__);
 
        dfl_fpga_dev_ops_unregister(pdev);
        dfl_fpga_dev_feature_uinit(pdev);
        afu_dev_destroy(pdev);
-
-       return 0;
 }
 
 static const struct attribute_group *afu_dev_groups[] = {
@@ -956,7 +954,7 @@ static struct platform_driver afu_driver = {
                .dev_groups = afu_dev_groups,
        },
        .probe   = afu_probe,
-       .remove  = afu_remove,
+       .remove_new = afu_remove,
 };
 
 static int __init afu_init(void)
index 808d1f4d76df8f3f69569f598cc6ad1c9b73c47d..0b01b389527760933899f232ba33f3d726a3dc11 100644 (file)
@@ -78,7 +78,7 @@ static int fme_br_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int fme_br_remove(struct platform_device *pdev)
+static void fme_br_remove(struct platform_device *pdev)
 {
        struct fpga_bridge *br = platform_get_drvdata(pdev);
        struct fme_br_priv *priv = br->priv;
@@ -89,8 +89,6 @@ static int fme_br_remove(struct platform_device *pdev)
                put_device(&priv->port_pdev->dev);
        if (priv->port_ops)
                dfl_fpga_port_ops_put(priv->port_ops);
-
-       return 0;
 }
 
 static struct platform_driver fme_br_driver = {
@@ -98,7 +96,7 @@ static struct platform_driver fme_br_driver = {
                .name    = DFL_FPGA_FME_BRIDGE,
        },
        .probe   = fme_br_probe,
-       .remove  = fme_br_remove,
+       .remove_new = fme_br_remove,
 };
 
 module_platform_driver(fme_br_driver);
index 3dcf990bd261f83369af5f4b4f52e3a8d2355c06..a2b5da0093dac7c0ee21022b0a9d5aff17850c64 100644 (file)
@@ -730,13 +730,11 @@ exit:
        return ret;
 }
 
-static int fme_remove(struct platform_device *pdev)
+static void fme_remove(struct platform_device *pdev)
 {
        dfl_fpga_dev_ops_unregister(pdev);
        dfl_fpga_dev_feature_uinit(pdev);
        fme_dev_destroy(pdev);
-
-       return 0;
 }
 
 static const struct attribute_group *fme_dev_groups[] = {
@@ -751,7 +749,7 @@ static struct platform_driver fme_driver = {
                .dev_groups = fme_dev_groups,
        },
        .probe   = fme_probe,
-       .remove  = fme_remove,
+       .remove_new = fme_remove,
 };
 
 module_platform_driver(fme_driver);
index 4aebde0a7f1c35f308c4906931e050c886e079d1..71616f8b4982de143fed31bdcd42deddf8367813 100644 (file)
@@ -61,15 +61,13 @@ eprobe_mgr_put:
        return ret;
 }
 
-static int fme_region_remove(struct platform_device *pdev)
+static void fme_region_remove(struct platform_device *pdev)
 {
        struct fpga_region *region = platform_get_drvdata(pdev);
        struct fpga_manager *mgr = region->mgr;
 
        fpga_region_unregister(region);
        fpga_mgr_put(mgr);
-
-       return 0;
 }
 
 static struct platform_driver fme_region_driver = {
@@ -77,7 +75,7 @@ static struct platform_driver fme_region_driver = {
                .name    = DFL_FPGA_FME_REGION,
        },
        .probe   = fme_region_probe,
-       .remove  = fme_region_remove,
+       .remove_new = fme_region_remove,
 };
 
 module_platform_driver(fme_region_driver);
index e73f88050f08d9066990ed329cf4b163e7cc238f..e6d12fbab653fb9a5fb622497e284ddd553223b3 100644 (file)
@@ -2008,8 +2008,8 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
            (hdr.start + hdr.count < hdr.start))
                return -EINVAL;
 
-       fds = memdup_user((void __user *)(arg + sizeof(hdr)),
-                         array_size(hdr.count, sizeof(s32)));
+       fds = memdup_array_user((void __user *)(arg + sizeof(hdr)),
+                               hdr.count, sizeof(s32));
        if (IS_ERR(fds))
                return PTR_ERR(fds);
 
index 31af2e08c825fa716606faf5f4ecc03b015a2b6f..89851b1337097c0a58750a5feaa4dbdd4bd12f86 100644 (file)
@@ -730,15 +730,13 @@ fw_name_fail:
        return ret;
 }
 
-static int m10bmc_sec_remove(struct platform_device *pdev)
+static void m10bmc_sec_remove(struct platform_device *pdev)
 {
        struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);
 
        firmware_upload_unregister(sec->fwl);
        kfree(sec->fw_name);
        xa_erase(&fw_upload_xa, sec->fw_name_id);
-
-       return 0;
 }
 
 static const struct platform_device_id intel_m10bmc_sec_ids[] = {
@@ -760,7 +758,7 @@ MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
 
 static struct platform_driver intel_m10bmc_sec_driver = {
        .probe = m10bmc_sec_probe,
-       .remove = m10bmc_sec_remove,
+       .remove_new = m10bmc_sec_remove,
        .driver = {
                .name = "intel-m10bmc-sec-update",
                .dev_groups = m10bmc_sec_attr_groups,
index a6affd83f27578b0d6084f49046ff23eb283bfaa..8526a5a86f0cbe9e6729528f10b87f4c15078f79 100644 (file)
@@ -425,20 +425,18 @@ eprobe_mgr_put:
        return ret;
 }
 
-static int of_fpga_region_remove(struct platform_device *pdev)
+static void of_fpga_region_remove(struct platform_device *pdev)
 {
        struct fpga_region *region = platform_get_drvdata(pdev);
        struct fpga_manager *mgr = region->mgr;
 
        fpga_region_unregister(region);
        fpga_mgr_put(mgr);
-
-       return 0;
 }
 
 static struct platform_driver of_fpga_region_driver = {
        .probe = of_fpga_region_probe,
-       .remove = of_fpga_region_remove,
+       .remove_new = of_fpga_region_remove,
        .driver = {
                .name   = "of-fpga-region",
                .of_match_table = of_match_ptr(fpga_region_of_match),
index cc4861e345c935ed27a128498d02eccdd0d343da..4c03513b8f03b5c9c6140abea34b4be650f790ea 100644 (file)
@@ -517,15 +517,13 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int socfpga_a10_fpga_remove(struct platform_device *pdev)
+static void socfpga_a10_fpga_remove(struct platform_device *pdev)
 {
        struct fpga_manager *mgr = platform_get_drvdata(pdev);
        struct a10_fpga_priv *priv = mgr->priv;
 
        fpga_mgr_unregister(mgr);
        clk_disable_unprepare(priv->clk);
-
-       return 0;
 }
 
 static const struct of_device_id socfpga_a10_fpga_of_match[] = {
@@ -537,7 +535,7 @@ MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match);
 
 static struct platform_driver socfpga_a10_fpga_driver = {
        .probe = socfpga_a10_fpga_probe,
-       .remove = socfpga_a10_fpga_remove,
+       .remove_new = socfpga_a10_fpga_remove,
        .driver = {
                .name   = "socfpga_a10_fpga_manager",
                .of_match_table = socfpga_a10_fpga_of_match,
index cacb9cc5757e781eae756ed1cad522b918e886bb..2c0def7d7cbb160c714e27a92c5006a1f91d6a5d 100644 (file)
@@ -436,15 +436,13 @@ probe_err:
        return ret;
 }
 
-static int s10_remove(struct platform_device *pdev)
+static void s10_remove(struct platform_device *pdev)
 {
        struct fpga_manager *mgr = platform_get_drvdata(pdev);
        struct s10_priv *priv = mgr->priv;
 
        fpga_mgr_unregister(mgr);
        stratix10_svc_free_channel(priv->chan);
-
-       return 0;
 }
 
 static const struct of_device_id s10_of_match[] = {
@@ -457,7 +455,7 @@ MODULE_DEVICE_TABLE(of, s10_of_match);
 
 static struct platform_driver s10_driver = {
        .probe = s10_probe,
-       .remove = s10_remove,
+       .remove_new = s10_remove,
        .driver = {
                .name   = "Stratix10 SoC FPGA manager",
                .of_match_table = of_match_ptr(s10_of_match),
index 68835896f180ea2f9fe3f90a7b006baf6aa96c8f..788dd2f63a652e79f68a16177372eb25096d4380 100644 (file)
@@ -150,7 +150,7 @@ err_clk:
        return err;
 }
 
-static int xlnx_pr_decoupler_remove(struct platform_device *pdev)
+static void xlnx_pr_decoupler_remove(struct platform_device *pdev)
 {
        struct fpga_bridge *bridge = platform_get_drvdata(pdev);
        struct xlnx_pr_decoupler_data *p = bridge->priv;
@@ -158,13 +158,11 @@ static int xlnx_pr_decoupler_remove(struct platform_device *pdev)
        fpga_bridge_unregister(bridge);
 
        clk_unprepare(p->clk);
-
-       return 0;
 }
 
 static struct platform_driver xlnx_pr_decoupler_driver = {
        .probe = xlnx_pr_decoupler_probe,
-       .remove = xlnx_pr_decoupler_remove,
+       .remove_new = xlnx_pr_decoupler_remove,
        .driver = {
                .name = "xlnx_pr_decoupler",
                .of_match_table = xlnx_pr_decoupler_of_match,
index 96611d424a104ed25b3d618688fd806cff430a1e..0ac93183d201650d8e8a0f9b106bbb6e40183a76 100644 (file)
@@ -618,7 +618,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int zynq_fpga_remove(struct platform_device *pdev)
+static void zynq_fpga_remove(struct platform_device *pdev)
 {
        struct zynq_fpga_priv *priv;
        struct fpga_manager *mgr;
@@ -629,8 +629,6 @@ static int zynq_fpga_remove(struct platform_device *pdev)
        fpga_mgr_unregister(mgr);
 
        clk_unprepare(priv->clk);
-
-       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -644,7 +642,7 @@ MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);
 
 static struct platform_driver zynq_fpga_driver = {
        .probe = zynq_fpga_probe,
-       .remove = zynq_fpga_remove,
+       .remove_new = zynq_fpga_remove,
        .driver = {
                .name = "zynq_fpga_manager",
                .of_match_table = of_match_ptr(zynq_fpga_of_match),
index 5d8e9bfb24d020a59573b319f3b89d1a564f7181..baa956494e79f0aa3651d5c0ab2e550f2337aff3 100644 (file)
@@ -80,8 +80,8 @@ static const struct gnss_operations gnss_serial_gnss_ops = {
        .write_raw      = gnss_serial_write_raw,
 };
 
-static int gnss_serial_receive_buf(struct serdev_device *serdev,
-                                       const unsigned char *buf, size_t count)
+static ssize_t gnss_serial_receive_buf(struct serdev_device *serdev,
+                                      const u8 *buf, size_t count)
 {
        struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
        struct gnss_device *gdev = gserial->gdev;
index bcb53ccfee4d553ba87caa26e4766f65b6b8ec4a..6801a8fb20401a484aba805be4fe1c0f0ca8e940 100644 (file)
@@ -160,8 +160,8 @@ static const struct gnss_operations sirf_gnss_ops = {
        .write_raw      = sirf_write_raw,
 };
 
-static int sirf_receive_buf(struct serdev_device *serdev,
-                               const unsigned char *buf, size_t count)
+static ssize_t sirf_receive_buf(struct serdev_device *serdev,
+                               const u8 *buf, size_t count)
 {
        struct sirf_data *data = serdev_device_get_drvdata(serdev);
        struct gnss_device *gdev = data->gdev;
index be7f2fa5aa7b600a2605084d832e23f24d501c84..806b88d8dfb7bda7d23cae021eb08c3bbf383ab1 100644 (file)
@@ -330,20 +330,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                switch (flow_type) {
                case IRQ_TYPE_LEVEL_HIGH:
                        sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
                        break;
                case IRQ_TYPE_LEVEL_LOW:
                        sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
                        break;
                case IRQ_TYPE_EDGE_RISING:
                case IRQ_TYPE_EDGE_FALLING:
                case IRQ_TYPE_EDGE_BOTH:
                        state = sprd_eic_get(chip, offset);
-                       if (state)
+                       if (state) {
                                sprd_eic_update(chip, offset,
                                                SPRD_EIC_DBNC_IEV, 0);
-                       else
+                               sprd_eic_update(chip, offset,
+                                               SPRD_EIC_DBNC_IC, 1);
+                       } else {
                                sprd_eic_update(chip, offset,
                                                SPRD_EIC_DBNC_IEV, 1);
+                               sprd_eic_update(chip, offset,
+                                               SPRD_EIC_DBNC_IC, 1);
+                       }
                        break;
                default:
                        return -ENOTSUPP;
@@ -355,20 +362,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                switch (flow_type) {
                case IRQ_TYPE_LEVEL_HIGH:
                        sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
                        break;
                case IRQ_TYPE_LEVEL_LOW:
                        sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
                        break;
                case IRQ_TYPE_EDGE_RISING:
                case IRQ_TYPE_EDGE_FALLING:
                case IRQ_TYPE_EDGE_BOTH:
                        state = sprd_eic_get(chip, offset);
-                       if (state)
+                       if (state) {
                                sprd_eic_update(chip, offset,
                                                SPRD_EIC_LATCH_INTPOL, 0);
-                       else
+                               sprd_eic_update(chip, offset,
+                                               SPRD_EIC_LATCH_INTCLR, 1);
+                       } else {
                                sprd_eic_update(chip, offset,
                                                SPRD_EIC_LATCH_INTPOL, 1);
+                               sprd_eic_update(chip, offset,
+                                               SPRD_EIC_LATCH_INTCLR, 1);
+                       }
                        break;
                default:
                        return -ENOTSUPP;
@@ -382,29 +396,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_FALLING:
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_BOTH:
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_LEVEL_HIGH:
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_level_irq);
                        break;
                case IRQ_TYPE_LEVEL_LOW:
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_level_irq);
                        break;
                default:
@@ -417,29 +436,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_FALLING:
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_BOTH:
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_LEVEL_HIGH:
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_level_irq);
                        break;
                case IRQ_TYPE_LEVEL_LOW:
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_level_irq);
                        break;
                default:
index f836a8db4c1d21f8c44370246e8ad97bfedb064f..69834db2c1cf26be379c0deca38dda889202f706 100644 (file)
 #define AIROHA_GPIO_MAX                32
 
 /**
- * airoha_gpio_ctrl - Airoha GPIO driver data
+ * struct airoha_gpio_ctrl - Airoha GPIO driver data
  * @gc: Associated gpio_chip instance.
  * @data: The data register.
- * @dir0: The direction register for the lower 16 pins.
- * @dir1: The direction register for the higher 16 pins.
+ * @dir: [0] The direction register for the lower 16 pins.
+ * [1]: The direction register for the higher 16 pins.
  * @output: The output enable register.
  */
 struct airoha_gpio_ctrl {
index 7a3e1760fc5b7d1a754d25d05df1bb30fbbad396..d5906d419b0ab996a5286d8cc411929385bf2c4b 100644 (file)
@@ -215,6 +215,8 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
                        gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
                        gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
                        gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR, 0);
+       if (ret)
+               return dev_err_probe(dev, ret, "%s: bgpio_init() failed", __func__);
 
        gc->request = gpiochip_generic_request;
        gc->free = gpiochip_generic_free;
index a7939bd0aa566e94ac5093e7fc494b7535623b9a..bf7f008f58d703347cba14f35c19f5798ee3a949 100644 (file)
@@ -525,18 +525,21 @@ static int rtd_gpio_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct gpio_irq_chip *irq_chip;
        struct rtd_gpio *data;
+       int ret;
 
        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       data->irqs[0] = platform_get_irq(pdev, 0);
-       if (data->irqs[0] < 0)
-               return data->irqs[0];
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0)
+               return ret;
+       data->irqs[0] = ret;
 
-       data->irqs[1] = platform_get_irq(pdev, 1);
-       if (data->irqs[1] < 0)
-               return data->irqs[1];
+       ret = platform_get_irq(pdev, 1);
+       if (ret < 0)
+               return ret;
+       data->irqs[1] = ret;
 
        data->info = device_get_match_data(dev);
        if (!data->info)
index 88066826d8e5b629697136b8bb2431b543544977..cd3e9657cc36df59123a571a0ed2ed5332272a5d 100644 (file)
@@ -1651,6 +1651,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
                        .ignore_interrupt = "INT33FC:00@3",
                },
        },
+       {
+               /*
+                * Spurious wakeups from TP_ATTN# pin
+                * Found in BIOS 0.35
+                * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+               },
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .ignore_wake = "PNP0C50:00@8",
+               },
+       },
        {} /* Terminating entry */
 };
 
index 402f7d99b0c1e4a36a136e7dd1e3b6f7863cf6f6..e7770eedd14693ac827a0c4ce875aa5484e84a80 100644 (file)
@@ -184,7 +184,7 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
                const char *propname;
                bool active_high;
        } gpios[] = {
-#if !IS_ENABLED(CONFIG_LCD_HX8357)
+#if IS_ENABLED(CONFIG_LCD_HX8357)
                /*
                 * Himax LCD controllers used incorrectly named
                 * "gpios-reset" property and also specified wrong
@@ -478,7 +478,7 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
                 */
                const char *compatible;
        } gpios[] = {
-#if !IS_ENABLED(CONFIG_LCD_HX8357)
+#if IS_ENABLED(CONFIG_LCD_HX8357)
                /* Himax LCD controllers used "gpios-reset" */
                { "reset",      "gpios-reset",  "himax,hx8357" },
                { "reset",      "gpios-reset",  "himax,hx8369" },
index 4dbf298bb5dda0f3fadc58843707d593a1adbc5c..6bf5332136e5a9d4bd0b4c52618fa05241ccaa00 100644 (file)
@@ -768,25 +768,6 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
        return 0;
 }
 
-int gpiochip_sysfs_register_all(void)
-{
-       struct gpio_device *gdev;
-       int ret;
-
-       guard(rwsem_read)(&gpio_devices_sem);
-
-       list_for_each_entry(gdev, &gpio_devices, list) {
-               if (gdev->mockdev)
-                       continue;
-
-               ret = gpiochip_sysfs_register(gdev);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
 void gpiochip_sysfs_unregister(struct gpio_device *gdev)
 {
        struct gpio_desc *desc;
@@ -811,7 +792,9 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev)
 
 static int __init gpiolib_sysfs_init(void)
 {
-       int status;
+       int             status;
+       unsigned long   flags;
+       struct gpio_device *gdev;
 
        status = class_register(&gpio_class);
        if (status < 0)
@@ -823,6 +806,26 @@ static int __init gpiolib_sysfs_init(void)
         * We run before arch_initcall() so chip->dev nodes can have
         * registered, and so arch_initcall() can always gpiod_export().
         */
-       return gpiochip_sysfs_register_all();
+       spin_lock_irqsave(&gpio_lock, flags);
+       list_for_each_entry(gdev, &gpio_devices, list) {
+               if (gdev->mockdev)
+                       continue;
+
+               /*
+                * TODO we yield gpio_lock here because
+                * gpiochip_sysfs_register() acquires a mutex. This is unsafe
+                * and needs to be fixed.
+                *
+                * Also it would be nice to use gpio_device_find() here so we
+                * can keep gpio_chips local to gpiolib.c, but the yield of
+                * gpio_lock prevents us from doing this.
+                */
+               spin_unlock_irqrestore(&gpio_lock, flags);
+               status = gpiochip_sysfs_register(gdev);
+               spin_lock_irqsave(&gpio_lock, flags);
+       }
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       return status;
 }
 postcore_initcall(gpiolib_sysfs_init);
index ab157cec0b4bec5ae89249fedbd8374e1cd0d91e..b794b396d6a52588c93839fbba062eb160236ca4 100644 (file)
@@ -8,7 +8,6 @@ struct gpio_device;
 #ifdef CONFIG_GPIO_SYSFS
 
 int gpiochip_sysfs_register(struct gpio_device *gdev);
-int gpiochip_sysfs_register_all(void);
 void gpiochip_sysfs_unregister(struct gpio_device *gdev);
 
 #else
@@ -18,11 +17,6 @@ static inline int gpiochip_sysfs_register(struct gpio_device *gdev)
        return 0;
 }
 
-static inline int gpiochip_sysfs_register_all(void)
-{
-       return 0;
-}
-
 static inline void gpiochip_sysfs_unregister(struct gpio_device *gdev)
 {
 }
index 4c93cf73a8260569de5287a4b2ae231dc54dbab6..8b3a0f45b57456b13a04ecf4bf706b8cbf27b8dd 100644 (file)
@@ -2,7 +2,6 @@
 
 #include <linux/acpi.h>
 #include <linux/bitmap.h>
-#include <linux/cleanup.h>
 #include <linux/compat.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
@@ -16,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/seq_file.h>
@@ -83,9 +81,7 @@ DEFINE_SPINLOCK(gpio_lock);
 
 static DEFINE_MUTEX(gpio_lookup_lock);
 static LIST_HEAD(gpio_lookup_list);
-
 LIST_HEAD(gpio_devices);
-DECLARE_RWSEM(gpio_devices_sem);
 
 static DEFINE_MUTEX(gpio_machine_hogs_mutex);
 static LIST_HEAD(gpio_machine_hogs);
@@ -117,15 +113,20 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
 struct gpio_desc *gpio_to_desc(unsigned gpio)
 {
        struct gpio_device *gdev;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio_lock, flags);
 
-       scoped_guard(rwsem_read, &gpio_devices_sem) {
-               list_for_each_entry(gdev, &gpio_devices, list) {
-                       if (gdev->base <= gpio &&
-                           gdev->base + gdev->ngpio > gpio)
-                               return &gdev->descs[gpio - gdev->base];
+       list_for_each_entry(gdev, &gpio_devices, list) {
+               if (gdev->base <= gpio &&
+                   gdev->base + gdev->ngpio > gpio) {
+                       spin_unlock_irqrestore(&gpio_lock, flags);
+                       return &gdev->descs[gpio - gdev->base];
                }
        }
 
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
        if (!gpio_is_valid(gpio))
                pr_warn("invalid GPIO %d\n", gpio);
 
@@ -398,21 +399,26 @@ static int gpiodev_add_to_list_unlocked(struct gpio_device *gdev)
 static struct gpio_desc *gpio_name_to_desc(const char * const name)
 {
        struct gpio_device *gdev;
+       unsigned long flags;
 
        if (!name)
                return NULL;
 
-       guard(rwsem_read)(&gpio_devices_sem);
+       spin_lock_irqsave(&gpio_lock, flags);
 
        list_for_each_entry(gdev, &gpio_devices, list) {
                struct gpio_desc *desc;
 
                for_each_gpio_desc(gdev->chip, desc) {
-                       if (desc->name && !strcmp(desc->name, name))
+                       if (desc->name && !strcmp(desc->name, name)) {
+                               spin_unlock_irqrestore(&gpio_lock, flags);
                                return desc;
+                       }
                }
        }
 
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
        return NULL;
 }
 
@@ -807,6 +813,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
                               struct lock_class_key *request_key)
 {
        struct gpio_device *gdev;
+       unsigned long flags;
        unsigned int i;
        int base = 0;
        int ret = 0;
@@ -871,46 +878,49 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 
        gdev->ngpio = gc->ngpio;
 
-       scoped_guard(rwsem_write, &gpio_devices_sem) {
-               /*
-                * TODO: this allocates a Linux GPIO number base in the global
-                * GPIO numberspace for this chip. In the long run we want to
-                * get *rid* of this numberspace and use only descriptors, but
-                * it may be a pipe dream. It will not happen before we get rid
-                * of the sysfs interface anyways.
-                */
-               base = gc->base;
+       spin_lock_irqsave(&gpio_lock, flags);
 
+       /*
+        * TODO: this allocates a Linux GPIO number base in the global
+        * GPIO numberspace for this chip. In the long run we want to
+        * get *rid* of this numberspace and use only descriptors, but
+        * it may be a pipe dream. It will not happen before we get rid
+        * of the sysfs interface anyways.
+        */
+       base = gc->base;
+       if (base < 0) {
+               base = gpiochip_find_base_unlocked(gc->ngpio);
                if (base < 0) {
-                       base = gpiochip_find_base_unlocked(gc->ngpio);
-                       if (base < 0) {
-                               ret = base;
-                               base = 0;
-                               goto err_free_label;
-                       }
-                       /*
-                        * TODO: it should not be necessary to reflect the assigned
-                        * base outside of the GPIO subsystem. Go over drivers and
-                        * see if anyone makes use of this, else drop this and assign
-                        * a poison instead.
-                        */
-                       gc->base = base;
-               } else {
-                       dev_warn(&gdev->dev,
-                                "Static allocation of GPIO base is deprecated, use dynamic allocation.\n");
-               }
-               gdev->base = base;
-
-               ret = gpiodev_add_to_list_unlocked(gdev);
-               if (ret) {
-                       chip_err(gc, "GPIO integer space overlap, cannot add chip\n");
+                       spin_unlock_irqrestore(&gpio_lock, flags);
+                       ret = base;
+                       base = 0;
                        goto err_free_label;
                }
+               /*
+                * TODO: it should not be necessary to reflect the assigned
+                * base outside of the GPIO subsystem. Go over drivers and
+                * see if anyone makes use of this, else drop this and assign
+                * a poison instead.
+                */
+               gc->base = base;
+       } else {
+               dev_warn(&gdev->dev,
+                        "Static allocation of GPIO base is deprecated, use dynamic allocation.\n");
+       }
+       gdev->base = base;
 
-               for (i = 0; i < gc->ngpio; i++)
-                       gdev->descs[i].gdev = gdev;
+       ret = gpiodev_add_to_list_unlocked(gdev);
+       if (ret) {
+               spin_unlock_irqrestore(&gpio_lock, flags);
+               chip_err(gc, "GPIO integer space overlap, cannot add chip\n");
+               goto err_free_label;
        }
 
+       for (i = 0; i < gc->ngpio; i++)
+               gdev->descs[i].gdev = gdev;
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
        BLOCKING_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
        BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
        init_rwsem(&gdev->sem);
@@ -995,14 +1005,15 @@ err_remove_of_chip:
 err_free_gpiochip_mask:
        gpiochip_remove_pin_ranges(gc);
        gpiochip_free_valid_mask(gc);
+err_remove_from_list:
+       spin_lock_irqsave(&gpio_lock, flags);
+       list_del(&gdev->list);
+       spin_unlock_irqrestore(&gpio_lock, flags);
        if (gdev->dev.release) {
                /* release() has been registered by gpiochip_setup_dev() */
                gpio_device_put(gdev);
                goto err_print_message;
        }
-err_remove_from_list:
-       scoped_guard(rwsem_write, &gpio_devices_sem)
-               list_del(&gdev->list);
 err_free_label:
        kfree_const(gdev->label);
 err_free_descs:
@@ -1065,7 +1076,7 @@ void gpiochip_remove(struct gpio_chip *gc)
                dev_crit(&gdev->dev,
                         "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
 
-       scoped_guard(rwsem_write, &gpio_devices_sem)
+       scoped_guard(spinlock_irqsave, &gpio_lock)
                list_del(&gdev->list);
 
        /*
@@ -1114,7 +1125,7 @@ struct gpio_device *gpio_device_find(void *data,
         */
        might_sleep();
 
-       guard(rwsem_read)(&gpio_devices_sem);
+       guard(spinlock_irqsave)(&gpio_lock);
 
        list_for_each_entry(gdev, &gpio_devices, list) {
                if (gdev->chip && match(gdev->chip, data))
@@ -4725,33 +4736,35 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
 
 static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
 {
+       unsigned long flags;
        struct gpio_device *gdev = NULL;
        loff_t index = *pos;
 
        s->private = "";
 
-       guard(rwsem_read)(&gpio_devices_sem);
-
-       list_for_each_entry(gdev, &gpio_devices, list) {
-               if (index-- == 0)
+       spin_lock_irqsave(&gpio_lock, flags);
+       list_for_each_entry(gdev, &gpio_devices, list)
+               if (index-- == 0) {
+                       spin_unlock_irqrestore(&gpio_lock, flags);
                        return gdev;
-       }
+               }
+       spin_unlock_irqrestore(&gpio_lock, flags);
 
        return NULL;
 }
 
 static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
+       unsigned long flags;
        struct gpio_device *gdev = v;
        void *ret = NULL;
 
-       scoped_guard(rwsem_read, &gpio_devices_sem) {
-               if (list_is_last(&gdev->list, &gpio_devices))
-                       ret = NULL;
-               else
-                       ret = list_first_entry(&gdev->list, struct gpio_device,
-                                              list);
-       }
+       spin_lock_irqsave(&gpio_lock, flags);
+       if (list_is_last(&gdev->list, &gpio_devices))
+               ret = NULL;
+       else
+               ret = list_first_entry(&gdev->list, struct gpio_device, list);
+       spin_unlock_irqrestore(&gpio_lock, flags);
 
        s->private = "\n";
        ++*pos;
index 97df54abf57ac0f2a740d72bf37965b299d87927..a4a2520b5f31cced763696970a452ab1fff0e4bf 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/gpio/consumer.h> /* for enum gpiod_flags */
 #include <linux/gpio/driver.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/rwsem.h>
 
@@ -137,7 +136,6 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
 
 extern spinlock_t gpio_lock;
 extern struct list_head gpio_devices;
-extern struct rw_semaphore gpio_devices_sem;
 
 void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
 
index 9da14436a3738f8b950db3aa44a71b47c6e4952c..6dce81a061ab1feff52c48f7b160c5e1c13b1322 100644 (file)
@@ -254,8 +254,6 @@ extern int amdgpu_agp;
 
 extern int amdgpu_wbrf;
 
-extern int fw_bo_location;
-
 #define AMDGPU_VM_MAX_NUM_CTX                  4096
 #define AMDGPU_SG_THRESHOLD                    (256*1024*1024)
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
@@ -1080,6 +1078,8 @@ struct amdgpu_device {
        bool                            in_s3;
        bool                            in_s4;
        bool                            in_s0ix;
+       /* indicate amdgpu suspension status */
+       bool                            suspend_complete;
 
        enum pp_mp1_state               mp1_state;
        struct amdgpu_doorbell_index doorbell_index;
@@ -1146,6 +1146,7 @@ struct amdgpu_device {
        bool                            debug_vm;
        bool                            debug_largebar;
        bool                            debug_disable_soft_recovery;
+       bool                            debug_use_vram_fw_buf;
 };
 
 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
index 067690ba7bffd4192817fe3acf9d46a19f1f274c..41db030ddc4ee9c98ba952b4b91d6292f7c457d6 100644 (file)
@@ -138,11 +138,34 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
        amdgpu_device_gpu_recover(adev, NULL, &reset_context);
 }
 
+static const struct drm_client_funcs kfd_client_funcs = {
+       .unregister     = drm_client_release,
+};
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
+{
+       int ret;
+
+       if (!adev->kfd.init_complete)
+               return 0;
+
+       ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
+                             &kfd_client_funcs);
+       if (ret) {
+               dev_err(adev->dev, "Failed to init DRM client: %d\n",
+                       ret);
+               return ret;
+       }
+
+       drm_client_register(&adev->kfd.client);
+
+       return 0;
+}
+
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 {
        int i;
        int last_valid_bit;
-       int ret;
 
        amdgpu_amdkfd_gpuvm_init_mem_limits();
 
@@ -161,12 +184,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                        .enable_mes = adev->enable_mes,
                };
 
-               ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", NULL);
-               if (ret) {
-                       dev_err(adev->dev, "Failed to init DRM client: %d\n", ret);
-                       return;
-               }
-
                /* this is going to have a few of the MSBs set that we need to
                 * clear
                 */
@@ -205,10 +222,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 
                adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
                                                        &gpu_resources);
-               if (adev->kfd.init_complete)
-                       drm_client_register(&adev->kfd.client);
-               else
-                       drm_client_release(&adev->kfd.client);
 
                amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
 
@@ -695,10 +708,8 @@ err:
 void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
 {
        enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
-       /* Temporary workaround to fix issues observed in some
-        * compute applications when GFXOFF is enabled on GFX11.
-        */
-       if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11) {
+       if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+           ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
                pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
                amdgpu_gfx_off_ctrl(adev, idle);
        } else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
index cf6ed5fce291f946854d329fa91e0fb6eedbc61a..27c61c535e297931892902f1abb9e56ca6feea5c 100644 (file)
@@ -182,6 +182,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
 struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
                                struct mm_struct *mm,
                                struct svm_range_bo *svm_bo);
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
 #if defined(CONFIG_DEBUG_FS)
 int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
 #endif
@@ -301,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
                                          struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
                struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
@@ -311,7 +313,7 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);
 
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
-                                           struct dma_fence **ef);
+                                           struct dma_fence __rcu **ef);
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
                                              struct kfd_vm_fault_info *info);
 int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
index 899e31e3a5e81d2be343a668e295a564efee10af..3a3f3ce09f00dbe77f61455f24fed7bd0db0dec5 100644 (file)
@@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
-               if (!(ring && drm_sched_wqueue_ready(&ring->sched)))
+               if (!amdgpu_ring_sched_ready(ring))
                        continue;
 
                /* stop secheduler and drain ring. */
index d17b2452cb1f69df276dd95518cf0ca340539237..231fd927dcfbee0db07e3a5d28eed2b24ff82b9c 100644 (file)
@@ -2085,21 +2085,35 @@ out:
        return ret;
 }
 
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
 {
        struct kfd_mem_attachment *entry;
        struct amdgpu_vm *vm;
+       int ret;
 
        vm = drm_priv_to_vm(drm_priv);
 
        mutex_lock(&mem->lock);
 
+       ret = amdgpu_bo_reserve(mem->bo, true);
+       if (ret)
+               goto out;
+
        list_for_each_entry(entry, &mem->attachments, list) {
-               if (entry->bo_va->base.vm == vm)
-                       kfd_mem_dmaunmap_attachment(mem, entry);
+               if (entry->bo_va->base.vm != vm)
+                       continue;
+               if (entry->bo_va->base.bo->tbo.ttm &&
+                   !entry->bo_va->base.bo->tbo.ttm->sg)
+                       continue;
+
+               kfd_mem_dmaunmap_attachment(mem, entry);
        }
 
+       amdgpu_bo_unreserve(mem->bo);
+out:
        mutex_unlock(&mem->lock);
+
+       return ret;
 }
 
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
@@ -2802,7 +2816,7 @@ unlock_out:
        put_task_struct(usertask);
 }
 
-static void replace_eviction_fence(struct dma_fence **ef,
+static void replace_eviction_fence(struct dma_fence __rcu **ef,
                                   struct dma_fence *new_ef)
 {
        struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
@@ -2837,7 +2851,7 @@ static void replace_eviction_fence(struct dma_fence **ef,
  * 7.  Add fence to all PD and PT BOs.
  * 8.  Unreserve all BOs
  */
-int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
 {
        struct amdkfd_process_info *process_info = info;
        struct amdgpu_vm *peer_vm;
index e485dd3357c63fd225b3fb7e3847675749f018da..1afbb2e932c6b58a9e26cbabe61370151373a4af 100644 (file)
@@ -1678,7 +1678,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
 
-               if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+               if (!amdgpu_ring_sched_ready(ring))
                        continue;
                drm_sched_wqueue_stop(&ring->sched);
        }
@@ -1694,7 +1694,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
 
-               if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+               if (!amdgpu_ring_sched_ready(ring))
                        continue;
                drm_sched_wqueue_start(&ring->sched);
        }
@@ -1916,8 +1916,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 
        ring = adev->rings[val];
 
-       if (!ring || !ring->funcs->preempt_ib ||
-           !drm_sched_wqueue_ready(&ring->sched))
+       if (!amdgpu_ring_sched_ready(ring) ||
+           !ring->funcs->preempt_ib)
                return -EINVAL;
 
        /* the last preemption failed */
index 5bb444bb36cece19b00fe27f0923c42b1bc1c83f..fdde7488d0ed9a8ff93f4a4cc58c123b904236c7 100644 (file)
@@ -1544,6 +1544,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
                                return true;
 
                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+                       release_firmware(adev->pm.fw);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
@@ -4120,23 +4121,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                                }
                        }
                } else {
-                       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
-                       case IP_VERSION(13, 0, 0):
-                       case IP_VERSION(13, 0, 7):
-                       case IP_VERSION(13, 0, 10):
-                               r = psp_gpu_reset(adev);
-                               break;
-                       default:
-                               tmp = amdgpu_reset_method;
-                               /* It should do a default reset when loading or reloading the driver,
-                                * regardless of the module parameter reset_method.
-                                */
-                               amdgpu_reset_method = AMD_RESET_METHOD_NONE;
-                               r = amdgpu_asic_reset(adev);
-                               amdgpu_reset_method = tmp;
-                               break;
-                       }
-
+                       tmp = amdgpu_reset_method;
+                       /* It should do a default reset when loading or reloading the driver,
+                        * regardless of the module parameter reset_method.
+                        */
+                       amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+                       r = amdgpu_asic_reset(adev);
+                       amdgpu_reset_method = tmp;
                        if (r) {
                                dev_err(adev->dev, "asic reset on init failed\n");
                                goto failed;
@@ -5030,7 +5021,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
 
-               if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+               if (!amdgpu_ring_sched_ready(ring))
                        continue;
 
                spin_lock(&ring->sched.job_list_lock);
@@ -5169,7 +5160,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
 
-               if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+               if (!amdgpu_ring_sched_ready(ring))
                        continue;
 
                /* Clear job fence from fence drv to avoid force_completion
@@ -5245,7 +5236,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
        struct amdgpu_device *tmp_adev = NULL;
        bool need_full_reset, skip_hw_reset, vram_lost = false;
        int r = 0;
-       bool gpu_reset_for_dev_remove = 0;
 
        /* Try reset handler method first */
        tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
@@ -5265,10 +5255,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
        skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
 
-       gpu_reset_for_dev_remove =
-               test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
-                       test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
-
        /*
         * ASIC reset has to be done on all XGMI hive nodes ASAP
         * to allow proper links negotiation in FW (within 1 sec)
@@ -5311,18 +5297,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                amdgpu_ras_intr_cleared();
        }
 
-       /* Since the mode1 reset affects base ip blocks, the
-        * phase1 ip blocks need to be resumed. Otherwise there
-        * will be a BIOS signature error and the psp bootloader
-        * can't load kdb on the next amdgpu install.
-        */
-       if (gpu_reset_for_dev_remove) {
-               list_for_each_entry(tmp_adev, device_list_handle, reset_list)
-                       amdgpu_device_ip_resume_phase1(tmp_adev);
-
-               goto end;
-       }
-
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
                if (need_full_reset) {
                        /* post card */
@@ -5559,11 +5533,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        int i, r = 0;
        bool need_emergency_restart = false;
        bool audio_suspended = false;
-       bool gpu_reset_for_dev_remove = false;
-
-       gpu_reset_for_dev_remove =
-                       test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
-                               test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
 
        /*
         * Special case: RAS triggered and full reset isn't supported
@@ -5601,7 +5570,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
                list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
                        list_add_tail(&tmp_adev->reset_list, &device_list);
-                       if (gpu_reset_for_dev_remove && adev->shutdown)
+                       if (adev->shutdown)
                                tmp_adev->shutdown = true;
                }
                if (!list_is_first(&adev->reset_list, &device_list))
@@ -5658,7 +5627,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = tmp_adev->rings[i];
 
-                       if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+                       if (!amdgpu_ring_sched_ready(ring))
                                continue;
 
                        drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@@ -5686,10 +5655,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 retry: /* Rest of adevs pre asic reset from XGMI hive. */
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-               if (gpu_reset_for_dev_remove) {
-                       /* Workaroud for ASICs need to disable SMC first */
-                       amdgpu_device_smu_fini_early(tmp_adev);
-               }
                r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
                /*TODO Should we stop ?*/
                if (r) {
@@ -5721,9 +5686,6 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
                r = amdgpu_do_asic_reset(device_list_handle, reset_context);
                if (r && r == -EAGAIN)
                        goto retry;
-
-               if (!r && gpu_reset_for_dev_remove)
-                       goto recover_end;
        }
 
 skip_hw_reset:
@@ -5734,7 +5696,7 @@ skip_hw_reset:
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = tmp_adev->rings[i];
 
-                       if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+                       if (!amdgpu_ring_sched_ready(ring))
                                continue;
 
                        drm_sched_start(&ring->sched, true);
@@ -5779,7 +5741,6 @@ skip_sched_resume:
                amdgpu_ras_set_error_query_ready(tmp_adev, true);
        }
 
-recover_end:
        tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
                                            reset_list);
        amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
@@ -6090,7 +6051,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = adev->rings[i];
 
-                       if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+                       if (!amdgpu_ring_sched_ready(ring))
                                continue;
 
                        drm_sched_stop(&ring->sched, NULL);
@@ -6218,7 +6179,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
 
-               if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+               if (!amdgpu_ring_sched_ready(ring))
                        continue;
 
                drm_sched_start(&ring->sched, true);
index 0431eafa86b5324f4d63cc6060cea30baa03088b..c7d60dd0fb975d47d749300c79f976da15892736 100644 (file)
@@ -1963,8 +1963,6 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
                break;
        case IP_VERSION(9, 4, 3):
-               if (!amdgpu_exp_hw_support)
-                       return -EINVAL;
                amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
                break;
        case IP_VERSION(10, 1, 10):
index 852cec98ff262359fb823ccb26f1b9977da8dec9..211501ea91694d9f79b84752223f5e6ce60843c1 100644 (file)
@@ -128,6 +128,7 @@ enum AMDGPU_DEBUG_MASK {
        AMDGPU_DEBUG_VM = BIT(0),
        AMDGPU_DEBUG_LARGEBAR = BIT(1),
        AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
+       AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
 };
 
 unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -210,7 +211,6 @@ int amdgpu_seamless = -1; /* auto */
 uint amdgpu_debug_mask;
 int amdgpu_agp = -1; /* auto */
 int amdgpu_wbrf = -1;
-int fw_bo_location = -1;
 
 static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
 
@@ -990,10 +990,6 @@ MODULE_PARM_DESC(wbrf,
        "Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
 module_param_named(wbrf, amdgpu_wbrf, int, 0444);
 
-MODULE_PARM_DESC(fw_bo_location,
-       "location to put firmware bo for frontdoor loading (-1 = auto (default), 0 = on ram, 1 = on vram");
-module_param(fw_bo_location, int, 0644);
-
 /* These devices are not supported by amdgpu.
  * They are supported by the mach64, r128, radeon drivers
  */
@@ -2122,6 +2118,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
                pr_info("debug: soft reset for GPU recovery disabled\n");
                adev->debug_disable_soft_recovery = true;
        }
+
+       if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) {
+               pr_info("debug: place fw in vram for frontdoor loading\n");
+               adev->debug_use_vram_fw_buf = true;
+       }
 }
 
 static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2233,6 +2234,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
        pci_set_drvdata(pdev, ddev);
 
+       amdgpu_init_debug_options(adev);
+
        ret = amdgpu_driver_load_kms(adev, flags);
        if (ret)
                goto err_pci;
@@ -2252,6 +2255,10 @@ retry_init:
        if (ret)
                goto err_pci;
 
+       ret = amdgpu_amdkfd_drm_client_create(adev);
+       if (ret)
+               goto err_pci;
+
        /*
         * 1. don't init fbdev on hw without DCE
         * 2. don't init fbdev if there are no connectors
@@ -2313,8 +2320,6 @@ retry_init:
                        amdgpu_get_secondary_funcs(adev);
        }
 
-       amdgpu_init_debug_options(adev);
-
        return 0;
 
 err_pci:
@@ -2336,38 +2341,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
                pm_runtime_forbid(dev->dev);
        }
 
-       if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&
-           !amdgpu_sriov_vf(adev)) {
-               bool need_to_reset_gpu = false;
-
-               if (adev->gmc.xgmi.num_physical_nodes > 1) {
-                       struct amdgpu_hive_info *hive;
-
-                       hive = amdgpu_get_xgmi_hive(adev);
-                       if (hive->device_remove_count == 0)
-                               need_to_reset_gpu = true;
-                       hive->device_remove_count++;
-                       amdgpu_put_xgmi_hive(hive);
-               } else {
-                       need_to_reset_gpu = true;
-               }
-
-               /* Workaround for ASICs need to reset SMU.
-                * Called only when the first device is removed.
-                */
-               if (need_to_reset_gpu) {
-                       struct amdgpu_reset_context reset_context;
-
-                       adev->shutdown = true;
-                       memset(&reset_context, 0, sizeof(reset_context));
-                       reset_context.method = AMD_RESET_METHOD_NONE;
-                       reset_context.reset_req_dev = adev;
-                       set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-                       set_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context.flags);
-                       amdgpu_device_gpu_recover(adev, NULL, &reset_context);
-               }
-       }
-
        amdgpu_driver_unload_kms(dev);
 
        /*
@@ -2503,6 +2476,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
+       adev->suspend_complete = false;
        if (amdgpu_acpi_is_s0ix_active(adev))
                adev->in_s0ix = true;
        else if (amdgpu_acpi_is_s3_active(adev))
@@ -2517,6 +2491,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
+       adev->suspend_complete = true;
        if (amdgpu_acpi_should_gpu_reset(adev))
                return amdgpu_asic_reset(adev);
 
index 73b8cca35bab8780d1938a45d035d19648bdd081..c623e23049d1d4bde50991fcddc8b542df0099b7 100644 (file)
@@ -121,6 +121,7 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
        struct amdgpu_bo_param bp;
        dma_addr_t dma_addr;
        struct page *p;
+       unsigned long x;
        int ret;
 
        if (adev->gart.bo != NULL)
@@ -130,6 +131,10 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
        if (!p)
                return -ENOMEM;
 
+       /* assign pages to this device */
+       for (x = 0; x < (1UL << order); x++)
+               p[x].mapping = adev->mman.bdev.dev_mapping;
+
        /* If the hardware does not support UTCL2 snooping of the CPU caches
         * then set_memory_wc() could be used as a workaround to mark the pages
         * as write combine memory.
@@ -223,6 +228,7 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
        unsigned int order = get_order(adev->gart.table_size);
        struct sg_table *sg = adev->gart.bo->tbo.sg;
        struct page *p;
+       unsigned long x;
        int ret;
 
        ret = amdgpu_bo_reserve(adev->gart.bo, false);
@@ -234,6 +240,8 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
        sg_free_table(sg);
        kfree(sg);
        p = virt_to_page(adev->gart.ptr);
+       for (x = 0; x < (1UL << order); x++)
+               p[x].mapping = NULL;
        __free_pages(p, order);
 
        adev->gart.ptr = NULL;
index d2f273d77e59557ba5185cbfa36e243788d3d86e..55784a9f26c4c83b17008a766130c234df8ecbaf 100644 (file)
@@ -1045,21 +1045,28 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
         * seconds, so here, we just pick up three parts for emulation.
         */
        ret = memcmp(vram_ptr, cptr, 10);
-       if (ret)
-               return ret;
+       if (ret) {
+               ret = -EIO;
+               goto release_buffer;
+       }
 
        ret = memcmp(vram_ptr + (size / 2), cptr, 10);
-       if (ret)
-               return ret;
+       if (ret) {
+               ret = -EIO;
+               goto release_buffer;
+       }
 
        ret = memcmp(vram_ptr + size - 10, cptr, 10);
-       if (ret)
-               return ret;
+       if (ret) {
+               ret = -EIO;
+               goto release_buffer;
+       }
 
+release_buffer:
        amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
                        &vram_ptr);
 
-       return 0;
+       return ret;
 }
 
 static ssize_t current_memory_partition_show(
index 82608df4339648a930528560d9e1a2ff5af3c459..d79cb13e1aa835db4028173957d24f3132049521 100644 (file)
@@ -175,7 +175,6 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
 
        i2c->rec = *rec;
        i2c->adapter.owner = THIS_MODULE;
-       i2c->adapter.class = I2C_CLASS_DDC;
        i2c->adapter.dev.parent = dev->dev;
        i2c->dev = dev;
        i2c_set_adapdata(&i2c->adapter, i2c);
index b5ebafd4a3adf82e37b29f9df84cbf6541955441..bf4f48fe438d1b5936852145c8b4c1059446381c 100644 (file)
@@ -1105,7 +1105,12 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_GPU_AVG_POWER,
                                                   (void *)&ui32, &ui32_size)) {
-                               return -EINVAL;
+                               /* fall back to input power for backwards compat */
+                               if (amdgpu_dpm_read_sensor(adev,
+                                                          AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+                                                          (void *)&ui32, &ui32_size)) {
+                                       return -EINVAL;
+                               }
                        }
                        ui32 >>= 8;
                        break;
index 2addbdf88394b8287b0ea9fb87297b3435e826fb..0328616473f80af861cd4a1176afc0221eee7db9 100644 (file)
@@ -466,7 +466,7 @@ static int psp_sw_init(void *handle)
        }
 
        ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
-                                     (amdgpu_sriov_vf(adev) || fw_bo_location == 1) ?
+                                     (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
                                      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
                                      &psp->fw_pri_bo,
                                      &psp->fw_pri_mc_addr,
index 468a67b302d4c140c9d7cf09bc92566404180e75..ca5c86e5f7cd671a651d61357ab52d3c53a1e7f3 100644 (file)
@@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
                }
        }
 
-       if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
+       if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
                ret = -EFAULT;
 
 err_free_shared_buf:
index fc42fb6ee1914b82e0bec3897cf92594f587423f..31823a30dea217b5af3a8a36624a01fab70b48a5 100644 (file)
@@ -305,11 +305,13 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
                        return -EINVAL;
 
                data->head.block = block_id;
-               /* only ue and ce errors are supported */
+               /* only ue, ce and poison errors are supported */
                if (!memcmp("ue", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                else if (!memcmp("ce", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+               else if (!memcmp("poison", err, 6))
+                       data->head.type = AMDGPU_RAS_ERROR__POISON;
                else
                        return -EINVAL;
 
@@ -431,9 +433,10 @@ static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
  * The block is one of: umc, sdma, gfx, etc.
  *     see ras_block_string[] for details
  *
- * The error type is one of: ue, ce, where,
+ * The error type is one of: ue, ce and poison where,
  *     ue is multi-uncorrectable
  *     ce is single-correctable
+ *     poison is poison
  *
  * The sub-block is a the sub-block index, pass 0 if there is no sub-block.
  * The address and value are hexadecimal numbers, leading 0x is optional.
@@ -1067,8 +1070,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                        mcm_info = &err_info->mcm_info;
                        if (err_info->ce_count) {
                                dev_info(adev->dev, "socket: %d, die: %d, "
-                                        "%lld new correctable hardware errors detected in %s block, "
-                                        "no user action is needed\n",
+                                        "%lld new correctable hardware errors detected in %s block\n",
                                         mcm_info->socket_id,
                                         mcm_info->die_id,
                                         err_info->ce_count,
@@ -1080,8 +1082,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                        err_info = &err_node->err_info;
                        mcm_info = &err_info->mcm_info;
                        dev_info(adev->dev, "socket: %d, die: %d, "
-                                "%lld correctable hardware errors detected in total in %s block, "
-                                "no user action is needed\n",
+                                "%lld correctable hardware errors detected in total in %s block\n",
                                 mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
                }
        }
@@ -1108,16 +1109,14 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
                           adev->smuio.funcs->get_die_id) {
                        dev_info(adev->dev, "socket: %d, die: %d "
                                 "%ld correctable hardware errors "
-                                "detected in %s block, no user "
-                                "action is needed.\n",
+                                "detected in %s block\n",
                                 adev->smuio.funcs->get_socket_id(adev),
                                 adev->smuio.funcs->get_die_id(adev),
                                 ras_mgr->err_data.ce_count,
                                 blk_name);
                } else {
                        dev_info(adev->dev, "%ld correctable hardware errors "
-                                "detected in %s block, no user "
-                                "action is needed.\n",
+                                "detected in %s block\n",
                                 ras_mgr->err_data.ce_count,
                                 blk_name);
                }
@@ -1920,7 +1919,7 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj
                                struct amdgpu_iv_entry *entry)
 {
        dev_info(obj->adev->dev,
-               "Poison is created, no user action is needed.\n");
+               "Poison is created\n");
 }
 
 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
@@ -2920,6 +2919,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 
        amdgpu_ras_query_poison_mode(adev);
 
+       /* Packed socket_id to ras feature mask bits[31:29] */
+       if (adev->smuio.funcs &&
+           adev->smuio.funcs->get_socket_id)
+               con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29);
+
        /* Get RAS schema for particular SOC */
        con->schema = amdgpu_get_ras_schema(adev);
 
index b0335a1c5e90cb8f000fe1989bfb20dfbbd53c58..19899f6b9b2b419a0fdf2ed84c71f0278963f511 100644 (file)
@@ -32,7 +32,6 @@ enum AMDGPU_RESET_FLAGS {
 
        AMDGPU_NEED_FULL_RESET = 0,
        AMDGPU_SKIP_HW_RESET = 1,
-       AMDGPU_RESET_FOR_DEVICE_REMOVE = 2,
 };
 
 struct amdgpu_reset_context {
index 45424ebf9681430fefc21bdc33d6aa2c6e5f6c91..5505d646f43aa8f963d8d8732846b00fc612a3a7 100644 (file)
@@ -635,6 +635,7 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
                              ring->name);
 
        ring->sched.ready = !r;
+
        return r;
 }
 
@@ -717,3 +718,14 @@ void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
        if (ring->is_sw_ring)
                amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
 }
+
+bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
+{
+       if (!ring)
+               return false;
+
+       if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
+               return false;
+
+       return true;
+}
index bbb53720a0181d93cf9fdfd6f7721ee006699004..fe1a61eb6e4c0809c1bccd41bc89f32bcd8304f2 100644 (file)
@@ -450,5 +450,5 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-
+bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
 #endif
index d334e42fe0ebe648e3efafb3970f650b615f716d..3e12763e477aa45724d0c16a1b514a5a299a76a9 100644 (file)
@@ -1062,7 +1062,7 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
 {
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
                amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
-                       (amdgpu_sriov_vf(adev) || fw_bo_location == 1) ?
+                       (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
                        AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
                        &adev->firmware.fw_buf,
                        &adev->firmware.fw_buf_mc,
index b6cd565562ad8d9a99270757fc2b37352600d2f3..4740dd65b99d6ccc107e5d63aba0f0d67d02d718 100644 (file)
@@ -116,7 +116,7 @@ struct amdgpu_mem_stats;
 #define AMDGPU_VM_FAULT_STOP_FIRST     1
 #define AMDGPU_VM_FAULT_STOP_ALWAYS    2
 
-/* Reserve 4MB VRAM for page tables */
+/* How much VRAM be reserved for page tables */
 #define AMDGPU_VM_RESERVED_VRAM                (8ULL << 20)
 
 /*
index 6f149b54d4d3970c5fe0a8255f8f7a080433381a..b9a15d51eb5c30e554d4e4f7c1397e3ce51996d9 100644 (file)
@@ -59,11 +59,8 @@ static inline uint16_t complete_integer_division_u16(
 
 static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
 {
-       bool arg1_negative = numerator < 0;
-       bool arg2_negative = denominator < 0;
-
-       uint16_t arg1_value = (uint16_t)(arg1_negative ? -numerator : numerator);
-       uint16_t arg2_value = (uint16_t)(arg2_negative ? -denominator : denominator);
+       u16 arg1_value = numerator;
+       u16 arg2_value = denominator;
 
        uint16_t remainder;
 
@@ -100,9 +97,6 @@ static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
                res_value += summand;
        }
 
-       if (arg1_negative ^ arg2_negative)
-               res_value = -res_value;
-
        return res_value;
 }
 
index 08916538a615ff3d072eb5241a97495795c7e32a..8db880244324ff1077ff3d87c20b7387ecd8b74b 100644 (file)
@@ -221,8 +221,23 @@ static struct attribute *amdgpu_vram_mgr_attributes[] = {
        NULL
 };
 
+static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
+                                           struct attribute *attr, int i)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(ddev);
+
+       if (attr == &dev_attr_mem_info_vram_vendor.attr &&
+           !adev->gmc.vram_vendor)
+               return 0;
+
+       return attr->mode;
+}
+
 const struct attribute_group amdgpu_vram_mgr_attr_group = {
-       .attrs = amdgpu_vram_mgr_attributes
+       .attrs = amdgpu_vram_mgr_attributes,
+       .is_visible = amdgpu_vram_attrs_is_visible
 };
 
 /**
index 6cab882e8061e80f33bca5eb1a7b59d8cf0a687f..1592c63b3099b982d0b9bdda596919da8ec14f5f 100644 (file)
@@ -43,7 +43,6 @@ struct amdgpu_hive_info {
        } pstate;
 
        struct amdgpu_reset_domain *reset_domain;
-       uint32_t device_remove_count;
        atomic_t ras_recovery;
 };
 
index f0737fb3a999e03a44eb3c08f6e0099e3326c929..d1bba9c64e16d808fbaafd4e01d8764cb77b1a86 100644 (file)
@@ -30,6 +30,8 @@
 
 #define regATHUB_MISC_CNTL_V3_0_1                      0x00d7
 #define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX             0
+#define regATHUB_MISC_CNTL_V3_3_0                      0x00d8
+#define regATHUB_MISC_CNTL_V3_3_0_BASE_IDX             0
 
 
 static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
@@ -40,6 +42,9 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
        case IP_VERSION(3, 0, 1):
                data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
                break;
+       case IP_VERSION(3, 3, 0):
+               data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0);
+               break;
        default:
                data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
                break;
@@ -53,6 +58,9 @@ static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
        case IP_VERSION(3, 0, 1):
                WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
                break;
+       case IP_VERSION(3, 3, 0):
+               WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0, data);
+               break;
        default:
                WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
                break;
index 6f7c031dd197a22e388ddcfaed56ec75e37cafe5..f24e34dc33d1defcd70cab67f1423dffd31e8f08 100644 (file)
@@ -204,6 +204,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
                tmp = RREG32(mmIH_RB_CNTL);
                tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
                WREG32(mmIH_RB_CNTL, tmp);
+
+               /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+                * can be detected.
+                */
+               tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+               WREG32(mmIH_RB_CNTL, tmp);
        }
        return (wptr & ih->ptr_mask);
 }
index b8c47e0cf37ad53bcb3f1afe161e6356b91789e3..c19681492efa748bf7b5d92864dbdc61c0351520 100644 (file)
@@ -216,6 +216,11 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32(mmIH_RB_CNTL, tmp);
 
+       /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+        * can be detected.
+        */
+       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+       WREG32(mmIH_RB_CNTL, tmp);
 
 out:
        return (wptr & ih->ptr_mask);
index 73f6d7e72c737537f17264746b061a936b4960e5..dcdecb18b2306b84ca1b18852837409776707c69 100644 (file)
@@ -3996,16 +3996,13 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 
        if (!amdgpu_sriov_vf(adev)) {
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
-               err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
-               /* don't check this.  There are apparently firmwares in the wild with
-                * incorrect size in the header
-                */
-               if (err == -ENODEV)
-                       goto out;
+               err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
                if (err)
-                       dev_dbg(adev->dev,
-                               "gfx10: amdgpu_ucode_request() failed \"%s\"\n",
-                               fw_name);
+                       goto out;
+
+               /* don't validate this firmware. There are apparently firmwares
+                * in the wild with incorrect size in the header
+                */
                rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
                version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
                version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
@@ -4030,8 +4027,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
                err = 0;
                adev->gfx.mec2_fw = NULL;
        }
-       amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
-       amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
 
        gfx_v10_0_check_fw_write_wait(adev);
 out:
@@ -6592,7 +6587,7 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
 #ifdef __BIG_ENDIAN
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
 #endif
-       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
                            prop->allow_tunneling);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
index 2fbcd9765980d01a79b9b295fc28d82a909c69a9..4f3bfdc75b37d66cbc5d78a5525a8a905eb1e733 100644 (file)
@@ -107,23 +107,6 @@ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
 };
 
-static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = {
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x8000b007),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f)
-};
-
 #define DEFAULT_SH_MEM_CONFIG \
        ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
         (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -304,11 +287,6 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
                                                golden_settings_gc_11_0_1,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
                break;
-       case IP_VERSION(11, 5, 0):
-               soc15_program_register_sequence(adev,
-                                               golden_settings_gc_11_5_0,
-                                               (const u32)ARRAY_SIZE(golden_settings_gc_11_5_0));
-               break;
        default:
                break;
        }
@@ -3846,7 +3824,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
                            (order_base_2(prop->queue_size / 4) - 1));
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
                            (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
-       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
                            prop->allow_tunneling);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
@@ -6383,6 +6361,9 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+                       bitmap = i * adev->gfx.config.max_sh_per_se + j;
+                       if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
+                               continue;
                        mask = 1;
                        counter = 0;
                        gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
index 69c500910746018281471ad6d27350aaf2461702..3bc6943365a4ff36a32827ae2d477aac6883631d 100644 (file)
@@ -3034,6 +3034,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
 
        gfx_v9_0_cp_gfx_enable(adev, true);
 
+       /* Now only limit the quirk on the APU gfx9 series and already
+        * confirmed that the APU gfx10/gfx11 needn't such update.
+        */
+       if (adev->flags & AMD_IS_APU &&
+                       adev->in_s3 && !adev->suspend_complete) {
+               DRM_INFO(" Will skip the CSB packet resubmit\n");
+               return 0;
+       }
        r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
index 95d06da544e2a54ebe6fb10ad1309a0c8074814f..49aecdcee006959491e4dba90058faf35e205fdb 100644 (file)
@@ -456,10 +456,12 @@ static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
                WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);
 
                /* Setup L2 cache */
-               tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
-               tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
-               WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
-               WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+               if (!amdgpu_sriov_vf(adev)) {
+                       tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
+                       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+                       WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
+                       WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+               }
        }
 }
 
index 42e103d7077d52d5bbe556f70f2b03bb0d5ae8db..59d9215e555629577b43afcba38e945f5ce90bcd 100644 (file)
@@ -915,8 +915,8 @@ static int gmc_v6_0_hw_init(void *handle)
 
        if (amdgpu_emu_mode == 1)
                return amdgpu_gmc_vram_checking(adev);
-       else
-               return r;
+
+       return 0;
 }
 
 static int gmc_v6_0_hw_fini(void *handle)
index efc16e580f1e27e384b7c80323c72d0e59fba473..45a2f8e031a2c9920f3a68ae690731357f33da0c 100644 (file)
@@ -1099,8 +1099,8 @@ static int gmc_v7_0_hw_init(void *handle)
 
        if (amdgpu_emu_mode == 1)
                return amdgpu_gmc_vram_checking(adev);
-       else
-               return r;
+
+       return 0;
 }
 
 static int gmc_v7_0_hw_fini(void *handle)
index ff4ae73d27ecd26aaf399bdfe158e22c1de3009f..4422b27a3cc2fc069a6ecb3e6d8b9630e9c173cc 100644 (file)
@@ -1219,8 +1219,8 @@ static int gmc_v8_0_hw_init(void *handle)
 
        if (amdgpu_emu_mode == 1)
                return amdgpu_gmc_vram_checking(adev);
-       else
-               return r;
+
+       return 0;
 }
 
 static int gmc_v8_0_hw_fini(void *handle)
index f9039d64ff2d72804556daa16b8ed9632b08b307..e67a62db9e12629b40c92f322922cc763ce53ce7 100644 (file)
@@ -1947,13 +1947,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
 
 static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
 {
-       static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
-       u32 vram_info;
-
-       if (!amdgpu_sriov_vf(adev)) {
-               vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
-               adev->gmc.vram_vendor = vram_info & 0xF;
-       }
        adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
        adev->gmc.vram_width = 128 * 64;
 }
@@ -2340,8 +2333,8 @@ static int gmc_v9_0_hw_init(void *handle)
 
        if (amdgpu_emu_mode == 1)
                return amdgpu_gmc_vram_checking(adev);
-       else
-               return r;
+
+       return 0;
 }
 
 /**
index aecad530b10a61289f9e2413612bbf58a33cec22..2c02ae69883d2bb86bec8e1d1fb521f8481d7ebb 100644 (file)
@@ -215,6 +215,11 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32(mmIH_RB_CNTL, tmp);
 
+       /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+        * can be detected.
+        */
+       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+       WREG32(mmIH_RB_CNTL, tmp);
 
 out:
        return (wptr & ih->ptr_mask);
index d9ed7332d805d3fca1bd0343ebc804e69dc44595..ad4ad39f128f7d7f788a866d36cc7c8175743b5d 100644 (file)
@@ -418,6 +418,12 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
        tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+       /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+        * can be detected.
+        */
+       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+       WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
 out:
        return (wptr & ih->ptr_mask);
 }
index 8fb05eae340ad298653afaca4edccfce86741c84..b8da0fc29378c496ba0392e10105d1c58d53bf5a 100644 (file)
@@ -418,6 +418,13 @@ static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev,
        tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+       /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+        * can be detected.
+        */
+       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+       WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
 out:
        return (wptr & ih->ptr_mask);
 }
index bc38b90f8cf88e8fee393e8e52214ac72f0aa8a6..88ea58d5c4abf5b0f20abff28f9833f402e4b016 100644 (file)
@@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle,
        return ret;
 }
 
-static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
-                                       struct amdgpu_irq_src *source,
-                                       unsigned type,
-                                       enum amdgpu_interrupt_state state)
-{
-       return 0;
-}
-
 static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned int type,
@@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
-       .set = jpeg_v4_0_set_interrupt_state,
        .process = jpeg_v4_0_process_interrupt,
 };
 
index 6ede85b28cc8c0bbfd6a7e94c6a3d1a677e958bf..78b74daf4eebfc30f04ee4aaf6d0ff92891ff30f 100644 (file)
@@ -181,7 +181,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle)
                        RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
                        jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
        }
-       amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
 
        return 0;
 }
@@ -516,14 +515,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle,
        return ret;
 }
 
-static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev,
-                                       struct amdgpu_irq_src *source,
-                                       unsigned type,
-                                       enum amdgpu_interrupt_state state)
-{
-       return 0;
-}
-
 static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
@@ -603,7 +594,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
-       .set = jpeg_v4_0_5_set_interrupt_state,
        .process = jpeg_v4_0_5_process_interrupt,
 };
 
index e64b33115848d204a4d81eb9530df5bf95fdf796..de93614726c9a48ccd398c6ac5570a8844fb7618 100644 (file)
@@ -442,6 +442,12 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
        tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+       /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+        * can be detected.
+        */
+       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+       WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
 out:
        return (wptr & ih->ptr_mask);
 }
index 6d24c84924cb5dd646ddaa69bb91a3193493b5f4..19986ff6a48d7e773dcc892b9dccd585fe69c306 100644 (file)
@@ -401,8 +401,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 
                        if (err_data.ce_count)
                                dev_info(adev->dev, "%ld correctable hardware "
-                                               "errors detected in %s block, "
-                                               "no user action is needed.\n",
+                                               "errors detected in %s block\n",
                                                obj->err_data.ce_count,
                                                get_ras_block_str(adev->nbio.ras_if));
 
index 25a3da83e0fb97e5949221d17e3fcd63062dd29c..b4723d68eab0f939ba057b67cf7712ddb512c8c8 100644 (file)
@@ -431,6 +431,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
        u32 inst_mask;
        int i;
 
+       if (amdgpu_sriov_vf(adev))
+               adev->rmmio_remap.reg_offset =
+                       SOC15_REG_OFFSET(
+                               NBIO, 0,
+                               regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
+                       << 2;
        WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
                0xff & ~(adev->gfx.xcc_mask));
 
@@ -597,8 +603,7 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 
                        if (err_data.ce_count)
                                dev_info(adev->dev, "%ld correctable hardware "
-                                               "errors detected in %s block, "
-                                               "no user action is needed.\n",
+                                               "errors detected in %s block\n",
                                                obj->err_data.ce_count,
                                                get_ras_block_str(adev->nbio.ras_if));
 
index 9a24f17a57502edaa744451bd312dfcd8b3d678c..cada9f300a7f510a3f025c3ed17c87aedcbbaeb5 100644 (file)
@@ -119,6 +119,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
                WREG32(IH_RB_CNTL, tmp);
+
+               /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+                * can be detected.
+                */
+               tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+               WREG32(IH_RB_CNTL, tmp);
        }
        return (wptr & ih->ptr_mask);
 }
index 15033efec2bac0148e5d9381027a6ee3e70334b7..c64c01e2944a2e4c1f4177355771a1b47cfcc666 100644 (file)
@@ -1298,10 +1298,32 @@ static int soc15_common_suspend(void *handle)
        return soc15_common_hw_fini(adev);
 }
 
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+       u32 sol_reg;
+
+       sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+       /* Will reset for the following suspend abort cases.
+        * 1) Only reset limit on APU side, dGPU hasn't checked yet.
+        * 2) S3 suspend abort and TOS already launched.
+        */
+       if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+                       !adev->suspend_complete &&
+                       sol_reg)
+               return true;
+
+       return false;
+}
+
 static int soc15_common_resume(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (soc15_need_reset_on_resume(adev)) {
+               dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
+               soc15_asic_reset(adev);
+       }
        return soc15_common_hw_init(adev);
 }
 
index 917707bba7f3624e37b0525d3ec72bf563c1307a..450b6e8315091448c24e2d90dcd4edccc9d4423c 100644 (file)
@@ -219,6 +219,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32(mmIH_RB_CNTL, tmp);
 
+       /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+        * can be detected.
+        */
+       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+       WREG32(mmIH_RB_CNTL, tmp);
+
 out:
        return (wptr & ih->ptr_mask);
 }
index 530549314ce46c541a192305d1a7e1db17f11ebf..a3ee3c4c650febb4ca6fa61c9b7b5b51f16ce60c 100644 (file)
@@ -64,7 +64,7 @@ static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
        uint64_t reg_value;
 
        if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
-               dev_info(adev->dev, "Deferred error, no user action is needed.\n");
+               dev_info(adev->dev, "Deferred error\n");
 
        if (mc_umc_status)
                dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
index 169ed400ee7b7413263ab48a2de1e75aa3ed00f7..8ab01ae919d2e36c8ff1c2226227c173223247be 100644 (file)
@@ -2017,22 +2017,6 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
        return ret;
 }
 
-/**
- * vcn_v4_0_set_interrupt_state - set VCN block interrupt state
- *
- * @adev: amdgpu_device pointer
- * @source: interrupt sources
- * @type: interrupt types
- * @state: interrupt states
- *
- * Set VCN block interrupt state
- */
-static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
-      unsigned type, enum amdgpu_interrupt_state state)
-{
-       return 0;
-}
-
 /**
  * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
  *
@@ -2097,7 +2081,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
 }
 
 static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
-       .set = vcn_v4_0_set_interrupt_state,
        .process = vcn_v4_0_process_interrupt,
 };
 
index 2eda30e78f61d928984cf57b94337abc7b9cfc0a..49e4c3c09acab8eab12770325f4cf48c8c491b7c 100644 (file)
@@ -269,8 +269,6 @@ static int vcn_v4_0_5_hw_fini(void *handle)
                                vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
                        }
                }
-
-               amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
        }
 
        return 0;
@@ -1668,22 +1666,6 @@ static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_s
        return ret;
 }
 
-/**
- * vcn_v4_0_5_set_interrupt_state - set VCN block interrupt state
- *
- * @adev: amdgpu_device pointer
- * @source: interrupt sources
- * @type: interrupt types
- * @state: interrupt states
- *
- * Set VCN block interrupt state
- */
-static int vcn_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
-               unsigned type, enum amdgpu_interrupt_state state)
-{
-       return 0;
-}
-
 /**
  * vcn_v4_0_5_process_interrupt - process VCN block interrupt
  *
@@ -1726,7 +1708,6 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp
 }
 
 static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = {
-       .set = vcn_v4_0_5_set_interrupt_state,
        .process = vcn_v4_0_5_process_interrupt,
 };
 
index d364c6dd152c33b7fc1fbc614668b2dd4ffe223a..bf68e18e3824b8e492c2451b655bfcf5068910f6 100644 (file)
@@ -373,6 +373,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
 
+       /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+        * can be detected.
+        */
+       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+       WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
 out:
        return (wptr & ih->ptr_mask);
 }
index ddfc6941f9d559c916fe2cdb66b4e27394f1d618..db66e6cccaf2aa4e596a8f377eed8030c55159b7 100644 (file)
@@ -421,6 +421,12 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
 
+       /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+        * can be detected.
+        */
+       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+       WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
 out:
        return (wptr & ih->ptr_mask);
 }
index df75863393fcb887613fb4dc054977fb46a49b1e..d1caaf0e6a7c4eaed98fc8f390781719bf28b846 100644 (file)
@@ -674,7 +674,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
        0x86ea6a6a, 0x8f6e837a,
        0xb96ee0c2, 0xbf800002,
        0xb97a0002, 0xbf8a0000,
-       0xbe801f6c, 0xbf810000,
+       0xbe801f6c, 0xbf9b0000,
 };
 
 static const uint32_t cwsr_trap_nv1x_hex[] = {
@@ -1091,7 +1091,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
        0xb9eef807, 0x876dff6d,
        0x0000ffff, 0x87fe7e7e,
        0x87ea6a6a, 0xb9faf802,
-       0xbe80226c, 0xbf810000,
+       0xbe80226c, 0xbf9b0000,
        0xbf9f0000, 0xbf9f0000,
        0xbf9f0000, 0xbf9f0000,
        0xbf9f0000, 0x00000000,
@@ -1574,7 +1574,7 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
        0x86ea6a6a, 0x8f6e837a,
        0xb96ee0c2, 0xbf800002,
        0xb97a0002, 0xbf8a0000,
-       0xbe801f6c, 0xbf810000,
+       0xbe801f6c, 0xbf9b0000,
 };
 
 static const uint32_t cwsr_trap_aldebaran_hex[] = {
@@ -2065,7 +2065,7 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
        0x86ea6a6a, 0x8f6e837a,
        0xb96ee0c2, 0xbf800002,
        0xb97a0002, 0xbf8a0000,
-       0xbe801f6c, 0xbf810000,
+       0xbe801f6c, 0xbf9b0000,
 };
 
 static const uint32_t cwsr_trap_gfx10_hex[] = {
@@ -2500,7 +2500,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
        0x876dff6d, 0x0000ffff,
        0x87fe7e7e, 0x87ea6a6a,
        0xb9faf802, 0xbe80226c,
-       0xbf810000, 0xbf9f0000,
+       0xbf9b0000, 0xbf9f0000,
        0xbf9f0000, 0xbf9f0000,
        0xbf9f0000, 0xbf9f0000,
 };
@@ -2944,7 +2944,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
        0xb8eef802, 0xbf0d866e,
        0xbfa20002, 0xb97af802,
        0xbe80486c, 0xb97af802,
-       0xbe804a6c, 0xbfb00000,
+       0xbe804a6c, 0xbfb10000,
        0xbf9f0000, 0xbf9f0000,
        0xbf9f0000, 0xbf9f0000,
        0xbf9f0000, 0x00000000,
@@ -3436,5 +3436,5 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
        0x86ea6a6a, 0x8f6e837a,
        0xb96ee0c2, 0xbf800002,
        0xb97a0002, 0xbf8a0000,
-       0xbe801f6c, 0xbf810000,
+       0xbe801f6c, 0xbf9b0000,
 };
index e0140df0b0ec8086433048adb31a06ca6aca740d..71b3dc0c73634aef86846be3669723590ca55db9 100644 (file)
@@ -1104,7 +1104,7 @@ L_RETURN_WITHOUT_PRIV:
        s_rfe_b64       s_restore_pc_lo                                         //Return to the main shader program and resume execution
 
 L_END_PGM:
-       s_endpgm
+       s_endpgm_saved
 end
 
 function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
index e506411ad28ab99f474eca96ff37254fb43078de..bb26338204f4ba84b5ae41a781e1becdf9ad72bb 100644 (file)
@@ -921,7 +921,7 @@ L_RESTORE:
 /*                     the END                                           */
 /**************************************************************************/
 L_END_PGM:
-    s_endpgm
+    s_endpgm_saved
 
 end
 
index ce4c52ec34d80eabb7f7664051ccebcd2f0ec64e..80e90fdef291d5b8cdcf7d08c6e319150fcf631b 100644 (file)
@@ -1442,7 +1442,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
                        kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
 
                /* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
-               amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+               err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+               if (err)
+                       goto sync_memory_failed;
        }
 
        mutex_unlock(&p->mutex);
index d630100b9e91b8588dc2e9611e279fba909e54c5..bdc01ca9609a7e57fac05ee60d6866a5950e2b07 100644 (file)
@@ -574,7 +574,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
        pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
                 prange->last);
 
-       addr = prange->start << PAGE_SHIFT;
+       addr = migrate->start;
 
        src = (uint64_t *)(scratch + npages);
        dst = scratch;
@@ -1026,7 +1026,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
        } else {
                res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
                if (IS_ERR(res))
-                       return -ENOMEM;
+                       return PTR_ERR(res);
                pgmap->range.start = res->start;
                pgmap->range.end = res->end;
                pgmap->type = MEMORY_DEVICE_PRIVATE;
@@ -1042,10 +1042,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
        r = devm_memremap_pages(adev->dev, pgmap);
        if (IS_ERR(r)) {
                pr_err("failed to register HMM device memory\n");
-               /* Disable SVM support capability */
-               pgmap->type = 0;
                if (pgmap->type == MEMORY_DEVICE_PRIVATE)
                        devm_release_mem_region(adev->dev, res->start, resource_size(res));
+               /* Disable SVM support capability */
+               pgmap->type = 0;
                return PTR_ERR(r);
        }
 
index 8b7fed91352696cf2b5cafab0680ad0737fa95ee..22cbfa1bdaddb9a764053421b16159391c1ba56d 100644 (file)
@@ -170,6 +170,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
        m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
        m->cp_hqd_pq_control |=
                        ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+       m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
        pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
 
        m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
index 15277f1d5cf0a9d9eb694ccaeec540e467ab774a..d722cbd317834a8a893a0ed5a847feb3a51d6961 100644 (file)
@@ -224,6 +224,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
        m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
        m->cp_hqd_pq_control |=
                        ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+       m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
        pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
 
        m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
index 745024b313401261a73900360c3d50f62d8440d8..677281c0793e23a694eb5a7bba9b5f9fd48f61d8 100644 (file)
@@ -917,7 +917,7 @@ struct kfd_process {
         * fence will be triggered during eviction and new one will be created
         * during restore
         */
-       struct dma_fence *ef;
+       struct dma_fence __rcu *ef;
 
        /* Work items for evicting and restoring BOs */
        struct delayed_work eviction_work;
@@ -1488,10 +1488,15 @@ void kfd_dec_compute_active(struct kfd_node *dev);
 
 /* Cgroup Support */
 /* Check with device cgroup if @kfd device is accessible */
-static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
+static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
 {
 #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
-       struct drm_device *ddev = adev_to_drm(kfd->adev);
+       struct drm_device *ddev;
+
+       if (node->xcp)
+               ddev = node->xcp->ddev;
+       else
+               ddev = adev_to_drm(node->adev);
 
        return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
                                          ddev->render->index,
index 71df51fcc1b0d80f42899a0e15ae454b3f03f2bc..717a60d7a4ea953b8dfc369b09d855ad74b49659 100644 (file)
@@ -1110,6 +1110,7 @@ static void kfd_process_wq_release(struct work_struct *work)
 {
        struct kfd_process *p = container_of(work, struct kfd_process,
                                             release_work);
+       struct dma_fence *ef;
 
        kfd_process_dequeue_from_all_devices(p);
        pqm_uninit(&p->pqm);
@@ -1118,7 +1119,9 @@ static void kfd_process_wq_release(struct work_struct *work)
         * destroyed. This allows any BOs to be freed without
         * triggering pointless evictions or waiting for fences.
         */
-       dma_fence_signal(p->ef);
+       synchronize_rcu();
+       ef = rcu_access_pointer(p->ef);
+       dma_fence_signal(ef);
 
        kfd_process_remove_sysfs(p);
 
@@ -1127,7 +1130,7 @@ static void kfd_process_wq_release(struct work_struct *work)
        svm_range_list_fini(p);
 
        kfd_process_destroy_pdds(p);
-       dma_fence_put(p->ef);
+       dma_fence_put(ef);
 
        kfd_event_free_process(p);
 
index ac84c4a2ca072a7629f1f933214bd2d17a230ac7..c50a0dc9c9c072f5692d003bce90aaaf13615c5d 100644 (file)
@@ -404,14 +404,9 @@ static void svm_range_bo_release(struct kref *kref)
                spin_lock(&svm_bo->list_lock);
        }
        spin_unlock(&svm_bo->list_lock);
-       if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
-               /* We're not in the eviction worker.
-                * Signal the fence and synchronize with any
-                * pending eviction work.
-                */
+       if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
+               /* We're not in the eviction worker. Signal the fence. */
                dma_fence_signal(&svm_bo->eviction_fence->base);
-               cancel_work_sync(&svm_bo->eviction_work);
-       }
        dma_fence_put(&svm_bo->eviction_fence->base);
        amdgpu_bo_unref(&svm_bo->bo);
        kfree(svm_bo);
@@ -2345,8 +2340,10 @@ retry:
                mutex_unlock(&svms->lock);
                mmap_write_unlock(mm);
 
-               /* Pairs with mmget in svm_range_add_list_work */
-               mmput(mm);
+               /* Pairs with mmget in svm_range_add_list_work. If dropping the
+                * last mm refcount, schedule release work to avoid circular locking
+                */
+               mmput_async(mm);
 
                spin_lock(&svms->deferred_list_lock);
        }
@@ -2657,6 +2654,7 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
 {
        struct vm_area_struct *vma;
        struct interval_tree_node *node;
+       struct rb_node *rb_node;
        unsigned long start_limit, end_limit;
 
        vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
@@ -2676,16 +2674,15 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
        if (node) {
                end_limit = min(end_limit, node->start);
                /* Last range that ends before the fault address */
-               node = container_of(rb_prev(&node->rb),
-                                   struct interval_tree_node, rb);
+               rb_node = rb_prev(&node->rb);
        } else {
                /* Last range must end before addr because
                 * there was no range after addr
                 */
-               node = container_of(rb_last(&p->svms.objects.rb_root),
-                                   struct interval_tree_node, rb);
+               rb_node = rb_last(&p->svms.objects.rb_root);
        }
-       if (node) {
+       if (rb_node) {
+               node = container_of(rb_node, struct interval_tree_node, rb);
                if (node->last >= addr) {
                        WARN(1, "Overlap with prev node and page fault addr\n");
                        return -EFAULT;
@@ -3432,13 +3429,14 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
 
 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
 {
-       if (!fence)
-               return -EINVAL;
-
-       if (dma_fence_is_signaled(&fence->base))
-               return 0;
-
-       if (fence->svm_bo) {
+       /* Dereferencing fence->svm_bo is safe here because the fence hasn't
+        * signaled yet and we're under the protection of the fence->lock.
+        * After the fence is signaled in svm_range_bo_release, we cannot get
+        * here any more.
+        *
+        * Reference is dropped in svm_range_evict_svm_bo_worker.
+        */
+       if (svm_bo_ref_unless_zero(fence->svm_bo)) {
                WRITE_ONCE(fence->svm_bo->evicting, 1);
                schedule_work(&fence->svm_bo->eviction_work);
        }
@@ -3453,8 +3451,6 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
        int r = 0;
 
        svm_bo = container_of(work, struct svm_range_bo, eviction_work);
-       if (!svm_bo_ref_unless_zero(svm_bo))
-               return; /* svm_bo was freed while eviction was pending */
 
        if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
                mm = svm_bo->eviction_fence->mm;
index f6575d7dee97150146c6112c2be4fea52ddbe20a..59d2eee72a3297f4f9b7fbc70040391641f7c67f 100644 (file)
@@ -272,6 +272,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 {
        u32 v_blank_start, v_blank_end, h_position, v_position;
        struct amdgpu_crtc *acrtc = NULL;
+       struct dc *dc = adev->dm.dc;
 
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
@@ -284,6 +285,9 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                return 0;
        }
 
+       if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+               dc_allow_idle_optimizations(dc, false);
+
        /*
         * TODO rework base driver to use values directly.
         * for now parse it back into reg-format
@@ -1715,7 +1719,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
        init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
 
-       init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+       if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
+               init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+
+       init_data.flags.disable_ips_in_vpb = 1;
 
        /* Enable DWB for tested platforms only */
        if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
@@ -7615,7 +7622,6 @@ create_i2c(struct ddc_service *ddc_service,
        if (!i2c)
                return NULL;
        i2c->base.owner = THIS_MODULE;
-       i2c->base.class = I2C_CLASS_DDC;
        i2c->base.dev.parent = &adev->pdev->dev;
        i2c->base.algo = &amdgpu_dm_i2c_algo;
        snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
@@ -8977,16 +8983,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
        trace_amdgpu_dm_atomic_commit_tail_begin(state);
 
-       if (dm->dc->caps.ips_support) {
-               for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
-                       if (new_con_state->crtc &&
-                               new_con_state->crtc->state->active &&
-                               drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
-                               dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
-                               break;
-                       }
-               }
-       }
+       if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
+               dc_allow_idle_optimizations(dm->dc, false);
 
        drm_atomic_helper_update_legacy_modeset_state(dev, state);
        drm_dp_mst_atomic_wait_for_dependencies(state);
@@ -9189,6 +9187,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                 * To fix this, DC should permit updating only stream properties.
                 */
                dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+               if (!dummy_updates) {
+                       DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+                       continue;
+               }
                for (j = 0; j < status->plane_count; j++)
                        dummy_updates[j].surface = status->plane_states[0];
 
@@ -9293,10 +9295,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                if (!new_con_state->writeback_job)
                        continue;
 
-               new_crtc_state = NULL;
+               new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
 
-               if (acrtc)
-                       new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+               if (!new_crtc_state)
+                       continue;
 
                if (acrtc->wb_enabled)
                        continue;
@@ -10729,11 +10731,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
                }
 
-               ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
-               if (ret) {
-                       DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
-                       ret = -EINVAL;
-                       goto fail;
+               if (dc_resource_is_dsc_encoding_supported(dc)) {
+                       ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+                       if (ret) {
+                               DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+                               ret = -EINVAL;
+                               goto fail;
+                       }
                }
 
                ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
@@ -10753,7 +10757,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
                        goto fail;
                }
-               status = dc_validate_global_state(dc, dm_state->context, false);
+               status = dc_validate_global_state(dc, dm_state->context, true);
                if (status != DC_OK) {
                        DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
                                       dc_status_to_str(status), status);
index 9b527bffe11a1f55e1a63d94c937c829e0d6f820..c87b64e464ed5c8e13c6fb823b8bf9ca0bcfe0fc 100644 (file)
@@ -1239,7 +1239,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
        if (has_crtc_cm_degamma && ret != -EINVAL) {
                drm_dbg_kms(crtc->base.crtc->dev,
                            "doesn't support plane and CRTC degamma at the same time\n");
-                       return -EINVAL;
+               return -EINVAL;
        }
 
        /* If we are here, it means we don't have plane degamma settings, check
index eaf8d9f482446d5ea9728ec17657189e25917ae8..85b7f58a7f35a478f551ec097b1613b504ced535 100644 (file)
@@ -979,6 +979,11 @@ int dm_helper_dmub_aux_transfer_sync(
                struct aux_payload *payload,
                enum aux_return_code_type *operation_result)
 {
+       if (!link->hpd_status) {
+               *operation_result = AUX_RET_ERROR_HPD_DISCON;
+               return -1;
+       }
+
        return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
                        operation_result);
 }
index 58b880acb087ae73352e9ac487d5ccd033f03f4d..3390f0d8420a05dc6d9daae1f3d8c4f53de57aed 100644 (file)
@@ -711,7 +711,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
 {
        bool st;
        enum dc_irq_source irq_source;
-
+       struct dc *dc = adev->dm.dc;
        struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
 
        if (!acrtc) {
@@ -729,6 +729,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
 
        st = (state == AMDGPU_IRQ_STATE_ENABLE);
 
+       if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+               dc_allow_idle_optimizations(dc, false);
+
        dc_interrupt_set(adev->dm.dc, irq_source, st);
        return 0;
 }
index 7575282563267c6cd33872debc3f59b7819d35f5..a84f1e376dee45f7fbefea37053c0df57074789a 100644 (file)
@@ -87,6 +87,20 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
 #define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK     0x0000F000L
 #define CLK1_CLK_PLL_REQ__FbMult_frac_MASK     0xFFFF0000L
 
+#define regCLK1_CLK2_BYPASS_CNTL                       0x029c
+#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX      0
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT  0x0
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT  0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK            0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK            0x000F0000L
+
+#define regCLK6_0_CLK6_spll_field_8                            0x464b
+#define regCLK6_0_CLK6_spll_field_8_BASE_IDX   0
+
+#define CLK6_0_CLK6_spll_field_8__spll_ssc_en__SHIFT   0xd
+#define CLK6_0_CLK6_spll_field_8__spll_ssc_en_MASK             0x00002000L
+
 #define REG(reg_name) \
        (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
 
@@ -131,35 +145,63 @@ static int dcn314_get_active_display_cnt_wa(
        return display_count;
 }
 
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+                                 bool safe_to_lower, bool disable)
 {
        struct dc *dc = clk_mgr_base->ctx->dc;
        int i;
 
        for (i = 0; i < dc->res_pool->pipe_count; ++i) {
-               struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *pipe = safe_to_lower
+                       ? &context->res_ctx.pipe_ctx[i]
+                       : &dc->current_state->res_ctx.pipe_ctx[i];
 
                if (pipe->top_pipe || pipe->prev_odm_pipe)
                        continue;
                if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
-                       struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
-
                        if (disable) {
-                               if (stream_enc && stream_enc->funcs->disable_fifo)
-                                       pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
+                               if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+                                       pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
 
-                               pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
                                reset_sync_context_for_pipe(dc, context, i);
                        } else {
                                pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
-
-                               if (stream_enc && stream_enc->funcs->enable_fifo)
-                                       pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
                        }
                }
        }
 }
 
+bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+       struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+       uint32_t ssc_enable;
+
+       REG_GET(CLK6_0_CLK6_spll_field_8, spll_ssc_en, &ssc_enable);
+
+       return ssc_enable == 1;
+}
+
+void dcn314_init_clocks(struct clk_mgr *clk_mgr)
+{
+       struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+       uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
+       memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+       // Assumption is that boot state always supports pstate
+       clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;      // restore ref_dtbclk
+       clk_mgr->clks.p_state_change_support = true;
+       clk_mgr->clks.prev_p_state_change_support = true;
+       clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+       clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+
+       // to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk
+       if (dcn314_is_spll_ssc_enabled(clk_mgr))
+               clk_mgr->dp_dto_source_clock_in_khz =
+                       dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+       else
+               clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+}
+
 void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
                        struct dc_state *context,
                        bool safe_to_lower)
@@ -252,11 +294,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
        }
 
        if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-               dcn314_disable_otg_wa(clk_mgr_base, context, true);
+               dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
 
                clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
                dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-               dcn314_disable_otg_wa(clk_mgr_base, context, false);
+               dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
 
                update_dispclk = true;
        }
@@ -436,6 +478,11 @@ static DpmClocks314_t dummy_clocks;
 
 static struct dcn314_watermarks dummy_wms = { 0 };
 
+static struct dcn314_ss_info_table ss_info_table = {
+       .ss_divider = 1000,
+       .ss_percentage = {0, 0, 375, 375, 375}
+};
+
 static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table)
 {
        int i, num_valid_sets;
@@ -708,13 +755,31 @@ static struct clk_mgr_funcs dcn314_funcs = {
        .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
        .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
        .update_clocks = dcn314_update_clocks,
-       .init_clocks = dcn31_init_clocks,
+       .init_clocks = dcn314_init_clocks,
        .enable_pme_wa = dcn314_enable_pme_wa,
        .are_clock_states_equal = dcn314_are_clock_states_equal,
        .notify_wm_ranges = dcn314_notify_wm_ranges
 };
 extern struct clk_mgr_funcs dcn3_fpga_funcs;
 
+static void dcn314_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+       uint32_t clock_source;
+       //uint32_t ssc_enable;
+
+       REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+       //REG_GET(CLK6_0_CLK6_spll_field_8, spll_ssc_en, &ssc_enable);
+
+       if (dcn314_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+               clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+               if (clk_mgr->dprefclk_ss_percentage != 0) {
+                       clk_mgr->ss_on_dprefclk = true;
+                       clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+               }
+       }
+}
+
 void dcn314_clk_mgr_construct(
                struct dc_context *ctx,
                struct clk_mgr_dcn314 *clk_mgr,
@@ -782,6 +847,7 @@ void dcn314_clk_mgr_construct(
        clk_mgr->base.base.dprefclk_khz = 600000;
        clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
        dce_clock_read_ss_info(&clk_mgr->base);
+       dcn314_read_ss_info_from_lut(&clk_mgr->base);
        /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/
 
        clk_mgr->base.base.bw_params = &dcn314_bw_params;
index 171f84340eb2fb1d532776ac348cc1fbfad858f5..002c28e807208e584396fdc99dc1822072e8ffa5 100644 (file)
@@ -28,6 +28,8 @@
 #define __DCN314_CLK_MGR_H__
 #include "clk_mgr_internal.h"
 
+#define DCN314_NUM_CLOCK_SOURCES   5
+
 struct dcn314_watermarks;
 
 struct dcn314_smu_watermark_set {
@@ -40,9 +42,18 @@ struct clk_mgr_dcn314 {
        struct dcn314_smu_watermark_set smu_wm_set;
 };
 
+struct dcn314_ss_info_table {
+       uint32_t ss_divider;
+       uint32_t ss_percentage[DCN314_NUM_CLOCK_SOURCES];
+};
+
 bool dcn314_are_clock_states_equal(struct dc_clocks *a,
                struct dc_clocks *b);
 
+bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base);
+
+void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
 void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
                        struct dc_state *context,
                        bool safe_to_lower);
index 9c660d1facc7699d7a1b3f90292ae31d985fd259..14cec1c7b718c4ab48fbd9588f4b6465c13897cd 100644 (file)
@@ -437,32 +437,32 @@ static struct wm_table ddr5_wm_table = {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 14.0,
-                       .sr_enter_plus_exit_time_us = 16.0,
+                       .sr_exit_time_us = 28.0,
+                       .sr_enter_plus_exit_time_us = 30.0,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 14.0,
-                       .sr_enter_plus_exit_time_us = 16.0,
+                       .sr_exit_time_us = 28.0,
+                       .sr_enter_plus_exit_time_us = 30.0,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 14.0,
-                       .sr_enter_plus_exit_time_us = 16.0,
+                       .sr_exit_time_us = 28.0,
+                       .sr_enter_plus_exit_time_us = 30.0,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 14.0,
-                       .sr_enter_plus_exit_time_us = 16.0,
+                       .sr_exit_time_us = 28.0,
+                       .sr_enter_plus_exit_time_us = 30.0,
                        .valid = true,
                },
        }
@@ -474,32 +474,32 @@ static struct wm_table lpddr5_wm_table = {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 14.0,
-                       .sr_enter_plus_exit_time_us = 16.0,
+                       .sr_exit_time_us = 28.0,
+                       .sr_enter_plus_exit_time_us = 30.0,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 14.0,
-                       .sr_enter_plus_exit_time_us = 16.0,
+                       .sr_exit_time_us = 28.0,
+                       .sr_enter_plus_exit_time_us = 30.0,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 14.0,
-                       .sr_enter_plus_exit_time_us = 16.0,
+                       .sr_exit_time_us = 28.0,
+                       .sr_enter_plus_exit_time_us = 30.0,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 14.0,
-                       .sr_enter_plus_exit_time_us = 16.0,
+                       .sr_exit_time_us = 28.0,
+                       .sr_enter_plus_exit_time_us = 30.0,
                        .valid = true,
                },
        }
index 2d7205058c64abfece9cd154a4aebea8958e0168..2c424e435962d4ddd73648aeb3b531ad1bd7aa92 100644 (file)
@@ -411,12 +411,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
         * avoid conflicting with firmware updates.
         */
        if (dc->ctx->dce_version > DCE_VERSION_MAX)
-               if (dc->optimized_required)
+               if (dc->optimized_required || dc->wm_optimized_required)
                        return false;
 
-       if (!memcmp(&stream->adjust, adjust, sizeof(*adjust)))
-               return true;
-
        stream->adjust.v_total_max = adjust->v_total_max;
        stream->adjust.v_total_mid = adjust->v_total_mid;
        stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -2230,6 +2227,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
        }
 
        dc->optimized_required = false;
+       dc->wm_optimized_required = false;
 }
 
 bool dc_set_generic_gpio_for_stereo(bool enable,
@@ -2652,6 +2650,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
                } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
                        dc->optimized_required = true;
                }
+
+               dc->optimized_required |= dc->wm_optimized_required;
        }
 
        return type;
@@ -2859,6 +2859,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
        if (update->vrr_active_fixed)
                stream->vrr_active_fixed = *update->vrr_active_fixed;
 
+       if (update->crtc_timing_adjust)
+               stream->adjust = *update->crtc_timing_adjust;
+
        if (update->dpms_off)
                stream->dpms_off = *update->dpms_off;
 
@@ -3519,7 +3522,7 @@ static void commit_planes_for_stream(struct dc *dc,
        top_pipe_to_program = resource_get_otg_master_for_stream(
                                &context->res_ctx,
                                stream);
-
+       ASSERT(top_pipe_to_program != NULL);
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
@@ -3814,7 +3817,9 @@ static void commit_planes_for_stream(struct dc *dc,
                 * programming has completed (we turn on phantom OTG in order
                 * to complete the plane disable for phantom pipes).
                 */
-               dc->hwss.apply_ctx_to_hw(dc, context);
+
+               if (dc->hwss.disable_phantom_streams)
+                       dc->hwss.disable_phantom_streams(dc, context);
        }
 
        if (update_type != UPDATE_TYPE_FAST)
@@ -4288,7 +4293,8 @@ static bool full_update_required(struct dc *dc,
                        stream_update->mst_bw_update ||
                        stream_update->func_shaper ||
                        stream_update->lut3d_func ||
-                       stream_update->pending_test_pattern))
+                       stream_update->pending_test_pattern ||
+                       stream_update->crtc_timing_adjust))
                return true;
 
        if (stream) {
@@ -4341,6 +4347,8 @@ static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc,
 
        cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream);
        new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+       if (!cur_pipe || !new_pipe)
+               return false;
        cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1;
        new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1;
        if (cur_is_odm_in_use == new_is_odm_in_use)
index 57f0ddd1592399821222c4c5abd3708dbd2bd6b0..9fbdb09697fd5ea16abe86e4f970e80fb764ff7f 100644 (file)
@@ -2194,6 +2194,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
        for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
                otg_master = resource_get_otg_master_for_stream(
                                &state->res_ctx, state->streams[stream_idx]);
+               if (!otg_master || otg_master->stream_res.tg == NULL) {
+                       DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx);
+                       return;
+               }
                slice_count = resource_get_opp_heads_for_otg_master(otg_master,
                                &state->res_ctx, opp_heads);
                for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
@@ -4986,20 +4990,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
        return DC_OK;
 }
 
-bool resource_subvp_in_use(struct dc *dc,
-               struct dc_state *context)
-{
-       uint32_t i;
-
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-               if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
-                       return true;
-       }
-       return false;
-}
-
 bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
 {
        if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
index 460a8010c79fef0496755ce435f4691e20c3a08e..180ac47868c22a68c1af47096db95ecf6b11994c 100644 (file)
@@ -267,7 +267,8 @@ void dc_state_construct(struct dc *dc, struct dc_state *state)
        state->clk_mgr = dc->clk_mgr;
 
        /* Initialise DIG link encoder resource tracking variables. */
-       link_enc_cfg_init(dc, state);
+       if (dc->res_pool)
+               link_enc_cfg_init(dc, state);
 }
 
 void dc_state_destruct(struct dc_state *state)
@@ -290,11 +291,14 @@ void dc_state_destruct(struct dc_state *state)
                dc_stream_release(state->phantom_streams[i]);
                state->phantom_streams[i] = NULL;
        }
+       state->phantom_stream_count = 0;
 
        for (i = 0; i < state->phantom_plane_count; i++) {
                dc_plane_state_release(state->phantom_planes[i]);
                state->phantom_planes[i] = NULL;
        }
+       state->phantom_plane_count = 0;
+
        state->stream_mask = 0;
        memset(&state->res_ctx, 0, sizeof(state->res_ctx));
        memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
@@ -433,8 +437,9 @@ bool dc_state_add_plane(
 
        otg_master_pipe = resource_get_otg_master_for_stream(
                        &state->res_ctx, stream);
-       added = resource_append_dpp_pipes_for_plane_composition(state,
-                       dc->current_state, pool, otg_master_pipe, plane_state);
+       if (otg_master_pipe)
+               added = resource_append_dpp_pipes_for_plane_composition(state,
+                               dc->current_state, pool, otg_master_pipe, plane_state);
 
        if (added) {
                stream_status->plane_states[stream_status->plane_count] =
index f30a341bc09014b156dbe4463b41f84b0f16e083..c9317ea0258ea1cb2f686830fddc7469158966cc 100644 (file)
@@ -51,7 +51,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.265"
+#define DC_VER "3.2.266"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -434,6 +434,7 @@ struct dc_config {
        bool EnableMinDispClkODM;
        bool enable_auto_dpm_test_logs;
        unsigned int disable_ips;
+       unsigned int disable_ips_in_vpb;
 };
 
 enum visual_confirm {
@@ -1036,6 +1037,7 @@ struct dc {
 
        /* Require to optimize clocks and bandwidth for added/removed planes */
        bool optimized_required;
+       bool wm_optimized_required;
        bool idle_optimizations_allowed;
        bool enable_c20_dtm_b0;
 
index a23eebd9933b72ea5c1c2a951a560232250bf34c..ee10941caa5980999044407184e7a41b8548e6b0 100644 (file)
@@ -139,6 +139,7 @@ union stream_update_flags {
                uint32_t wb_update:1;
                uint32_t dsc_changed : 1;
                uint32_t mst_bw : 1;
+               uint32_t crtc_timing_adjust : 1;
                uint32_t fams_changed : 1;
        } bits;
 
@@ -325,6 +326,7 @@ struct dc_stream_update {
        struct dc_3dlut *lut3d_func;
 
        struct test_pattern *pending_test_pattern;
+       struct dc_crtc_timing_adjust *crtc_timing_adjust;
 };
 
 bool dc_is_stream_unchanged(
index 4f276169e05a91098662edc07fd50d0bc1ed327b..9900dda2eef5cd2e44e6dbd008cd411194d107af 100644 (file)
@@ -1034,6 +1034,7 @@ enum replay_FW_Message_type {
        Replay_Msg_Not_Support = -1,
        Replay_Set_Timing_Sync_Supported,
        Replay_Set_Residency_Frameupdate_Timer,
+       Replay_Set_Pseudo_VTotal,
 };
 
 union replay_error_status {
@@ -1089,6 +1090,10 @@ struct replay_settings {
        uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
        /* Maximum link off frame count */
        enum replay_link_off_frame_count_level link_off_frame_count_level;
+       /* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
+       uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+       /* Replay last pseudo vtotal set to DMUB */
+       uint16_t last_pseudo_vtotal;
 };
 
 /* To split out "global" and "per-panel" config settings.
@@ -1140,23 +1145,25 @@ struct dc_panel_config {
        } ilr;
 };
 
+#define MAX_SINKS_PER_LINK 4
+
 /*
  *  USB4 DPIA BW ALLOCATION STRUCTS
  */
 struct dc_dpia_bw_alloc {
-       int sink_verified_bw;  // The Verified BW that sink can allocated and use that has been verified already
-       int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
-       int sink_max_bw;       // The Max BW that sink can require/support
+       int remote_sink_req_bw[MAX_SINKS_PER_LINK]; // BW requested by remote sinks
+       int link_verified_bw;  // The Verified BW that link can allocated and use that has been verified already
+       int link_max_bw;       // The Max BW that link can require/support
+       int allocated_bw;      // The Actual Allocated BW for this DPIA
        int estimated_bw;      // The estimated available BW for this DPIA
        int bw_granularity;    // BW Granularity
+       int dp_overhead;       // DP overhead in dp tunneling
        bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3:  DP-Tx & Dpia & CM
        bool response_ready;   // Response ready from the CM side
        uint8_t nrd_max_lane_count; // Non-reduced max lane count
        uint8_t nrd_max_link_rate; // Non-reduced max link rate
 };
 
-#define MAX_SINKS_PER_LINK 4
-
 enum dc_hpd_enable_select {
        HPD_EN_FOR_ALL_EDP = 0,
        HPD_EN_FOR_PRIMARY_EDP_ONLY,
index 140598f18bbdd4cb4758ec7e9ec17c91286d0ecc..f0458b8f00af842b87ab91feadd71eef4c680e27 100644 (file)
@@ -782,7 +782,7 @@ static void get_azalia_clock_info_dp(
        /*audio_dto_module = dpDtoSourceClockInkhz * 10,000;
         *  [khz] ->[100Hz] */
        azalia_clock_info->audio_dto_module =
-               pll_info->dp_dto_source_clock_in_khz * 10;
+               pll_info->audio_dto_source_clock_in_khz * 10;
 }
 
 void dce_aud_wall_dto_setup(
index 5d3f6fa1011e8e33f5e7772bee445cb6602e278d..970644b695cd4f1d96f166cc1786987b460cdafd 100644 (file)
@@ -975,6 +975,9 @@ static bool dcn31_program_pix_clk(
                        look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10);
        struct bp_pixel_clock_parameters bp_pc_params = {0};
        enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
+
+       if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+               dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
        // For these signal types Driver to program DP_DTO without calling VBIOS Command table
        if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
                if (e) {
@@ -1088,6 +1091,10 @@ static bool get_pixel_clk_frequency_100hz(
        struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
        unsigned int clock_hz = 0;
        unsigned int modulo_hz = 0;
+       unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
+
+       if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+               dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
 
        if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
                clock_hz = REG_READ(PHASE[inst]);
@@ -1100,7 +1107,7 @@ static bool get_pixel_clk_frequency_100hz(
                        modulo_hz = REG_READ(MODULO[inst]);
                        if (modulo_hz)
                                *pixel_clk_khz = div_u64((uint64_t)clock_hz*
-                                       clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
+                                       dp_dto_ref_khz*10,
                                        modulo_hz);
                        else
                                *pixel_clk_khz = 0;
index 501388014855c5a1f830b6a830d9f6eed9bf3224..d761b0df28784afd5d81dfef193dfc11657ddff2 100644 (file)
@@ -203,12 +203,12 @@ void dcn32_link_encoder_construct(
        enc10->base.hpd_source = init_data->hpd_source;
        enc10->base.connector = init_data->connector;
 
-       if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-               enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
        enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
 
        enc10->base.features = *enc_features;
+       if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+               enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
        enc10->base.transmitter = init_data->transmitter;
 
index e4a328b45c8a5153c6468486dff940a6eb9435a3..87760600e154dad46e911e28f0b2937e6e012602 100644 (file)
@@ -183,6 +183,20 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
        return true;
 }
 
+bool dcn32_subvp_in_use(struct dc *dc,
+               struct dc_state *context)
+{
+       uint32_t i;
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+               if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
+                       return true;
+       }
+       return false;
+}
+
 bool dcn32_mpo_in_use(struct dc_state *context)
 {
        uint32_t i;
index da94e5309fbaf0f8e06a4a1aad4ce431a8d9f2cc..81e349d5835bbed499f03ef6eb33e5210c83d64b 100644 (file)
@@ -184,8 +184,6 @@ void dcn35_link_encoder_construct(
        enc10->base.hpd_source = init_data->hpd_source;
        enc10->base.connector = init_data->connector;
 
-       if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-               enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
        enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
 
@@ -240,6 +238,8 @@ void dcn35_link_encoder_construct(
        }
 
        enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+       if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+               enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
        if (bp_funcs->get_connector_speed_cap_info)
                result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
index 6042a5a6a44f8c32187b2bea702892572f08ec57..59ade76ffb18d56f26a6b329b850462150214c04 100644 (file)
@@ -72,11 +72,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
index aa68d010cbfd247057da5a210b5209ad7a62ded3..ba76dd4a2ce29a68a75883b8e8538395195b4089 100644 (file)
@@ -33,7 +33,6 @@
 #include "dcn30/dcn30_resource.h"
 #include "link.h"
 #include "dc_state_priv.h"
-#include "resource.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -292,7 +291,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
 
                /* for subvp + DRR case, if subvp pipes are still present we support pstate */
                if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
-                               resource_subvp_in_use(dc, context))
+                               dcn32_subvp_in_use(dc, context))
                        vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
 
                if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
@@ -1113,7 +1112,7 @@ struct pipe_slice_table {
                struct pipe_ctx *pri_pipe;
                struct dc_plane_state *plane;
                int slice_count;
-       } mpc_combines[MAX_SURFACES];
+       } mpc_combines[MAX_PLANES];
        int mpc_combine_count;
 };
 
@@ -1289,7 +1288,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex
        return updated;
 }
 
-static bool should_allow_odm_power_optimization(struct dc *dc,
+static bool should_apply_odm_power_optimization(struct dc *dc,
                struct dc_state *context, struct vba_vars_st *v, int *split,
                bool *merge)
 {
@@ -1393,9 +1392,12 @@ static void try_odm_power_optimization_and_revalidate(
 {
        int i;
        unsigned int new_vlevel;
+       unsigned int cur_policy[MAX_PIPES];
 
-       for (i = 0; i < pipe_cnt; i++)
+       for (i = 0; i < pipe_cnt; i++) {
+               cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy;
                pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+       }
 
        new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
 
@@ -1404,6 +1406,9 @@ static void try_odm_power_optimization_and_revalidate(
                memset(merge, 0, MAX_PIPES * sizeof(bool));
                *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge);
                context->bw_ctx.dml.vba.VoltageLevel = *vlevel;
+       } else {
+               for (i = 0; i < pipe_cnt; i++)
+                       pipes[i].pipe.dest.odm_combine_policy = cur_policy[i];
        }
 }
 
@@ -1581,7 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
                }
        }
 
-       if (should_allow_odm_power_optimization(dc, context, vba, split, merge))
+       if (should_apply_odm_power_optimization(dc, context, vba, split, merge))
                try_odm_power_optimization_and_revalidate(
                                dc, context, pipes, split, merge, vlevel, *pipe_cnt);
 
@@ -2210,7 +2215,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
                int i;
 
                pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
-               dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
+               if (!dc->config.enable_windowed_mpo_odm)
+                       dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
 
                /* repopulate_pipes = 1 means the pipes were either split or merged. In this case
                 * we have to re-calculate the DET allocation and run through DML once more to
@@ -2273,7 +2279,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
        unsigned int dummy_latency_index = 0;
        int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
        unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
-       bool subvp_active = resource_subvp_in_use(dc, context);
+       bool subvp_in_use = dcn32_subvp_in_use(dc, context);
        unsigned int min_dram_speed_mts_margin;
        bool need_fclk_lat_as_dummy = false;
        bool is_subvp_p_drr = false;
@@ -2282,7 +2288,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
        dc_assert_fp_enabled();
 
        /* need to find dummy latency index for subvp */
-       if (subvp_active) {
+       if (subvp_in_use) {
                /* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK */
                if (!pstate_en) {
                        context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
@@ -2468,7 +2474,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
                                dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
                }
 
-               if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_active) {
+               if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) {
                        /* find largest table entry that is lower than dram speed,
                         * but lower than DPM0 still uses DPM0
                         */
@@ -2754,7 +2760,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
        struct _vcs_dpi_voltage_scaling_st entry = {0};
        struct clk_limit_table_entry max_clk_data = {0};
 
-       unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
+       unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599;
 
        static const unsigned int num_dcfclk_stas = 5;
        unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
@@ -3528,7 +3534,7 @@ void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
 void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
 {
        // WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
-       if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || resource_subvp_in_use(dc, context)) &&
+       if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
                        dc->dml.soc.num_chans <= 8) {
                int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
 
index 3d12dabd39e47d0d2a3fc918dd1d07dbc3902e5d..7ea2bd5374d51b138d13179ab7444d0d8d2ef3a7 100644 (file)
@@ -164,11 +164,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
                },
        },
        .num_states = 5,
-       .sr_exit_time_us = 14.0,
-       .sr_enter_plus_exit_time_us = 16.0,
-       .sr_exit_z8_time_us = 525.0,
-       .sr_enter_plus_exit_z8_time_us = 715.0,
-       .fclk_change_latency_us = 20.0,
+       .sr_exit_time_us = 28.0,
+       .sr_enter_plus_exit_time_us = 30.0,
+       .sr_exit_z8_time_us = 210.0,
+       .sr_enter_plus_exit_z8_time_us = 320.0,
+       .fclk_change_latency_us = 24.0,
        .usr_retraining_latency_us = 2,
        .writeback_latency_us = 12.0,
 
index b95bf27f2fe2fe9a943cda43ce121c07c548f5dc..9be5ebf3a8c0ba7805b793b108923e057f2fdfe0 100644 (file)
@@ -6229,7 +6229,7 @@ static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *m
                                CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
                                CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
                                CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
-                               CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+                               CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
                                CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
                                CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
                                CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
                                mode_lib->ms.NoOfDPPThisState,
                                mode_lib->ms.dpte_group_bytes,
                                s->HostVMInefficiencyFactor,
-                               mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+                               mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
                                mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
 
                s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
@@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
                                                mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
                                                mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
                                                mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
-                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
                                                mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
                                                mode_lib->ms.MetaRowBytes[j][k],
                                                mode_lib->ms.DPTEBytesPerRow[j][k],
@@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
                CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
                CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
                CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
-               CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+               CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
                CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
                CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
                CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
@@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
                UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
                UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
                UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
-               UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+               UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
                UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
                UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
                UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
@@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
        CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
        CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
        CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
-       CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+       CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
        CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
        CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
        CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
@@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                        mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
                        locals->dpte_group_bytes,
                        s->HostVMInefficiencyFactor,
-                       mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+                       mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
                        mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
 
        locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
@@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                        CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
                        CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
                        CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
-                       CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+                       CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
                        CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
                        CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
                        CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                                                mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
                                                mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
                                                mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
-                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+                                               mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
                                                locals->PDEAndMetaPTEBytesFrame[k],
                                                locals->MetaRowByte[k],
                                                locals->PixelPTEBytesPerRow[k],
@@ -9446,13 +9446,13 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
                CalculateWatermarks_params->CompressedBufferSizeInkByte = locals->CompressedBufferSizeInkByte;
 
                // Output
-               CalculateWatermarks_params->Watermark = &s->dummy_watermark; // Watermarks *Watermark
-               CalculateWatermarks_params->DRAMClockChangeSupport = &mode_lib->ms.support.DRAMClockChangeSupport[0];
-               CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0][0]; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
-               CalculateWatermarks_params->SubViewportLinesNeededInMALL = &mode_lib->ms.SubViewportLinesNeededInMALL[j]; // dml_uint_t SubViewportLinesNeededInMALL[]
-               CalculateWatermarks_params->FCLKChangeSupport = &mode_lib->ms.support.FCLKChangeSupport[0];
-               CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // dml_float_t *MaxActiveFCLKChangeLatencySupported
-               CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport[0];
+               CalculateWatermarks_params->Watermark = &locals->Watermark; // Watermarks *Watermark
+               CalculateWatermarks_params->DRAMClockChangeSupport = &locals->DRAMClockChangeSupport;
+               CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = locals->MaxActiveDRAMClockChangeLatencySupported; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
+               CalculateWatermarks_params->SubViewportLinesNeededInMALL = locals->SubViewportLinesNeededInMALL; // dml_uint_t SubViewportLinesNeededInMALL[]
+               CalculateWatermarks_params->FCLKChangeSupport = &locals->FCLKChangeSupport;
+               CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &locals->MaxActiveFCLKChangeLatencySupported; // dml_float_t *MaxActiveFCLKChangeLatencySupported
+               CalculateWatermarks_params->USRRetrainingSupport = &locals->USRRetrainingSupport;
 
                CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                        &mode_lib->scratch,
index fa6a93dd9629558120304beae4da239a60268c0d..23a608274096f89002e7e5438be18c85d023e442 100644 (file)
@@ -341,9 +341,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
                break;
        }
 
-       if (dml2->config.bbox_overrides.clks_table.num_states)
-                       p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
-
        /* Override from passed values, if available */
        for (i = 0; i < p->in_states->num_states; i++) {
                if (dml2->config.bbox_overrides.sr_exit_latency_us) {
@@ -400,6 +397,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
        }
        /* Copy clocks tables entries, if available */
        if (dml2->config.bbox_overrides.clks_table.num_states) {
+               p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
 
                for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
                        p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
@@ -626,8 +624,8 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
                if (is_dp2p0_output_encoder(pipe))
                        out->OutputEncoder[location] = dml_dp2p0;
                break;
-               out->OutputEncoder[location] = dml_edp;
        case SIGNAL_TYPE_EDP:
+               out->OutputEncoder[location] = dml_edp;
                break;
        case SIGNAL_TYPE_HDMI_TYPE_A:
        case SIGNAL_TYPE_DVI_SINGLE_LINK:
@@ -793,35 +791,28 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
        }
 }
 
-/*TODO no support for mpc combine, need rework - should calculate scaling params based on plane+stream*/
-static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, const struct dc_state *context)
+static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context)
 {
        int i;
-       struct scaler_data data = { 0 };
+       struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
+
+       memset(temp_pipe, 0, sizeof(struct pipe_ctx));
 
        for (i = 0; i < MAX_PIPES; i++) {
                const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
                if (pipe->plane_state == in && !pipe->prev_odm_pipe) {
-                       const struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
-
-                       data = context->res_ctx.pipe_ctx[i].plane_res.scl_data;
-                       while (next_pipe) {
-                               data.h_active += next_pipe->plane_res.scl_data.h_active;
-                               data.recout.width += next_pipe->plane_res.scl_data.recout.width;
-                               if (in->rotation == ROTATION_ANGLE_0 || in->rotation == ROTATION_ANGLE_180) {
-                                       data.viewport.width += next_pipe->plane_res.scl_data.viewport.width;
-                               } else {
-                                       data.viewport.height += next_pipe->plane_res.scl_data.viewport.height;
-                               }
-                               next_pipe = next_pipe->next_odm_pipe;
-                       }
+                       temp_pipe->stream = pipe->stream;
+                       temp_pipe->plane_state = pipe->plane_state;
+                       temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
+
+                       resource_build_scaling_params(temp_pipe);
                        break;
                }
        }
 
        ASSERT(i < MAX_PIPES);
-       return data;
+       return temp_pipe->plane_res.scl_data;
 }
 
 static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
@@ -866,7 +857,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
        out->ScalerEnabled[location] = false;
 }
 
-static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, const struct dc_state *context)
+static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context)
 {
        const struct scaler_data scaler_data = get_scaler_data_for_plane(in, context);
 
index fb328cd06cea2c8a00f7450c8ade7408fb4716a9..01493c49bd7a084b1748bb786c56106858709dcc 100644 (file)
@@ -1183,9 +1183,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
                dto_params.timing = &pipe_ctx->stream->timing;
                dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
                if (dccg) {
-                       dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
                        dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
                        dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
+                       dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
                }
        } else if (dccg && dccg->funcs->disable_symclk_se) {
                dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
@@ -1354,7 +1354,7 @@ static void build_audio_output(
        if (state->clk_mgr &&
                (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
                        pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
-               audio_output->pll_info.dp_dto_source_clock_in_khz =
+               audio_output->pll_info.audio_dto_source_clock_in_khz =
                                state->clk_mgr->funcs->get_dp_ref_clk_frequency(
                                                state->clk_mgr);
        }
@@ -1476,7 +1476,7 @@ static enum dc_status dce110_enable_stream_timing(
        return DC_OK;
 }
 
-static enum dc_status apply_single_controller_ctx_to_hw(
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
                struct pipe_ctx *pipe_ctx,
                struct dc_state *context,
                struct dc *dc)
@@ -2302,7 +2302,7 @@ enum dc_status dce110_apply_ctx_to_hw(
                if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
                        continue;
 
-               status = apply_single_controller_ctx_to_hw(
+               status = dce110_apply_single_controller_ctx_to_hw(
                                pipe_ctx,
                                context,
                                dc);
index 08028a1779ae819282ab2394de57c4b8f266a9f3..ed3cc3648e8e23f8d076b92e10a23791253f9662 100644 (file)
@@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw(
                struct dc *dc,
                struct dc_state *context);
 
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
+               struct pipe_ctx *pipe_ctx,
+               struct dc_state *context,
+               struct dc *dc);
 
 void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
 
index 51dd2ae09b2a6235f822c7eb6c72335f38fafe24..6dd479e8a348502c9b285a38f16650fb7cb4f95e 100644 (file)
@@ -3076,7 +3076,7 @@ void dcn10_prepare_bandwidth(
                        context,
                        false);
 
-       dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+       dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
                        &context->bw_ctx.bw.dcn.watermarks,
                        dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
                        true);
index bc71a9b058fedd2c211cf38088758ca6a71480b9..931ac8ed7069d7bdcd3ca2f0c35f5e5f04552827 100644 (file)
@@ -1882,42 +1882,6 @@ static void dcn20_program_pipe(
        }
 }
 
-static void update_vmin_vmax_fams(struct dc *dc,
-               struct dc_state *context)
-{
-       uint32_t i;
-       struct drr_params params = {0};
-       bool subvp_in_use = resource_subvp_in_use(dc, context);
-
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-               if (resource_is_pipe_type(pipe, OTG_MASTER) &&
-                               ((subvp_in_use && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM &&
-                               pipe->stream->allow_freesync) || (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && pipe->stream->fpo_in_use))) {
-                       if (!pipe->stream->vrr_active_variable && !pipe->stream->vrr_active_fixed) {
-                               struct timing_generator *tg = context->res_ctx.pipe_ctx[i].stream_res.tg;
-
-                               /* DRR should be configured already if we're in active variable
-                                * or active fixed, so only program if we're not in this state
-                                */
-                               params.vertical_total_min = pipe->stream->timing.v_total;
-                               params.vertical_total_max = pipe->stream->timing.v_total;
-                               tg->funcs->set_drr(tg, &params);
-                       }
-               } else {
-                       if (resource_is_pipe_type(pipe, OTG_MASTER) &&
-                                       !pipe->stream->vrr_active_variable &&
-                                       !pipe->stream->vrr_active_fixed) {
-                               struct timing_generator *tg = context->res_ctx.pipe_ctx[i].stream_res.tg;
-                               params.vertical_total_min = 0;
-                               params.vertical_total_max = 0;
-                               tg->funcs->set_drr(tg, &params);
-                       }
-               }
-       }
-}
-
 void dcn20_program_front_end_for_ctx(
                struct dc *dc,
                struct dc_state *context)
@@ -1994,7 +1958,6 @@ void dcn20_program_front_end_for_ctx(
                                && context->res_ctx.pipe_ctx[i].stream)
                        hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
 
-       update_vmin_vmax_fams(dc, context);
 
        /* Disconnect mpcc */
        for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -2196,10 +2159,10 @@ void dcn20_prepare_bandwidth(
        }
 
        /* program dchubbub watermarks:
-        * For assigning optimized_required, use |= operator since we don't want
+        * For assigning wm_optimized_required, use |= operator since we don't want
         * to clear the value if the optimize has not happened yet
         */
-       dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+       dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
                                        &context->bw_ctx.bw.dcn.watermarks,
                                        dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
                                        false);
@@ -2212,10 +2175,10 @@ void dcn20_prepare_bandwidth(
        if (hubbub->funcs->program_compbuf_size) {
                if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
                        compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
-                       dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
+                       dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
                } else {
                        compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
-                       dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
+                       dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
                }
 
                hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
@@ -2598,7 +2561,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
                tg->funcs->setup_vertical_interrupt2(tg, start_line);
 }
 
-static void dcn20_reset_back_end_for_pipe(
+void dcn20_reset_back_end_for_pipe(
                struct dc *dc,
                struct pipe_ctx *pipe_ctx,
                struct dc_state *context)
@@ -2827,18 +2790,17 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
        }
 
        if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
-               dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
-               dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
-
-               phyd32clk = get_phyd32clk_src(link);
-               dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
-
                dto_params.otg_inst = tg->inst;
                dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
                dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
                dto_params.timing = &pipe_ctx->stream->timing;
                dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
                dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+               dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+               dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
+
+               phyd32clk = get_phyd32clk_src(link);
+               dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
        } else {
                if (dccg->funcs->enable_symclk_se)
                        dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
index b94c85340abff7c02f3ec59025b04c8417d77bd6..d950b3e54ec2c7d35fb1c70a53094f0543c17b97 100644 (file)
@@ -84,6 +84,10 @@ enum dc_status dcn20_enable_stream_timing(
 void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
 void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
 void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_reset_back_end_for_pipe(
+               struct dc *dc,
+               struct pipe_ctx *pipe_ctx,
+               struct dc_state *context);
 void dcn20_init_blank(
                struct dc *dc,
                struct timing_generator *tg);
index 8e88dcaf88f5b2b709a95abf9e0673390e27daa5..5c7f380a84f91ecb1a668e4798be6aaf9347a46f 100644 (file)
@@ -206,28 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
 void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
 {
        struct abm *abm = pipe_ctx->stream_res.abm;
-       uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+       struct timing_generator *tg = pipe_ctx->stream_res.tg;
        struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
        struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
+       uint32_t otg_inst;
+
+       if (!abm && !tg && !panel_cntl)
+               return;
+
+       otg_inst = tg->inst;
 
        if (dmcu) {
                dce110_set_pipe(pipe_ctx);
                return;
        }
 
-       if (abm && panel_cntl) {
-               if (abm->funcs && abm->funcs->set_pipe_ex) {
-                       abm->funcs->set_pipe_ex(abm,
+       if (abm->funcs && abm->funcs->set_pipe_ex) {
+               abm->funcs->set_pipe_ex(abm,
                                        otg_inst,
                                        SET_ABM_PIPE_NORMAL,
                                        panel_cntl->inst,
                                        panel_cntl->pwrseq_inst);
-               } else {
-                               dmub_abm_set_pipe(abm, otg_inst,
-                                               SET_ABM_PIPE_NORMAL,
-                                               panel_cntl->inst,
-                                               panel_cntl->pwrseq_inst);
-               }
+       } else {
+               dmub_abm_set_pipe(abm, otg_inst,
+                                 SET_ABM_PIPE_NORMAL,
+                                 panel_cntl->inst,
+                                 panel_cntl->pwrseq_inst);
        }
 }
 
@@ -237,34 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
 {
        struct dc_context *dc = pipe_ctx->stream->ctx;
        struct abm *abm = pipe_ctx->stream_res.abm;
+       struct timing_generator *tg = pipe_ctx->stream_res.tg;
        struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+       uint32_t otg_inst;
+
+       if (!abm || !tg || !panel_cntl)
+               return false;
+
+       otg_inst = tg->inst;
 
        if (dc->dc->res_pool->dmcu) {
                dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
                return true;
        }
 
-       if (abm != NULL) {
-               uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
-
-               if (abm && panel_cntl) {
-                       if (abm->funcs && abm->funcs->set_pipe_ex) {
-                               abm->funcs->set_pipe_ex(abm,
-                                               otg_inst,
-                                               SET_ABM_PIPE_NORMAL,
-                                               panel_cntl->inst,
-                                               panel_cntl->pwrseq_inst);
-                       } else {
-                                       dmub_abm_set_pipe(abm,
-                                                       otg_inst,
-                                                       SET_ABM_PIPE_NORMAL,
-                                                       panel_cntl->inst,
-                                                       panel_cntl->pwrseq_inst);
-                       }
-               }
+       if (abm->funcs && abm->funcs->set_pipe_ex) {
+               abm->funcs->set_pipe_ex(abm,
+                                       otg_inst,
+                                       SET_ABM_PIPE_NORMAL,
+                                       panel_cntl->inst,
+                                       panel_cntl->pwrseq_inst);
+       } else {
+               dmub_abm_set_pipe(abm,
+                                 otg_inst,
+                                 SET_ABM_PIPE_NORMAL,
+                                 panel_cntl->inst,
+                                 panel_cntl->pwrseq_inst);
        }
 
-       if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm)
+       if (abm->funcs && abm->funcs->set_backlight_level_pwm)
                abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16,
                        frame_ramp, 0, panel_cntl->inst);
        else
index 6c9299c7683df19b3c444b865d297182d91ae7b3..aa36d7a56ca8c3b6f3cd47e67455ba67549bf73b 100644 (file)
@@ -1474,9 +1474,44 @@ void dcn32_update_dsc_pg(struct dc *dc,
        }
 }
 
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context)
+{
+       struct dce_hwseq *hws = dc->hwseq;
+       int i;
+
+       for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+               struct pipe_ctx *pipe_ctx_old =
+                       &dc->current_state->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+               if (!pipe_ctx_old->stream)
+                       continue;
+
+               if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM)
+                       continue;
+
+               if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+                       continue;
+
+               if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) ||
+                               (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) {
+                       struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+                       if (hws->funcs.reset_back_end_for_pipe)
+                               hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+                       if (hws->funcs.enable_stream_gating)
+                               hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+                       if (old_clk)
+                               old_clk->funcs->cs_power_down(old_clk);
+               }
+       }
+}
+
 void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
 {
        unsigned int i;
+       enum dc_status status = DC_OK;
+       struct dce_hwseq *hws = dc->hwseq;
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1497,16 +1532,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
                }
        }
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
-               if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) {
-                       // If old context or new context has phantom pipes, apply
-                       // the phantom timings now. We can't change the phantom
-                       // pipe configuration safely without driver acquiring
-                       // the DMCUB lock first.
-                       dc->hwss.apply_ctx_to_hw(dc, context);
-                       break;
+               struct pipe_ctx *pipe_ctx_old =
+                                       &dc->current_state->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+               if (pipe_ctx->stream == NULL)
+                       continue;
+
+               if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+                       continue;
+
+               if (pipe_ctx->stream == pipe_ctx_old->stream &&
+                       pipe_ctx->stream->link->link_state_valid) {
+                       continue;
                }
+
+               if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+                       continue;
+
+               if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
+                       continue;
+
+               if (hws->funcs.apply_single_controller_ctx_to_hw)
+                       status = hws->funcs.apply_single_controller_ctx_to_hw(
+                                       pipe_ctx,
+                                       context,
+                                       dc);
+
+               ASSERT(status == DC_OK);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+               if (hws->funcs.resync_fifo_dccg_dio)
+                       hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+#endif
        }
 }
 
index cecf7f0f567190b257cf81e5f756b5a916eba09c..069e20bc87c0a75af028168253219fc9343b1af3 100644 (file)
@@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc,
 
 void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
 
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context);
+
 void dcn32_init_blank(
                struct dc *dc,
                struct timing_generator *tg);
index 427cfc8c24a4b7ed4cee1f0b6955cbe371797219..e8ac94a005b83a78533646aae0a36ca132eb8a75 100644 (file)
@@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
        .get_dcc_en_bits = dcn10_get_dcc_en_bits,
        .commit_subvp_config = dcn32_commit_subvp_config,
        .enable_phantom_streams = dcn32_enable_phantom_streams,
+       .disable_phantom_streams = dcn32_disable_phantom_streams,
        .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
        .update_visual_confirm_color = dcn10_update_visual_confirm_color,
        .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast,
@@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
        .set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
        .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio,
        .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+       .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
+       .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
 };
 
 void dcn32_hw_sequencer_init_functions(struct dc *dc)
index 9c806385ecbdcce6c0d14f949ea41879758969f7..8b6c49622f3b63c8e6dae68c507e1e45c5a736a2 100644 (file)
@@ -680,7 +680,7 @@ void dcn35_power_down_on_boot(struct dc *dc)
 bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
 {
        struct dc_link *edp_links[MAX_NUM_EDP];
-       int edp_num;
+       int i, edp_num;
        if (dc->debug.dmcub_emulation)
                return true;
 
@@ -688,6 +688,13 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
                dc_get_edp_links(dc, edp_links, &edp_num);
                if (edp_num == 0 || edp_num > 1)
                        return false;
+
+               for (i = 0; i < dc->current_state->stream_count; ++i) {
+                       struct dc_stream_state *stream = dc->current_state->streams[i];
+
+                       if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal))
+                               return false;
+               }
        }
 
        // TODO: review other cases when idle optimization is allowed
index a54399383318145b8bc72fc85e646bf546588609..64ca7c66509b79bc2cfe50806cc37e8953468239 100644 (file)
@@ -379,6 +379,7 @@ struct hw_sequencer_funcs {
                        struct dc_cursor_attributes *cursor_attr);
        void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
        void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+       void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context);
        void (*subvp_pipe_control_lock)(struct dc *dc,
                        struct dc_state *context,
                        bool lock,
index 6137cf09aa54d25750246e86583c5938e557501b..b3c62a82cb1cf10fddad52dcf85b7e02de87ee35 100644 (file)
@@ -165,8 +165,15 @@ struct hwseq_private_funcs {
        void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx);
        void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
                        struct dc_state *context);
+       enum dc_status (*apply_single_controller_ctx_to_hw)(
+                       struct pipe_ctx *pipe_ctx,
+                       struct dc_state *context,
+                       struct dc *dc);
        bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
 #endif
+       void (*reset_back_end_for_pipe)(struct dc *dc,
+                       struct pipe_ctx *pipe_ctx,
+                       struct dc_state *context);
 };
 
 struct dce_hwseq {
index f74ae0d41d3c49cf215d615f336339b773cbbcbc..3a6bf77a68732166d320dbea642929c3201d3e01 100644 (file)
@@ -469,6 +469,8 @@ struct resource_context {
        unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
        int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
        bool is_mpc_3dlut_acquired[MAX_PIPES];
+       /* solely used for build scalar data in dml2 */
+       struct pipe_ctx temp_pipe;
 };
 
 struct dce_bw_output {
index cbba39d251e5335d6e11604fc327b8ad31284aea..17e014d3bdc8401893847a4f0fd9670d664f65c5 100644 (file)
@@ -333,6 +333,7 @@ struct clk_mgr {
        bool force_smu_not_present;
        bool dc_mode_softmax_enabled;
        int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes
+       int dp_dto_source_clock_in_khz; // Used to program DP DTO with ss adjustment on DCN314
        int dentist_vco_freq_khz;
        struct clk_state_registers_and_bypass boot_snapshot;
        struct clk_bw_params *bw_params;
index 1d51fed12e20037b3baf40ed7f3b89095437457e..77a60aa9f27bbfdfa8a652306e2366dc0eca4345 100644 (file)
@@ -427,22 +427,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe);
 int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe);
 
 /*
- * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice
- * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for a plane with MPCC combine. otherwise
- * the number of MPC "cuts" for the plane.
+ * Get the number of MPC slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an MPC combine
+ * pipe topology.
  */
-int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head);
+int resource_get_mpc_slice_count(const struct pipe_ctx *pipe);
 
 /*
- * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice
- * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for ODM combine. otherwise
- * the number of ODM "cuts" for the timing.
+ * Get the number of ODM slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an ODM combine
+ * pipe topology.
  */
-int resource_get_odm_slice_count(const struct pipe_ctx *otg_master);
+int resource_get_odm_slice_count(const struct pipe_ctx *pipe);
 
 /* Get the ODM slice index counting from 0 from left most slice */
 int resource_get_odm_slice_index(const struct pipe_ctx *opp_head);
@@ -609,9 +605,6 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
                struct pipe_ctx *sec_pipe,
                bool odm);
 
-bool resource_subvp_in_use(struct dc *dc,
-               struct dc_state *context);
-
 /* A test harness interface that modifies dp encoder resources in the given dc
  * state and bypasses the need to revalidate. The interface assumes that the
  * test harness interface is called with pre-validated link config stored in the
index 5fe8b4871c77614eb0fd46421db49fb79197e6f7..3cbfbf8d107e9b62c639ef1618041b8fc09dd9b5 100644 (file)
@@ -900,11 +900,15 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
 {
        struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
        struct dc_stream_state *stream = pipe_ctx->stream;
-       DC_LOGGER_INIT(dsc->ctx->logger);
 
-       if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
+       if (!pipe_ctx->stream->timing.flags.DSC)
                return false;
 
+       if (!dsc)
+               return false;
+
+       DC_LOGGER_INIT(dsc->ctx->logger);
+
        if (enable) {
                struct dsc_config dsc_cfg;
                uint8_t dsc_packed_pps[128];
@@ -2005,17 +2009,11 @@ static enum dc_status enable_link_dp(struct dc_state *state,
                }
        }
 
-       /*
-        * If the link is DP-over-USB4 do the following:
-        * - Train with fallback when enabling DPIA link. Conventional links are
+       /* Train with fallback when enabling DPIA link. Conventional links are
         * trained with fallback during sink detection.
-        * - Allocate only what the stream needs for bw in Gbps. Inform the CM
-        * in case stream needs more or less bw from what has been allocated
-        * earlier at plug time.
         */
-       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
                do_fallback = true;
-       }
 
        /*
         * Temporary w/a to get DP2.0 link rates to work with SST.
@@ -2197,6 +2195,32 @@ static enum dc_status enable_link(
        return status;
 }
 
+static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
+{
+       return true;
+}
+
+static bool allocate_usb4_bandwidth(struct dc_stream_state *stream)
+{
+       bool ret;
+
+       int bw = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+                       dc_link_get_highest_encoding_format(stream->sink->link));
+
+       ret = allocate_usb4_bandwidth_for_stream(stream, bw);
+
+       return ret;
+}
+
+static bool deallocate_usb4_bandwidth(struct dc_stream_state *stream)
+{
+       bool ret;
+
+       ret = allocate_usb4_bandwidth_for_stream(stream, 0);
+
+       return ret;
+}
+
 void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
 {
        struct dc  *dc = pipe_ctx->stream->ctx->dc;
@@ -2232,6 +2256,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
        update_psp_stream_config(pipe_ctx, true);
        dc->hwss.blank_stream(pipe_ctx);
 
+       if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+               deallocate_usb4_bandwidth(pipe_ctx->stream);
+
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                deallocate_mst_payload(pipe_ctx);
        else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
@@ -2474,6 +2501,9 @@ void link_set_dpms_on(
                }
        }
 
+       if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+               allocate_usb4_bandwidth(pipe_ctx->stream);
+
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                allocate_mst_payload(pipe_ctx);
        else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
index b45fda96eaf649bf16f291df2294d787680e0287..8fe66c3678508d9aee6779fa25cd6128e1f30832 100644 (file)
@@ -346,23 +346,61 @@ enum dc_status link_validate_mode_timing(
        return DC_OK;
 }
 
+/*
+ * This function calculates the bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective dpia link
+ *
+ * @stream: pointer to the dc_stream_state struct instance
+ * @num_streams: number of streams to be validated
+ *
+ * return: true if validation is succeeded
+ */
 bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
 {
-       bool ret = true;
-       int bw_needed[MAX_DPIA_NUM];
-       struct dc_link *link[MAX_DPIA_NUM];
-
-       if (!num_streams || num_streams > MAX_DPIA_NUM)
-               return ret;
+       int bw_needed[MAX_DPIA_NUM] = {0};
+       struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
+       int num_dpias = 0;
 
        for (uint8_t i = 0; i < num_streams; ++i) {
+               if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
+                       /* new dpia sst stream, check whether it exceeds max dpia */
+                       if (num_dpias >= MAX_DPIA_NUM)
+                               return false;
 
-               link[i] = stream[i].link;
-               bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
-                               dc_link_get_highest_encoding_format(link[i]));
+                       dpia_link[num_dpias] = stream[i].link;
+                       bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+                                       dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
+                       num_dpias++;
+               } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+                       uint8_t j = 0;
+                       /* check whether its a known dpia link */
+                       for (; j < num_dpias; ++j) {
+                               if (dpia_link[j] == stream[i].link)
+                                       break;
+                       }
+
+                       if (j == num_dpias) {
+                               /* new dpia mst stream, check whether it exceeds max dpia */
+                               if (num_dpias >= MAX_DPIA_NUM)
+                                       return false;
+                               else {
+                                       dpia_link[j] = stream[i].link;
+                                       num_dpias++;
+                               }
+                       }
+
+                       bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+                               dc_link_get_highest_encoding_format(dpia_link[j]));
+               }
        }
 
-       ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
+       /* Include dp overheads */
+       for (uint8_t i = 0; i < num_dpias; ++i) {
+               int dp_overhead = 0;
+
+               dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
+               bw_needed[i] += dp_overhead;
+       }
 
-       return ret;
+       return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
 }
index 982eda3c46f5680af8c60042e4cc7fc8909f68d3..6af42ba9885c054ead528e11d14007622af098a8 100644 (file)
@@ -82,25 +82,33 @@ bool dpia_query_hpd_status(struct dc_link *link)
 {
        union dmub_rb_cmd cmd = {0};
        struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
-       bool is_hpd_high = false;
 
        /* prepare QUERY_HPD command */
        cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
        cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
        cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
 
-       /* Return HPD status reported by DMUB if query successfully executed. */
-       if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
-           cmd.query_hpd.data.status == AUX_RET_SUCCESS)
-               is_hpd_high = cmd.query_hpd.data.result;
-
-       DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
-               __func__,
-               link->link_index,
-               link->link_id.enum_id - ENUM_ID_1,
-               cmd.query_hpd.data.status,
-               cmd.query_hpd.data.result);
-
-       return is_hpd_high;
+       /* Query dpia hpd status from dmub */
+       if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd,
+               DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+           cmd.query_hpd.data.status == AUX_RET_SUCCESS) {
+               DC_LOG_DEBUG("%s: for link(%d) dpia(%d) success, current_hpd_status(%d) new_hpd_status(%d)\n",
+                       __func__,
+                       link->link_index,
+                       link->link_id.enum_id - ENUM_ID_1,
+                       link->hpd_status,
+                       cmd.query_hpd.data.result);
+               link->hpd_status = cmd.query_hpd.data.result;
+       } else {
+               DC_LOG_ERROR("%s: for link(%d) dpia(%d) failed with status(%d), current_hpd_status(%d) new_hpd_status(0)\n",
+                       __func__,
+                       link->link_index,
+                       link->link_id.enum_id - ENUM_ID_1,
+                       cmd.query_hpd.data.status,
+                       link->hpd_status);
+               link->hpd_status = false;
+       }
+
+       return link->hpd_status;
 }
 
index a7aa8c9da868fcf81a335b3d1384aeb8e37a1f42..5491b707cec881b9854ab96834503c1e88053380 100644 (file)
@@ -54,12 +54,18 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
 static void reset_bw_alloc_struct(struct dc_link *link)
 {
        link->dpia_bw_alloc_config.bw_alloc_enabled = false;
-       link->dpia_bw_alloc_config.sink_verified_bw = 0;
-       link->dpia_bw_alloc_config.sink_max_bw = 0;
+       link->dpia_bw_alloc_config.link_verified_bw = 0;
+       link->dpia_bw_alloc_config.link_max_bw = 0;
+       link->dpia_bw_alloc_config.allocated_bw = 0;
        link->dpia_bw_alloc_config.estimated_bw = 0;
        link->dpia_bw_alloc_config.bw_granularity = 0;
+       link->dpia_bw_alloc_config.dp_overhead = 0;
        link->dpia_bw_alloc_config.response_ready = false;
-       link->dpia_bw_alloc_config.sink_allocated_bw = 0;
+       link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
+       link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
+       for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
+               link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0;
+       DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index);
 }
 
 #define BW_GRANULARITY_0 4 // 0.25 Gbps
@@ -190,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
        struct dc_link *link_dpia_primary, *link_dpia_secondary;
        int total_bw = 0;
 
-       for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) {
+       for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
 
                if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
                        continue;
@@ -210,8 +216,8 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
                                link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
                                (link_dpia_secondary->hpd_status &&
                                link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
-                               total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
-                                       link_dpia_secondary->dpia_bw_alloc_config.sink_allocated_bw;
+                                       total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
+                                               link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
                        } else if (link_dpia_primary->hpd_status &&
                                        link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
                                total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
@@ -264,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
 
        /* Error check whether requested and allocated are equal */
        req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-       if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw) {
+       if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
                DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
                        __func__, link->link_index);
        }
@@ -387,9 +393,9 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
                DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
                        __func__, link->link_index);
                DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
-                       __func__, link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed);
+                       __func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
 
-               link->dpia_bw_alloc_config.sink_allocated_bw = bw_needed;
+               link->dpia_bw_alloc_config.allocated_bw = bw_needed;
 
                link->dpia_bw_alloc_config.response_ready = true;
                break;
@@ -427,8 +433,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
        if (link->hpd_status && peak_bw > 0) {
 
                // If DP over USB4 then we need to check BW allocation
-               link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
-               set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
+               link->dpia_bw_alloc_config.link_max_bw = peak_bw;
+               set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
 
                do {
                        if (timeout > 0)
@@ -440,8 +446,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
 
                if (!timeout)
                        ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
-               else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
-                       ret = link->dpia_bw_alloc_config.sink_allocated_bw;
+               else if (link->dpia_bw_alloc_config.allocated_bw > 0)
+                       ret = link->dpia_bw_alloc_config.allocated_bw;
        }
        //2. Cold Unplug
        else if (!link->hpd_status)
@@ -450,7 +456,6 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
 out:
        return ret;
 }
-
 bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
 {
        bool ret = false;
@@ -458,7 +463,7 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
 
        DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
                __func__, link->link_index, link->hpd_status,
-               link->dpia_bw_alloc_config.sink_allocated_bw, req_bw);
+               link->dpia_bw_alloc_config.allocated_bw, req_bw);
 
        if (!get_bw_alloc_proceed_flag(link))
                goto out;
@@ -523,3 +528,30 @@ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const
 
        return ret;
 }
+
+int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
+{
+       int dp_overhead = 0, link_mst_overhead = 0;
+
+       if (!get_bw_alloc_proceed_flag((link)))
+               return dp_overhead;
+
+       /* if its mst link, add MTPH overhead */
+       if ((link->type == dc_connection_mst_branch) &&
+               !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+               /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
+                * MST overhead is 1/64 of link bandwidth (excluding any overhead)
+                */
+               const struct dc_link_settings *link_cap =
+                       dc_link_get_link_cap(link);
+               uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
+                                          (uint32_t)link_cap->lane_count *
+                                          LINK_RATE_REF_FREQ_IN_KHZ * 8;
+               link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
+       }
+
+       /* add all the overheads */
+       dp_overhead = link_mst_overhead;
+
+       return dp_overhead;
+}
index 981bc4eb6120e76ad959435be7ad716cdc498926..3b6d8494f9d5da4ceb05711c9596007ac73f08a2 100644 (file)
@@ -99,4 +99,13 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
  */
 bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
 
+/*
+ * Obtain all the DP overheads in dp tunneling for the dpia link
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: DP overheads in DP tunneling
+ */
+int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+
 #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
index 5c9a30211c109f749ab7e1cceb402bd7a0dcb786..fc50931c2aecbb53d74a2d48913e608134510940 100644 (file)
@@ -205,7 +205,7 @@ enum dc_status core_link_read_dpcd(
        uint32_t extended_size;
        /* size of the remaining partitioned address space */
        uint32_t size_left_to_read;
-       enum dc_status status;
+       enum dc_status status = DC_ERROR_UNEXPECTED;
        /* size of the next partition to be read from */
        uint32_t partition_size;
        uint32_t data_index = 0;
@@ -234,7 +234,7 @@ enum dc_status core_link_write_dpcd(
 {
        uint32_t partition_size;
        uint32_t data_index = 0;
-       enum dc_status status;
+       enum dc_status status = DC_ERROR_UNEXPECTED;
 
        while (size) {
                partition_size = dpcd_get_next_partition_size(address, size);
index 7f1196528218692c98f1f15375f153dfe56fe514..046d3e205415311cd63a98aa3c0e59c8aaea2e89 100644 (file)
@@ -930,8 +930,8 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
 bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
 {
        /* To-do: Setup Replay */
-       struct dc *dc = link->ctx->dc;
-       struct dmub_replay *replay = dc->res_pool->replay;
+       struct dc *dc;
+       struct dmub_replay *replay;
        int i;
        unsigned int panel_inst;
        struct replay_context replay_context = { 0 };
@@ -947,6 +947,10 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
        if (!link)
                return false;
 
+       dc = link->ctx->dc;
+
+       replay = dc->res_pool->replay;
+
        if (!replay)
                return false;
 
@@ -975,8 +979,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
 
        replay_context.line_time_in_ns = lineTimeInNs;
 
-       if (replay)
-               link->replay_settings.replay_feature_enabled =
+       link->replay_settings.replay_feature_enabled =
                        replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
        if (link->replay_settings.replay_feature_enabled) {
 
index 91ea0d4da06a9443bb199759fde0c75ae44fc8f3..82349354332548e160494c23bee15acaa18b7630 100644 (file)
@@ -166,12 +166,6 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
-       /* disable otg request until end of the first line
-        * in the vertical blank region
-        */
-       REG_UPDATE(OTG_CONTROL,
-                       OTG_MASTER_EN, 0);
-
        REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
                        OPTC_SEG0_SRC_SEL, 0xf,
                        OPTC_SEG1_SRC_SEL, 0xf,
@@ -179,6 +173,15 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
                        OPTC_SEG3_SRC_SEL, 0xf,
                        OPTC_NUM_OF_INPUT_SEGMENT, 0);
 
+       REG_UPDATE(OPTC_MEMORY_CONFIG,
+                       OPTC_MEM_SEL, 0);
+
+       /* disable otg request until end of the first line
+        * in the vertical blank region
+        */
+       REG_UPDATE(OTG_CONTROL,
+                       OTG_MASTER_EN, 0);
+
        REG_UPDATE(CONTROL,
                        VTG0_ENABLE, 0);
 
@@ -205,6 +208,13 @@ static void optc32_disable_phantom_otg(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
+       REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+                       OPTC_SEG0_SRC_SEL, 0xf,
+                       OPTC_SEG1_SRC_SEL, 0xf,
+                       OPTC_SEG2_SRC_SEL, 0xf,
+                       OPTC_SEG3_SRC_SEL, 0xf,
+                       OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
        REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
 }
 
index 08a59cf449cae5c27fe7dbe8fc1b2f847f462f9f..5b154750885030e171a483d30e014aa8f4bff8a1 100644 (file)
@@ -138,12 +138,6 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
-       /* disable otg request until end of the first line
-        * in the vertical blank region
-        */
-       REG_UPDATE(OTG_CONTROL,
-                       OTG_MASTER_EN, 0);
-
        REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
                        OPTC_SEG0_SRC_SEL, 0xf,
                        OPTC_SEG1_SRC_SEL, 0xf,
@@ -151,6 +145,15 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
                        OPTC_SEG3_SRC_SEL, 0xf,
                        OPTC_NUM_OF_INPUT_SEGMENT, 0);
 
+       REG_UPDATE(OPTC_MEMORY_CONFIG,
+                       OPTC_MEM_SEL, 0);
+
+       /* disable otg request until end of the first line
+        * in the vertical blank region
+        */
+       REG_UPDATE(OTG_CONTROL,
+                       OTG_MASTER_EN, 0);
+
        REG_UPDATE(CONTROL,
                        VTG0_ENABLE, 0);
 
index 511ff6b5b9856776ea834393e4a7bfcaa90ca49f..7538b548c5725177b12e2d169acc681c31174797 100644 (file)
@@ -999,7 +999,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id
        vpg = dcn301_vpg_create(ctx, vpg_inst);
        afmt = dcn301_afmt_create(ctx, afmt_inst);
 
-       if (!enc1 || !vpg || !afmt) {
+       if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
                kfree(enc1);
                kfree(vpg);
                kfree(afmt);
index ac04a9c9a3d86808000942fd4eae12d5f9fdea66..6f10052caeef02c3448307c4c81aef805e68e95b 100644 (file)
@@ -1829,7 +1829,21 @@ int dcn32_populate_dml_pipes_from_context(
                dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt);
                DC_FP_END();
                pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
-               pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+               if (dc->config.enable_windowed_mpo_odm &&
+                               dc->debug.enable_single_display_2to1_odm_policy) {
+                       switch (resource_get_odm_slice_count(pipe)) {
+                       case 2:
+                               pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+                               break;
+                       case 4:
+                               pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;
+                               break;
+                       default:
+                               pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+                       }
+               } else {
+                       pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+               }
                pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
                pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
                pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
@@ -1899,7 +1913,7 @@ int dcn32_populate_dml_pipes_from_context(
 
 static struct dc_cap_funcs cap_funcs = {
        .get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
-       .get_subvp_en = resource_subvp_in_use,
+       .get_subvp_en = dcn32_subvp_in_use,
 };
 
 void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
index 62611acd4bcb522c78fafdaa8d811101b65b5f42..0c87b0fabba7d96ff38180900e41f1438419912c 100644 (file)
@@ -131,6 +131,9 @@ void dcn32_merge_pipes_for_subvp(struct dc *dc,
 bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
                struct dc_state *context);
 
+bool dcn32_subvp_in_use(struct dc *dc,
+               struct dc_state *context);
+
 bool dcn32_mpo_in_use(struct dc_state *context);
 
 bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
index e1ab207c46f15b1c6dd6e13ec35f6049674c0444..74412e5f03fefbaa9350982ac92bc528cc8e80e8 100644 (file)
@@ -1574,7 +1574,7 @@ static void dcn321_destroy_resource_pool(struct resource_pool **pool)
 
 static struct dc_cap_funcs cap_funcs = {
        .get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
-       .get_subvp_en = resource_subvp_in_use,
+       .get_subvp_en = dcn32_subvp_in_use,
 };
 
 static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
index 761ec989187568730fdd8cd51cd1802fa657be9c..1c3d89264ef72c56251e09549cde90f8d78ce91a 100644 (file)
@@ -780,8 +780,8 @@ static const struct dc_debug_options debug_defaults_drv = {
        .disable_z10 = false,
        .ignore_pg = true,
        .psp_disabled_wa = true,
-       .ips2_eval_delay_us = 200,
-       .ips2_entry_delay_us = 400,
+       .ips2_eval_delay_us = 1650,
+       .ips2_entry_delay_us = 800,
        .static_screen_wait_frames = 2,
 };
 
@@ -2130,6 +2130,7 @@ static bool dcn35_resource_construct(
        dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
        dc->dml2_options.use_native_pstate_optimization = true;
        dc->dml2_options.use_native_soc_bb_construction = true;
+       dc->dml2_options.minimize_dispclk_using_odm = false;
        if (dc->config.EnableMinDispClkODM)
                dc->dml2_options.minimize_dispclk_using_odm = true;
        dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
index c64b6c848ef7219e3ddc44da8d4e56763a9bf7f4..e699731ee68e96388c52ed55c17b34cc8710aaab 100644 (file)
@@ -2832,6 +2832,7 @@ struct dmub_rb_cmd_psr_set_power_opt {
 #define REPLAY_RESIDENCY_MODE_MASK             (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
 # define REPLAY_RESIDENCY_MODE_PHY             (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
 # define REPLAY_RESIDENCY_MODE_ALPM            (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
+# define REPLAY_RESIDENCY_MODE_IPS             0x10
 
 #define REPLAY_RESIDENCY_ENABLE_MASK           (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
 # define REPLAY_RESIDENCY_DISABLE              (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
@@ -2894,6 +2895,10 @@ enum dmub_cmd_replay_type {
         * Set Residency Frameupdate Timer.
         */
        DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER = 6,
+       /**
+        * Set pseudo vtotal
+        */
+       DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7,
 };
 
 /**
@@ -3076,6 +3081,26 @@ struct dmub_cmd_replay_set_timing_sync_data {
        uint8_t pad[2];
 };
 
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_cmd_replay_set_pseudo_vtotal {
+       /**
+        * Panel Instance.
+        * Panel isntance to identify which replay_state to use
+        * Currently the support is only for 0 or 1
+        */
+       uint8_t panel_inst;
+       /**
+        * Source Vtotal that Replay + IPS + ABM full screen video src vtotal
+        */
+       uint16_t vtotal;
+       /**
+        * Explicit padding to 4 byte boundary.
+        */
+       uint8_t pad;
+};
+
 /**
  * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
  */
@@ -3156,6 +3181,20 @@ struct dmub_rb_cmd_replay_set_timing_sync {
        struct dmub_cmd_replay_set_timing_sync_data replay_set_timing_sync_data;
 };
 
+/**
+ * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_rb_cmd_replay_set_pseudo_vtotal {
+       /**
+        * Command header.
+        */
+       struct dmub_cmd_header header;
+       /**
+        * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+        */
+       struct dmub_cmd_replay_set_pseudo_vtotal data;
+};
+
 /**
  * Data passed from driver to FW in  DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
  */
@@ -3207,6 +3246,10 @@ union dmub_replay_cmd_set {
         * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data.
         */
        struct dmub_cmd_replay_frameupdate_timer_data timer_data;
+       /**
+        * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
+        */
+       struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data;
 };
 
 /**
@@ -4358,6 +4401,10 @@ union dmub_rb_cmd {
         * Definition of a DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
         */
        struct dmub_rb_cmd_replay_set_frameupdate_timer replay_set_frameupdate_timer;
+       /**
+        * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+        */
+       struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal;
 };
 
 /**
index 66a54da0641ce11feb10e1d777395f9bcd85f658..915a031a43cb286fdb03f2fb2788d0fa9e539b59 100644 (file)
@@ -64,7 +64,7 @@ enum audio_dto_source {
 /* PLL information required for AZALIA DTO calculation */
 
 struct audio_pll_info {
-       uint32_t dp_dto_source_clock_in_khz;
+       uint32_t audio_dto_source_clock_in_khz;
        uint32_t feed_back_divider;
        enum audio_dto_source dto_source;
        bool ss_enabled;
index ad98e504c00de5908ca94a38392ef818e91b2152..e304e8435fb8f1c5e29428f72c20a6097fb57697 100644 (file)
@@ -980,6 +980,11 @@ void set_replay_coasting_vtotal(struct dc_link *link,
        link->replay_settings.coasting_vtotal_table[type] = vtotal;
 }
 
+void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
+{
+       link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal;
+}
+
 void calculate_replay_link_off_frame_count(struct dc_link *link,
        uint16_t vtotal, uint16_t htotal)
 {
index c17bbc6fb38cafb518777b16c96a99b2116c36eb..bef4815e1703d78cdebc6f49bc160932d08c5272 100644 (file)
@@ -57,6 +57,7 @@ void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
 void set_replay_coasting_vtotal(struct dc_link *link,
        enum replay_coasting_vtotal_type type,
        uint16_t vtotal);
+void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
 void calculate_replay_link_off_frame_count(struct dc_link *link,
        uint16_t vtotal, uint16_t htotal);
 
index 1dc5dd9b7bf70b10641a76e4c731e3e735aeaeef..df2c7ffe190f4db36050901dce5af89180646f3b 100644 (file)
@@ -258,6 +258,7 @@ enum DC_DEBUG_MASK {
        DC_ENABLE_DML2 = 0x100,
        DC_DISABLE_PSR_SU = 0x200,
        DC_DISABLE_REPLAY = 0x400,
+       DC_DISABLE_IPS = 0x800,
 };
 
 enum amd_dpm_forced_level;
index be519c8edf496fda93f393a077f174454e635a05..335980e2afbfb8e6eae89e7f28fdcc3391d39cde 100644 (file)
@@ -138,7 +138,7 @@ static inline size_t amdgpu_reginst_size(uint16_t num_inst, size_t inst_size,
 }
 
 #define amdgpu_asic_get_reg_state_supported(adev) \
-       ((adev)->asic_funcs->get_reg_state ? 1 : 0)
+       (((adev)->asic_funcs && (adev)->asic_funcs->get_reg_state) ? 1 : 0)
 
 #define amdgpu_asic_get_reg_state(adev, state, buf, size)                  \
        ((adev)->asic_funcs->get_reg_state ?                               \
index 7ee3d291120d5429d879745c2e63af38cd79f371..6f80bfa7e41ac9c1bdd2faaba4c298cc3f4f9d34 100644 (file)
 #define regBIF_BX1_MM_CFGREGS_CNTL_BASE_IDX                                                             2
 #define regBIF_BX1_BX_RESET_CNTL                                                                        0x00f0
 #define regBIF_BX1_BX_RESET_CNTL_BASE_IDX                                                               2
-#define regBIF_BX1_INTERRUPT_CNTL                                                                       0x8e11
-#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX                                                              5
-#define regBIF_BX1_INTERRUPT_CNTL2                                                                      0x8e12
-#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX                                                             5
+#define regBIF_BX1_INTERRUPT_CNTL                                                                       0x00f1
+#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX                                                              2
+#define regBIF_BX1_INTERRUPT_CNTL2                                                                      0x00f2
+#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX                                                             2
 #define regBIF_BX1_CLKREQB_PAD_CNTL                                                                     0x00f8
 #define regBIF_BX1_CLKREQB_PAD_CNTL_BASE_IDX                                                            2
 #define regBIF_BX1_BIF_FEATURES_CONTROL_MISC                                                            0x00fb
index f3cb490fe79b16baa5a917c8304b6fbfcfeb8729..087d57850304c45193a7f5de336953c1dec9cbba 100644 (file)
@@ -4349,11 +4349,19 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
                seq_printf(m, "\t%u mV (VDDNB)\n", value);
        size = sizeof(uint32_t);
-       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
-               seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff);
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
+               if (adev->flags & AMD_IS_APU)
+                       seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
+               else
+                       seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
+       }
        size = sizeof(uint32_t);
-       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
-               seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff);
+       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
+               if (adev->flags & AMD_IS_APU)
+                       seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
+               else
+                       seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
+       }
        size = sizeof(value);
        seq_printf(m, "\n");
 
@@ -4379,9 +4387,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
                /* VCN clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
                        if (!value) {
-                               seq_printf(m, "VCN: Disabled\n");
+                               seq_printf(m, "VCN: Powered down\n");
                        } else {
-                               seq_printf(m, "VCN: Enabled\n");
+                               seq_printf(m, "VCN: Powered up\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
@@ -4393,9 +4401,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
                /* UVD clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
                        if (!value) {
-                               seq_printf(m, "UVD: Disabled\n");
+                               seq_printf(m, "UVD: Powered down\n");
                        } else {
-                               seq_printf(m, "UVD: Enabled\n");
+                               seq_printf(m, "UVD: Powered up\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
@@ -4407,9 +4415,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
                /* VCE clocks */
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
                        if (!value) {
-                               seq_printf(m, "VCE: Disabled\n");
+                               seq_printf(m, "VCE: Powered down\n");
                        } else {
-                               seq_printf(m, "VCE: Enabled\n");
+                               seq_printf(m, "VCE: Powered up\n");
                                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
                                        seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
                        }
index f2a55c1413f597a4d643d1a2c99367517bdff17e..17882f8dfdd34f92d5d37a9b0ee37f4a7d1bb406 100644 (file)
@@ -200,7 +200,7 @@ static int get_platform_power_management_table(
                struct pp_hwmgr *hwmgr,
                ATOM_Tonga_PPM_Table *atom_ppm_table)
 {
-       struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
+       struct phm_ppm_table *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
        struct phm_ppt_v1_information *pp_table_information =
                (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
index b1a8799e2dee320390d239c6242600d4d30cdc39..aa91730e4eaffdf7760c844a7722aa1dedcb42d9 100644 (file)
@@ -3999,6 +3999,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
        uint32_t sclk, mclk, activity_percent;
        uint32_t offset, val_vid;
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct amdgpu_device *adev = hwmgr->adev;
 
        /* size must be at least 4 bytes for all sensors */
        if (*size < 4)
@@ -4042,7 +4043,21 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
-               return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+               if ((adev->asic_type != CHIP_HAWAII) &&
+                   (adev->asic_type != CHIP_BONAIRE) &&
+                   (adev->asic_type != CHIP_FIJI) &&
+                   (adev->asic_type != CHIP_TONGA))
+                       return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+               else
+                       return -EOPNOTSUPP;
+       case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+               if ((adev->asic_type != CHIP_HAWAII) &&
+                   (adev->asic_type != CHIP_BONAIRE) &&
+                   (adev->asic_type != CHIP_FIJI) &&
+                   (adev->asic_type != CHIP_TONGA))
+                       return -EOPNOTSUPP;
+               else
+                       return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
        case AMDGPU_PP_SENSOR_VDDGFX:
                if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
                    (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
index c16703868e5ca2a3f0a7f6c7e3757b8e6ba036d0..0ad947df777ab2665a8f0de986a5d39737dd9ded 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <linux/firmware.h>
 #include <linux/pci.h>
+#include <linux/power_supply.h>
 #include <linux/reboot.h>
 
 #include "amdgpu.h"
@@ -733,7 +734,7 @@ static int smu_early_init(void *handle)
        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;
        smu->is_apu = false;
-       smu->smu_baco.state = SMU_BACO_STATE_NONE;
+       smu->smu_baco.state = SMU_BACO_STATE_EXIT;
        smu->smu_baco.platform_support = false;
        smu->user_dpm_profile.fan_mode = -1;
 
@@ -817,16 +818,8 @@ static int smu_late_init(void *handle)
         * handle the switch automatically. Driver involvement
         * is unnecessary.
         */
-       if (!smu->dc_controlled_by_gpio) {
-               ret = smu_set_power_source(smu,
-                                          adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
-                                          SMU_POWER_SOURCE_DC);
-               if (ret) {
-                       dev_err(adev->dev, "Failed to switch to %s mode!\n",
-                               adev->pm.ac_power ? "AC" : "DC");
-                       return ret;
-               }
-       }
+       adev->pm.ac_power = power_supply_is_system_supplied() > 0;
+       smu_set_ac_dc(smu);
 
        if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
            (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
@@ -1961,31 +1954,10 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
        return 0;
 }
 
-static int smu_reset_mp1_state(struct smu_context *smu)
-{
-       struct amdgpu_device *adev = smu->adev;
-       int ret = 0;
-
-       if ((!adev->in_runpm) && (!adev->in_suspend) &&
-               (!amdgpu_in_reset(adev)))
-               switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
-               case IP_VERSION(13, 0, 0):
-               case IP_VERSION(13, 0, 7):
-               case IP_VERSION(13, 0, 10):
-                       ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
-                       break;
-               default:
-                       break;
-               }
-
-       return ret;
-}
-
 static int smu_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = adev->powerplay.pp_handle;
-       int ret;
 
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;
@@ -2003,15 +1975,7 @@ static int smu_hw_fini(void *handle)
 
        adev->pm.dpm_enabled = false;
 
-       ret = smu_smc_hw_cleanup(smu);
-       if (ret)
-               return ret;
-
-       ret = smu_reset_mp1_state(smu);
-       if (ret)
-               return ret;
-
-       return 0;
+       return smu_smc_hw_cleanup(smu);
 }
 
 static void smu_late_fini(void *handle)
@@ -2710,6 +2674,7 @@ int smu_get_power_limit(void *handle,
                case SMU_PPT_LIMIT_CURRENT:
                        switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
                        case IP_VERSION(13, 0, 2):
+                       case IP_VERSION(13, 0, 6):
                        case IP_VERSION(11, 0, 7):
                        case IP_VERSION(11, 0, 11):
                        case IP_VERSION(11, 0, 12):
index 2aa4fea873147516c23fb2fc568a94d907ee1c8a..66e84defd0b6ec2521c230262c34215a14251dfb 100644 (file)
@@ -424,7 +424,6 @@ enum smu_reset_mode {
 enum smu_baco_state {
        SMU_BACO_STATE_ENTER = 0,
        SMU_BACO_STATE_EXIT,
-       SMU_BACO_STATE_NONE,
 };
 
 struct smu_baco_context {
index 5a314d0316c1c8410d1f44281e5cc487e4947e81..c7bfa68bf00f400f3396c9853d2c08c6bf971659 100644 (file)
@@ -1442,10 +1442,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                        case 0x3:
                                dev_dbg(adev->dev, "Switched to AC mode!\n");
                                schedule_work(&smu->interrupt_work);
+                               adev->pm.ac_power = true;
                                break;
                        case 0x4:
                                dev_dbg(adev->dev, "Switched to DC mode!\n");
                                schedule_work(&smu->interrupt_work);
+                               adev->pm.ac_power = false;
                                break;
                        case 0x7:
                                /*
index f1440869d1ce0597fee3f5819c80db505136cbfa..dd9bcbd630a1f9465285127dd23999f425bcc986 100644 (file)
@@ -1530,7 +1530,6 @@ static int aldebaran_i2c_control_init(struct smu_context *smu)
        smu_i2c->port = 0;
        mutex_init(&smu_i2c->mutex);
        control->owner = THIS_MODULE;
-       control->class = I2C_CLASS_SPD;
        control->dev.parent = &adev->pdev->dev;
        control->algo = &aldebaran_i2c_algo;
        snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
index 771a3d457c335e2cc08582a3e4e3a3ba853d2928..c486182ff275222fedfaa1e27c417f9be80d19d0 100644 (file)
@@ -1379,10 +1379,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
                        case 0x3:
                                dev_dbg(adev->dev, "Switched to AC mode!\n");
                                smu_v13_0_ack_ac_dc_interrupt(smu);
+                               adev->pm.ac_power = true;
                                break;
                        case 0x4:
                                dev_dbg(adev->dev, "Switched to DC mode!\n");
                                smu_v13_0_ack_ac_dc_interrupt(smu);
+                               adev->pm.ac_power = false;
                                break;
                        case 0x7:
                                /*
index 231122622a9c06c8b255a48fcdd4231ffa58b7a9..a9954ffc02c562b91bf1166bb4bf87208152b36a 100644 (file)
@@ -2357,6 +2357,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
        PPTable_t *pptable = table_context->driver_pptable;
        SkuTable_t *skutable = &pptable->SkuTable;
        uint32_t power_limit, od_percent_upper, od_percent_lower;
+       uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
 
        if (smu_v13_0_get_current_power_limit(smu, &power_limit))
                power_limit = smu->adev->pm.ac_power ?
@@ -2380,7 +2381,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
                                        od_percent_upper, od_percent_lower, power_limit);
 
        if (max_power_limit) {
-               *max_power_limit = power_limit * (100 + od_percent_upper);
+               *max_power_limit = msg_limit * (100 + od_percent_upper);
                *max_power_limit /= 100;
        }
 
@@ -2696,7 +2697,6 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
                smu_i2c->port = i;
                mutex_init(&smu_i2c->mutex);
                control->owner = THIS_MODULE;
-               control->class = I2C_CLASS_SPD;
                control->dev.parent = &adev->pdev->dev;
                control->algo = &smu_v13_0_0_i2c_algo;
                snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
@@ -2748,13 +2748,7 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
 
        switch (mp1_state) {
        case PP_MP1_STATE_UNLOAD:
-               ret = smu_cmn_send_smc_msg_with_param(smu,
-                                                                                         SMU_MSG_PrepareMp1ForUnload,
-                                                                                         0x55, NULL);
-
-               if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
-                       ret = smu_v13_0_disable_pmfw_state(smu);
-
+               ret = smu_cmn_set_mp1_state(smu, mp1_state);
                break;
        default:
                /* Ignore others */
@@ -2950,7 +2944,7 @@ static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       switch (adev->ip_versions[MP1_HWIP][0]) {
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
        case IP_VERSION(13, 0, 0):
                return smu->smc_fw_version >= 0x004e6300;
        case IP_VERSION(13, 0, 10):
@@ -2960,6 +2954,55 @@ static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
        }
 }
 
+static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
+                                      enum smu_ppt_limit_type limit_type,
+                                      uint32_t limit)
+{
+       PPTable_t *pptable = smu->smu_table.driver_pptable;
+       SkuTable_t *skutable = &pptable->SkuTable;
+       uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+       struct smu_table_context *table_context = &smu->smu_table;
+       OverDriveTableExternal_t *od_table =
+               (OverDriveTableExternal_t *)table_context->overdrive_table;
+       int ret = 0;
+
+       if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+               return -EINVAL;
+
+       if (limit <= msg_limit) {
+               if (smu->current_power_limit > msg_limit) {
+                       od_table->OverDriveTable.Ppt = 0;
+                       od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+                       ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+                               return ret;
+                       }
+               }
+               return smu_v13_0_set_power_limit(smu, limit_type, limit);
+       } else if (smu->od_enabled) {
+               ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+               if (ret)
+                       return ret;
+
+               od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+               od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+               ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+               if (ret) {
+                 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+                 return ret;
+               }
+
+               smu->current_power_limit = limit;
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
        .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3014,7 +3057,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .set_fan_control_mode = smu_v13_0_set_fan_control_mode,
        .enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
        .get_power_limit = smu_v13_0_0_get_power_limit,
-       .set_power_limit = smu_v13_0_set_power_limit,
+       .set_power_limit = smu_v13_0_0_set_power_limit,
        .set_power_source = smu_v13_0_set_power_source,
        .get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
        .set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
index 4ebc6b421c2cb44baea23225a34e31e9c47464b5..7e1941cf17964c594dc8821c3fcda75f64e9f145 100644 (file)
@@ -160,8 +160,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
        MSG_MAP(GfxDriverResetRecovery,              PPSMC_MSG_GfxDriverResetRecovery,          0),
        MSG_MAP(GetMinGfxclkFrequency,               PPSMC_MSG_GetMinGfxDpmFreq,                1),
        MSG_MAP(GetMaxGfxclkFrequency,               PPSMC_MSG_GetMaxGfxDpmFreq,                1),
-       MSG_MAP(SetSoftMinGfxclk,                    PPSMC_MSG_SetSoftMinGfxClk,                0),
-       MSG_MAP(SetSoftMaxGfxClk,                    PPSMC_MSG_SetSoftMaxGfxClk,                0),
+       MSG_MAP(SetSoftMinGfxclk,                    PPSMC_MSG_SetSoftMinGfxClk,                1),
+       MSG_MAP(SetSoftMaxGfxClk,                    PPSMC_MSG_SetSoftMaxGfxClk,                1),
        MSG_MAP(PrepareMp1ForUnload,                 PPSMC_MSG_PrepareForDriverUnload,          0),
        MSG_MAP(GetCTFLimit,                         PPSMC_MSG_GetCTFLimit,                     0),
        MSG_MAP(GetThermalLimit,                     PPSMC_MSG_ReadThrottlerLimit,              0),
@@ -970,7 +970,9 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
                        if (i < (clocks.num_levels - 1))
                                clk2 = clocks.data[i + 1].clocks_in_khz / 1000;
 
-                       if (curr_clk >= clk1 && curr_clk < clk2) {
+                       if (curr_clk == clk1) {
+                               level = i;
+                       } else if (curr_clk >= clk1 && curr_clk < clk2) {
                                level = (curr_clk - clk1) <= (clk2 - curr_clk) ?
                                                i :
                                                i + 1;
@@ -1936,7 +1938,6 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
                smu_i2c->port = i;
                mutex_init(&smu_i2c->mutex);
                control->owner = THIS_MODULE;
-               control->class = I2C_CLASS_SPD;
                control->dev.parent = &adev->pdev->dev;
                control->algo = &smu_v13_0_6_i2c_algo;
                snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
@@ -2235,17 +2236,18 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
                        continue;
                }
 
-               if (ret) {
-                       dev_err(adev->dev,
-                               "failed to send mode2 message \tparam: 0x%08x error code %d\n",
-                               SMU_RESET_MODE_2, ret);
+               if (ret)
                        goto out;
-               }
+
        } while (ret == -ETIME && timeout);
 
 out:
        mutex_unlock(&smu->message_lock);
 
+       if (ret)
+               dev_err(adev->dev, "failed to send mode2 reset, error code %d",
+                       ret);
+
        return ret;
 }
 
index 59606a19e3d2b4494885b72f36f6524ec25b5139..0ffdb58af74e654af7ca73acf078415663f41dfe 100644 (file)
@@ -2321,6 +2321,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
        PPTable_t *pptable = table_context->driver_pptable;
        SkuTable_t *skutable = &pptable->SkuTable;
        uint32_t power_limit, od_percent_upper, od_percent_lower;
+       uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
 
        if (smu_v13_0_get_current_power_limit(smu, &power_limit))
                power_limit = smu->adev->pm.ac_power ?
@@ -2344,7 +2345,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
                                        od_percent_upper, od_percent_lower, power_limit);
 
        if (max_power_limit) {
-               *max_power_limit = power_limit * (100 + od_percent_upper);
+               *max_power_limit = msg_limit * (100 + od_percent_upper);
                *max_power_limit /= 100;
        }
 
@@ -2504,13 +2505,7 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
 
        switch (mp1_state) {
        case PP_MP1_STATE_UNLOAD:
-               ret = smu_cmn_send_smc_msg_with_param(smu,
-                                                                                         SMU_MSG_PrepareMp1ForUnload,
-                                                                                         0x55, NULL);
-
-               if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
-                       ret = smu_v13_0_disable_pmfw_state(smu);
-
+               ret = smu_cmn_set_mp1_state(smu, mp1_state);
                break;
        default:
                /* Ignore others */
@@ -2545,6 +2540,55 @@ static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu)
        return smu->smc_fw_version > 0x00524600;
 }
 
+static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
+                                      enum smu_ppt_limit_type limit_type,
+                                      uint32_t limit)
+{
+       PPTable_t *pptable = smu->smu_table.driver_pptable;
+       SkuTable_t *skutable = &pptable->SkuTable;
+       uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+       struct smu_table_context *table_context = &smu->smu_table;
+       OverDriveTableExternal_t *od_table =
+               (OverDriveTableExternal_t *)table_context->overdrive_table;
+       int ret = 0;
+
+       if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+               return -EINVAL;
+
+       if (limit <= msg_limit) {
+               if (smu->current_power_limit > msg_limit) {
+                       od_table->OverDriveTable.Ppt = 0;
+                       od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+                       ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+                               return ret;
+                       }
+               }
+               return smu_v13_0_set_power_limit(smu, limit_type, limit);
+       } else if (smu->od_enabled) {
+               ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+               if (ret)
+                       return ret;
+
+               od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+               od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+               ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+               if (ret) {
+                 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+                 return ret;
+               }
+
+               smu->current_power_limit = limit;
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
        .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2596,7 +2640,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .set_fan_control_mode = smu_v13_0_set_fan_control_mode,
        .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
        .get_power_limit = smu_v13_0_7_get_power_limit,
-       .set_power_limit = smu_v13_0_set_power_limit,
+       .set_power_limit = smu_v13_0_7_set_power_limit,
        .set_power_source = smu_v13_0_set_power_source,
        .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
        .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
index 0e845e7acd9b5aeb091f4f473706968c6358056c..e5d3f7121de4206f4d1af450e450411e349eb1d8 100644 (file)
@@ -120,7 +120,6 @@ struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
                return NULL;
 
        i2c->adapter.owner = THIS_MODULE;
-       i2c->adapter.class = I2C_CLASS_DDC;
        i2c->adapter.dev.parent = dev->dev;
        i2c->dev = dev;
        i2c_set_adapdata(&i2c->adapter, i2c);
index ef31033439bc15a896ed8748b7a62a8b46336c13..29d91493b101acb5234c9a2fe76441925b346f55 100644 (file)
@@ -1762,6 +1762,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
        u8 request = msg->request & ~DP_AUX_I2C_MOT;
        int ret = 0;
 
+       mutex_lock(&ctx->aux_lock);
        pm_runtime_get_sync(dev);
        msg->reply = 0;
        switch (request) {
@@ -1778,6 +1779,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
                                        msg->size, msg->buffer);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
+       mutex_unlock(&ctx->aux_lock);
 
        return ret;
 }
@@ -2474,7 +2476,9 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
        ctx->connector = NULL;
        anx7625_dp_stop(ctx);
 
-       pm_runtime_put_sync(dev);
+       mutex_lock(&ctx->aux_lock);
+       pm_runtime_put_sync_suspend(dev);
+       mutex_unlock(&ctx->aux_lock);
 }
 
 static enum drm_connector_status
@@ -2668,6 +2672,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
 
        mutex_init(&platform->lock);
        mutex_init(&platform->hdcp_wq_lock);
+       mutex_init(&platform->aux_lock);
 
        INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func);
        platform->hdcp_workqueue = create_workqueue("hdcp workqueue");
index 66ebee7f3d832534ec64b780bdfa985bbfcfc896..39ed35d338363390d2fe37b765d4e0e48dc0118e 100644 (file)
@@ -475,6 +475,8 @@ struct anx7625_data {
        struct workqueue_struct *hdcp_workqueue;
        /* Lock for hdcp work queue */
        struct mutex hdcp_wq_lock;
+       /* Lock for aux transfer and disable */
+       struct mutex aux_lock;
        char edid_block;
        struct display_timing dt;
        u8 display_timing_valid;
index 541e4f5afc4c86a4e87b74a016885d6231afb892..14d4dcf239da835955f1d594579dd165288bd63f 100644 (file)
@@ -107,6 +107,7 @@ struct ps8640 {
        struct device_link *link;
        bool pre_enabled;
        bool need_post_hpd_delay;
+       struct mutex aux_lock;
 };
 
 static const struct regmap_config ps8640_regmap_config[] = {
@@ -345,11 +346,20 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
        struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
        int ret;
 
+       mutex_lock(&ps_bridge->aux_lock);
        pm_runtime_get_sync(dev);
+       ret = _ps8640_wait_hpd_asserted(ps_bridge, 200 * 1000);
+       if (ret) {
+               pm_runtime_put_sync_suspend(dev);
+               goto exit;
+       }
        ret = ps8640_aux_transfer_msg(aux, msg);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
 
+exit:
+       mutex_unlock(&ps_bridge->aux_lock);
+
        return ret;
 }
 
@@ -470,7 +480,18 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
        ps_bridge->pre_enabled = false;
 
        ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+
+       /*
+        * The bridge seems to expect everything to be power cycled at the
+        * disable process, so grab a lock here to make sure
+        * ps8640_aux_transfer() is not holding a runtime PM reference and
+        * preventing the bridge from suspend.
+        */
+       mutex_lock(&ps_bridge->aux_lock);
+
        pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev);
+
+       mutex_unlock(&ps_bridge->aux_lock);
 }
 
 static int ps8640_bridge_attach(struct drm_bridge *bridge,
@@ -619,6 +640,8 @@ static int ps8640_probe(struct i2c_client *client)
        if (!ps_bridge)
                return -ENOMEM;
 
+       mutex_init(&ps_bridge->aux_lock);
+
        ps_bridge->supplies[0].supply = "vdd12";
        ps_bridge->supplies[1].supply = "vdd33";
        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
index be5914caa17d546601d11719976161624c1a420f..63a1a0c88be4d98d169996d341de5d0d1b6cae91 100644 (file)
@@ -969,10 +969,6 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
        reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
        reg &= ~DSIM_STOP_STATE_CNT_MASK;
        reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
-
-       if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
-               reg |= DSIM_FORCE_STOP_STATE;
-
        samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
 
        reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
@@ -1431,18 +1427,6 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
        disable_irq(dsi->irq);
 }
 
-static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
-{
-       u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
-
-       if (enable)
-               reg |= DSIM_FORCE_STOP_STATE;
-       else
-               reg &= ~DSIM_FORCE_STOP_STATE;
-
-       samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
-}
-
 static int samsung_dsim_init(struct samsung_dsim *dsi)
 {
        const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
@@ -1492,9 +1476,6 @@ static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
                ret = samsung_dsim_init(dsi);
                if (ret)
                        return;
-
-               samsung_dsim_set_display_mode(dsi);
-               samsung_dsim_set_display_enable(dsi, true);
        }
 }
 
@@ -1503,12 +1484,8 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
 {
        struct samsung_dsim *dsi = bridge_to_dsi(bridge);
 
-       if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
-               samsung_dsim_set_display_mode(dsi);
-               samsung_dsim_set_display_enable(dsi, true);
-       } else {
-               samsung_dsim_set_stop_state(dsi, false);
-       }
+       samsung_dsim_set_display_mode(dsi);
+       samsung_dsim_set_display_enable(dsi, true);
 
        dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
 }
@@ -1521,9 +1498,6 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
        if (!(dsi->state & DSIM_STATE_ENABLED))
                return;
 
-       if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
-               samsung_dsim_set_stop_state(dsi, true);
-
        dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
 }
 
@@ -1828,8 +1802,6 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
        if (ret)
                return ret;
 
-       samsung_dsim_set_stop_state(dsi, false);
-
        ret = mipi_dsi_create_packet(&xfer.packet, msg);
        if (ret < 0)
                return ret;
index 2bdc5b439bebd56407af3b5b04892b3ac90678d4..4560ae9cbce15095eddaf6296396960a7887ab06 100644 (file)
@@ -1080,6 +1080,26 @@ static int sii902x_init(struct sii902x *sii902x)
                        return ret;
        }
 
+       ret = sii902x_audio_codec_init(sii902x, dev);
+       if (ret)
+               return ret;
+
+       i2c_set_clientdata(sii902x->i2c, sii902x);
+
+       sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
+                                       1, 0, I2C_MUX_GATE,
+                                       sii902x_i2c_bypass_select,
+                                       sii902x_i2c_bypass_deselect);
+       if (!sii902x->i2cmux) {
+               ret = -ENOMEM;
+               goto err_unreg_audio;
+       }
+
+       sii902x->i2cmux->priv = sii902x;
+       ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
+       if (ret)
+               goto err_unreg_audio;
+
        sii902x->bridge.funcs = &sii902x_bridge_funcs;
        sii902x->bridge.of_node = dev->of_node;
        sii902x->bridge.timings = &default_sii902x_timings;
@@ -1090,19 +1110,13 @@ static int sii902x_init(struct sii902x *sii902x)
 
        drm_bridge_add(&sii902x->bridge);
 
-       sii902x_audio_codec_init(sii902x, dev);
-
-       i2c_set_clientdata(sii902x->i2c, sii902x);
+       return 0;
 
-       sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
-                                       1, 0, I2C_MUX_GATE,
-                                       sii902x_i2c_bypass_select,
-                                       sii902x_i2c_bypass_deselect);
-       if (!sii902x->i2cmux)
-               return -ENOMEM;
+err_unreg_audio:
+       if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
+               platform_device_unregister(sii902x->audio.pdev);
 
-       sii902x->i2cmux->priv = sii902x;
-       return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
+       return ret;
 }
 
 static int sii902x_probe(struct i2c_client *client)
@@ -1170,12 +1184,14 @@ static int sii902x_probe(struct i2c_client *client)
 }
 
 static void sii902x_remove(struct i2c_client *client)
-
 {
        struct sii902x *sii902x = i2c_get_clientdata(client);
 
-       i2c_mux_del_adapters(sii902x->i2cmux);
        drm_bridge_remove(&sii902x->bridge);
+       i2c_mux_del_adapters(sii902x->i2cmux);
+
+       if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
+               platform_device_unregister(sii902x->audio.pdev);
 }
 
 static const struct of_device_id sii902x_dt_ids[] = {
index 52d91a0df85e9ba3f6082b71b9218dd0f53e8a01..aca5bb0866f886c05f5914ec647242ee1326509a 100644 (file)
@@ -515,7 +515,6 @@ static struct i2c_adapter *dw_hdmi_i2c_adapter(struct dw_hdmi *hdmi)
        init_completion(&i2c->cmp);
 
        adap = &i2c->adap;
-       adap->class = I2C_CLASS_DDC;
        adap->owner = THIS_MODULE;
        adap->dev.parent = hdmi->dev;
        adap->algo = &dw_hdmi_algorithm;
index d72b6f9a352c10c13d6a66509d29372962dc4cef..b1ca3a1100dabbbad98279a65654a85266953e30 100644 (file)
@@ -2102,7 +2102,6 @@ int drm_dp_aux_register(struct drm_dp_aux *aux)
        if (!aux->ddc.algo)
                drm_dp_aux_init(aux);
 
-       aux->ddc.class = I2C_CLASS_DDC;
        aux->ddc.owner = THIS_MODULE;
        aux->ddc.dev.parent = aux->dev;
 
index 8ca01a6bf645d6f79ed202b2a45cc7265a7c7fa9..f7c6b60629c2ba5b178145977d8490a6e094ce71 100644 (file)
@@ -5491,6 +5491,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
  *   - 0 if the new state is valid
  *   - %-ENOSPC, if the new state is invalid, because of BW limitation
  *         @failing_port is set to:
+ *
  *         - The non-root port where a BW limit check failed
  *           with all the ports downstream of @failing_port passing
  *           the BW limit check.
@@ -5499,6 +5500,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
  *         - %NULL if the BW limit check failed at the root port
  *           with all the ports downstream of the root port passing
  *           the BW limit check.
+ *
  *   - %-EINVAL, if the new state is invalid, because the root port has
  *     too many payloads.
  */
@@ -5926,7 +5928,6 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
        aux->ddc.algo_data = aux;
        aux->ddc.retries = 3;
 
-       aux->ddc.class = I2C_CLASS_DDC;
        aux->ddc.owner = THIS_MODULE;
        /* FIXME: set the kdev of the port's connector as parent */
        aux->ddc.dev.parent = parent_dev;
index 776f2f0b602debb88a6c820add8d737332f2938e..0ef7bc8848b0798b125f7a65ff04cf4586f13d71 100644 (file)
@@ -319,9 +319,9 @@ static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
 static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
                                 struct drm_framebuffer *fb)
 {
-       struct exynos_drm_plane plane = ctx->planes[win];
+       struct exynos_drm_plane *plane = &ctx->planes[win];
        struct exynos_drm_plane_state *state =
-               to_exynos_plane_state(plane.base.state);
+               to_exynos_plane_state(plane->base.state);
        unsigned int alpha = state->base.alpha;
        unsigned int pixel_alpha;
        unsigned long val;
index a9f1c5c058940178c8318484fcea422f1428de69..f2145227a1e0ce889d2ce0a3926a79e64c832fc9 100644 (file)
@@ -480,7 +480,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
        struct fimd_context *ctx = crtc->ctx;
        struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
        const struct fimd_driver_data *driver_data = ctx->driver_data;
-       void *timing_base = ctx->regs + driver_data->timing_base;
+       void __iomem *timing_base = ctx->regs + driver_data->timing_base;
        u32 val;
 
        if (ctx->suspended)
@@ -661,9 +661,9 @@ static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
 static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
                                struct drm_framebuffer *fb, int width)
 {
-       struct exynos_drm_plane plane = ctx->planes[win];
+       struct exynos_drm_plane *plane = &ctx->planes[win];
        struct exynos_drm_plane_state *state =
-               to_exynos_plane_state(plane.base.state);
+               to_exynos_plane_state(plane->base.state);
        uint32_t pixel_format = fb->format->format;
        unsigned int alpha = state->base.alpha;
        u32 val = WINCONx_ENWIN;
index e9a769590415dcd0d7899df16254d7c20cdea8b1..180507a477009d6e424cc5aede8e18255127b3f1 100644 (file)
@@ -1341,7 +1341,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
        for (i = 0; i < ctx->num_clocks; i++) {
                ret = clk_prepare_enable(ctx->clocks[i]);
                if (ret) {
-                       while (--i > 0)
+                       while (--i >= 0)
                                clk_disable_unprepare(ctx->clocks[i]);
                        return ret;
                }
index 8992a95076f29e548d7576b066fb1c75ac1b46ee..dd1eb7e9877d46ba7dae136f5992ea510b627797 100644 (file)
@@ -855,7 +855,6 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
 
        memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
        intel_dp->adapter.owner = THIS_MODULE;
-       intel_dp->adapter.class = I2C_CLASS_DDC;
        strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
        intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
        intel_dp->adapter.algo_data = &intel_dp->algo;
index 09cedabf4776dffb065a74157a8a04a857b77047..aa45509859f21ab9af69d2178473a7d091822068 100644 (file)
@@ -411,7 +411,6 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
                struct intel_gmbus *bus = &dev_priv->gmbus[i];
 
                bus->adapter.owner = THIS_MODULE;
-               bus->adapter.class = I2C_CLASS_DDC;
                snprintf(bus->adapter.name,
                         sizeof(bus->adapter.name),
                         "gma500 gmbus %s",
index fc9a34ed58bd136f298a1cffb23ec2f88b8f2d70..6daa6669ed2374bf858bf68cefb7cd43007a9b28 100644 (file)
@@ -168,7 +168,6 @@ static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
        .name           = "oaktrail_hdmi_i2c",
        .nr             = 3,
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_DDC,
        .algo           = &oaktrail_hdmi_i2c_algorithm,
 };
 
index d6fd5d72621609f40612a0ece610970b82707193..e4f914decebaecca7ac0e464fe9608a93d691791 100644 (file)
@@ -2426,7 +2426,6 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
                          struct drm_device *dev)
 {
        sdvo->ddc.owner = THIS_MODULE;
-       sdvo->ddc.class = I2C_CLASS_DDC;
        snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
        sdvo->ddc.dev.parent = dev->dev;
        sdvo->ddc.algo_data = sdvo;
index 410bd019bb357257054b92ec14b2df1d7b0aa82a..e6e48651c15c63550bbdb79d9742ba21ba26506b 100644 (file)
@@ -81,7 +81,6 @@ int hibmc_ddc_create(struct drm_device *drm_dev,
                     struct hibmc_connector *connector)
 {
        connector->adapter.owner = THIS_MODULE;
-       connector->adapter.class = I2C_CLASS_DDC;
        snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
        connector->adapter.dev.parent = drm_dev->dev;
        i2c_set_adapdata(&connector->adapter, connector);
index b5d6e3352071f5c3765f074200180bd6dfc7685c..3089029abba481828522070dc0063eaa79251bf9 100644 (file)
@@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT
 
          Note that this driver only supports newer device from Broadwell on.
          For further information and setup guide, you can visit:
-         http://01.org/igvt-g.
+         https://github.com/intel/gvt-linux/wiki.
 
          If in doubt, say "N".
 
index e777686190ca241f0ed288b2ad32645d41f1a288..c13f14edb50889baa604b044d2324a371e444ed5 100644 (file)
@@ -17,7 +17,6 @@ subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
 subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
 subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
 subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
-subdir-ccflags-y += $(call cc-option, -Wstringop-overflow)
 subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
 # The following turn off the warnings enabled by -Wextra
 ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
index ac456a2275dbad62cb9a4ac7f706333c73dd03aa..eda4a8b885904de71bb6e3bb1998fa1242a1b9a7 100644 (file)
@@ -1155,6 +1155,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
        }
 
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
 
        /* ensure all panel commands dispatched before enabling transcoder */
        wait_for_cmds_dispatched_to_panel(encoder);
@@ -1255,8 +1256,6 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
        /* step6d: enable dsi transcoder */
        gen11_dsi_enable_transcoder(encoder);
 
-       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
-
        /* step7: enable backlight */
        intel_backlight_enable(crtc_state, conn_state);
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
index 612d4cd9dacbac03c3bab57012c2229b7fbfff4e..3f3cd944a1c5bfcc1a94cfa02ae180496140f408 100644 (file)
@@ -275,7 +275,7 @@ static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state,
        struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
 
        pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
-       pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+       pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
 }
 
 static void
@@ -428,7 +428,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn
        intel_backlight_set_pwm_level(old_conn_state, level);
 
        panel->backlight.pwm_state.enabled = false;
-       pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+       pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
 }
 
 void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
@@ -750,7 +750,7 @@ static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
 
        pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
        panel->backlight.pwm_state.enabled = true;
-       pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+       pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
 }
 
 static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
index 884a1da3608930e1eed49e11c4b192d0106b906e..6b25e195232f13376f9b435f04fa48c50c01abdd 100644 (file)
@@ -3067,24 +3067,29 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
 {
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
-       bool use_mplla;
+       bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB;
+       bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB;
        int i;
 
-       use_mplla = intel_c20_use_mplla(mpll_hw_state->clock);
-       if (use_mplla) {
-               for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
-                       I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
-                                       "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
-                                       crtc->base.base.id, crtc->base.name, i,
-                                       mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
-               }
-       } else {
+       I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
+                       "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
+                       crtc->base.base.id, crtc->base.name,
+                       sw_use_mpllb, hw_use_mpllb);
+
+       if (hw_use_mpllb) {
                for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
                        I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
                                        "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
                                        crtc->base.base.id, crtc->base.name, i,
                                        mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
                }
+       } else {
+               for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
+                       I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
+                                       "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
+                                       crtc->base.base.id, crtc->base.name, i,
+                                       mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
+               }
        }
 
        for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
index 5f091502719b956a7fc3037ecc473b81ba3455d9..6fd4fa52253a35a02957509d7135310346242d41 100644 (file)
@@ -405,8 +405,8 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
                                                     struct drm_i915_private,
                                                     display.power.domains);
 
-       drm_dbg(&i915->drm, "async_put_wakeref %lu\n",
-               power_domains->async_put_wakeref);
+       drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
+               str_yes_no(power_domains->async_put_wakeref));
 
        print_power_domains(power_domains, "async_put_domains[0]",
                            &power_domains->async_put_domains[0]);
index 7d2b8ce48fda178d415f32fe3d1b956240435927..f5ef95da55346ff14cc6b102c27e78e8960cec65 100644 (file)
@@ -2101,7 +2101,7 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
                }
        }
 
-       dsc_max_bpc = intel_dp_dsc_min_src_input_bpc(i915);
+       dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
        if (!dsc_max_bpc)
                return -EINVAL;
 
index 40d7b6f3f4891c8cbe6e6590f90243ca77291581..e9e4dcf345f95722b6c67cb4db802a193c84b0dd 100644 (file)
@@ -899,7 +899,6 @@ int intel_gmbus_setup(struct drm_i915_private *i915)
                }
 
                bus->adapter.owner = THIS_MODULE;
-               bus->adapter.class = I2C_CLASS_DDC;
                snprintf(bus->adapter.name,
                         sizeof(bus->adapter.name),
                         "i915 gmbus %s", gmbus_pin->name);
index b6e2e70e129046040336012604f3955a12c3ce8f..57bbf3e3af92fbb0325d0c41765f7a0f0d0ac806 100644 (file)
@@ -1525,8 +1525,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
         * can rely on frontbuffer tracking.
         */
        mask = EDP_PSR_DEBUG_MASK_MEMUP |
-              EDP_PSR_DEBUG_MASK_HPD |
-              EDP_PSR_DEBUG_MASK_LPSP;
+              EDP_PSR_DEBUG_MASK_HPD;
+
+       /*
+        * For some unknown reason on HSW non-ULT (or at least on
+        * Dell Latitude E6540) external displays start to flicker
+        * when PSR is enabled on the eDP. SR/PC6 residency is much
+        * higher than should be possible with an external display.
+        * As a workaround leave LPSP unmasked to prevent PSR entry
+        * when external displays are active.
+        */
+       if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
+               mask |= EDP_PSR_DEBUG_MASK_LPSP;
 
        if (DISPLAY_VER(dev_priv) < 20)
                mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
@@ -3319,11 +3329,11 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct dentry *root = connector->base.debugfs_entry;
 
-       if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) {
-               if (!(HAS_DP20(i915) &&
-                     connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort))
-                       return;
-       }
+       /* TODO: Add support for MST connectors as well. */
+       if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
+            connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+           connector->mst_port)
+               return;
 
        debugfs_create_file("i915_psr_sink_status", 0444, root,
                            connector, &i915_psr_sink_status_fops);
index 9218047495fb41980778f8850805e2178677d50e..acc6b6804105102389dc26c3fefce80444d0adad 100644 (file)
@@ -3327,7 +3327,6 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc,
        ddc->ddc_bus = ddc_bus;
 
        ddc->ddc.owner = THIS_MODULE;
-       ddc->ddc.class = I2C_CLASS_DDC;
        snprintf(ddc->ddc.name, I2C_NAME_SIZE, "SDVO %c DDC%d",
                 port_name(sdvo->base.port), ddc_bus);
        ddc->ddc.dev.parent = &pdev->dev;
index c573c067779f58ad0187832324cdb3fcc2892bc9..03bc7f9d191b98a4df1201098f9fa5c160a3502c 100644 (file)
@@ -412,9 +412,9 @@ struct i915_gem_context {
 
        /** @stale: tracks stale engines to be destroyed */
        struct {
-               /** @lock: guards engines */
+               /** @stale.lock: guards engines */
                spinlock_t lock;
-               /** @engines: list of stale engines */
+               /** @stale.engines: list of stale engines */
                struct list_head engines;
        } stale;
 };
index 7ab3ca0f9f268dc16a9728b585124ad0d4dd5a7b..013c642514486a45c5a49afbb924c9a6a7ca90a8 100644 (file)
@@ -21,8 +21,11 @@ struct mei_aux_device;
 /**
  * struct intel_gsc - graphics security controller
  *
- * @gem_obj: scratch memory GSC operations
- * @intf : gsc interface
+ * @intf: gsc interface
+ * @intf.adev: MEI aux. device for this @intf
+ * @intf.gem_obj: scratch memory GSC operations
+ * @intf.irq: IRQ for this device (%-1 for no IRQ)
+ * @intf.id: this interface's id number/index
  */
 struct intel_gsc {
        struct intel_gsc_intf {
index e22c12ce245adf2e79e7623a9222797c13e62c97..813cc888e6fae3f602661840f726ccf8f8908ec2 100644 (file)
@@ -105,61 +105,67 @@ struct intel_guc {
         */
        struct {
                /**
-                * @lock: protects everything in submission_state,
-                * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
-                * out of zero
+                * @submission_state.lock: protects everything in
+                * submission_state, ce->guc_id.id, and ce->guc_id.ref
+                * when transitioning in and out of zero
                 */
                spinlock_t lock;
                /**
-                * @guc_ids: used to allocate new guc_ids, single-lrc
+                * @submission_state.guc_ids: used to allocate new
+                * guc_ids, single-lrc
                 */
                struct ida guc_ids;
                /**
-                * @num_guc_ids: Number of guc_ids, selftest feature to be able
-                * to reduce this number while testing.
+                * @submission_state.num_guc_ids: Number of guc_ids, selftest
+                * feature to be able to reduce this number while testing.
                 */
                int num_guc_ids;
                /**
-                * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+                * @submission_state.guc_ids_bitmap: used to allocate
+                * new guc_ids, multi-lrc
                 */
                unsigned long *guc_ids_bitmap;
                /**
-                * @guc_id_list: list of intel_context with valid guc_ids but no
-                * refs
+                * @submission_state.guc_id_list: list of intel_context
+                * with valid guc_ids but no refs
                 */
                struct list_head guc_id_list;
                /**
-                * @guc_ids_in_use: Number single-lrc guc_ids in use
+                * @submission_state.guc_ids_in_use: Number single-lrc
+                * guc_ids in use
                 */
                unsigned int guc_ids_in_use;
                /**
-                * @destroyed_contexts: list of contexts waiting to be destroyed
-                * (deregistered with the GuC)
+                * @submission_state.destroyed_contexts: list of contexts
+                * waiting to be destroyed (deregistered with the GuC)
                 */
                struct list_head destroyed_contexts;
                /**
-                * @destroyed_worker: worker to deregister contexts, need as we
-                * need to take a GT PM reference and can't from destroy
-                * function as it might be in an atomic context (no sleeping)
+                * @submission_state.destroyed_worker: worker to deregister
+                * contexts, need as we need to take a GT PM reference and
+                * can't from destroy function as it might be in an atomic
+                * context (no sleeping)
                 */
                struct work_struct destroyed_worker;
                /**
-                * @reset_fail_worker: worker to trigger a GT reset after an
-                * engine reset fails
+                * @submission_state.reset_fail_worker: worker to trigger
+                * a GT reset after an engine reset fails
                 */
                struct work_struct reset_fail_worker;
                /**
-                * @reset_fail_mask: mask of engines that failed to reset
+                * @submission_state.reset_fail_mask: mask of engines that
+                * failed to reset
                 */
                intel_engine_mask_t reset_fail_mask;
                /**
-                * @sched_disable_delay_ms: schedule disable delay, in ms, for
-                * contexts
+                * @submission_state.sched_disable_delay_ms: schedule
+                * disable delay, in ms, for contexts
                 */
                unsigned int sched_disable_delay_ms;
                /**
-                * @sched_disable_gucid_threshold: threshold of min remaining available
-                * guc_ids before we start bypassing the schedule disable delay
+                * @submission_state.sched_disable_gucid_threshold:
+                * threshold of min remaining available guc_ids before
+                * we start bypassing the schedule disable delay
                 */
                unsigned int sched_disable_gucid_threshold;
        } submission_state;
@@ -243,37 +249,40 @@ struct intel_guc {
         */
        struct {
                /**
-                * @lock: Lock protecting the below fields and the engine stats.
+                * @timestamp.lock: Lock protecting the below fields and
+                * the engine stats.
                 */
                spinlock_t lock;
 
                /**
-                * @gt_stamp: 64 bit extended value of the GT timestamp.
+                * @timestamp.gt_stamp: 64-bit extended value of the GT
+                * timestamp.
                 */
                u64 gt_stamp;
 
                /**
-                * @ping_delay: Period for polling the GT timestamp for
-                * overflow.
+                * @timestamp.ping_delay: Period for polling the GT
+                * timestamp for overflow.
                 */
                unsigned long ping_delay;
 
                /**
-                * @work: Periodic work to adjust GT timestamp, engine and
-                * context usage for overflows.
+                * @timestamp.work: Periodic work to adjust GT timestamp,
+                * engine and context usage for overflows.
                 */
                struct delayed_work work;
 
                /**
-                * @shift: Right shift value for the gpm timestamp
+                * @timestamp.shift: Right shift value for the gpm timestamp
                 */
                u32 shift;
 
                /**
-                * @last_stat_jiffies: jiffies at last actual stats collection time
-                * We use this timestamp to ensure we don't oversample the
-                * stats because runtime power management events can trigger
-                * stats collection at much higher rates than required.
+                * @timestamp.last_stat_jiffies: jiffies at last actual
+                * stats collection time. We use this timestamp to ensure
+                * we don't oversample the stats because runtime power
+                * management events can trigger stats collection at much
+                * higher rates than required.
                 */
                unsigned long last_stat_jiffies;
        } timestamp;
index 90f6c1ece57d4478a30375df1725624b47449298..efcb00472be24779590fcce94753ab83a787f2c4 100644 (file)
@@ -2849,8 +2849,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
        for (i = start; i < end; i += 4) {
                p = intel_gvt_find_mmio_info(gvt, i);
                if (p) {
-                       WARN(1, "dup mmio definition offset %x\n",
-                               info->offset);
+                       WARN(1, "dup mmio definition offset %x\n", i);
 
                        /* We return -EEXIST here to make GVT-g load fail.
                         * So duplicated MMIO can be found as soon as
index 13b1ae9b96c7fddbe1622dbda609d10675021503..46445248d193e6bf0316e4347a993bfc5c01ae1d 100644 (file)
@@ -291,7 +291,8 @@ struct i915_perf_stream {
                int size_exponent;
 
                /**
-                * @ptr_lock: Locks reads and writes to all head/tail state
+                * @oa_buffer.ptr_lock: Locks reads and writes to all
+                * head/tail state
                 *
                 * Consider: the head and tail pointer state needs to be read
                 * consistently from a hrtimer callback (atomic context) and
@@ -313,7 +314,8 @@ struct i915_perf_stream {
                spinlock_t ptr_lock;
 
                /**
-                * @head: Although we can always read back the head pointer register,
+                * @oa_buffer.head: Although we can always read back
+                * the head pointer register,
                 * we prefer to avoid trusting the HW state, just to avoid any
                 * risk that some hardware condition could * somehow bump the
                 * head pointer unpredictably and cause us to forward the wrong
@@ -322,7 +324,8 @@ struct i915_perf_stream {
                u32 head;
 
                /**
-                * @tail: The last verified tail that can be read by userspace.
+                * @oa_buffer.tail: The last verified tail that can be
+                * read by userspace.
                 */
                u32 tail;
        } oa_buffer;
index e98b6d69a91ab70c224d67ad337926c1b69936b3..9b6d87c8b5831c14aec9bbc425a374f0721f0273 100644 (file)
@@ -41,7 +41,7 @@
  * To virtualize GPU resources GVT-g driver depends on hypervisor technology
  * e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability
  * and be virtualized within GVT-g device module. More architectural design
- * doc is available on https://01.org/group/2230/documentation-list.
+ * doc is available on https://github.com/intel/gvt-linux/wiki.
  */
 
 static LIST_HEAD(intel_gvt_devices);
index 9625d0b1d0b4d81486e62227880b43d3f04c9c63..ce90c25536d240067054bf2c584bd564d465524d 100644 (file)
@@ -154,7 +154,6 @@ int lsdc_create_i2c_chan(struct drm_device *ddev,
        adapter = &li2c->adapter;
        adapter->algo_data = &li2c->bit;
        adapter->owner = THIS_MODULE;
-       adapter->class = I2C_CLASS_DDC;
        adapter->dev.parent = ddev->dev;
        adapter->nr = -1;
 
index d675c954befe3cc3ec833ca4ff838371d58727d4..54e46e440e0f0ddcf5d3073fdd3a6d7b15d3b848 100644 (file)
@@ -297,7 +297,6 @@ static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
 
        strscpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
        ddc->adap.owner = THIS_MODULE;
-       ddc->adap.class = I2C_CLASS_DDC;
        ddc->adap.algo = &mtk_hdmi_ddc_algorithm;
        ddc->adap.retries = 3;
        ddc->adap.dev.of_node = dev->of_node;
index 0c48bdf3e7f800a02a00fcbf06b4d5737475e003..423eb302be7eb9def877158d5499b70c9a785905 100644 (file)
@@ -106,7 +106,6 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
        i2c->data = BIT(info->i2c.data_bit);
        i2c->clock = BIT(info->i2c.clock_bit);
        i2c->adapter.owner = THIS_MODULE;
-       i2c->adapter.class = I2C_CLASS_DDC;
        i2c->adapter.dev.parent = dev->dev;
        i2c->dev = dev;
        i2c_set_adapdata(&i2c->adapter, i2c);
index 83380bc92a00a964479a0cbbb8dbc7a9dcd675ca..6a4b489d44e5173831d73956f1fb7a4e10809052 100644 (file)
@@ -144,10 +144,6 @@ enum dpu_enc_rc_states {
  *                     to track crtc in the disable() hook which is called
  *                     _after_ encoder_mask is cleared.
  * @connector:         If a mode is set, cached pointer to the active connector
- * @crtc_kickoff_cb:           Callback into CRTC that will flush & start
- *                             all CTL paths
- * @crtc_kickoff_cb_data:      Opaque user data given to crtc_kickoff_cb
- * @debugfs_root:              Debug file system root file node
  * @enc_lock:                  Lock around physical encoder
  *                             create/destroy/enable/disable
  * @frame_busy_mask:           Bitmask tracking which phys_enc we are still
@@ -2072,7 +2068,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
        }
 
        /* reset the merge 3D HW block */
-       if (phys_enc->hw_pp->merge_3d) {
+       if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
                phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
                                BLEND_3D_NONE);
                if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
@@ -2103,7 +2099,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
        if (phys_enc->hw_wb)
                intf_cfg.wb = phys_enc->hw_wb->idx;
 
-       if (phys_enc->hw_pp->merge_3d)
+       if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
                intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
 
        if (ctl->ops.reset_intf_cfg)
index b58a9c2ae326cab6c4799a88fe86acbab1c236f8..724537ab776dfde95c6406cf0aef1b795874b171 100644 (file)
@@ -29,7 +29,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
 /**
  * struct dpu_rm_requirements - Reservation requirements parameter bundle
  * @topology:  selected topology for the display
- * @hw_res:       Hardware resources required as reported by the encoders
  */
 struct dpu_rm_requirements {
        struct msm_display_topology topology;
@@ -204,6 +203,8 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
  * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
  * @rm: dpu resource manager handle
  * @primary_idx: index of primary mixer in rm->mixer_blks[]
+ *
+ * Returns: lm peer mixed id on success or %-EINVAL on error
  */
 static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
 {
index 77a8d9366ed7b01d46a01cf602e74eafb15d4937..fb588fde298a2de231ea5fdd8f639da156d47030 100644 (file)
@@ -135,11 +135,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
        tbd = dp_link_get_test_bits_depth(ctrl->link,
                        ctrl->panel->dp_mode.bpp);
 
-       if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
-               pr_debug("BIT_DEPTH not set. Configure default\n");
-               tbd = DP_TEST_BIT_DEPTH_8;
-       }
-
        config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
 
        /* Num of Lanes */
index 98427d45e9a7e3ac99a47871bbd1e0e893b2bc24..49dfac1fd1ef2158626f4a417b22e810414b76f9 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <drm/drm_print.h>
 
+#include "dp_reg.h"
 #include "dp_link.h"
 #include "dp_panel.h"
 
@@ -1082,7 +1083,7 @@ int dp_link_process_request(struct dp_link *dp_link)
 
 int dp_link_get_colorimetry_config(struct dp_link *dp_link)
 {
-       u32 cc;
+       u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
        struct dp_link_private *link;
 
        if (!dp_link) {
@@ -1096,10 +1097,11 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
         * Unless a video pattern CTS test is ongoing, use RGB_VESA
         * Only RGB_VESA and RGB_CEA supported for now
         */
-       if (dp_link_is_video_pattern_requested(link))
-               cc = link->dp_link.test_video.test_dyn_range;
-       else
-               cc = DP_TEST_DYNAMIC_RANGE_VESA;
+       if (dp_link_is_video_pattern_requested(link)) {
+               if (link->dp_link.test_video.test_dyn_range &
+                                       DP_TEST_DYNAMIC_RANGE_CEA)
+                       cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
+       }
 
        return cc;
 }
@@ -1179,6 +1181,9 @@ void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
 u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
 {
        u32 tbd;
+       struct dp_link_private *link;
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
 
        /*
         * Few simplistic rules and assumptions made here:
@@ -1196,12 +1201,13 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
                tbd = DP_TEST_BIT_DEPTH_10;
                break;
        default:
-               tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+               drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n",
+                          bpp);
+               tbd = DP_TEST_BIT_DEPTH_8;
                break;
        }
 
-       if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
-               tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+       tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
 
        return tbd;
 }
index ea85a691e72b5ce505822e4fce21f0cbcf0c4319..78785ed4b40c490d83396825d62a65af2fd6c9df 100644 (file)
 #define DP_MISC0_COLORIMETRY_CFG_SHIFT         (0x00000001)
 #define DP_MISC0_TEST_BITS_DEPTH_SHIFT         (0x00000005)
 
+#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB     (0)
+#define DP_MISC0_COLORIMERY_CFG_CEA_RGB                (0x04)
+
 #define REG_DP_VALID_BOUNDARY                  (0x00000030)
 #define REG_DP_VALID_BOUNDARY_2                        (0x00000034)
 
index de182c00484349c9d097dee02fff50970251a003..7aa500d24240ff3ed6694c469eafc4388c982346 100644 (file)
@@ -249,7 +249,6 @@ struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
 
 
        i2c->owner = THIS_MODULE;
-       i2c->class = I2C_CLASS_DDC;
        snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
        i2c->dev.parent = &hdmi->pdev->dev;
        i2c->algo = &msm_hdmi_i2c_algorithm;
index 455b2e3a0cdd4811c67fda8efccd9dd3dcf77a16..35423d10aafa90b98bb6f92c3da405940c8938ea 100644 (file)
@@ -562,6 +562,7 @@ static const struct msm_mdss_data sdm670_data = {
        .ubwc_enc_version = UBWC_2_0,
        .ubwc_dec_version = UBWC_2_0,
        .highest_bank_bit = 1,
+       .reg_bus_bw = 76800,
 };
 
 static const struct msm_mdss_data sdm845_data = {
index d1437c08645f90d9c745ee77405d3fa1d8d51f9d..6f5d376d8fcc1ecb6d9faa80b4b06ba4cd1b21e4 100644 (file)
@@ -9,7 +9,7 @@
 #define GSP_PAGE_SIZE  BIT(GSP_PAGE_SHIFT)
 
 struct nvkm_gsp_mem {
-       u32 size;
+       size_t size;
        void *data;
        dma_addr_t addr;
 };
index 5057d976fa578cebe2e9e847c6e78634d2b08968..93f08f9479d89bfda87fbeef246c9dd702f047a1 100644 (file)
@@ -62,7 +62,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
        if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
                struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
-               if (atomic_dec_and_test(&fctx->notify_ref))
+               if (!--fctx->notify_ref)
                        drop = 1;
        }
 
@@ -103,7 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
 void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
-       cancel_work_sync(&fctx->allow_block_work);
+       cancel_work_sync(&fctx->uevent_work);
        nouveau_fence_context_kill(fctx, 0);
        nvif_event_dtor(&fctx->event);
        fctx->dead = 1;
@@ -146,12 +146,13 @@ nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fc
        return drop;
 }
 
-static int
-nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
+static void
+nouveau_fence_uevent_work(struct work_struct *work)
 {
-       struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
+       struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
+                                                      uevent_work);
        unsigned long flags;
-       int ret = NVIF_EVENT_KEEP;
+       int drop = 0;
 
        spin_lock_irqsave(&fctx->lock, flags);
        if (!list_empty(&fctx->pending)) {
@@ -161,23 +162,20 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
                fence = list_entry(fctx->pending.next, typeof(*fence), head);
                chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
                if (nouveau_fence_update(chan, fctx))
-                       ret = NVIF_EVENT_DROP;
+                       drop = 1;
        }
-       spin_unlock_irqrestore(&fctx->lock, flags);
+       if (drop)
+               nvif_event_block(&fctx->event);
 
-       return ret;
+       spin_unlock_irqrestore(&fctx->lock, flags);
 }
 
-static void
-nouveau_fence_work_allow_block(struct work_struct *work)
+static int
+nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
 {
-       struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
-                                                      allow_block_work);
-
-       if (atomic_read(&fctx->notify_ref) == 0)
-               nvif_event_block(&fctx->event);
-       else
-               nvif_event_allow(&fctx->event);
+       struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
+       schedule_work(&fctx->uevent_work);
+       return NVIF_EVENT_KEEP;
 }
 
 void
@@ -191,7 +189,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
        } args;
        int ret;
 
-       INIT_WORK(&fctx->allow_block_work, nouveau_fence_work_allow_block);
+       INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
        INIT_LIST_HEAD(&fctx->flip);
        INIT_LIST_HEAD(&fctx->pending);
        spin_lock_init(&fctx->lock);
@@ -535,19 +533,15 @@ static bool nouveau_fence_enable_signaling(struct dma_fence *f)
        struct nouveau_fence *fence = from_fence(f);
        struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
        bool ret;
-       bool do_work;
 
-       if (atomic_inc_return(&fctx->notify_ref) == 0)
-               do_work = true;
+       if (!fctx->notify_ref++)
+               nvif_event_allow(&fctx->event);
 
        ret = nouveau_fence_no_signaling(f);
        if (ret)
                set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
-       else if (atomic_dec_and_test(&fctx->notify_ref))
-               do_work = true;
-
-       if (do_work)
-               schedule_work(&fctx->allow_block_work);
+       else if (!--fctx->notify_ref)
+               nvif_event_block(&fctx->event);
 
        return ret;
 }
index 28f5cf013b8983240204d028c8367249a63912e0..8bc065acfe35870f62bd0f2e37df47a35eb8ae38 100644 (file)
@@ -3,7 +3,6 @@
 #define __NOUVEAU_FENCE_H__
 
 #include <linux/dma-fence.h>
-#include <linux/workqueue.h>
 #include <nvif/event.h>
 
 struct nouveau_drm;
@@ -45,10 +44,9 @@ struct nouveau_fence_chan {
        u32 context;
        char name[32];
 
+       struct work_struct uevent_work;
        struct nvif_event event;
-       struct work_struct allow_block_work;
-       atomic_t notify_ref;
-       int dead, killed;
+       int notify_ref, dead, killed;
 };
 
 struct nouveau_fence_priv {
index a6602c0126715635d6328c2fb295d4195b7dd873..3dda885df5b223dc2b637592e50cc7e958b5cbb7 100644 (file)
@@ -108,6 +108,9 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
        } else {
                ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
                                   mem->mem.size, &tmp);
+               if (ret)
+                       goto done;
+
                vma->addr = tmp.addr;
        }
 
index c8ce7ff187135b0992b52a3c62d8a48593b2e625..e74493a4569edb933e7d55c817709cb93e701a56 100644 (file)
@@ -550,6 +550,10 @@ ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
                struct nvkm_engn *engn = list_first_entry(&runl->engns, typeof(*engn), head);
 
                runl->nonstall.vector = engn->func->nonstall(engn);
+
+               /* if no nonstall vector just keep going */
+               if (runl->nonstall.vector == -1)
+                       continue;
                if (runl->nonstall.vector < 0) {
                        RUNL_ERROR(runl, "nonstall %d", runl->nonstall.vector);
                        return runl->nonstall.vector;
index b903785056b5de6df810a2c457ea90deb557dd69..3454c7d2950295843ea3e8ef4b53eacba8c11553 100644 (file)
@@ -351,7 +351,7 @@ r535_engn_nonstall(struct nvkm_engn *engn)
        int ret;
 
        ret = nvkm_gsp_intr_nonstall(subdev->device->gsp, subdev->type, subdev->inst);
-       WARN_ON(ret < 0);
+       WARN_ON(ret == -ENOENT);
        return ret;
 }
 
index 04bceaa28a197d93d85db77098e9f8330c63cff0..da1bebb896f7fb1ec5bba033600f16d23f29f96c 100644 (file)
@@ -25,12 +25,8 @@ int
 nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
 {
        for (int i = 0; i < gsp->intr_nr; i++) {
-               if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
-                       if (gsp->intr[i].nonstall != ~0)
-                               return gsp->intr[i].nonstall;
-
-                       return -EINVAL;
-               }
+               if (gsp->intr[i].type == type && gsp->intr[i].inst == inst)
+                       return gsp->intr[i].nonstall;
        }
 
        return -ENOENT;
index 9ee58e2a0eb2ad99c198ea7a58e6e1cf02a667d0..a41735ab60683f02fde33f0107a2edae89155a6e 100644 (file)
@@ -997,6 +997,32 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
        return 0;
 }
 
+static void
+nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
+{
+       if (mem->data) {
+               /*
+                * Poison the buffer to catch any unexpected access from
+                * GSP-RM if the buffer was prematurely freed.
+                */
+               memset(mem->data, 0xFF, mem->size);
+
+               dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
+               memset(mem, 0, sizeof(*mem));
+       }
+}
+
+static int
+nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
+{
+       mem->size = size;
+       mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
+       if (WARN_ON(!mem->data))
+               return -ENOMEM;
+
+       return 0;
+}
+
 static int
 r535_gsp_postinit(struct nvkm_gsp *gsp)
 {
@@ -1024,6 +1050,13 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
 
        nvkm_inth_allow(&gsp->subdev.inth);
        nvkm_wr32(device, 0x110004, 0x00000040);
+
+       /* Release the DMA buffers that were needed only for boot and init */
+       nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
+       nvkm_gsp_mem_dtor(gsp, &gsp->libos);
+       nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
+       nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
+
        return ret;
 }
 
@@ -1078,7 +1111,6 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
        if (IS_ERR(rpc))
                return PTR_ERR(rpc);
 
-       rpc->size = sizeof(*rpc);
        rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
 
        str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
@@ -1094,6 +1126,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
                strings += name_len;
                str_offset += name_len;
        }
+       rpc->size = str_offset;
 
        return nvkm_gsp_rpc_wr(gsp, rpc, false);
 }
@@ -1532,27 +1565,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
        return 0;
 }
 
-static void
-nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
-{
-       if (mem->data) {
-               dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
-               mem->data = NULL;
-       }
-}
-
-static int
-nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem)
-{
-       mem->size = size;
-       mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
-       if (WARN_ON(!mem->data))
-               return -ENOMEM;
-
-       return 0;
-}
-
-
 static int
 r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
 {
@@ -1938,20 +1950,20 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
  * See kgspCreateRadix3_IMPL
  */
 static int
-nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
+nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
                   struct nvkm_gsp_radix3 *rx3)
 {
        u64 addr;
 
        for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
                u64 *ptes;
-               int idx;
+               size_t bufsize;
+               int ret, idx;
 
-               rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
-               rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size,
-                                                     &rx3->mem[i].addr, GFP_KERNEL);
-               if (WARN_ON(!rx3->mem[i].data))
-                       return -ENOMEM;
+               bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+               ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]);
+               if (ret)
+                       return ret;
 
                ptes = rx3->mem[i].data;
                if (i == 2) {
@@ -1991,7 +2003,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
                if (ret)
                        return ret;
 
-               ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3);
+               ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3);
                if (ret)
                        return ret;
 
@@ -2150,6 +2162,11 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
        mutex_destroy(&gsp->cmdq.mutex);
 
        r535_gsp_dtor_fws(gsp);
+
+       nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
+       nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
+       nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
+       nvkm_gsp_mem_dtor(gsp, &gsp->logrm);
 }
 
 int
@@ -2194,7 +2211,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
        memcpy(gsp->sig.data, data, size);
 
        /* Build radix3 page table for ELF image. */
-       ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
+       ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
        if (ret)
                return ret;
 
index e7e8fdf3adab7a0c9454f57b8ad91f4e787bc6da..29682722b0b36b584d4e3a8088d70e74507d78b7 100644 (file)
@@ -28,19 +28,14 @@ static void
 gp10b_ltc_init(struct nvkm_ltc *ltc)
 {
        struct nvkm_device *device = ltc->subdev.device;
-       struct iommu_fwspec *spec;
+       u32 sid;
 
        nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
        nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
        nvkm_wr32(device, 0x100800, ltc->ltc_nr);
 
-       spec = dev_iommu_fwspec_get(device->dev);
-       if (spec) {
-               u32 sid = spec->ids[0] & 0xffff;
-
-               /* stream ID */
+       if (tegra_dev_iommu_get_stream_id(device->dev, &sid))
                nvkm_wr32(device, 0x160000, sid << 2);
-       }
 }
 
 static const struct nvkm_ltc_func
index dad938cf6decfb0658a73439a6ca602c78fce2fb..8f3783742208b60d8b5b9ad7c6e2ceab4e9fc9e4 100644 (file)
@@ -539,6 +539,8 @@ config DRM_PANEL_RAYDIUM_RM692E5
        depends on OF
        depends on DRM_MIPI_DSI
        depends on BACKLIGHT_CLASS_DEVICE
+       select DRM_DISPLAY_DP_HELPER
+       select DRM_DISPLAY_HELPER
        help
          Say Y here if you want to enable support for Raydium RM692E5-based
          display panels, such as the one found in the Fairphone 5 smartphone.
index ea5a857793827af1a0bfe90d88bf2a3a71065f11..f23d8832a1ad055483b1f513557cb3d2807e3692 100644 (file)
@@ -309,7 +309,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
        .off_func = s6d7aa0_lsl080al02_off,
        .drm_mode = &s6d7aa0_lsl080al02_mode,
        .mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
-       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+       .bus_flags = 0,
 
        .has_backlight = false,
        .use_passwd3 = false,
index 2214cb09678cd6a234359c2cb7972c9beb3f5851..d493ee735c7349b2ae1a21abff870859c0ea2af4 100644 (file)
@@ -3948,6 +3948,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
        },
        .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
        .connector_type = DRM_MODE_CONNECTOR_LVDS,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
 };
 
 static const struct panel_desc tianma_tm070jvhg33 = {
@@ -3960,6 +3961,7 @@ static const struct panel_desc tianma_tm070jvhg33 = {
        },
        .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
        .connector_type = DRM_MODE_CONNECTOR_LVDS,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
 };
 
 static const struct display_timing tianma_tm070rvhg71_timing = {
index 314d066e68e9d0dddb240f09526eedce8eac3828..3d174390a8afe7f23b886fbc99273cd9f76505f5 100644 (file)
@@ -918,7 +918,6 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 
        i2c->rec = *rec;
        i2c->adapter.owner = THIS_MODULE;
-       i2c->adapter.class = I2C_CLASS_DDC;
        i2c->adapter.dev.parent = dev->dev;
        i2c->dev = dev;
        i2c_set_adapdata(&i2c->adapter, i2c);
index f6d819803c0e02826c7250d9adf9c618360cb6f8..e6fbe040ccf6a9af8f54b6220dfa6c96d4cd0ee8 100644 (file)
@@ -792,7 +792,6 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
        init_completion(&i2c->cmp);
 
        adap = &i2c->adap;
-       adap->class = I2C_CLASS_DDC;
        adap->owner = THIS_MODULE;
        adap->dev.parent = hdmi->dev;
        adap->dev.of_node = hdmi->dev->of_node;
index 62e6d8187de767ef5b893b6d7996474312141548..95cd1b49eda8a0c5c436caa9dfe5dca899776314 100644 (file)
@@ -715,7 +715,6 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
        init_completion(&i2c->cmpltn);
 
        adap = &i2c->adap;
-       adap->class = I2C_CLASS_DDC;
        adap->owner = THIS_MODULE;
        adap->dev.parent = hdmi->dev;
        adap->dev.of_node = hdmi->dev->of_node;
index 574103fc79f9882682596fe8845a13eb06e1ca3e..85b3b4871a1d63bf5a8cb2315a25dfd5ef2b8b70 100644 (file)
@@ -35,7 +35,6 @@
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_gem.h"
-#include "rockchip_drm_fb.h"
 #include "rockchip_drm_vop2.h"
 #include "rockchip_rgb.h"
 
@@ -1681,7 +1680,6 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
        unsigned long dclk_core_rate = v_pixclk >> 2;
        unsigned long dclk_rate = v_pixclk;
        unsigned long dclk_out_rate;
-       unsigned long if_dclk_rate;
        unsigned long if_pixclk_rate;
        int K = 1;
 
@@ -1696,8 +1694,8 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
                }
 
                if_pixclk_rate = (dclk_core_rate << 1) / K;
-               if_dclk_rate = dclk_core_rate / K;
                /*
+                * if_dclk_rate = dclk_core_rate / K;
                 * *if_pixclk_div = dclk_rate / if_pixclk_rate;
                 * *if_dclk_div = dclk_rate / if_dclk_rate;
                 */
index 550492a7a031d7827b2e167098c495908bee82aa..d442b893275b971a53adc42b3a06973eebd8bdbb 100644 (file)
@@ -1184,14 +1184,16 @@ static void drm_sched_run_job_work(struct work_struct *w)
        if (READ_ONCE(sched->pause_submit))
                return;
 
+       /* Find entity with a ready job */
        entity = drm_sched_select_entity(sched);
        if (!entity)
-               return;
+               return; /* No more work */
 
        sched_job = drm_sched_entity_pop_job(entity);
        if (!sched_job) {
                complete_all(&entity->entity_idle);
-               return; /* No more work */
+               drm_sched_run_job_queue(sched);
+               return;
        }
 
        s_fence = sched_job->s_fence;
index bef293922b98f0ae46a753d1a5d05974ddd14091..3d0e093a7e6ed74de7c5b7484323c4465e49b89a 100644 (file)
@@ -319,7 +319,7 @@ static int ssd130x_pwm_enable(struct ssd130x_device *ssd130x)
 
        pwm_init_state(ssd130x->pwm, &pwmstate);
        pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
-       pwm_apply_state(ssd130x->pwm, &pwmstate);
+       pwm_apply_might_sleep(ssd130x->pwm, &pwmstate);
 
        /* Enable the PWM */
        pwm_enable(ssd130x->pwm);
index d1a65a921f5afaf5f3332239a7a9f6be44e8c475..f5f62eb0eecaaa469687c19e921ce56811c5af61 100644 (file)
@@ -302,7 +302,6 @@ int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi)
                return -ENOMEM;
 
        adap->owner = THIS_MODULE;
-       adap->class = I2C_CLASS_DDC;
        adap->algo = &sun4i_hdmi_i2c_algorithm;
        strscpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name));
        i2c_set_adapdata(adap, hdmi);
index ff36171c8fb700bae9967961220ea7cbb262d193..a73cff7a307082d97ce78e43deaff5547cf55964 100644 (file)
@@ -960,7 +960,8 @@ int host1x_client_iommu_attach(struct host1x_client *client)
         * not the shared IOMMU domain, don't try to attach it to a different
         * domain. This allows using the IOMMU-backed DMA API.
         */
-       if (domain && domain != tegra->domain)
+       if (domain && domain->type != IOMMU_DOMAIN_IDENTITY &&
+           domain != tegra->domain)
                return 0;
 
        if (tegra->domain) {
index 4e9247cf9977f5677126ffbdcf56c97446c769b4..1eb0c304f9607f6ae4034638a2cf8e3ee8da06ca 100644 (file)
@@ -188,13 +188,13 @@ out:
 
 static void drm_test_mm_debug(struct kunit *test)
 {
+       struct drm_printer p = drm_debug_printer(test->name);
        struct drm_mm mm;
        struct drm_mm_node nodes[2];
 
        /* Create a small drm_mm with a couple of nodes and a few holes, and
         * check that the debug iterator doesn't explode over a trivial drm_mm.
         */
-
        drm_mm_init(&mm, 0, 4096);
 
        memset(nodes, 0, sizeof(nodes));
@@ -209,6 +209,9 @@ static void drm_test_mm_debug(struct kunit *test)
        KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
                               "failed to reserve node[0] {start=%lld, size=%lld)\n",
                               nodes[0].start, nodes[0].size);
+
+       drm_mm_print(&mm, &p);
+       KUNIT_SUCCEED(test);
 }
 
 static bool expect_insert(struct kunit *test, struct drm_mm *mm,
index f5187b384ae9ac8eedede8e6a0d4d56eb8af1670..76027960054f1140e768ae21b30e5a3015437d02 100644 (file)
@@ -95,11 +95,17 @@ static int ttm_global_init(void)
        ttm_pool_mgr_init(num_pages);
        ttm_tt_mgr_init(num_pages, num_dma32);
 
-       glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+       glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32 |
+                                          __GFP_NOWARN);
 
+       /* Retry without GFP_DMA32 for platforms DMA32 is not available */
        if (unlikely(glob->dummy_read_page == NULL)) {
-               ret = -ENOMEM;
-               goto out;
+               glob->dummy_read_page = alloc_page(__GFP_ZERO);
+               if (unlikely(glob->dummy_read_page == NULL)) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               pr_warn("Using GFP_DMA32 fallback for dummy_read_page\n");
        }
 
        INIT_LIST_HEAD(&glob->device_list);
@@ -195,7 +201,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
                    bool use_dma_alloc, bool use_dma32)
 {
        struct ttm_global *glob = &ttm_glob;
-       int ret;
+       int ret, nid;
 
        if (WARN_ON(vma_manager == NULL))
                return -EINVAL;
@@ -215,7 +221,12 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
 
        ttm_sys_man_init(bdev);
 
-       ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32);
+       if (dev)
+               nid = dev_to_node(dev);
+       else
+               nid = NUMA_NO_NODE;
+
+       ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32);
 
        bdev->vma_manager = vma_manager;
        spin_lock_init(&bdev->lru_lock);
index f843a50d5dce6df10b405ae26853a5d01dbd7c0c..94eafcecc65b0c878e20e7a3f4a528f8a56e60c1 100644 (file)
@@ -62,9 +62,9 @@ static const struct v3d_reg_def v3d_core_reg_defs[] = {
        REGDEF(33, 71, V3D_PTB_BPCA),
        REGDEF(33, 71, V3D_PTB_BPCS),
 
-       REGDEF(33, 41, V3D_GMP_STATUS(33)),
-       REGDEF(33, 41, V3D_GMP_CFG(33)),
-       REGDEF(33, 41, V3D_GMP_VIO_ADDR(33)),
+       REGDEF(33, 42, V3D_GMP_STATUS(33)),
+       REGDEF(33, 42, V3D_GMP_CFG(33)),
+       REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)),
 
        REGDEF(33, 71, V3D_ERR_FDBGO),
        REGDEF(33, 71, V3D_ERR_FDBGB),
@@ -74,13 +74,13 @@ static const struct v3d_reg_def v3d_core_reg_defs[] = {
 
 static const struct v3d_reg_def v3d_csd_reg_defs[] = {
        REGDEF(41, 71, V3D_CSD_STATUS),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG0(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG1(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG2(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG3(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG4(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG5(41)),
-       REGDEF(41, 41, V3D_CSD_CURRENT_CFG6(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)),
+       REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)),
        REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
        REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
        REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
index fcff41dd2315b710dc9de6ccdb361922c61d2602..88f63d526b22365b42b90e90d5b451a56e3fda52 100644 (file)
@@ -147,6 +147,13 @@ v3d_job_allocate(void **container, size_t size)
        return 0;
 }
 
+static void
+v3d_job_deallocate(void **container)
+{
+       kfree(*container);
+       *container = NULL;
+}
+
 static int
 v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
             struct v3d_job *job, void (*free)(struct kref *ref),
@@ -273,8 +280,10 @@ v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
 
        ret = v3d_job_init(v3d, file_priv, &(*job)->base,
                           v3d_job_free, args->in_sync, se, V3D_CSD);
-       if (ret)
+       if (ret) {
+               v3d_job_deallocate((void *)job);
                return ret;
+       }
 
        ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
        if (ret)
@@ -282,8 +291,10 @@ v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
 
        ret = v3d_job_init(v3d, file_priv, *clean_job,
                           v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
-       if (ret)
+       if (ret) {
+               v3d_job_deallocate((void *)clean_job);
                return ret;
+       }
 
        (*job)->args = *args;
 
@@ -860,8 +871,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
        ret = v3d_job_init(v3d, file_priv, &render->base,
                           v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
-       if (ret)
+       if (ret) {
+               v3d_job_deallocate((void *)&render);
                goto fail;
+       }
 
        render->start = args->rcl_start;
        render->end = args->rcl_end;
@@ -874,8 +887,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
                ret = v3d_job_init(v3d, file_priv, &bin->base,
                                   v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
-               if (ret)
+               if (ret) {
+                       v3d_job_deallocate((void *)&bin);
                        goto fail;
+               }
 
                bin->start = args->bcl_start;
                bin->end = args->bcl_end;
@@ -892,8 +907,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
                ret = v3d_job_init(v3d, file_priv, clean_job,
                                   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
-               if (ret)
+               if (ret) {
+                       v3d_job_deallocate((void *)&clean_job);
                        goto fail;
+               }
 
                last_job = clean_job;
        } else {
@@ -1015,8 +1032,10 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
 
        ret = v3d_job_init(v3d, file_priv, &job->base,
                           v3d_job_free, args->in_sync, &se, V3D_TFU);
-       if (ret)
+       if (ret) {
+               v3d_job_deallocate((void *)&job);
                goto fail;
+       }
 
        job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
                               sizeof(*job->base.bo), GFP_KERNEL);
@@ -1233,8 +1252,10 @@ v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
 
        ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
                           v3d_job_free, 0, &se, V3D_CPU);
-       if (ret)
+       if (ret) {
+               v3d_job_deallocate((void *)&cpu_job);
                goto fail;
+       }
 
        clean_job = cpu_job->indirect_csd.clean_job;
        csd_job = cpu_job->indirect_csd.job;
index f8e9abe647b927b211abb4bbc0751ea318d80369..9539aa28937fa4cf71fbcd8e252749607617d966 100644 (file)
@@ -94,6 +94,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
                        goto err_free;
        }
 
+       dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
        ret = virtio_gpu_init(vdev, dev);
        if (ret)
                goto err_free;
index 1cced50d8d8c9dadcb40f7ebe75cf7474170772b..e36ae1f0d8859fc82f2e0a9ed06f8e7ee6387372 100644 (file)
@@ -47,7 +47,7 @@ config DRM_XE
 
 config DRM_XE_DISPLAY
        bool "Enable display support"
-       depends on DRM_XE && EXPERT && DRM_XE=m
+       depends on DRM_XE && DRM_XE=m
        select FB_IOMEM_HELPERS
        select I2C
        select I2C_ALGOBIT
index 53bd2a8ba1ae5cea2535c59047d028f18bec8e65..efcf0ab7a1a69d35271b5655c5a746e992459b02 100644 (file)
@@ -17,7 +17,6 @@ subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
 subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
 subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
 subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
-subdir-ccflags-y += $(call cc-option, -Wstringop-overflow)
 subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
 # The following turn off the warnings enabled by -Wextra
 ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
index 3062e0e0d467ee0737f0fbf63d3826c9f4013778..79ba98a169f907cc18dcd63c07e9570c623c1608 100644 (file)
@@ -50,8 +50,8 @@
 
 #define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN              (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
 #define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ            GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY                (0xffff << 16)
-#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN                (0xffff << 0)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY                (0xffffu << 16)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN                (0xffffu << 0)
 #define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32                GUC_HXG_REQUEST_MSG_n_DATAn
 #define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64                GUC_HXG_REQUEST_MSG_n_DATAn
 
index 811add10c30dc21a357841dccd10e6583468978c..c165e26c097669b72e6cfa7f97a7ee5bdada90ff 100644 (file)
@@ -242,8 +242,8 @@ struct slpc_shared_data {
                (HOST2GUC_PC_SLPC_REQUEST_REQUEST_MSG_MIN_LEN + \
                        HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS)
 #define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ             GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID                (0xff << 8)
-#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC      (0xff << 0)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID                (0xffu << 8)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC      (0xffu << 0)
 #define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N    GUC_HXG_REQUEST_MSG_n_DATAn
 
 #endif
index 3b83f907ece46165c5bc11f93a3077e6dafd2edf..0b1146d0c997a216c589bb21d86d91f4d0f6841c 100644 (file)
@@ -82,11 +82,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
 #define GUC_CTB_HDR_LEN                                1u
 #define GUC_CTB_MSG_MIN_LEN                    GUC_CTB_HDR_LEN
 #define GUC_CTB_MSG_MAX_LEN                    256u
-#define GUC_CTB_MSG_0_FENCE                    (0xffff << 16)
-#define GUC_CTB_MSG_0_FORMAT                   (0xf << 12)
+#define GUC_CTB_MSG_0_FENCE                    (0xffffu << 16)
+#define GUC_CTB_MSG_0_FORMAT                   (0xfu << 12)
 #define   GUC_CTB_FORMAT_HXG                   0u
-#define GUC_CTB_MSG_0_RESERVED                 (0xf << 8)
-#define GUC_CTB_MSG_0_NUM_DWORDS               (0xff << 0)
+#define GUC_CTB_MSG_0_RESERVED                 (0xfu << 8)
+#define GUC_CTB_MSG_0_NUM_DWORDS               (0xffu << 0)
 
 /**
  * DOC: CTB HXG Message
index 47094b9b044cbbcdd68f51b3cacd6f15e4d97b3c..0400bc0fccdc9b5d5605dafd5f5480ff3983319c 100644 (file)
@@ -31,9 +31,9 @@
  */
 
 #define GUC_KLV_LEN_MIN                                1u
-#define GUC_KLV_0_KEY                          (0xffff << 16)
-#define GUC_KLV_0_LEN                          (0xffff << 0)
-#define GUC_KLV_n_VALUE                                (0xffffffff << 0)
+#define GUC_KLV_0_KEY                          (0xffffu << 16)
+#define GUC_KLV_0_LEN                          (0xffffu << 0)
+#define GUC_KLV_n_VALUE                                (0xffffffffu << 0)
 
 /**
  * DOC: GuC Self Config KLVs
index 3d199016cf881cea10668a010fce5e8b4ea234c1..29e414c82d56cb5a318686e18d3d774c7b9c3d0c 100644 (file)
  */
 
 #define GUC_HXG_MSG_MIN_LEN                    1u
-#define GUC_HXG_MSG_0_ORIGIN                   (0x1 << 31)
+#define GUC_HXG_MSG_0_ORIGIN                   (0x1u << 31)
 #define   GUC_HXG_ORIGIN_HOST                  0u
 #define   GUC_HXG_ORIGIN_GUC                   1u
-#define GUC_HXG_MSG_0_TYPE                     (0x7 << 28)
+#define GUC_HXG_MSG_0_TYPE                     (0x7u << 28)
 #define   GUC_HXG_TYPE_REQUEST                 0u
 #define   GUC_HXG_TYPE_EVENT                   1u
 #define   GUC_HXG_TYPE_NO_RESPONSE_BUSY                3u
 #define   GUC_HXG_TYPE_NO_RESPONSE_RETRY       5u
 #define   GUC_HXG_TYPE_RESPONSE_FAILURE                6u
 #define   GUC_HXG_TYPE_RESPONSE_SUCCESS                7u
-#define GUC_HXG_MSG_0_AUX                      (0xfffffff << 0)
-#define GUC_HXG_MSG_n_PAYLOAD                  (0xffffffff << 0)
+#define GUC_HXG_MSG_0_AUX                      (0xfffffffu << 0)
+#define GUC_HXG_MSG_n_PAYLOAD                  (0xffffffffu << 0)
 
 /**
  * DOC: HXG Request
@@ -85,8 +85,8 @@
  */
 
 #define GUC_HXG_REQUEST_MSG_MIN_LEN            GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_REQUEST_MSG_0_DATA0            (0xfff << 16)
-#define GUC_HXG_REQUEST_MSG_0_ACTION           (0xffff << 0)
+#define GUC_HXG_REQUEST_MSG_0_DATA0            (0xfffu << 16)
+#define GUC_HXG_REQUEST_MSG_0_ACTION           (0xffffu << 0)
 #define GUC_HXG_REQUEST_MSG_n_DATAn            GUC_HXG_MSG_n_PAYLOAD
 
 /**
  */
 
 #define GUC_HXG_EVENT_MSG_MIN_LEN              GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_EVENT_MSG_0_DATA0              (0xfff << 16)
-#define GUC_HXG_EVENT_MSG_0_ACTION             (0xffff << 0)
+#define GUC_HXG_EVENT_MSG_0_DATA0              (0xfffu << 16)
+#define GUC_HXG_EVENT_MSG_0_ACTION             (0xffffu << 0)
 #define GUC_HXG_EVENT_MSG_n_DATAn              GUC_HXG_MSG_n_PAYLOAD
 
 /**
  */
 
 #define GUC_HXG_FAILURE_MSG_LEN                        GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_FAILURE_MSG_0_HINT             (0xfff << 16)
-#define GUC_HXG_FAILURE_MSG_0_ERROR            (0xffff << 0)
+#define GUC_HXG_FAILURE_MSG_0_HINT             (0xfffu << 16)
+#define GUC_HXG_FAILURE_MSG_0_ERROR            (0xffffu << 0)
 
 /**
  * DOC: HXG Response
index 5f19550cc845360ada430477180405fe66bf84b9..68d9f6116bdfc3522ee5d6d94ef2bb763ec81090 100644 (file)
@@ -35,12 +35,10 @@ static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
                                          u32 ofs, u64 *ptr, u32 size)
 {
        struct ttm_bo_kmap_obj map;
-       void *virtual;
+       void *src;
        bool is_iomem;
        int ret;
 
-       XE_WARN_ON(size != 8);
-
        ret = xe_bo_lock(bo, true);
        if (ret)
                return ret;
@@ -50,11 +48,12 @@ static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
                goto out_unlock;
 
        ofs &= ~PAGE_MASK;
-       virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+       src = ttm_kmap_obj_virtual(&map, &is_iomem);
+       src += ofs;
        if (is_iomem)
-               *ptr = readq((void __iomem *)(virtual + ofs));
+               memcpy_fromio(ptr, (void __iomem *)src, size);
        else
-               *ptr = *(u64 *)(virtual + ofs);
+               memcpy(ptr, src, size);
 
        ttm_bo_kunmap(&map);
 out_unlock:
index 412b2e7ce40cb3ea38b6f5c76fb293009c10c3a2..3436fd9cf2b2738446608990a5f5be1a4f33fb2e 100644 (file)
@@ -125,14 +125,13 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
 
        bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
                               ttm_bo_type_device, bo_flags);
-
-       xe_bo_lock(bo, false);
-
        if (IS_ERR(bo)) {
                KUNIT_FAIL(test, "Failed to create bo.\n");
                return;
        }
 
+       xe_bo_lock(bo, false);
+
        kunit_info(test, "Verifying that CCS data is cleared on creation.\n");
        ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL,
                               test);
index 7a32faa2f68880dcd4cb5b217ff9986153b990cb..a6523df0f1d39fbe7f0354d404f95886a3d56424 100644 (file)
@@ -331,7 +331,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
                xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
 
        emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
-                &src_it, XE_PAGE_SIZE, pt);
+                &src_it, XE_PAGE_SIZE, pt->ttm.resource);
 
        run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);
 
index a53c22a1958247cbd703264aeb49195620b6754a..b4715b78ef3bf952bacd5ed7e1739c2fe0cfa813 100644 (file)
@@ -74,9 +74,6 @@ static const struct platform_test_case cases[] = {
        SUBPLATFORM_CASE(DG2, G11, B1),
        SUBPLATFORM_CASE(DG2, G12, A0),
        SUBPLATFORM_CASE(DG2, G12, A1),
-       PLATFORM_CASE(PVC, B0),
-       PLATFORM_CASE(PVC, B1),
-       PLATFORM_CASE(PVC, C0),
        GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
        GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
        GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
index 8e4a3b1f6b938e5a76b8fc6640058ca63a30c709..0b0e262e2166d69da1063915fa4c6eeedfd38bd6 100644 (file)
@@ -125,9 +125,9 @@ static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
 static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
                           u32 bo_flags, u32 *c)
 {
-       xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
-
        if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
+               xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
+
                bo->placements[*c] = (struct ttm_place) {
                        .mem_type = XE_PL_TT,
                };
@@ -145,6 +145,8 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
        struct xe_mem_region *vram;
        u64 io_size;
 
+       xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
+
        vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
        xe_assert(xe, vram && vram->usable_size);
        io_size = vram->io_size;
@@ -175,8 +177,6 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
                         u32 bo_flags, u32 *c)
 {
-       xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
-
        if (bo->props.preferred_gt == XE_GT1) {
                if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
                        add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
@@ -193,9 +193,9 @@ static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
                           u32 bo_flags, u32 *c)
 {
-       xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
-
        if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
+               xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
+
                bo->placements[*c] = (struct ttm_place) {
                        .mem_type = XE_PL_STOLEN,
                        .flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
@@ -442,7 +442,7 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
 
                if (vram->mapping &&
                    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
-                       mem->bus.addr = (u8 *)vram->mapping +
+                       mem->bus.addr = (u8 __force *)vram->mapping +
                                mem->bus.offset;
 
                mem->bus.offset += vram->io_start;
@@ -734,7 +734,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
                        /* Create a new VMAP once kernel BO back in VRAM */
                        if (!ret && resource_is_vram(new_mem)) {
                                struct xe_mem_region *vram = res_to_mem_region(new_mem);
-                               void *new_addr = vram->mapping +
+                               void __iomem *new_addr = vram->mapping +
                                        (new_mem->start << PAGE_SHIFT);
 
                                if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
index d9ae77fe7382ddf9997858995fe255108f7c5944..1f0b4b9ce84f585ea599ccaf7f4641c3d139121f 100644 (file)
@@ -484,7 +484,7 @@ int xe_device_probe(struct xe_device *xe)
 
        err = xe_device_set_has_flat_ccs(xe);
        if (err)
-               return err;
+               goto err_irq_shutdown;
 
        err = xe_mmio_probe_vram(xe);
        if (err)
@@ -613,7 +613,7 @@ void xe_device_wmb(struct xe_device *xe)
 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
 {
        return xe_device_has_flat_ccs(xe) ?
-               DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
+               DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
 }
 
 bool xe_device_mem_access_ongoing(struct xe_device *xe)
index c45ef17b347323801d397a964e53aa3fc5b060f4..5dc9127a20293e1ebb56c3684e2fdb7e6f425b43 100644 (file)
@@ -97,7 +97,7 @@ struct xe_mem_region {
         */
        resource_size_t actual_physical_size;
        /** @mapping: pointer to VRAM mappable space */
-       void *__iomem mapping;
+       void __iomem *mapping;
 };
 
 /**
@@ -146,7 +146,7 @@ struct xe_tile {
                size_t size;
 
                /** @regs: pointer to tile's MMIO space (starting with registers) */
-               void *regs;
+               void __iomem *regs;
        } mmio;
 
        /**
@@ -159,7 +159,7 @@ struct xe_tile {
                size_t size;
 
                /** @regs: pointer to tile's additional MMIO-extension space */
-               void *regs;
+               void __iomem *regs;
        } mmio_ext;
 
        /** @mem: memory management info for tile */
@@ -301,7 +301,7 @@ struct xe_device {
                /** @size: size of MMIO space for device */
                size_t size;
                /** @regs: pointer to MMIO space for device */
-               void *regs;
+               void __iomem *regs;
        } mmio;
 
        /** @mem: memory info for device */
index 74391d9b11ae0e4cc77ecf9d0f5f47264e7adec6..e4db069f0db3f1fd27ed80eb84fc4544ea0831df 100644 (file)
@@ -134,8 +134,6 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
 
 int xe_display_init_nommio(struct xe_device *xe)
 {
-       int err;
-
        if (!xe->info.enable_display)
                return 0;
 
@@ -145,10 +143,6 @@ int xe_display_init_nommio(struct xe_device *xe)
        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(xe);
 
-       err = intel_power_domains_init(xe);
-       if (err)
-               return err;
-
        return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
 }
 
index 64ed303728fda98d2c4edb2a4a0e1f0b810812d2..da2627ed6ae7a94114ec4e1d0aa6f04495103540 100644 (file)
@@ -175,7 +175,7 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
        return 0;
 }
 
-const struct dma_buf_ops xe_dmabuf_ops = {
+static const struct dma_buf_ops xe_dmabuf_ops = {
        .attach = xe_dma_buf_attach,
        .detach = xe_dma_buf_detach,
        .pin = xe_dma_buf_pin,
index d30c0d0689bcc7d4ae55cdd7fc93b116826160e6..17f26952e6656b8a077eb51161acbfd96638db2c 100644 (file)
@@ -111,11 +111,11 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
        struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
        struct drm_exec *exec = &vm_exec.exec;
-       u32 i, num_syncs = 0;
+       u32 i, num_syncs = 0, num_ufence = 0;
        struct xe_sched_job *job;
        struct dma_fence *rebind_fence;
        struct xe_vm *vm;
-       bool write_locked;
+       bool write_locked, skip_retry = false;
        ktime_t end = 0;
        int err = 0;
 
@@ -157,6 +157,14 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                                           SYNC_PARSE_FLAG_LR_MODE : 0));
                if (err)
                        goto err_syncs;
+
+               if (xe_sync_is_ufence(&syncs[i]))
+                       num_ufence++;
+       }
+
+       if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+               err = -EINVAL;
+               goto err_syncs;
        }
 
        if (xe_exec_queue_is_parallel(q)) {
@@ -227,7 +235,8 @@ retry:
        }
 
        if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
-               err = -EWOULDBLOCK;
+               err = -EWOULDBLOCK;     /* Aliased to -EAGAIN */
+               skip_retry = true;
                goto err_exec;
        }
 
@@ -337,7 +346,7 @@ err_unlock_list:
                up_write(&vm->lock);
        else
                up_read(&vm->lock);
-       if (err == -EAGAIN)
+       if (err == -EAGAIN && !skip_retry)
                goto retry;
 err_syncs:
        for (i = 0; i < num_syncs; i++)
index 44fe8097b7cdac8d5c89a3bf00168cf3b8343ca7..254b1d3af4cb56888700f82b2a6b8fa3436e1a2a 100644 (file)
@@ -67,6 +67,11 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
        q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
        q->sched_props.preempt_timeout_us =
                                hwe->eclass->sched_props.preempt_timeout_us;
+       if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
+           q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
+               q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
+       else
+               q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
 
        if (xe_exec_queue_is_parallel(q)) {
                q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
@@ -921,20 +926,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
  * @q: The exec queue
  * @vm: The VM the engine does a bind or exec for
  *
- * Get last fence, does not take a ref
+ * Get last fence, takes a ref
  *
  * Returns: last fence if not signaled, dma fence stub if signaled
  */
 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
                                               struct xe_vm *vm)
 {
+       struct dma_fence *fence;
+
        xe_exec_queue_last_fence_lockdep_assert(q, vm);
 
        if (q->last_fence &&
            test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
                xe_exec_queue_last_fence_put(q, vm);
 
-       return q->last_fence ? q->last_fence : dma_fence_get_stub();
+       fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+       dma_fence_get(fence);
+       return fence;
 }
 
 /**
index 3d7e704ec3d9f33b9c0e47867bb58135ea01df91..8d4b7feb8c306b8a406a46f74c5cad2a430bdef3 100644 (file)
@@ -52,8 +52,6 @@ struct xe_exec_queue {
        struct xe_vm *vm;
        /** @class: class of this exec queue */
        enum xe_engine_class class;
-       /** @priority: priority of this exec queue */
-       enum xe_exec_queue_priority priority;
        /**
         * @logical_mask: logical mask of where job submitted to exec queue can run
         */
@@ -84,6 +82,8 @@ struct xe_exec_queue {
 #define EXEC_QUEUE_FLAG_VM                     BIT(4)
 /* child of VM queue for multi-tile VM jobs */
 #define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD      BIT(5)
+/* kernel exec_queue only, set priority to highest level */
+#define EXEC_QUEUE_FLAG_HIGH_PRIORITY          BIT(6)
 
        /**
         * @flags: flags for this exec queue, should statically setup aside from ban
@@ -142,6 +142,8 @@ struct xe_exec_queue {
                u32 timeslice_us;
                /** @preempt_timeout_us: preemption timeout in micro-seconds */
                u32 preempt_timeout_us;
+               /** @priority: priority of this exec queue */
+               enum xe_exec_queue_priority priority;
        } sched_props;
 
        /** @compute: compute exec queue state */
index 3af2adec129561850bfb378c04ca2d7caacdf325..35474ddbaf97ecc974a6b55643e578dbcfe135f9 100644 (file)
@@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
                 * USM has its only SA pool to non-block behind user operations
                 */
                if (gt_to_xe(gt)->info.has_usm) {
-                       gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
+                       struct xe_device *xe = gt_to_xe(gt);
+
+                       gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
+                                                               IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
                        if (IS_ERR(gt->usm.bb_pool)) {
                                err = PTR_ERR(gt->usm.bb_pool);
                                goto err_force_wake;
index 3adfa6686e7cf9eb2763bccc28b7a0a382dd4834..e5b0f4ecdbe8261ee5c3fa9530a30dc2fd46c14b 100644 (file)
@@ -196,6 +196,9 @@ void xe_gt_freq_init(struct xe_gt *gt)
        struct xe_device *xe = gt_to_xe(gt);
        int err;
 
+       if (xe->info.skip_guc_pc)
+               return;
+
        gt->freq = kobject_create_and_add("freq0", gt->sysfs);
        if (!gt->freq) {
                drm_warn(&xe->drm, "failed to add freq0 directory to %s\n",
index 77925b35cf8dcb0ee1d62ba7c579767796c8d807..8546cd3cc50d1f8c4146b2f69c4758bac05aa240 100644 (file)
@@ -480,7 +480,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
  * to synchronize with external clients (e.g., firmware), so a semaphore
  * register will also need to be taken.
  */
-static void mcr_lock(struct xe_gt *gt)
+static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock)
 {
        struct xe_device *xe = gt_to_xe(gt);
        int ret = 0;
@@ -500,7 +500,7 @@ static void mcr_lock(struct xe_gt *gt)
        drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
 }
 
-static void mcr_unlock(struct xe_gt *gt)
+static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock)
 {
        /* Release hardware semaphore - this is done by writing 1 to the register */
        if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
index 59a70d2e0a7a33386fdcfca9cc158919aab1e32c..73f08f1924df2ea8d4aaabb87eceaa13eff81d78 100644 (file)
@@ -165,7 +165,8 @@ retry_userptr:
                goto unlock_vm;
        }
 
-       if (!xe_vma_is_userptr(vma) || !xe_vma_userptr_check_repin(vma)) {
+       if (!xe_vma_is_userptr(vma) ||
+           !xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
                downgrade_write(&vm->lock);
                write_locked = false;
        }
@@ -181,11 +182,13 @@ retry_userptr:
        /* TODO: Validate fault */
 
        if (xe_vma_is_userptr(vma) && write_locked) {
+               struct xe_userptr_vma *uvma = to_userptr_vma(vma);
+
                spin_lock(&vm->userptr.invalidated_lock);
-               list_del_init(&vma->userptr.invalidate_link);
+               list_del_init(&uvma->userptr.invalidate_link);
                spin_unlock(&vm->userptr.invalidated_lock);
 
-               ret = xe_vma_userptr_pin_pages(vma);
+               ret = xe_vma_userptr_pin_pages(uvma);
                if (ret)
                        goto unlock_vm;
 
@@ -220,7 +223,7 @@ retry_userptr:
        dma_fence_put(fence);
 
        if (xe_vma_is_userptr(vma))
-               ret = xe_vma_userptr_check_repin(vma);
+               ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
        vma->usm.tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
@@ -332,7 +335,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
                return -EPROTO;
 
        asid = FIELD_GET(PFD_ASID, msg[1]);
-       pf_queue = &gt->usm.pf_queue[asid % NUM_PF_QUEUE];
+       pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
 
        spin_lock_irqsave(&pf_queue->lock, flags);
        full = pf_queue_full(pf_queue);
index 482cb0df9f15bc28d9f5c2a0ea194319f1b2a21a..0a61390c64a7b7100113641f2e073f4ce58d358e 100644 (file)
@@ -60,7 +60,12 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc)
 
 static u32 guc_ctl_feature_flags(struct xe_guc *guc)
 {
-       return GUC_CTL_ENABLE_SLPC;
+       u32 flags = 0;
+
+       if (!guc_to_xe(guc)->info.skip_guc_pc)
+               flags |= GUC_CTL_ENABLE_SLPC;
+
+       return flags;
 }
 
 static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
index f71085228cb33992940622dca2992f4e1ae9fa62..d91702592520af54eea5f8ca4bd56b67719531be 100644 (file)
@@ -963,7 +963,9 @@ void xe_guc_pc_fini(struct xe_guc_pc *pc)
        struct xe_device *xe = pc_to_xe(pc);
 
        if (xe->info.skip_guc_pc) {
+               xe_device_mem_access_get(xe);
                xe_gt_idle_disable_c6(pc_to_gt(pc));
+               xe_device_mem_access_put(xe);
                return;
        }
 
index 21ac68e3246f86f1e16d05880da8aa9315769ecb..54ffcfcdd41f9ce3c590f5814fcbe3d3535946ac 100644 (file)
@@ -421,7 +421,7 @@ static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
 {
        struct exec_queue_policy policy;
        struct xe_device *xe = guc_to_xe(guc);
-       enum xe_exec_queue_priority prio = q->priority;
+       enum xe_exec_queue_priority prio = q->sched_props.priority;
        u32 timeslice_us = q->sched_props.timeslice_us;
        u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
 
@@ -1231,7 +1231,6 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
        err = xe_sched_entity_init(&ge->entity, sched);
        if (err)
                goto err_sched;
-       q->priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
 
        if (xe_exec_queue_is_lr(q))
                INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
@@ -1301,15 +1300,15 @@ static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
 {
        struct xe_sched_msg *msg;
 
-       if (q->priority == priority || exec_queue_killed_or_banned(q))
+       if (q->sched_props.priority == priority || exec_queue_killed_or_banned(q))
                return 0;
 
        msg = kmalloc(sizeof(*msg), GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
+       q->sched_props.priority = priority;
        guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
-       q->priority = priority;
 
        return 0;
 }
index a6094c81f2ad0fa8a3f1cf1001ceb897a044d3cb..a5de3e7b0bd6ab134557fdfb52a406d4bf199016 100644 (file)
@@ -217,13 +217,13 @@ struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx,
        if (!fence)
                return ERR_PTR(-ENOMEM);
 
-       dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
-                      ctx->dma_fence_ctx, ctx->next_seqno++);
-
        fence->ctx = ctx;
        fence->seqno_map = seqno_map;
        INIT_LIST_HEAD(&fence->irq_link);
 
+       dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
+                      ctx->dma_fence_ctx, ctx->next_seqno++);
+
        trace_xe_hw_fence_create(fence);
 
        return fence;
index 6ef2aa1eae8b095e958e74fda4b5c42e205436bf..174ed2185481e32d568551e62f1839181a1771d4 100644 (file)
@@ -419,7 +419,7 @@ static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
 
        return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
                             POWER_SETUP_SUBCOMMAND_READ_I1, 0),
-                            uval, 0);
+                            uval, NULL);
 }
 
 static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
index b7fa3831b68451cb74ae557ca3e7a66d5d4fa6fd..0ec5ad2539f1be6098aa248876a2816f65b91f38 100644 (file)
 #include "xe_map.h"
 #include "xe_vm.h"
 
-#define CTX_VALID                              (1 << 0)
-#define CTX_PRIVILEGE                          (1 << 8)
-#define CTX_ADDRESSING_MODE_SHIFT              3
-#define LEGACY_64B_CONTEXT                     3
+#define LRC_VALID                              (1 << 0)
+#define LRC_PRIVILEGE                          (1 << 8)
+#define LRC_ADDRESSING_MODE_SHIFT              3
+#define LRC_LEGACY_64B_CONTEXT                 3
 
 #define ENGINE_CLASS_SHIFT                     61
 #define ENGINE_INSTANCE_SHIFT                  48
@@ -762,15 +762,15 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
                                     (q->usm.acc_notify << ACC_NOTIFY_S) |
                                     q->usm.acc_trigger);
 
-       lrc->desc = CTX_VALID;
-       lrc->desc |= LEGACY_64B_CONTEXT << CTX_ADDRESSING_MODE_SHIFT;
+       lrc->desc = LRC_VALID;
+       lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
        /* TODO: Priority */
 
        /* While this appears to have something about privileged batches or
         * some such, it really just means PPGTT mode.
         */
        if (vm)
-               lrc->desc |= CTX_PRIVILEGE;
+               lrc->desc |= LRC_PRIVILEGE;
 
        if (GRAPHICS_VERx100(xe) < 1250) {
                lrc->desc |= (u64)hwe->instance << ENGINE_INSTANCE_SHIFT;
index adf1dab5eba253297fb8b4ae4c2c5b5f15b2ec7a..70480c30560215ff7fece9a824fd01c92008562d 100644 (file)
@@ -62,6 +62,8 @@ struct xe_migrate {
         * out of the pt_bo.
         */
        struct drm_suballoc_manager vm_update_sa;
+       /** @min_chunk_size: For dgfx, Minimum chunk size */
+       u64 min_chunk_size;
 };
 
 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
@@ -168,11 +170,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
        if (!IS_DGFX(xe)) {
                /* Write out batch too */
                m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
-               if (xe->info.has_usm) {
-                       batch = tile->primary_gt->usm.bb_pool->bo;
-                       m->usm_batch_base_ofs = m->batch_base_ofs;
-               }
-
                for (i = 0; i < batch->size;
                     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
                     XE_PAGE_SIZE) {
@@ -183,6 +180,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
                                  entry);
                        level++;
                }
+               if (xe->info.has_usm) {
+                       xe_tile_assert(tile, batch->size == SZ_1M);
+
+                       batch = tile->primary_gt->usm.bb_pool->bo;
+                       m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
+                       xe_tile_assert(tile, batch->size == SZ_512K);
+
+                       for (i = 0; i < batch->size;
+                            i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
+                            XE_PAGE_SIZE) {
+                               entry = vm->pt_ops->pte_encode_bo(batch, i,
+                                                                 pat_index, 0);
+
+                               xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
+                                         entry);
+                               level++;
+                       }
+               }
        } else {
                u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
 
@@ -344,7 +359,8 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 
                m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
                                            EXEC_QUEUE_FLAG_KERNEL |
-                                           EXEC_QUEUE_FLAG_PERMANENT);
+                                           EXEC_QUEUE_FLAG_PERMANENT |
+                                           EXEC_QUEUE_FLAG_HIGH_PRIORITY);
        } else {
                m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
                                                  XE_ENGINE_CLASS_COPY,
@@ -355,8 +371,6 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
                xe_vm_close_and_put(vm);
                return ERR_CAST(m->q);
        }
-       if (xe->info.has_usm)
-               m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
 
        mutex_init(&m->job_mutex);
 
@@ -364,6 +378,19 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
        if (err)
                return ERR_PTR(err);
 
+       if (IS_DGFX(xe)) {
+               if (xe_device_has_flat_ccs(xe))
+                       /* min chunk size corresponds to 4K of CCS Metadata */
+                       m->min_chunk_size = SZ_4K * SZ_64K /
+                               xe_device_ccs_bytes(xe, SZ_64K);
+               else
+                       /* Somewhat arbitrary to avoid a huge amount of blits */
+                       m->min_chunk_size = SZ_64K;
+               m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
+               drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
+                       (unsigned long long)m->min_chunk_size);
+       }
+
        return m;
 }
 
@@ -375,16 +402,35 @@ static u64 max_mem_transfer_per_pass(struct xe_device *xe)
        return MAX_PREEMPTDISABLE_TRANSFER;
 }
 
-static u64 xe_migrate_res_sizes(struct xe_device *xe, struct xe_res_cursor *cur)
+static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
 {
-       /*
-        * For VRAM we use identity mapped pages so we are limited to current
-        * cursor size. For system we program the pages ourselves so we have no
-        * such limitation.
-        */
-       return min_t(u64, max_mem_transfer_per_pass(xe),
-                    mem_type_is_vram(cur->mem_type) ? cur->size :
-                    cur->remaining);
+       struct xe_device *xe = tile_to_xe(m->tile);
+       u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
+
+       if (mem_type_is_vram(cur->mem_type)) {
+               /*
+                * VRAM we want to blit in chunks with sizes aligned to
+                * min_chunk_size in order for the offset to CCS metadata to be
+                * page-aligned. If it's the last chunk it may be smaller.
+                *
+                * Another constraint is that we need to limit the blit to
+                * the VRAM block size, unless size is smaller than
+                * min_chunk_size.
+                */
+               u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
+
+               size = min_t(u64, size, chunk);
+               if (size > m->min_chunk_size)
+                       size = round_down(size, m->min_chunk_size);
+       }
+
+       return size;
+}
+
+static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
+{
+       /* If the chunk is not fragmented, allow identity map. */
+       return cur->size >= size;
 }
 
 static u32 pte_update_size(struct xe_migrate *m,
@@ -397,7 +443,12 @@ static u32 pte_update_size(struct xe_migrate *m,
        u32 cmds = 0;
 
        *L0_pt = pt_ofs;
-       if (!is_vram) {
+       if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
+               /* Offset into identity map. */
+               *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
+                                             cur->start + vram_region_gpu_offset(res));
+               cmds += cmd_size;
+       } else {
                /* Clip L0 to available size */
                u64 size = min(*L0, (u64)avail_pts * SZ_2M);
                u64 num_4k_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
@@ -413,11 +464,6 @@ static u32 pte_update_size(struct xe_migrate *m,
 
                /* Each chunk has a single blit command */
                cmds += cmd_size;
-       } else {
-               /* Offset into identity map. */
-               *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
-                                             cur->start + vram_region_gpu_offset(res));
-               cmds += cmd_size;
        }
 
        return cmds;
@@ -427,10 +473,10 @@ static void emit_pte(struct xe_migrate *m,
                     struct xe_bb *bb, u32 at_pt,
                     bool is_vram, bool is_comp_pte,
                     struct xe_res_cursor *cur,
-                    u32 size, struct xe_bo *bo)
+                    u32 size, struct ttm_resource *res)
 {
        struct xe_device *xe = tile_to_xe(m->tile);
-
+       struct xe_vm *vm = m->q->vm;
        u16 pat_index;
        u32 ptes;
        u64 ofs = at_pt * XE_PAGE_SIZE;
@@ -439,17 +485,10 @@ static void emit_pte(struct xe_migrate *m,
        /* Indirect access needs compression enabled uncached PAT index */
        if (GRAPHICS_VERx100(xe) >= 2000)
                pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
-                                         xe->pat.idx[XE_CACHE_NONE];
+                                         xe->pat.idx[XE_CACHE_WB];
        else
                pat_index = xe->pat.idx[XE_CACHE_WB];
 
-       /*
-        * FIXME: Emitting VRAM PTEs to L0 PTs is forbidden. Currently
-        * we're only emitting VRAM PTEs during sanity tests, so when
-        * that's moved to a Kunit test, we should condition VRAM PTEs
-        * on running tests.
-        */
-
        ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
 
        while (ptes) {
@@ -469,20 +508,22 @@ static void emit_pte(struct xe_migrate *m,
 
                        addr = xe_res_dma(cur) & PAGE_MASK;
                        if (is_vram) {
-                               /* Is this a 64K PTE entry? */
-                               if ((m->q->vm->flags & XE_VM_FLAG_64K) &&
-                                   !(cur_ofs & (16 * 8 - 1))) {
-                                       xe_tile_assert(m->tile, IS_ALIGNED(addr, SZ_64K));
+                               if (vm->flags & XE_VM_FLAG_64K) {
+                                       u64 va = cur_ofs * XE_PAGE_SIZE / 8;
+
+                                       xe_assert(xe, (va & (SZ_64K - 1)) ==
+                                                 (addr & (SZ_64K - 1)));
+
                                        flags |= XE_PTE_PS64;
                                }
 
-                               addr += vram_region_gpu_offset(bo->ttm.resource);
+                               addr += vram_region_gpu_offset(res);
                                devmem = true;
                        }
 
-                       addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
-                                                                addr, pat_index,
-                                                                0, devmem, flags);
+                       addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
+                                                          addr, pat_index,
+                                                          0, devmem, flags);
                        bb->cs[bb->len++] = lower_32_bits(addr);
                        bb->cs[bb->len++] = upper_32_bits(addr);
 
@@ -694,8 +735,8 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                bool usm = xe->info.has_usm;
                u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
 
-               src_L0 = xe_migrate_res_sizes(xe, &src_it);
-               dst_L0 = xe_migrate_res_sizes(xe, &dst_it);
+               src_L0 = xe_migrate_res_sizes(m, &src_it);
+               dst_L0 = xe_migrate_res_sizes(m, &dst_it);
 
                drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
                        pass++, src_L0, dst_L0);
@@ -716,6 +757,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                                                      &ccs_ofs, &ccs_pt, 0,
                                                      2 * avail_pts,
                                                      avail_pts);
+                       xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
                }
 
                /* Add copy commands size here */
@@ -728,20 +770,20 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                        goto err_sync;
                }
 
-               if (!src_is_vram)
-                       emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0,
-                                src_bo);
-               else
+               if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
                        xe_res_next(&src_it, src_L0);
-
-               if (!dst_is_vram)
-                       emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0,
-                                dst_bo);
                else
+                       emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
+                                &src_it, src_L0, src);
+
+               if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
                        xe_res_next(&dst_it, src_L0);
+               else
+                       emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
+                                &dst_it, src_L0, dst);
 
                if (copy_system_ccs)
-                       emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src_bo);
+                       emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
 
                bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
                update_idx = bb->len;
@@ -950,7 +992,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
                bool usm = xe->info.has_usm;
                u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
 
-               clear_L0 = xe_migrate_res_sizes(xe, &src_it);
+               clear_L0 = xe_migrate_res_sizes(m, &src_it);
 
                drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
 
@@ -977,12 +1019,12 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 
                size -= clear_L0;
                /* Preemption is enabled again by the ring ops. */
-               if (!clear_vram) {
-                       emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0,
-                                bo);
-               } else {
+               if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
                        xe_res_next(&src_it, clear_L0);
-               }
+               else
+                       emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
+                                &src_it, clear_L0, dst);
+
                bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
                update_idx = bb->len;
 
@@ -1175,8 +1217,11 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
        }
        if (q) {
                fence = xe_exec_queue_last_fence_get(q, vm);
-               if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+               if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+                       dma_fence_put(fence);
                        return false;
+               }
+               dma_fence_put(fence);
        }
 
        return true;
index f660cfb79f504e264f9a3ced4a5f5544fada74e0..5f6b53ea5528b2c904ce0c4ee30e39c4a16139b7 100644 (file)
@@ -272,8 +272,8 @@ int xe_mmio_probe_vram(struct xe_device *xe)
                drm_info(&xe->drm, "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", id,
                         tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.usable_size, &tile->mem.vram.io_size);
                drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", id, tile->id,
-                        &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + tile->mem.vram.actual_physical_size,
-                        &tile->mem.vram.io_start, tile->mem.vram.io_start + tile->mem.vram.io_size);
+                        &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + (u64)tile->mem.vram.actual_physical_size,
+                        &tile->mem.vram.io_start, tile->mem.vram.io_start + (u64)tile->mem.vram.io_size);
 
                /* calculate total size using tile size to get the correct HW sizing */
                total_size += tile_size;
@@ -303,7 +303,7 @@ void xe_mmio_probe_tiles(struct xe_device *xe)
        u8 id, tile_count = xe->info.tile_count;
        struct xe_gt *gt = xe_root_mmio_gt(xe);
        struct xe_tile *tile;
-       void *regs;
+       void __iomem *regs;
        u32 mtcfg;
 
        if (tile_count == 1)
index de1030a47588371b0cc71f5b69bee8f0257e2625..e45b37c3f0c262744f960d769eeed29a07ef14e7 100644 (file)
@@ -618,8 +618,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
 
        if (!xe_vma_is_null(vma)) {
                if (xe_vma_is_userptr(vma))
-                       xe_res_first_sg(vma->userptr.sg, 0, xe_vma_size(vma),
-                                       &curs);
+                       xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
+                                       xe_vma_size(vma), &curs);
                else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
                        xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
                                     xe_vma_size(vma), &curs);
@@ -906,17 +906,17 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
 
 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
 
-static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
 {
-       u32 divisor = vma->userptr.divisor ? vma->userptr.divisor : 2;
+       u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
        static u32 count;
 
        if (count++ % divisor == divisor - 1) {
-               struct xe_vm *vm = xe_vma_vm(vma);
+               struct xe_vm *vm = xe_vma_vm(&uvma->vma);
 
-               vma->userptr.divisor = divisor << 1;
+               uvma->userptr.divisor = divisor << 1;
                spin_lock(&vm->userptr.invalidated_lock);
-               list_move_tail(&vma->userptr.invalidate_link,
+               list_move_tail(&uvma->userptr.invalidate_link,
                               &vm->userptr.invalidated);
                spin_unlock(&vm->userptr.invalidated_lock);
                return true;
@@ -927,7 +927,7 @@ static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
 
 #else
 
-static bool xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
 {
        return false;
 }
@@ -1000,9 +1000,9 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 {
        struct xe_pt_migrate_pt_update *userptr_update =
                container_of(pt_update, typeof(*userptr_update), base);
-       struct xe_vma *vma = pt_update->vma;
-       unsigned long notifier_seq = vma->userptr.notifier_seq;
-       struct xe_vm *vm = xe_vma_vm(vma);
+       struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
+       unsigned long notifier_seq = uvma->userptr.notifier_seq;
+       struct xe_vm *vm = xe_vma_vm(&uvma->vma);
        int err = xe_pt_vm_dependencies(pt_update->job,
                                        &vm->rftree[pt_update->tile_id],
                                        pt_update->start,
@@ -1023,7 +1023,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
         */
        do {
                down_read(&vm->userptr.notifier_lock);
-               if (!mmu_interval_read_retry(&vma->userptr.notifier,
+               if (!mmu_interval_read_retry(&uvma->userptr.notifier,
                                             notifier_seq))
                        break;
 
@@ -1032,11 +1032,11 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
                if (userptr_update->bind)
                        return -EAGAIN;
 
-               notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
+               notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
        } while (true);
 
        /* Inject errors to test_whether they are handled correctly */
-       if (userptr_update->bind && xe_pt_userptr_inject_eagain(vma)) {
+       if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
                up_read(&vm->userptr.notifier_lock);
                return -EAGAIN;
        }
@@ -1297,7 +1297,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
                vma->tile_present |= BIT(tile->id);
 
                if (bind_pt_update.locked) {
-                       vma->userptr.initial_bind = true;
+                       to_userptr_vma(vma)->userptr.initial_bind = true;
                        up_read(&vm->userptr.notifier_lock);
                        xe_bo_put_commit(&deferred);
                }
@@ -1642,7 +1642,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
 
                if (!vma->tile_present) {
                        spin_lock(&vm->userptr.invalidated_lock);
-                       list_del_init(&vma->userptr.invalidate_link);
+                       list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
                        spin_unlock(&vm->userptr.invalidated_lock);
                }
                up_read(&vm->userptr.notifier_lock);
index 9b35673b286c80c1c2332d6ece9add9f754f3ca8..7e924faeeea0b0f8ebc2f7fe89ade231412a3892 100644 (file)
@@ -459,21 +459,21 @@ static size_t calc_topo_query_size(struct xe_device *xe)
                 sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
 }
 
-static void __user *copy_mask(void __user *ptr,
-                             struct drm_xe_query_topology_mask *topo,
-                             void *mask, size_t mask_size)
+static int copy_mask(void __user **ptr,
+                    struct drm_xe_query_topology_mask *topo,
+                    void *mask, size_t mask_size)
 {
        topo->num_bytes = mask_size;
 
-       if (copy_to_user(ptr, topo, sizeof(*topo)))
-               return ERR_PTR(-EFAULT);
-       ptr += sizeof(topo);
+       if (copy_to_user(*ptr, topo, sizeof(*topo)))
+               return -EFAULT;
+       *ptr += sizeof(topo);
 
-       if (copy_to_user(ptr, mask, mask_size))
-               return ERR_PTR(-EFAULT);
-       ptr += mask_size;
+       if (copy_to_user(*ptr, mask, mask_size))
+               return -EFAULT;
+       *ptr += mask_size;
 
-       return ptr;
+       return 0;
 }
 
 static int query_gt_topology(struct xe_device *xe,
@@ -493,28 +493,28 @@ static int query_gt_topology(struct xe_device *xe,
        }
 
        for_each_gt(gt, xe, id) {
+               int err;
+
                topo.gt_id = id;
 
                topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
-               query_ptr = copy_mask(query_ptr, &topo,
-                                     gt->fuse_topo.g_dss_mask,
-                                     sizeof(gt->fuse_topo.g_dss_mask));
-               if (IS_ERR(query_ptr))
-                       return PTR_ERR(query_ptr);
+               err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
+                               sizeof(gt->fuse_topo.g_dss_mask));
+               if (err)
+                       return err;
 
                topo.type = DRM_XE_TOPO_DSS_COMPUTE;
-               query_ptr = copy_mask(query_ptr, &topo,
-                                     gt->fuse_topo.c_dss_mask,
-                                     sizeof(gt->fuse_topo.c_dss_mask));
-               if (IS_ERR(query_ptr))
-                       return PTR_ERR(query_ptr);
+               err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
+                               sizeof(gt->fuse_topo.c_dss_mask));
+               if (err)
+                       return err;
 
                topo.type = DRM_XE_TOPO_EU_PER_DSS;
-               query_ptr = copy_mask(query_ptr, &topo,
-                                     gt->fuse_topo.eu_mask_per_dss,
-                                     sizeof(gt->fuse_topo.eu_mask_per_dss));
-               if (IS_ERR(query_ptr))
-                       return PTR_ERR(query_ptr);
+               err = copy_mask(&query_ptr, &topo,
+                               gt->fuse_topo.eu_mask_per_dss,
+                               sizeof(gt->fuse_topo.eu_mask_per_dss));
+               if (err)
+                       return err;
        }
 
        return 0;
index 01106a1156ad82ab30378b29abf3f18d55b64fe3..4e2ccad0e52fabaf43ea26ddc1dd86f2294662a1 100644 (file)
@@ -274,7 +274,6 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
        struct dma_fence *fence;
 
        fence = xe_exec_queue_last_fence_get(job->q, vm);
-       dma_fence_get(fence);
 
        return drm_sched_job_add_dependency(&job->drm, fence);
 }
index e4c220cf9115e9d52fc7b1e9440e0e44ba247c46..aab92bee1d7cf2ff52ec07befe0dcc220325a649 100644 (file)
@@ -307,7 +307,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
        /* Easy case... */
        if (!num_in_fence) {
                fence = xe_exec_queue_last_fence_get(q, vm);
-               dma_fence_get(fence);
                return fence;
        }
 
@@ -322,7 +321,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
                }
        }
        fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
-       dma_fence_get(fences[current_fence - 1]);
        cf = dma_fence_array_create(num_in_fence, fences,
                                    vm->composite_fence_ctx,
                                    vm->composite_fence_seqno++,
index d284afbe917c19203473b30d0abc38ca88ffbfa2..f43cdcaca6c5794ec8b42ab3bc77e1942004d046 100644 (file)
@@ -33,4 +33,9 @@ struct dma_fence *
 xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
                     struct xe_exec_queue *q, struct xe_vm *vm);
 
+static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
+{
+       return !!sync->ufence;
+}
+
 #endif
index d2b00d0bf1e203c8b9c9322bcf8a489b1409821b..e5d7d5e2bec129937317f14f6b0063308a19bf58 100644 (file)
@@ -31,7 +31,7 @@ struct xe_ttm_stolen_mgr {
        /* GPU base offset */
        resource_size_t stolen_base;
 
-       void *__iomem mapping;
+       void __iomem *mapping;
 };
 
 static inline struct xe_ttm_stolen_mgr *
@@ -275,7 +275,7 @@ static int __xe_ttm_stolen_io_mem_reserve_bar2(struct xe_device *xe,
        drm_WARN_ON(&xe->drm, !(mem->placement & TTM_PL_FLAG_CONTIGUOUS));
 
        if (mem->placement & TTM_PL_FLAG_CONTIGUOUS && mgr->mapping)
-               mem->bus.addr = (u8 *)mgr->mapping + mem->bus.offset;
+               mem->bus.addr = (u8 __force *)mgr->mapping + mem->bus.offset;
 
        mem->bus.offset += mgr->io_base;
        mem->bus.is_iomem = true;
index 0cfe7289b97efddd3d3b1177b6e91ebafd22fdc9..865e10d0a06aa31bc13fc60802257b80ebdc9430 100644 (file)
@@ -37,8 +37,6 @@
 #include "generated/xe_wa_oob.h"
 #include "xe_wa.h"
 
-#define TEST_VM_ASYNC_OPS_ERROR
-
 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
 {
        return vm->gpuvm.r_obj;
@@ -46,7 +44,7 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
 
 /**
  * xe_vma_userptr_check_repin() - Advisory check for repin needed
- * @vma: The userptr vma
+ * @uvma: The userptr vma
  *
  * Check if the userptr vma has been invalidated since last successful
  * repin. The check is advisory only and can the function can be called
@@ -56,15 +54,17 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
  *
  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
  */
-int xe_vma_userptr_check_repin(struct xe_vma *vma)
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
 {
-       return mmu_interval_check_retry(&vma->userptr.notifier,
-                                       vma->userptr.notifier_seq) ?
+       return mmu_interval_check_retry(&uvma->userptr.notifier,
+                                       uvma->userptr.notifier_seq) ?
                -EAGAIN : 0;
 }
 
-int xe_vma_userptr_pin_pages(struct xe_vma *vma)
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
 {
+       struct xe_userptr *userptr = &uvma->userptr;
+       struct xe_vma *vma = &uvma->vma;
        struct xe_vm *vm = xe_vma_vm(vma);
        struct xe_device *xe = vm->xe;
        const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
@@ -80,30 +80,30 @@ retry:
        if (vma->gpuva.flags & XE_VMA_DESTROYED)
                return 0;
 
-       notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
-       if (notifier_seq == vma->userptr.notifier_seq)
+       notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+       if (notifier_seq == userptr->notifier_seq)
                return 0;
 
        pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
 
-       if (vma->userptr.sg) {
+       if (userptr->sg) {
                dma_unmap_sgtable(xe->drm.dev,
-                                 vma->userptr.sg,
+                                 userptr->sg,
                                  read_only ? DMA_TO_DEVICE :
                                  DMA_BIDIRECTIONAL, 0);
-               sg_free_table(vma->userptr.sg);
-               vma->userptr.sg = NULL;
+               sg_free_table(userptr->sg);
+               userptr->sg = NULL;
        }
 
        pinned = ret = 0;
        if (in_kthread) {
-               if (!mmget_not_zero(vma->userptr.notifier.mm)) {
+               if (!mmget_not_zero(userptr->notifier.mm)) {
                        ret = -EFAULT;
                        goto mm_closed;
                }
-               kthread_use_mm(vma->userptr.notifier.mm);
+               kthread_use_mm(userptr->notifier.mm);
        }
 
        while (pinned < num_pages) {
@@ -112,43 +112,40 @@ retry:
                                          num_pages - pinned,
                                          read_only ? 0 : FOLL_WRITE,
                                          &pages[pinned]);
-               if (ret < 0) {
-                       if (in_kthread)
-                               ret = 0;
+               if (ret < 0)
                        break;
-               }
 
                pinned += ret;
                ret = 0;
        }
 
        if (in_kthread) {
-               kthread_unuse_mm(vma->userptr.notifier.mm);
-               mmput(vma->userptr.notifier.mm);
+               kthread_unuse_mm(userptr->notifier.mm);
+               mmput(userptr->notifier.mm);
        }
 mm_closed:
        if (ret)
                goto out;
 
-       ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
+       ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
                                                pinned, 0,
                                                (u64)pinned << PAGE_SHIFT,
                                                xe_sg_segment_size(xe->drm.dev),
                                                GFP_KERNEL);
        if (ret) {
-               vma->userptr.sg = NULL;
+               userptr->sg = NULL;
                goto out;
        }
-       vma->userptr.sg = &vma->userptr.sgt;
+       userptr->sg = &userptr->sgt;
 
-       ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
+       ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
                              read_only ? DMA_TO_DEVICE :
                              DMA_BIDIRECTIONAL,
                              DMA_ATTR_SKIP_CPU_SYNC |
                              DMA_ATTR_NO_KERNEL_MAPPING);
        if (ret) {
-               sg_free_table(vma->userptr.sg);
-               vma->userptr.sg = NULL;
+               sg_free_table(userptr->sg);
+               userptr->sg = NULL;
                goto out;
        }
 
@@ -167,8 +164,8 @@ out:
        kvfree(pages);
 
        if (!(ret < 0)) {
-               vma->userptr.notifier_seq = notifier_seq;
-               if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
+               userptr->notifier_seq = notifier_seq;
+               if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
                        goto retry;
        }
 
@@ -335,13 +332,13 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
        down_write(&vm->lock);
        err = drm_gpuvm_exec_lock(&vm_exec);
        if (err)
-               return err;
+               goto out_up_write;
 
        pfence = xe_preempt_fence_create(q, q->compute.context,
                                         ++q->compute.seqno);
        if (!pfence) {
                err = -ENOMEM;
-               goto out_unlock;
+               goto out_fini;
        }
 
        list_add(&q->compute.link, &vm->preempt.exec_queues);
@@ -364,8 +361,9 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 
        up_read(&vm->userptr.notifier_lock);
 
-out_unlock:
+out_fini:
        drm_exec_fini(exec);
+out_up_write:
        up_write(&vm->lock);
 
        return err;
@@ -634,7 +632,9 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
                                   const struct mmu_notifier_range *range,
                                   unsigned long cur_seq)
 {
-       struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
+       struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
+       struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+       struct xe_vma *vma = &uvma->vma;
        struct xe_vm *vm = xe_vma_vm(vma);
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
@@ -650,7 +650,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
        mmu_interval_set_seq(mni, cur_seq);
 
        /* No need to stop gpu access if the userptr is not yet bound. */
-       if (!vma->userptr.initial_bind) {
+       if (!userptr->initial_bind) {
                up_write(&vm->userptr.notifier_lock);
                return true;
        }
@@ -662,7 +662,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
        if (!xe_vm_in_fault_mode(vm) &&
            !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
                spin_lock(&vm->userptr.invalidated_lock);
-               list_move_tail(&vma->userptr.invalidate_link,
+               list_move_tail(&userptr->invalidate_link,
                               &vm->userptr.invalidated);
                spin_unlock(&vm->userptr.invalidated_lock);
        }
@@ -702,7 +702,7 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
 
 int xe_vm_userptr_pin(struct xe_vm *vm)
 {
-       struct xe_vma *vma, *next;
+       struct xe_userptr_vma *uvma, *next;
        int err = 0;
        LIST_HEAD(tmp_evict);
 
@@ -710,22 +710,23 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
 
        /* Collect invalidated userptrs */
        spin_lock(&vm->userptr.invalidated_lock);
-       list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
+       list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
                                 userptr.invalidate_link) {
-               list_del_init(&vma->userptr.invalidate_link);
-               list_move_tail(&vma->combined_links.userptr,
+               list_del_init(&uvma->userptr.invalidate_link);
+               list_move_tail(&uvma->userptr.repin_link,
                               &vm->userptr.repin_list);
        }
        spin_unlock(&vm->userptr.invalidated_lock);
 
        /* Pin and move to temporary list */
-       list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
-                                combined_links.userptr) {
-               err = xe_vma_userptr_pin_pages(vma);
+       list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+                                userptr.repin_link) {
+               err = xe_vma_userptr_pin_pages(uvma);
                if (err < 0)
                        return err;
 
-               list_move_tail(&vma->combined_links.userptr, &vm->rebind_list);
+               list_del_init(&uvma->userptr.repin_link);
+               list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
        }
 
        return 0;
@@ -781,6 +782,14 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
        return fence;
 }
 
+static void xe_vma_free(struct xe_vma *vma)
+{
+       if (xe_vma_is_userptr(vma))
+               kfree(to_userptr_vma(vma));
+       else
+               kfree(vma);
+}
+
 #define VMA_CREATE_FLAG_READ_ONLY      BIT(0)
 #define VMA_CREATE_FLAG_IS_NULL                BIT(1)
 
@@ -799,14 +808,26 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        xe_assert(vm->xe, start < end);
        xe_assert(vm->xe, end < vm->size);
 
-       if (!bo && !is_null)    /* userptr */
+       /*
+        * Allocate and ensure that the xe_vma_is_userptr() return
+        * matches what was allocated.
+        */
+       if (!bo && !is_null) {
+               struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
+
+               if (!uvma)
+                       return ERR_PTR(-ENOMEM);
+
+               vma = &uvma->vma;
+       } else {
                vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-       else
-               vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
-                             GFP_KERNEL);
-       if (!vma) {
-               vma = ERR_PTR(-ENOMEM);
-               return vma;
+               if (!vma)
+                       return ERR_PTR(-ENOMEM);
+
+               if (is_null)
+                       vma->gpuva.flags |= DRM_GPUVA_SPARSE;
+               if (bo)
+                       vma->gpuva.gem.obj = &bo->ttm.base;
        }
 
        INIT_LIST_HEAD(&vma->combined_links.rebind);
@@ -817,8 +838,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
        vma->gpuva.va.range = end - start + 1;
        if (read_only)
                vma->gpuva.flags |= XE_VMA_READ_ONLY;
-       if (is_null)
-               vma->gpuva.flags |= DRM_GPUVA_SPARSE;
 
        for_each_tile(tile, vm->xe, id)
                vma->tile_mask |= 0x1 << id;
@@ -835,35 +854,35 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 
                vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
                if (IS_ERR(vm_bo)) {
-                       kfree(vma);
+                       xe_vma_free(vma);
                        return ERR_CAST(vm_bo);
                }
 
                drm_gpuvm_bo_extobj_add(vm_bo);
                drm_gem_object_get(&bo->ttm.base);
-               vma->gpuva.gem.obj = &bo->ttm.base;
                vma->gpuva.gem.offset = bo_offset_or_userptr;
                drm_gpuva_link(&vma->gpuva, vm_bo);
                drm_gpuvm_bo_put(vm_bo);
        } else /* userptr or null */ {
                if (!is_null) {
+                       struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
                        u64 size = end - start + 1;
                        int err;
 
-                       INIT_LIST_HEAD(&vma->userptr.invalidate_link);
+                       INIT_LIST_HEAD(&userptr->invalidate_link);
+                       INIT_LIST_HEAD(&userptr->repin_link);
                        vma->gpuva.gem.offset = bo_offset_or_userptr;
 
-                       err = mmu_interval_notifier_insert(&vma->userptr.notifier,
+                       err = mmu_interval_notifier_insert(&userptr->notifier,
                                                           current->mm,
                                                           xe_vma_userptr(vma), size,
                                                           &vma_userptr_notifier_ops);
                        if (err) {
-                               kfree(vma);
-                               vma = ERR_PTR(err);
-                               return vma;
+                               xe_vma_free(vma);
+                               return ERR_PTR(err);
                        }
 
-                       vma->userptr.notifier_seq = LONG_MAX;
+                       userptr->notifier_seq = LONG_MAX;
                }
 
                xe_vm_get(vm);
@@ -879,13 +898,15 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
        bool read_only = xe_vma_read_only(vma);
 
        if (xe_vma_is_userptr(vma)) {
-               if (vma->userptr.sg) {
+               struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+
+               if (userptr->sg) {
                        dma_unmap_sgtable(xe->drm.dev,
-                                         vma->userptr.sg,
+                                         userptr->sg,
                                          read_only ? DMA_TO_DEVICE :
                                          DMA_BIDIRECTIONAL, 0);
-                       sg_free_table(vma->userptr.sg);
-                       vma->userptr.sg = NULL;
+                       sg_free_table(userptr->sg);
+                       userptr->sg = NULL;
                }
 
                /*
@@ -893,7 +914,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
                 * the notifer until we're sure the GPU is not accessing
                 * them anymore
                 */
-               mmu_interval_notifier_remove(&vma->userptr.notifier);
+               mmu_interval_notifier_remove(&userptr->notifier);
                xe_vm_put(vm);
        } else if (xe_vma_is_null(vma)) {
                xe_vm_put(vm);
@@ -901,7 +922,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
                xe_bo_put(xe_vma_bo(vma));
        }
 
-       kfree(vma);
+       xe_vma_free(vma);
 }
 
 static void vma_destroy_work_func(struct work_struct *w)
@@ -932,7 +953,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
                xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
 
                spin_lock(&vm->userptr.invalidated_lock);
-               list_del(&vma->userptr.invalidate_link);
+               list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
                spin_unlock(&vm->userptr.invalidated_lock);
        } else if (!xe_vma_is_null(vma)) {
                xe_bo_assert_held(xe_vma_bo(vma));
@@ -1854,10 +1875,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
        mutex_lock(&xef->vm.lock);
        err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
        mutex_unlock(&xef->vm.lock);
-       if (err) {
-               xe_vm_close_and_put(vm);
-               return err;
-       }
+       if (err)
+               goto err_close_and_put;
 
        if (xe->info.has_asid) {
                mutex_lock(&xe->usm.lock);
@@ -1865,11 +1884,9 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
                                      XA_LIMIT(1, XE_MAX_ASID - 1),
                                      &xe->usm.next_asid, GFP_KERNEL);
                mutex_unlock(&xe->usm.lock);
-               if (err < 0) {
-                       xe_vm_close_and_put(vm);
-                       return err;
-               }
-               err = 0;
+               if (err < 0)
+                       goto err_free_id;
+
                vm->usm.asid = asid;
        }
 
@@ -1887,6 +1904,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 #endif
 
        return 0;
+
+err_free_id:
+       mutex_lock(&xef->vm.lock);
+       xa_erase(&xef->vm.xa, id);
+       mutex_unlock(&xef->vm.lock);
+err_close_and_put:
+       xe_vm_close_and_put(vm);
+
+       return err;
 }
 
 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
@@ -1953,6 +1979,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
                                        xe_exec_queue_last_fence_get(wait_exec_queue, vm);
 
                                xe_sync_entry_signal(&syncs[i], NULL, fence);
+                               dma_fence_put(fence);
                        }
                }
 
@@ -2033,7 +2060,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
        struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
        struct drm_gpuva_ops *ops;
        struct drm_gpuva_op *__op;
-       struct xe_vma_op *op;
        struct drm_gpuvm_bo *vm_bo;
        int err;
 
@@ -2063,9 +2089,11 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
                if (err)
                        return ERR_PTR(err);
 
-               vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
-               if (!vm_bo)
-                       break;
+               vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
+               if (IS_ERR(vm_bo)) {
+                       xe_bo_unlock(bo);
+                       return ERR_CAST(vm_bo);
+               }
 
                ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
                drm_gpuvm_bo_put(vm_bo);
@@ -2078,15 +2106,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
        if (IS_ERR(ops))
                return ops;
 
-#ifdef TEST_VM_ASYNC_OPS_ERROR
-       if (operation & FORCE_ASYNC_OP_ERROR) {
-               op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
-                                             base.entry);
-               if (op)
-                       op->inject_error = true;
-       }
-#endif
-
        drm_gpuva_for_each_op(__op, ops) {
                struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
 
@@ -2142,7 +2161,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
                drm_exec_fini(&exec);
 
        if (xe_vma_is_userptr(vma)) {
-               err = xe_vma_userptr_pin_pages(vma);
+               err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
                if (err) {
                        prep_vma_destroy(vm, vma, false);
                        xe_vma_destroy_unlocked(vma);
@@ -2166,8 +2185,10 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
                return SZ_1G;
        else if (vma->gpuva.flags & XE_VMA_PTE_2M)
                return SZ_2M;
+       else if (vma->gpuva.flags & XE_VMA_PTE_4K)
+               return SZ_4K;
 
-       return SZ_4K;
+       return SZ_1G;   /* Uninitialized, used max size */
 }
 
 static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
@@ -2497,13 +2518,25 @@ retry_userptr:
        }
        drm_exec_fini(&exec);
 
-       if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
+       if (err == -EAGAIN) {
                lockdep_assert_held_write(&vm->lock);
-               err = xe_vma_userptr_pin_pages(vma);
-               if (!err)
-                       goto retry_userptr;
 
-               trace_xe_vma_fail(vma);
+               if (op->base.op == DRM_GPUVA_OP_REMAP) {
+                       if (!op->remap.unmap_done)
+                               vma = gpuva_to_vma(op->base.remap.unmap->va);
+                       else if (op->remap.prev)
+                               vma = op->remap.prev;
+                       else
+                               vma = op->remap.next;
+               }
+
+               if (xe_vma_is_userptr(vma)) {
+                       err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+                       if (!err)
+                               goto retry_userptr;
+
+                       trace_xe_vma_fail(vma);
+               }
        }
 
        return err;
@@ -2515,13 +2548,6 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
 
        lockdep_assert_held_write(&vm->lock);
 
-#ifdef TEST_VM_ASYNC_OPS_ERROR
-       if (op->inject_error) {
-               op->inject_error = false;
-               return -ENOMEM;
-       }
-#endif
-
        switch (op->base.op) {
        case DRM_GPUVA_OP_MAP:
                ret = __xe_vma_op_execute(vm, op->map.vma, op);
@@ -2636,7 +2662,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
 {
        int i;
 
-       for (i = num_ops_list - 1; i; ++i) {
+       for (i = num_ops_list - 1; i >= 0; --i) {
                struct drm_gpuva_ops *__ops = ops[i];
                struct drm_gpuva_op *__op;
 
@@ -2681,16 +2707,9 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
        return 0;
 }
 
-#ifdef TEST_VM_ASYNC_OPS_ERROR
-#define SUPPORTED_FLAGS        \
-       (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \
-        DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
-#else
 #define SUPPORTED_FLAGS        \
        (DRM_XE_VM_BIND_FLAG_READONLY | \
-        DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
-        0xffff)
-#endif
+        DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL)
 #define XE_64K_PAGE_MASK 0xffffull
 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
 
@@ -2843,7 +2862,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        struct drm_gpuva_ops **ops = NULL;
        struct xe_vm *vm;
        struct xe_exec_queue *q = NULL;
-       u32 num_syncs;
+       u32 num_syncs, num_ufence = 0;
        struct xe_sync_entry *syncs = NULL;
        struct drm_xe_vm_bind_op *bind_ops;
        LIST_HEAD(ops_list);
@@ -2980,6 +2999,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                                           SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
                if (err)
                        goto free_syncs;
+
+               if (xe_sync_is_ufence(&syncs[num_syncs]))
+                       num_ufence++;
+       }
+
+       if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+               err = -EINVAL;
+               goto free_syncs;
        }
 
        if (!args->num_binds) {
@@ -3122,8 +3149,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
        if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
                if (xe_vma_is_userptr(vma)) {
                        WARN_ON_ONCE(!mmu_interval_check_retry
-                                    (&vma->userptr.notifier,
-                                     vma->userptr.notifier_seq));
+                                    (&to_userptr_vma(vma)->userptr.notifier,
+                                     to_userptr_vma(vma)->userptr.notifier_seq));
                        WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
                                                             DMA_RESV_USAGE_BOOKKEEP));
 
@@ -3184,11 +3211,11 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
                if (is_null) {
                        addr = 0;
                } else if (is_userptr) {
+                       struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
                        struct xe_res_cursor cur;
 
-                       if (vma->userptr.sg) {
-                               xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
-                                               &cur);
+                       if (sg) {
+                               xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
                                addr = xe_res_dma(&cur);
                        } else {
                                addr = 0;
index cf2f96e8c1ab92245b69dd8853c90d5e128262fd..9654a0612fc258d0ba7395ba7c7fd87899caf904 100644 (file)
@@ -160,6 +160,18 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
        return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
 }
 
+/**
+ * to_userptr_vma() - Return a pointer to an embedding userptr vma
+ * @vma: Pointer to the embedded struct xe_vma
+ *
+ * Return: Pointer to the embedding userptr vma
+ */
+static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
+{
+       xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
+       return container_of(vma, struct xe_userptr_vma, vma);
+}
+
 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
 
 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -224,9 +236,9 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
        }
 }
 
-int xe_vma_userptr_pin_pages(struct xe_vma *vma);
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
 
-int xe_vma_userptr_check_repin(struct xe_vma *vma);
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
 
 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
 
index 63e8a50b88e94980d65a0800817235c518adcd69..5ac9c5bebabc3cf3ecf528f51f8aa92cebc410ef 100644 (file)
@@ -21,9 +21,6 @@ struct xe_bo;
 struct xe_sync_entry;
 struct xe_vm;
 
-#define TEST_VM_ASYNC_OPS_ERROR
-#define FORCE_ASYNC_OP_ERROR   BIT(31)
-
 #define XE_VMA_READ_ONLY       DRM_GPUVA_USERBITS
 #define XE_VMA_DESTROYED       (DRM_GPUVA_USERBITS << 1)
 #define XE_VMA_ATOMIC_PTE_BIT  (DRM_GPUVA_USERBITS << 2)
@@ -37,6 +34,8 @@ struct xe_vm;
 struct xe_userptr {
        /** @invalidate_link: Link for the vm::userptr.invalidated list */
        struct list_head invalidate_link;
+       /** @userptr: link into VM repin list if userptr. */
+       struct list_head repin_link;
        /**
         * @notifier: MMU notifier for user pointer (invalidation call back)
         */
@@ -68,8 +67,6 @@ struct xe_vma {
         * resv.
         */
        union {
-               /** @userptr: link into VM repin list if userptr. */
-               struct list_head userptr;
                /** @rebind: link into VM if this VMA needs rebinding. */
                struct list_head rebind;
                /** @destroy: link to contested list when VM is being closed. */
@@ -105,11 +102,15 @@ struct xe_vma {
         * @pat_index: The pat index to use when encoding the PTEs for this vma.
         */
        u16 pat_index;
+};
 
-       /**
-        * @userptr: user pointer state, only allocated for VMAs that are
-        * user pointers
-        */
+/**
+ * struct xe_userptr_vma - A userptr vma subclass
+ * @vma: The vma.
+ * @userptr: Additional userptr information.
+ */
+struct xe_userptr_vma {
+       struct xe_vma vma;
        struct xe_userptr userptr;
 };
 
@@ -356,11 +357,6 @@ struct xe_vma_op {
        /** @flags: operation flags */
        enum xe_vma_op_flags flags;
 
-#ifdef TEST_VM_ASYNC_OPS_ERROR
-       /** @inject_error: inject error to test async op error handling */
-       bool inject_error;
-#endif
-
        union {
                /** @map: VMA map operation specific data */
                struct xe_vma_op_map map;
index 43318c1993ba9f567367660cd1e715e3a65798a1..c3e90025064bdec9ac5cf7de150d708fbecae03a 100644 (file)
@@ -85,17 +85,31 @@ struct hdlc_payload {
        void *buf;
 };
 
+/**
+ * struct hdlc_greybus_frame - Structure to represent greybus HDLC frame payload
+ *
+ * @cport: cport id
+ * @hdr: greybus operation header
+ * @payload: greybus message payload
+ *
+ * The HDLC payload sent over UART for greybus address has cport preappended to greybus message
+ */
+struct hdlc_greybus_frame {
+       __le16 cport;
+       struct gb_operation_msg_hdr hdr;
+       u8 payload[];
+} __packed;
+
 static void hdlc_rx_greybus_frame(struct gb_beagleplay *bg, u8 *buf, u16 len)
 {
-       u16 cport_id;
-       struct gb_operation_msg_hdr *hdr = (struct gb_operation_msg_hdr *)buf;
-
-       memcpy(&cport_id, hdr->pad, sizeof(cport_id));
+       struct hdlc_greybus_frame *gb_frame = (struct hdlc_greybus_frame *)buf;
+       u16 cport_id = le16_to_cpu(gb_frame->cport);
+       u16 gb_msg_len = le16_to_cpu(gb_frame->hdr.size);
 
        dev_dbg(&bg->sd->dev, "Greybus Operation %u type %X cport %u status %u received",
-               hdr->operation_id, hdr->type, cport_id, hdr->result);
+               gb_frame->hdr.operation_id, gb_frame->hdr.type, cport_id, gb_frame->hdr.result);
 
-       greybus_data_rcvd(bg->gb_hd, cport_id, buf, len);
+       greybus_data_rcvd(bg->gb_hd, cport_id, (u8 *)&gb_frame->hdr, gb_msg_len);
 }
 
 static void hdlc_rx_dbg_frame(const struct gb_beagleplay *bg, const char *buf, u16 len)
@@ -257,7 +271,7 @@ static void hdlc_rx_frame(struct gb_beagleplay *bg)
        }
 }
 
-static int hdlc_rx(struct gb_beagleplay *bg, const u8 *data, size_t count)
+static ssize_t hdlc_rx(struct gb_beagleplay *bg, const u8 *data, size_t count)
 {
        size_t i;
        u8 c;
@@ -317,7 +331,8 @@ static void hdlc_deinit(struct gb_beagleplay *bg)
        flush_work(&bg->tx_work);
 }
 
-static int gb_tty_receive(struct serdev_device *sd, const unsigned char *data, size_t count)
+static ssize_t gb_tty_receive(struct serdev_device *sd, const u8 *data,
+                             size_t count)
 {
        struct gb_beagleplay *bg = serdev_device_get_drvdata(sd);
 
@@ -336,25 +351,39 @@ static struct serdev_device_ops gb_beagleplay_ops = {
        .write_wakeup = gb_tty_wakeup,
 };
 
+/**
+ * gb_message_send() - Send greybus message using HDLC over UART
+ *
+ * @hd: pointer to greybus host device
+ * @cport: AP cport where message originates
+ * @msg: greybus message to send
+ * @mask: gfp mask
+ *
+ * Greybus HDLC frame has the following payload:
+ * 1. le16 cport
+ * 2. gb_operation_msg_hdr msg_header
+ * 3. u8 *msg_payload
+ */
 static int gb_message_send(struct gb_host_device *hd, u16 cport, struct gb_message *msg, gfp_t mask)
 {
        struct gb_beagleplay *bg = dev_get_drvdata(&hd->dev);
-       struct hdlc_payload payloads[2];
+       struct hdlc_payload payloads[3];
+       __le16 cport_id = cpu_to_le16(cport);
 
        dev_dbg(&hd->dev, "Sending greybus message with Operation %u, Type: %X on Cport %u",
                msg->header->operation_id, msg->header->type, cport);
 
-       if (msg->header->size > RX_HDLC_PAYLOAD)
+       if (le16_to_cpu(msg->header->size) > RX_HDLC_PAYLOAD)
                return dev_err_probe(&hd->dev, -E2BIG, "Greybus message too big");
 
-       memcpy(msg->header->pad, &cport, sizeof(cport));
-
-       payloads[0].buf = msg->header;
-       payloads[0].len = sizeof(*msg->header);
-       payloads[1].buf = msg->payload;
-       payloads[1].len = msg->payload_size;
+       payloads[0].buf = &cport_id;
+       payloads[0].len = sizeof(cport_id);
+       payloads[1].buf = msg->header;
+       payloads[1].len = sizeof(*msg->header);
+       payloads[2].buf = msg->payload;
+       payloads[2].len = msg->payload_size;
 
-       hdlc_tx_frames(bg, ADDRESS_GREYBUS, 0x03, payloads, 2);
+       hdlc_tx_frames(bg, ADDRESS_GREYBUS, 0x03, payloads, 3);
        greybus_message_sent(bg->gb_hd, msg, 0);
 
        return 0;
index af752dd3a340947103d25068b6a34802b0d4599f..329de5e12c1a07a0193c031280d807cbd15bdf0c 100644 (file)
@@ -6,6 +6,7 @@ menu "AMD SFH HID Support"
 config AMD_SFH_HID
        tristate "AMD Sensor Fusion Hub"
        depends on HID
+       depends on X86
        help
          If you say yes to this option, support will be included for the
          AMD Sensor Fusion Hub.
index a1950bc6e6cefee4ca1dcdc5cc2ceaf4216c9065..e5620d7db5690e857d3465498ba4e3fb9137d7b7 100644 (file)
@@ -19,6 +19,9 @@
 #define AMD_C2P_MSG(regno) (0x10500 + ((regno) * 4))
 #define AMD_P2C_MSG(regno) (0x10680 + ((regno) * 4))
 
+#define AMD_C2P_MSG_V1(regno) (0x10900 + ((regno) * 4))
+#define AMD_P2C_MSG_V1(regno) (0x10500 + ((regno) * 4))
+
 #define SENSOR_ENABLED                 4
 #define SENSOR_DISABLED                        5
 
@@ -53,6 +56,9 @@ struct amd_mp2_dev {
        /* mp2 active control status */
        u32 mp2_acs;
        struct sfh_dev_status dev_en;
+       struct work_struct work;
+       u8 init_done;
+       u8 rver;
 };
 
 struct amd_mp2_ops {
@@ -79,4 +85,14 @@ void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata);
 int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata);
 void amd_sfh_clear_intr(struct amd_mp2_dev *privdata);
 int amd_sfh_irq_init(struct amd_mp2_dev *privdata);
+
+static inline u64 amd_get_c2p_val(struct amd_mp2_dev *mp2, u32 idx)
+{
+       return mp2->rver == 1 ? AMD_C2P_MSG_V1(idx) :  AMD_C2P_MSG(idx);
+}
+
+static inline u64 amd_get_p2c_val(struct amd_mp2_dev *mp2, u32 idx)
+{
+       return mp2->rver == 1 ? AMD_P2C_MSG_V1(idx) :  AMD_P2C_MSG(idx);
+}
 #endif
index 2530fa98b568beed5c32fc3e714ed72575b3ef8f..9e97c26c4482e12838fb25ddaa6a5de1e19dcd8b 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/bitops.h>
 #include <linux/delay.h>
+#include <linux/devm-helpers.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmi.h>
 #include <linux/interrupt.h>
@@ -35,15 +36,17 @@ static int sensor_mask_override = -1;
 module_param_named(sensor_mask, sensor_mask_override, int, 0444);
 MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
 
+static bool intr_disable = true;
+
 static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
 {
        union cmd_response cmd_resp;
 
-       /* Get response with status within a max of 1600 ms timeout */
+       /* Get response with status within a max of 10 seconds timeout */
        if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
                                (cmd_resp.response_v2.response == sensor_sts &&
                                cmd_resp.response_v2.status == 0 && (sid == 0xff ||
-                               cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
+                               cmd_resp.response_v2.sensor_id == sid)), 500, 10000000))
                return cmd_resp.response_v2.response;
 
        return SENSOR_DISABLED;
@@ -55,7 +58,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
 
        cmd_base.ul = 0;
        cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
-       cmd_base.cmd_v2.intr_disable = 1;
+       cmd_base.cmd_v2.intr_disable = intr_disable;
        cmd_base.cmd_v2.period = info.period;
        cmd_base.cmd_v2.sensor_id = info.sensor_idx;
        cmd_base.cmd_v2.length = 16;
@@ -73,7 +76,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
 
        cmd_base.ul = 0;
        cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
-       cmd_base.cmd_v2.intr_disable = 1;
+       cmd_base.cmd_v2.intr_disable = intr_disable;
        cmd_base.cmd_v2.period = 0;
        cmd_base.cmd_v2.sensor_id = sensor_idx;
        cmd_base.cmd_v2.length  = 16;
@@ -87,7 +90,7 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
        union sfh_cmd_base cmd_base;
 
        cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
-       cmd_base.cmd_v2.intr_disable = 1;
+       cmd_base.cmd_v2.intr_disable = intr_disable;
        cmd_base.cmd_v2.period = 0;
        cmd_base.cmd_v2.sensor_id = 0;
 
@@ -96,9 +99,9 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
 
 void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
 {
-       if (readl(privdata->mmio + AMD_P2C_MSG(4))) {
-               writel(0, privdata->mmio + AMD_P2C_MSG(4));
-               writel(0xf, privdata->mmio + AMD_P2C_MSG(5));
+       if (readl(privdata->mmio + amd_get_p2c_val(privdata, 4))) {
+               writel(0, privdata->mmio + amd_get_p2c_val(privdata, 4));
+               writel(0xf, privdata->mmio + amd_get_p2c_val(privdata, 5));
        }
 }
 
@@ -292,6 +295,26 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
        return 0;
 }
 
+static int mp2_disable_intr(const struct dmi_system_id *id)
+{
+       intr_disable = false;
+       return 0;
+}
+
+static const struct dmi_system_id dmi_sfh_table[] = {
+       {
+               /*
+                * https://bugzilla.kernel.org/show_bug.cgi?id=218104
+                */
+               .callback = mp2_disable_intr,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook x360 435 G7"),
+               },
+       },
+       {}
+};
+
 static const struct dmi_system_id dmi_nodevs[] = {
        {
                /*
@@ -307,6 +330,48 @@ static const struct dmi_system_id dmi_nodevs[] = {
        { }
 };
 
+static void sfh1_1_init_work(struct work_struct *work)
+{
+       struct amd_mp2_dev *mp2 = container_of(work, struct amd_mp2_dev, work);
+       struct pci_dev *pdev = mp2->pdev;
+       int rc;
+
+       rc = mp2->sfh1_1_ops->init(mp2);
+       if (rc) {
+               dev_err(&pdev->dev, "sfh1_1_init failed err %d\n", rc);
+               return;
+       }
+
+       amd_sfh_clear_intr(mp2);
+       mp2->init_done = 1;
+}
+
+static void sfh_init_work(struct work_struct *work)
+{
+       struct amd_mp2_dev *mp2 = container_of(work, struct amd_mp2_dev, work);
+       struct pci_dev *pdev = mp2->pdev;
+       int rc;
+
+       rc = amd_sfh_hid_client_init(mp2);
+       if (rc) {
+               amd_sfh_clear_intr(mp2);
+               dev_err(&pdev->dev, "amd_sfh_hid_client_init failed err %d\n", rc);
+               return;
+       }
+
+       amd_sfh_clear_intr(mp2);
+       mp2->init_done = 1;
+}
+
+static void amd_sfh_remove(struct pci_dev *pdev)
+{
+       struct amd_mp2_dev *mp2 = pci_get_drvdata(pdev);
+
+       flush_work(&mp2->work);
+       if (mp2->init_done)
+               mp2->mp2_ops->remove(mp2);
+}
+
 static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct amd_mp2_dev *privdata;
@@ -315,6 +380,8 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
        if (dmi_first_match(dmi_nodevs))
                return -ENODEV;
 
+       dmi_check_system(dmi_sfh_table);
+
        privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
        if (!privdata)
                return -ENOMEM;
@@ -343,10 +410,15 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
 
        privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data;
        if (privdata->sfh1_1_ops) {
-               rc = privdata->sfh1_1_ops->init(privdata);
+               if (boot_cpu_data.x86 >= 0x1A)
+                       privdata->rver = 1;
+
+               rc = devm_work_autocancel(&pdev->dev, &privdata->work, sfh1_1_init_work);
                if (rc)
                        return rc;
-               goto init_done;
+
+               schedule_work(&privdata->work);
+               return 0;
        }
 
        mp2_select_ops(privdata);
@@ -357,33 +429,34 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
                return rc;
        }
 
-       rc = amd_sfh_hid_client_init(privdata);
+       rc = devm_work_autocancel(&pdev->dev, &privdata->work, sfh_init_work);
        if (rc) {
                amd_sfh_clear_intr(privdata);
-               if (rc != -EOPNOTSUPP)
-                       dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n");
                return rc;
        }
 
-init_done:
-       amd_sfh_clear_intr(privdata);
-
-       return devm_add_action_or_reset(&pdev->dev, privdata->mp2_ops->remove, privdata);
+       schedule_work(&privdata->work);
+       return 0;
 }
 
 static void amd_sfh_shutdown(struct pci_dev *pdev)
 {
        struct amd_mp2_dev *mp2 = pci_get_drvdata(pdev);
 
-       if (mp2 && mp2->mp2_ops)
-               mp2->mp2_ops->stop_all(mp2);
+       if (mp2) {
+               flush_work(&mp2->work);
+               if (mp2->init_done)
+                       mp2->mp2_ops->stop_all(mp2);
+       }
 }
 
 static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
 {
        struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
 
-       mp2->mp2_ops->resume(mp2);
+       flush_work(&mp2->work);
+       if (mp2->init_done)
+               mp2->mp2_ops->resume(mp2);
 
        return 0;
 }
@@ -392,7 +465,9 @@ static int __maybe_unused amd_mp2_pci_suspend(struct device *dev)
 {
        struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
 
-       mp2->mp2_ops->suspend(mp2);
+       flush_work(&mp2->work);
+       if (mp2->init_done)
+               mp2->mp2_ops->suspend(mp2);
 
        return 0;
 }
@@ -414,6 +489,7 @@ static struct pci_driver amd_mp2_pci_driver = {
        .probe          = amd_mp2_pci_probe,
        .driver.pm      = &amd_mp2_pm_ops,
        .shutdown       = amd_sfh_shutdown,
+       .remove         = amd_sfh_remove,
 };
 module_pci_driver(amd_mp2_pci_driver);
 
index 70add75fc5066085478bf3b7753a03c2b4e6f0c7..05e400a4a83e40d184b3504cd73771bfef7edafe 100644 (file)
@@ -90,10 +90,10 @@ enum mem_use_type {
 struct hpd_status {
        union {
                struct {
-                       u32 human_presence_report : 4;
-                       u32 human_presence_actual : 4;
-                       u32 probablity            : 8;
                        u32 object_distance       : 16;
+                       u32 probablity            : 8;
+                       u32 human_presence_actual : 4;
+                       u32 human_presence_report : 4;
                } shpd;
                u32 val;
        };
index 33fbdde8aff00c12a9b2dc70491189fef11cd77a..c8916afefa626fed56fd252e91c5a8f2d7a737cb 100644 (file)
@@ -251,7 +251,7 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
                break;
        case HPD_IDX:
                get_common_inputs(&hpd_input.common_property, report_id);
-               hpdstatus.val = readl(mp2->mmio + AMD_C2P_MSG(4));
+               hpdstatus.val = readl(mp2->mmio + amd_get_c2p_val(mp2, 4));
                hpd_input.human_presence = hpdstatus.shpd.presence;
                report_size = sizeof(hpd_input);
                memcpy(input_report, &hpd_input, sizeof(hpd_input));
index 9dbe6f4cb29426a109c8c6c2565ae89e808cd3aa..5b24d5f63701a62e49ed7191a70fca288c252cc2 100644 (file)
@@ -172,7 +172,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
                if (rc)
                        goto cleanup;
 
-               writel(0, privdata->mmio + AMD_P2C_MSG(0));
+               writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
                mp2_ops->start(privdata, info);
                status = amd_sfh_wait_for_response
                                (privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
@@ -298,7 +298,7 @@ static void amd_sfh_set_ops(struct amd_mp2_dev *mp2)
 
 int amd_sfh1_1_init(struct amd_mp2_dev *mp2)
 {
-       u32 phy_base = readl(mp2->mmio + AMD_C2P_MSG(22));
+       u32 phy_base = readl(mp2->mmio + amd_get_c2p_val(mp2, 22));
        struct device *dev = &mp2->pdev->dev;
        struct sfh_base_info binfo;
        int rc;
index ae36312bc23650e0b77171e46d1b972e463c1899..2de2668a027797b3c47de0cbfe2a0b742b753b7c 100644 (file)
@@ -20,7 +20,7 @@ static int amd_sfh_wait_response(struct amd_mp2_dev *mp2, u8 sid, u32 cmd_id)
        struct sfh_cmd_response cmd_resp;
 
        /* Get response with status within a max of 10000 ms timeout */
-       if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
+       if (!readl_poll_timeout(mp2->mmio + amd_get_p2c_val(mp2, 0), cmd_resp.resp,
                                (cmd_resp.response.response == 0 &&
                                cmd_resp.response.cmd_id == cmd_id && (sid == 0xff ||
                                cmd_resp.response.sensor_id == sid)), 500, 10000000))
@@ -39,7 +39,7 @@ static void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor
        cmd_base.cmd.sub_cmd_value = 1;
        cmd_base.cmd.sensor_id = info.sensor_idx;
 
-       writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+       writel(cmd_base.ul, privdata->mmio + amd_get_c2p_val(privdata, 0));
 }
 
 static void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
@@ -52,8 +52,8 @@ static void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
        cmd_base.cmd.sub_cmd_value = 1;
        cmd_base.cmd.sensor_id = sensor_idx;
 
-       writeq(0x0, privdata->mmio + AMD_C2P_MSG(1));
-       writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+       writeq(0x0, privdata->mmio + amd_get_c2p_val(privdata, 1));
+       writel(cmd_base.ul, privdata->mmio + amd_get_c2p_val(privdata, 0));
 }
 
 static void amd_stop_all_sensor(struct amd_mp2_dev *privdata)
@@ -66,7 +66,7 @@ static void amd_stop_all_sensor(struct amd_mp2_dev *privdata)
        /* 0xf indicates all sensors */
        cmd_base.cmd.sensor_id = 0xf;
 
-       writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+       writel(cmd_base.ul, privdata->mmio + amd_get_c2p_val(privdata, 0));
 }
 
 static struct amd_mp2_ops amd_sfh_ops = {
index d9ef45fcaeab1380967fe2fa2357411d2bc913d4..470ae2c29c94f25b66127827b725da24b41e101b 100644 (file)
@@ -143,6 +143,9 @@ u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *s
 }
 EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
 
+/* Disables missing prototype warnings */
+__bpf_kfunc_start_defs();
+
 /**
  * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
  *
@@ -152,7 +155,7 @@ EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
  *
  * @returns %NULL on error, an %__u8 memory pointer on success
  */
-noinline __u8 *
+__bpf_kfunc __u8 *
 hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
 {
        struct hid_bpf_ctx_kern *ctx_kern;
@@ -167,6 +170,7 @@ hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr
 
        return ctx_kern->data + offset;
 }
+__bpf_kfunc_end_defs();
 
 /*
  * The following set contains all functions we agree BPF programs
@@ -241,6 +245,42 @@ int hid_bpf_reconnect(struct hid_device *hdev)
        return 0;
 }
 
+static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct bpf_prog *prog,
+                                 __u32 flags)
+{
+       int fd, err, prog_type;
+
+       prog_type = hid_bpf_get_prog_attach_type(prog);
+       if (prog_type < 0)
+               return prog_type;
+
+       if (prog_type >= HID_BPF_PROG_TYPE_MAX)
+               return -EINVAL;
+
+       if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
+               err = hid_bpf_allocate_event_data(hdev);
+               if (err)
+                       return err;
+       }
+
+       fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, prog, flags);
+       if (fd < 0)
+               return fd;
+
+       if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
+               err = hid_bpf_reconnect(hdev);
+               if (err) {
+                       close_fd(fd);
+                       return err;
+               }
+       }
+
+       return fd;
+}
+
+/* Disables missing prototype warnings */
+__bpf_kfunc_start_defs();
+
 /**
  * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
  *
@@ -253,22 +293,17 @@ int hid_bpf_reconnect(struct hid_device *hdev)
  * is pinned to the BPF file system).
  */
 /* called from syscall */
-noinline int
+__bpf_kfunc int
 hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
 {
        struct hid_device *hdev;
+       struct bpf_prog *prog;
        struct device *dev;
-       int fd, err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);
+       int err, fd;
 
        if (!hid_bpf_ops)
                return -EINVAL;
 
-       if (prog_type < 0)
-               return prog_type;
-
-       if (prog_type >= HID_BPF_PROG_TYPE_MAX)
-               return -EINVAL;
-
        if ((flags & ~HID_BPF_FLAG_MASK))
                return -EINVAL;
 
@@ -278,25 +313,29 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
 
        hdev = to_hid_device(dev);
 
-       if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
-               err = hid_bpf_allocate_event_data(hdev);
-               if (err)
-                       return err;
+       /*
+        * take a ref on the prog itself, it will be released
+        * on errors or when it'll be detached
+        */
+       prog = bpf_prog_get(prog_fd);
+       if (IS_ERR(prog)) {
+               err = PTR_ERR(prog);
+               goto out_dev_put;
        }
 
-       fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
-       if (fd < 0)
-               return fd;
-
-       if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
-               err = hid_bpf_reconnect(hdev);
-               if (err) {
-                       close_fd(fd);
-                       return err;
-               }
+       fd = do_hid_bpf_attach_prog(hdev, prog_fd, prog, flags);
+       if (fd < 0) {
+               err = fd;
+               goto out_prog_put;
        }
 
        return fd;
+
+ out_prog_put:
+       bpf_prog_put(prog);
+ out_dev_put:
+       put_device(dev);
+       return err;
 }
 
 /**
@@ -306,7 +345,7 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
  *
  * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
  */
-noinline struct hid_bpf_ctx *
+__bpf_kfunc struct hid_bpf_ctx *
 hid_bpf_allocate_context(unsigned int hid_id)
 {
        struct hid_device *hdev;
@@ -323,8 +362,10 @@ hid_bpf_allocate_context(unsigned int hid_id)
        hdev = to_hid_device(dev);
 
        ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
-       if (!ctx_kern)
+       if (!ctx_kern) {
+               put_device(dev);
                return NULL;
+       }
 
        ctx_kern->ctx.hid = hdev;
 
@@ -337,14 +378,19 @@ hid_bpf_allocate_context(unsigned int hid_id)
  * @ctx: the HID-BPF context to release
  *
  */
-noinline void
+__bpf_kfunc void
 hid_bpf_release_context(struct hid_bpf_ctx *ctx)
 {
        struct hid_bpf_ctx_kern *ctx_kern;
+       struct hid_device *hid;
 
        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
+       hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */
 
        kfree(ctx_kern);
+
+       /* get_device() is called by bus_find_device() */
+       put_device(&hid->dev);
 }
 
 /**
@@ -358,7 +404,7 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx)
  *
  * @returns %0 on success, a negative error code otherwise.
  */
-noinline int
+__bpf_kfunc int
 hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
                   enum hid_report_type rtype, enum hid_class_request reqtype)
 {
@@ -426,6 +472,7 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
        kfree(dma_data);
        return ret;
 }
+__bpf_kfunc_end_defs();
 
 /* our HID-BPF entrypoints */
 BTF_SET8_START(hid_bpf_fmodret_ids)
index 63dfc8605cd21efbc5f0bdc1844e4e79d73ab3cb..fbe0639d09f2604d6a8e11833eba82480640e289 100644 (file)
@@ -12,9 +12,9 @@ struct hid_bpf_ctx_kern {
 
 int hid_bpf_preload_skel(void);
 void hid_bpf_free_links_and_skel(void);
-int hid_bpf_get_prog_attach_type(int prog_fd);
+int hid_bpf_get_prog_attach_type(struct bpf_prog *prog);
 int __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type, int prog_fd,
-                         __u32 flags);
+                         struct bpf_prog *prog, __u32 flags);
 void __hid_bpf_destroy_device(struct hid_device *hdev);
 int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
                     struct hid_bpf_ctx_kern *ctx_kern);
index eca34b7372f951fc17e156ec2cc3761282ea61e8..aa8e1c79cdf5518301e73e44038f75e6fb1173e0 100644 (file)
@@ -196,6 +196,7 @@ static void __hid_bpf_do_release_prog(int map_fd, unsigned int idx)
 static void hid_bpf_release_progs(struct work_struct *work)
 {
        int i, j, n, map_fd = -1;
+       bool hdev_destroyed;
 
        if (!jmp_table.map)
                return;
@@ -220,6 +221,12 @@ static void hid_bpf_release_progs(struct work_struct *work)
                if (entry->hdev) {
                        hdev = entry->hdev;
                        type = entry->type;
+                       /*
+                        * hdev is still valid, even if we are called after hid_destroy_device():
+                        * when hid_bpf_attach() gets called, it takes a ref on the dev through
+                        * bus_find_device()
+                        */
+                       hdev_destroyed = hdev->bpf.destroyed;
 
                        hid_bpf_populate_hdev(hdev, type);
 
@@ -232,12 +239,19 @@ static void hid_bpf_release_progs(struct work_struct *work)
                                if (test_bit(next->idx, jmp_table.enabled))
                                        continue;
 
-                               if (next->hdev == hdev && next->type == type)
+                               if (next->hdev == hdev && next->type == type) {
+                                       /*
+                                        * clear the hdev reference and decrement the device ref
+                                        * that was taken during bus_find_device() while calling
+                                        * hid_bpf_attach()
+                                        */
                                        next->hdev = NULL;
+                                       put_device(&hdev->dev);
+                               }
                        }
 
-                       /* if type was rdesc fixup, reconnect device */
-                       if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP)
+                       /* if type was rdesc fixup and the device is not gone, reconnect device */
+                       if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP && !hdev_destroyed)
                                hid_bpf_reconnect(hdev);
                }
        }
@@ -333,15 +347,10 @@ static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
        return err;
 }
 
-int hid_bpf_get_prog_attach_type(int prog_fd)
+int hid_bpf_get_prog_attach_type(struct bpf_prog *prog)
 {
-       struct bpf_prog *prog = NULL;
-       int i;
        int prog_type = HID_BPF_PROG_TYPE_UNDEF;
-
-       prog = bpf_prog_get(prog_fd);
-       if (IS_ERR(prog))
-               return PTR_ERR(prog);
+       int i;
 
        for (i = 0; i < HID_BPF_PROG_TYPE_MAX; i++) {
                if (hid_bpf_btf_ids[i] == prog->aux->attach_btf_id) {
@@ -350,8 +359,6 @@ int hid_bpf_get_prog_attach_type(int prog_fd)
                }
        }
 
-       bpf_prog_put(prog);
-
        return prog_type;
 }
 
@@ -388,19 +395,13 @@ static const struct bpf_link_ops hid_bpf_link_lops = {
 /* called from syscall */
 noinline int
 __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
-                     int prog_fd, __u32 flags)
+                     int prog_fd, struct bpf_prog *prog, __u32 flags)
 {
        struct bpf_link_primer link_primer;
        struct hid_bpf_link *link;
-       struct bpf_prog *prog = NULL;
        struct hid_bpf_prog_entry *prog_entry;
        int cnt, err = -EINVAL, prog_table_idx = -1;
 
-       /* take a ref on the prog itself */
-       prog = bpf_prog_get(prog_fd);
-       if (IS_ERR(prog))
-               return PTR_ERR(prog);
-
        mutex_lock(&hid_bpf_attach_lock);
 
        link = kzalloc(sizeof(*link), GFP_USER);
@@ -467,7 +468,6 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
  err_unlock:
        mutex_unlock(&hid_bpf_attach_lock);
 
-       bpf_prog_put(prog);
        kfree(link);
 
        return err;
index fb30e228d35f9a91b6bb395845584d9f073be26c..175b6680087e030060545f46048ce1313a2f74bd 100644 (file)
 
 #define USB_VENDOR_ID_CIDC             0x1677
 
+#define I2C_VENDOR_ID_CIRQUE           0x0488
+#define I2C_PRODUCT_ID_CIRQUE_1063     0x1063
+
 #define USB_VENDOR_ID_CJTOUCH          0x24b8
 #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
 #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
 #define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1     0x2BED
 #define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2     0x2BEE
 #define I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG         0x2D02
+#define I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM        0x2F81
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
index c8b20d44b14724d9327b23bce09a586d473bfbf5..e03d300d2bac49e02311b63402c1c19f46f71482 100644 (file)
@@ -411,6 +411,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
          HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG),
          HID_BATTERY_QUIRK_IGNORE },
+       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM),
+         HID_BATTERY_QUIRK_AVOID_QUERY },
        {}
 };
 
index fd6d8f1d9b8f61992a69ce651dd379d121c2da49..d2f3f234f29dea35b2bfb37ef693ba9d6a9b8bf6 100644 (file)
@@ -203,6 +203,8 @@ struct hidpp_device {
        struct hidpp_scroll_counter vertical_wheel_counter;
 
        u8 wireless_feature_index;
+
+       bool connected_once;
 };
 
 /* HID++ 1.0 error codes */
@@ -988,8 +990,13 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
        hidpp->protocol_minor = response.rap.params[1];
 
 print_version:
-       hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
-                hidpp->protocol_major, hidpp->protocol_minor);
+       if (!hidpp->connected_once) {
+               hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+                        hidpp->protocol_major, hidpp->protocol_minor);
+               hidpp->connected_once = true;
+       } else
+               hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+                        hidpp->protocol_major, hidpp->protocol_minor);
        return 0;
 }
 
@@ -4184,7 +4191,7 @@ static void hidpp_connect_event(struct work_struct *work)
        /* Get device version to check if it is connected */
        ret = hidpp_root_get_protocol_version(hidpp);
        if (ret) {
-               hid_info(hidpp->hid_dev, "Disconnected\n");
+               hid_dbg(hidpp->hid_dev, "Disconnected\n");
                if (hidpp->battery.ps) {
                        hidpp->battery.online = false;
                        hidpp->battery.status = POWER_SUPPLY_STATUS_UNKNOWN;
@@ -4610,6 +4617,8 @@ static const struct hid_device_id hidpp_devices[] = {
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
        { /* Logitech G Pro X Superlight Gaming Mouse over USB */
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
+       { /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */
+         HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) },
 
        { /* G935 Gaming Headset */
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87),
index fd5b0637dad683e7b20c929974c958e79936880c..3e91e4d6ba6fa335c7f5988638791d3df8d1773a 100644 (file)
@@ -2151,6 +2151,10 @@ static const struct hid_device_id mt_devices[] = {
                HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
                        USB_VENDOR_ID_SYNAPTICS, 0xcd7e) },
 
+       { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+               HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+                       USB_VENDOR_ID_SYNAPTICS, 0xcddc) },
+
        { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
                HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
                        USB_VENDOR_ID_SYNAPTICS, 0xce08) },
index 82d0a77359c460c9bad772038e1a129e9983e0c0..58b15750dbb0ac2cb2ad333b616dc39cdea8c779 100644 (file)
@@ -800,6 +800,8 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts)
 
        led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
                                   "thunderstrike%d:blue:led", ts->id);
+       if (!led->name)
+               return -ENOMEM;
        led->max_brightness = 1;
        led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN;
        led->brightness_get = &thunderstrike_led_get_brightness;
@@ -831,6 +833,8 @@ static inline int thunderstrike_psy_create(struct shield_device *shield_dev)
        shield_dev->battery_dev.desc.name =
                devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
                               "thunderstrike_%d", ts->id);
+       if (!shield_dev->battery_dev.desc.name)
+               return -ENOMEM;
 
        shield_dev->battery_dev.psy = power_supply_register(
                &hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg);
index b3c4e50e248aa7eda08a356187ecea54cc803834..b08a5ab5852884219654ac449f255d9a3e3f1585 100644 (file)
@@ -1109,10 +1109,9 @@ static int steam_probe(struct hid_device *hdev,
                return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 
        steam = devm_kzalloc(&hdev->dev, sizeof(*steam), GFP_KERNEL);
-       if (!steam) {
-               ret = -ENOMEM;
-               goto steam_alloc_fail;
-       }
+       if (!steam)
+               return -ENOMEM;
+
        steam->hdev = hdev;
        hid_set_drvdata(hdev, steam);
        spin_lock_init(&steam->lock);
@@ -1129,14 +1128,14 @@ static int steam_probe(struct hid_device *hdev,
         */
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDRAW);
        if (ret)
-               goto hid_hw_start_fail;
+               goto err_cancel_work;
 
        ret = hid_hw_open(hdev);
        if (ret) {
                hid_err(hdev,
                        "%s:hid_hw_open\n",
                        __func__);
-               goto hid_hw_open_fail;
+               goto err_hw_stop;
        }
 
        if (steam->quirks & STEAM_QUIRK_WIRELESS) {
@@ -1152,36 +1151,37 @@ static int steam_probe(struct hid_device *hdev,
                        hid_err(hdev,
                                "%s:steam_register failed with error %d\n",
                                __func__, ret);
-                       goto input_register_fail;
+                       goto err_hw_close;
                }
        }
 
        steam->client_hdev = steam_create_client_hid(hdev);
        if (IS_ERR(steam->client_hdev)) {
                ret = PTR_ERR(steam->client_hdev);
-               goto client_hdev_fail;
+               goto err_stream_unregister;
        }
        steam->client_hdev->driver_data = steam;
 
        ret = hid_add_device(steam->client_hdev);
        if (ret)
-               goto client_hdev_add_fail;
+               goto err_destroy;
 
        return 0;
 
-client_hdev_add_fail:
-       hid_hw_stop(hdev);
-client_hdev_fail:
+err_destroy:
        hid_destroy_device(steam->client_hdev);
-input_register_fail:
-hid_hw_open_fail:
-hid_hw_start_fail:
+err_stream_unregister:
+       if (steam->connected)
+               steam_unregister(steam);
+err_hw_close:
+       hid_hw_close(hdev);
+err_hw_stop:
+       hid_hw_stop(hdev);
+err_cancel_work:
        cancel_work_sync(&steam->work_connect);
        cancel_delayed_work_sync(&steam->mode_switch);
        cancel_work_sync(&steam->rumble_work);
-steam_alloc_fail:
-       hid_err(hdev, "%s: failed with error %d\n",
-                       __func__, ret);
+
        return ret;
 }
 
index 13c8dd8cd35060731165cd2018f96c6e7bfef512..2bc762d31ac70de9724df166422f31ab1c8687f4 100644 (file)
@@ -357,8 +357,11 @@ static int hidraw_release(struct inode * inode, struct file * file)
        down_write(&minors_rwsem);
 
        spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
-       for (int i = list->tail; i < list->head; i++)
-               kfree(list->buffer[i].value);
+       while (list->tail != list->head) {
+               kfree(list->buffer[list->tail].value);
+               list->buffer[list->tail].value = NULL;
+               list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
+       }
        list_del(&list->node);
        spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
        kfree(list);
index 90f316ae9819af4759720aad86136721f78f5abe..2df1ab3c31cc54da812ee653face224f32e69fc2 100644 (file)
@@ -49,6 +49,7 @@
 #define I2C_HID_QUIRK_RESET_ON_RESUME          BIT(2)
 #define I2C_HID_QUIRK_BAD_INPUT_SIZE           BIT(3)
 #define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET    BIT(4)
+#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND      BIT(5)
 
 /* Command opcodes */
 #define I2C_HID_OPCODE_RESET                   0x01
@@ -131,6 +132,8 @@ static const struct i2c_hid_quirks {
                 I2C_HID_QUIRK_RESET_ON_RESUME },
        { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
                I2C_HID_QUIRK_BAD_INPUT_SIZE },
+       { I2C_VENDOR_ID_CIRQUE, I2C_PRODUCT_ID_CIRQUE_1063,
+               I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND },
        /*
         * Sending the wakeup after reset actually break ELAN touchscreen controller
         */
@@ -956,7 +959,8 @@ static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff)
                return ret;
 
        /* Save some power */
-       i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND))
+               i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
 
        disable_irq(client->irq);
 
index c4e1fa0273c84c3b2e3b438e04673727b05e6f6e..8be4d576da7733d28b8e4a1a07e86a0d11584ae6 100644 (file)
@@ -87,6 +87,7 @@ static int i2c_hid_of_probe(struct i2c_client *client)
        if (!ihid_of)
                return -ENOMEM;
 
+       ihid_of->client = client;
        ihid_of->ops.power_up = i2c_hid_of_power_up;
        ihid_of->ops.power_down = i2c_hid_of_power_down;
 
index e99f3a3c65e1559a8828c3b75435bdb2264fddbd..f89b300417d722b2daad33af542d7580cc1dc541 100644 (file)
@@ -34,6 +34,7 @@
 #define RPL_S_DEVICE_ID                0x7A78
 #define MTL_P_DEVICE_ID                0x7E45
 #define ARL_H_DEVICE_ID                0x7745
+#define ARL_S_DEVICE_ID                0x7F78
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 65e7eeb2fa64ecd2745cd270c6e07451e23c111f..56bd4f02f3191b1bfb2ea53adce2bc8f6788b3fb 100644 (file)
@@ -45,6 +45,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_S_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index aa6cb033bb06b77f182e6df441a04e1b016aaef5..03d5601ce807b3b1d49ed88bc923774d71ace572 100644 (file)
@@ -722,6 +722,8 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
        spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags);
        list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
                cl->state = ISHTP_CL_DISCONNECTED;
+               if (warm_reset && cl->device->reference_count)
+                       continue;
 
                /*
                 * Wake any pending process. The waiter would check dev->state
index 82c907f01bd3b66af02efa1d313f3bb2f7cb7209..8a7f2f6a4f86864cd5783ed51852f56cef614d5f 100644 (file)
@@ -49,7 +49,9 @@ static void ishtp_read_list_flush(struct ishtp_cl *cl)
        list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
                if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
                        list_del(&rb->list);
-                       ishtp_io_rb_free(rb);
+                       spin_lock(&cl->free_list_spinlock);
+                       list_add_tail(&rb->list, &cl->free_rb_list.list);
+                       spin_unlock(&cl->free_list_spinlock);
                }
        spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
 }
index b613f11ed9498d7045f8649496049dc1b0b91839..2bc45b24075c3fe4b70ef222bbd21a4ee11eeb21 100644 (file)
@@ -2087,7 +2087,7 @@ static int wacom_allocate_inputs(struct wacom *wacom)
        return 0;
 }
 
-static int wacom_register_inputs(struct wacom *wacom)
+static int wacom_setup_inputs(struct wacom *wacom)
 {
        struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
        struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
@@ -2106,10 +2106,6 @@ static int wacom_register_inputs(struct wacom *wacom)
                input_free_device(pen_input_dev);
                wacom_wac->pen_input = NULL;
                pen_input_dev = NULL;
-       } else {
-               error = input_register_device(pen_input_dev);
-               if (error)
-                       goto fail;
        }
 
        error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac);
@@ -2118,10 +2114,6 @@ static int wacom_register_inputs(struct wacom *wacom)
                input_free_device(touch_input_dev);
                wacom_wac->touch_input = NULL;
                touch_input_dev = NULL;
-       } else {
-               error = input_register_device(touch_input_dev);
-               if (error)
-                       goto fail;
        }
 
        error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
@@ -2130,7 +2122,34 @@ static int wacom_register_inputs(struct wacom *wacom)
                input_free_device(pad_input_dev);
                wacom_wac->pad_input = NULL;
                pad_input_dev = NULL;
-       } else {
+       }
+
+       return 0;
+}
+
+static int wacom_register_inputs(struct wacom *wacom)
+{
+       struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
+       struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+       int error = 0;
+
+       pen_input_dev = wacom_wac->pen_input;
+       touch_input_dev = wacom_wac->touch_input;
+       pad_input_dev = wacom_wac->pad_input;
+
+       if (pen_input_dev) {
+               error = input_register_device(pen_input_dev);
+               if (error)
+                       goto fail;
+       }
+
+       if (touch_input_dev) {
+               error = input_register_device(touch_input_dev);
+               if (error)
+                       goto fail;
+       }
+
+       if (pad_input_dev) {
                error = input_register_device(pad_input_dev);
                if (error)
                        goto fail;
@@ -2383,6 +2402,20 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
        if (error)
                goto fail;
 
+       error = wacom_setup_inputs(wacom);
+       if (error)
+               goto fail;
+
+       if (features->type == HID_GENERIC)
+               connect_mask |= HID_CONNECT_DRIVER;
+
+       /* Regular HID work starts now */
+       error = hid_hw_start(hdev, connect_mask);
+       if (error) {
+               hid_err(hdev, "hw start failed\n");
+               goto fail;
+       }
+
        error = wacom_register_inputs(wacom);
        if (error)
                goto fail;
@@ -2397,16 +2430,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
                        goto fail;
        }
 
-       if (features->type == HID_GENERIC)
-               connect_mask |= HID_CONNECT_DRIVER;
-
-       /* Regular HID work starts now */
-       error = hid_hw_start(hdev, connect_mask);
-       if (error) {
-               hid_err(hdev, "hw start failed\n");
-               goto fail;
-       }
-
        if (!wireless) {
                /* Note that if query fails it is not a hard failure */
                wacom_query_tablet_data(wacom);
index da8a01fedd3944a7588aad5e2a523b44b2b2797c..fbe10fbc5769e53affe44a0826a55853b306c0ee 100644 (file)
@@ -2575,7 +2575,14 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
                                wacom_wac->hid_data.tipswitch);
                input_report_key(input, wacom_wac->tool[0], sense);
                if (wacom_wac->serial[0]) {
-                       input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+                       /*
+                        * xf86-input-wacom does not accept a serial number
+                        * of '0'. Report the low 32 bits if possible, but
+                        * if they are zero, report the upper ones instead.
+                        */
+                       __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu;
+                       __u32 serial_hi = wacom_wac->serial[0] >> 32;
+                       input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi));
                        input_report_abs(input, ABS_MISC, sense ? id : 0);
                }
 
index 4c1a00f9929e4712ab799af86f1a27f3b6cb80bf..6802efb4d6cdc8cfba37526bb26f1a51e3f17a83 100644 (file)
@@ -355,7 +355,7 @@ static int ssi_add_controller(struct hsi_controller *ssi,
        if (!omap_ssi)
                return -ENOMEM;
 
-       err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL);
+       err = ida_alloc(&platform_omap_ssi_ida, GFP_KERNEL);
        if (err < 0)
                return err;
        ssi->id = err;
@@ -417,7 +417,7 @@ static int ssi_add_controller(struct hsi_controller *ssi,
        return 0;
 
 out_err:
-       ida_simple_remove(&platform_omap_ssi_ida, ssi->id);
+       ida_free(&platform_omap_ssi_ida, ssi->id);
        return err;
 }
 
@@ -451,7 +451,7 @@ static void ssi_remove_controller(struct hsi_controller *ssi)
        tasklet_kill(&omap_ssi->gdd_tasklet);
        hsi_unregister_controller(ssi);
        clk_notifier_unregister(omap_ssi->fck, &omap_ssi->fck_nb);
-       ida_simple_remove(&platform_omap_ssi_ida, id);
+       ida_free(&platform_omap_ssi_ida, id);
 }
 
 static inline int ssi_of_get_available_ports_count(const struct device_node *np)
index 4372f5d146ab22edaf948622648df24e741ec2f6..0285a74363b3d11e35b2e29aa86e1861e1900f00 100644 (file)
@@ -488,7 +488,7 @@ void hv_setup_dma_ops(struct device *dev, bool coherent)
         * Hyper-V does not offer a vIOMMU in the guest
         * VM, so pass 0/NULL for the IOMMU settings
         */
-       arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+       arch_setup_dma_ops(dev, 0, 0, coherent);
 }
 EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
 
index f6e1e55e82922be6f67a98046b4f74c3159625d9..4acc1858d8acf799c20e5c2061431d35adc8db10 100644 (file)
@@ -195,6 +195,8 @@ struct aspeed_pwm_tacho_data {
        u8 fan_tach_ch_source[MAX_ASPEED_FAN_TACH_CHANNELS];
        struct aspeed_cooling_device *cdev[8];
        const struct attribute_group *groups[3];
+       /* protects access to shared ASPEED_PTCR_RESULT */
+       struct mutex tach_lock;
 };
 
 enum type { TYPEM, TYPEN, TYPEO };
@@ -529,6 +531,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
        u8 fan_tach_ch_source, type, mode, both;
        int ret;
 
+       mutex_lock(&priv->tach_lock);
+
        regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0);
        regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch);
 
@@ -546,6 +550,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
                ASPEED_RPM_STATUS_SLEEP_USEC,
                usec);
 
+       mutex_unlock(&priv->tach_lock);
+
        /* return -ETIMEDOUT if we didn't get an answer. */
        if (ret)
                return ret;
@@ -915,6 +921,7 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
+       mutex_init(&priv->tach_lock);
        priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
                        &aspeed_pwm_tacho_regmap_config);
        if (IS_ERR(priv->regmap))
index ba82d1e79c131678c0c673bd5c0d9d77b09fdf1a..b8fc8d1ef20dfcb6132a168425df2d7e2653afa4 100644 (file)
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 
 #define PKG_SYSFS_ATTR_NO      1       /* Sysfs attribute for package temp */
 #define BASE_SYSFS_ATTR_NO     2       /* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES         128     /* Number of Real cores per cpu */
+#define NUM_REAL_CORES         512     /* Number of Real cores per cpu */
 #define CORETEMP_NAME_LENGTH   28      /* String Length of attrs */
 #define MAX_CORE_ATTRS         4       /* Maximum no of basic attrs */
 #define TOTAL_ATTRS            (MAX_CORE_ATTRS + 1)
@@ -419,7 +419,7 @@ static ssize_t show_temp(struct device *dev,
 }
 
 static int create_core_attrs(struct temp_data *tdata, struct device *dev,
-                            int attr_no)
+                            int index)
 {
        int i;
        static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
@@ -431,13 +431,20 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
        };
 
        for (i = 0; i < tdata->attr_size; i++) {
+               /*
+                * We map the attr number to core id of the CPU
+                * The attr number is always core id + 2
+                * The Pkgtemp will always show up as temp1_*, if available
+                */
+               int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2;
+
                snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
                         "temp%d_%s", attr_no, suffixes[i]);
                sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
                tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
                tdata->sd_attrs[i].dev_attr.attr.mode = 0444;
                tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
-               tdata->sd_attrs[i].index = attr_no;
+               tdata->sd_attrs[i].index = index;
                tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
        }
        tdata->attr_group.attrs = tdata->attrs;
@@ -495,30 +502,25 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
        struct platform_data *pdata = platform_get_drvdata(pdev);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 eax, edx;
-       int err, index, attr_no;
+       int err, index;
 
        if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
                return 0;
 
        /*
-        * Find attr number for sysfs:
-        * We map the attr number to core id of the CPU
-        * The attr number is always core id + 2
-        * The Pkgtemp will always show up as temp1_*, if available
+        * Get the index of tdata in pdata->core_data[]
+        * tdata for package: pdata->core_data[1]
+        * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1]
         */
        if (pkg_flag) {
-               attr_no = PKG_SYSFS_ATTR_NO;
+               index = PKG_SYSFS_ATTR_NO;
        } else {
-               index = ida_alloc(&pdata->ida, GFP_KERNEL);
+               index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL);
                if (index < 0)
                        return index;
-               pdata->cpu_map[index] = topology_core_id(cpu);
-               attr_no = index + BASE_SYSFS_ATTR_NO;
-       }
 
-       if (attr_no > MAX_CORE_DATA - 1) {
-               err = -ERANGE;
-               goto ida_free;
+               pdata->cpu_map[index] = topology_core_id(cpu);
+               index += BASE_SYSFS_ATTR_NO;
        }
 
        tdata = init_temp_data(cpu, pkg_flag);
@@ -544,20 +546,20 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
                if (get_ttarget(tdata, &pdev->dev) >= 0)
                        tdata->attr_size++;
 
-       pdata->core_data[attr_no] = tdata;
+       pdata->core_data[index] = tdata;
 
        /* Create sysfs interfaces */
-       err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
+       err = create_core_attrs(tdata, pdata->hwmon_dev, index);
        if (err)
                goto exit_free;
 
        return 0;
 exit_free:
-       pdata->core_data[attr_no] = NULL;
+       pdata->core_data[index] = NULL;
        kfree(tdata);
 ida_free:
        if (!pkg_flag)
-               ida_free(&pdata->ida, index);
+               ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO);
        return err;
 }
 
index 85e5237757142a05b45fdd7006d4c7b2ca61a3c3..8129d7b3ceaf9ae2e851f39af2db7aa6eaca9ce0 100644 (file)
@@ -146,7 +146,7 @@ static int waterforce_get_status(struct waterforce_data *priv)
        /* Send command for getting status */
        ret = waterforce_write_expanded(priv, get_status_cmd, GET_STATUS_CMD_LENGTH);
        if (ret < 0)
-               return ret;
+               goto unlock_and_return;
 
        ret = wait_for_completion_interruptible_timeout(&priv->status_report_received,
                                                        msecs_to_jiffies(STATUS_VALIDITY));
index d9733da8ea34f558e10ec8e27848d22483856232..904816abb7c468c1476d0c4581cea68d1d24a641 100644 (file)
@@ -195,6 +195,7 @@ struct npcm7xx_cooling_device {
 struct npcm7xx_pwm_fan_data {
        void __iomem *pwm_base;
        void __iomem *fan_base;
+       int pwm_modules;
        unsigned long pwm_clk_freq;
        unsigned long fan_clk_freq;
        struct clk *pwm_clk;
@@ -710,7 +711,7 @@ static u32 npcm7xx_pwm_init(struct npcm7xx_pwm_fan_data *data)
        /* Setting PWM Prescale Register value register to both modules */
        prescale_val |= (prescale_val << NPCM7XX_PWM_PRESCALE_SHIFT_CH01);
 
-       for (m = 0; m < NPCM7XX_PWM_MAX_MODULES  ; m++) {
+       for (m = 0; m < data->pwm_modules; m++) {
                iowrite32(prescale_val, NPCM7XX_PWM_REG_PR(data->pwm_base, m));
                iowrite32(NPCM7XX_PWM_PRESCALE2_DEFAULT,
                          NPCM7XX_PWM_REG_CSR(data->pwm_base, m));
@@ -946,6 +947,8 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
        if (!data->info)
                return -EINVAL;
 
+       data->pwm_modules = data->info->pwm_max_channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE;
+
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
        if (!res) {
                dev_err(dev, "pwm resource not found\n");
@@ -983,7 +986,7 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
        output_freq = npcm7xx_pwm_init(data);
        npcm7xx_fan_init(data);
 
-       for (cnt = 0; cnt < NPCM7XX_PWM_MAX_MODULES  ; cnt++)
+       for (cnt = 0; cnt < data->pwm_modules; cnt++)
                mutex_init(&data->pwm_lock[cnt]);
 
        for (i = 0; i < NPCM7XX_FAN_MAX_MODULE; i++) {
index b9bb469e2d8febe1d056e0b8f7d0a0b743d5ff3e..e5fa10b3b8bc7184e03e6caa0701e34f81bcb7cb 100644 (file)
@@ -126,6 +126,21 @@ static const struct regulator_desc __maybe_unused mp2975_reg_desc[] = {
 
 #define to_mp2975_data(x)  container_of(x, struct mp2975_data, info)
 
+static int mp2975_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+       switch (reg) {
+       case PMBUS_VOUT_MODE:
+               /*
+                * Report direct format as configured by MFR_DC_LOOP_CTRL.
+                * Unlike on MP2971/MP2973 the reported VOUT_MODE isn't automatically
+                * internally updated, but always reads as PB_VOUT_MODE_VID.
+                */
+               return PB_VOUT_MODE_DIRECT;
+       default:
+               return -ENODATA;
+       }
+}
+
 static int
 mp2975_read_word_helper(struct i2c_client *client, int page, int phase, u8 reg,
                        u16 mask)
@@ -869,6 +884,7 @@ static struct pmbus_driver_info mp2975_info = {
                PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
                PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT |
                PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT | PMBUS_PHASE_VIRTUAL,
+       .read_byte_data = mp2975_read_byte_data,
        .read_word_data = mp2975_read_word_data,
 #if IS_ENABLED(CONFIG_SENSORS_MP2975_REGULATOR)
        .num_regulators = 1,
index 6e4516c2ab894f4a10195af076e532bb8ba1093a..b67bc9e833c01e1db8d2311c39334866fe44e638 100644 (file)
@@ -151,7 +151,7 @@ static int pwm_fan_power_on(struct pwm_fan_ctx *ctx)
        }
 
        state->enabled = true;
-       ret = pwm_apply_state(ctx->pwm, state);
+       ret = pwm_apply_might_sleep(ctx->pwm, state);
        if (ret) {
                dev_err(ctx->dev, "failed to enable PWM\n");
                goto disable_regulator;
@@ -181,7 +181,7 @@ static int pwm_fan_power_off(struct pwm_fan_ctx *ctx)
 
        state->enabled = false;
        state->duty_cycle = 0;
-       ret = pwm_apply_state(ctx->pwm, state);
+       ret = pwm_apply_might_sleep(ctx->pwm, state);
        if (ret) {
                dev_err(ctx->dev, "failed to disable PWM\n");
                return ret;
@@ -207,7 +207,7 @@ static int  __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
 
                period = state->period;
                state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
-               ret = pwm_apply_state(ctx->pwm, state);
+               ret = pwm_apply_might_sleep(ctx->pwm, state);
                if (ret)
                        return ret;
                ret = pwm_fan_power_on(ctx);
@@ -278,7 +278,7 @@ static int pwm_fan_update_enable(struct pwm_fan_ctx *ctx, long val)
                                                    state,
                                                    &enable_regulator);
 
-                       pwm_apply_state(ctx->pwm, state);
+                       pwm_apply_might_sleep(ctx->pwm, state);
                        pwm_fan_switch_power(ctx, enable_regulator);
                        pwm_fan_update_state(ctx, 0);
                }
index ada694ba9f958cadc5e64cf773624ed726e40c7d..0c0a932c00f35e058b94a3e631c36819c83c5498 100644 (file)
@@ -84,8 +84,9 @@ static DEFINE_MUTEX(hwspinlock_tree_lock);
  * should decide between spin_trylock, spin_trylock_irq and
  * spin_trylock_irqsave.
  *
- * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
+ * Returns: %0 if we successfully locked the hwspinlock or -EBUSY if
  * the hwspinlock was already taken.
+ *
  * This function will never sleep.
  */
 int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
@@ -171,7 +172,7 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
 /**
  * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
  * @hwlock: the hwspinlock to be locked
- * @timeout: timeout value in msecs
+ * @to: timeout value in msecs
  * @mode: mode which controls whether local interrupts are disabled or not
  * @flags: a pointer to where the caller's interrupt state will be saved at (if
  *         requested)
@@ -199,9 +200,11 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
  * to choose the appropriate @mode of operation, exactly the same way users
  * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
  *
- * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * Returns: %0 when the @hwlock was successfully taken, and an appropriate
  * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
- * busy after @timeout msecs). The function will never sleep.
+ * busy after @timeout msecs).
+ *
+ * The function will never sleep.
  */
 int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                                        int mode, unsigned long *flags)
@@ -304,13 +307,12 @@ EXPORT_SYMBOL_GPL(__hwspin_unlock);
 
 /**
  * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
- * @bank: the hwspinlock device bank
  * @hwlock_spec: hwlock specifier as found in the device tree
  *
  * This is a simple translation function, suitable for hwspinlock platform
  * drivers that only has a lock specifier length of 1.
  *
- * Returns a relative index of the lock within a specified bank on success,
+ * Returns: a relative index of the lock within a specified bank on success,
  * or -EINVAL on invalid specifier cell count.
  */
 static inline int
@@ -332,9 +334,10 @@ of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
  * hwspinlock device, so that it can be requested using the normal
  * hwspin_lock_request_specific() API.
  *
- * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
- * device is not yet registered, -EINVAL on invalid args specifier value or an
- * appropriate error as returned from the OF parsing of the DT client node.
+ * Returns: the global lock id number on success, -EPROBE_DEFER if the
+ * hwspinlock device is not yet registered, -EINVAL on invalid args
+ * specifier value or an appropriate error as returned from the OF parsing
+ * of the DT client node.
  */
 int of_hwspin_lock_get_id(struct device_node *np, int index)
 {
@@ -399,9 +402,10 @@ EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
  * the hwspinlock device, so that it can be requested using the normal
  * hwspin_lock_request_specific() API.
  *
- * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
- * device is not yet registered, -EINVAL on invalid args specifier value or an
- * appropriate error as returned from the OF parsing of the DT client node.
+ * Returns: the global lock id number on success, -EPROBE_DEFER if the
+ * hwspinlock device is not yet registered, -EINVAL on invalid args
+ * specifier value or an appropriate error as returned from the OF parsing
+ * of the DT client node.
  */
 int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
 {
@@ -481,7 +485,7 @@ out:
  *
  * Should be called from a process context (might sleep)
  *
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
  */
 int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                const struct hwspinlock_ops *ops, int base_id, int num_locks)
@@ -529,7 +533,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_register);
  *
  * Should be called from a process context (might sleep)
  *
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
  */
 int hwspin_lock_unregister(struct hwspinlock_device *bank)
 {
@@ -578,7 +582,7 @@ static int devm_hwspin_lock_device_match(struct device *dev, void *res,
  *
  * Should be called from a process context (might sleep)
  *
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
  */
 int devm_hwspin_lock_unregister(struct device *dev,
                                struct hwspinlock_device *bank)
@@ -607,7 +611,7 @@ EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);
  *
  * Should be called from a process context (might sleep)
  *
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
  */
 int devm_hwspin_lock_register(struct device *dev,
                              struct hwspinlock_device *bank,
@@ -635,12 +639,13 @@ EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
 
 /**
  * __hwspin_lock_request() - tag an hwspinlock as used and power it up
+ * @hwlock: the target hwspinlock
  *
  * This is an internal function that prepares an hwspinlock instance
  * before it is given to the user. The function assumes that
  * hwspinlock_tree_lock is taken.
  *
- * Returns 0 or positive to indicate success, and a negative value to
+ * Returns: %0 or positive to indicate success, and a negative value to
  * indicate an error (with the appropriate error code)
  */
 static int __hwspin_lock_request(struct hwspinlock *hwlock)
@@ -680,7 +685,7 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
  * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
  * @hwlock: a valid hwspinlock instance
  *
- * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
+ * Returns: the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
  */
 int hwspin_lock_get_id(struct hwspinlock *hwlock)
 {
@@ -704,7 +709,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
  *
  * Should be called from a process context (might sleep)
  *
- * Returns the address of the assigned hwspinlock, or NULL on error
+ * Returns: the address of the assigned hwspinlock, or %NULL on error
  */
 struct hwspinlock *hwspin_lock_request(void)
 {
@@ -747,7 +752,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
  *
  * Should be called from a process context (might sleep)
  *
- * Returns the address of the assigned hwspinlock, or NULL on error
+ * Returns: the address of the assigned hwspinlock, or %NULL on error
  */
 struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 {
@@ -795,7 +800,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
  *
  * Should be called from a process context (might sleep)
  *
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
  */
 int hwspin_lock_free(struct hwspinlock *hwlock)
 {
@@ -865,7 +870,7 @@ static void devm_hwspin_lock_release(struct device *dev, void *res)
  *
  * Should be called from a process context (might sleep)
  *
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
  */
 int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
 {
@@ -891,7 +896,7 @@ EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);
  *
  * Should be called from a process context (might sleep)
  *
- * Returns the address of the assigned hwspinlock, or NULL on error
+ * Returns: the address of the assigned hwspinlock, or %NULL on error
  */
 struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
 {
@@ -926,7 +931,7 @@ EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);
  *
  * Should be called from a process context (might sleep)
  *
- * Returns the address of the assigned hwspinlock, or NULL on error
+ * Returns: the address of the assigned hwspinlock, or %NULL on error
  */
 struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
                                                     unsigned int id)
index a0fd67fd2934448c9879374b48ebee8f6103c0a6..814dfe8697bf3603f2707f5d5d69430876d72679 100644 (file)
@@ -115,7 +115,6 @@ static const struct of_device_id qcom_hwspinlock_of_match[] = {
        { .compatible = "qcom,sfpb-mutex", .data = &of_sfpb_mutex },
        { .compatible = "qcom,tcsr-mutex", .data = &of_tcsr_mutex },
        { .compatible = "qcom,apq8084-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
-       { .compatible = "qcom,ipq6018-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
        { .compatible = "qcom,msm8226-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
        { .compatible = "qcom,msm8974-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
        { .compatible = "qcom,msm8994-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
index 9fabe00a40d6a0b4ec031b4037289808061cd306..d7f0e231feb993458c375caae733279451432ca4 100644 (file)
@@ -1093,6 +1093,7 @@ static int coresight_validate_source(struct coresight_device *csdev,
 
        if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC &&
            subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE &&
+           subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM &&
            subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS) {
                dev_err(&csdev->dev, "wrong device subtype in %s\n", function);
                return -EINVAL;
@@ -1162,6 +1163,7 @@ int coresight_enable(struct coresight_device *csdev)
                per_cpu(tracer_path, cpu) = path;
                break;
        case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+       case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
        case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
                /*
                 * Use the hash of source's device name as ID
@@ -1212,6 +1214,7 @@ void coresight_disable(struct coresight_device *csdev)
                per_cpu(tracer_path, cpu) = NULL;
                break;
        case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+       case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
        case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
                hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
                /* Find the path by the hash. */
index e4deafae7bc2078b585f294cabb43dfa051fd234..ac70c0b491bebd647dd3f44a32f42731e2413e90 100644 (file)
@@ -122,14 +122,13 @@ static int dummy_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int dummy_remove(struct platform_device *pdev)
+static void dummy_remove(struct platform_device *pdev)
 {
        struct dummy_drvdata *drvdata = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
 
        pm_runtime_disable(dev);
        coresight_unregister(drvdata->csdev);
-       return 0;
 }
 
 static const struct of_device_id dummy_match[] = {
@@ -140,7 +139,7 @@ static const struct of_device_id dummy_match[] = {
 
 static struct platform_driver dummy_driver = {
        .probe  = dummy_probe,
-       .remove = dummy_remove,
+       .remove_new = dummy_remove,
        .driver = {
                .name   = "coresight-dummy",
                .of_match_table = dummy_match,
index 89e8ed214ea4967620c20ad1c1a1e39f9d068400..a52cfcce25d6ddef4ee48eac1f049f7b13143b50 100644 (file)
@@ -68,6 +68,7 @@ PMU_FORMAT_ATTR(preset,               "config:0-3");
 PMU_FORMAT_ATTR(sinkid,                "config2:0-31");
 /* config ID - set if a system configuration is selected */
 PMU_FORMAT_ATTR(configid,      "config2:32-63");
+PMU_FORMAT_ATTR(cc_threshold,  "config3:0-11");
 
 
 /*
@@ -101,6 +102,7 @@ static struct attribute *etm_config_formats_attr[] = {
        &format_attr_preset.attr,
        &format_attr_configid.attr,
        &format_attr_branch_broadcast.attr,
+       &format_attr_cc_threshold.attr,
        NULL,
 };
 
index 34aee59dd14739504f2a82588d049befc0d06970..ce1995a2827f01bbe164eaa95a845e9707254364 100644 (file)
@@ -644,7 +644,7 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
        struct etmv4_config *config = &drvdata->config;
        struct perf_event_attr *attr = &event->attr;
        unsigned long cfg_hash;
-       int preset;
+       int preset, cc_threshold;
 
        /* Clear configuration from previous run */
        memset(config, 0, sizeof(struct etmv4_config));
@@ -667,7 +667,12 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
        if (attr->config & BIT(ETM_OPT_CYCACC)) {
                config->cfg |= TRCCONFIGR_CCI;
                /* TRM: Must program this for cycacc to work */
-               config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
+               cc_threshold = attr->config3 & ETM_CYC_THRESHOLD_MASK;
+               if (!cc_threshold)
+                       cc_threshold = ETM_CYC_THRESHOLD_DEFAULT;
+               if (cc_threshold < drvdata->ccitmin)
+                       cc_threshold = drvdata->ccitmin;
+               config->ccctlr = cc_threshold;
        }
        if (attr->config & BIT(ETM_OPT_TS)) {
                /*
@@ -1150,6 +1155,41 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
        drvdata->trfcr = trfcr;
 }
 
+/*
+ * The following errata on applicable cpu ranges, affect the CCITMIN field
+ * in TRCIDR3 register. Software read for the field returns 0x100 limiting
+ * the cycle threshold granularity, whereas the right value should have
+ * been 0x4, which is well supported in the hardware.
+ */
+static struct midr_range etm_wrong_ccitmin_cpus[] = {
+       /* Erratum #1490853 - Cortex-A76 */
+       MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 4, 0),
+       /* Erratum #1490853 - Neoverse-N1 */
+       MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 4, 0),
+       /* Erratum #1491015 - Cortex-A77 */
+       MIDR_RANGE(MIDR_CORTEX_A77, 0, 0, 1, 0),
+       /* Erratum #1502854 - Cortex-X1 */
+       MIDR_REV(MIDR_CORTEX_X1, 0, 0),
+       /* Erratum #1619801 - Neoverse-V1 */
+       MIDR_REV(MIDR_NEOVERSE_V1, 0, 0),
+       {},
+};
+
+static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata)
+{
+       /*
+        * Erratum affected cpus will read 256 as the minimum
+        * instruction trace cycle counting threshold whereas
+        * the correct value should be 4 instead. Override the
+        * recorded value for 'drvdata->ccitmin' to workaround
+        * this problem.
+        */
+       if (is_midr_in_range_list(read_cpuid_id(), etm_wrong_ccitmin_cpus)) {
+               if (drvdata->ccitmin == 256)
+                       drvdata->ccitmin = 4;
+       }
+}
+
 static void etm4_init_arch_data(void *info)
 {
        u32 etmidr0;
@@ -1214,6 +1254,8 @@ static void etm4_init_arch_data(void *info)
        etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
        /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
        drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3);
+       etm4_fixup_wrong_ccitmin(drvdata);
+
        /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
        drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3);
        drvdata->config.s_ex_level = drvdata->s_ex_level;
@@ -2261,7 +2303,7 @@ static void etm4_remove_amba(struct amba_device *adev)
                etm4_remove_dev(drvdata);
 }
 
-static int etm4_remove_platform_dev(struct platform_device *pdev)
+static void etm4_remove_platform_dev(struct platform_device *pdev)
 {
        struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
 
@@ -2271,8 +2313,6 @@ static int etm4_remove_platform_dev(struct platform_device *pdev)
 
        if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
                clk_put(drvdata->pclk);
-
-       return 0;
 }
 
 static const struct amba_id etm4_ids[] = {
@@ -2358,7 +2398,7 @@ MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids);
 
 static struct platform_driver etm4_platform_driver = {
        .probe          = etm4_probe_platform_dev,
-       .remove         = etm4_remove_platform_dev,
+       .remove_new     = etm4_remove_platform_dev,
        .driver                 = {
                .name                   = "coresight-etm4x",
                .of_match_table         = etm4_sysreg_match,
index 20e2e4cb7614628e9f7319d8dc67214f02579351..da17b6c49b0f1ae8467b9de7e2b6249c0a832bd3 100644 (file)
@@ -1036,7 +1036,7 @@ struct etmv4_drvdata {
        u8                              ctxid_size;
        u8                              vmid_size;
        u8                              ccsize;
-       u                             ccitmin;
+       u16                             ccitmin;
        u8                              s_ex_level;
        u8                              ns_ex_level;
        u8                              q_support;
index b8e150e45b272d21409696cab6da8bef9d8e2aa4..a5b1fc787766a0eaea9733374a1288817ec63c24 100644 (file)
@@ -335,11 +335,10 @@ static int static_funnel_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int static_funnel_remove(struct platform_device *pdev)
+static void static_funnel_remove(struct platform_device *pdev)
 {
        funnel_remove(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       return 0;
 }
 
 static const struct of_device_id static_funnel_match[] = {
@@ -360,7 +359,7 @@ MODULE_DEVICE_TABLE(acpi, static_funnel_ids);
 
 static struct platform_driver static_funnel_driver = {
        .probe          = static_funnel_probe,
-       .remove          = static_funnel_remove,
+       .remove_new      = static_funnel_remove,
        .driver         = {
                .name   = "coresight-static-funnel",
                /* THIS_MODULE is taken care of by platform_driver_register() */
index b6be730349968a4218b348b9684a8d220518cdcc..91d93060dda5355c3de3fcf04498918547f8c584 100644 (file)
@@ -320,11 +320,10 @@ static int static_replicator_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int static_replicator_remove(struct platform_device *pdev)
+static void static_replicator_remove(struct platform_device *pdev)
 {
        replicator_remove(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       return 0;
 }
 
 #ifdef CONFIG_PM
@@ -373,7 +372,7 @@ MODULE_DEVICE_TABLE(acpi, static_replicator_acpi_ids);
 
 static struct platform_driver static_replicator_driver = {
        .probe          = static_replicator_probe,
-       .remove         = static_replicator_remove,
+       .remove_new     = static_replicator_remove,
        .driver         = {
                .name   = "coresight-static-replicator",
                /* THIS_MODULE is taken care of by platform_driver_register() */
index c106d142e63221df0e7ebc42c9a2799495199b2a..7ec5365e2b6429dcc3da499d7cef9ba2ab0967a5 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/idr.h>
 #include <linux/io.h>
+#include <linux/iommu.h>
 #include <linux/err.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
@@ -344,7 +345,14 @@ static const struct attribute_group coresight_tmc_mgmt_group = {
        .name = "mgmt",
 };
 
-static const struct attribute_group *coresight_tmc_groups[] = {
+static const struct attribute_group *coresight_etf_groups[] = {
+       &coresight_tmc_group,
+       &coresight_tmc_mgmt_group,
+       NULL,
+};
+
+static const struct attribute_group *coresight_etr_groups[] = {
+       &coresight_etr_group,
        &coresight_tmc_group,
        &coresight_tmc_mgmt_group,
        NULL,
@@ -465,6 +473,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
        drvdata->memwidth = tmc_get_memwidth(devid);
        /* This device is not associated with a session */
        drvdata->pid = -1;
+       drvdata->etr_mode = ETR_MODE_AUTO;
 
        if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
                drvdata->size = tmc_etr_get_default_buffer_size(dev);
@@ -474,16 +483,17 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
        }
 
        desc.dev = dev;
-       desc.groups = coresight_tmc_groups;
 
        switch (drvdata->config_type) {
        case TMC_CONFIG_TYPE_ETB:
+               desc.groups = coresight_etf_groups;
                desc.type = CORESIGHT_DEV_TYPE_SINK;
                desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
                desc.ops = &tmc_etb_cs_ops;
                dev_list = &etb_devs;
                break;
        case TMC_CONFIG_TYPE_ETR:
+               desc.groups = coresight_etr_groups;
                desc.type = CORESIGHT_DEV_TYPE_SINK;
                desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
                desc.ops = &tmc_etr_cs_ops;
@@ -496,6 +506,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
                dev_list = &etr_devs;
                break;
        case TMC_CONFIG_TYPE_ETF:
+               desc.groups = coresight_etf_groups;
                desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
                desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
                desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
index 8311e1028ddb03096f207798f9226e29b4025efc..af02ba5d5f15de7160421f8ba4d256907e4bd7dd 100644 (file)
@@ -26,6 +26,12 @@ struct etr_flat_buf {
        size_t          size;
 };
 
+struct etr_buf_hw {
+       bool    has_iommu;
+       bool    has_etr_sg;
+       bool    has_catu;
+};
+
 /*
  * etr_perf_buffer - Perf buffer used for ETR
  * @drvdata            - The ETR drvdaga this buffer has been allocated for.
@@ -830,6 +836,22 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
        }
 }
 
+static void get_etr_buf_hw(struct device *dev, struct etr_buf_hw *buf_hw)
+{
+       struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       buf_hw->has_iommu = iommu_get_domain_for_dev(dev->parent);
+       buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
+       buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata);
+}
+
+static bool etr_can_use_flat_mode(struct etr_buf_hw *buf_hw, ssize_t etr_buf_size)
+{
+       bool has_sg = buf_hw->has_catu || buf_hw->has_etr_sg;
+
+       return !has_sg || buf_hw->has_iommu || etr_buf_size < SZ_1M;
+}
+
 /*
  * tmc_alloc_etr_buf: Allocate a buffer use by ETR.
  * @drvdata    : ETR device details.
@@ -843,23 +865,22 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
                                         int node, void **pages)
 {
        int rc = -ENOMEM;
-       bool has_etr_sg, has_iommu;
-       bool has_sg, has_catu;
        struct etr_buf *etr_buf;
+       struct etr_buf_hw buf_hw;
        struct device *dev = &drvdata->csdev->dev;
 
-       has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
-       has_iommu = iommu_get_domain_for_dev(dev->parent);
-       has_catu = !!tmc_etr_get_catu_device(drvdata);
-
-       has_sg = has_catu || has_etr_sg;
-
+       get_etr_buf_hw(dev, &buf_hw);
        etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
        if (!etr_buf)
                return ERR_PTR(-ENOMEM);
 
        etr_buf->size = size;
 
+       /* If there is user directive for buffer mode, try that first */
+       if (drvdata->etr_mode != ETR_MODE_AUTO)
+               rc = tmc_etr_mode_alloc_buf(drvdata->etr_mode, drvdata,
+                                           etr_buf, node, pages);
+
        /*
         * If we have to use an existing list of pages, we cannot reliably
         * use a contiguous DMA memory (even if we have an IOMMU). Otherwise,
@@ -872,14 +893,13 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
         * Fallback to available mechanisms.
         *
         */
-       if (!pages &&
-           (!has_sg || has_iommu || size < SZ_1M))
+       if (rc && !pages && etr_can_use_flat_mode(&buf_hw, size))
                rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
                                            etr_buf, node, pages);
-       if (rc && has_etr_sg)
+       if (rc && buf_hw.has_etr_sg)
                rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
                                            etr_buf, node, pages);
-       if (rc && has_catu)
+       if (rc && buf_hw.has_catu)
                rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
                                            etr_buf, node, pages);
        if (rc) {
@@ -1804,3 +1824,70 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 
        return 0;
 }
+
+static const char *const buf_modes_str[] = {
+       [ETR_MODE_FLAT]         = "flat",
+       [ETR_MODE_ETR_SG]       = "tmc-sg",
+       [ETR_MODE_CATU]         = "catu",
+       [ETR_MODE_AUTO]         = "auto",
+};
+
+static ssize_t buf_modes_available_show(struct device *dev,
+                                           struct device_attribute *attr, char *buf)
+{
+       struct etr_buf_hw buf_hw;
+       ssize_t size = 0;
+
+       get_etr_buf_hw(dev, &buf_hw);
+       size += sysfs_emit(buf, "%s ", buf_modes_str[ETR_MODE_AUTO]);
+       size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_FLAT]);
+       if (buf_hw.has_etr_sg)
+               size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_ETR_SG]);
+
+       if (buf_hw.has_catu)
+               size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_CATU]);
+
+       size += sysfs_emit_at(buf, size, "\n");
+       return size;
+}
+static DEVICE_ATTR_RO(buf_modes_available);
+
+static ssize_t buf_mode_preferred_show(struct device *dev,
+                                        struct device_attribute *attr, char *buf)
+{
+       struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       return sysfs_emit(buf, "%s\n", buf_modes_str[drvdata->etr_mode]);
+}
+
+static ssize_t buf_mode_preferred_store(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t size)
+{
+       struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct etr_buf_hw buf_hw;
+
+       get_etr_buf_hw(dev, &buf_hw);
+       if (sysfs_streq(buf, buf_modes_str[ETR_MODE_FLAT]))
+               drvdata->etr_mode = ETR_MODE_FLAT;
+       else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_ETR_SG]) && buf_hw.has_etr_sg)
+               drvdata->etr_mode = ETR_MODE_ETR_SG;
+       else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_CATU]) && buf_hw.has_catu)
+               drvdata->etr_mode = ETR_MODE_CATU;
+       else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_AUTO]))
+               drvdata->etr_mode = ETR_MODE_AUTO;
+       else
+               return -EINVAL;
+       return size;
+}
+static DEVICE_ATTR_RW(buf_mode_preferred);
+
+static struct attribute *coresight_etr_attrs[] = {
+       &dev_attr_buf_modes_available.attr,
+       &dev_attr_buf_mode_preferred.attr,
+       NULL,
+};
+
+const struct attribute_group coresight_etr_group = {
+       .attrs = coresight_etr_attrs,
+};
index 0ee48c5ba764d1f7e9c943540138b953f80e9a33..8dcb426ac3e7aa259393ab2e550d4bf186dbbc46 100644 (file)
@@ -135,6 +135,7 @@ enum etr_mode {
        ETR_MODE_FLAT,          /* Uses contiguous flat buffer */
        ETR_MODE_ETR_SG,        /* Uses in-built TMC ETR SG mechanism */
        ETR_MODE_CATU,          /* Use SG mechanism in CATU */
+       ETR_MODE_AUTO,          /* Use the default mechanism */
 };
 
 struct etr_buf_operations;
@@ -207,6 +208,7 @@ struct tmc_drvdata {
        enum tmc_mem_intf_width memwidth;
        u32                     trigger_cntr;
        u32                     etr_caps;
+       enum etr_mode           etr_mode;
        struct idr              idr;
        struct mutex            idr_mutex;
        struct etr_buf          *sysfs_buf;
@@ -334,5 +336,6 @@ void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu);
 void tmc_etr_remove_catu_ops(void);
 struct etr_buf *tmc_etr_get_buffer(struct coresight_device *csdev,
                                   enum cs_mode mode, void *data);
+extern const struct attribute_group coresight_etr_group;
 
 #endif
index 8d2b9d29237d47323515c54dc017dc0e20ca8d85..5f82737c37bba5b063c8a80c86e78227b7759cc9 100644 (file)
 
 DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
 
+static bool coresight_device_is_tpdm(struct coresight_device *csdev)
+{
+       return (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+              (csdev->subtype.source_subtype ==
+                       CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM);
+}
+
+/*
+ * Read the DSB element size from the TPDM device
+ * Returns
+ *    The dsb element size read from the devicetree if available.
+ *    0 - Otherwise, with a warning once.
+ */
+static int tpdm_read_dsb_element_size(struct coresight_device *csdev)
+{
+       int rc = 0;
+       u8 size = 0;
+
+       rc = fwnode_property_read_u8(dev_fwnode(csdev->dev.parent),
+                       "qcom,dsb-element-size", &size);
+       if (rc)
+               dev_warn_once(&csdev->dev,
+                       "Failed to read TPDM DSB Element size: %d\n", rc);
+
+       return size;
+}
+
+/*
+ * Search and read element data size from the TPDM node in
+ * the devicetree. Each input port of TPDA is connected to
+ * a TPDM. Different TPDM supports different types of dataset,
+ * and some may support more than one type of dataset.
+ * Parameter "inport" is used to pass in the input port number
+ * of TPDA, and it is set to -1 in the recursive call.
+ */
+static int tpda_get_element_size(struct coresight_device *csdev,
+                                int inport)
+{
+       int dsb_size = -ENOENT;
+       int i, size;
+       struct coresight_device *in;
+
+       for (i = 0; i < csdev->pdata->nr_inconns; i++) {
+               in = csdev->pdata->in_conns[i]->src_dev;
+               if (!in)
+                       continue;
+
+               /* Ignore the paths that do not match port */
+               if (inport > 0 &&
+                   csdev->pdata->in_conns[i]->dest_port != inport)
+                       continue;
+
+               if (coresight_device_is_tpdm(in)) {
+                       size = tpdm_read_dsb_element_size(in);
+               } else {
+                       /* Recurse down the path */
+                       size = tpda_get_element_size(in, -1);
+               }
+
+               if (size < 0)
+                       return size;
+
+               if (dsb_size < 0) {
+                       /* Found a size, save it. */
+                       dsb_size = size;
+               } else {
+                       /* Found duplicate TPDMs */
+                       return -EEXIST;
+               }
+       }
+
+       return dsb_size;
+}
+
 /* Settings pre enabling port control register */
 static void tpda_enable_pre_port(struct tpda_drvdata *drvdata)
 {
@@ -32,26 +106,55 @@ static void tpda_enable_pre_port(struct tpda_drvdata *drvdata)
        writel_relaxed(val, drvdata->base + TPDA_CR);
 }
 
-static void tpda_enable_port(struct tpda_drvdata *drvdata, int port)
+static int tpda_enable_port(struct tpda_drvdata *drvdata, int port)
 {
        u32 val;
+       int size;
 
        val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port));
+       /*
+        * Configure aggregator port n DSB data set element size
+        * Set the bit to 0 if the size is 32
+        * Set the bit to 1 if the size is 64
+        */
+       size = tpda_get_element_size(drvdata->csdev, port);
+       switch (size) {
+       case 32:
+               val &= ~TPDA_Pn_CR_DSBSIZE;
+               break;
+       case 64:
+               val |= TPDA_Pn_CR_DSBSIZE;
+               break;
+       case 0:
+               return -EEXIST;
+       case -EEXIST:
+               dev_warn_once(&drvdata->csdev->dev,
+                       "Detected multiple TPDMs on port %d", -EEXIST);
+               return -EEXIST;
+       default:
+               return -EINVAL;
+       }
+
        /* Enable the port */
        val |= TPDA_Pn_CR_ENA;
        writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
+
+       return 0;
 }
 
-static void __tpda_enable(struct tpda_drvdata *drvdata, int port)
+static int __tpda_enable(struct tpda_drvdata *drvdata, int port)
 {
+       int ret;
+
        CS_UNLOCK(drvdata->base);
 
        if (!drvdata->csdev->enable)
                tpda_enable_pre_port(drvdata);
 
-       tpda_enable_port(drvdata, port);
-
+       ret = tpda_enable_port(drvdata, port);
        CS_LOCK(drvdata->base);
+
+       return ret;
 }
 
 static int tpda_enable(struct coresight_device *csdev,
@@ -59,16 +162,19 @@ static int tpda_enable(struct coresight_device *csdev,
                       struct coresight_connection *out)
 {
        struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       int ret = 0;
 
        spin_lock(&drvdata->spinlock);
-       if (atomic_read(&in->dest_refcnt) == 0)
-               __tpda_enable(drvdata, in->dest_port);
+       if (atomic_read(&in->dest_refcnt) == 0) {
+               ret = __tpda_enable(drvdata, in->dest_port);
+               if (!ret) {
+                       atomic_inc(&in->dest_refcnt);
+                       dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
+               }
+       }
 
-       atomic_inc(&in->dest_refcnt);
        spin_unlock(&drvdata->spinlock);
-
-       dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
-       return 0;
+       return ret;
 }
 
 static void __tpda_disable(struct tpda_drvdata *drvdata, int port)
index 0399678df312fb3674df3d101e059e6b850d538b..b3b38fd41b64bc84ceacff3926fe34341f8f1a25 100644 (file)
@@ -10,6 +10,8 @@
 #define TPDA_Pn_CR(n)          (0x004 + (n * 4))
 /* Aggregator port enable bit */
 #define TPDA_Pn_CR_ENA         BIT(0)
+/* Aggregator port DSB data set element size bit */
+#define TPDA_Pn_CR_DSBSIZE             BIT(8)
 
 #define TPDA_MAX_INPORTS       32
 
index f4854af0431e11069ec521311fd1225e53490174..97654aa4b772a07307f798ed93a79fb760960ed0 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/amba/bus.h>
+#include <linux/bitfield.h>
 #include <linux/bitmap.h>
 #include <linux/coresight.h>
 #include <linux/coresight-pmu.h>
 
 DEFINE_CORESIGHT_DEVLIST(tpdm_devs, "tpdm");
 
-static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
+/* Read dataset array member with the index number */
+static ssize_t tpdm_simple_dataset_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct tpdm_dataset_attribute *tpdm_attr =
+               container_of(attr, struct tpdm_dataset_attribute, attr);
+
+       switch (tpdm_attr->mem) {
+       case DSB_EDGE_CTRL:
+               if (tpdm_attr->idx >= TPDM_DSB_MAX_EDCR)
+                       return -EINVAL;
+               return sysfs_emit(buf, "0x%x\n",
+                       drvdata->dsb->edge_ctrl[tpdm_attr->idx]);
+       case DSB_EDGE_CTRL_MASK:
+               if (tpdm_attr->idx >= TPDM_DSB_MAX_EDCMR)
+                       return -EINVAL;
+               return sysfs_emit(buf, "0x%x\n",
+                       drvdata->dsb->edge_ctrl_mask[tpdm_attr->idx]);
+       case DSB_TRIG_PATT:
+               if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
+                       return -EINVAL;
+               return sysfs_emit(buf, "0x%x\n",
+                       drvdata->dsb->trig_patt[tpdm_attr->idx]);
+       case DSB_TRIG_PATT_MASK:
+               if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
+                       return -EINVAL;
+               return sysfs_emit(buf, "0x%x\n",
+                       drvdata->dsb->trig_patt_mask[tpdm_attr->idx]);
+       case DSB_PATT:
+               if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
+                       return -EINVAL;
+               return sysfs_emit(buf, "0x%x\n",
+                       drvdata->dsb->patt_val[tpdm_attr->idx]);
+       case DSB_PATT_MASK:
+               if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
+                       return -EINVAL;
+               return sysfs_emit(buf, "0x%x\n",
+                       drvdata->dsb->patt_mask[tpdm_attr->idx]);
+       case DSB_MSR:
+               if (tpdm_attr->idx >= drvdata->dsb_msr_num)
+                       return -EINVAL;
+               return sysfs_emit(buf, "0x%x\n",
+                               drvdata->dsb->msr[tpdm_attr->idx]);
+       }
+       return -EINVAL;
+}
+
+/* Write dataset array member with the index number */
+static ssize_t tpdm_simple_dataset_store(struct device *dev,
+                                        struct device_attribute *attr,
+                                        const char *buf,
+                                        size_t size)
+{
+       unsigned long val;
+       ssize_t ret = size;
+
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct tpdm_dataset_attribute *tpdm_attr =
+               container_of(attr, struct tpdm_dataset_attribute, attr);
+
+       if (kstrtoul(buf, 0, &val))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       switch (tpdm_attr->mem) {
+       case DSB_TRIG_PATT:
+               if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+                       drvdata->dsb->trig_patt[tpdm_attr->idx] = val;
+               else
+                       ret = -EINVAL;
+               break;
+       case DSB_TRIG_PATT_MASK:
+               if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+                       drvdata->dsb->trig_patt_mask[tpdm_attr->idx] = val;
+               else
+                       ret = -EINVAL;
+               break;
+       case DSB_PATT:
+               if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+                       drvdata->dsb->patt_val[tpdm_attr->idx] = val;
+               else
+                       ret = -EINVAL;
+               break;
+       case DSB_PATT_MASK:
+               if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+                       drvdata->dsb->patt_mask[tpdm_attr->idx] = val;
+               else
+                       ret = -EINVAL;
+               break;
+       case DSB_MSR:
+               if (tpdm_attr->idx < drvdata->dsb_msr_num)
+                       drvdata->dsb->msr[tpdm_attr->idx] = val;
+               else
+                       ret = -EINVAL;
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       spin_unlock(&drvdata->spinlock);
+
+       return ret;
+}
+
+static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata)
+{
+       return (drvdata->datasets & TPDM_PIDR0_DS_DSB);
+}
+
+static umode_t tpdm_dsb_is_visible(struct kobject *kobj,
+                                  struct attribute *attr, int n)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       if (drvdata && tpdm_has_dsb_dataset(drvdata))
+               return attr->mode;
+
+       return 0;
+}
+
+static umode_t tpdm_dsb_msr_is_visible(struct kobject *kobj,
+                                      struct attribute *attr, int n)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       struct device_attribute *dev_attr =
+               container_of(attr, struct device_attribute, attr);
+       struct tpdm_dataset_attribute *tpdm_attr =
+               container_of(dev_attr, struct tpdm_dataset_attribute, attr);
+
+       if (tpdm_attr->idx < drvdata->dsb_msr_num)
+               return attr->mode;
+
+       return 0;
+}
+
+static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata)
+{
+       if (tpdm_has_dsb_dataset(drvdata)) {
+               memset(drvdata->dsb, 0, sizeof(struct dsb_dataset));
+
+               drvdata->dsb->trig_ts = true;
+               drvdata->dsb->trig_type = false;
+       }
+}
+
+static void set_dsb_mode(struct tpdm_drvdata *drvdata, u32 *val)
+{
+       u32 mode;
+
+       /* Set the test accurate mode */
+       mode = TPDM_DSB_MODE_TEST(drvdata->dsb->mode);
+       *val &= ~TPDM_DSB_CR_TEST_MODE;
+       *val |= FIELD_PREP(TPDM_DSB_CR_TEST_MODE, mode);
+
+       /* Set the byte lane for high-performance mode */
+       mode = TPDM_DSB_MODE_HPBYTESEL(drvdata->dsb->mode);
+       *val &= ~TPDM_DSB_CR_HPSEL;
+       *val |= FIELD_PREP(TPDM_DSB_CR_HPSEL, mode);
+
+       /* Set the performance mode */
+       if (drvdata->dsb->mode & TPDM_DSB_MODE_PERF)
+               *val |= TPDM_DSB_CR_MODE;
+       else
+               *val &= ~TPDM_DSB_CR_MODE;
+}
+
+static void set_dsb_tier(struct tpdm_drvdata *drvdata)
 {
        u32 val;
 
-       /* Set the enable bit of DSB control register to 1 */
+       val = readl_relaxed(drvdata->base + TPDM_DSB_TIER);
+
+       /* Clear all relevant fields */
+       val &= ~(TPDM_DSB_TIER_PATT_TSENAB | TPDM_DSB_TIER_PATT_TYPE |
+                TPDM_DSB_TIER_XTRIG_TSENAB);
+
+       /* Set pattern timestamp type and enablement */
+       if (drvdata->dsb->patt_ts) {
+               val |= TPDM_DSB_TIER_PATT_TSENAB;
+               if (drvdata->dsb->patt_type)
+                       val |= TPDM_DSB_TIER_PATT_TYPE;
+               else
+                       val &= ~TPDM_DSB_TIER_PATT_TYPE;
+       } else {
+               val &= ~TPDM_DSB_TIER_PATT_TSENAB;
+       }
+
+       /* Set trigger timestamp */
+       if (drvdata->dsb->trig_ts)
+               val |= TPDM_DSB_TIER_XTRIG_TSENAB;
+       else
+               val &= ~TPDM_DSB_TIER_XTRIG_TSENAB;
+
+       writel_relaxed(val, drvdata->base + TPDM_DSB_TIER);
+}
+
+static void set_dsb_msr(struct tpdm_drvdata *drvdata)
+{
+       int i;
+
+       for (i = 0; i < drvdata->dsb_msr_num; i++)
+               writel_relaxed(drvdata->dsb->msr[i],
+                          drvdata->base + TPDM_DSB_MSR(i));
+}
+
+static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
+{
+       u32 val, i;
+
+       for (i = 0; i < TPDM_DSB_MAX_EDCR; i++)
+               writel_relaxed(drvdata->dsb->edge_ctrl[i],
+                          drvdata->base + TPDM_DSB_EDCR(i));
+       for (i = 0; i < TPDM_DSB_MAX_EDCMR; i++)
+               writel_relaxed(drvdata->dsb->edge_ctrl_mask[i],
+                          drvdata->base + TPDM_DSB_EDCMR(i));
+       for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+               writel_relaxed(drvdata->dsb->patt_val[i],
+                          drvdata->base + TPDM_DSB_TPR(i));
+               writel_relaxed(drvdata->dsb->patt_mask[i],
+                          drvdata->base + TPDM_DSB_TPMR(i));
+               writel_relaxed(drvdata->dsb->trig_patt[i],
+                          drvdata->base + TPDM_DSB_XPR(i));
+               writel_relaxed(drvdata->dsb->trig_patt_mask[i],
+                          drvdata->base + TPDM_DSB_XPMR(i));
+       }
+
+       set_dsb_tier(drvdata);
+
+       set_dsb_msr(drvdata);
+
        val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
+       /* Set the mode of DSB dataset */
+       set_dsb_mode(drvdata, &val);
+       /* Set trigger type */
+       if (drvdata->dsb->trig_type)
+               val |= TPDM_DSB_CR_TRIG_TYPE;
+       else
+               val &= ~TPDM_DSB_CR_TRIG_TYPE;
+       /* Set the enable bit of DSB control register to 1 */
        val |= TPDM_DSB_CR_ENA;
        writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
 }
 
-/* TPDM enable operations */
+/*
+ * TPDM enable operations
+ * The TPDM or Monitor serves as a data collection component for various
+ * dataset types. It covers Basic Counts(BC), Tenure Counts(TC),
+ * Continuous Multi-Bit(CMB), Multi-lane CMB(MCMB) and Discrete Single
+ * Bit(DSB). This function will initialize the configuration according
+ * to the dataset type supported by the TPDM.
+ */
 static void __tpdm_enable(struct tpdm_drvdata *drvdata)
 {
        CS_UNLOCK(drvdata->base);
 
-       /* Check if DSB datasets is present for TPDM. */
-       if (drvdata->datasets & TPDM_PIDR0_DS_DSB)
+       if (tpdm_has_dsb_dataset(drvdata))
                tpdm_enable_dsb(drvdata);
 
        CS_LOCK(drvdata->base);
@@ -76,8 +319,7 @@ static void __tpdm_disable(struct tpdm_drvdata *drvdata)
 {
        CS_UNLOCK(drvdata->base);
 
-       /* Check if DSB datasets is present for TPDM. */
-       if (drvdata->datasets & TPDM_PIDR0_DS_DSB)
+       if (tpdm_has_dsb_dataset(drvdata))
                tpdm_disable_dsb(drvdata);
 
        CS_LOCK(drvdata->base);
@@ -110,16 +352,45 @@ static const struct coresight_ops tpdm_cs_ops = {
        .source_ops     = &tpdm_source_ops,
 };
 
-static void tpdm_init_default_data(struct tpdm_drvdata *drvdata)
+static int tpdm_datasets_setup(struct tpdm_drvdata *drvdata)
 {
        u32 pidr;
 
-       CS_UNLOCK(drvdata->base);
        /*  Get the datasets present on the TPDM. */
        pidr = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR0);
        drvdata->datasets |= pidr & GENMASK(TPDM_DATASETS - 1, 0);
-       CS_LOCK(drvdata->base);
+
+       if (tpdm_has_dsb_dataset(drvdata) && (!drvdata->dsb)) {
+               drvdata->dsb = devm_kzalloc(drvdata->dev,
+                                               sizeof(*drvdata->dsb), GFP_KERNEL);
+               if (!drvdata->dsb)
+                       return -ENOMEM;
+       }
+       tpdm_reset_datasets(drvdata);
+
+       return 0;
+}
+
+static ssize_t reset_dataset_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf,
+                                  size_t size)
+{
+       int ret = 0;
+       unsigned long val;
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret || val != 1)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       tpdm_reset_datasets(drvdata);
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
 }
+static DEVICE_ATTR_WO(reset_dataset);
 
 /*
  * value 1: 64 bits test data
@@ -161,6 +432,7 @@ static ssize_t integration_test_store(struct device *dev,
 static DEVICE_ATTR_WO(integration_test);
 
 static struct attribute *tpdm_attrs[] = {
+       &dev_attr_reset_dataset.attr,
        &dev_attr_integration_test.attr,
        NULL,
 };
@@ -169,8 +441,421 @@ static struct attribute_group tpdm_attr_grp = {
        .attrs = tpdm_attrs,
 };
 
+static ssize_t dsb_mode_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       return sysfs_emit(buf, "%x\n", drvdata->dsb->mode);
+}
+
+static ssize_t dsb_mode_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf,
+                             size_t size)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+
+       if ((kstrtoul(buf, 0, &val)) || (val < 0) ||
+                       (val & ~TPDM_DSB_MODE_MASK))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->dsb->mode = val & TPDM_DSB_MODE_MASK;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(dsb_mode);
+
+static ssize_t ctrl_idx_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       return sysfs_emit(buf, "%u\n",
+                       (unsigned int)drvdata->dsb->edge_ctrl_idx);
+}
+
+/*
+ * The EDCR registers can include up to 16 32-bit registers, and each
+ * one can be configured to control up to 16 edge detections (2 bits
+ * control one edge detection). So a total of 256 edge detections can be
+ * configured. This function provides a way to set the index number of
+ * the edge detection which needs to be configured.
+ */
+static ssize_t ctrl_idx_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf,
+                             size_t size)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+
+       if ((kstrtoul(buf, 0, &val)) || (val >= TPDM_DSB_MAX_LINES))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->dsb->edge_ctrl_idx = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(ctrl_idx);
+
+/*
+ * This function is used to control the edge detection according
+ * to the index number that has been set.
+ * "edge_ctrl" should be one of the following values.
+ * 0 - Rising edge detection
+ * 1 - Falling edge detection
+ * 2 - Rising and falling edge detection (toggle detection)
+ */
+static ssize_t ctrl_val_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf,
+                             size_t size)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val, edge_ctrl;
+       int reg;
+
+       if ((kstrtoul(buf, 0, &edge_ctrl)) || (edge_ctrl > 0x2))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       /*
+        * There are 2 bits per DSB Edge Control line.
+        * Thus we have 16 lines in a 32-bit word.
+        */
+       reg = EDCR_TO_WORD_IDX(drvdata->dsb->edge_ctrl_idx);
+       val = drvdata->dsb->edge_ctrl[reg];
+       val &= ~EDCR_TO_WORD_MASK(drvdata->dsb->edge_ctrl_idx);
+       val |= EDCR_TO_WORD_VAL(edge_ctrl, drvdata->dsb->edge_ctrl_idx);
+       drvdata->dsb->edge_ctrl[reg] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_WO(ctrl_val);
+
+static ssize_t ctrl_mask_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf,
+                              size_t size)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+       u32 set;
+       int reg;
+
+       if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       /*
+        * There is 1 bit per DSB Edge Control Mask line.
+        * Thus we have 32 lines in a 32-bit word.
+        */
+       reg = EDCMR_TO_WORD_IDX(drvdata->dsb->edge_ctrl_idx);
+       set = drvdata->dsb->edge_ctrl_mask[reg];
+       if (val)
+               set |= BIT(EDCMR_TO_WORD_SHIFT(drvdata->dsb->edge_ctrl_idx));
+       else
+               set &= ~BIT(EDCMR_TO_WORD_SHIFT(drvdata->dsb->edge_ctrl_idx));
+       drvdata->dsb->edge_ctrl_mask[reg] = set;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_WO(ctrl_mask);
+
+static ssize_t enable_ts_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       return sysfs_emit(buf, "%u\n",
+                        (unsigned int)drvdata->dsb->patt_ts);
+}
+
+/*
+ * value 1: Enable/Disable DSB pattern timestamp
+ */
+static ssize_t enable_ts_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf,
+                              size_t size)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+
+       if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->dsb->patt_ts = !!val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(enable_ts);
+
+static ssize_t set_type_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       return sysfs_emit(buf, "%u\n",
+                        (unsigned int)drvdata->dsb->patt_type);
+}
+
+/*
+ * value 1: Set DSB pattern type
+ */
+static ssize_t set_type_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+
+       if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->dsb->patt_type = val;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(set_type);
+
+static ssize_t dsb_trig_type_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       return sysfs_emit(buf, "%u\n",
+                        (unsigned int)drvdata->dsb->trig_type);
+}
+
+/*
+ * Trigger type (boolean):
+ * false - Disable trigger type.
+ * true  - Enable trigger type.
+ */
+static ssize_t dsb_trig_type_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf,
+                                  size_t size)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+
+       if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       if (val)
+               drvdata->dsb->trig_type = true;
+       else
+               drvdata->dsb->trig_type = false;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(dsb_trig_type);
+
+static ssize_t dsb_trig_ts_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       return sysfs_emit(buf, "%u\n",
+                        (unsigned int)drvdata->dsb->trig_ts);
+}
+
+/*
+ * Trigger timestamp (boolean):
+ * false - Disable trigger timestamp.
+ * true  - Enable trigger timestamp.
+ */
+static ssize_t dsb_trig_ts_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf,
+                                size_t size)
+{
+       struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val;
+
+       if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       if (val)
+               drvdata->dsb->trig_ts = true;
+       else
+               drvdata->dsb->trig_ts = false;
+       spin_unlock(&drvdata->spinlock);
+       return size;
+}
+static DEVICE_ATTR_RW(dsb_trig_ts);
+
+static struct attribute *tpdm_dsb_edge_attrs[] = {
+       &dev_attr_ctrl_idx.attr,
+       &dev_attr_ctrl_val.attr,
+       &dev_attr_ctrl_mask.attr,
+       DSB_EDGE_CTRL_ATTR(0),
+       DSB_EDGE_CTRL_ATTR(1),
+       DSB_EDGE_CTRL_ATTR(2),
+       DSB_EDGE_CTRL_ATTR(3),
+       DSB_EDGE_CTRL_ATTR(4),
+       DSB_EDGE_CTRL_ATTR(5),
+       DSB_EDGE_CTRL_ATTR(6),
+       DSB_EDGE_CTRL_ATTR(7),
+       DSB_EDGE_CTRL_ATTR(8),
+       DSB_EDGE_CTRL_ATTR(9),
+       DSB_EDGE_CTRL_ATTR(10),
+       DSB_EDGE_CTRL_ATTR(11),
+       DSB_EDGE_CTRL_ATTR(12),
+       DSB_EDGE_CTRL_ATTR(13),
+       DSB_EDGE_CTRL_ATTR(14),
+       DSB_EDGE_CTRL_ATTR(15),
+       DSB_EDGE_CTRL_MASK_ATTR(0),
+       DSB_EDGE_CTRL_MASK_ATTR(1),
+       DSB_EDGE_CTRL_MASK_ATTR(2),
+       DSB_EDGE_CTRL_MASK_ATTR(3),
+       DSB_EDGE_CTRL_MASK_ATTR(4),
+       DSB_EDGE_CTRL_MASK_ATTR(5),
+       DSB_EDGE_CTRL_MASK_ATTR(6),
+       DSB_EDGE_CTRL_MASK_ATTR(7),
+       NULL,
+};
+
+static struct attribute *tpdm_dsb_trig_patt_attrs[] = {
+       DSB_TRIG_PATT_ATTR(0),
+       DSB_TRIG_PATT_ATTR(1),
+       DSB_TRIG_PATT_ATTR(2),
+       DSB_TRIG_PATT_ATTR(3),
+       DSB_TRIG_PATT_ATTR(4),
+       DSB_TRIG_PATT_ATTR(5),
+       DSB_TRIG_PATT_ATTR(6),
+       DSB_TRIG_PATT_ATTR(7),
+       DSB_TRIG_PATT_MASK_ATTR(0),
+       DSB_TRIG_PATT_MASK_ATTR(1),
+       DSB_TRIG_PATT_MASK_ATTR(2),
+       DSB_TRIG_PATT_MASK_ATTR(3),
+       DSB_TRIG_PATT_MASK_ATTR(4),
+       DSB_TRIG_PATT_MASK_ATTR(5),
+       DSB_TRIG_PATT_MASK_ATTR(6),
+       DSB_TRIG_PATT_MASK_ATTR(7),
+       NULL,
+};
+
+static struct attribute *tpdm_dsb_patt_attrs[] = {
+       DSB_PATT_ATTR(0),
+       DSB_PATT_ATTR(1),
+       DSB_PATT_ATTR(2),
+       DSB_PATT_ATTR(3),
+       DSB_PATT_ATTR(4),
+       DSB_PATT_ATTR(5),
+       DSB_PATT_ATTR(6),
+       DSB_PATT_ATTR(7),
+       DSB_PATT_MASK_ATTR(0),
+       DSB_PATT_MASK_ATTR(1),
+       DSB_PATT_MASK_ATTR(2),
+       DSB_PATT_MASK_ATTR(3),
+       DSB_PATT_MASK_ATTR(4),
+       DSB_PATT_MASK_ATTR(5),
+       DSB_PATT_MASK_ATTR(6),
+       DSB_PATT_MASK_ATTR(7),
+       &dev_attr_enable_ts.attr,
+       &dev_attr_set_type.attr,
+       NULL,
+};
+
+static struct attribute *tpdm_dsb_msr_attrs[] = {
+       DSB_MSR_ATTR(0),
+       DSB_MSR_ATTR(1),
+       DSB_MSR_ATTR(2),
+       DSB_MSR_ATTR(3),
+       DSB_MSR_ATTR(4),
+       DSB_MSR_ATTR(5),
+       DSB_MSR_ATTR(6),
+       DSB_MSR_ATTR(7),
+       DSB_MSR_ATTR(8),
+       DSB_MSR_ATTR(9),
+       DSB_MSR_ATTR(10),
+       DSB_MSR_ATTR(11),
+       DSB_MSR_ATTR(12),
+       DSB_MSR_ATTR(13),
+       DSB_MSR_ATTR(14),
+       DSB_MSR_ATTR(15),
+       DSB_MSR_ATTR(16),
+       DSB_MSR_ATTR(17),
+       DSB_MSR_ATTR(18),
+       DSB_MSR_ATTR(19),
+       DSB_MSR_ATTR(20),
+       DSB_MSR_ATTR(21),
+       DSB_MSR_ATTR(22),
+       DSB_MSR_ATTR(23),
+       DSB_MSR_ATTR(24),
+       DSB_MSR_ATTR(25),
+       DSB_MSR_ATTR(26),
+       DSB_MSR_ATTR(27),
+       DSB_MSR_ATTR(28),
+       DSB_MSR_ATTR(29),
+       DSB_MSR_ATTR(30),
+       DSB_MSR_ATTR(31),
+       NULL,
+};
+
+static struct attribute *tpdm_dsb_attrs[] = {
+       &dev_attr_dsb_mode.attr,
+       &dev_attr_dsb_trig_ts.attr,
+       &dev_attr_dsb_trig_type.attr,
+       NULL,
+};
+
+static struct attribute_group tpdm_dsb_attr_grp = {
+       .attrs = tpdm_dsb_attrs,
+       .is_visible = tpdm_dsb_is_visible,
+};
+
+static struct attribute_group tpdm_dsb_edge_grp = {
+       .attrs = tpdm_dsb_edge_attrs,
+       .is_visible = tpdm_dsb_is_visible,
+       .name = "dsb_edge",
+};
+
+static struct attribute_group tpdm_dsb_trig_patt_grp = {
+       .attrs = tpdm_dsb_trig_patt_attrs,
+       .is_visible = tpdm_dsb_is_visible,
+       .name = "dsb_trig_patt",
+};
+
+static struct attribute_group tpdm_dsb_patt_grp = {
+       .attrs = tpdm_dsb_patt_attrs,
+       .is_visible = tpdm_dsb_is_visible,
+       .name = "dsb_patt",
+};
+
+static struct attribute_group tpdm_dsb_msr_grp = {
+       .attrs = tpdm_dsb_msr_attrs,
+       .is_visible = tpdm_dsb_msr_is_visible,
+       .name = "dsb_msr",
+};
+
 static const struct attribute_group *tpdm_attr_grps[] = {
        &tpdm_attr_grp,
+       &tpdm_dsb_attr_grp,
+       &tpdm_dsb_edge_grp,
+       &tpdm_dsb_trig_patt_grp,
+       &tpdm_dsb_patt_grp,
+       &tpdm_dsb_msr_grp,
        NULL,
 };
 
@@ -181,6 +866,7 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
        struct coresight_platform_data *pdata;
        struct tpdm_drvdata *drvdata;
        struct coresight_desc desc = { 0 };
+       int ret;
 
        pdata = coresight_get_platform_data(dev);
        if (IS_ERR(pdata))
@@ -200,12 +886,20 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
 
        drvdata->base = base;
 
+       ret = tpdm_datasets_setup(drvdata);
+       if (ret)
+               return ret;
+
+       if (drvdata && tpdm_has_dsb_dataset(drvdata))
+               of_property_read_u32(drvdata->dev->of_node,
+                          "qcom,dsb-msrs-num", &drvdata->dsb_msr_num);
+
        /* Set up coresight component description */
        desc.name = coresight_alloc_device_name(&tpdm_devs, dev);
        if (!desc.name)
                return -ENOMEM;
        desc.type = CORESIGHT_DEV_TYPE_SOURCE;
-       desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS;
+       desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM;
        desc.ops = &tpdm_cs_ops;
        desc.pdata = adev->dev.platform_data;
        desc.dev = &adev->dev;
@@ -216,7 +910,7 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
                return PTR_ERR(drvdata->csdev);
 
        spin_lock_init(&drvdata->spinlock);
-       tpdm_init_default_data(drvdata);
+
        /* Decrease pm refcount when probe is done.*/
        pm_runtime_put(&adev->dev);
 
index 543854043a2dd3396cd75aa2ba9b0e8b10be361d..4115b2a17b8d8a05a2f3c8f96e6029df2589e41f 100644 (file)
 
 /* DSB Subunit Registers */
 #define TPDM_DSB_CR            (0x780)
+#define TPDM_DSB_TIER          (0x784)
+#define TPDM_DSB_TPR(n)                (0x788 + (n * 4))
+#define TPDM_DSB_TPMR(n)       (0x7A8 + (n * 4))
+#define TPDM_DSB_XPR(n)                (0x7C8 + (n * 4))
+#define TPDM_DSB_XPMR(n)       (0x7E8 + (n * 4))
+#define TPDM_DSB_EDCR(n)       (0x808 + (n * 4))
+#define TPDM_DSB_EDCMR(n)      (0x848 + (n * 4))
+#define TPDM_DSB_MSR(n)                (0x980 + (n * 4))
+
 /* Enable bit for DSB subunit */
 #define TPDM_DSB_CR_ENA                BIT(0)
+/* Enable bit for DSB subunit performance mode */
+#define TPDM_DSB_CR_MODE               BIT(1)
+/* Enable bit for DSB subunit trigger type */
+#define TPDM_DSB_CR_TRIG_TYPE          BIT(12)
+/* Data bits for DSB high performance mode */
+#define TPDM_DSB_CR_HPSEL              GENMASK(6, 2)
+/* Data bits for DSB test mode */
+#define TPDM_DSB_CR_TEST_MODE          GENMASK(10, 9)
+
+/* Enable bit for DSB subunit pattern timestamp */
+#define TPDM_DSB_TIER_PATT_TSENAB              BIT(0)
+/* Enable bit for DSB subunit trigger timestamp */
+#define TPDM_DSB_TIER_XTRIG_TSENAB             BIT(1)
+/* Bit for DSB subunit pattern type */
+#define TPDM_DSB_TIER_PATT_TYPE                        BIT(2)
+
+/* DSB programming modes */
+/* DSB mode bits mask */
+#define TPDM_DSB_MODE_MASK                     GENMASK(8, 0)
+/* Test mode control bit */
+#define TPDM_DSB_MODE_TEST(val)        (val & GENMASK(1, 0))
+/* Performance mode */
+#define TPDM_DSB_MODE_PERF             BIT(3)
+/* High performance mode */
+#define TPDM_DSB_MODE_HPBYTESEL(val)   (val & GENMASK(8, 4))
+
+#define EDCRS_PER_WORD                 16
+#define EDCR_TO_WORD_IDX(r)            ((r) / EDCRS_PER_WORD)
+#define EDCR_TO_WORD_SHIFT(r)          ((r % EDCRS_PER_WORD) * 2)
+#define EDCR_TO_WORD_VAL(val, r)       (val << EDCR_TO_WORD_SHIFT(r))
+#define EDCR_TO_WORD_MASK(r)           EDCR_TO_WORD_VAL(0x3, r)
+
+#define EDCMRS_PER_WORD                                32
+#define EDCMR_TO_WORD_IDX(r)           ((r) / EDCMRS_PER_WORD)
+#define EDCMR_TO_WORD_SHIFT(r)         ((r) % EDCMRS_PER_WORD)
 
 /* TPDM integration test registers */
 #define TPDM_ITATBCNTRL                (0xEF0)
 #define TPDM_PIDR0_DS_IMPDEF   BIT(0)
 #define TPDM_PIDR0_DS_DSB      BIT(1)
 
+#define TPDM_DSB_MAX_LINES     256
+/* MAX number of EDCR registers */
+#define TPDM_DSB_MAX_EDCR      16
+/* MAX number of EDCMR registers */
+#define TPDM_DSB_MAX_EDCMR     8
+/* MAX number of DSB pattern */
+#define TPDM_DSB_MAX_PATT      8
+/* MAX number of DSB MSR */
+#define TPDM_DSB_MAX_MSR 32
+
+#define tpdm_simple_dataset_ro(name, mem, idx)                 \
+       (&((struct tpdm_dataset_attribute[]) {                  \
+          {                                                            \
+               __ATTR(name, 0444, tpdm_simple_dataset_show, NULL),     \
+               mem,                                                    \
+               idx,                                                    \
+          }                                                            \
+       })[0].attr.attr)
+
+#define tpdm_simple_dataset_rw(name, mem, idx)                 \
+       (&((struct tpdm_dataset_attribute[]) {                  \
+          {                                                            \
+               __ATTR(name, 0644, tpdm_simple_dataset_show,            \
+               tpdm_simple_dataset_store),             \
+               mem,                                                    \
+               idx,                                                    \
+          }                                                            \
+       })[0].attr.attr)
+
+#define DSB_EDGE_CTRL_ATTR(nr)                                 \
+               tpdm_simple_dataset_ro(edcr##nr,                \
+               DSB_EDGE_CTRL, nr)
+
+#define DSB_EDGE_CTRL_MASK_ATTR(nr)                            \
+               tpdm_simple_dataset_ro(edcmr##nr,               \
+               DSB_EDGE_CTRL_MASK, nr)
+
+#define DSB_TRIG_PATT_ATTR(nr)                                 \
+               tpdm_simple_dataset_rw(xpr##nr,                 \
+               DSB_TRIG_PATT, nr)
+
+#define DSB_TRIG_PATT_MASK_ATTR(nr)                            \
+               tpdm_simple_dataset_rw(xpmr##nr,                \
+               DSB_TRIG_PATT_MASK, nr)
+
+#define DSB_PATT_ATTR(nr)                                      \
+               tpdm_simple_dataset_rw(tpr##nr,                 \
+               DSB_PATT, nr)
+
+#define DSB_PATT_MASK_ATTR(nr)                                 \
+               tpdm_simple_dataset_rw(tpmr##nr,                \
+               DSB_PATT_MASK, nr)
+
+#define DSB_MSR_ATTR(nr)                                       \
+               tpdm_simple_dataset_rw(msr##nr,                 \
+               DSB_MSR, nr)
+
+/**
+ * struct dsb_dataset - specifics associated to dsb dataset
+ * @mode:             DSB programming mode
+ * @edge_ctrl_idx:    Index number of the edge control
+ * @edge_ctrl:        Save value for edge control
+ * @edge_ctrl_mask:   Save value for edge control mask
+ * @patt_val:         Save value for pattern
+ * @patt_mask:        Save value for pattern mask
+ * @trig_patt:        Save value for trigger pattern
+ * @trig_patt_mask:   Save value for trigger pattern mask
+ * @msr:              Save value for MSR
+ * @patt_ts:          Enable/Disable pattern timestamp
+ * @patt_type:        Set pattern type
+ * @trig_ts:          Enable/Disable trigger timestamp.
+ * @trig_type:        Enable/Disable trigger type.
+ */
+struct dsb_dataset {
+       u32                     mode;
+       u32                     edge_ctrl_idx;
+       u32                     edge_ctrl[TPDM_DSB_MAX_EDCR];
+       u32                     edge_ctrl_mask[TPDM_DSB_MAX_EDCMR];
+       u32                     patt_val[TPDM_DSB_MAX_PATT];
+       u32                     patt_mask[TPDM_DSB_MAX_PATT];
+       u32                     trig_patt[TPDM_DSB_MAX_PATT];
+       u32                     trig_patt_mask[TPDM_DSB_MAX_PATT];
+       u32                     msr[TPDM_DSB_MAX_MSR];
+       bool                    patt_ts;
+       bool                    patt_type;
+       bool                    trig_ts;
+       bool                    trig_type;
+};
+
 /**
  * struct tpdm_drvdata - specifics associated to an TPDM component
  * @base:       memory mapped base address for this component.
  * @spinlock:   lock for the drvdata value.
  * @enable:     enable status of the component.
  * @datasets:   The datasets types present of the TPDM.
+ * @dsb:         Specifics associated to TPDM DSB.
+ * @dsb_msr_num: Number of MSR supported by DSB TPDM
  */
 
 struct tpdm_drvdata {
@@ -57,6 +192,32 @@ struct tpdm_drvdata {
        spinlock_t              spinlock;
        bool                    enable;
        unsigned long           datasets;
+       struct dsb_dataset      *dsb;
+       u32                     dsb_msr_num;
+};
+
+/* Enumerate members of various datasets */
+enum dataset_mem {
+       DSB_EDGE_CTRL,
+       DSB_EDGE_CTRL_MASK,
+       DSB_TRIG_PATT,
+       DSB_TRIG_PATT_MASK,
+       DSB_PATT,
+       DSB_PATT_MASK,
+       DSB_MSR,
+};
+
+/**
+ * struct tpdm_dataset_attribute - Record the member variables and
+ * index number of datasets that need to be operated by sysfs file
+ * @attr:       The device attribute
+ * @mem:        The member in the dataset data structure
+ * @idx:        The index number of the array data
+ */
+struct tpdm_dataset_attribute {
+       struct device_attribute attr;
+       enum dataset_mem mem;
+       u32 idx;
 };
 
 #endif  /* _CORESIGHT_CORESIGHT_TPDM_H */
index e20c1c6acc7315b26f807b48b18ffd034aa23932..6136776482e6d8d9e5db5ce93464352de0ee1f3e 100644 (file)
@@ -1253,8 +1253,18 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp
        desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
        if (!desc.name)
                goto cpu_clear;
-
-       desc.pdata = coresight_get_platform_data(dev);
+       /*
+        * TRBE coresight devices do not need regular connections
+        * information, as the paths get built between all percpu
+        * source and their respective percpu sink devices. Though
+        * coresight_register() expects device connections via the
+        * platform_data, which TRBE devices do not have. As they
+        * are not real ACPI devices, coresight_get_platform_data()
+        * ends up failing. Instead let's allocate a dummy zeroed
+        * coresight_platform_data structure and assign that back
+        * into the device for that purpose.
+        */
+       desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL);
        if (IS_ERR(desc.pdata))
                goto cpu_clear;
 
@@ -1520,14 +1530,13 @@ probe_failed:
        return ret;
 }
 
-static int arm_trbe_device_remove(struct platform_device *pdev)
+static void arm_trbe_device_remove(struct platform_device *pdev)
 {
        struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);
 
        arm_trbe_remove_cpuhp(drvdata);
        arm_trbe_remove_coresight(drvdata);
        arm_trbe_remove_irq(drvdata);
-       return 0;
 }
 
 static const struct of_device_id arm_trbe_of_match[] = {
@@ -1536,14 +1545,23 @@ static const struct of_device_id arm_trbe_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, arm_trbe_of_match);
 
+#ifdef CONFIG_ACPI
+static const struct platform_device_id arm_trbe_acpi_match[] = {
+       { ARMV8_TRBE_PDEV_NAME, 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(platform, arm_trbe_acpi_match);
+#endif
+
 static struct platform_driver arm_trbe_driver = {
+       .id_table = ACPI_PTR(arm_trbe_acpi_match),
        .driver = {
                .name = DRVNAME,
                .of_match_table = of_match_ptr(arm_trbe_of_match),
                .suppress_bind_attrs = true,
        },
        .probe  = arm_trbe_device_probe,
-       .remove = arm_trbe_device_remove,
+       .remove_new = arm_trbe_device_remove,
 };
 
 static int __init arm_trbe_init(void)
index e915e749be55150d95f44e19fd1a874d190174b2..45202c48accec7c86ba56130e2737bc2d1830fae 100644 (file)
@@ -7,11 +7,13 @@
  *
  * Author: Anshuman Khandual <anshuman.khandual@arm.com>
  */
+#include <linux/acpi.h>
 #include <linux/coresight.h>
 #include <linux/device.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 #include <linux/smp.h>
 
index 6e32d31a95fe0865f96d09c1c83fd2e272e2ac31..10e886455b8b711e32de891fac9c025d243eaabb 100644 (file)
@@ -97,27 +97,19 @@ static int smb_open(struct inode *inode, struct file *file)
 {
        struct smb_drv_data *drvdata = container_of(file->private_data,
                                        struct smb_drv_data, miscdev);
-       int ret = 0;
 
-       spin_lock(&drvdata->spinlock);
+       guard(spinlock)(&drvdata->spinlock);
 
-       if (drvdata->reading) {
-               ret = -EBUSY;
-               goto out;
-       }
+       if (drvdata->reading)
+               return -EBUSY;
 
-       if (atomic_read(&drvdata->csdev->refcnt)) {
-               ret = -EBUSY;
-               goto out;
-       }
+       if (atomic_read(&drvdata->csdev->refcnt))
+               return -EBUSY;
 
        smb_update_data_size(drvdata);
-
        drvdata->reading = true;
-out:
-       spin_unlock(&drvdata->spinlock);
 
-       return ret;
+       return 0;
 }
 
 static ssize_t smb_read(struct file *file, char __user *data, size_t len,
@@ -160,9 +152,8 @@ static int smb_release(struct inode *inode, struct file *file)
        struct smb_drv_data *drvdata = container_of(file->private_data,
                                        struct smb_drv_data, miscdev);
 
-       spin_lock(&drvdata->spinlock);
+       guard(spinlock)(&drvdata->spinlock);
        drvdata->reading = false;
-       spin_unlock(&drvdata->spinlock);
 
        return 0;
 }
@@ -255,19 +246,15 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
        struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
        int ret = 0;
 
-       spin_lock(&drvdata->spinlock);
+       guard(spinlock)(&drvdata->spinlock);
 
        /* Do nothing, the trace data is reading by other interface now */
-       if (drvdata->reading) {
-               ret = -EBUSY;
-               goto out;
-       }
+       if (drvdata->reading)
+               return -EBUSY;
 
        /* Do nothing, the SMB is already enabled as other mode */
-       if (drvdata->mode != CS_MODE_DISABLED && drvdata->mode != mode) {
-               ret = -EBUSY;
-               goto out;
-       }
+       if (drvdata->mode != CS_MODE_DISABLED && drvdata->mode != mode)
+               return -EBUSY;
 
        switch (mode) {
        case CS_MODE_SYSFS:
@@ -281,13 +268,10 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
        }
 
        if (ret)
-               goto out;
+               return ret;
 
        atomic_inc(&csdev->refcnt);
-
        dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
-out:
-       spin_unlock(&drvdata->spinlock);
 
        return ret;
 }
@@ -295,19 +279,14 @@ out:
 static int smb_disable(struct coresight_device *csdev)
 {
        struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
-       int ret = 0;
 
-       spin_lock(&drvdata->spinlock);
+       guard(spinlock)(&drvdata->spinlock);
 
-       if (drvdata->reading) {
-               ret = -EBUSY;
-               goto out;
-       }
+       if (drvdata->reading)
+               return -EBUSY;
 
-       if (atomic_dec_return(&csdev->refcnt)) {
-               ret = -EBUSY;
-               goto out;
-       }
+       if (atomic_dec_return(&csdev->refcnt))
+               return -EBUSY;
 
        /* Complain if we (somehow) got out of sync */
        WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
@@ -317,12 +296,9 @@ static int smb_disable(struct coresight_device *csdev)
        /* Dissociate from the target process. */
        drvdata->pid = -1;
        drvdata->mode = CS_MODE_DISABLED;
-
        dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");
-out:
-       spin_unlock(&drvdata->spinlock);
 
-       return ret;
+       return 0;
 }
 
 static void *smb_alloc_buffer(struct coresight_device *csdev,
@@ -395,17 +371,17 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
        struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
        struct smb_data_buffer *sdb = &drvdata->sdb;
        struct cs_buffers *buf = sink_config;
-       unsigned long data_size = 0;
+       unsigned long data_size;
        bool lost = false;
 
        if (!buf)
                return 0;
 
-       spin_lock(&drvdata->spinlock);
+       guard(spinlock)(&drvdata->spinlock);
 
        /* Don't do anything if another tracer is using this sink. */
        if (atomic_read(&csdev->refcnt) != 1)
-               goto out;
+               return 0;
 
        smb_disable_hw(drvdata);
        smb_update_data_size(drvdata);
@@ -424,8 +400,6 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
        smb_sync_perf_buffer(drvdata, buf, handle->head);
        if (!buf->snapshot && lost)
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
-out:
-       spin_unlock(&drvdata->spinlock);
 
        return data_size;
 }
@@ -601,15 +575,13 @@ static int smb_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int smb_remove(struct platform_device *pdev)
+static void smb_remove(struct platform_device *pdev)
 {
        struct smb_drv_data *drvdata = platform_get_drvdata(pdev);
 
        smb_unregister_sink(drvdata);
 
        smb_config_inport(&pdev->dev, false);
-
-       return 0;
 }
 
 #ifdef CONFIG_ACPI
@@ -627,7 +599,7 @@ static struct platform_driver smb_driver = {
                .suppress_bind_attrs = true,
        },
        .probe = smb_probe,
-       .remove = smb_remove,
+       .remove_new = smb_remove,
 };
 module_platform_driver(smb_driver);
 
index a991ecb7515a34d70d966d6d0fdbac86ebf34888..c1b5fd2b89741d676a3571facd7a0c4dc3c080f7 100644 (file)
@@ -183,6 +183,10 @@ static void hisi_ptt_wait_dma_reset_done(struct hisi_ptt *hisi_ptt)
 static void hisi_ptt_trace_end(struct hisi_ptt *hisi_ptt)
 {
        writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+
+       /* Mask the interrupt on the end */
+       writel(HISI_PTT_TRACE_INT_MASK_ALL, hisi_ptt->iobase + HISI_PTT_TRACE_INT_MASK);
+
        hisi_ptt->trace_ctrl.started = false;
 }
 
@@ -270,15 +274,14 @@ static int hisi_ptt_update_aux(struct hisi_ptt *hisi_ptt, int index, bool stop)
        buf->pos += size;
 
        /*
-        * Just commit the traced data if we're going to stop. Otherwise if the
-        * resident AUX buffer cannot contain the data of next trace buffer,
-        * apply a new one.
+        * Always commit the data to the AUX buffer in time to make sure
+        * userspace got enough time to consume the data.
+        *
+        * If we're not going to stop, apply a new one and check whether
+        * there's enough room for the next trace.
         */
-       if (stop) {
-               perf_aux_output_end(handle, buf->pos);
-       } else if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) {
-               perf_aux_output_end(handle, buf->pos);
-
+       perf_aux_output_end(handle, size);
+       if (!stop) {
                buf = perf_aux_output_begin(handle, event);
                if (!buf)
                        return -EINVAL;
index e17f045d7e72597bc6bb0909722a0492e8db0e43..46030aa880811d1ccb29f5342b07cc9bef9bbef1 100644 (file)
@@ -47,6 +47,7 @@
 #define HISI_PTT_TRACE_INT_STAT                0x0890
 #define   HISI_PTT_TRACE_INT_STAT_MASK GENMASK(3, 0)
 #define HISI_PTT_TRACE_INT_MASK                0x0894
+#define   HISI_PTT_TRACE_INT_MASK_ALL  GENMASK(3, 0)
 #define HISI_PTT_TUNING_INT_STAT       0x0898
 #define   HISI_PTT_TUNING_INT_STAT_MASK        BIT(0)
 #define HISI_PTT_TRACE_WR_STS          0x08a0
index ee83c4581bce059205515db11621d915e3023836..461eb23f9d476786bebadbbb79c888f6aa0a057f 100644 (file)
@@ -477,7 +477,7 @@ static const struct i2c_algorithm smbus_algorithm = {
 
 static struct i2c_adapter ali1535_adapter = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &smbus_algorithm,
 };
 
index 55a9e93fbfeb56a55d36795e3bca21c101a12f03..307fb0666ecb2f296af23926fe3d5728d78fb74b 100644 (file)
@@ -390,7 +390,7 @@ static const struct i2c_algorithm ali1563_algorithm = {
 
 static struct i2c_adapter ali1563_adapter = {
        .owner  = THIS_MODULE,
-       .class  = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class  = I2C_CLASS_HWMON,
        .algo   = &ali1563_algorithm,
 };
 
index 0231c5be6354f4ff40b2dc0c8ab57bd191511e3c..d2fa30deb054c7fafe0d2e1749ca53e684a1fa32 100644 (file)
@@ -461,7 +461,7 @@ static const struct i2c_algorithm smbus_algorithm = {
 
 static struct i2c_adapter ali15x3_adapter = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &smbus_algorithm,
 };
 
index ef1307a258e952704ba4c048ee2e0eefa42e7989..208310db906dfba418a95d3ac3b38234d0712c77 100644 (file)
@@ -285,7 +285,7 @@ static const struct i2c_algorithm smbus_algorithm = {
 
 struct i2c_adapter amd756_smbus = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &smbus_algorithm,
 };
 
index 1ed7e945bb6d107eeed7f5e90210c99019b3c712..42a9b1221065f6c2d05667de38cb1b41967bd518 100644 (file)
@@ -449,7 +449,7 @@ static int amd8111_probe(struct pci_dev *dev, const struct pci_device_id *id)
        smbus->adapter.owner = THIS_MODULE;
        snprintf(smbus->adapter.name, sizeof(smbus->adapter.name),
                "SMBus2 AMD8111 adapter at %04x", smbus->base);
-       smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       smbus->adapter.class = I2C_CLASS_HWMON;
        smbus->adapter.algo = &smbus_algorithm;
        smbus->adapter.algo_data = smbus;
 
index 9a664abf734d6627889a33f7f0729c384a8322e0..4404b4aac6765b15b27840fd0a43ee8551f43aed 100644 (file)
@@ -658,7 +658,7 @@ static int cpm_i2c_probe(struct platform_device *ofdev)
        /* register new adapter to i2c module... */
 
        data = of_get_property(ofdev->dev.of_node, "linux,i2c-index", &len);
-       cpm->adap.nr = (data && len == 4) ? be32_to_cpup(data) : -1;
+       cpm->adap.nr = (data && len == 4) ? *data : -1;
        result = i2c_add_numbered_adapter(&cpm->adap);
 
        if (result < 0)
index b0f50dce9d0fe4b4ae56c2cdc4ade814560c6014..cfe8665cacd270d9e8599106a06d7a0e9237cfae 100644 (file)
@@ -188,7 +188,7 @@ static struct i2c_algo_pcf_data pcf_isa_data = {
 
 static struct i2c_adapter pcf_isa_ops = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo_data      = &pcf_isa_data,
        .name           = "i2c-elektor",
 };
index fb35a75fe0e32f54bac273b4a24404c16bf7e663..4f1411b1a775452801dafaed734fab0be7d67ce8 100644 (file)
@@ -25,7 +25,6 @@ struct i2c_gpio_private_data {
        struct i2c_algo_bit_data bit_data;
        struct i2c_gpio_platform_data pdata;
 #ifdef CONFIG_I2C_GPIO_FAULT_INJECTOR
-       struct dentry *debug_dir;
        /* these must be protected by bus lock */
        struct completion scl_irq_completion;
        u64 scl_irq_data;
@@ -72,7 +71,6 @@ static int i2c_gpio_getscl(void *data)
 }
 
 #ifdef CONFIG_I2C_GPIO_FAULT_INJECTOR
-static struct dentry *i2c_gpio_debug_dir;
 
 #define setsda(bd, val)        ((bd)->setsda((bd)->data, val))
 #define setscl(bd, val)        ((bd)->setscl((bd)->data, val))
@@ -258,41 +256,23 @@ static void i2c_gpio_fault_injector_init(struct platform_device *pdev)
 {
        struct i2c_gpio_private_data *priv = platform_get_drvdata(pdev);
 
-       /*
-        * If there will be a debugfs-dir per i2c adapter somewhen, put the
-        * 'fault-injector' dir there. Until then, we have a global dir with
-        * all adapters as subdirs.
-        */
-       if (!i2c_gpio_debug_dir)
-               i2c_gpio_debug_dir = debugfs_create_dir("i2c-fault-injector", NULL);
-
-       priv->debug_dir = debugfs_create_dir(pdev->name, i2c_gpio_debug_dir);
-
        init_completion(&priv->scl_irq_completion);
 
-       debugfs_create_file_unsafe("incomplete_address_phase", 0200, priv->debug_dir,
+       debugfs_create_file_unsafe("incomplete_address_phase", 0200, priv->adap.debugfs,
                                   priv, &fops_incomplete_addr_phase);
-       debugfs_create_file_unsafe("incomplete_write_byte", 0200, priv->debug_dir,
+       debugfs_create_file_unsafe("incomplete_write_byte", 0200, priv->adap.debugfs,
                                   priv, &fops_incomplete_write_byte);
        if (priv->bit_data.getscl) {
-               debugfs_create_file_unsafe("inject_panic", 0200, priv->debug_dir,
+               debugfs_create_file_unsafe("inject_panic", 0200, priv->adap.debugfs,
                                           priv, &fops_inject_panic);
-               debugfs_create_file_unsafe("lose_arbitration", 0200, priv->debug_dir,
+               debugfs_create_file_unsafe("lose_arbitration", 0200, priv->adap.debugfs,
                                           priv, &fops_lose_arbitration);
        }
-       debugfs_create_file_unsafe("scl", 0600, priv->debug_dir, priv, &fops_scl);
-       debugfs_create_file_unsafe("sda", 0600, priv->debug_dir, priv, &fops_sda);
-}
-
-static void i2c_gpio_fault_injector_exit(struct platform_device *pdev)
-{
-       struct i2c_gpio_private_data *priv = platform_get_drvdata(pdev);
-
-       debugfs_remove_recursive(priv->debug_dir);
+       debugfs_create_file_unsafe("scl", 0600, priv->adap.debugfs, priv, &fops_scl);
+       debugfs_create_file_unsafe("sda", 0600, priv->adap.debugfs, priv, &fops_sda);
 }
 #else
 static inline void i2c_gpio_fault_injector_init(struct platform_device *pdev) {}
-static inline void i2c_gpio_fault_injector_exit(struct platform_device *pdev) {}
 #endif /* CONFIG_I2C_GPIO_FAULT_INJECTOR*/
 
 /* Get i2c-gpio properties from DT or ACPI table */
@@ -444,7 +424,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
                snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
 
        adap->algo_data = bit_data;
-       adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       adap->class = I2C_CLASS_HWMON;
        adap->dev.parent = dev;
        device_set_node(&adap->dev, fwnode);
 
@@ -475,8 +455,6 @@ static void i2c_gpio_remove(struct platform_device *pdev)
        struct i2c_gpio_private_data *priv;
        struct i2c_adapter *adap;
 
-       i2c_gpio_fault_injector_exit(pdev);
-
        priv = platform_get_drvdata(pdev);
        adap = &priv->adap;
 
index 070999139c6dcbe24ddde994543cf1ec9df74e82..3932e8d96a17173fa3b4f7ad90ebcbb786e99370 100644 (file)
@@ -1230,8 +1230,10 @@ static const struct {
         * Additional individual entries were added after verification.
         */
        { "Latitude 5480",      0x29 },
+       { "Precision 3540",     0x29 },
        { "Vostro V131",        0x1d },
        { "Vostro 5568",        0x29 },
+       { "XPS 15 7590",        0x29 },
 };
 
 static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
index 408820319ec48e6aa9258aab21996ec0e09c8656..7fb87b78923e460222fe633e0229aa18df7e608d 100644 (file)
@@ -739,7 +739,7 @@ static int iic_probe(struct platform_device *ofdev)
        adap->dev.of_node = of_node_get(np);
        strscpy(adap->name, "IBM IIC", sizeof(adap->name));
        i2c_set_adapdata(adap, dev);
-       adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       adap->class = I2C_CLASS_HWMON;
        adap->algo = &iic_algo;
        adap->timeout = HZ;
 
index 1775a79aeba2afa64b1ad3e0e22ac823deaeb48e..88a053987403cc6f59c3def73fd52cd11e2b1359 100644 (file)
@@ -1401,7 +1401,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
                        PINCTRL_STATE_DEFAULT);
        i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
                        "gpio");
-       rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
+       rinfo->sda_gpiod = devm_gpiod_get_optional(&pdev->dev, "sda", GPIOD_IN);
        rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
 
        if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
index f2f7ebeeaecb0345edb1e8c9cc7aa222d1970d88..2e5f0165c3d3016ee613517838b223af10480a35 100644 (file)
@@ -478,7 +478,7 @@ iop3xx_i2c_probe(struct platform_device *pdev)
 
        memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
        new_adapter->owner = THIS_MODULE;
-       new_adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       new_adapter->class = I2C_CLASS_HWMON;
        new_adapter->dev.parent = &pdev->dev;
        new_adapter->dev.of_node = pdev->dev.of_node;
        new_adapter->nr = pdev->id;
index 1dc1ceaa44439f67566008606f6b543e8f386509..416a9968ed2870a26f0a2b735f33e82bba85aa5a 100644 (file)
@@ -249,7 +249,7 @@ static const struct i2c_algorithm smbus_algorithm = {
 
 static struct i2c_adapter sch_adapter = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &smbus_algorithm,
 };
 
index e01d7530828840950273a3e332bb99ced6bf2e6b..c3a529a73b5bc119a9b829d9ea1c31dda0e41c46 100644 (file)
@@ -283,8 +283,7 @@ static const struct i2c_algorithm kempld_i2c_algorithm = {
 static const struct i2c_adapter kempld_i2c_adapter = {
        .owner          = THIS_MODULE,
        .name           = "i2c-kempld",
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD |
-                         I2C_CLASS_DEPRECATED,
+       .class          = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED,
        .algo           = &kempld_i2c_algorithm,
 };
 
index 6fec64ea67fbc11bffcf2c0a8c1604ea804bca33..099291a0411dec07664bb98fbabf1b4e4e19adf4 100644 (file)
@@ -477,7 +477,7 @@ static const struct i2c_adapter_quirks mlxcpld_i2c_quirks_ext2 = {
 static struct i2c_adapter mlxcpld_i2c_adapter = {
        .owner          = THIS_MODULE,
        .name           = "i2c-mlxcpld",
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &mlxcpld_i2c_algo,
        .quirks         = &mlxcpld_i2c_quirks,
        .retries        = MLXCPLD_I2C_RETR_NUM,
index 38d203d93eeec4bde521ddb034a1e4bd1fc88da0..fab662e6bc084fd1ca573685f60d6c4406b6b0e4 100644 (file)
@@ -349,7 +349,7 @@ static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
                return -EBUSY;
        }
        smbus->adapter.owner = THIS_MODULE;
-       smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       smbus->adapter.class = I2C_CLASS_HWMON;
        smbus->adapter.algo = &smbus_algorithm;
        smbus->adapter.algo_data = smbus;
        smbus->adapter.dev.parent = &dev->dev;
index ae4bae63ad4f3c3086635928acdd9b46deb10c28..54181b3f1919625a83010efbb7b439f8fd2316b7 100644 (file)
@@ -326,7 +326,6 @@ struct npcm_i2c {
        u8 slv_rd_buf[MAX_I2C_HW_FIFO_SIZE];
        u8 slv_wr_buf[MAX_I2C_HW_FIFO_SIZE];
 #endif
-       struct dentry *debugfs; /* debugfs device directory */
        u64 ber_cnt;
        u64 rec_succ_cnt;
        u64 rec_fail_cnt;
@@ -2250,27 +2249,15 @@ static const struct i2c_algorithm npcm_i2c_algo = {
 #endif
 };
 
-/* i2c debugfs directory: used to keep health monitor of i2c devices */
-static struct dentry *npcm_i2c_debugfs_dir;
-
 static void npcm_i2c_init_debugfs(struct platform_device *pdev,
                                  struct npcm_i2c *bus)
 {
-       struct dentry *d;
-
-       if (!npcm_i2c_debugfs_dir)
-               return;
-       d = debugfs_create_dir(dev_name(&pdev->dev), npcm_i2c_debugfs_dir);
-       if (IS_ERR_OR_NULL(d))
-               return;
-       debugfs_create_u64("ber_cnt", 0444, d, &bus->ber_cnt);
-       debugfs_create_u64("nack_cnt", 0444, d, &bus->nack_cnt);
-       debugfs_create_u64("rec_succ_cnt", 0444, d, &bus->rec_succ_cnt);
-       debugfs_create_u64("rec_fail_cnt", 0444, d, &bus->rec_fail_cnt);
-       debugfs_create_u64("timeout_cnt", 0444, d, &bus->timeout_cnt);
-       debugfs_create_u64("tx_complete_cnt", 0444, d, &bus->tx_complete_cnt);
-
-       bus->debugfs = d;
+       debugfs_create_u64("ber_cnt", 0444, bus->adap.debugfs, &bus->ber_cnt);
+       debugfs_create_u64("nack_cnt", 0444, bus->adap.debugfs, &bus->nack_cnt);
+       debugfs_create_u64("rec_succ_cnt", 0444, bus->adap.debugfs, &bus->rec_succ_cnt);
+       debugfs_create_u64("rec_fail_cnt", 0444, bus->adap.debugfs, &bus->rec_fail_cnt);
+       debugfs_create_u64("timeout_cnt", 0444, bus->adap.debugfs, &bus->timeout_cnt);
+       debugfs_create_u64("tx_complete_cnt", 0444, bus->adap.debugfs, &bus->tx_complete_cnt);
 }
 
 static int npcm_i2c_probe_bus(struct platform_device *pdev)
@@ -2362,7 +2349,6 @@ static void npcm_i2c_remove_bus(struct platform_device *pdev)
        unsigned long lock_flags;
        struct npcm_i2c *bus = platform_get_drvdata(pdev);
 
-       debugfs_remove_recursive(bus->debugfs);
        spin_lock_irqsave(&bus->lock, lock_flags);
        npcm_i2c_disable(bus);
        spin_unlock_irqrestore(&bus->lock, lock_flags);
@@ -2385,28 +2371,7 @@ static struct platform_driver npcm_i2c_bus_driver = {
        }
 };
 
-static int __init npcm_i2c_init(void)
-{
-       int ret;
-
-       npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL);
-
-       ret = platform_driver_register(&npcm_i2c_bus_driver);
-       if (ret) {
-               debugfs_remove_recursive(npcm_i2c_debugfs_dir);
-               return ret;
-       }
-
-       return 0;
-}
-module_init(npcm_i2c_init);
-
-static void __exit npcm_i2c_exit(void)
-{
-       platform_driver_unregister(&npcm_i2c_bus_driver);
-       debugfs_remove_recursive(npcm_i2c_debugfs_dir);
-}
-module_exit(npcm_i2c_exit);
+module_platform_driver(npcm_i2c_bus_driver);
 
 MODULE_AUTHOR("Avi Fishman <avi.fishman@gmail.com>");
 MODULE_AUTHOR("Tali Perry <tali.perry@nuvoton.com>");
index cfc89e04eb94cc5e27275a2ca1a72a5379773bf5..77f90c7436eda2df16afd7f1cac79355fb005bfd 100644 (file)
@@ -56,7 +56,7 @@ static int pasemi_smb_pci_probe(struct pci_dev *dev,
        if (!smbus->ioaddr)
                return -EBUSY;
 
-       smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       smbus->adapter.class = I2C_CLASS_HWMON;
        error = pasemi_i2c_common_probe(smbus);
        if (error)
                return error;
index 809fbd014cd6833749a677bba4b6845854459d3b..6a0392172b2f2ea643c70080d127232427d32873 100644 (file)
@@ -943,7 +943,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
        }
 
        adap->owner = THIS_MODULE;
-       adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       adap->class = I2C_CLASS_HWMON;
        adap->algo = sb800_main ? &piix4_smbus_algorithm_sb800
                                : &smbus_algorithm;
 
index 829ac053bbb7c4a1203a7fc878b2aebf28600d22..828aa2ea0fe4c90785cbdc5e65b5b28cdf86b40b 100644 (file)
@@ -89,6 +89,7 @@
 #define TMDMAE BIT(0)  /* DMA Master Transmitted Enable */
 
 /* ICCCR2 */
+#define FMPE   BIT(7)  /* Fast Mode Plus Enable */
 #define CDFD   BIT(2)  /* CDF Disable */
 #define HLSE   BIT(1)  /* HIGH/LOW Separate Control Enable */
 #define SME    BIT(0)  /* SCL Mask Enable */
 #define ID_NACK                        BIT(4)
 #define ID_EPROTO              BIT(5)
 /* persistent flags */
+#define ID_P_FMPLUS            BIT(27)
 #define ID_P_NOT_ATOMIC                BIT(28)
 #define ID_P_HOST_NOTIFY       BIT(29)
 #define ID_P_NO_RXDMA          BIT(30) /* HW forbids RXDMA sometimes */
 #define ID_P_PM_BLOCKED                BIT(31)
-#define ID_P_MASK              GENMASK(31, 28)
+#define ID_P_MASK              GENMASK(31, 27)
 
 enum rcar_i2c_type {
        I2C_RCAR_GEN1,
        I2C_RCAR_GEN2,
        I2C_RCAR_GEN3,
+       I2C_RCAR_GEN4,
 };
 
 struct rcar_i2c_priv {
@@ -148,6 +151,7 @@ struct rcar_i2c_priv {
        u32 icccr;
        u16 schd;
        u16 scld;
+       u8 smd;
        u8 recovery_icmcr;      /* protected by adapter lock */
        enum rcar_i2c_type devtype;
        struct i2c_client *slave;
@@ -239,9 +243,14 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
        if (priv->devtype < I2C_RCAR_GEN3) {
                rcar_i2c_write(priv, ICCCR, priv->icccr);
        } else {
-               rcar_i2c_write(priv, ICCCR2, CDFD | HLSE | SME);
+               u32 icccr2 = CDFD | HLSE | SME;
+
+               if (priv->flags & ID_P_FMPLUS)
+                       icccr2 |= FMPE;
+
+               rcar_i2c_write(priv, ICCCR2, icccr2);
                rcar_i2c_write(priv, ICCCR, priv->icccr);
-               rcar_i2c_write(priv, ICMPR, RCAR_DEFAULT_SMD);
+               rcar_i2c_write(priv, ICMPR, priv->smd);
                rcar_i2c_write(priv, ICHPR, priv->schd);
                rcar_i2c_write(priv, ICLPR, priv->scld);
                rcar_i2c_write(priv, ICFBSCR, TCYC17);
@@ -278,6 +287,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
 
        /* Fall back to previously used values if not supplied */
        i2c_parse_fw_timings(dev, &t, false);
+       priv->smd = RCAR_DEFAULT_SMD;
 
        /*
         * calculate SCL clock
@@ -303,6 +313,11 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
        if (cdf >= 1U << cdf_width)
                goto err_no_val;
 
+       if (t.bus_freq_hz > I2C_MAX_FAST_MODE_FREQ && priv->devtype >= I2C_RCAR_GEN4)
+               priv->flags |= ID_P_FMPLUS;
+       else
+               priv->flags &= ~ID_P_FMPLUS;
+
        /* On Gen3+, we use cdf only for the filters, not as a SCL divider */
        ick = rate / (priv->devtype < I2C_RCAR_GEN3 ? (cdf + 1) : 1);
 
@@ -344,30 +359,30 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
                 * x as a base value for the SCLD/SCHD ratio:
                 *
                 * SCL = clkp / (8 + 2 * SMD + SCLD + SCHD + F[(ticf + tr + intd) * clkp])
-                * SCL = clkp / (8 + 2 * RCAR_DEFAULT_SMD + RCAR_SCLD_RATIO * x
+                * SCL = clkp / (8 + 2 * SMD + RCAR_SCLD_RATIO * x
                 *               + RCAR_SCHD_RATIO * x + F[...])
                 *
                 * with: sum_ratio = RCAR_SCLD_RATIO + RCAR_SCHD_RATIO
-                * and:  smd = RCAR_DEFAULT_SMD
                 *
                 * SCL = clkp / (8 + 2 * smd + sum_ratio * x + F[...])
                 * 8 + 2 * smd + sum_ratio * x + F[...] = clkp / SCL
                 * x = ((clkp / SCL) - 8 - 2 * smd - F[...]) / sum_ratio
                 */
                x = DIV_ROUND_UP(rate, t.bus_freq_hz ?: 1);
-               x = DIV_ROUND_UP(x - 8 - 2 * RCAR_DEFAULT_SMD - round, sum_ratio);
-               scl = rate / (8 + 2 * RCAR_DEFAULT_SMD + sum_ratio * x + round);
+               x = DIV_ROUND_UP(x - 8 - 2 * priv->smd - round, sum_ratio);
+               scl = rate / (8 + 2 * priv->smd + sum_ratio * x + round);
 
-               /* Bail out if values don't fit into 16 bit or SMD became too large */
-               if (x * RCAR_SCLD_RATIO > 0xffff || RCAR_DEFAULT_SMD > x * RCAR_SCHD_RATIO)
+               if (x == 0 || x * RCAR_SCLD_RATIO > 0xffff)
                        goto err_no_val;
 
                priv->icccr = cdf;
                priv->schd = RCAR_SCHD_RATIO * x;
                priv->scld = RCAR_SCLD_RATIO * x;
+               if (priv->smd >= priv->schd)
+                       priv->smd = priv->schd - 1;
 
-               dev_dbg(dev, "clk %u/%u(%lu), round %u, CDF: %u SCHD %u SCLD %u\n",
-                       scl, t.bus_freq_hz, rate, round, cdf, priv->schd, priv->scld);
+               dev_dbg(dev, "clk %u/%u(%lu), round %u, CDF: %u SCHD %u SCLD %u SMD %u\n",
+                       scl, t.bus_freq_hz, rate, round, cdf, priv->schd, priv->scld, priv->smd);
        }
 
        return 0;
@@ -431,8 +446,8 @@ static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate)
        dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
                         sg_dma_len(&priv->sg), priv->dma_direction);
 
-       /* Gen3 can only do one RXDMA per transfer and we just completed it */
-       if (priv->devtype == I2C_RCAR_GEN3 &&
+       /* Gen3+ can only do one RXDMA per transfer and we just completed it */
+       if (priv->devtype >= I2C_RCAR_GEN3 &&
            priv->dma_direction == DMA_FROM_DEVICE)
                priv->flags |= ID_P_NO_RXDMA;
 
@@ -886,8 +901,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
        if (ret < 0)
                goto out;
 
-       /* Gen3 needs a reset before allowing RXDMA once */
-       if (priv->devtype == I2C_RCAR_GEN3) {
+       /* Gen3+ needs a reset. That also allows RXDMA once */
+       if (priv->devtype >= I2C_RCAR_GEN3) {
                priv->flags &= ~ID_P_NO_RXDMA;
                ret = rcar_i2c_do_reset(priv);
                if (ret)
@@ -1072,10 +1087,12 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
        { .compatible = "renesas,i2c-r8a7794", .data = (void *)I2C_RCAR_GEN2 },
        { .compatible = "renesas,i2c-r8a7795", .data = (void *)I2C_RCAR_GEN3 },
        { .compatible = "renesas,i2c-r8a7796", .data = (void *)I2C_RCAR_GEN3 },
+       /* S4 has no FM+ bit */
+       { .compatible = "renesas,i2c-r8a779f0", .data = (void *)I2C_RCAR_GEN3 },
        { .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 },
        { .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 },
        { .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 },
-       { .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN3 },
+       { .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN4 },
        {},
 };
 MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
@@ -1151,7 +1168,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        if (of_property_read_bool(dev->of_node, "smbus"))
                priv->flags |= ID_P_HOST_NOTIFY;
 
-       if (priv->devtype == I2C_RCAR_GEN3) {
+       if (priv->devtype >= I2C_RCAR_GEN3) {
                priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
                if (IS_ERR(priv->rstc)) {
                        ret = PTR_ERR(priv->rstc);
index 4362db7c57892c83d53f3c0062630d7b7b7d9990..086fdf262e7b60e26c48727f6f8f586615674117 100644 (file)
@@ -1295,8 +1295,12 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
                        return -EINVAL;
                }
 
-               /* 27+i: write mask, 11+i: value */
-               value = BIT(27 + bus_nr) | BIT(11 + bus_nr);
+               /* rv1126 i2c2 uses non-sequential write mask 20, value 4 */
+               if (i2c->soc_data == &rv1126_soc_data && bus_nr == 2)
+                       value = BIT(20) | BIT(4);
+               else
+                       /* 27+i: write mask, 11+i: value */
+                       value = BIT(27 + bus_nr) | BIT(11 + bus_nr);
 
                ret = regmap_write(grf, i2c->soc_data->grf_offset, value);
                if (ret != 0) {
index c56886af724ea87e3f863cd6140951530e620d76..275f7c42165cde7881bad16433c9150f653af91f 100644 (file)
@@ -76,6 +76,7 @@
 #define QUIRK_HDMIPHY          (1 << 1)
 #define QUIRK_NO_GPIO          (1 << 2)
 #define QUIRK_POLL             (1 << 3)
+#define QUIRK_ATOMIC           (1 << 4)
 
 /* Max time to wait for bus to become idle after a xfer (in us) */
 #define S3C2410_IDLE_TIMEOUT   5000
@@ -174,7 +175,7 @@ static inline void s3c24xx_i2c_master_complete(struct s3c24xx_i2c *i2c, int ret)
        if (ret)
                i2c->msg_idx = ret;
 
-       if (!(i2c->quirks & QUIRK_POLL))
+       if (!(i2c->quirks & (QUIRK_POLL | QUIRK_ATOMIC)))
                wake_up(&i2c->wait);
 }
 
@@ -216,8 +217,17 @@ static bool is_ack(struct s3c24xx_i2c *i2c)
        int tries;
 
        for (tries = 50; tries; --tries) {
-               if (readl(i2c->regs + S3C2410_IICCON)
-                       & S3C2410_IICCON_IRQPEND) {
+               unsigned long tmp = readl(i2c->regs + S3C2410_IICCON);
+
+               if (!(tmp & S3C2410_IICCON_ACKEN)) {
+                       /*
+                        * Wait a bit for the bus to stabilize,
+                        * delay estimated experimentally.
+                        */
+                       usleep_range(100, 200);
+                       return true;
+               }
+               if (tmp & S3C2410_IICCON_IRQPEND) {
                        if (!(readl(i2c->regs + S3C2410_IICSTAT)
                                & S3C2410_IICSTAT_LASTBIT))
                                return true;
@@ -270,16 +280,6 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
 
        stat |= S3C2410_IICSTAT_START;
        writel(stat, i2c->regs + S3C2410_IICSTAT);
-
-       if (i2c->quirks & QUIRK_POLL) {
-               while ((i2c->msg_num != 0) && is_ack(i2c)) {
-                       i2c_s3c_irq_nextbyte(i2c, stat);
-                       stat = readl(i2c->regs + S3C2410_IICSTAT);
-
-                       if (stat & S3C2410_IICSTAT_ARBITR)
-                               dev_err(i2c->dev, "deal with arbitration loss\n");
-               }
-       }
 }
 
 static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
@@ -685,7 +685,7 @@ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
 static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
                              struct i2c_msg *msgs, int num)
 {
-       unsigned long timeout;
+       unsigned long timeout = 0;
        int ret;
 
        ret = s3c24xx_i2c_set_master(i2c);
@@ -704,17 +704,20 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
        s3c24xx_i2c_enable_irq(i2c);
        s3c24xx_i2c_message_start(i2c, msgs);
 
-       if (i2c->quirks & QUIRK_POLL) {
-               ret = i2c->msg_idx;
+       if (i2c->quirks & (QUIRK_POLL | QUIRK_ATOMIC)) {
+               while ((i2c->msg_num != 0) && is_ack(i2c)) {
+                       unsigned long stat = readl(i2c->regs + S3C2410_IICSTAT);
 
-               if (ret != num)
-                       dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
+                       i2c_s3c_irq_nextbyte(i2c, stat);
 
-               goto out;
+                       stat = readl(i2c->regs + S3C2410_IICSTAT);
+                       if (stat & S3C2410_IICSTAT_ARBITR)
+                               dev_err(i2c->dev, "deal with arbitration loss\n");
+               }
+       } else {
+               timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
        }
 
-       timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
-
        ret = i2c->msg_idx;
 
        /*
@@ -773,6 +776,21 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
        return -EREMOTEIO;
 }
 
+static int s3c24xx_i2c_xfer_atomic(struct i2c_adapter *adap,
+                                  struct i2c_msg *msgs, int num)
+{
+       struct s3c24xx_i2c *i2c = (struct s3c24xx_i2c *)adap->algo_data;
+       int ret;
+
+       disable_irq(i2c->irq);
+       i2c->quirks |= QUIRK_ATOMIC;
+       ret = s3c24xx_i2c_xfer(adap, msgs, num);
+       i2c->quirks &= ~QUIRK_ATOMIC;
+       enable_irq(i2c->irq);
+
+       return ret;
+}
+
 /* declare our i2c functionality */
 static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
 {
@@ -783,6 +801,7 @@ static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
 /* i2c bus registration info */
 static const struct i2c_algorithm s3c24xx_i2c_algorithm = {
        .master_xfer            = s3c24xx_i2c_xfer,
+       .master_xfer_atomic     = s3c24xx_i2c_xfer_atomic,
        .functionality          = s3c24xx_i2c_func,
 };
 
index 421735acfa141f2394aaca68b0b872f3accd09b1..d7af8e0d7599ec3abe8275c926a4a4ae2b90d84b 100644 (file)
@@ -385,7 +385,7 @@ static int smbus_cmi_probe(struct platform_device *device)
        smbus_cmi->adapter.owner = THIS_MODULE;
        smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm;
        smbus_cmi->adapter.algo_data = smbus_cmi;
-       smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       smbus_cmi->adapter.class = I2C_CLASS_HWMON;
        smbus_cmi->adapter.dev.parent = &device->dev;
 
        ret = i2c_add_adapter(&smbus_cmi->adapter);
index 1ad2a26156d17703022548e6b432014f7fb64df9..8a043f5fca1e067125be040ee13a587eb3aee7c9 100644 (file)
@@ -477,7 +477,7 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
 
        id->adap.nr = pdev->id;
        id->adap.algo = &sh7760_i2c_algo;
-       id->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       id->adap.class = I2C_CLASS_HWMON;
        id->adap.retries = 3;
        id->adap.algo_data = id;
        id->adap.dev.parent = &pdev->dev;
index 8f71f01cb169b04fded2bf59e46a12109e5d11ac..49f8f4f1b0f0fc8eedbde2e2d57756bea432da3f 100644 (file)
@@ -142,7 +142,7 @@ static struct i2c_algo_sibyte_data sibyte_board_data[2] = {
 static struct i2c_adapter sibyte_board_adapter[2] = {
        {
                .owner          = THIS_MODULE,
-               .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+               .class          = I2C_CLASS_HWMON,
                .algo           = NULL,
                .algo_data      = &sibyte_board_data[0],
                .nr             = 0,
@@ -150,7 +150,7 @@ static struct i2c_adapter sibyte_board_adapter[2] = {
        },
        {
                .owner          = THIS_MODULE,
-               .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+               .class          = I2C_CLASS_HWMON,
                .algo           = NULL,
                .algo_data      = &sibyte_board_data[1],
                .nr             = 1,
index 486f1e9dfb74aabff9b29ae39f9b37d16b0444a9..32476dc10ad61f0e13c8f316e41455de5020f968 100644 (file)
@@ -353,7 +353,7 @@ static const struct i2c_algorithm smbus_algorithm = {
 
 static struct i2c_adapter sis5595_adapter = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &smbus_algorithm,
 };
 
index 87d56250d78a3e92f897b9fdd93950a1db320c28..3505cf29cedda32f0498fcf652756af907a53a50 100644 (file)
@@ -493,7 +493,7 @@ static const struct i2c_algorithm smbus_algorithm = {
 
 static struct i2c_adapter sis630_adapter = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &smbus_algorithm,
        .retries        = 3
 };
index cde8003985a58a4d1ee52d165424e95a8ad36fa1..77529dda6fcde6acb29f3916d6564378f89bdc4c 100644 (file)
@@ -228,7 +228,7 @@ static const struct i2c_algorithm smbus_algorithm = {
 
 static struct i2c_adapter sis96x_adapter = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &smbus_algorithm,
 };
 
index 983509936727edfdd8f28320076f2f2ef068263b..01210452216b333abd64e06451524784a537bca8 100644 (file)
@@ -50,6 +50,7 @@
 #define STM32F7_I2C_TXDR                       0x28
 
 /* STM32F7 I2C control 1 */
+#define STM32_I2C_CR1_FMP                      BIT(24)
 #define STM32F7_I2C_CR1_PECEN                  BIT(23)
 #define STM32F7_I2C_CR1_ALERTEN                        BIT(22)
 #define STM32F7_I2C_CR1_SMBHEN                 BIT(20)
@@ -226,6 +227,8 @@ struct stm32f7_i2c_spec {
  * @rise_time: Rise time (ns)
  * @fall_time: Fall time (ns)
  * @fmp_clr_offset: Fast Mode Plus clear register offset from set register
+ * @single_it_line: Only a single IT line is used for both events/errors
+ * @fmp_cr1_bit: Fast Mode Plus control is done via a bit in CR1
  */
 struct stm32f7_i2c_setup {
        u32 speed_freq;
@@ -233,6 +236,8 @@ struct stm32f7_i2c_setup {
        u32 rise_time;
        u32 fall_time;
        u32 fmp_clr_offset;
+       bool single_it_line;
+       bool fmp_cr1_bit;
 };
 
 /**
@@ -418,6 +423,13 @@ static const struct stm32f7_i2c_setup stm32mp13_setup = {
        .fmp_clr_offset = 0x4,
 };
 
+static const struct stm32f7_i2c_setup stm32mp25_setup = {
+       .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
+       .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
+       .single_it_line = true,
+       .fmp_cr1_bit = true,
+};
+
 static inline void stm32f7_i2c_set_bits(void __iomem *reg, u32 mask)
 {
        writel_relaxed(readl_relaxed(reg) | mask, reg);
@@ -1419,15 +1431,13 @@ static bool stm32f7_i2c_is_slave_busy(struct stm32f7_i2c_dev *i2c_dev)
        return i == busy;
 }
 
-static irqreturn_t stm32f7_i2c_slave_isr_event(struct stm32f7_i2c_dev *i2c_dev)
+static irqreturn_t stm32f7_i2c_slave_isr_event(struct stm32f7_i2c_dev *i2c_dev, u32 status)
 {
        void __iomem *base = i2c_dev->base;
-       u32 cr2, status, mask;
+       u32 cr2, mask;
        u8 val;
        int ret;
 
-       status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
-
        /* Slave transmitter mode */
        if (status & STM32F7_I2C_ISR_TXIS) {
                i2c_slave_event(i2c_dev->slave_running,
@@ -1494,23 +1504,81 @@ static irqreturn_t stm32f7_i2c_slave_isr_event(struct stm32f7_i2c_dev *i2c_dev)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
+static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev, u32 status)
 {
-       struct stm32f7_i2c_dev *i2c_dev = data;
        struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
-       struct stm32_i2c_dma *dma = i2c_dev->dma;
+       u16 addr = f7_msg->addr;
        void __iomem *base = i2c_dev->base;
-       u32 status, mask;
-       int ret = IRQ_HANDLED;
+       struct device *dev = i2c_dev->dev;
+       struct stm32_i2c_dma *dma = i2c_dev->dma;
 
-       /* Check if the interrupt if for a slave device */
-       if (!i2c_dev->master_mode) {
-               ret = stm32f7_i2c_slave_isr_event(i2c_dev);
-               return ret;
+       /* Bus error */
+       if (status & STM32F7_I2C_ISR_BERR) {
+               dev_err(dev, "Bus error accessing addr 0x%x\n", addr);
+               writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR);
+               stm32f7_i2c_release_bus(&i2c_dev->adap);
+               f7_msg->result = -EIO;
+       }
+
+       /* Arbitration loss */
+       if (status & STM32F7_I2C_ISR_ARLO) {
+               dev_dbg(dev, "Arbitration loss accessing addr 0x%x\n", addr);
+               writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR);
+               f7_msg->result = -EAGAIN;
+       }
+
+       if (status & STM32F7_I2C_ISR_PECERR) {
+               dev_err(dev, "PEC error in reception accessing addr 0x%x\n", addr);
+               writel_relaxed(STM32F7_I2C_ICR_PECCF, base + STM32F7_I2C_ICR);
+               f7_msg->result = -EINVAL;
        }
 
+       if (status & STM32F7_I2C_ISR_ALERT) {
+               dev_dbg(dev, "SMBus alert received\n");
+               writel_relaxed(STM32F7_I2C_ICR_ALERTCF, base + STM32F7_I2C_ICR);
+               i2c_handle_smbus_alert(i2c_dev->alert->ara);
+               return IRQ_HANDLED;
+       }
+
+       if (!i2c_dev->slave_running) {
+               u32 mask;
+               /* Disable interrupts */
+               if (stm32f7_i2c_is_slave_registered(i2c_dev))
+                       mask = STM32F7_I2C_XFER_IRQ_MASK;
+               else
+                       mask = STM32F7_I2C_ALL_IRQ_MASK;
+               stm32f7_i2c_disable_irq(i2c_dev, mask);
+       }
+
+       /* Disable dma */
+       if (i2c_dev->use_dma) {
+               stm32f7_i2c_disable_dma_req(i2c_dev);
+               dmaengine_terminate_async(dma->chan_using);
+       }
+
+       i2c_dev->master_mode = false;
+       complete(&i2c_dev->complete);
+
+       return IRQ_HANDLED;
+}
+
+#define STM32F7_ERR_EVENTS (STM32F7_I2C_ISR_BERR | STM32F7_I2C_ISR_ARLO |\
+                           STM32F7_I2C_ISR_PECERR | STM32F7_I2C_ISR_ALERT)
+static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
+{
+       struct stm32f7_i2c_dev *i2c_dev = data;
+       u32 status;
+
        status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
 
+       /*
+        * Check if the interrupt is for a slave device or related
+        * to error flags (in case of single IT line mode)
+        */
+       if (!i2c_dev->master_mode ||
+           (i2c_dev->setup.single_it_line && (status & STM32F7_ERR_EVENTS)))
+               return IRQ_WAKE_THREAD;
+
        /* Tx empty */
        if (status & STM32F7_I2C_ISR_TXIS)
                stm32f7_i2c_write_tx_data(i2c_dev);
@@ -1519,6 +1587,33 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
        if (status & STM32F7_I2C_ISR_RXNE)
                stm32f7_i2c_read_rx_data(i2c_dev);
 
+       /* Wake up the thread if other flags are raised */
+       if (status &
+           (STM32F7_I2C_ISR_NACKF | STM32F7_I2C_ISR_STOPF |
+            STM32F7_I2C_ISR_TC | STM32F7_I2C_ISR_TCR))
+               return IRQ_WAKE_THREAD;
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
+{
+       struct stm32f7_i2c_dev *i2c_dev = data;
+       struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+       struct stm32_i2c_dma *dma = i2c_dev->dma;
+       void __iomem *base = i2c_dev->base;
+       u32 status, mask;
+       int ret;
+
+       status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
+
+       if (!i2c_dev->master_mode)
+               return stm32f7_i2c_slave_isr_event(i2c_dev, status);
+
+       /* Handle errors in case this handler is used for events/errors */
+       if (i2c_dev->setup.single_it_line && (status & STM32F7_ERR_EVENTS))
+               return stm32f7_i2c_handle_isr_errs(i2c_dev, status);
+
        /* NACK received */
        if (status & STM32F7_I2C_ISR_NACKF) {
                dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
@@ -1531,33 +1626,28 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                f7_msg->result = -ENXIO;
        }
 
-       /* STOP detection flag */
-       if (status & STM32F7_I2C_ISR_STOPF) {
-               /* Disable interrupts */
-               if (stm32f7_i2c_is_slave_registered(i2c_dev))
-                       mask = STM32F7_I2C_XFER_IRQ_MASK;
+       if (status & STM32F7_I2C_ISR_TCR) {
+               if (f7_msg->smbus)
+                       stm32f7_i2c_smbus_reload(i2c_dev);
                else
-                       mask = STM32F7_I2C_ALL_IRQ_MASK;
-               stm32f7_i2c_disable_irq(i2c_dev, mask);
-
-               /* Clear STOP flag */
-               writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
-
-               if (i2c_dev->use_dma && !f7_msg->result) {
-                       ret = IRQ_WAKE_THREAD;
-               } else {
-                       i2c_dev->master_mode = false;
-                       complete(&i2c_dev->complete);
-               }
+                       stm32f7_i2c_reload(i2c_dev);
        }
 
        /* Transfer complete */
        if (status & STM32F7_I2C_ISR_TC) {
+               /* Wait for dma transfer completion before sending next message */
+               if (i2c_dev->use_dma && !f7_msg->result) {
+                       ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ);
+                       if (!ret) {
+                               dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
+                               stm32f7_i2c_disable_dma_req(i2c_dev);
+                               dmaengine_terminate_async(dma->chan_using);
+                               f7_msg->result = -ETIMEDOUT;
+                       }
+               }
                if (f7_msg->stop) {
                        mask = STM32F7_I2C_CR2_STOP;
                        stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
-               } else if (i2c_dev->use_dma && !f7_msg->result) {
-                       ret = IRQ_WAKE_THREAD;
                } else if (f7_msg->smbus) {
                        stm32f7_i2c_smbus_rep_start(i2c_dev);
                } else {
@@ -1567,47 +1657,18 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                }
        }
 
-       if (status & STM32F7_I2C_ISR_TCR) {
-               if (f7_msg->smbus)
-                       stm32f7_i2c_smbus_reload(i2c_dev);
+       /* STOP detection flag */
+       if (status & STM32F7_I2C_ISR_STOPF) {
+               /* Disable interrupts */
+               if (stm32f7_i2c_is_slave_registered(i2c_dev))
+                       mask = STM32F7_I2C_XFER_IRQ_MASK;
                else
-                       stm32f7_i2c_reload(i2c_dev);
-       }
-
-       return ret;
-}
-
-static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
-{
-       struct stm32f7_i2c_dev *i2c_dev = data;
-       struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
-       struct stm32_i2c_dma *dma = i2c_dev->dma;
-       u32 status;
-       int ret;
-
-       /*
-        * Wait for dma transfer completion before sending next message or
-        * notity the end of xfer to the client
-        */
-       ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ);
-       if (!ret) {
-               dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
-               stm32f7_i2c_disable_dma_req(i2c_dev);
-               dmaengine_terminate_async(dma->chan_using);
-               f7_msg->result = -ETIMEDOUT;
-       }
+                       mask = STM32F7_I2C_ALL_IRQ_MASK;
+               stm32f7_i2c_disable_irq(i2c_dev, mask);
 
-       status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
+               /* Clear STOP flag */
+               writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
 
-       if (status & STM32F7_I2C_ISR_TC) {
-               if (f7_msg->smbus) {
-                       stm32f7_i2c_smbus_rep_start(i2c_dev);
-               } else {
-                       i2c_dev->msg_id++;
-                       i2c_dev->msg++;
-                       stm32f7_i2c_xfer_msg(i2c_dev, i2c_dev->msg);
-               }
-       } else {
                i2c_dev->master_mode = false;
                complete(&i2c_dev->complete);
        }
@@ -1615,68 +1676,14 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
+static irqreturn_t stm32f7_i2c_isr_error_thread(int irq, void *data)
 {
        struct stm32f7_i2c_dev *i2c_dev = data;
-       struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
-       void __iomem *base = i2c_dev->base;
-       struct device *dev = i2c_dev->dev;
-       struct stm32_i2c_dma *dma = i2c_dev->dma;
        u32 status;
 
        status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
 
-       /* Bus error */
-       if (status & STM32F7_I2C_ISR_BERR) {
-               dev_err(dev, "<%s>: Bus error accessing addr 0x%x\n",
-                       __func__, f7_msg->addr);
-               writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR);
-               stm32f7_i2c_release_bus(&i2c_dev->adap);
-               f7_msg->result = -EIO;
-       }
-
-       /* Arbitration loss */
-       if (status & STM32F7_I2C_ISR_ARLO) {
-               dev_dbg(dev, "<%s>: Arbitration loss accessing addr 0x%x\n",
-                       __func__, f7_msg->addr);
-               writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR);
-               f7_msg->result = -EAGAIN;
-       }
-
-       if (status & STM32F7_I2C_ISR_PECERR) {
-               dev_err(dev, "<%s>: PEC error in reception accessing addr 0x%x\n",
-                       __func__, f7_msg->addr);
-               writel_relaxed(STM32F7_I2C_ICR_PECCF, base + STM32F7_I2C_ICR);
-               f7_msg->result = -EINVAL;
-       }
-
-       if (status & STM32F7_I2C_ISR_ALERT) {
-               dev_dbg(dev, "<%s>: SMBus alert received\n", __func__);
-               writel_relaxed(STM32F7_I2C_ICR_ALERTCF, base + STM32F7_I2C_ICR);
-               i2c_handle_smbus_alert(i2c_dev->alert->ara);
-               return IRQ_HANDLED;
-       }
-
-       if (!i2c_dev->slave_running) {
-               u32 mask;
-               /* Disable interrupts */
-               if (stm32f7_i2c_is_slave_registered(i2c_dev))
-                       mask = STM32F7_I2C_XFER_IRQ_MASK;
-               else
-                       mask = STM32F7_I2C_ALL_IRQ_MASK;
-               stm32f7_i2c_disable_irq(i2c_dev, mask);
-       }
-
-       /* Disable dma */
-       if (i2c_dev->use_dma) {
-               stm32f7_i2c_disable_dma_req(i2c_dev);
-               dmaengine_terminate_async(dma->chan_using);
-       }
-
-       i2c_dev->master_mode = false;
-       complete(&i2c_dev->complete);
-
-       return IRQ_HANDLED;
+       return stm32f7_i2c_handle_isr_errs(i2c_dev, status);
 }
 
 static int stm32f7_i2c_wait_polling(struct stm32f7_i2c_dev *i2c_dev)
@@ -2012,23 +2019,27 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
 static int stm32f7_i2c_write_fm_plus_bits(struct stm32f7_i2c_dev *i2c_dev,
                                          bool enable)
 {
-       int ret;
+       int ret = 0;
 
        if (i2c_dev->bus_rate <= I2C_MAX_FAST_MODE_FREQ ||
-           IS_ERR_OR_NULL(i2c_dev->regmap))
+           (!i2c_dev->setup.fmp_cr1_bit && IS_ERR_OR_NULL(i2c_dev->regmap)))
                /* Optional */
                return 0;
 
-       if (i2c_dev->fmp_sreg == i2c_dev->fmp_creg)
-               ret = regmap_update_bits(i2c_dev->regmap,
-                                        i2c_dev->fmp_sreg,
-                                        i2c_dev->fmp_mask,
-                                        enable ? i2c_dev->fmp_mask : 0);
-       else
-               ret = regmap_write(i2c_dev->regmap,
-                                  enable ? i2c_dev->fmp_sreg :
-                                           i2c_dev->fmp_creg,
-                                  i2c_dev->fmp_mask);
+       if (i2c_dev->setup.fmp_cr1_bit) {
+               if (enable)
+                       stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32_I2C_CR1_FMP);
+               else
+                       stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32_I2C_CR1_FMP);
+       } else {
+               if (i2c_dev->fmp_sreg == i2c_dev->fmp_creg)
+                       ret = regmap_update_bits(i2c_dev->regmap, i2c_dev->fmp_sreg,
+                                                i2c_dev->fmp_mask, enable ? i2c_dev->fmp_mask : 0);
+               else
+                       ret = regmap_write(i2c_dev->regmap,
+                                          enable ? i2c_dev->fmp_sreg : i2c_dev->fmp_creg,
+                                          i2c_dev->fmp_mask);
+       }
 
        return ret;
 }
@@ -2162,6 +2173,13 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
        if (!i2c_dev)
                return -ENOMEM;
 
+       setup = of_device_get_match_data(&pdev->dev);
+       if (!setup) {
+               dev_err(&pdev->dev, "Can't get device data\n");
+               return -ENODEV;
+       }
+       i2c_dev->setup = *setup;
+
        i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(i2c_dev->base))
                return PTR_ERR(i2c_dev->base);
@@ -2171,10 +2189,6 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
        if (irq_event < 0)
                return irq_event;
 
-       irq_error = platform_get_irq(pdev, 1);
-       if (irq_error < 0)
-               return irq_error;
-
        i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
                                                    "wakeup-source");
 
@@ -2199,26 +2213,22 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
                                        stm32f7_i2c_isr_event_thread,
                                        IRQF_ONESHOT,
                                        pdev->name, i2c_dev);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to request irq event %i\n",
-                       irq_event);
-               return ret;
-       }
-
-       ret = devm_request_irq(&pdev->dev, irq_error, stm32f7_i2c_isr_error, 0,
-                              pdev->name, i2c_dev);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to request irq error %i\n",
-                       irq_error);
-               return ret;
-       }
-
-       setup = of_device_get_match_data(&pdev->dev);
-       if (!setup) {
-               dev_err(&pdev->dev, "Can't get device data\n");
-               return -ENODEV;
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to request irq event\n");
+
+       if (!i2c_dev->setup.single_it_line) {
+               irq_error = platform_get_irq(pdev, 1);
+               if (irq_error < 0)
+                       return irq_error;
+
+               ret = devm_request_threaded_irq(&pdev->dev, irq_error,
+                                               NULL,
+                                               stm32f7_i2c_isr_error_thread,
+                                               IRQF_ONESHOT,
+                                               pdev->name, i2c_dev);
+               if (ret)
+                       return dev_err_probe(&pdev->dev, ret, "Failed to request irq error\n");
        }
-       i2c_dev->setup = *setup;
 
        ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
        if (ret)
@@ -2226,9 +2236,12 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
 
        /* Setup Fast mode plus if necessary */
        if (i2c_dev->bus_rate > I2C_MAX_FAST_MODE_FREQ) {
-               ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
-               if (ret)
-                       return ret;
+               if (!i2c_dev->setup.fmp_cr1_bit) {
+                       ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
+                       if (ret)
+                               return ret;
+               }
+
                ret = stm32f7_i2c_write_fm_plus_bits(i2c_dev, true);
                if (ret)
                        return ret;
@@ -2507,6 +2520,7 @@ static const struct of_device_id stm32f7_i2c_match[] = {
        { .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup},
        { .compatible = "st,stm32mp15-i2c", .data = &stm32mp15_setup},
        { .compatible = "st,stm32mp13-i2c", .data = &stm32mp13_setup},
+       { .compatible = "st,stm32mp25-i2c", .data = &stm32mp25_setup},
        {},
 };
 MODULE_DEVICE_TABLE(of, stm32f7_i2c_match);
index ad4f09c7f0275057854a004c7b69e3b57524d2d4..7ed29992a97ff87a3f31c296ced8dc9b3df5a08f 100644 (file)
@@ -70,7 +70,7 @@ static struct i2c_algo_bit_data bit_data = {
 
 static struct i2c_adapter vt586b_adapter = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .name           = "VIA i2c",
        .algo_data      = &bit_data,
 };
index 970ccdcbb8896232e79606d1ea8078593f6f1f2a..2cc7bba3b8bf8d2f64c4ad1098927e2a2f5a269f 100644 (file)
@@ -304,7 +304,7 @@ static const struct i2c_algorithm smbus_algorithm = {
 
 static struct i2c_adapter vt596_adapter = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &smbus_algorithm,
 };
 
index 76118abc6e104d083047714e9cb036116ab9ef85..ec2a8da134e56d01be06588551db26bca47caef4 100644 (file)
@@ -74,9 +74,6 @@
 #define MCR_APB_96M            7
 #define MCR_APB_166M           12
 
-#define I2C_MODE_STANDARD      0
-#define I2C_MODE_FAST          1
-
 #define WMT_I2C_TIMEOUT                (msecs_to_jiffies(1000))
 
 struct wmt_i2c_dev {
@@ -85,7 +82,7 @@ struct wmt_i2c_dev {
        struct device           *dev;
        void __iomem            *base;
        struct clk              *clk;
-       int                     mode;
+       u16                     tcr;
        int                     irq;
        u16                     cmd_status;
 };
@@ -109,6 +106,12 @@ static int wmt_i2c_wait_bus_not_busy(struct wmt_i2c_dev *i2c_dev)
 static int wmt_check_status(struct wmt_i2c_dev *i2c_dev)
 {
        int ret = 0;
+       unsigned long wait_result;
+
+       wait_result = wait_for_completion_timeout(&i2c_dev->complete,
+                                               msecs_to_jiffies(500));
+       if (!wait_result)
+               return -ETIMEDOUT;
 
        if (i2c_dev->cmd_status & ISR_NACK_ADDR)
                ret = -EIO;
@@ -119,21 +122,13 @@ static int wmt_check_status(struct wmt_i2c_dev *i2c_dev)
        return ret;
 }
 
-static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
+static int wmt_i2c_write(struct wmt_i2c_dev *i2c_dev, struct i2c_msg *pmsg,
                         int last)
 {
-       struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
-       u16 val, tcr_val;
+       u16 val, tcr_val = i2c_dev->tcr;
        int ret;
-       unsigned long wait_result;
        int xfer_len = 0;
 
-       if (!(pmsg->flags & I2C_M_NOSTART)) {
-               ret = wmt_i2c_wait_bus_not_busy(i2c_dev);
-               if (ret < 0)
-                       return ret;
-       }
-
        if (pmsg->len == 0) {
                /*
                 * We still need to run through the while (..) once, so
@@ -148,20 +143,12 @@ static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
        if (!(pmsg->flags & I2C_M_NOSTART)) {
                val = readw(i2c_dev->base + REG_CR);
                val &= ~CR_TX_END;
-               writew(val, i2c_dev->base + REG_CR);
-
-               val = readw(i2c_dev->base + REG_CR);
                val |= CR_CPU_RDY;
                writew(val, i2c_dev->base + REG_CR);
        }
 
        reinit_completion(&i2c_dev->complete);
 
-       if (i2c_dev->mode == I2C_MODE_STANDARD)
-               tcr_val = TCR_STANDARD_MODE;
-       else
-               tcr_val = TCR_FAST_MODE;
-
        tcr_val |= (TCR_MASTER_WRITE | (pmsg->addr & TCR_SLAVE_ADDR_MASK));
 
        writew(tcr_val, i2c_dev->base + REG_TCR);
@@ -173,12 +160,6 @@ static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
        }
 
        while (xfer_len < pmsg->len) {
-               wait_result = wait_for_completion_timeout(&i2c_dev->complete,
-                                                       msecs_to_jiffies(500));
-
-               if (wait_result == 0)
-                       return -ETIMEDOUT;
-
                ret = wmt_check_status(i2c_dev);
                if (ret)
                        return ret;
@@ -210,47 +191,24 @@ static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
        return 0;
 }
 
-static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
-                       int last)
+static int wmt_i2c_read(struct wmt_i2c_dev *i2c_dev, struct i2c_msg *pmsg)
 {
-       struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
-       u16 val, tcr_val;
+       u16 val, tcr_val = i2c_dev->tcr;
        int ret;
-       unsigned long wait_result;
        u32 xfer_len = 0;
 
-       if (!(pmsg->flags & I2C_M_NOSTART)) {
-               ret = wmt_i2c_wait_bus_not_busy(i2c_dev);
-               if (ret < 0)
-                       return ret;
-       }
-
-       val = readw(i2c_dev->base + REG_CR);
-       val &= ~CR_TX_END;
-       writew(val, i2c_dev->base + REG_CR);
-
        val = readw(i2c_dev->base + REG_CR);
-       val &= ~CR_TX_NEXT_NO_ACK;
-       writew(val, i2c_dev->base + REG_CR);
+       val &= ~(CR_TX_END | CR_TX_NEXT_NO_ACK);
 
-       if (!(pmsg->flags & I2C_M_NOSTART)) {
-               val = readw(i2c_dev->base + REG_CR);
+       if (!(pmsg->flags & I2C_M_NOSTART))
                val |= CR_CPU_RDY;
-               writew(val, i2c_dev->base + REG_CR);
-       }
 
-       if (pmsg->len == 1) {
-               val = readw(i2c_dev->base + REG_CR);
+       if (pmsg->len == 1)
                val |= CR_TX_NEXT_NO_ACK;
-               writew(val, i2c_dev->base + REG_CR);
-       }
 
-       reinit_completion(&i2c_dev->complete);
+       writew(val, i2c_dev->base + REG_CR);
 
-       if (i2c_dev->mode == I2C_MODE_STANDARD)
-               tcr_val = TCR_STANDARD_MODE;
-       else
-               tcr_val = TCR_FAST_MODE;
+       reinit_completion(&i2c_dev->complete);
 
        tcr_val |= TCR_MASTER_READ | (pmsg->addr & TCR_SLAVE_ADDR_MASK);
 
@@ -263,12 +221,6 @@ static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
        }
 
        while (xfer_len < pmsg->len) {
-               wait_result = wait_for_completion_timeout(&i2c_dev->complete,
-                                                       msecs_to_jiffies(500));
-
-               if (!wait_result)
-                       return -ETIMEDOUT;
-
                ret = wmt_check_status(i2c_dev);
                if (ret)
                        return ret;
@@ -276,15 +228,10 @@ static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
                pmsg->buf[xfer_len] = readw(i2c_dev->base + REG_CDR) >> 8;
                xfer_len++;
 
-               if (xfer_len == pmsg->len - 1) {
-                       val = readw(i2c_dev->base + REG_CR);
-                       val |= (CR_TX_NEXT_NO_ACK | CR_CPU_RDY);
-                       writew(val, i2c_dev->base + REG_CR);
-               } else {
-                       val = readw(i2c_dev->base + REG_CR);
-                       val |= CR_CPU_RDY;
-                       writew(val, i2c_dev->base + REG_CR);
-               }
+               val = readw(i2c_dev->base + REG_CR) | CR_CPU_RDY;
+               if (xfer_len == pmsg->len - 1)
+                       val |= CR_TX_NEXT_NO_ACK;
+               writew(val, i2c_dev->base + REG_CR);
        }
 
        return 0;
@@ -295,17 +242,22 @@ static int wmt_i2c_xfer(struct i2c_adapter *adap,
                        int num)
 {
        struct i2c_msg *pmsg;
-       int i, is_last;
+       int i;
        int ret = 0;
+       struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
 
        for (i = 0; ret >= 0 && i < num; i++) {
-               is_last = ((i + 1) == num);
-
                pmsg = &msgs[i];
+               if (!(pmsg->flags & I2C_M_NOSTART)) {
+                       ret = wmt_i2c_wait_bus_not_busy(i2c_dev);
+                       if (ret < 0)
+                               return ret;
+               }
+
                if (pmsg->flags & I2C_M_RD)
-                       ret = wmt_i2c_read(adap, pmsg, is_last);
+                       ret = wmt_i2c_read(i2c_dev, pmsg);
                else
-                       ret = wmt_i2c_write(adap, pmsg, is_last);
+                       ret = wmt_i2c_write(i2c_dev, pmsg, (i + 1) == num);
        }
 
        return (ret < 0) ? ret : i;
@@ -359,10 +311,10 @@ static int wmt_i2c_reset_hardware(struct wmt_i2c_dev *i2c_dev)
        readw(i2c_dev->base + REG_CSR);         /* read clear */
        writew(ISR_WRITE_ALL, i2c_dev->base + REG_ISR);
 
-       if (i2c_dev->mode == I2C_MODE_STANDARD)
-               writew(SCL_TIMEOUT(128) | TR_STD, i2c_dev->base + REG_TR);
-       else
+       if (i2c_dev->tcr == TCR_FAST_MODE)
                writew(SCL_TIMEOUT(128) | TR_HS, i2c_dev->base + REG_TR);
+       else
+               writew(SCL_TIMEOUT(128) | TR_STD, i2c_dev->base + REG_TR);
 
        return 0;
 }
@@ -395,10 +347,9 @@ static int wmt_i2c_probe(struct platform_device *pdev)
                return PTR_ERR(i2c_dev->clk);
        }
 
-       i2c_dev->mode = I2C_MODE_STANDARD;
        err = of_property_read_u32(np, "clock-frequency", &clk_rate);
        if (!err && (clk_rate == I2C_MAX_FAST_MODE_FREQ))
-               i2c_dev->mode = I2C_MODE_FAST;
+               i2c_dev->tcr = TCR_FAST_MODE;
 
        i2c_dev->dev = &pdev->dev;
 
index 83c1db610f54b8c6d64139466e777dccc95c330f..3648382b885a4e0fc1b4248224a70c4a7fbd18eb 100644 (file)
@@ -427,7 +427,7 @@ static struct scx200_acb_iface *scx200_create_iface(const char *text,
        snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index);
        adapter->owner = THIS_MODULE;
        adapter->algo = &scx200_acb_algorithm;
-       adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       adapter->class = I2C_CLASS_HWMON;
        adapter->dev.parent = dev;
 
        mutex_init(&iface->mutex);
index eac90a3cf61a4b7740108974ab114105cb74ae70..3bd48d4b6318fe1fe83e3718c59713f354ff9878 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/acpi.h>
 #include <linux/clk/clk-conf.h>
 #include <linux/completion.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -67,6 +68,8 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
 static DEFINE_STATIC_KEY_FALSE(i2c_trace_msg_key);
 static bool is_registered;
 
+static struct dentry *i2c_debugfs_root;
+
 int i2c_transfer_trace_reg(void)
 {
        static_branch_inc(&i2c_trace_msg_key);
@@ -689,7 +692,7 @@ static struct attribute *i2c_dev_attrs[] = {
 };
 ATTRIBUTE_GROUPS(i2c_dev);
 
-struct bus_type i2c_bus_type = {
+const struct bus_type i2c_bus_type = {
        .name           = "i2c",
        .match          = i2c_device_match,
        .probe          = i2c_device_probe,
@@ -1524,6 +1527,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
                goto out_list;
        }
 
+       adap->debugfs = debugfs_create_dir(dev_name(&adap->dev), i2c_debugfs_root);
+
        res = i2c_setup_smbus_alert(adap);
        if (res)
                goto out_reg;
@@ -1563,6 +1568,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
        return 0;
 
 out_reg:
+       debugfs_remove_recursive(adap->debugfs);
        init_completion(&adap->dev_released);
        device_unregister(&adap->dev);
        wait_for_completion(&adap->dev_released);
@@ -1764,6 +1770,8 @@ void i2c_del_adapter(struct i2c_adapter *adap)
 
        i2c_host_notify_irq_teardown(adap);
 
+       debugfs_remove_recursive(adap->debugfs);
+
        /* wait until all references to the device are gone
         *
         * FIXME: This is old code and should ideally be replaced by an
@@ -2061,6 +2069,8 @@ static int __init i2c_init(void)
 
        is_registered = true;
 
+       i2c_debugfs_root = debugfs_create_dir("i2c", NULL);
+
 #ifdef CONFIG_I2C_COMPAT
        i2c_adapter_compat_class = class_compat_register("i2c-adapter");
        if (!i2c_adapter_compat_class) {
@@ -2099,6 +2109,7 @@ static void __exit i2c_exit(void)
 #ifdef CONFIG_I2C_COMPAT
        class_compat_unregister(i2c_adapter_compat_class);
 #endif
+       debugfs_remove_recursive(i2c_debugfs_root);
        bus_unregister(&i2c_bus_type);
        tracepoint_synchronize_unregister();
 }
index 138c3f5e0093a5c2f8ceeb6ddeca991dd70fbad6..74807c6db596d810fffe035268875a61b1074881 100644 (file)
@@ -308,8 +308,8 @@ EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device);
  * target systems are the same.
  * Restrictions to automatic SPD instantiation:
  *  - Only works if all filled slots have the same memory type
- *  - Only works for DDR2, DDR3 and DDR4 for now
- *  - Only works on systems with 1 to 4 memory slots
+ *  - Only works for DDR, DDR2, DDR3 and DDR4 for now
+ *  - Only works on systems with 1 to 8 memory slots
  */
 #if IS_ENABLED(CONFIG_DMI)
 void i2c_register_spd(struct i2c_adapter *adap)
@@ -354,9 +354,9 @@ void i2c_register_spd(struct i2c_adapter *adap)
        dev_info(&adap->dev, "%d/%d memory slots populated (from DMI)\n",
                 dimm_count, slot_count);
 
-       if (slot_count > 4) {
+       if (slot_count > 8) {
                dev_warn(&adap->dev,
-                        "Systems with more than 4 memory slots not supported yet, not instantiating SPD\n");
+                        "Systems with more than 8 memory slots not supported yet, not instantiating SPD\n");
                return;
        }
 
index d642cad219d9e65b561659cfb895af11c6cba41b..09e7b7bf4c5f71b586372ef43d83ec43f391e573 100644 (file)
@@ -308,7 +308,7 @@ static const struct i2c_algorithm smbus_algorithm = {
 
 static struct i2c_adapter stub_adapter = {
        .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &smbus_algorithm,
        .name           = "SMBus stub driver",
 };
index 9efc1ed01577b1987f3493c686bd8133eb31fcdb..8489971babd37b55ef794ebd8ec8446d9797acca 100644 (file)
@@ -159,7 +159,6 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
        struct regmux *mux;
        struct i2c_adapter *parent;
        struct resource *res;
-       unsigned int class;
        int i, ret, nr;
 
        mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
@@ -213,9 +212,8 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
 
        for (i = 0; i < mux->data.n_values; i++) {
                nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
-               class = mux->data.classes ? mux->data.classes[i] : 0;
 
-               ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
+               ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], 0);
                if (ret)
                        goto err_del_mux_adapters;
        }
index 95caa162706f5543a91fb40cffaa2927b25b64c7..3afa530c5e3220fa96c7af96a4c90f7c673393f4 100644 (file)
@@ -557,6 +557,88 @@ static ssize_t i2c_scl_frequency_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(i2c_scl_frequency);
 
+static int i3c_set_hotjoin(struct i3c_master_controller *master, bool enable)
+{
+       int ret;
+
+       if (!master || !master->ops)
+               return -EINVAL;
+
+       if (!master->ops->enable_hotjoin || !master->ops->disable_hotjoin)
+               return -EINVAL;
+
+       i3c_bus_normaluse_lock(&master->bus);
+
+       if (enable)
+               ret = master->ops->enable_hotjoin(master);
+       else
+               ret = master->ops->disable_hotjoin(master);
+
+       master->hotjoin = enable;
+
+       i3c_bus_normaluse_unlock(&master->bus);
+
+       return ret;
+}
+
+static ssize_t hotjoin_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
+{
+       struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+       int ret;
+       bool res;
+
+       if (!i3cbus->cur_master)
+               return -EINVAL;
+
+       if (kstrtobool(buf, &res))
+               return -EINVAL;
+
+       ret = i3c_set_hotjoin(i3cbus->cur_master->common.master, res);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+/*
+ * i3c_master_enable_hotjoin - Enable hotjoin
+ * @master: I3C master object
+ *
+ * Return: a 0 in case of success, an negative error code otherwise.
+ */
+int i3c_master_enable_hotjoin(struct i3c_master_controller *master)
+{
+       return i3c_set_hotjoin(master, true);
+}
+EXPORT_SYMBOL_GPL(i3c_master_enable_hotjoin);
+
+/*
+ * i3c_master_disable_hotjoin - Disable hotjoin
+ * @master: I3C master object
+ *
+ * Return: a 0 in case of success, an negative error code otherwise.
+ */
+int i3c_master_disable_hotjoin(struct i3c_master_controller *master)
+{
+       return i3c_set_hotjoin(master, false);
+}
+EXPORT_SYMBOL_GPL(i3c_master_disable_hotjoin);
+
+static ssize_t hotjoin_show(struct device *dev, struct device_attribute *da, char *buf)
+{
+       struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+       ssize_t ret;
+
+       i3c_bus_normaluse_lock(i3cbus);
+       ret = sysfs_emit(buf, "%d\n", i3cbus->cur_master->common.master->hotjoin);
+       i3c_bus_normaluse_unlock(i3cbus);
+
+       return ret;
+}
+
+static DEVICE_ATTR_RW(hotjoin);
+
 static struct attribute *i3c_masterdev_attrs[] = {
        &dev_attr_mode.attr,
        &dev_attr_current_master.attr,
@@ -567,6 +649,7 @@ static struct attribute *i3c_masterdev_attrs[] = {
        &dev_attr_pid.attr,
        &dev_attr_dynamic_address.attr,
        &dev_attr_hdrcap.attr,
+       &dev_attr_hotjoin.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(i3c_masterdev);
@@ -1130,8 +1213,16 @@ static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
 
        i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1);
        ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
-       if (ret)
-               goto out;
+       if (ret) {
+               /*
+                * Retry when the device does not support max read turnaround
+                * while expecting shorter length from this CCC command.
+                */
+               dest.payload.len -= 3;
+               ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+               if (ret)
+                       goto out;
+       }
 
        if (dest.payload.len != 2 && dest.payload.len != 5) {
                ret = -EIO;
index bcbe8f914149b29d69147e1ccbefbf45339ba314..c1627f3552ce3ebffca5a222407f4b8bf3dfb6f8 100644 (file)
@@ -76,7 +76,8 @@
 #define PRESCL_CTRL0                   0x14
 #define PRESCL_CTRL0_I2C(x)            ((x) << 16)
 #define PRESCL_CTRL0_I3C(x)            (x)
-#define PRESCL_CTRL0_MAX               GENMASK(9, 0)
+#define PRESCL_CTRL0_I3C_MAX           GENMASK(9, 0)
+#define PRESCL_CTRL0_I2C_MAX           GENMASK(15, 0)
 
 #define PRESCL_CTRL1                   0x18
 #define PRESCL_CTRL1_PP_LOW_MASK       GENMASK(15, 8)
@@ -1233,7 +1234,7 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
                return -EINVAL;
 
        pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
-       if (pres > PRESCL_CTRL0_MAX)
+       if (pres > PRESCL_CTRL0_I3C_MAX)
                return -ERANGE;
 
        bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
@@ -1246,7 +1247,7 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
        max_i2cfreq = bus->scl_rate.i2c;
 
        pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
-       if (pres > PRESCL_CTRL0_MAX)
+       if (pres > PRESCL_CTRL0_I2C_MAX)
                return -ERANGE;
 
        bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
index 2b2323aa671416075454b781212ca36b66360a5a..638b054d6c9275eb7e3d7da834a5376fb2b4472f 100644 (file)
@@ -298,7 +298,7 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
        unsigned int dcr, bcr;
        DECLARE_COMPLETION_ONSTACK(done);
 
-       xfer = hci_alloc_xfer(2);
+       xfer = hci_alloc_xfer(1);
        if (!xfer)
                return -ENOMEM;
 
@@ -339,12 +339,13 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
                        ret = -ETIME;
                        break;
                }
-               if (RESP_STATUS(xfer[0].response) == RESP_ERR_NACK &&
+               if ((RESP_STATUS(xfer->response) == RESP_ERR_ADDR_HEADER ||
+                    RESP_STATUS(xfer->response) == RESP_ERR_NACK) &&
                    RESP_DATA_LENGTH(xfer->response) == 1) {
                        ret = 0;  /* no more devices to be assigned */
                        break;
                }
-               if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
+               if (RESP_STATUS(xfer->response) != RESP_SUCCESS) {
                        ret = -EIO;
                        break;
                }
index 1ae56a5699c6b63eee19e73031d28658e5e1d3ca..d7e966a255833730c40728725e5f6c3817083cd9 100644 (file)
@@ -245,7 +245,14 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
                if (ccc->rnw)
                        ccc->dests[i - prefixed].payload.len =
                                RESP_DATA_LENGTH(xfer[i].response);
-               if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+               switch (RESP_STATUS(xfer[i].response)) {
+               case RESP_SUCCESS:
+                       continue;
+               case RESP_ERR_ADDR_HEADER:
+               case RESP_ERR_NACK:
+                       ccc->err = I3C_ERROR_M2;
+                       fallthrough;
+               default:
                        ret = -EIO;
                        goto out;
                }
@@ -269,6 +276,34 @@ static int i3c_hci_daa(struct i3c_master_controller *m)
        return hci->cmd->perform_daa(hci);
 }
 
+static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
+                                      struct hci_xfer *xfer)
+{
+       if (hci->io != &mipi_i3c_hci_dma ||
+           xfer->data == NULL || !is_vmalloc_addr(xfer->data))
+               return 0;
+
+       if (xfer->rnw)
+               xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
+       else
+               xfer->bounce_buf = kmemdup(xfer->data,
+                                          xfer->data_len, GFP_KERNEL);
+
+       return xfer->bounce_buf == NULL ? -ENOMEM : 0;
+}
+
+static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
+                                      struct hci_xfer *xfer)
+{
+       if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
+               return;
+
+       if (xfer->rnw)
+               memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);
+
+       kfree(xfer->bounce_buf);
+}
+
 static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
                              struct i3c_priv_xfer *i3c_xfers,
                              int nxfers)
@@ -302,6 +337,9 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
                }
                hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
                xfer[i].cmd_desc[0] |= CMD_0_ROC;
+               ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
+               if (ret)
+                       goto out;
        }
        last = i - 1;
        xfer[last].cmd_desc[0] |= CMD_0_TOC;
@@ -325,6 +363,9 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
        }
 
 out:
+       for (i = 0; i < nxfers; i++)
+               i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);
+
        hci_free_xfer(xfer, nxfers);
        return ret;
 }
@@ -350,6 +391,9 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
                xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
                hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
                xfer[i].cmd_desc[0] |= CMD_0_ROC;
+               ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
+               if (ret)
+                       goto out;
        }
        last = i - 1;
        xfer[last].cmd_desc[0] |= CMD_0_TOC;
@@ -371,6 +415,9 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
        }
 
 out:
+       for (i = 0; i < nxfers; i++)
+               i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);
+
        hci_free_xfer(xfer, nxfers);
        return ret;
 }
index c805a8497319dbb76f51a97e107ab9206e2a3515..4e01a95cc4d0ad2485caf13fa6776374ce6d0e91 100644 (file)
@@ -362,6 +362,7 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
        struct hci_rh_data *rh;
        unsigned int i, ring, enqueue_ptr;
        u32 op1_val, op2_val;
+       void *buf;
 
        /* For now we only use ring 0 */
        ring = 0;
@@ -390,9 +391,10 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
 
                /* 2nd and 3rd words of Data Buffer Descriptor Structure */
                if (xfer->data) {
+                       buf = xfer->bounce_buf ? xfer->bounce_buf : xfer->data;
                        xfer->data_dma =
                                dma_map_single(&hci->master.dev,
-                                              xfer->data,
+                                              buf,
                                               xfer->data_len,
                                               xfer->rnw ?
                                                  DMA_FROM_DEVICE :
index f109923f6c3f3e88139288e2bdfd9aafe798d54b..f94d95e024becc41106166073fd62edee55d032e 100644 (file)
@@ -90,6 +90,7 @@ struct hci_xfer {
                struct {
                        /* DMA specific */
                        dma_addr_t data_dma;
+                       void *bounce_buf;
                        int ring_number;
                        int ring_entry;
                };
index cf703c00f63349c7b989efe08365f080a119e2c2..5ee4db68988e2388a56c18d22b1d882b0c13adac 100644 (file)
 /* This parameter depends on the implementation and may be tuned */
 #define SVC_I3C_FIFO_SIZE 16
 
+#define SVC_I3C_EVENT_IBI      BIT(0)
+#define SVC_I3C_EVENT_HOTJOIN  BIT(1)
+
 struct svc_i3c_cmd {
        u8 addr;
        bool rnw;
        u8 *in;
        const void *out;
        unsigned int len;
-       unsigned int read_len;
+       unsigned int actual_len;
+       struct i3c_priv_xfer *xfer;
        bool continued;
 };
 
@@ -177,6 +181,7 @@ struct svc_i3c_regs_save {
  * @ibi.tbq_slot: To be queued IBI slot
  * @ibi.lock: IBI lock
  * @lock: Transfer lock, protect between IBI work thread and callbacks from master
+ * @enabled_events: Bit masks for enable events (IBI, HotJoin).
  */
 struct svc_i3c_master {
        struct i3c_master_controller base;
@@ -206,6 +211,7 @@ struct svc_i3c_master {
                spinlock_t lock;
        } ibi;
        struct mutex lock;
+       int enabled_events;
 };
 
 /**
@@ -220,6 +226,11 @@ struct svc_i3c_i2c_dev_data {
        struct i3c_generic_ibi_pool *ibi_pool;
 };
 
+static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
+{
+       return !!(master->enabled_events & mask);
+}
+
 static bool svc_i3c_master_error(struct svc_i3c_master *master)
 {
        u32 mstatus, merrwarn;
@@ -429,13 +440,16 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
        switch (ibitype) {
        case SVC_I3C_MSTATUS_IBITYPE_IBI:
                dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
-               if (!dev)
+               if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
                        svc_i3c_master_nack_ibi(master);
                else
                        svc_i3c_master_handle_ibi(master, dev);
                break;
        case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
-               svc_i3c_master_ack_ibi(master, false);
+               if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
+                       svc_i3c_master_ack_ibi(master, false);
+               else
+                       svc_i3c_master_nack_ibi(master);
                break;
        case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
                svc_i3c_master_nack_ibi(master);
@@ -472,7 +486,9 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
                svc_i3c_master_emit_stop(master);
                break;
        case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
-               queue_work(master->base.wq, &master->hj_work);
+               svc_i3c_master_emit_stop(master);
+               if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
+                       queue_work(master->base.wq, &master->hj_work);
                break;
        case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
        default:
@@ -1024,7 +1040,7 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
 static int svc_i3c_master_xfer(struct svc_i3c_master *master,
                               bool rnw, unsigned int xfer_type, u8 addr,
                               u8 *in, const u8 *out, unsigned int xfer_len,
-                              unsigned int *read_len, bool continued)
+                              unsigned int *actual_len, bool continued)
 {
        u32 reg;
        int ret;
@@ -1037,7 +1053,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
               SVC_I3C_MCTRL_IBIRESP_NACK |
               SVC_I3C_MCTRL_DIR(rnw) |
               SVC_I3C_MCTRL_ADDR(addr) |
-              SVC_I3C_MCTRL_RDTERM(*read_len),
+              SVC_I3C_MCTRL_RDTERM(*actual_len),
               master->regs + SVC_I3C_MCTRL);
 
        ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
@@ -1047,6 +1063,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
 
        if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
                ret = -ENXIO;
+               *actual_len = 0;
                goto emit_stop;
        }
 
@@ -1064,6 +1081,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
         */
        if (SVC_I3C_MSTATUS_IBIWON(reg)) {
                ret = -ENXIO;
+               *actual_len = 0;
                goto emit_stop;
        }
 
@@ -1075,7 +1093,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
                goto emit_stop;
 
        if (rnw)
-               *read_len = ret;
+               *actual_len = ret;
 
        ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
                                 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
@@ -1157,8 +1175,12 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
 
                ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
                                          cmd->addr, cmd->in, cmd->out,
-                                         cmd->len, &cmd->read_len,
+                                         cmd->len, &cmd->actual_len,
                                          cmd->continued);
+               /* cmd->xfer is NULL if I2C or CCC transfer */
+               if (cmd->xfer)
+                       cmd->xfer->actual_len = cmd->actual_len;
+
                if (ret)
                        break;
        }
@@ -1243,7 +1265,7 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
        cmd->in = NULL;
        cmd->out = buf;
        cmd->len = xfer_len;
-       cmd->read_len = 0;
+       cmd->actual_len = 0;
        cmd->continued = false;
 
        mutex_lock(&master->lock);
@@ -1263,7 +1285,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
                                              struct i3c_ccc_cmd *ccc)
 {
        unsigned int xfer_len = ccc->dests[0].payload.len;
-       unsigned int read_len = ccc->rnw ? xfer_len : 0;
+       unsigned int actual_len = ccc->rnw ? xfer_len : 0;
        struct svc_i3c_xfer *xfer;
        struct svc_i3c_cmd *cmd;
        int ret;
@@ -1281,7 +1303,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
        cmd->in = NULL;
        cmd->out = &ccc->id;
        cmd->len = 1;
-       cmd->read_len = 0;
+       cmd->actual_len = 0;
        cmd->continued = true;
 
        /* Directed message */
@@ -1291,7 +1313,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
        cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
        cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data,
        cmd->len = xfer_len;
-       cmd->read_len = read_len;
+       cmd->actual_len = actual_len;
        cmd->continued = false;
 
        mutex_lock(&master->lock);
@@ -1300,8 +1322,8 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
                svc_i3c_master_dequeue_xfer(master, xfer);
        mutex_unlock(&master->lock);
 
-       if (cmd->read_len != xfer_len)
-               ccc->dests[0].payload.len = cmd->read_len;
+       if (cmd->actual_len != xfer_len)
+               ccc->dests[0].payload.len = cmd->actual_len;
 
        ret = xfer->ret;
        svc_i3c_master_free_xfer(xfer);
@@ -1346,12 +1368,13 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
        for (i = 0; i < nxfers; i++) {
                struct svc_i3c_cmd *cmd = &xfer->cmds[i];
 
+               cmd->xfer = &xfers[i];
                cmd->addr = master->addrs[data->index];
                cmd->rnw = xfers[i].rnw;
                cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
                cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
                cmd->len = xfers[i].len;
-               cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
+               cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
                cmd->continued = (i + 1) < nxfers;
        }
 
@@ -1391,7 +1414,7 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
                cmd->in = cmd->rnw ? xfers[i].buf : NULL;
                cmd->out = cmd->rnw ? NULL : xfers[i].buf;
                cmd->len = xfers[i].len;
-               cmd->read_len = cmd->rnw ? xfers[i].len : 0;
+               cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
                cmd->continued = (i + 1 < nxfers);
        }
 
@@ -1472,6 +1495,7 @@ static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
                return ret;
        }
 
+       master->enabled_events |= SVC_I3C_EVENT_IBI;
        svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
 
        return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
@@ -1483,7 +1507,9 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
        struct svc_i3c_master *master = to_svc_i3c_master(m);
        int ret;
 
-       svc_i3c_master_disable_interrupts(master);
+       master->enabled_events &= ~SVC_I3C_EVENT_IBI;
+       if (!master->enabled_events)
+               svc_i3c_master_disable_interrupts(master);
 
        ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
 
@@ -1493,6 +1519,39 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
        return ret;
 }
 
+static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
+{
+       struct svc_i3c_master *master = to_svc_i3c_master(m);
+       int ret;
+
+       ret = pm_runtime_resume_and_get(master->dev);
+       if (ret < 0) {
+               dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+               return ret;
+       }
+
+       master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;
+
+       svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+
+       return 0;
+}
+
+static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
+{
+       struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+       master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;
+
+       if (!master->enabled_events)
+               svc_i3c_master_disable_interrupts(master);
+
+       pm_runtime_mark_last_busy(master->dev);
+       pm_runtime_put_autosuspend(master->dev);
+
+       return 0;
+}
+
 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
                                            struct i3c_ibi_slot *slot)
 {
@@ -1519,6 +1578,8 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
        .recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
        .enable_ibi = svc_i3c_master_enable_ibi,
        .disable_ibi = svc_i3c_master_disable_ibi,
+       .enable_hotjoin = svc_i3c_master_enable_hotjoin,
+       .disable_hotjoin = svc_i3c_master_disable_hotjoin,
 };
 
 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
index f113dae590483a97fcdf5070ed0fada54e870ac9..91adcac875a4130d75d887b80464b76dd1f422a0 100644 (file)
@@ -260,10 +260,11 @@ config BMI088_ACCEL
        select REGMAP
        select BMI088_ACCEL_SPI
        help
-         Say yes here to build support for the Bosch BMI088 accelerometer.
+         Say yes here to build support for the following Bosch accelerometers:
+         BMI088, BMI085, BMI090L. Note that all of these are combo modules that
+         include both accelerometer and gyroscope.
 
-         This is a combo module with both accelerometer and gyroscope. This
-         driver only implements the accelerometer part, which has its own
+         This driver only implements the accelerometer part, which has its own
          address and register map. BMG160 provides the gyroscope driver.
 
 config BMI088_ACCEL_SPI
index 84edcc78d7961c2a652a3442e6da0d71fa5937bd..4d989708e6c360a0c8476c92e6590ee8169e8b8c 100644 (file)
@@ -2,6 +2,8 @@
 /*
  * 3-axis accelerometer driver supporting following Bosch-Sensortec chips:
  *  - BMI088
+ *  - BMI085
+ *  - BMI090L
  *
  * Copyright (c) 2018-2021, Topic Embedded Products
  */
index ee540edd84126b731f8731eef5ca1501b35d9aee..7b419a7b2478afc6adf1f64264c9a18a7932952f 100644 (file)
@@ -2,6 +2,8 @@
 /*
  * 3-axis accelerometer driver supporting following Bosch-Sensortec chips:
  *  - BMI088
+ *  - BMI085
+ *  - BMI090L
  *
  * Copyright (c) 2018-2020, Topic Embedded Products
  */
index 35f9867da12c390642ba424fc2afa1e415a006aa..3b73c509bd68ef55094e90284b14466b98e48c9e 100644 (file)
@@ -36,13 +36,29 @@ config AD4130
          To compile this driver as a module, choose M here: the module will be
          called ad4130.
 
+config AD7091R
+       tristate
+
 config AD7091R5
        tristate "Analog Devices AD7091R5 ADC Driver"
        depends on I2C
+       select AD7091R
        select REGMAP_I2C
        help
          Say yes here to build support for Analog Devices AD7091R-5 ADC.
 
+config AD7091R8
+       tristate "Analog Devices AD7091R8 ADC Driver"
+       depends on SPI
+       select AD7091R
+       select REGMAP_SPI
+       help
+         Say yes here to build support for Analog Devices AD7091R-2, AD7091R-4,
+         and AD7091R-8 ADC.
+
+         To compile this driver as a module, choose M here: the module will be
+         called ad7091r8.
+
 config AD7124
        tristate "Analog Devices AD7124 and similar sigma-delta ADCs driver"
        depends on SPI_MASTER
@@ -292,7 +308,7 @@ config ADI_AXI_ADC
        select IIO_BUFFER
        select IIO_BUFFER_HW_CONSUMER
        select IIO_BUFFER_DMAENGINE
-       depends on HAS_IOMEM
+       select REGMAP_MMIO
        depends on OF
        help
          Say yes here to build support for Analog Devices Generic
@@ -745,6 +761,17 @@ config MAX1363
          To compile this driver as a module, choose M here: the module will be
          called max1363.
 
+config MAX34408
+       tristate "Maxim max34408/max34409 ADC driver"
+       depends on I2C
+       help
+         Say yes here to build support for Maxim max34408/max34409 current sense
+         monitor with 8-bit ADC interface with overcurrent delay/threshold and
+         shutdown delay.
+
+         To compile this driver as a module, choose M here: the module will be
+         called max34408.
+
 config MAX77541_ADC
        tristate "Analog Devices MAX77541 ADC driver"
        depends on MFD_MAX77541
index bee11d442af4502cd98b1ddbb1b8458e0c628f3e..d2fda54a3259c9766766d4bd80f1500d140260c2 100644 (file)
@@ -7,7 +7,9 @@
 obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
 obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
 obj-$(CONFIG_AD4130) += ad4130.o
-obj-$(CONFIG_AD7091R5) += ad7091r5.o ad7091r-base.o
+obj-$(CONFIG_AD7091R) += ad7091r-base.o
+obj-$(CONFIG_AD7091R5) += ad7091r5.o
+obj-$(CONFIG_AD7091R8) += ad7091r8.o
 obj-$(CONFIG_AD7124) += ad7124.o
 obj-$(CONFIG_AD7192) += ad7192.o
 obj-$(CONFIG_AD7266) += ad7266.o
@@ -68,6 +70,7 @@ obj-$(CONFIG_MAX11205) += max11205.o
 obj-$(CONFIG_MAX11410) += max11410.o
 obj-$(CONFIG_MAX1241) += max1241.o
 obj-$(CONFIG_MAX1363) += max1363.o
+obj-$(CONFIG_MAX34408) += max34408.o
 obj-$(CONFIG_MAX77541_ADC) += max77541-adc.o
 obj-$(CONFIG_MAX9611) += max9611.o
 obj-$(CONFIG_MCP320X) += mcp320x.o
index 8e252cde735b999c6877308fcdaf3f2084f768ce..f4255b91acfc9849df2986f0c619d3f816f3b57c 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/bitfield.h>
 #include <linux/iio/events.h>
 #include <linux/iio/iio.h>
 #include <linux/interrupt.h>
 
 #include "ad7091r-base.h"
 
-#define AD7091R_REG_RESULT  0
-#define AD7091R_REG_CHANNEL 1
-#define AD7091R_REG_CONF    2
-#define AD7091R_REG_ALERT   3
-#define AD7091R_REG_CH_LOW_LIMIT(ch) ((ch) * 3 + 4)
-#define AD7091R_REG_CH_HIGH_LIMIT(ch) ((ch) * 3 + 5)
-#define AD7091R_REG_CH_HYSTERESIS(ch) ((ch) * 3 + 6)
-
-/* AD7091R_REG_RESULT */
-#define AD7091R_REG_RESULT_CH_ID(x)        (((x) >> 13) & 0x3)
-#define AD7091R_REG_RESULT_CONV_RESULT(x)   ((x) & 0xfff)
-
-/* AD7091R_REG_CONF */
-#define AD7091R_REG_CONF_AUTO   BIT(8)
-#define AD7091R_REG_CONF_CMD    BIT(10)
-
-#define AD7091R_REG_CONF_MODE_MASK  \
-       (AD7091R_REG_CONF_AUTO | AD7091R_REG_CONF_CMD)
-
-enum ad7091r_mode {
-       AD7091R_MODE_SAMPLE,
-       AD7091R_MODE_COMMAND,
-       AD7091R_MODE_AUTOCYCLE,
+const struct iio_event_spec ad7091r_events[] = {
+       {
+               .type = IIO_EV_TYPE_THRESH,
+               .dir = IIO_EV_DIR_RISING,
+               .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+                                BIT(IIO_EV_INFO_ENABLE),
+       },
+       {
+               .type = IIO_EV_TYPE_THRESH,
+               .dir = IIO_EV_DIR_FALLING,
+               .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+                                BIT(IIO_EV_INFO_ENABLE),
+       },
+       {
+               .type = IIO_EV_TYPE_THRESH,
+               .dir = IIO_EV_DIR_EITHER,
+               .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+       },
 };
-
-struct ad7091r_state {
-       struct device *dev;
-       struct regmap *map;
-       struct regulator *vref;
-       const struct ad7091r_chip_info *chip_info;
-       enum ad7091r_mode mode;
-       struct mutex lock; /*lock to prevent concurent reads */
-};
-
-static int ad7091r_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
-{
-       int ret, conf;
-
-       switch (mode) {
-       case AD7091R_MODE_SAMPLE:
-               conf = 0;
-               break;
-       case AD7091R_MODE_COMMAND:
-               conf = AD7091R_REG_CONF_CMD;
-               break;
-       case AD7091R_MODE_AUTOCYCLE:
-               conf = AD7091R_REG_CONF_AUTO;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
-                                AD7091R_REG_CONF_MODE_MASK, conf);
-       if (ret)
-               return ret;
-
-       st->mode = mode;
-
-       return 0;
-}
+EXPORT_SYMBOL_NS_GPL(ad7091r_events, IIO_AD7091R);
 
 static int ad7091r_set_channel(struct ad7091r_state *st, unsigned int channel)
 {
@@ -110,7 +70,7 @@ static int ad7091r_read_one(struct iio_dev *iio_dev,
        if (ret)
                return ret;
 
-       if (AD7091R_REG_RESULT_CH_ID(val) != channel)
+       if (st->chip_info->reg_result_chan_id(val) != channel)
                return -EIO;
 
        *read_val = AD7091R_REG_RESULT_CONV_RESULT(val);
@@ -168,14 +128,148 @@ unlock:
        return ret;
 }
 
+static int ad7091r_read_event_config(struct iio_dev *indio_dev,
+                                    const struct iio_chan_spec *chan,
+                                    enum iio_event_type type,
+                                    enum iio_event_direction dir)
+{
+       struct ad7091r_state *st = iio_priv(indio_dev);
+       int val, ret;
+
+       switch (dir) {
+       case IIO_EV_DIR_RISING:
+               ret = regmap_read(st->map,
+                                 AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
+                                 &val);
+               if (ret)
+                       return ret;
+               return val != AD7091R_HIGH_LIMIT;
+       case IIO_EV_DIR_FALLING:
+               ret = regmap_read(st->map,
+                                 AD7091R_REG_CH_LOW_LIMIT(chan->channel),
+                                 &val);
+               if (ret)
+                       return ret;
+               return val != AD7091R_LOW_LIMIT;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int ad7091r_write_event_config(struct iio_dev *indio_dev,
+                                     const struct iio_chan_spec *chan,
+                                     enum iio_event_type type,
+                                     enum iio_event_direction dir, int state)
+{
+       struct ad7091r_state *st = iio_priv(indio_dev);
+
+       if (state) {
+               return regmap_set_bits(st->map, AD7091R_REG_CONF,
+                                      AD7091R_REG_CONF_ALERT_EN);
+       } else {
+               /*
+                * Set thresholds either to 0 or to 2^12 - 1 as appropriate to
+                * prevent alerts and thus disable event generation.
+                */
+               switch (dir) {
+               case IIO_EV_DIR_RISING:
+                       return regmap_write(st->map,
+                                           AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
+                                           AD7091R_HIGH_LIMIT);
+               case IIO_EV_DIR_FALLING:
+                       return regmap_write(st->map,
+                                           AD7091R_REG_CH_LOW_LIMIT(chan->channel),
+                                           AD7091R_LOW_LIMIT);
+               default:
+                       return -EINVAL;
+               }
+       }
+}
+
+static int ad7091r_read_event_value(struct iio_dev *indio_dev,
+                                   const struct iio_chan_spec *chan,
+                                   enum iio_event_type type,
+                                   enum iio_event_direction dir,
+                                   enum iio_event_info info, int *val, int *val2)
+{
+       struct ad7091r_state *st = iio_priv(indio_dev);
+       int ret;
+
+       switch (info) {
+       case IIO_EV_INFO_VALUE:
+               switch (dir) {
+               case IIO_EV_DIR_RISING:
+                       ret = regmap_read(st->map,
+                                         AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
+                                         val);
+                       if (ret)
+                               return ret;
+                       return IIO_VAL_INT;
+               case IIO_EV_DIR_FALLING:
+                       ret = regmap_read(st->map,
+                                         AD7091R_REG_CH_LOW_LIMIT(chan->channel),
+                                         val);
+                       if (ret)
+                               return ret;
+                       return IIO_VAL_INT;
+               default:
+                       return -EINVAL;
+               }
+       case IIO_EV_INFO_HYSTERESIS:
+               ret = regmap_read(st->map,
+                                 AD7091R_REG_CH_HYSTERESIS(chan->channel),
+                                 val);
+               if (ret)
+                       return ret;
+               return IIO_VAL_INT;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int ad7091r_write_event_value(struct iio_dev *indio_dev,
+                                    const struct iio_chan_spec *chan,
+                                    enum iio_event_type type,
+                                    enum iio_event_direction dir,
+                                    enum iio_event_info info, int val, int val2)
+{
+       struct ad7091r_state *st = iio_priv(indio_dev);
+
+       switch (info) {
+       case IIO_EV_INFO_VALUE:
+               switch (dir) {
+               case IIO_EV_DIR_RISING:
+                       return regmap_write(st->map,
+                                           AD7091R_REG_CH_HIGH_LIMIT(chan->channel),
+                                           val);
+               case IIO_EV_DIR_FALLING:
+                       return regmap_write(st->map,
+                                           AD7091R_REG_CH_LOW_LIMIT(chan->channel),
+                                           val);
+               default:
+                       return -EINVAL;
+               }
+       case IIO_EV_INFO_HYSTERESIS:
+               return regmap_write(st->map,
+                                   AD7091R_REG_CH_HYSTERESIS(chan->channel),
+                                   val);
+       default:
+               return -EINVAL;
+       }
+}
+
 static const struct iio_info ad7091r_info = {
        .read_raw = ad7091r_read_raw,
+       .read_event_config = &ad7091r_read_event_config,
+       .write_event_config = &ad7091r_write_event_config,
+       .read_event_value = &ad7091r_read_event_value,
+       .write_event_value = &ad7091r_write_event_value,
 };
 
 static irqreturn_t ad7091r_event_handler(int irq, void *private)
 {
-       struct ad7091r_state *st = (struct ad7091r_state *) private;
-       struct iio_dev *iio_dev = dev_get_drvdata(st->dev);
+       struct iio_dev *iio_dev = private;
+       struct ad7091r_state *st = iio_priv(iio_dev);
        unsigned int i, read_val;
        int ret;
        s64 timestamp = iio_get_time_ns(iio_dev);
@@ -207,9 +301,8 @@ static void ad7091r_remove(void *data)
        regulator_disable(st->vref);
 }
 
-int ad7091r_probe(struct device *dev, const char *name,
-               const struct ad7091r_chip_info *chip_info,
-               struct regmap *map, int irq)
+int ad7091r_probe(struct device *dev, const struct ad7091r_init_info *init_info,
+                 int irq)
 {
        struct iio_dev *iio_dev;
        struct ad7091r_state *st;
@@ -221,29 +314,54 @@ int ad7091r_probe(struct device *dev, const char *name,
 
        st = iio_priv(iio_dev);
        st->dev = dev;
-       st->chip_info = chip_info;
-       st->map = map;
+       init_info->init_adc_regmap(st, init_info->regmap_config);
+       if (IS_ERR(st->map))
+               return dev_err_probe(st->dev, PTR_ERR(st->map),
+                                    "Error initializing regmap\n");
 
-       iio_dev->name = name;
        iio_dev->info = &ad7091r_info;
        iio_dev->modes = INDIO_DIRECT_MODE;
 
-       iio_dev->num_channels = chip_info->num_channels;
-       iio_dev->channels = chip_info->channels;
+       if (init_info->setup) {
+               ret = init_info->setup(st);
+               if (ret < 0)
+                       return ret;
+       }
 
        if (irq) {
+               st->chip_info = init_info->info_irq;
+               ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
+                                        AD7091R_REG_CONF_ALERT_EN, BIT(4));
+               if (ret)
+                       return ret;
+
                ret = devm_request_threaded_irq(dev, irq, NULL,
-                               ad7091r_event_handler,
-                               IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, st);
+                                               ad7091r_event_handler,
+                                               IRQF_TRIGGER_FALLING |
+                                               IRQF_ONESHOT,
+                                               st->chip_info->name, iio_dev);
                if (ret)
                        return ret;
+       } else {
+               st->chip_info = init_info->info_no_irq;
        }
 
+       iio_dev->name = st->chip_info->name;
+       iio_dev->num_channels = st->chip_info->num_channels;
+       iio_dev->channels = st->chip_info->channels;
+
        st->vref = devm_regulator_get_optional(dev, "vref");
        if (IS_ERR(st->vref)) {
                if (PTR_ERR(st->vref) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
+
                st->vref = NULL;
+               /* Enable internal vref */
+               ret = regmap_set_bits(st->map, AD7091R_REG_CONF,
+                                     AD7091R_REG_CONF_INT_VREF);
+               if (ret)
+                       return dev_err_probe(st->dev, ret,
+                                            "Error on enable internal reference\n");
        } else {
                ret = regulator_enable(st->vref);
                if (ret)
@@ -254,7 +372,7 @@ int ad7091r_probe(struct device *dev, const char *name,
        }
 
        /* Use command mode by default to convert only desired channels*/
-       ret = ad7091r_set_mode(st, AD7091R_MODE_COMMAND);
+       ret = st->chip_info->set_mode(st, AD7091R_MODE_COMMAND);
        if (ret)
                return ret;
 
@@ -262,7 +380,7 @@ int ad7091r_probe(struct device *dev, const char *name,
 }
 EXPORT_SYMBOL_NS_GPL(ad7091r_probe, IIO_AD7091R);
 
-static bool ad7091r_writeable_reg(struct device *dev, unsigned int reg)
+bool ad7091r_writeable_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
        case AD7091R_REG_RESULT:
@@ -272,8 +390,9 @@ static bool ad7091r_writeable_reg(struct device *dev, unsigned int reg)
                return true;
        }
 }
+EXPORT_SYMBOL_NS_GPL(ad7091r_writeable_reg, IIO_AD7091R);
 
-static bool ad7091r_volatile_reg(struct device *dev, unsigned int reg)
+bool ad7091r_volatile_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
        case AD7091R_REG_RESULT:
@@ -283,14 +402,7 @@ static bool ad7091r_volatile_reg(struct device *dev, unsigned int reg)
                return false;
        }
 }
-
-const struct regmap_config ad7091r_regmap_config = {
-       .reg_bits = 8,
-       .val_bits = 16,
-       .writeable_reg = ad7091r_writeable_reg,
-       .volatile_reg = ad7091r_volatile_reg,
-};
-EXPORT_SYMBOL_NS_GPL(ad7091r_regmap_config, IIO_AD7091R);
+EXPORT_SYMBOL_NS_GPL(ad7091r_volatile_reg, IIO_AD7091R);
 
 MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
 MODULE_DESCRIPTION("Analog Devices AD7091Rx multi-channel converters");
index 509748aef9b196f1649e419d169b9a04a5081046..696bf7a897bb51de16477b5b802919d1ae21124c 100644 (file)
@@ -8,19 +8,92 @@
 #ifndef __DRIVERS_IIO_ADC_AD7091R_BASE_H__
 #define __DRIVERS_IIO_ADC_AD7091R_BASE_H__
 
+#include <linux/regmap.h>
+
+#define AD7091R_REG_RESULT  0
+#define AD7091R_REG_CHANNEL 1
+#define AD7091R_REG_CONF    2
+#define AD7091R_REG_ALERT   3
+#define AD7091R_REG_CH_LOW_LIMIT(ch) ((ch) * 3 + 4)
+#define AD7091R_REG_CH_HIGH_LIMIT(ch) ((ch) * 3 + 5)
+#define AD7091R_REG_CH_HYSTERESIS(ch) ((ch) * 3 + 6)
+
+/* AD7091R_REG_RESULT */
+#define AD7091R5_REG_RESULT_CH_ID(x)       (((x) >> 13) & 0x3)
+#define AD7091R8_REG_RESULT_CH_ID(x)       (((x) >> 13) & 0x7)
+#define AD7091R_REG_RESULT_CONV_RESULT(x)   ((x) & 0xfff)
+
+/* AD7091R_REG_CONF */
+#define AD7091R_REG_CONF_INT_VREF      BIT(0)
+#define AD7091R_REG_CONF_ALERT_EN      BIT(4)
+#define AD7091R_REG_CONF_AUTO          BIT(8)
+#define AD7091R_REG_CONF_CMD           BIT(10)
+
+#define AD7091R_REG_CONF_MODE_MASK  \
+       (AD7091R_REG_CONF_AUTO | AD7091R_REG_CONF_CMD)
+
+/* AD7091R_REG_CH_LIMIT */
+#define AD7091R_HIGH_LIMIT             0xFFF
+#define AD7091R_LOW_LIMIT              0x0
+
+#define AD7091R_CHANNEL(idx, bits, ev, num_ev) {                       \
+       .type = IIO_VOLTAGE,                                            \
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),                   \
+       .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),           \
+       .indexed = 1,                                                   \
+       .channel = idx,                                                 \
+       .event_spec = ev,                                               \
+       .num_event_specs = num_ev,                                      \
+       .scan_type.storagebits = 16,                                    \
+       .scan_type.realbits = bits,                                     \
+}
+
 struct device;
-struct ad7091r_state;
+struct gpio_desc;
+
+enum ad7091r_mode {
+       AD7091R_MODE_SAMPLE,
+       AD7091R_MODE_COMMAND,
+       AD7091R_MODE_AUTOCYCLE,
+};
+
+struct ad7091r_state {
+       struct device *dev;
+       struct regmap *map;
+       struct gpio_desc *convst_gpio;
+       struct gpio_desc *reset_gpio;
+       struct regulator *vref;
+       const struct ad7091r_chip_info *chip_info;
+       enum ad7091r_mode mode;
+       struct mutex lock; /* lock to prevent concurrent reads */
+       __be16 tx_buf __aligned(IIO_DMA_MINALIGN);
+       __be16 rx_buf;
+};
 
 struct ad7091r_chip_info {
+       const char *name;
        unsigned int num_channels;
        const struct iio_chan_spec *channels;
        unsigned int vref_mV;
+       unsigned int (*reg_result_chan_id)(unsigned int val);
+       int (*set_mode)(struct ad7091r_state *st, enum ad7091r_mode mode);
 };
 
-extern const struct regmap_config ad7091r_regmap_config;
+struct ad7091r_init_info {
+       const struct ad7091r_chip_info *info_irq;
+       const struct ad7091r_chip_info *info_no_irq;
+       const struct regmap_config *regmap_config;
+       void (*init_adc_regmap)(struct ad7091r_state *st,
+                               const struct regmap_config *regmap_conf);
+       int (*setup)(struct ad7091r_state *st);
+};
+
+extern const struct iio_event_spec ad7091r_events[3];
+
+int ad7091r_probe(struct device *dev, const struct ad7091r_init_info *init_info,
+                 int irq);
 
-int ad7091r_probe(struct device *dev, const char *name,
-               const struct ad7091r_chip_info *chip_info,
-               struct regmap *map, int irq);
+bool ad7091r_volatile_reg(struct device *dev, unsigned int reg);
+bool ad7091r_writeable_reg(struct device *dev, unsigned int reg);
 
 #endif /* __DRIVERS_IIO_ADC_AD7091R_BASE_H__ */
index 2f048527b7b786728f2099209a17497d97d9a233..a75837334157da498dfbbf26dfd673eb11275767 100644 (file)
 
 #include "ad7091r-base.h"
 
-static const struct iio_event_spec ad7091r5_events[] = {
-       {
-               .type = IIO_EV_TYPE_THRESH,
-               .dir = IIO_EV_DIR_RISING,
-               .mask_separate = BIT(IIO_EV_INFO_VALUE) |
-                                BIT(IIO_EV_INFO_ENABLE),
-       },
-       {
-               .type = IIO_EV_TYPE_THRESH,
-               .dir = IIO_EV_DIR_FALLING,
-               .mask_separate = BIT(IIO_EV_INFO_VALUE) |
-                                BIT(IIO_EV_INFO_ENABLE),
-       },
-       {
-               .type = IIO_EV_TYPE_THRESH,
-               .dir = IIO_EV_DIR_EITHER,
-               .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
-       },
-};
-
-#define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \
-       .type = IIO_VOLTAGE, \
-       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
-       .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
-       .indexed = 1, \
-       .channel = idx, \
-       .event_spec = ev, \
-       .num_event_specs = num_ev, \
-       .scan_type.storagebits = 16, \
-       .scan_type.realbits = bits, \
-}
 static const struct iio_chan_spec ad7091r5_channels_irq[] = {
-       AD7091R_CHANNEL(0, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
-       AD7091R_CHANNEL(1, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
-       AD7091R_CHANNEL(2, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
-       AD7091R_CHANNEL(3, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)),
+       AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
 };
 
 static const struct iio_chan_spec ad7091r5_channels_noirq[] = {
@@ -57,43 +26,98 @@ static const struct iio_chan_spec ad7091r5_channels_noirq[] = {
        AD7091R_CHANNEL(3, 12, NULL, 0),
 };
 
+static int ad7091r5_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
+{
+       int ret, conf;
+
+       switch (mode) {
+       case AD7091R_MODE_SAMPLE:
+               conf = 0;
+               break;
+       case AD7091R_MODE_COMMAND:
+               conf = AD7091R_REG_CONF_CMD;
+               break;
+       case AD7091R_MODE_AUTOCYCLE:
+               conf = AD7091R_REG_CONF_AUTO;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
+                                AD7091R_REG_CONF_MODE_MASK, conf);
+       if (ret)
+               return ret;
+
+       st->mode = mode;
+
+       return 0;
+}
+
+static unsigned int ad7091r5_reg_result_chan_id(unsigned int val)
+{
+       return AD7091R5_REG_RESULT_CH_ID(val);
+}
+
 static const struct ad7091r_chip_info ad7091r5_chip_info_irq = {
+       .name = "ad7091r-5",
        .channels = ad7091r5_channels_irq,
        .num_channels = ARRAY_SIZE(ad7091r5_channels_irq),
        .vref_mV = 2500,
+       .reg_result_chan_id = &ad7091r5_reg_result_chan_id,
+       .set_mode = &ad7091r5_set_mode,
 };
 
 static const struct ad7091r_chip_info ad7091r5_chip_info_noirq = {
+       .name = "ad7091r-5",
        .channels = ad7091r5_channels_noirq,
        .num_channels = ARRAY_SIZE(ad7091r5_channels_noirq),
        .vref_mV = 2500,
+       .reg_result_chan_id = &ad7091r5_reg_result_chan_id,
+       .set_mode = &ad7091r5_set_mode,
 };
 
-static int ad7091r5_i2c_probe(struct i2c_client *i2c)
+static const struct regmap_config ad7091r_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+       .writeable_reg = ad7091r_writeable_reg,
+       .volatile_reg = ad7091r_volatile_reg,
+};
+
+static void ad7091r5_regmap_init(struct ad7091r_state *st,
+                                const struct regmap_config *regmap_conf)
 {
-       const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
-       const struct ad7091r_chip_info *chip_info;
-       struct regmap *map = devm_regmap_init_i2c(i2c, &ad7091r_regmap_config);
+       struct i2c_client *i2c = container_of(st->dev, struct i2c_client, dev);
 
-       if (IS_ERR(map))
-               return PTR_ERR(map);
+       st->map = devm_regmap_init_i2c(i2c, regmap_conf);
+}
+
+static struct ad7091r_init_info ad7091r5_init_info = {
+       .info_irq = &ad7091r5_chip_info_irq,
+       .info_no_irq = &ad7091r5_chip_info_noirq,
+       .regmap_config = &ad7091r_regmap_config,
+       .init_adc_regmap = &ad7091r5_regmap_init
+};
+
+static int ad7091r5_i2c_probe(struct i2c_client *i2c)
+{
+       const struct ad7091r_init_info *init_info;
 
-       if (i2c->irq)
-               chip_info = &ad7091r5_chip_info_irq;
-       else
-               chip_info = &ad7091r5_chip_info_noirq;
+       init_info = i2c_get_match_data(i2c);
+       if (!init_info)
+               return -EINVAL;
 
-       return ad7091r_probe(&i2c->dev, id->name, chip_info, map, i2c->irq);
+       return ad7091r_probe(&i2c->dev, init_info, i2c->irq);
 }
 
 static const struct of_device_id ad7091r5_dt_ids[] = {
-       { .compatible = "adi,ad7091r5" },
+       { .compatible = "adi,ad7091r5", .data = &ad7091r5_init_info },
        {},
 };
 MODULE_DEVICE_TABLE(of, ad7091r5_dt_ids);
 
 static const struct i2c_device_id ad7091r5_i2c_ids[] = {
-       {"ad7091r5", 0},
+       {"ad7091r5", (kernel_ulong_t)&ad7091r5_init_info },
        {}
 };
 MODULE_DEVICE_TABLE(i2c, ad7091r5_i2c_ids);
diff --git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c
new file mode 100644 (file)
index 0000000..57700f1
--- /dev/null
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices AD7091R8 12-bit SAR ADC driver
+ *
+ * Copyright 2023 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
+#include <linux/spi/spi.h>
+
+#include "ad7091r-base.h"
+
+#define AD7091R8_REG_ADDR_MSK                          GENMASK(15, 11)
+#define AD7091R8_RD_WR_FLAG_MSK                                BIT(10)
+#define AD7091R8_REG_DATA_MSK                          GENMASK(9, 0)
+
+#define AD7091R_SPI_REGMAP_CONFIG(n) {                                 \
+       .reg_bits = 8,                                                  \
+       .val_bits = 16,                                                 \
+       .volatile_reg = ad7091r_volatile_reg,                           \
+       .writeable_reg = ad7091r_writeable_reg,                         \
+       .max_register = AD7091R_REG_CH_HYSTERESIS(n),                   \
+}
+
+static int ad7091r8_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
+{
+       /* AD7091R-2/-4/-8 don't set sample/command/autocycle mode in conf reg */
+       st->mode = mode;
+       return 0;
+}
+
+static unsigned int ad7091r8_reg_result_chan_id(unsigned int val)
+{
+       return AD7091R8_REG_RESULT_CH_ID(val);
+}
+
+#define AD7091R_SPI_CHIP_INFO(_n, _name) {                             \
+       .name = _name,                                                  \
+       .channels = ad7091r##_n##_channels,                             \
+       .num_channels = ARRAY_SIZE(ad7091r##_n##_channels),             \
+       .vref_mV = 2500,                                                \
+       .reg_result_chan_id = &ad7091r8_reg_result_chan_id,     \
+       .set_mode = &ad7091r8_set_mode,                         \
+}
+
+#define AD7091R_SPI_CHIP_INFO_IRQ(_n, _name) {                         \
+       .name = _name,                                                  \
+       .channels = ad7091r##_n##_channels_irq,                         \
+       .num_channels = ARRAY_SIZE(ad7091r##_n##_channels_irq),         \
+       .vref_mV = 2500,                                                \
+       .reg_result_chan_id = &ad7091r8_reg_result_chan_id,     \
+       .set_mode = &ad7091r8_set_mode,                         \
+}
+
+enum ad7091r8_info_ids {
+       AD7091R2_INFO,
+       AD7091R4_INFO,
+       AD7091R4_INFO_IRQ,
+       AD7091R8_INFO,
+       AD7091R8_INFO_IRQ,
+};
+
+static const struct iio_chan_spec ad7091r2_channels[] = {
+       AD7091R_CHANNEL(0, 12, NULL, 0),
+       AD7091R_CHANNEL(1, 12, NULL, 0),
+};
+
+static const struct iio_chan_spec ad7091r4_channels[] = {
+       AD7091R_CHANNEL(0, 12, NULL, 0),
+       AD7091R_CHANNEL(1, 12, NULL, 0),
+       AD7091R_CHANNEL(2, 12, NULL, 0),
+       AD7091R_CHANNEL(3, 12, NULL, 0),
+};
+
+static const struct iio_chan_spec ad7091r4_channels_irq[] = {
+       AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+};
+
+static const struct iio_chan_spec ad7091r8_channels[] = {
+       AD7091R_CHANNEL(0, 12, NULL, 0),
+       AD7091R_CHANNEL(1, 12, NULL, 0),
+       AD7091R_CHANNEL(2, 12, NULL, 0),
+       AD7091R_CHANNEL(3, 12, NULL, 0),
+       AD7091R_CHANNEL(4, 12, NULL, 0),
+       AD7091R_CHANNEL(5, 12, NULL, 0),
+       AD7091R_CHANNEL(6, 12, NULL, 0),
+       AD7091R_CHANNEL(7, 12, NULL, 0),
+};
+
+static const struct iio_chan_spec ad7091r8_channels_irq[] = {
+       AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(4, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(5, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(6, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+       AD7091R_CHANNEL(7, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+};
+
+static void ad7091r_pulse_convst(struct ad7091r_state *st)
+{
+       gpiod_set_value_cansleep(st->convst_gpio, 1);
+       gpiod_set_value_cansleep(st->convst_gpio, 0);
+}
+
+static int ad7091r_regmap_bus_reg_read(void *context, unsigned int reg,
+                                      unsigned int *val)
+{
+       struct ad7091r_state *st = context;
+       struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+       int ret;
+
+       struct spi_transfer t[] = {
+               {
+                       .tx_buf = &st->tx_buf,
+                       .len = 2,
+                       .cs_change = 1,
+               }, {
+                       .rx_buf = &st->rx_buf,
+                       .len = 2,
+               }
+       };
+
+       if (reg == AD7091R_REG_RESULT)
+               ad7091r_pulse_convst(st);
+
+       st->tx_buf = cpu_to_be16(reg << 11);
+
+       ret = spi_sync_transfer(spi, t, ARRAY_SIZE(t));
+       if (ret < 0)
+               return ret;
+
+       *val = be16_to_cpu(st->rx_buf);
+       return 0;
+}
+
+static int ad7091r_regmap_bus_reg_write(void *context, unsigned int reg,
+                                       unsigned int val)
+{
+       struct ad7091r_state *st = context;
+       struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+
+       /*
+        * AD7091R-2/-4/-8 protocol (datasheet page 31) is to do a single SPI
+        * transfer with reg address set in bits B15:B11 and value set in B9:B0.
+        */
+       st->tx_buf = cpu_to_be16(FIELD_PREP(AD7091R8_REG_DATA_MSK, val) |
+                                FIELD_PREP(AD7091R8_RD_WR_FLAG_MSK, 1) |
+                                FIELD_PREP(AD7091R8_REG_ADDR_MSK, reg));
+
+       return spi_write(spi, &st->tx_buf, 2);
+}
+
+static struct regmap_bus ad7091r8_regmap_bus = {
+       .reg_read = ad7091r_regmap_bus_reg_read,
+       .reg_write = ad7091r_regmap_bus_reg_write,
+       .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+       .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static const struct ad7091r_chip_info ad7091r8_infos[] = {
+       [AD7091R2_INFO] = AD7091R_SPI_CHIP_INFO(2, "ad7091r-2"),
+       [AD7091R4_INFO] = AD7091R_SPI_CHIP_INFO(4, "ad7091r-4"),
+       [AD7091R4_INFO_IRQ] = AD7091R_SPI_CHIP_INFO_IRQ(4, "ad7091r-4"),
+       [AD7091R8_INFO] = AD7091R_SPI_CHIP_INFO(8, "ad7091r-8"),
+       [AD7091R8_INFO_IRQ] = AD7091R_SPI_CHIP_INFO_IRQ(8, "ad7091r-8")
+};
+
+static const struct regmap_config ad7091r2_reg_conf = AD7091R_SPI_REGMAP_CONFIG(2);
+static const struct regmap_config ad7091r4_reg_conf = AD7091R_SPI_REGMAP_CONFIG(4);
+static const struct regmap_config ad7091r8_reg_conf = AD7091R_SPI_REGMAP_CONFIG(8);
+
+static void ad7091r8_regmap_init(struct ad7091r_state *st,
+                                const struct regmap_config *regmap_conf)
+{
+       st->map = devm_regmap_init(st->dev, &ad7091r8_regmap_bus, st,
+                                  regmap_conf);
+}
+
+static int ad7091r8_gpio_setup(struct ad7091r_state *st)
+{
+       st->convst_gpio = devm_gpiod_get(st->dev, "convst", GPIOD_OUT_LOW);
+       if (IS_ERR(st->convst_gpio))
+               return dev_err_probe(st->dev, PTR_ERR(st->convst_gpio),
+                                    "Error getting convst GPIO\n");
+
+       st->reset_gpio = devm_gpiod_get_optional(st->dev, "reset",
+                                                GPIOD_OUT_HIGH);
+       if (IS_ERR(st->reset_gpio))
+               return dev_err_probe(st->dev, PTR_ERR(st->convst_gpio),
+                                    "Error on requesting reset GPIO\n");
+
+       if (st->reset_gpio) {
+               fsleep(20);
+               gpiod_set_value_cansleep(st->reset_gpio, 0);
+       }
+
+       return 0;
+}
+
+static struct ad7091r_init_info ad7091r2_init_info = {
+       .info_no_irq = &ad7091r8_infos[AD7091R2_INFO],
+       .regmap_config = &ad7091r2_reg_conf,
+       .init_adc_regmap = &ad7091r8_regmap_init,
+       .setup = &ad7091r8_gpio_setup
+};
+
+static struct ad7091r_init_info ad7091r4_init_info = {
+       .info_no_irq = &ad7091r8_infos[AD7091R4_INFO],
+       .info_irq = &ad7091r8_infos[AD7091R4_INFO_IRQ],
+       .regmap_config = &ad7091r4_reg_conf,
+       .init_adc_regmap = &ad7091r8_regmap_init,
+       .setup = &ad7091r8_gpio_setup
+};
+
+static struct ad7091r_init_info ad7091r8_init_info = {
+       .info_no_irq = &ad7091r8_infos[AD7091R8_INFO],
+       .info_irq = &ad7091r8_infos[AD7091R8_INFO_IRQ],
+       .regmap_config = &ad7091r8_reg_conf,
+       .init_adc_regmap = &ad7091r8_regmap_init,
+       .setup = &ad7091r8_gpio_setup
+};
+
+static int ad7091r8_spi_probe(struct spi_device *spi)
+{
+       const struct ad7091r_init_info *init_info;
+
+       init_info = spi_get_device_match_data(spi);
+       if (!init_info)
+               return -EINVAL;
+
+       return ad7091r_probe(&spi->dev, init_info, spi->irq);
+}
+
+static const struct of_device_id ad7091r8_of_match[] = {
+       { .compatible = "adi,ad7091r2", .data = &ad7091r2_init_info },
+       { .compatible = "adi,ad7091r4", .data = &ad7091r4_init_info },
+       { .compatible = "adi,ad7091r8", .data = &ad7091r8_init_info },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ad7091r8_of_match);
+
+static const struct spi_device_id ad7091r8_spi_id[] = {
+       { "ad7091r2", (kernel_ulong_t)&ad7091r2_init_info },
+       { "ad7091r4", (kernel_ulong_t)&ad7091r4_init_info },
+       { "ad7091r8", (kernel_ulong_t)&ad7091r8_init_info },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, ad7091r8_spi_id);
+
+static struct spi_driver ad7091r8_driver = {
+       .driver = {
+               .name = "ad7091r8",
+               .of_match_table = ad7091r8_of_match,
+       },
+       .probe = ad7091r8_spi_probe,
+       .id_table = ad7091r8_spi_id,
+};
+module_spi_driver(ad7091r8_driver);
+
+MODULE_AUTHOR("Marcelo Schmitt <marcelo.schmitt@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7091R8 ADC driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_AD7091R);
index 39eccc28debe4c57cbb4e836b3006d70ee5fefa8..6581fce4ba959af08df2ffd7e0fa67306755e2be 100644 (file)
@@ -4,8 +4,9 @@
  *
  * Copyright 2012-2020 Analog Devices Inc.
  */
-
+#include <linux/cleanup.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #define AD9467_DEF_OUTPUT_MODE         0x08
 #define AD9467_REG_VREF_MASK           0x0F
 
-enum {
-       ID_AD9265,
-       ID_AD9434,
-       ID_AD9467,
-};
-
 struct ad9467_chip_info {
        struct adi_axi_adc_chip_info    axi_adc_info;
        unsigned int                    default_output_mode;
@@ -119,9 +114,11 @@ struct ad9467_state {
        struct spi_device               *spi;
        struct clk                      *clk;
        unsigned int                    output_mode;
+       unsigned int                    (*scales)[2];
 
        struct gpio_desc                *pwrdown_gpio;
-       struct gpio_desc                *reset_gpio;
+       /* ensure consistent state obtained on multiple related accesses */
+       struct mutex                    lock;
 };
 
 static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
@@ -161,11 +158,13 @@ static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
        struct spi_device *spi = st->spi;
        int ret;
 
-       if (readval == NULL) {
+       if (!readval) {
+               guard(mutex)(&st->lock);
                ret = ad9467_spi_write(spi, reg, writeval);
-               ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
-                                AN877_ADC_TRANSFER_SYNC);
-               return ret;
+               if (ret)
+                       return ret;
+               return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+                                       AN877_ADC_TRANSFER_SYNC);
        }
 
        ret = ad9467_spi_read(spi, reg);
@@ -212,6 +211,7 @@ static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
        .channel = _chan,                                               \
        .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |          \
                BIT(IIO_CHAN_INFO_SAMP_FREQ),                           \
+       .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
        .scan_index = _si,                                              \
        .scan_type = {                                                  \
                .sign = _sign,                                          \
@@ -228,43 +228,46 @@ static const struct iio_chan_spec ad9467_channels[] = {
        AD9467_CHAN(0, 0, 16, 'S'),
 };
 
-static const struct ad9467_chip_info ad9467_chip_tbl[] = {
-       [ID_AD9265] = {
-               .axi_adc_info = {
-                       .id = CHIPID_AD9265,
-                       .max_rate = 125000000UL,
-                       .scale_table = ad9265_scale_table,
-                       .num_scales = ARRAY_SIZE(ad9265_scale_table),
-                       .channels = ad9467_channels,
-                       .num_channels = ARRAY_SIZE(ad9467_channels),
-               },
-               .default_output_mode = AD9265_DEF_OUTPUT_MODE,
-               .vref_mask = AD9265_REG_VREF_MASK,
+static const struct ad9467_chip_info ad9467_chip_tbl = {
+       .axi_adc_info = {
+               .name = "ad9467",
+               .id = CHIPID_AD9467,
+               .max_rate = 250000000UL,
+               .scale_table = ad9467_scale_table,
+               .num_scales = ARRAY_SIZE(ad9467_scale_table),
+               .channels = ad9467_channels,
+               .num_channels = ARRAY_SIZE(ad9467_channels),
        },
-       [ID_AD9434] = {
-               .axi_adc_info = {
-                       .id = CHIPID_AD9434,
-                       .max_rate = 500000000UL,
-                       .scale_table = ad9434_scale_table,
-                       .num_scales = ARRAY_SIZE(ad9434_scale_table),
-                       .channels = ad9434_channels,
-                       .num_channels = ARRAY_SIZE(ad9434_channels),
-               },
-               .default_output_mode = AD9434_DEF_OUTPUT_MODE,
-               .vref_mask = AD9434_REG_VREF_MASK,
+       .default_output_mode = AD9467_DEF_OUTPUT_MODE,
+       .vref_mask = AD9467_REG_VREF_MASK,
+};
+
+static const struct ad9467_chip_info ad9434_chip_tbl = {
+       .axi_adc_info = {
+               .name = "ad9434",
+               .id = CHIPID_AD9434,
+               .max_rate = 500000000UL,
+               .scale_table = ad9434_scale_table,
+               .num_scales = ARRAY_SIZE(ad9434_scale_table),
+               .channels = ad9434_channels,
+               .num_channels = ARRAY_SIZE(ad9434_channels),
        },
-       [ID_AD9467] = {
-               .axi_adc_info = {
-                       .id = CHIPID_AD9467,
-                       .max_rate = 250000000UL,
-                       .scale_table = ad9467_scale_table,
-                       .num_scales = ARRAY_SIZE(ad9467_scale_table),
-                       .channels = ad9467_channels,
-                       .num_channels = ARRAY_SIZE(ad9467_channels),
-               },
-               .default_output_mode = AD9467_DEF_OUTPUT_MODE,
-               .vref_mask = AD9467_REG_VREF_MASK,
+       .default_output_mode = AD9434_DEF_OUTPUT_MODE,
+       .vref_mask = AD9434_REG_VREF_MASK,
+};
+
+static const struct ad9467_chip_info ad9265_chip_tbl = {
+       .axi_adc_info = {
+               .name = "ad9265",
+               .id = CHIPID_AD9265,
+               .max_rate = 125000000UL,
+               .scale_table = ad9265_scale_table,
+               .num_scales = ARRAY_SIZE(ad9265_scale_table),
+               .channels = ad9467_channels,
+               .num_channels = ARRAY_SIZE(ad9467_channels),
        },
+       .default_output_mode = AD9265_DEF_OUTPUT_MODE,
+       .vref_mask = AD9265_REG_VREF_MASK,
 };
 
 static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
@@ -273,10 +276,13 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
        const struct ad9467_chip_info *info1 = to_ad9467_chip_info(info);
        struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
        unsigned int i, vref_val;
+       int ret;
 
-       vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
+       ret = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
+       if (ret < 0)
+               return ret;
 
-       vref_val &= info1->vref_mask;
+       vref_val = ret & info1->vref_mask;
 
        for (i = 0; i < info->num_scales; i++) {
                if (vref_val == info->scale_table[i][1])
@@ -297,6 +303,7 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
        struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
        unsigned int scale_val[2];
        unsigned int i;
+       int ret;
 
        if (val != 0)
                return -EINVAL;
@@ -306,11 +313,14 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
                if (scale_val[0] != val || scale_val[1] != val2)
                        continue;
 
-               ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
-                                info->scale_table[i][1]);
-               ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
-                                AN877_ADC_TRANSFER_SYNC);
-               return 0;
+               guard(mutex)(&st->lock);
+               ret = ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
+                                      info->scale_table[i][1]);
+               if (ret < 0)
+                       return ret;
+
+               return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+                                       AN877_ADC_TRANSFER_SYNC);
        }
 
        return -EINVAL;
@@ -359,6 +369,26 @@ static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
        }
 }
 
+static int ad9467_read_avail(struct adi_axi_adc_conv *conv,
+                            struct iio_chan_spec const *chan,
+                            const int **vals, int *type, int *length,
+                            long mask)
+{
+       const struct adi_axi_adc_chip_info *info = conv->chip_info;
+       struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+
+       switch (mask) {
+       case IIO_CHAN_INFO_SCALE:
+               *vals = (const int *)st->scales;
+               *type = IIO_VAL_INT_PLUS_MICRO;
+               /* Values are stored in a 2D matrix */
+               *length = info->num_scales * 2;
+               return IIO_AVAIL_LIST;
+       default:
+               return -EINVAL;
+       }
+}
+
 static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
 {
        int ret;
@@ -371,6 +401,26 @@ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
                                AN877_ADC_TRANSFER_SYNC);
 }
 
+static int ad9467_scale_fill(struct adi_axi_adc_conv *conv)
+{
+       const struct adi_axi_adc_chip_info *info = conv->chip_info;
+       struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+       unsigned int i, val1, val2;
+
+       st->scales = devm_kmalloc_array(&st->spi->dev, info->num_scales,
+                                       sizeof(*st->scales), GFP_KERNEL);
+       if (!st->scales)
+               return -ENOMEM;
+
+       for (i = 0; i < info->num_scales; i++) {
+               __ad9467_get_scale(conv, i, &val1, &val2);
+               st->scales[i][0] = val1;
+               st->scales[i][1] = val2;
+       }
+
+       return 0;
+}
+
 static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
 {
        struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
@@ -378,6 +428,21 @@ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
        return ad9467_outputmode_set(st->spi, st->output_mode);
 }
 
+static int ad9467_reset(struct device *dev)
+{
+       struct gpio_desc *gpio;
+
+       gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR_OR_NULL(gpio))
+               return PTR_ERR_OR_ZERO(gpio);
+
+       fsleep(1);
+       gpiod_set_value_cansleep(gpio, 0);
+       fsleep(10 * USEC_PER_MSEC);
+
+       return 0;
+}
+
 static int ad9467_probe(struct spi_device *spi)
 {
        const struct ad9467_chip_info *info;
@@ -386,9 +451,7 @@ static int ad9467_probe(struct spi_device *spi)
        unsigned int id;
        int ret;
 
-       info = of_device_get_match_data(&spi->dev);
-       if (!info)
-               info = (void *)spi_get_device_id(spi)->driver_data;
+       info = spi_get_device_match_data(spi);
        if (!info)
                return -ENODEV;
 
@@ -408,21 +471,16 @@ static int ad9467_probe(struct spi_device *spi)
        if (IS_ERR(st->pwrdown_gpio))
                return PTR_ERR(st->pwrdown_gpio);
 
-       st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
-                                                GPIOD_OUT_LOW);
-       if (IS_ERR(st->reset_gpio))
-               return PTR_ERR(st->reset_gpio);
-
-       if (st->reset_gpio) {
-               udelay(1);
-               ret = gpiod_direction_output(st->reset_gpio, 1);
-               if (ret)
-                       return ret;
-               mdelay(10);
-       }
+       ret = ad9467_reset(&spi->dev);
+       if (ret)
+               return ret;
 
        conv->chip_info = &info->axi_adc_info;
 
+       ret = ad9467_scale_fill(conv);
+       if (ret)
+               return ret;
+
        id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
        if (id != conv->chip_info->id) {
                dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n",
@@ -433,6 +491,7 @@ static int ad9467_probe(struct spi_device *spi)
        conv->reg_access = ad9467_reg_access;
        conv->write_raw = ad9467_write_raw;
        conv->read_raw = ad9467_read_raw;
+       conv->read_avail = ad9467_read_avail;
        conv->preenable_setup = ad9467_preenable_setup;
 
        st->output_mode = info->default_output_mode |
@@ -442,17 +501,17 @@ static int ad9467_probe(struct spi_device *spi)
 }
 
 static const struct of_device_id ad9467_of_match[] = {
-       { .compatible = "adi,ad9265", .data = &ad9467_chip_tbl[ID_AD9265], },
-       { .compatible = "adi,ad9434", .data = &ad9467_chip_tbl[ID_AD9434], },
-       { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl[ID_AD9467], },
+       { .compatible = "adi,ad9265", .data = &ad9265_chip_tbl, },
+       { .compatible = "adi,ad9434", .data = &ad9434_chip_tbl, },
+       { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl, },
        {}
 };
 MODULE_DEVICE_TABLE(of, ad9467_of_match);
 
 static const struct spi_device_id ad9467_ids[] = {
-       { "ad9265", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9265] },
-       { "ad9434", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9434] },
-       { "ad9467", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9467] },
+       { "ad9265", (kernel_ulong_t)&ad9265_chip_tbl },
+       { "ad9434", (kernel_ulong_t)&ad9434_chip_tbl },
+       { "ad9467", (kernel_ulong_t)&ad9467_chip_tbl },
        {}
 };
 MODULE_DEVICE_TABLE(spi, ad9467_ids);
index aff0532a974aa0f5601aceb3b9435a15d0507ff6..c247ff1541d2863923727b435af19dd48bb07bb4 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 
 #include <linux/iio/iio.h>
@@ -62,7 +63,7 @@ struct adi_axi_adc_state {
        struct mutex                            lock;
 
        struct adi_axi_adc_client               *client;
-       void __iomem                            *regs;
+       struct regmap                           *regmap;
 };
 
 struct adi_axi_adc_client {
@@ -90,19 +91,6 @@ void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv)
 }
 EXPORT_SYMBOL_NS_GPL(adi_axi_adc_conv_priv, IIO_ADI_AXI);
 
-static void adi_axi_adc_write(struct adi_axi_adc_state *st,
-                             unsigned int reg,
-                             unsigned int val)
-{
-       iowrite32(val, st->regs + reg);
-}
-
-static unsigned int adi_axi_adc_read(struct adi_axi_adc_state *st,
-                                    unsigned int reg)
-{
-       return ioread32(st->regs + reg);
-}
-
 static int adi_axi_adc_config_dma_buffer(struct device *dev,
                                         struct iio_dev *indio_dev)
 {
@@ -144,22 +132,39 @@ static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
        return conv->write_raw(conv, chan, val, val2, mask);
 }
 
+static int adi_axi_adc_read_avail(struct iio_dev *indio_dev,
+                                 struct iio_chan_spec const *chan,
+                                 const int **vals, int *type, int *length,
+                                 long mask)
+{
+       struct adi_axi_adc_state *st = iio_priv(indio_dev);
+       struct adi_axi_adc_conv *conv = &st->client->conv;
+
+       if (!conv->read_avail)
+               return -EOPNOTSUPP;
+
+       return conv->read_avail(conv, chan, vals, type, length, mask);
+}
+
 static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
                                        const unsigned long *scan_mask)
 {
        struct adi_axi_adc_state *st = iio_priv(indio_dev);
        struct adi_axi_adc_conv *conv = &st->client->conv;
-       unsigned int i, ctrl;
+       unsigned int i;
+       int ret;
 
        for (i = 0; i < conv->chip_info->num_channels; i++) {
-               ctrl = adi_axi_adc_read(st, ADI_AXI_REG_CHAN_CTRL(i));
-
                if (test_bit(i, scan_mask))
-                       ctrl |= ADI_AXI_REG_CHAN_CTRL_ENABLE;
+                       ret = regmap_set_bits(st->regmap,
+                                             ADI_AXI_REG_CHAN_CTRL(i),
+                                             ADI_AXI_REG_CHAN_CTRL_ENABLE);
                else
-                       ctrl &= ~ADI_AXI_REG_CHAN_CTRL_ENABLE;
-
-               adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i), ctrl);
+                       ret = regmap_clear_bits(st->regmap,
+                                               ADI_AXI_REG_CHAN_CTRL(i),
+                                               ADI_AXI_REG_CHAN_CTRL_ENABLE);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -228,69 +233,11 @@ struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
 }
 EXPORT_SYMBOL_NS_GPL(devm_adi_axi_adc_conv_register, IIO_ADI_AXI);
 
-static ssize_t in_voltage_scale_available_show(struct device *dev,
-                                              struct device_attribute *attr,
-                                              char *buf)
-{
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-       struct adi_axi_adc_state *st = iio_priv(indio_dev);
-       struct adi_axi_adc_conv *conv = &st->client->conv;
-       size_t len = 0;
-       int i;
-
-       for (i = 0; i < conv->chip_info->num_scales; i++) {
-               const unsigned int *s = conv->chip_info->scale_table[i];
-
-               len += scnprintf(buf + len, PAGE_SIZE - len,
-                                "%u.%06u ", s[0], s[1]);
-       }
-       buf[len - 1] = '\n';
-
-       return len;
-}
-
-static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
-
-enum {
-       ADI_AXI_ATTR_SCALE_AVAIL,
-};
-
-#define ADI_AXI_ATTR(_en_, _file_)                     \
-       [ADI_AXI_ATTR_##_en_] = &iio_dev_attr_##_file_.dev_attr.attr
-
-static struct attribute *adi_axi_adc_attributes[] = {
-       ADI_AXI_ATTR(SCALE_AVAIL, in_voltage_scale_available),
-       NULL
-};
-
-static umode_t axi_adc_attr_is_visible(struct kobject *kobj,
-                                      struct attribute *attr, int n)
-{
-       struct device *dev = kobj_to_dev(kobj);
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-       struct adi_axi_adc_state *st = iio_priv(indio_dev);
-       struct adi_axi_adc_conv *conv = &st->client->conv;
-
-       switch (n) {
-       case ADI_AXI_ATTR_SCALE_AVAIL:
-               if (!conv->chip_info->num_scales)
-                       return 0;
-               return attr->mode;
-       default:
-               return attr->mode;
-       }
-}
-
-static const struct attribute_group adi_axi_adc_attribute_group = {
-       .attrs = adi_axi_adc_attributes,
-       .is_visible = axi_adc_attr_is_visible,
-};
-
 static const struct iio_info adi_axi_adc_info = {
        .read_raw = &adi_axi_adc_read_raw,
        .write_raw = &adi_axi_adc_write_raw,
-       .attrs = &adi_axi_adc_attribute_group,
        .update_scan_mode = &adi_axi_adc_update_scan_mode,
+       .read_avail = &adi_axi_adc_read_avail,
 };
 
 static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
@@ -354,21 +301,32 @@ static int adi_axi_adc_setup_channels(struct device *dev,
        }
 
        for (i = 0; i < conv->chip_info->num_channels; i++) {
-               adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i),
-                                 ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
+               ret = regmap_write(st->regmap, ADI_AXI_REG_CHAN_CTRL(i),
+                                  ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
+               if (ret)
+                       return ret;
        }
 
        return 0;
 }
 
-static void axi_adc_reset(struct adi_axi_adc_state *st)
+static int axi_adc_reset(struct adi_axi_adc_state *st)
 {
-       adi_axi_adc_write(st, ADI_AXI_REG_RSTN, 0);
+       int ret;
+
+       ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
+       if (ret)
+               return ret;
+
        mdelay(10);
-       adi_axi_adc_write(st, ADI_AXI_REG_RSTN, ADI_AXI_REG_RSTN_MMCM_RSTN);
+       ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN,
+                          ADI_AXI_REG_RSTN_MMCM_RSTN);
+       if (ret)
+               return ret;
+
        mdelay(10);
-       adi_axi_adc_write(st, ADI_AXI_REG_RSTN,
-                         ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
+       return regmap_write(st->regmap, ADI_AXI_REG_RSTN,
+                           ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
 }
 
 static void adi_axi_adc_cleanup(void *data)
@@ -379,12 +337,20 @@ static void adi_axi_adc_cleanup(void *data)
        module_put(cl->dev->driver->owner);
 }
 
+static const struct regmap_config axi_adc_regmap_config = {
+       .val_bits = 32,
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .max_register = 0x0800,
+};
+
 static int adi_axi_adc_probe(struct platform_device *pdev)
 {
        struct adi_axi_adc_conv *conv;
        struct iio_dev *indio_dev;
        struct adi_axi_adc_client *cl;
        struct adi_axi_adc_state *st;
+       void __iomem *base;
        unsigned int ver;
        int ret;
 
@@ -405,15 +371,24 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
        cl->state = st;
        mutex_init(&st->lock);
 
-       st->regs = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(st->regs))
-               return PTR_ERR(st->regs);
+       base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+                                          &axi_adc_regmap_config);
+       if (IS_ERR(st->regmap))
+               return PTR_ERR(st->regmap);
 
        conv = &st->client->conv;
 
-       axi_adc_reset(st);
+       ret = axi_adc_reset(st);
+       if (ret)
+               return ret;
 
-       ver = adi_axi_adc_read(st, ADI_AXI_REG_VERSION);
+       ret = regmap_read(st->regmap, ADI_AXI_REG_VERSION, &ver);
+       if (ret)
+               return ret;
 
        if (cl->info->version > ver) {
                dev_err(&pdev->dev,
diff --git a/drivers/iio/adc/max34408.c b/drivers/iio/adc/max34408.c
new file mode 100644 (file)
index 0000000..6c2ea2b
--- /dev/null
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IIO driver for Maxim MAX34409/34408 ADC, 4-Channels/2-Channels, 8bits, I2C
+ *
+ * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/MAX34408-MAX34409.pdf
+ *
+ * TODO: ALERT interrupt, Overcurrent delay, Shutdown delay
+ */
+
+#include <linux/bitfield.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+#define MAX34408_STATUS_REG            0x0
+#define MAX34408_CONTROL_REG           0x1
+#define MAX34408_OCDELAY_REG           0x2
+#define MAX34408_SDDELAY_REG           0x3
+
+#define MAX34408_ADC1_REG              0x4
+#define MAX34408_ADC2_REG              0x5
+/* ADC3 & ADC4 always returns 0x0 on 34408 */
+#define MAX34409_ADC3_REG              0x6
+#define MAX34409_ADC4_REG              0x7
+
+#define MAX34408_OCT1_REG              0x8
+#define MAX34408_OCT2_REG              0x9
+#define MAX34409_OCT3_REG              0xA
+#define MAX34409_OCT4_REG              0xB
+
+#define MAX34408_DID_REG               0xC
+#define MAX34408_DCYY_REG              0xD
+#define MAX34408_DCWW_REG              0xE
+
+/* Bit masks for status register */
+#define MAX34408_STATUS_OC_MSK         GENMASK(1, 0)
+#define MAX34409_STATUS_OC_MSK         GENMASK(3, 0)
+#define MAX34408_STATUS_SHTDN          BIT(4)
+#define MAX34408_STATUS_ENA            BIT(5)
+
+/* Bit masks for control register */
+#define MAX34408_CONTROL_AVG0          BIT(0)
+#define MAX34408_CONTROL_AVG1          BIT(1)
+#define MAX34408_CONTROL_AVG2          BIT(2)
+#define MAX34408_CONTROL_ALERT         BIT(3)
+
+#define MAX34408_DEFAULT_AVG           0x4
+
+/* Bit masks for over current delay */
+#define MAX34408_OCDELAY_OCD_MSK       GENMASK(6, 0)
+#define MAX34408_OCDELAY_RESET         BIT(7)
+
+/* Bit masks for shutdown delay */
+#define MAX34408_SDDELAY_SHD_MSK       GENMASK(6, 0)
+#define MAX34408_SDDELAY_RESET         BIT(7)
+
+#define MAX34408_DEFAULT_RSENSE                1000
+
+/**
+ * struct max34408_data - max34408/max34409 specific data.
+ * @regmap:    device register map.
+ * @dev:       max34408 device.
+ * @lock:      lock for protecting access to device hardware registers, mostly
+ *             for read modify write cycles for control registers.
+ * @input_rsense:      Rsense values in uOhm, will be overwritten by
+ *                     values from channel nodes.
+ */
+struct max34408_data {
+       struct regmap *regmap;
+       struct device *dev;
+       struct mutex lock;
+       u32 input_rsense[4];
+};
+
+static const struct regmap_config max34408_regmap_config = {
+       .reg_bits       = 8,
+       .val_bits       = 8,
+       .max_register   = MAX34408_DCWW_REG,
+};
+
+struct max34408_adc_model_data {
+       const char *model_name;
+       const struct iio_chan_spec *channels;
+       const int num_channels;
+};
+
+#define MAX34008_CHANNEL(_index, _address)                     \
+       {                                                       \
+               .type = IIO_CURRENT,                            \
+               .info_mask_separate     = BIT(IIO_CHAN_INFO_RAW) | \
+                                         BIT(IIO_CHAN_INFO_SCALE) | \
+                                         BIT(IIO_CHAN_INFO_OFFSET), \
+               .channel = (_index),                            \
+               .address = (_address),                          \
+               .indexed = 1,                                   \
+       }
+
+static const struct iio_chan_spec max34408_channels[] = {
+       MAX34008_CHANNEL(0, MAX34408_ADC1_REG),
+       MAX34008_CHANNEL(1, MAX34408_ADC2_REG),
+};
+
+static const struct iio_chan_spec max34409_channels[] = {
+       MAX34008_CHANNEL(0, MAX34408_ADC1_REG),
+       MAX34008_CHANNEL(1, MAX34408_ADC2_REG),
+       MAX34008_CHANNEL(2, MAX34409_ADC3_REG),
+       MAX34008_CHANNEL(3, MAX34409_ADC4_REG),
+};
+
+static int max34408_read_adc_avg(struct max34408_data *max34408,
+                                const struct iio_chan_spec *chan, int *val)
+{
+       unsigned int ctrl;
+       int rc;
+
+       guard(mutex)(&max34408->lock);
+       rc = regmap_read(max34408->regmap, MAX34408_CONTROL_REG, (u32 *)&ctrl);
+       if (rc)
+               return rc;
+
+       /* set averaging to its default value (0b100) */
+       rc = regmap_write(max34408->regmap, MAX34408_CONTROL_REG,
+                         MAX34408_DEFAULT_AVG);
+       if (rc) {
+               dev_err(max34408->dev,
+                       "Error (%d) writing control register\n", rc);
+               return rc;
+       }
+
+       rc = regmap_read(max34408->regmap, chan->address, val);
+       if (rc)
+               return rc;
+
+       /* back to old values */
+       rc = regmap_write(max34408->regmap, MAX34408_CONTROL_REG, ctrl);
+       if (rc)
+               dev_err(max34408->dev,
+                       "Error (%d) writing control register\n", rc);
+
+       return rc;
+}
+
+static int max34408_read_raw(struct iio_dev *indio_dev,
+                            struct iio_chan_spec const *chan,
+                            int *val, int *val2, long mask)
+{
+       struct max34408_data *max34408 = iio_priv(indio_dev);
+       int rc;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               rc = max34408_read_adc_avg(max34408, chan, val);
+               if (rc)
+                       return rc;
+               return IIO_VAL_INT;
+       case IIO_CHAN_INFO_SCALE:
+               /*
+                * calculate current for 8-bit ADC with Rsense
+                * value.
+                * 10 mV * 1000 / Rsense uOhm = max current
+                * (max current * adc val * 1000) / (2^8 - 1) mA
+                */
+               *val = 10000 / max34408->input_rsense[chan->channel];
+               *val2 = 8;
+               return IIO_VAL_FRACTIONAL_LOG2;
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct iio_info max34408_info = {
+       .read_raw       = max34408_read_raw,
+};
+
+static const struct max34408_adc_model_data max34408_model_data = {
+       .model_name = "max34408",
+       .channels = max34408_channels,
+       .num_channels = 2,
+};
+
+static const struct max34408_adc_model_data max34409_model_data = {
+       .model_name = "max34409",
+       .channels = max34409_channels,
+       .num_channels = 4,
+};
+
+static int max34408_probe(struct i2c_client *client)
+{
+       const struct max34408_adc_model_data *model_data;
+       struct device *dev = &client->dev;
+       struct max34408_data *max34408;
+       struct fwnode_handle *node;
+       struct iio_dev *indio_dev;
+       struct regmap *regmap;
+       int rc, i = 0;
+
+       model_data = i2c_get_match_data(client);
+       if (!model_data)
+               return -EINVAL;
+
+       regmap = devm_regmap_init_i2c(client, &max34408_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err_probe(dev, PTR_ERR(regmap),
+                             "regmap_init failed\n");
+               return PTR_ERR(regmap);
+       }
+
+       indio_dev = devm_iio_device_alloc(dev, sizeof(*max34408));
+       if (!indio_dev)
+               return -ENOMEM;
+
+       max34408 = iio_priv(indio_dev);
+       max34408->regmap = regmap;
+       max34408->dev = dev;
+       mutex_init(&max34408->lock);
+
+       device_for_each_child_node(dev, node) {
+               fwnode_property_read_u32(node, "maxim,rsense-val-micro-ohms",
+                                        &max34408->input_rsense[i]);
+               i++;
+       }
+
+       /* disable ALERT and averaging */
+       rc = regmap_write(max34408->regmap, MAX34408_CONTROL_REG, 0x0);
+       if (rc)
+               return rc;
+
+       indio_dev->channels = model_data->channels;
+       indio_dev->num_channels = model_data->num_channels;
+       indio_dev->name = model_data->model_name;
+
+       indio_dev->info = &max34408_info;
+       indio_dev->modes = INDIO_DIRECT_MODE;
+
+       return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id max34408_of_match[] = {
+       {
+               .compatible = "maxim,max34408",
+               .data = &max34408_model_data,
+       },
+       {
+               .compatible = "maxim,max34409",
+               .data = &max34409_model_data,
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, max34408_of_match);
+
+static const struct i2c_device_id max34408_id[] = {
+       { "max34408", (kernel_ulong_t)&max34408_model_data },
+       { "max34409", (kernel_ulong_t)&max34409_model_data },
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, max34408_id);
+
+static struct i2c_driver max34408_driver = {
+       .driver = {
+               .name   = "max34408",
+               .of_match_table = max34408_of_match,
+       },
+       .probe = max34408_probe,
+       .id_table = max34408_id,
+};
+module_i2c_driver(max34408_driver);
+
+MODULE_AUTHOR("Ivan Mikhaylov <fr0st61te@gmail.com>");
+MODULE_DESCRIPTION("Maxim MAX34408/34409 ADC driver");
+MODULE_LICENSE("GPL");
index d864558bc087f083384a0414001540f58b5a92ab..7a32e7a1be9d0930a54d10329626075e22d46ef6 100644 (file)
@@ -7,6 +7,7 @@
  */
 #include <linux/bitfield.h>
 #include <linux/bits.h>
+#include <linux/cleanup.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -316,47 +317,37 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
                            int *val2, long mask)
 {
        struct mcp3911 *adc = iio_priv(indio_dev);
-       int ret = -EINVAL;
+       int ret;
 
-       mutex_lock(&adc->lock);
+       guard(mutex)(&adc->lock);
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
                ret = mcp3911_read(adc,
                                   MCP3911_CHANNEL(channel->channel), val, 3);
                if (ret)
-                       goto out;
+                       return ret;
 
                *val = sign_extend32(*val, 23);
-
-               ret = IIO_VAL_INT;
-               break;
-
+               return IIO_VAL_INT;
        case IIO_CHAN_INFO_OFFSET:
-
                ret = adc->chip->get_offset(adc, channel->channel, val);
                if (ret)
-                       goto out;
+                       return ret;
 
-               ret = IIO_VAL_INT;
-               break;
+               return IIO_VAL_INT;
        case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
                ret = adc->chip->get_osr(adc, val);
                if (ret)
-                       goto out;
-
-               ret = IIO_VAL_INT;
-               break;
+                       return ret;
 
+               return IIO_VAL_INT;
        case IIO_CHAN_INFO_SCALE:
                *val = mcp3911_scale_table[ilog2(adc->gain[channel->channel])][0];
                *val2 = mcp3911_scale_table[ilog2(adc->gain[channel->channel])][1];
-               ret = IIO_VAL_INT_PLUS_NANO;
-               break;
+               return IIO_VAL_INT_PLUS_NANO;
+       default:
+               return -EINVAL;
        }
-
-out:
-       mutex_unlock(&adc->lock);
-       return ret;
 }
 
 static int mcp3911_write_raw(struct iio_dev *indio_dev,
@@ -364,9 +355,8 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
                             int val2, long mask)
 {
        struct mcp3911 *adc = iio_priv(indio_dev);
-       int ret = -EINVAL;
 
-       mutex_lock(&adc->lock);
+       guard(mutex)(&adc->lock);
        switch (mask) {
        case IIO_CHAN_INFO_SCALE:
                for (int i = 0; i < MCP3911_NUM_SCALES; i++) {
@@ -374,32 +364,25 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
                            val2 == mcp3911_scale_table[i][1]) {
 
                                adc->gain[channel->channel] = BIT(i);
-                               ret = adc->chip->set_scale(adc, channel->channel, i);
+                               return adc->chip->set_scale(adc, channel->channel, i);
                        }
                }
-               break;
+               return -EINVAL;
        case IIO_CHAN_INFO_OFFSET:
-               if (val2 != 0) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               ret = adc->chip->set_offset(adc, channel->channel, val);
-               break;
+               if (val2 != 0)
+                       return -EINVAL;
 
+               return adc->chip->set_offset(adc, channel->channel, val);
        case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
                for (int i = 0; i < ARRAY_SIZE(mcp3911_osr_table); i++) {
                        if (val == mcp3911_osr_table[i]) {
-                               ret = adc->chip->set_osr(adc, i);
-                               break;
+                               return adc->chip->set_osr(adc, i);
                        }
                }
-               break;
+               return -EINVAL;
+       default:
+               return -EINVAL;
        }
-
-out:
-       mutex_unlock(&adc->lock);
-       return ret;
 }
 
 static int mcp3911_calc_scale_table(struct mcp3911 *adc)
@@ -532,7 +515,7 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
        int i = 0;
        int ret;
 
-       mutex_lock(&adc->lock);
+       guard(mutex)(&adc->lock);
        adc->tx_buf = MCP3911_REG_READ(MCP3911_CHANNEL(0), adc->dev_addr);
        ret = spi_sync_transfer(adc->spi, xfer, ARRAY_SIZE(xfer));
        if (ret < 0) {
@@ -549,7 +532,6 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
        iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
                                           iio_get_time_ns(indio_dev));
 out:
-       mutex_unlock(&adc->lock);
        iio_trigger_notify_done(indio_dev->trig);
 
        return IRQ_HANDLED;
index e87d35d50a95cdfbe4a2a78d2621fc3eaea04d9f..ed4d72922696197b8a160a42fdc6c72db1b6d696 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright 2020 Analog Devices Inc.
  */
 
+#include <linux/bitops.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/gpio/consumer.h>
@@ -22,6 +23,7 @@
 enum hmc425a_type {
        ID_HMC425A,
        ID_HMC540S,
+       ID_ADRF5740
 };
 
 struct hmc425a_chip_info {
@@ -74,6 +76,10 @@ static int hmc425a_read_raw(struct iio_dev *indio_dev,
                case ID_HMC540S:
                        gain = ~code * -1000;
                        break;
+               case ID_ADRF5740:
+                       code = code & BIT(3) ? code & ~BIT(2) : code;
+                       gain = code * -2000;
+                       break;
                }
 
                *val = gain / 1000;
@@ -113,6 +119,10 @@ static int hmc425a_write_raw(struct iio_dev *indio_dev,
        case ID_HMC540S:
                code = ~((abs(gain) / 1000) & 0xF);
                break;
+       case ID_ADRF5740:
+               code = (abs(gain) / 2000) & 0xF;
+               code = code & BIT(3) ? code | BIT(2) : code;
+               break;
        }
 
        mutex_lock(&st->lock);
@@ -165,6 +175,7 @@ static const struct iio_chan_spec hmc425a_channels[] = {
 static const struct of_device_id hmc425a_of_match[] = {
        { .compatible = "adi,hmc425a", .data = (void *)ID_HMC425A },
        { .compatible = "adi,hmc540s", .data = (void *)ID_HMC540S },
+       { .compatible = "adi,adrf5740", .data = (void *)ID_ADRF5740 },
        {},
 };
 MODULE_DEVICE_TABLE(of, hmc425a_of_match);
@@ -188,6 +199,15 @@ static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
                .gain_max = 0,
                .default_gain = -0x10, /* set default gain -15.0db*/
        },
+       [ID_ADRF5740] = {
+               .name = "adrf5740",
+               .channels = hmc425a_channels,
+               .num_channels = ARRAY_SIZE(hmc425a_channels),
+               .num_gpios = 4,
+               .gain_min = -22000,
+               .gain_max = 0,
+               .default_gain = 0xF, /* set default gain of -22.0 dB */
+       },
 };
 
 static int hmc425a_probe(struct platform_device *pdev)
@@ -229,6 +249,9 @@ static int hmc425a_probe(struct platform_device *pdev)
        indio_dev->info = &hmc425a_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
 
+       /* Set default gain */
+       hmc425a_write(indio_dev, st->gain);
+
        return devm_iio_device_register(&pdev->dev, indio_dev);
 }
 
index d348af8b97050141bd239a6f714d854713c47d64..5610ba67925efc8cc6d9c3c91bf17c9770375eea 100644 (file)
@@ -179,7 +179,7 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
        }
 
        block->size = size;
-       block->state = IIO_BLOCK_STATE_DEQUEUED;
+       block->state = IIO_BLOCK_STATE_DONE;
        block->queue = queue;
        INIT_LIST_HEAD(&block->head);
        kref_init(&block->kref);
@@ -191,16 +191,8 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
 
 static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 {
-       struct iio_dma_buffer_queue *queue = block->queue;
-
-       /*
-        * The buffer has already been freed by the application, just drop the
-        * reference.
-        */
-       if (block->state != IIO_BLOCK_STATE_DEAD) {
+       if (block->state != IIO_BLOCK_STATE_DEAD)
                block->state = IIO_BLOCK_STATE_DONE;
-               list_add_tail(&block->head, &queue->outgoing);
-       }
 }
 
 /**
@@ -261,7 +253,6 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
         * not support abort and has not given back the block yet.
         */
        switch (block->state) {
-       case IIO_BLOCK_STATE_DEQUEUED:
        case IIO_BLOCK_STATE_QUEUED:
        case IIO_BLOCK_STATE_DONE:
                return true;
@@ -317,7 +308,6 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
         * dead. This means we can reset the lists without having to fear
         * corrution.
         */
-       INIT_LIST_HEAD(&queue->outgoing);
        spin_unlock_irq(&queue->list_lock);
 
        INIT_LIST_HEAD(&queue->incoming);
@@ -356,6 +346,29 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);
 
+static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
+{
+       unsigned int i;
+
+       spin_lock_irq(&queue->list_lock);
+       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+               if (!queue->fileio.blocks[i])
+                       continue;
+               queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
+       }
+       spin_unlock_irq(&queue->list_lock);
+
+       INIT_LIST_HEAD(&queue->incoming);
+
+       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+               if (!queue->fileio.blocks[i])
+                       continue;
+               iio_buffer_block_put(queue->fileio.blocks[i]);
+               queue->fileio.blocks[i] = NULL;
+       }
+       queue->fileio.active_block = NULL;
+}
+
 static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
        struct iio_dma_buffer_block *block)
 {
@@ -456,14 +469,20 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
        struct iio_dma_buffer_queue *queue)
 {
        struct iio_dma_buffer_block *block;
+       unsigned int idx;
 
        spin_lock_irq(&queue->list_lock);
-       block = list_first_entry_or_null(&queue->outgoing, struct
-               iio_dma_buffer_block, head);
-       if (block != NULL) {
-               list_del(&block->head);
-               block->state = IIO_BLOCK_STATE_DEQUEUED;
+
+       idx = queue->fileio.next_dequeue;
+       block = queue->fileio.blocks[idx];
+
+       if (block->state == IIO_BLOCK_STATE_DONE) {
+               idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
+               queue->fileio.next_dequeue = idx;
+       } else {
+               block = NULL;
        }
+
        spin_unlock_irq(&queue->list_lock);
 
        return block;
@@ -539,6 +558,7 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
        struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
        struct iio_dma_buffer_block *block;
        size_t data_available = 0;
+       unsigned int i;
 
        /*
         * For counting the available bytes we'll use the size of the block not
@@ -552,8 +572,15 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
                data_available += queue->fileio.active_block->size;
 
        spin_lock_irq(&queue->list_lock);
-       list_for_each_entry(block, &queue->outgoing, head)
-               data_available += block->size;
+
+       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+               block = queue->fileio.blocks[i];
+
+               if (block != queue->fileio.active_block
+                   && block->state == IIO_BLOCK_STATE_DONE)
+                       data_available += block->size;
+       }
+
        spin_unlock_irq(&queue->list_lock);
        mutex_unlock(&queue->lock);
 
@@ -617,7 +644,6 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
        queue->ops = ops;
 
        INIT_LIST_HEAD(&queue->incoming);
-       INIT_LIST_HEAD(&queue->outgoing);
 
        mutex_init(&queue->lock);
        spin_lock_init(&queue->list_lock);
@@ -635,28 +661,9 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
  */
 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
 {
-       unsigned int i;
-
        mutex_lock(&queue->lock);
 
-       spin_lock_irq(&queue->list_lock);
-       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
-               if (!queue->fileio.blocks[i])
-                       continue;
-               queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
-       }
-       INIT_LIST_HEAD(&queue->outgoing);
-       spin_unlock_irq(&queue->list_lock);
-
-       INIT_LIST_HEAD(&queue->incoming);
-
-       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
-               if (!queue->fileio.blocks[i])
-                       continue;
-               iio_buffer_block_put(queue->fileio.blocks[i]);
-               queue->fileio.blocks[i] = NULL;
-       }
-       queue->fileio.active_block = NULL;
+       iio_dma_buffer_fileio_free(queue);
        queue->ops = NULL;
 
        mutex_unlock(&queue->lock);
index c30657e10ee17a400cab7e6e1a8e5e3244ec977c..02649ab81b3cef9d78935a1433f54b60aa6f77ea 100644 (file)
@@ -5,6 +5,17 @@
 
 menu "Chemical Sensors"
 
+config AOSONG_AGS02MA
+       tristate "Aosong AGS02MA TVOC sensor driver"
+       depends on I2C
+       select CRC8
+       help
+         Say Y here to build support for Aosong AGS02MA TVOC (Total Volatile
+         Organic Compounds) sensor.
+
+         To compile this driver as module, choose M here: the module will be
+         called ags02ma.
+
 config ATLAS_PH_SENSOR
        tristate "Atlas Scientific OEM SM sensors"
        depends on I2C
index a11e777a7a00521d7eb8806ae8745ad7b12c651b..2f3dee8bb779600d874711f5c33392de5ec196ba 100644 (file)
@@ -4,6 +4,7 @@
 #
 
 # When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AOSONG_AGS02MA)   += ags02ma.o
 obj-$(CONFIG_ATLAS_PH_SENSOR)  += atlas-sensor.o
 obj-$(CONFIG_ATLAS_EZO_SENSOR) += atlas-ezo-sensor.o
 obj-$(CONFIG_BME680) += bme680_core.o
diff --git a/drivers/iio/chemical/ags02ma.c b/drivers/iio/chemical/ags02ma.c
new file mode 100644 (file)
index 0000000..8fcd809
--- /dev/null
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com>
+ *
+ * Driver for Aosong AGS02MA
+ *
+ * Datasheet:
+ *   https://asairsensors.com/wp-content/uploads/2021/09/AGS02MA.pdf
+ * Product Page:
+ *   http://www.aosong.com/m/en/products-33.html
+ */
+
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+
+#define AGS02MA_TVOC_READ_REG             0x00
+#define AGS02MA_VERSION_REG               0x11
+
+#define AGS02MA_VERSION_PROCESSING_DELAY   30
+#define AGS02MA_TVOC_READ_PROCESSING_DELAY 1500
+
+#define AGS02MA_CRC8_INIT                 0xff
+#define AGS02MA_CRC8_POLYNOMIAL                   0x31
+
+DECLARE_CRC8_TABLE(ags02ma_crc8_table);
+
+struct ags02ma_data {
+       struct i2c_client *client;
+};
+
+struct ags02ma_reading {
+       __be32 data;
+       u8 crc;
+} __packed;
+
+static int ags02ma_register_read(struct i2c_client *client, u8 reg, u16 delay,
+                                u32 *val)
+{
+       int ret;
+       u8 crc;
+       struct ags02ma_reading read_buffer;
+
+       ret = i2c_master_send(client, &reg, sizeof(reg));
+       if (ret < 0) {
+               dev_err(&client->dev,
+                       "Failed to send data to register 0x%x: %d", reg, ret);
+               return ret;
+       }
+
+       /* Processing Delay, Check Table 7.7 in the datasheet */
+       msleep_interruptible(delay);
+
+       ret = i2c_master_recv(client, (u8 *)&read_buffer, sizeof(read_buffer));
+       if (ret < 0) {
+               dev_err(&client->dev,
+                       "Failed to receive from register 0x%x: %d", reg, ret);
+               return ret;
+       }
+
+       crc = crc8(ags02ma_crc8_table, (u8 *)&read_buffer.data,
+                  sizeof(read_buffer.data), AGS02MA_CRC8_INIT);
+       if (crc != read_buffer.crc) {
+               dev_err(&client->dev, "CRC error\n");
+               return -EIO;
+       }
+
+       *val = be32_to_cpu(read_buffer.data);
+       return 0;
+}
+
+static int ags02ma_read_raw(struct iio_dev *iio_device,
+                           struct iio_chan_spec const *chan, int *val,
+                           int *val2, long mask)
+{
+       int ret;
+       struct ags02ma_data *data = iio_priv(iio_device);
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               ret = ags02ma_register_read(data->client, AGS02MA_TVOC_READ_REG,
+                                           AGS02MA_TVOC_READ_PROCESSING_DELAY,
+                                           val);
+               if (ret < 0)
+                       return ret;
+               return IIO_VAL_INT;
+       case IIO_CHAN_INFO_SCALE:
+               /* The sensor reads data as ppb */
+               *val = 0;
+               *val2 = 100;
+               return IIO_VAL_INT_PLUS_NANO;
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct iio_info ags02ma_info = {
+       .read_raw = ags02ma_read_raw,
+};
+
+static const struct iio_chan_spec ags02ma_channel = {
+       .type = IIO_CONCENTRATION,
+       .channel2 = IIO_MOD_VOC,
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+               BIT(IIO_CHAN_INFO_SCALE),
+};
+
+static int ags02ma_probe(struct i2c_client *client)
+{
+       int ret;
+       struct ags02ma_data *data;
+       struct iio_dev *indio_dev;
+       u32 version;
+
+       indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+       if (!indio_dev)
+               return -ENOMEM;
+
+       crc8_populate_msb(ags02ma_crc8_table, AGS02MA_CRC8_POLYNOMIAL);
+
+       ret = ags02ma_register_read(client, AGS02MA_VERSION_REG,
+                                   AGS02MA_VERSION_PROCESSING_DELAY, &version);
+       if (ret < 0)
+               return dev_err_probe(&client->dev, ret,
+                             "Failed to read device version\n");
+       dev_dbg(&client->dev, "Aosong AGS02MA, Version: 0x%x", version);
+
+       data = iio_priv(indio_dev);
+       data->client = client;
+       indio_dev->info = &ags02ma_info;
+       indio_dev->channels = &ags02ma_channel;
+       indio_dev->num_channels = 1;
+       indio_dev->name = "ags02ma";
+
+       return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id ags02ma_id_table[] = {
+       { "ags02ma" },
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ags02ma_id_table);
+
+static const struct of_device_id ags02ma_of_table[] = {
+       { .compatible = "aosong,ags02ma" },
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ags02ma_of_table);
+
+static struct i2c_driver ags02ma_driver = {
+       .driver = {
+               .name = "ags02ma",
+               .of_match_table = ags02ma_of_table,
+       },
+       .id_table = ags02ma_id_table,
+       .probe = ags02ma_probe,
+};
+module_i2c_driver(ags02ma_driver);
+
+MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>");
+MODULE_DESCRIPTION("Aosong AGS02MA TVOC Driver");
+MODULE_LICENSE("GPL");
index e9857d93b307e4a3989177b09d554a4894e3db70..b5cf15a515d251a3cb4d862cf7a11ba82596baa8 100644 (file)
@@ -211,13 +211,13 @@ static bool pms7003_frame_is_okay(struct pms7003_frame *frame)
        return checksum == pms7003_calc_checksum(frame);
 }
 
-static int pms7003_receive_buf(struct serdev_device *serdev,
-                              const unsigned char *buf, size_t size)
+static ssize_t pms7003_receive_buf(struct serdev_device *serdev, const u8 *buf,
+                                  size_t size)
 {
        struct iio_dev *indio_dev = serdev_device_get_drvdata(serdev);
        struct pms7003_state *state = iio_priv(indio_dev);
        struct pms7003_frame *frame = &state->frame;
-       int num;
+       size_t num;
 
        if (!frame->expected_length) {
                u16 magic;
index 3c519103d30b54fc664b2751ceb0b90754bc374f..a47654591e555fcf107e5c23180438b99921a15d 100644 (file)
@@ -174,13 +174,13 @@ static int scd30_serdev_command(struct scd30_state *state, enum scd30_cmd cmd, u
        return 0;
 }
 
-static int scd30_serdev_receive_buf(struct serdev_device *serdev,
-                                   const unsigned char *buf, size_t size)
+static ssize_t scd30_serdev_receive_buf(struct serdev_device *serdev,
+                                       const u8 *buf, size_t size)
 {
        struct iio_dev *indio_dev = serdev_device_get_drvdata(serdev);
        struct scd30_serdev_priv *priv;
        struct scd30_state *state;
-       int num;
+       size_t num;
 
        if (!indio_dev)
                return 0;
index 164f4b3e025c94f831cabface303bee6c1848e59..3afa89f8acc329c73cf39f22cec98251a32d6120 100644 (file)
@@ -74,8 +74,8 @@ static int sps30_serial_xfer(struct sps30_state *state, const unsigned char *buf
 }
 
 static const struct {
-       unsigned char byte;
-       unsigned char byte2;
+       u8 byte;
+       u8 byte2;
 } sps30_serial_bytes[] = {
        { 0x11, 0x31 },
        { 0x13, 0x33 },
@@ -83,7 +83,7 @@ static const struct {
        { 0x7d, 0x5d },
 };
 
-static int sps30_serial_put_byte(unsigned char *buf, unsigned char byte)
+static int sps30_serial_put_byte(u8 *buf, u8 byte)
 {
        int i;
 
@@ -102,7 +102,7 @@ static int sps30_serial_put_byte(unsigned char *buf, unsigned char byte)
        return 1;
 }
 
-static char sps30_serial_get_byte(bool escaped, unsigned char byte2)
+static u8 sps30_serial_get_byte(bool escaped, u8 byte2)
 {
        int i;
 
@@ -130,8 +130,8 @@ static unsigned char sps30_serial_calc_chksum(const unsigned char *buf, size_t n
        return ~chksum;
 }
 
-static int sps30_serial_prep_frame(unsigned char *buf, unsigned char cmd,
-                                  const unsigned char *arg, size_t arg_size)
+static int sps30_serial_prep_frame(u8 *buf, u8 cmd, const u8 *arg,
+                                  size_t arg_size)
 {
        unsigned char chksum;
        int num = 0;
@@ -210,14 +210,14 @@ static int sps30_serial_command(struct sps30_state *state, unsigned char cmd,
        return rsp_size;
 }
 
-static int sps30_serial_receive_buf(struct serdev_device *serdev,
-                                   const unsigned char *buf, size_t size)
+static ssize_t sps30_serial_receive_buf(struct serdev_device *serdev,
+                                       const u8 *buf, size_t size)
 {
        struct iio_dev *indio_dev = dev_get_drvdata(&serdev->dev);
        struct sps30_serial_priv *priv;
        struct sps30_state *state;
-       unsigned char byte;
        size_t i;
+       u8 byte;
 
        if (!indio_dev)
                return 0;
index 93b8be183de6b4bf1ead1a1fd5592c1751cc3c28..34eb40bb95291a47cedf95cb40bd90f6448b0edc 100644 (file)
@@ -400,6 +400,16 @@ config MCP4728
          To compile this driver as a module, choose M here: the module
          will be called mcp4728.
 
+config MCP4821
+       tristate "MCP4801/02/11/12/21/22 DAC driver"
+       depends on SPI
+       help
+         Say yes here to build the driver for the Microchip MCP4801,
+         MCP4802, MCP4811, MCP4812, MCP4821 and MCP4822 DAC devices.
+
+         To compile this driver as a module, choose M here: the module
+         will be called mcp4821.
+
 config MCP4922
        tristate "MCP4902, MCP4912, MCP4922 DAC driver"
        depends on SPI
index 5b2bac900d5a75061ff3b55d587f78ba4d7ac775..55bf89739d14b99db72a0558ab8ad18473c9b77e 100644 (file)
@@ -42,6 +42,7 @@ obj-$(CONFIG_MAX5522) += max5522.o
 obj-$(CONFIG_MAX5821) += max5821.o
 obj-$(CONFIG_MCP4725) += mcp4725.o
 obj-$(CONFIG_MCP4728) += mcp4728.o
+obj-$(CONFIG_MCP4821) += mcp4821.o
 obj-$(CONFIG_MCP4922) += mcp4922.o
 obj-$(CONFIG_STM32_DAC_CORE) += stm32-dac-core.o
 obj-$(CONFIG_STM32_DAC) += stm32-dac.o
index a4167454da8188a8c5134b2a792aba41b0e3ddf6..75b549827e15a5d9aff5541ee098bdd3920c30b7 100644 (file)
@@ -345,6 +345,7 @@ static int ad5791_probe(struct spi_device *spi)
        struct iio_dev *indio_dev;
        struct ad5791_state *st;
        int ret, pos_voltage_uv = 0, neg_voltage_uv = 0;
+       bool use_rbuf_gain2;
 
        indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
        if (!indio_dev)
@@ -379,6 +380,12 @@ static int ad5791_probe(struct spi_device *spi)
        st->pwr_down = true;
        st->spi = spi;
 
+       if (pdata)
+               use_rbuf_gain2 = pdata->use_rbuf_gain2;
+       else
+               use_rbuf_gain2 = device_property_read_bool(&spi->dev,
+                                                          "adi,rbuf-gain2-en");
+
        if (!IS_ERR(st->reg_vss) && !IS_ERR(st->reg_vdd)) {
                st->vref_mv = (pos_voltage_uv + neg_voltage_uv) / 1000;
                st->vref_neg_mv = neg_voltage_uv / 1000;
@@ -398,7 +405,7 @@ static int ad5791_probe(struct spi_device *spi)
 
 
        st->ctrl = AD5761_CTRL_LINCOMP(st->chip_info->get_lin_comp(st->vref_mv))
-                 | ((pdata && pdata->use_rbuf_gain2) ? 0 : AD5791_CTRL_RBUF) |
+                 | (use_rbuf_gain2 ? 0 : AD5791_CTRL_RBUF) |
                  AD5791_CTRL_BIN2SC;
 
        ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl |
diff --git a/drivers/iio/dac/mcp4821.c b/drivers/iio/dac/mcp4821.c
new file mode 100644 (file)
index 0000000..8a0480d
--- /dev/null
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com>
+ *
+ * Driver for Microchip MCP4801, MCP4802, MCP4811, MCP4812, MCP4821 and MCP4822
+ *
+ * Based on the work of:
+ *     Michael Welling (MCP4922 Driver)
+ *
+ * Datasheet:
+ *     MCP48x1: https://ww1.microchip.com/downloads/en/DeviceDoc/22244B.pdf
+ *     MCP48x2: https://ww1.microchip.com/downloads/en/DeviceDoc/20002249B.pdf
+ *
+ * TODO:
+ *     - Configurable gain
+ *     - Regulator control
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+#include <asm/unaligned.h>
+
+#define MCP4821_ACTIVE_MODE BIT(12)
+#define MCP4802_SECOND_CHAN BIT(15)
+
+/* DAC uses an internal Voltage reference of 4.096V at a gain of 2x */
+#define MCP4821_2X_GAIN_VREF_MV 4096
+
+/* Index into mcp4821_chip_info_table for each supported chip variant. */
+enum mcp4821_supported_device_ids {
+       ID_MCP4801,
+       ID_MCP4802,
+       ID_MCP4811,
+       ID_MCP4812,
+       ID_MCP4821,
+       ID_MCP4822,
+};
+
+/* Driver state: the SPI handle and the last raw code written per channel. */
+struct mcp4821_state {
+       struct spi_device *spi;
+       u16 dac_value[2]; /* cached raw codes, indexed by channel number */
+};
+
+/* Per-variant description: IIO device name and channel layout (1 or 2 DACs). */
+struct mcp4821_chip_info {
+       const char *name;
+       int num_channels;
+       const struct iio_chan_spec channels[2];
+};
+
+/*
+ * Output voltage channel of @resolution bits (8/10/12 depending on variant).
+ * scan_type.shift left-aligns the raw code into the chip's 12-bit data field.
+ */
+#define MCP4821_CHAN(channel_id, resolution)                          \
+       {                                                             \
+               .type = IIO_VOLTAGE, .output = 1, .indexed = 1,       \
+               .channel = (channel_id),                              \
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),         \
+               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+               .scan_type = {                                        \
+                       .realbits = (resolution),                     \
+                       .shift = 12 - (resolution),                   \
+               },                                                    \
+       }
+
+static const struct mcp4821_chip_info mcp4821_chip_info_table[6] = {
+       [ID_MCP4801] = {
+                       .name = "mcp4801",
+                       .num_channels = 1,
+                       .channels = {
+                               MCP4821_CHAN(0, 8),
+                       },
+       },
+       [ID_MCP4802] = {
+                       .name = "mcp4802",
+                       .num_channels = 2,
+                       .channels = {
+                               MCP4821_CHAN(0, 8),
+                               MCP4821_CHAN(1, 8),
+                       },
+       },
+       [ID_MCP4811] = {
+                       .name = "mcp4811",
+                       .num_channels = 1,
+                       .channels = {
+                               MCP4821_CHAN(0, 10),
+                       },
+       },
+       [ID_MCP4812] = {
+                       .name = "mcp4812",
+                       .num_channels = 2,
+                       .channels = {
+                               MCP4821_CHAN(0, 10),
+                               MCP4821_CHAN(1, 10),
+                       },
+       },
+       [ID_MCP4821] = {
+                       .name = "mcp4821",
+                       .num_channels = 1,
+                       .channels = {
+                               MCP4821_CHAN(0, 12),
+                       },
+       },
+       [ID_MCP4822] = {
+                       .name = "mcp4822",
+                       .num_channels = 2,
+                       .channels = {
+                               MCP4821_CHAN(0, 12),
+                               MCP4821_CHAN(1, 12),
+                       },
+       },
+};
+
+/*
+ * Report either the cached raw DAC code (no bus access; the DAC has no
+ * readback) or the channel scale.  Scale is the fixed internal 4.096 V
+ * reference (2x gain) over 2^realbits, as a FRACTIONAL_LOG2 value.
+ */
+static int mcp4821_read_raw(struct iio_dev *indio_dev,
+                           struct iio_chan_spec const *chan, int *val,
+                           int *val2, long mask)
+{
+       struct mcp4821_state *state;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               state = iio_priv(indio_dev);
+               /* Last value accepted by mcp4821_write_raw(). */
+               *val = state->dac_value[chan->channel];
+               return IIO_VAL_INT;
+       case IIO_CHAN_INFO_SCALE:
+               *val = MCP4821_2X_GAIN_VREF_MV;
+               *val2 = chan->scan_type.realbits;
+               return IIO_VAL_FRACTIONAL_LOG2;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * Write a raw code to one DAC channel.
+ *
+ * The 16-bit SPI word carries the channel-select bit (bit 15, dual-channel
+ * parts only), the active-mode bit (bit 12) and the code left-aligned into
+ * the 12-bit data field by scan_type.shift.  The cached value is updated
+ * only after the bus write succeeds.
+ */
+static int mcp4821_write_raw(struct iio_dev *indio_dev,
+                            struct iio_chan_spec const *chan, int val,
+                            int val2, long mask)
+{
+       struct mcp4821_state *state = iio_priv(indio_dev);
+       u16 write_val;
+       __be16 write_buffer;
+       int ret;
+
+       if (val2 != 0)
+               return -EINVAL;
+
+       if (val < 0 || val >= BIT(chan->scan_type.realbits))
+               return -EINVAL;
+
+       if (mask != IIO_CHAN_INFO_RAW)
+               return -EINVAL;
+
+       write_val = MCP4821_ACTIVE_MODE | val << chan->scan_type.shift;
+       if (chan->channel)
+               write_val |= MCP4802_SECOND_CHAN;
+
+       write_buffer = cpu_to_be16(write_val);
+       ret = spi_write(state->spi, &write_buffer, sizeof(write_buffer));
+       if (ret) {
+               /* Kernel log messages are newline terminated. */
+               dev_err(&state->spi->dev, "Failed to write to device: %d\n",
+                       ret);
+               return ret;
+       }
+
+       state->dac_value[chan->channel] = val;
+
+       return 0;
+}
+
+static const struct iio_info mcp4821_info = {
+       .read_raw = &mcp4821_read_raw,
+       .write_raw = &mcp4821_write_raw,
+};
+
+/*
+ * Allocate the IIO device, look up the per-variant chip info from the
+ * OF/SPI match data and register the device (all device-managed).
+ */
+static int mcp4821_probe(struct spi_device *spi)
+{
+       struct iio_dev *indio_dev;
+       struct mcp4821_state *state;
+       const struct mcp4821_chip_info *info;
+
+       indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*state));
+       if (indio_dev == NULL)
+               return -ENOMEM;
+
+       state = iio_priv(indio_dev);
+       state->spi = spi;
+
+       /*
+        * NOTE(review): spi_get_device_match_data() can return NULL when no
+        * table entry matches; both match tables here always carry data, but
+        * confirm no match-less instantiation path exists.
+        */
+       info = spi_get_device_match_data(spi);
+       indio_dev->name = info->name;
+       indio_dev->info = &mcp4821_info;
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->channels = info->channels;
+       indio_dev->num_channels = info->num_channels;
+
+       return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+#define MCP4821_COMPATIBLE(of_compatible, id)        \
+       {                                            \
+               .compatible = of_compatible,         \
+               .data = &mcp4821_chip_info_table[id] \
+       }
+
+static const struct of_device_id mcp4821_of_table[] = {
+       MCP4821_COMPATIBLE("microchip,mcp4801", ID_MCP4801),
+       MCP4821_COMPATIBLE("microchip,mcp4802", ID_MCP4802),
+       MCP4821_COMPATIBLE("microchip,mcp4811", ID_MCP4811),
+       MCP4821_COMPATIBLE("microchip,mcp4812", ID_MCP4812),
+       MCP4821_COMPATIBLE("microchip,mcp4821", ID_MCP4821),
+       MCP4821_COMPATIBLE("microchip,mcp4822", ID_MCP4822),
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mcp4821_of_table);
+
+static const struct spi_device_id mcp4821_id_table[] = {
+       { "mcp4801", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4801]},
+       { "mcp4802", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4802]},
+       { "mcp4811", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4811]},
+       { "mcp4812", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4812]},
+       { "mcp4821", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4821]},
+       { "mcp4822", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4822]},
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, mcp4821_id_table);
+
+static struct spi_driver mcp4821_driver = {
+       .driver = {
+               .name = "mcp4821",
+               .of_match_table = mcp4821_of_table,
+       },
+       .probe = mcp4821_probe,
+       .id_table = mcp4821_id_table,
+};
+module_spi_driver(mcp4821_driver);
+
+MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>");
+MODULE_DESCRIPTION("Microchip MCP4821 DAC Driver");
+MODULE_LICENSE("GPL");
index 26abecbd51e06611b23f3d54fc09d32430216de4..9284c13f1abb3428d1d9b8d9d1a08089a7d753a1 100644 (file)
@@ -870,7 +870,6 @@ static const struct iio_chan_spec adf4377_channels[] = {
 static int adf4377_properties_parse(struct adf4377_state *st)
 {
        struct spi_device *spi = st->spi;
-       const char *str;
        int ret;
 
        st->clkin = devm_clk_get_enabled(&spi->dev, "ref_in");
@@ -896,16 +895,13 @@ static int adf4377_properties_parse(struct adf4377_state *st)
                return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2),
                                     "failed to get the CE GPIO\n");
 
-       ret = device_property_read_string(&spi->dev, "adi,muxout-select", &str);
-       if (ret) {
-               st->muxout_select = ADF4377_MUXOUT_HIGH_Z;
-       } else {
-               ret = match_string(adf4377_muxout_modes, ARRAY_SIZE(adf4377_muxout_modes), str);
-               if (ret < 0)
-                       return ret;
-
+       ret = device_property_match_property_string(&spi->dev, "adi,muxout-select",
+                                                   adf4377_muxout_modes,
+                                                   ARRAY_SIZE(adf4377_muxout_modes));
+       if (ret >= 0)
                st->muxout_select = ret;
-       }
+       else
+               st->muxout_select = ADF4377_MUXOUT_HIGH_Z;
 
        return 0;
 }
index bb5e1feef42bfc09fac9955613c0c154ab1fe44a..b46b73b89eb715de9472e15a87af7cdd5607294c 100644 (file)
@@ -710,7 +710,6 @@ static int admv1014_init(struct admv1014_state *st)
 
 static int admv1014_properties_parse(struct admv1014_state *st)
 {
-       const char *str;
        unsigned int i;
        struct spi_device *spi = st->spi;
        int ret;
@@ -719,27 +718,21 @@ static int admv1014_properties_parse(struct admv1014_state *st)
 
        st->p1db_comp = device_property_read_bool(&spi->dev, "adi,p1db-compensation-enable");
 
-       ret = device_property_read_string(&spi->dev, "adi,input-mode", &str);
-       if (ret) {
-               st->input_mode = ADMV1014_IQ_MODE;
-       } else {
-               ret = match_string(input_mode_names, ARRAY_SIZE(input_mode_names), str);
-               if (ret < 0)
-                       return ret;
-
+       ret = device_property_match_property_string(&spi->dev, "adi,input-mode",
+                                                   input_mode_names,
+                                                   ARRAY_SIZE(input_mode_names));
+       if (ret >= 0)
                st->input_mode = ret;
-       }
-
-       ret = device_property_read_string(&spi->dev, "adi,quad-se-mode", &str);
-       if (ret) {
-               st->quad_se_mode = ADMV1014_SE_MODE_POS;
-       } else {
-               ret = match_string(quad_se_mode_names, ARRAY_SIZE(quad_se_mode_names), str);
-               if (ret < 0)
-                       return ret;
+       else
+               st->input_mode = ADMV1014_IQ_MODE;
 
+       ret = device_property_match_property_string(&spi->dev, "adi,quad-se-mode",
+                                                   quad_se_mode_names,
+                                                   ARRAY_SIZE(quad_se_mode_names));
+       if (ret >= 0)
                st->quad_se_mode = ADMV1014_SE_MODE_POS + (ret * 3);
-       }
+       else
+               st->quad_se_mode = ADMV1014_SE_MODE_POS;
 
        for (i = 0; i < ADMV1014_NUM_REGULATORS; ++i)
                st->regulators[i].supply = admv1014_reg_name[i];
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
new file mode 100644 (file)
index 0000000..4e33111
--- /dev/null
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * hdc3020.c - Support for the TI HDC3020,HDC3021 and HDC3022
+ * temperature + relative humidity sensors
+ *
+ * Copyright (C) 2023
+ *
+ * Datasheet: https://www.ti.com/lit/ds/symlink/hdc3020.pdf
+ */
+
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/iio.h>
+
+#define HDC3020_HEATER_CMD_MSB         0x30 /* shared by all heater commands */
+#define HDC3020_HEATER_ENABLE          0x6D
+#define HDC3020_HEATER_DISABLE         0x66
+#define HDC3020_HEATER_CONFIG          0x6E
+
+#define HDC3020_READ_RETRY_TIMES       10
+#define HDC3020_BUSY_DELAY_MS          10
+
+#define HDC3020_CRC8_POLYNOMIAL                0x31
+
+static const u8 HDC3020_S_AUTO_10HZ_MOD0[2] = { 0x27, 0x37 };
+
+static const u8 HDC3020_EXIT_AUTO[2] = { 0x30, 0x93 };
+
+static const u8 HDC3020_R_T_RH_AUTO[2] = { 0xE0, 0x00 };
+static const u8 HDC3020_R_T_LOW_AUTO[2] = { 0xE0, 0x02 };
+static const u8 HDC3020_R_T_HIGH_AUTO[2] = { 0xE0, 0x03 };
+static const u8 HDC3020_R_RH_LOW_AUTO[2] = { 0xE0, 0x04 };
+static const u8 HDC3020_R_RH_HIGH_AUTO[2] = { 0xE0, 0x05 };
+
+struct hdc3020_data {
+       struct i2c_client *client;
+       /*
+        * Ensure that the sensor configuration (currently only heater is
+        * supported) will not be changed during the process of reading
+        * sensor data (this driver will try HDC3020_READ_RETRY_TIMES times
+        * if the device does not respond).
+        */
+       struct mutex lock;
+};
+
+static const int hdc3020_heater_vals[] = {0, 1, 0x3FFF};
+
+static const struct iio_chan_spec hdc3020_channels[] = {
+       {
+               .type = IIO_TEMP,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+               BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_PEAK) |
+               BIT(IIO_CHAN_INFO_TROUGH) | BIT(IIO_CHAN_INFO_OFFSET),
+       },
+       {
+               .type = IIO_HUMIDITYRELATIVE,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+               BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_PEAK) |
+               BIT(IIO_CHAN_INFO_TROUGH),
+       },
+       {
+               /*
+                * For setting the internal heater, which can be switched on to
+                * prevent or remove any condensation that may develop when the
+                * ambient environment approaches its dew point temperature.
+                */
+               .type = IIO_CURRENT,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+               .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+               .output = 1,
+       },
+};
+
+DECLARE_CRC8_TABLE(hdc3020_crc8_table);
+
+/*
+ * Send @len command/data bytes from @buf to the sensor.
+ *
+ * Retries up to HDC3020_READ_RETRY_TIMES because the device NAKs while a
+ * measurement is in progress.  Returns 0 on success, -ETIMEDOUT when all
+ * retries are exhausted.
+ */
+static int hdc3020_write_bytes(struct hdc3020_data *data, const u8 *buf, u8 len)
+{
+       struct i2c_client *client = data->client;
+       struct i2c_msg msg;
+       int ret, cnt;
+
+       msg.addr = client->addr;
+       msg.flags = 0;
+       msg.buf = (char *)buf;
+       msg.len = len;
+
+       /*
+        * During the measurement process, HDC3020 will not return data.
+        * So wait for a while and try again
+        */
+       for (cnt = 0; cnt < HDC3020_READ_RETRY_TIMES; cnt++) {
+               ret = i2c_transfer(client->adapter, &msg, 1);
+               if (ret == 1)
+                       return 0;
+
+               /*
+                * NOTE(review): mdelay() busy-waits for 10 ms per retry;
+                * this runs in sleepable context, so msleep() would be
+                * preferable — confirm and consider switching.
+                */
+               mdelay(HDC3020_BUSY_DELAY_MS);
+       }
+       dev_err(&client->dev, "Could not write sensor command\n");
+
+       return -ETIMEDOUT;
+}
+
+/*
+ * Issue a 2-byte command from @buf and read back @len bytes into @val as a
+ * combined write-then-read transfer.
+ *
+ * Retries like hdc3020_write_bytes(); returns 0 on success, -ETIMEDOUT when
+ * the sensor keeps NAKing.
+ */
+static int hdc3020_read_bytes(struct hdc3020_data *data, const u8 *buf,
+                             void *val, int len)
+{
+       int ret, cnt;
+       struct i2c_client *client = data->client;
+       struct i2c_msg msg[2] = {
+               [0] = {
+                       .addr = client->addr,
+                       .flags = 0,
+                       .buf = (char *)buf,
+                       .len = 2,
+               },
+               [1] = {
+                       .addr = client->addr,
+                       .flags = I2C_M_RD,
+                       .buf = val,
+                       .len = len,
+               },
+       };
+
+       /*
+        * During the measurement process, HDC3020 will not return data.
+        * So wait for a while and try again
+        */
+       for (cnt = 0; cnt < HDC3020_READ_RETRY_TIMES; cnt++) {
+               ret = i2c_transfer(client->adapter, msg, 2);
+               if (ret == 2)
+                       return 0;
+
+               /* NOTE(review): mdelay() busy-waits; msleep() likely suffices. */
+               mdelay(HDC3020_BUSY_DELAY_MS);
+       }
+       dev_err(&client->dev, "Could not read sensor data\n");
+
+       return -ETIMEDOUT;
+}
+
+/*
+ * Fetch the latest auto-measurement result.  The sensor returns 6 bytes:
+ * be16 temperature, its CRC8, be16 relative humidity, its CRC8.  Either
+ * CRC mismatch or an unsupported channel type yields -EINVAL.
+ */
+static int hdc3020_read_measurement(struct hdc3020_data *data,
+                                   enum iio_chan_type type, int *val)
+{
+       u8 crc, buf[6];
+       int ret;
+
+       ret = hdc3020_read_bytes(data, HDC3020_R_T_RH_AUTO, buf, 6);
+       if (ret < 0)
+               return ret;
+
+       /* CRC check of the temperature measurement */
+       crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
+       if (crc != buf[2])
+               return -EINVAL;
+
+       /* CRC check of the relative humidity measurement */
+       crc = crc8(hdc3020_crc8_table, buf + 3, 2, CRC8_INIT_VALUE);
+       if (crc != buf[5])
+               return -EINVAL;
+
+       if (type == IIO_TEMP)
+               *val = get_unaligned_be16(buf);
+       else if (type == IIO_HUMIDITYRELATIVE)
+               *val = get_unaligned_be16(&buf[3]);
+       else
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * After exiting the automatic measurement mode or resetting, the peak
+ * values are reset to their defaults.
+ *
+ * Read one CRC8-protected big-endian 16-bit register (a peak temperature
+ * or humidity tracked during automatic measurement) selected by @cmd.
+ * Returns 0 on success, -EINVAL on CRC mismatch, or the transfer error.
+ */
+static int hdc3020_read_be16_crc(struct hdc3020_data *data, const u8 *cmd,
+                                int *val)
+{
+       u8 crc, buf[3];
+       int ret;
+
+       ret = hdc3020_read_bytes(data, cmd, buf, 3);
+       if (ret < 0)
+               return ret;
+
+       crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
+       if (crc != buf[2])
+               return -EINVAL;
+
+       *val = get_unaligned_be16(buf);
+
+       return 0;
+}
+
+/* Highest temperature measured during automatic measurement. */
+static int hdc3020_read_high_peak_t(struct hdc3020_data *data, int *val)
+{
+       return hdc3020_read_be16_crc(data, HDC3020_R_T_HIGH_AUTO, val);
+}
+
+/* Lowest temperature measured during automatic measurement. */
+static int hdc3020_read_low_peak_t(struct hdc3020_data *data, int *val)
+{
+       return hdc3020_read_be16_crc(data, HDC3020_R_T_LOW_AUTO, val);
+}
+
+/* Highest relative humidity measured during automatic measurement. */
+static int hdc3020_read_high_peak_rh(struct hdc3020_data *data, int *val)
+{
+       return hdc3020_read_be16_crc(data, HDC3020_R_RH_HIGH_AUTO, val);
+}
+
+/* Lowest relative humidity measured during automatic measurement. */
+static int hdc3020_read_low_peak_rh(struct hdc3020_data *data, int *val)
+{
+       return hdc3020_read_be16_crc(data, HDC3020_R_RH_LOW_AUTO, val);
+}
+
+/*
+ * Read a temperature or relative-humidity property.  Raw/peak/trough reads
+ * are serialised against heater reconfiguration via data->lock.
+ *
+ * Datasheet conversions:
+ *   T(degC) = -45 + 175 * raw / 2^16
+ *   RH(%)   = 100 * raw / 2^16
+ */
+static int hdc3020_read_raw(struct iio_dev *indio_dev,
+                           struct iio_chan_spec const *chan, int *val,
+                           int *val2, long mask)
+{
+       struct hdc3020_data *data = iio_priv(indio_dev);
+       int ret;
+
+       if (chan->type != IIO_TEMP && chan->type != IIO_HUMIDITYRELATIVE)
+               return -EINVAL;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW: {
+               guard(mutex)(&data->lock);
+               ret = hdc3020_read_measurement(data, chan->type, val);
+               if (ret < 0)
+                       return ret;
+
+               return IIO_VAL_INT;
+       }
+       case IIO_CHAN_INFO_PEAK: {
+               guard(mutex)(&data->lock);
+               if (chan->type == IIO_TEMP) {
+                       ret = hdc3020_read_high_peak_t(data, val);
+                       if (ret < 0)
+                               return ret;
+               } else {
+                       ret = hdc3020_read_high_peak_rh(data, val);
+                       if (ret < 0)
+                               return ret;
+               }
+               return IIO_VAL_INT;
+       }
+       case IIO_CHAN_INFO_TROUGH: {
+               guard(mutex)(&data->lock);
+               if (chan->type == IIO_TEMP) {
+                       ret = hdc3020_read_low_peak_t(data, val);
+                       if (ret < 0)
+                               return ret;
+               } else {
+                       ret = hdc3020_read_low_peak_rh(data, val);
+                       if (ret < 0)
+                               return ret;
+               }
+               return IIO_VAL_INT;
+       }
+       case IIO_CHAN_INFO_SCALE:
+               *val2 = 65536;
+               if (chan->type == IIO_TEMP)
+                       *val = 175;
+               else
+                       *val = 100;
+               return IIO_VAL_FRACTIONAL;
+
+       case IIO_CHAN_INFO_OFFSET:
+               if (chan->type != IIO_TEMP)
+                       return -EINVAL;
+
+               /*
+                * IIO computes (raw + offset) * scale.  With
+                * scale = 175/65536, matching T = -45 + 175 * raw / 2^16
+                * requires offset = -45 * 65536 / 175 = -16852.114286.
+                * The previous positive 16852 shifted readings up by 90 C.
+                */
+               *val = -16852;
+               *val2 = -114286;
+               return IIO_VAL_INT_PLUS_MICRO;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+/* Only the heater (IIO_CURRENT) channel has a raw range: 0..0x3FFF, step 1. */
+static int hdc3020_read_available(struct iio_dev *indio_dev,
+                                 struct iio_chan_spec const *chan,
+                                 const int **vals,
+                                 int *type, int *length, long mask)
+{
+       if (mask != IIO_CHAN_INFO_RAW || chan->type != IIO_CURRENT)
+               return -EINVAL;
+
+       *vals = hdc3020_heater_vals;
+       *type = IIO_VAL_INT;
+
+       return IIO_AVAIL_RANGE;
+}
+
+/*
+ * Set the heater level.  @val == 0 disables the heater; otherwise the
+ * 14-bit level is written (with its CRC8) via the heater-configure command
+ * and the heater is then enabled.  Caller holds data->lock.
+ */
+static int hdc3020_update_heater(struct hdc3020_data *data, int val)
+{
+       u8 buf[5];
+       int ret;
+
+       if (val < hdc3020_heater_vals[0] || val > hdc3020_heater_vals[2])
+               return -EINVAL;
+
+       buf[0] = HDC3020_HEATER_CMD_MSB;
+
+       if (!val) {
+               buf[1] = HDC3020_HEATER_DISABLE;
+               return hdc3020_write_bytes(data, buf, 2);
+       }
+
+       buf[1] = HDC3020_HEATER_CONFIG;
+       /* Config payload: 14-bit level, big-endian, followed by its CRC8. */
+       put_unaligned_be16(val & GENMASK(13, 0), &buf[2]);
+       buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
+       ret = hdc3020_write_bytes(data, buf, 5);
+       if (ret < 0)
+               return ret;
+
+       buf[1] = HDC3020_HEATER_ENABLE;
+
+       return hdc3020_write_bytes(data, buf, 2);
+}
+
+/* Only writing the heater level (IIO_CURRENT raw) is supported. */
+static int hdc3020_write_raw(struct iio_dev *indio_dev,
+                            struct iio_chan_spec const *chan,
+                            int val, int val2, long mask)
+{
+       struct hdc3020_data *data = iio_priv(indio_dev);
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               if (chan->type != IIO_CURRENT)
+                       return -EINVAL;
+
+               /* Serialise against concurrent measurement reads. */
+               guard(mutex)(&data->lock);
+               return hdc3020_update_heater(data, val);
+       }
+
+       return -EINVAL;
+}
+
+static const struct iio_info hdc3020_info = {
+       .read_raw = hdc3020_read_raw,
+       .write_raw = hdc3020_write_raw,
+       .read_avail = hdc3020_read_available,
+};
+
+/* devm teardown action: take the sensor out of automatic measurement mode. */
+static void hdc3020_stop(void *data)
+{
+       hdc3020_write_bytes((struct hdc3020_data *)data, HDC3020_EXIT_AUTO, 2);
+}
+
+/*
+ * Probe: allocate the IIO device, initialise the CRC8 table, start 10 Hz
+ * automatic measurement mode, and register a devm action that exits that
+ * mode on teardown.  All resources are device-managed.
+ */
+static int hdc3020_probe(struct i2c_client *client)
+{
+       struct iio_dev *indio_dev;
+       struct hdc3020_data *data;
+       int ret;
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+               return -EOPNOTSUPP;
+
+       indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+       if (!indio_dev)
+               return -ENOMEM;
+
+       data = iio_priv(indio_dev);
+       data->client = client;
+       mutex_init(&data->lock);
+
+       crc8_populate_msb(hdc3020_crc8_table, HDC3020_CRC8_POLYNOMIAL);
+
+       indio_dev->name = "hdc3020";
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->info = &hdc3020_info;
+       indio_dev->channels = hdc3020_channels;
+       indio_dev->num_channels = ARRAY_SIZE(hdc3020_channels);
+
+       /* Start continuous measurement at 10 Hz, default repeatability. */
+       ret = hdc3020_write_bytes(data, HDC3020_S_AUTO_10HZ_MOD0, 2);
+       if (ret)
+               return dev_err_probe(&client->dev, ret,
+                                    "Unable to set up measurement\n");
+
+       /* Ensure auto-measurement mode is exited on unbind/failure. */
+       ret = devm_add_action_or_reset(&data->client->dev, hdc3020_stop, data);
+       if (ret)
+               return ret;
+
+       ret = devm_iio_device_register(&data->client->dev, indio_dev);
+       if (ret)
+               /* Log messages are newline terminated. */
+               return dev_err_probe(&client->dev, ret,
+                                    "Failed to add device\n");
+
+       return 0;
+}
+
+static const struct i2c_device_id hdc3020_id[] = {
+       { "hdc3020" },
+       { "hdc3021" },
+       { "hdc3022" },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, hdc3020_id);
+
+static const struct of_device_id hdc3020_dt_ids[] = {
+       { .compatible = "ti,hdc3020" },
+       { .compatible = "ti,hdc3021" },
+       { .compatible = "ti,hdc3022" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, hdc3020_dt_ids);
+
+static struct i2c_driver hdc3020_driver = {
+       .driver = {
+               .name = "hdc3020",
+               .of_match_table = hdc3020_dt_ids,
+       },
+       .probe = hdc3020_probe,
+       .id_table = hdc3020_id,
+};
+module_i2c_driver(hdc3020_driver);
+
+MODULE_AUTHOR("Javier Carrasco <javier.carrasco.cruz@gmail.com>");
+MODULE_AUTHOR("Li peiyu <579lpy@gmail.com>");
+MODULE_DESCRIPTION("TI HDC3020 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
index c2f97629e9cdb6f726ecd7b9053b9b5e9137f825..52a155ff3250476632016bbeef369d523685d22b 100644 (file)
@@ -53,6 +53,7 @@ config ADIS16480
          ADIS16485, ADIS16488 inertial sensors.
 
 source "drivers/iio/imu/bmi160/Kconfig"
+source "drivers/iio/imu/bmi323/Kconfig"
 source "drivers/iio/imu/bno055/Kconfig"
 
 config FXOS8700
index 6eb612034722298439c2e0721c08c7c347b067be..7e2d7d5c3b7bc7e76dfd00ff8437cb3c640e8a54 100644 (file)
@@ -15,6 +15,7 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
 obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
 
 obj-y += bmi160/
+obj-y += bmi323/
 obj-y += bno055/
 
 obj-$(CONFIG_FXOS8700) += fxos8700_core.o
index bc40240b29e26b1887e5b9d0e43f91aeef5d3774..495caf4ce87a2d8260c9bc1e9bae9ae0ec9721a7 100644 (file)
@@ -44,8 +44,6 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value,
                        .cs_change = 1,
                        .delay.value = adis->data->write_delay,
                        .delay.unit = SPI_DELAY_UNIT_USECS,
-                       .cs_change_delay.value = adis->data->cs_change_delay,
-                       .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
                }, {
                        .tx_buf = adis->tx + 2,
                        .bits_per_word = 8,
@@ -53,8 +51,6 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value,
                        .cs_change = 1,
                        .delay.value = adis->data->write_delay,
                        .delay.unit = SPI_DELAY_UNIT_USECS,
-                       .cs_change_delay.value = adis->data->cs_change_delay,
-                       .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
                }, {
                        .tx_buf = adis->tx + 4,
                        .bits_per_word = 8,
@@ -62,8 +58,6 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value,
                        .cs_change = 1,
                        .delay.value = adis->data->write_delay,
                        .delay.unit = SPI_DELAY_UNIT_USECS,
-                       .cs_change_delay.value = adis->data->cs_change_delay,
-                       .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
                }, {
                        .tx_buf = adis->tx + 6,
                        .bits_per_word = 8,
@@ -144,8 +138,6 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val,
                        .cs_change = 1,
                        .delay.value = adis->data->write_delay,
                        .delay.unit = SPI_DELAY_UNIT_USECS,
-                       .cs_change_delay.value = adis->data->cs_change_delay,
-                       .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
                }, {
                        .tx_buf = adis->tx + 2,
                        .bits_per_word = 8,
@@ -153,8 +145,6 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val,
                        .cs_change = 1,
                        .delay.value = adis->data->read_delay,
                        .delay.unit = SPI_DELAY_UNIT_USECS,
-                       .cs_change_delay.value = adis->data->cs_change_delay,
-                       .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
                }, {
                        .tx_buf = adis->tx + 4,
                        .rx_buf = adis->rx,
@@ -163,8 +153,6 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val,
                        .cs_change = 1,
                        .delay.value = adis->data->read_delay,
                        .delay.unit = SPI_DELAY_UNIT_USECS,
-                       .cs_change_delay.value = adis->data->cs_change_delay,
-                       .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
                }, {
                        .rx_buf = adis->rx + 2,
                        .bits_per_word = 8,
@@ -524,6 +512,12 @@ int adis_init(struct adis *adis, struct iio_dev *indio_dev,
        }
 
        mutex_init(&adis->state_lock);
+
+       if (!spi->cs_inactive.value) {
+               spi->cs_inactive.value = data->cs_change_delay;
+               spi->cs_inactive.unit = SPI_DELAY_UNIT_USECS;
+       }
+
        adis->spi = spi;
        adis->data = data;
        iio_device_set_drvdata(indio_dev, adis);
diff --git a/drivers/iio/imu/bmi323/Kconfig b/drivers/iio/imu/bmi323/Kconfig
new file mode 100644 (file)
index 0000000..ab37b28
--- /dev/null
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# BMI323 IMU driver
+#
+
+config BMI323
+       tristate
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
+
+config BMI323_I2C
+       tristate "Bosch BMI323 I2C driver"
+       depends on I2C
+       select BMI323
+       select REGMAP_I2C
+       help
+         Enable support for the Bosch BMI323 6-Axis IMU connected to I2C
+         interface.
+
+         This driver can also be built as a module. If so, the module will be
+         called bmi323_i2c.
+
+config BMI323_SPI
+       tristate "Bosch BMI323 SPI driver"
+       depends on SPI
+       select BMI323
+       select REGMAP_SPI
+       help
+         Enable support for the Bosch BMI323 6-Axis IMU connected to SPI
+         interface.
+
+         This driver can also be built as a module. If so, the module will be
+         called bmi323_spi.
diff --git a/drivers/iio/imu/bmi323/Makefile b/drivers/iio/imu/bmi323/Makefile
new file mode 100644 (file)
index 0000000..a6a6dc0
--- /dev/null
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Bosch BMI323 IMU
+#
+obj-$(CONFIG_BMI323) += bmi323_core.o
+obj-$(CONFIG_BMI323_I2C) += bmi323_i2c.o
+obj-$(CONFIG_BMI323_SPI) += bmi323_spi.o
diff --git a/drivers/iio/imu/bmi323/bmi323.h b/drivers/iio/imu/bmi323/bmi323.h
new file mode 100644 (file)
index 0000000..dff126d
--- /dev/null
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IIO driver for Bosch BMI323 6-Axis IMU
+ *
+ * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com>
+ */
+
+#ifndef _BMI323_H_
+#define _BMI323_H_
+
+#include <linux/bits.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#define BMI323_I2C_DUMMY                       2
+#define BMI323_SPI_DUMMY                       1
+
+/* Register map */
+
+#define BMI323_CHIP_ID_REG                     0x00
+#define BMI323_CHIP_ID_VAL                     0x0043
+#define BMI323_CHIP_ID_MSK                     GENMASK(7, 0)
+#define BMI323_ERR_REG                         0x01
+#define BMI323_STATUS_REG                      0x02
+#define BMI323_STATUS_POR_MSK                  BIT(0)
+
+/* Accelero/Gyro/Temp data registers */
+#define BMI323_ACCEL_X_REG                     0x03
+#define BMI323_GYRO_X_REG                      0x06
+#define BMI323_TEMP_REG                                0x09
+#define BMI323_ALL_CHAN_MSK                    GENMASK(5, 0)
+
+/* Status registers */
+#define BMI323_STATUS_INT1_REG                 0x0D
+#define BMI323_STATUS_INT2_REG                 0x0E
+#define BMI323_STATUS_NOMOTION_MSK             BIT(0)
+#define BMI323_STATUS_MOTION_MSK               BIT(1)
+#define BMI323_STATUS_STP_WTR_MSK              BIT(5)
+#define BMI323_STATUS_TAP_MSK                  BIT(8)
+#define BMI323_STATUS_ERROR_MSK                        BIT(10)
+#define BMI323_STATUS_TMP_DRDY_MSK             BIT(11)
+#define BMI323_STATUS_GYR_DRDY_MSK             BIT(12)
+#define BMI323_STATUS_ACC_DRDY_MSK             BIT(13)
+#define BMI323_STATUS_ACC_GYR_DRDY_MSK         GENMASK(13, 12)
+#define BMI323_STATUS_FIFO_WTRMRK_MSK          BIT(14)
+#define BMI323_STATUS_FIFO_FULL_MSK            BIT(15)
+
+/* Feature registers */
+#define BMI323_FEAT_IO0_REG                    0x10
+#define BMI323_FEAT_IO0_XYZ_NOMOTION_MSK       GENMASK(2, 0)
+#define BMI323_FEAT_IO0_XYZ_MOTION_MSK         GENMASK(5, 3)
+#define BMI323_FEAT_XYZ_MSK                    GENMASK(2, 0)
+#define BMI323_FEAT_IO0_STP_CNT_MSK            BIT(9)
+#define BMI323_FEAT_IO0_S_TAP_MSK              BIT(12)
+#define BMI323_FEAT_IO0_D_TAP_MSK              BIT(13)
+#define BMI323_FEAT_IO1_REG                    0x11
+#define BMI323_FEAT_IO1_ERR_MSK                        GENMASK(3, 0)
+#define BMI323_FEAT_IO2_REG                    0x12
+#define BMI323_FEAT_IO_STATUS_REG              0x14
+#define BMI323_FEAT_IO_STATUS_MSK              BIT(0)
+#define BMI323_FEAT_ENG_POLL                   2000
+#define BMI323_FEAT_ENG_TIMEOUT                        10000
+
+/* FIFO registers */
+#define BMI323_FIFO_FILL_LEVEL_REG             0x15
+#define BMI323_FIFO_DATA_REG                   0x16
+
+/* Accelero/Gyro config registers */
+#define BMI323_ACC_CONF_REG                    0x20
+#define BMI323_GYRO_CONF_REG                   0x21
+#define BMI323_ACC_GYRO_CONF_MODE_MSK          GENMASK(14, 12)
+#define BMI323_ACC_GYRO_CONF_ODR_MSK           GENMASK(3, 0)
+#define BMI323_ACC_GYRO_CONF_SCL_MSK           GENMASK(6, 4)
+#define BMI323_ACC_GYRO_CONF_BW_MSK            BIT(7)
+#define BMI323_ACC_GYRO_CONF_AVG_MSK           GENMASK(10, 8)
+
+/* FIFO registers */
+#define BMI323_FIFO_WTRMRK_REG                 0x35
+#define BMI323_FIFO_CONF_REG                   0x36
+#define BMI323_FIFO_CONF_STP_FUL_MSK           BIT(0)
+#define BMI323_FIFO_CONF_ACC_GYR_EN_MSK                GENMASK(10, 9)
+#define BMI323_FIFO_ACC_GYR_MSK                        GENMASK(1, 0)
+#define BMI323_FIFO_CTRL_REG                   0x37
+#define BMI323_FIFO_FLUSH_MSK                  BIT(0)
+
+/* Interrupt pin config registers */
+#define BMI323_IO_INT_CTR_REG                  0x38
+#define BMI323_IO_INT1_LVL_MSK                 BIT(0)
+#define BMI323_IO_INT1_OD_MSK                  BIT(1)
+#define BMI323_IO_INT1_OP_EN_MSK               BIT(2)
+#define BMI323_IO_INT1_LVL_OD_OP_MSK           GENMASK(2, 0)
+#define BMI323_IO_INT2_LVL_MSK                 BIT(8)
+#define BMI323_IO_INT2_OD_MSK                  BIT(9)
+#define BMI323_IO_INT2_OP_EN_MSK               BIT(10)
+#define BMI323_IO_INT2_LVL_OD_OP_MSK           GENMASK(10, 8)
+#define BMI323_IO_INT_CONF_REG                 0x39
+#define BMI323_IO_INT_LTCH_MSK                 BIT(0)
+#define BMI323_INT_MAP1_REG                    0x3A
+#define BMI323_INT_MAP2_REG                    0x3B
+#define BMI323_NOMOTION_MSK                    GENMASK(1, 0)
+#define BMI323_MOTION_MSK                      GENMASK(3, 2)
+#define BMI323_STEP_CNT_MSK                    GENMASK(11, 10)
+#define BMI323_TAP_MSK                         GENMASK(1, 0)
+#define BMI323_TMP_DRDY_MSK                    GENMASK(7, 6)
+#define BMI323_GYR_DRDY_MSK                    GENMASK(9, 8)
+#define BMI323_ACC_DRDY_MSK                    GENMASK(11, 10)
+#define BMI323_FIFO_WTRMRK_MSK                 GENMASK(13, 12)
+#define BMI323_FIFO_FULL_MSK                   GENMASK(15, 14)
+
+/* Feature registers */
+#define BMI323_FEAT_CTRL_REG                   0x40
+#define BMI323_FEAT_ENG_EN_MSK                 BIT(0)
+#define BMI323_FEAT_DATA_ADDR                  0x41
+#define BMI323_FEAT_DATA_TX                    0x42
+#define BMI323_FEAT_DATA_STATUS                        0x43
+#define BMI323_FEAT_DATA_TX_RDY_MSK            BIT(1)
+#define BMI323_FEAT_EVNT_EXT_REG               0x47
+#define BMI323_FEAT_EVNT_EXT_S_MSK             BIT(3)
+#define BMI323_FEAT_EVNT_EXT_D_MSK             BIT(4)
+
+#define BMI323_CMD_REG                         0x7E
+#define BMI323_RST_VAL                         0xDEAF
+#define BMI323_CFG_RES_REG                     0x7F
+
+/* Extended registers */
+#define BMI323_GEN_SET1_REG                    0x02
+#define BMI323_GEN_SET1_MODE_MSK               BIT(0)
+#define BMI323_GEN_HOLD_DUR_MSK                        GENMASK(4, 1)
+
+/* Any Motion/No Motion config registers */
+#define BMI323_ANYMO1_REG                      0x05
+#define BMI323_NOMO1_REG                       0x08
+#define BMI323_MO2_OFFSET                      0x01
+#define BMI323_MO3_OFFSET                      0x02
+#define BMI323_MO1_REF_UP_MSK                  BIT(12)
+#define BMI323_MO1_SLOPE_TH_MSK                        GENMASK(11, 0)
+#define BMI323_MO2_HYSTR_MSK                   GENMASK(9, 0)
+#define BMI323_MO3_DURA_MSK                    GENMASK(12, 0)
+
+/* Step counter config registers */
+#define BMI323_STEP_SC1_REG                    0x10
+#define BMI323_STEP_SC1_WTRMRK_MSK             GENMASK(9, 0)
+#define BMI323_STEP_SC1_RST_CNT_MSK            BIT(10)
+#define BMI323_STEP_SC1_REG                    0x10
+#define BMI323_STEP_LEN                                2
+
+/* Tap gesture config registers */
+#define BMI323_TAP1_REG                                0x1E
+#define BMI323_TAP1_AXIS_SEL_MSK               GENMASK(1, 0)
+#define BMI323_AXIS_XYZ_MSK                    GENMASK(1, 0)
+#define BMI323_TAP1_TIMOUT_MSK                 BIT(2)
+#define BMI323_TAP1_MAX_PEAKS_MSK              GENMASK(5, 3)
+#define BMI323_TAP1_MODE_MSK                   GENMASK(7, 6)
+#define BMI323_TAP2_REG                                0x1F
+#define BMI323_TAP2_THRES_MSK                  GENMASK(9, 0)
+#define BMI323_TAP2_MAX_DUR_MSK                        GENMASK(15, 10)
+#define BMI323_TAP3_REG                                0x20
+#define BMI323_TAP3_QUIET_TIM_MSK              GENMASK(15, 12)
+#define BMI323_TAP3_QT_BW_TAP_MSK              GENMASK(11, 8)
+#define BMI323_TAP3_QT_AFT_GES_MSK             GENMASK(15, 12)
+
+#define BMI323_MOTION_THRES_SCALE              512
+#define BMI323_MOTION_HYSTR_SCALE              512
+#define BMI323_MOTION_DURAT_SCALE              50
+#define BMI323_TAP_THRES_SCALE                 512
+#define BMI323_DUR_BW_TAP_SCALE                        200
+#define BMI323_QUITE_TIM_GES_SCALE             25
+#define BMI323_MAX_GES_DUR_SCALE               25
+
+/*
+ * The formula to calculate temperature in C.
+ * See datasheet section 6.1.1, Register Map Overview
+ *
+ * T_C = (temp_raw / 512) + 23
+ */
+#define BMI323_TEMP_OFFSET                     11776
+#define BMI323_TEMP_SCALE                      1953125
+
+/*
+ * The BMI323 features a FIFO with a capacity of 2048 bytes. Each frame
+ * consists of accelerometer (X, Y, Z) data and gyroscope (X, Y, Z) data,
+ * totaling 6 words or 12 bytes. The FIFO buffer can hold a total of
+ * 170 frames.
+ *
+ * If a watermark interrupt is configured for 170 frames, the interrupt will
+ * trigger when the FIFO reaches 169 frames, so limit the maximum watermark
+ * level to 169 frames. In terms of data, 169 frames would equal 1014 bytes,
+ * which is approximately 2 frames before the FIFO reaches its full capacity.
+ * See datasheet section 5.7.3 FIFO Buffer Interrupts
+ */
+#define BMI323_BYTES_PER_SAMPLE                        2
+#define BMI323_FIFO_LENGTH_IN_BYTES            2048
+#define BMI323_FIFO_FRAME_LENGTH               6
+#define BMI323_FIFO_FULL_IN_FRAMES             \
+       ((BMI323_FIFO_LENGTH_IN_BYTES /         \
+       (BMI323_BYTES_PER_SAMPLE * BMI323_FIFO_FRAME_LENGTH)) - 1)
+#define BMI323_FIFO_FULL_IN_WORDS              \
+       (BMI323_FIFO_FULL_IN_FRAMES * BMI323_FIFO_FRAME_LENGTH)
+
+#define BMI323_INT_MICRO_TO_RAW(val, val2, scale) ((val) * (scale) + \
+                                                 ((val2) * (scale)) / MEGA)
+
+#define BMI323_RAW_TO_MICRO(raw, scale) ((((raw) % (scale)) * MEGA) / (scale))
+
+struct device;
+int bmi323_core_probe(struct device *dev);
+extern const struct regmap_config bmi323_regmap_config;
+
+#endif
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
new file mode 100644 (file)
index 0000000..183af48
--- /dev/null
@@ -0,0 +1,2139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IIO core driver for Bosch BMI323 6-Axis IMU.
+ *
+ * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com>
+ *
+ * Datasheet: https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmi323-ds000.pdf
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "bmi323.h"
+
+enum bmi323_sensor_type {
+       BMI323_ACCEL,
+       BMI323_GYRO,
+       BMI323_SENSORS_CNT,
+};
+
+enum bmi323_opr_mode {
+       ACC_GYRO_MODE_DISABLE = 0x00,
+       GYRO_DRIVE_MODE_ENABLED = 0x01,
+       ACC_GYRO_MODE_DUTYCYCLE = 0x03,
+       ACC_GYRO_MODE_CONTINOUS = 0x04,
+       ACC_GYRO_MODE_HIGH_PERF = 0x07,
+};
+
+enum bmi323_state {
+       BMI323_IDLE,
+       BMI323_BUFFER_DRDY_TRIGGERED,
+       BMI323_BUFFER_FIFO,
+};
+
+enum bmi323_irq_pin {
+       BMI323_IRQ_DISABLED,
+       BMI323_IRQ_INT1,
+       BMI323_IRQ_INT2,
+};
+
+enum bmi323_3db_bw {
+       BMI323_BW_ODR_BY_2,
+       BMI323_BW_ODR_BY_4,
+};
+
+enum bmi323_scan {
+       BMI323_ACCEL_X,
+       BMI323_ACCEL_Y,
+       BMI323_ACCEL_Z,
+       BMI323_GYRO_X,
+       BMI323_GYRO_Y,
+       BMI323_GYRO_Z,
+       BMI323_CHAN_MAX
+};
+
+struct bmi323_hw {
+       u8 data;
+       u8 config;
+       const int (*scale_table)[2];
+       int scale_table_len;
+};
+
+/*
+ * The accelerometer supports +-2G/4G/8G/16G ranges, and the resolution of
+ * each sample is 16 bits, signed.
+ * At +-8G the scale can be calculated by
+ * ((8 + 8) * 9.80665 / (2^16 - 1)) * 10^6 = 2394.23819 scale in micro
+ *
+ */
+static const int bmi323_accel_scale[][2] = {
+       { 0, 598 },
+       { 0, 1197 },
+       { 0, 2394 },
+       { 0, 4788 },
+};
+
+static const int bmi323_gyro_scale[][2] = {
+       { 0, 66 },
+       { 0, 133 },
+       { 0, 266 },
+       { 0, 532 },
+       { 0, 1065 },
+};
+
+static const int bmi323_accel_gyro_avrg[] = {0, 2, 4, 8, 16, 32, 64};
+
+static const struct bmi323_hw bmi323_hw[2] = {
+       [BMI323_ACCEL] = {
+               .data = BMI323_ACCEL_X_REG,
+               .config = BMI323_ACC_CONF_REG,
+               .scale_table = bmi323_accel_scale,
+               .scale_table_len = ARRAY_SIZE(bmi323_accel_scale),
+       },
+       [BMI323_GYRO] = {
+               .data = BMI323_GYRO_X_REG,
+               .config = BMI323_GYRO_CONF_REG,
+               .scale_table = bmi323_gyro_scale,
+               .scale_table_len = ARRAY_SIZE(bmi323_gyro_scale),
+       },
+};
+
+struct bmi323_data {
+       struct device *dev;
+       struct regmap *regmap;
+       struct iio_mount_matrix orientation;
+       enum bmi323_irq_pin irq_pin;
+       struct iio_trigger *trig;
+       bool drdy_trigger_enabled;
+       enum bmi323_state state;
+       s64 fifo_tstamp, old_fifo_tstamp;
+       u32 odrns[BMI323_SENSORS_CNT];
+       u32 odrhz[BMI323_SENSORS_CNT];
+       unsigned int feature_events;
+
+       /*
+        * Lock to protect the members of device's private data from concurrent
+        * access and also to serialize the access of extended registers.
+        * See bmi323_write_ext_reg(..) for more info.
+        */
+       struct mutex mutex;
+       int watermark;
+       __le16 fifo_buff[BMI323_FIFO_FULL_IN_WORDS] __aligned(IIO_DMA_MINALIGN);
+       struct {
+               __le16 channels[BMI323_CHAN_MAX];
+               s64 ts __aligned(8);
+       } buffer;
+       __le16 steps_count[BMI323_STEP_LEN];
+};
+
+static const struct iio_mount_matrix *
+bmi323_get_mount_matrix(const struct iio_dev *idev,
+                       const struct iio_chan_spec *chan)
+{
+       struct bmi323_data *data = iio_priv(idev);
+
+       return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info bmi323_ext_info[] = {
+       IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, bmi323_get_mount_matrix),
+       { }
+};
+
+static const struct iio_event_spec bmi323_step_wtrmrk_event = {
+       .type = IIO_EV_TYPE_CHANGE,
+       .dir = IIO_EV_DIR_NONE,
+       .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+                              BIT(IIO_EV_INFO_VALUE),
+};
+
+static const struct iio_event_spec bmi323_accel_event[] = {
+       {
+               .type = IIO_EV_TYPE_MAG,
+               .dir = IIO_EV_DIR_FALLING,
+               .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+                                      BIT(IIO_EV_INFO_PERIOD) |
+                                      BIT(IIO_EV_INFO_HYSTERESIS) |
+                                      BIT(IIO_EV_INFO_ENABLE),
+       },
+       {
+               .type = IIO_EV_TYPE_MAG,
+               .dir = IIO_EV_DIR_RISING,
+               .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+                                      BIT(IIO_EV_INFO_PERIOD) |
+                                      BIT(IIO_EV_INFO_HYSTERESIS) |
+                                      BIT(IIO_EV_INFO_ENABLE),
+       },
+       {
+               .type = IIO_EV_TYPE_GESTURE,
+               .dir = IIO_EV_DIR_SINGLETAP,
+               .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+                                      BIT(IIO_EV_INFO_VALUE) |
+                                      BIT(IIO_EV_INFO_RESET_TIMEOUT),
+       },
+       {
+               .type = IIO_EV_TYPE_GESTURE,
+               .dir = IIO_EV_DIR_DOUBLETAP,
+               .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+                                      BIT(IIO_EV_INFO_VALUE) |
+                                      BIT(IIO_EV_INFO_RESET_TIMEOUT) |
+                                      BIT(IIO_EV_INFO_TAP2_MIN_DELAY),
+       },
+};
+
+#define BMI323_ACCEL_CHANNEL(_type, _axis, _index) {                   \
+       .type = _type,                                                  \
+       .modified = 1,                                                  \
+       .channel2 = IIO_MOD_##_axis,                                    \
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),                   \
+       .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) |      \
+                                   BIT(IIO_CHAN_INFO_SCALE) |          \
+                                   BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+       .info_mask_shared_by_type_available =                           \
+                                   BIT(IIO_CHAN_INFO_SAMP_FREQ) |      \
+                                   BIT(IIO_CHAN_INFO_SCALE) |          \
+                                   BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+       .scan_index = _index,                                           \
+       .scan_type = {                                                  \
+               .sign = 's',                                            \
+               .realbits = 16,                                         \
+               .storagebits = 16,                                      \
+               .endianness = IIO_LE,                                   \
+       },                                                              \
+       .ext_info = bmi323_ext_info,                                    \
+       .event_spec = bmi323_accel_event,                               \
+       .num_event_specs = ARRAY_SIZE(bmi323_accel_event),              \
+}
+
+#define BMI323_GYRO_CHANNEL(_type, _axis, _index) {                    \
+       .type = _type,                                                  \
+       .modified = 1,                                                  \
+       .channel2 = IIO_MOD_##_axis,                                    \
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),                   \
+       .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) |      \
+                                   BIT(IIO_CHAN_INFO_SCALE) |          \
+                                   BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+       .info_mask_shared_by_type_available =                           \
+                                   BIT(IIO_CHAN_INFO_SAMP_FREQ) |      \
+                                   BIT(IIO_CHAN_INFO_SCALE) |          \
+                                   BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+       .scan_index = _index,                                           \
+       .scan_type = {                                                  \
+               .sign = 's',                                            \
+               .realbits = 16,                                         \
+               .storagebits = 16,                                      \
+               .endianness = IIO_LE,                                   \
+       },                                                              \
+       .ext_info = bmi323_ext_info,                                    \
+}
+
+static const struct iio_chan_spec bmi323_channels[] = {
+       BMI323_ACCEL_CHANNEL(IIO_ACCEL, X, BMI323_ACCEL_X),
+       BMI323_ACCEL_CHANNEL(IIO_ACCEL, Y, BMI323_ACCEL_Y),
+       BMI323_ACCEL_CHANNEL(IIO_ACCEL, Z, BMI323_ACCEL_Z),
+       BMI323_GYRO_CHANNEL(IIO_ANGL_VEL, X, BMI323_GYRO_X),
+       BMI323_GYRO_CHANNEL(IIO_ANGL_VEL, Y, BMI323_GYRO_Y),
+       BMI323_GYRO_CHANNEL(IIO_ANGL_VEL, Z, BMI323_GYRO_Z),
+       {
+               .type = IIO_TEMP,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+                                     BIT(IIO_CHAN_INFO_OFFSET) |
+                                     BIT(IIO_CHAN_INFO_SCALE),
+               .scan_index = -1,
+       },
+       {
+               .type = IIO_STEPS,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+                                     BIT(IIO_CHAN_INFO_ENABLE),
+               .scan_index = -1,
+               .event_spec = &bmi323_step_wtrmrk_event,
+               .num_event_specs = 1,
+
+       },
+       IIO_CHAN_SOFT_TIMESTAMP(BMI323_CHAN_MAX),
+};
+
+static const int bmi323_acc_gyro_odr[][2] = {
+       { 0, 781250 },
+       { 1, 562500 },
+       { 3, 125000 },
+       { 6, 250000 },
+       { 12, 500000 },
+       { 25, 0 },
+       { 50, 0 },
+       { 100, 0 },
+       { 200, 0 },
+       { 400, 0 },
+       { 800, 0 },
+};
+
+static const int bmi323_acc_gyro_odrns[] = {
+       1280 * MEGA,
+       640 * MEGA,
+       320 * MEGA,
+       160 * MEGA,
+       80 * MEGA,
+       40 * MEGA,
+       20 * MEGA,
+       10 * MEGA,
+       5 * MEGA,
+       2500 * KILO,
+       1250 * KILO,
+};
+
+static enum bmi323_sensor_type bmi323_iio_to_sensor(enum iio_chan_type iio_type)
+{
+       switch (iio_type) {
+       case IIO_ACCEL:
+               return BMI323_ACCEL;
+       case IIO_ANGL_VEL:
+               return BMI323_GYRO;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int bmi323_set_mode(struct bmi323_data *data,
+                          enum bmi323_sensor_type sensor,
+                          enum bmi323_opr_mode mode)
+{
+       guard(mutex)(&data->mutex);
+       return regmap_update_bits(data->regmap, bmi323_hw[sensor].config,
+                                 BMI323_ACC_GYRO_CONF_MODE_MSK,
+                                 FIELD_PREP(BMI323_ACC_GYRO_CONF_MODE_MSK,
+                                            mode));
+}
+
+/*
+ * When writing data to extended register there must be no communication to
+ * any other register before write transaction is complete.
+ * See datasheet section 6.2 Extended Register Map Description.
+ */
+static int bmi323_write_ext_reg(struct bmi323_data *data, unsigned int ext_addr,
+                               unsigned int ext_data)
+{
+       int ret, feature_status;
+
+       ret = regmap_read(data->regmap, BMI323_FEAT_DATA_STATUS,
+                         &feature_status);
+       if (ret)
+               return ret;
+
+       if (!FIELD_GET(BMI323_FEAT_DATA_TX_RDY_MSK, feature_status))
+               return -EBUSY;
+
+       ret = regmap_write(data->regmap, BMI323_FEAT_DATA_ADDR, ext_addr);
+       if (ret)
+               return ret;
+
+       return regmap_write(data->regmap, BMI323_FEAT_DATA_TX, ext_data);
+}
+
+/*
+ * When reading data from extended register there must be no communication to
+ * any other register before read transaction is complete.
+ * See datasheet section 6.2 Extended Register Map Description.
+ */
+static int bmi323_read_ext_reg(struct bmi323_data *data, unsigned int ext_addr,
+                              unsigned int *ext_data)
+{
+       int ret, feature_status;
+
+       ret = regmap_read(data->regmap, BMI323_FEAT_DATA_STATUS,
+                         &feature_status);
+       if (ret)
+               return ret;
+
+       if (!FIELD_GET(BMI323_FEAT_DATA_TX_RDY_MSK, feature_status))
+               return -EBUSY;
+
+       ret = regmap_write(data->regmap, BMI323_FEAT_DATA_ADDR, ext_addr);
+       if (ret)
+               return ret;
+
+       return regmap_read(data->regmap, BMI323_FEAT_DATA_TX, ext_data);
+}
+
+static int bmi323_update_ext_reg(struct bmi323_data *data,
+                                unsigned int ext_addr,
+                                unsigned int mask, unsigned int ext_data)
+{
+       unsigned int value;
+       int ret;
+
+       ret = bmi323_read_ext_reg(data, ext_addr, &value);
+       if (ret)
+               return ret;
+
+       set_mask_bits(&value, mask, ext_data);
+
+       return bmi323_write_ext_reg(data, ext_addr, value);
+}
+
+static int bmi323_get_error_status(struct bmi323_data *data)
+{
+       int error, ret;
+
+       guard(mutex)(&data->mutex);
+       ret = regmap_read(data->regmap, BMI323_ERR_REG, &error);
+       if (ret)
+               return ret;
+
+       if (error)
+               dev_err(data->dev, "Sensor error 0x%x\n", error);
+
+       return error;
+}
+
+static int bmi323_feature_engine_events(struct bmi323_data *data,
+                                       const unsigned int event_mask,
+                                       bool state)
+{
+       unsigned int value;
+       int ret;
+
+       ret = regmap_read(data->regmap, BMI323_FEAT_IO0_REG, &value);
+       if (ret)
+               return ret;
+
+       /* Register must be cleared before changing an active config */
+       ret = regmap_write(data->regmap, BMI323_FEAT_IO0_REG, 0);
+       if (ret)
+               return ret;
+
+       if (state)
+               value |= event_mask;
+       else
+               value &= ~event_mask;
+
+       ret = regmap_write(data->regmap, BMI323_FEAT_IO0_REG, value);
+       if (ret)
+               return ret;
+
+       return regmap_write(data->regmap, BMI323_FEAT_IO_STATUS_REG,
+                           BMI323_FEAT_IO_STATUS_MSK);
+}
+
+static int bmi323_step_wtrmrk_en(struct bmi323_data *data, int state)
+{
+       enum bmi323_irq_pin step_irq;
+       int ret;
+
+       guard(mutex)(&data->mutex);
+       if (!FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK, data->feature_events))
+               return -EINVAL;
+
+       if (state)
+               step_irq = data->irq_pin;
+       else
+               step_irq = BMI323_IRQ_DISABLED;
+
+       ret = bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG,
+                                   BMI323_STEP_SC1_WTRMRK_MSK,
+                                   FIELD_PREP(BMI323_STEP_SC1_WTRMRK_MSK,
+                                              state ? 1 : 0));
+       if (ret)
+               return ret;
+
+       return regmap_update_bits(data->regmap, BMI323_INT_MAP1_REG,
+                                 BMI323_STEP_CNT_MSK,
+                                 FIELD_PREP(BMI323_STEP_CNT_MSK, step_irq));
+}
+
+static int bmi323_motion_config_reg(enum iio_event_direction dir)
+{
+       switch (dir) {
+       case IIO_EV_DIR_RISING:
+               return BMI323_ANYMO1_REG;
+       case IIO_EV_DIR_FALLING:
+               return BMI323_NOMO1_REG;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int bmi323_motion_event_en(struct bmi323_data *data,
+                                 enum iio_event_direction dir, int state)
+{
+       unsigned int state_value = state ? BMI323_FEAT_XYZ_MSK : 0;
+       int config, ret, msk, raw, field_value;
+       enum bmi323_irq_pin motion_irq;
+       int irq_msk, irq_field_val;
+
+       if (state)
+               motion_irq = data->irq_pin;
+       else
+               motion_irq = BMI323_IRQ_DISABLED;
+
+       switch (dir) {
+       case IIO_EV_DIR_RISING:
+               msk = BMI323_FEAT_IO0_XYZ_MOTION_MSK;
+               raw = 512;
+               config = BMI323_ANYMO1_REG;
+               irq_msk = BMI323_MOTION_MSK;
+               irq_field_val = FIELD_PREP(BMI323_MOTION_MSK, motion_irq);
+               field_value = FIELD_PREP(BMI323_FEAT_IO0_XYZ_MOTION_MSK,
+                                        state_value);
+               break;
+       case IIO_EV_DIR_FALLING:
+               msk = BMI323_FEAT_IO0_XYZ_NOMOTION_MSK;
+               raw = 0;
+               config = BMI323_NOMO1_REG;
+               irq_msk = BMI323_NOMOTION_MSK;
+               irq_field_val = FIELD_PREP(BMI323_NOMOTION_MSK, motion_irq);
+               field_value = FIELD_PREP(BMI323_FEAT_IO0_XYZ_NOMOTION_MSK,
+                                        state_value);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       guard(mutex)(&data->mutex);
+       ret = bmi323_feature_engine_events(data, msk, state);
+       if (ret)
+               return ret;
+
+       ret = bmi323_update_ext_reg(data, config,
+                                   BMI323_MO1_REF_UP_MSK,
+                                   FIELD_PREP(BMI323_MO1_REF_UP_MSK, 0));
+       if (ret)
+               return ret;
+
+       /* Set initial value to avoid interrupts while enabling */
+       ret = bmi323_update_ext_reg(data, config,
+                                   BMI323_MO1_SLOPE_TH_MSK,
+                                   FIELD_PREP(BMI323_MO1_SLOPE_TH_MSK, raw));
+       if (ret)
+               return ret;
+
+       ret = regmap_update_bits(data->regmap, BMI323_INT_MAP1_REG, irq_msk,
+                                irq_field_val);
+       if (ret)
+               return ret;
+
+       set_mask_bits(&data->feature_events, msk, field_value);
+
+       return 0;
+}
+
+/*
+ * Enable/disable single- or double-tap event detection.
+ *
+ * Tap detection needs a fast enough accelerometer, so the request is
+ * rejected when its ODR is below 200 Hz. The tap interrupt stays routed
+ * to the configured IRQ pin while at least one tap event is enabled and
+ * is unrouted otherwise. On enable, default tap-engine parameters
+ * (max peaks, axis selection, wait timeout) are applied.
+ */
+static int bmi323_tap_event_en(struct bmi323_data *data,
+                              enum iio_event_direction dir, int state)
+{
+       enum bmi323_irq_pin tap_irq;
+       int ret, tap_enabled;
+
+       guard(mutex)(&data->mutex);
+
+       /* Tap feature requires accelerometer ODR of at least 200 Hz */
+       if (data->odrhz[BMI323_ACCEL] < 200) {
+               dev_err(data->dev, "Invalid accelerometer parameter\n");
+               return -EINVAL;
+       }
+
+       switch (dir) {
+       case IIO_EV_DIR_SINGLETAP:
+               ret = bmi323_feature_engine_events(data,
+                                                  BMI323_FEAT_IO0_S_TAP_MSK,
+                                                  state);
+               if (ret)
+                       return ret;
+
+               /* Track the enable state in the cached feature bitmap */
+               set_mask_bits(&data->feature_events, BMI323_FEAT_IO0_S_TAP_MSK,
+                             FIELD_PREP(BMI323_FEAT_IO0_S_TAP_MSK, state));
+               break;
+       case IIO_EV_DIR_DOUBLETAP:
+               ret = bmi323_feature_engine_events(data,
+                                                  BMI323_FEAT_IO0_D_TAP_MSK,
+                                                  state);
+               if (ret)
+                       return ret;
+
+               set_mask_bits(&data->feature_events, BMI323_FEAT_IO0_D_TAP_MSK,
+                             FIELD_PREP(BMI323_FEAT_IO0_D_TAP_MSK, state));
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Keep the interrupt routed while either tap event remains enabled */
+       tap_enabled = FIELD_GET(BMI323_FEAT_IO0_S_TAP_MSK |
+                               BMI323_FEAT_IO0_D_TAP_MSK,
+                               data->feature_events);
+
+       if (tap_enabled)
+               tap_irq = data->irq_pin;
+       else
+               tap_irq = BMI323_IRQ_DISABLED;
+
+       ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG,
+                                BMI323_TAP_MSK,
+                                FIELD_PREP(BMI323_TAP_MSK, tap_irq));
+       if (ret)
+               return ret;
+
+       /* No engine re-configuration is needed when disabling */
+       if (!state)
+               return 0;
+
+       ret = bmi323_update_ext_reg(data, BMI323_TAP1_REG,
+                                   BMI323_TAP1_MAX_PEAKS_MSK,
+                                   FIELD_PREP(BMI323_TAP1_MAX_PEAKS_MSK,
+                                              0x04));
+       if (ret)
+               return ret;
+
+       /* Detect taps on all three axes */
+       ret = bmi323_update_ext_reg(data, BMI323_TAP1_REG,
+                                   BMI323_TAP1_AXIS_SEL_MSK,
+                                   FIELD_PREP(BMI323_TAP1_AXIS_SEL_MSK,
+                                              BMI323_AXIS_XYZ_MSK));
+       if (ret)
+               return ret;
+
+       return bmi323_update_ext_reg(data, BMI323_TAP1_REG,
+                                    BMI323_TAP1_TIMOUT_MSK,
+                                    FIELD_PREP(BMI323_TAP1_TIMOUT_MSK,
+                                               0));
+}
+
+/*
+ * sysfs: show the tap wait duration as an "int.micro" seconds value
+ * decoded from the max-duration field of the TAP2 extended register.
+ */
+static ssize_t in_accel_gesture_tap_wait_dur_show(struct device *dev,
+                                                 struct device_attribute *attr,
+                                                 char *buf)
+{
+       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct bmi323_data *data = iio_priv(indio_dev);
+       unsigned int reg_value, raw;
+       int ret, val[2];
+
+       /* Serialize the extended-register read against other accesses */
+       scoped_guard(mutex, &data->mutex) {
+               ret = bmi323_read_ext_reg(data, BMI323_TAP2_REG, &reg_value);
+               if (ret)
+                       return ret;
+       }
+
+       raw = FIELD_GET(BMI323_TAP2_MAX_DUR_MSK, reg_value);
+       val[0] = raw / BMI323_MAX_GES_DUR_SCALE;
+       val[1] = BMI323_RAW_TO_MICRO(raw, BMI323_MAX_GES_DUR_SCALE);
+
+       return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(val),
+                               val);
+}
+
+/*
+ * sysfs: set the tap wait duration. The input is parsed as a fixed-point
+ * "int.micro" value, converted to the raw register scale and bounded to
+ * the 0..63 range the max-duration field accepts before updating TAP2.
+ */
+static ssize_t in_accel_gesture_tap_wait_dur_store(struct device *dev,
+                                                  struct device_attribute *attr,
+                                                  const char *buf, size_t len)
+{
+       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct bmi323_data *data = iio_priv(indio_dev);
+       int ret, val_int, val_fract, raw;
+
+       ret = iio_str_to_fixpoint(buf, 100000, &val_int, &val_fract);
+       if (ret)
+               return ret;
+
+       raw = BMI323_INT_MICRO_TO_RAW(val_int, val_fract,
+                                     BMI323_MAX_GES_DUR_SCALE);
+       if (!in_range(raw, 0, 64))
+               return -EINVAL;
+
+       guard(mutex)(&data->mutex);
+       ret = bmi323_update_ext_reg(data, BMI323_TAP2_REG,
+                                   BMI323_TAP2_MAX_DUR_MSK,
+                                   FIELD_PREP(BMI323_TAP2_MAX_DUR_MSK, raw));
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+/*
+ * Maximum duration after the first tap within which the second tap is
+ * expected to happen.
+ * This timeout is applicable only if gesture_tap_wait_timeout is enabled.
+ */
+static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_wait_dur, 0);
+
+/*
+ * sysfs: show whether tap confirmation with wait time is enabled,
+ * reading the wait-timeout field from the TAP1 extended register.
+ */
+static ssize_t in_accel_gesture_tap_wait_timeout_show(struct device *dev,
+                                                     struct device_attribute *attr,
+                                                     char *buf)
+{
+       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct bmi323_data *data = iio_priv(indio_dev);
+       unsigned int reg_value, raw;
+       int ret;
+
+       /* Serialize the extended-register read against other accesses */
+       scoped_guard(mutex, &data->mutex) {
+               ret = bmi323_read_ext_reg(data, BMI323_TAP1_REG, &reg_value);
+               if (ret)
+                       return ret;
+       }
+
+       raw = FIELD_GET(BMI323_TAP1_TIMOUT_MSK, reg_value);
+
+       return iio_format_value(buf, IIO_VAL_INT, 1, &raw);
+}
+
+/*
+ * sysfs: enable/disable tap confirmation with wait time by writing the
+ * boolean input to the wait-timeout field of the TAP1 extended register.
+ */
+static ssize_t in_accel_gesture_tap_wait_timeout_store(struct device *dev,
+                                                      struct device_attribute *attr,
+                                                      const char *buf,
+                                                      size_t len)
+{
+       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct bmi323_data *data = iio_priv(indio_dev);
+       bool val;
+       int ret;
+
+       ret = kstrtobool(buf, &val);
+       if (ret)
+               return ret;
+
+       guard(mutex)(&data->mutex);
+       ret = bmi323_update_ext_reg(data, BMI323_TAP1_REG,
+                                   BMI323_TAP1_TIMOUT_MSK,
+                                   FIELD_PREP(BMI323_TAP1_TIMOUT_MSK, val));
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+/* Enable/disable gesture confirmation with wait time */
+static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_wait_timeout, 0);
+
+static IIO_CONST_ATTR(in_accel_gesture_tap_wait_dur_available,
+                     "[0.0 0.04 2.52]");
+
+static IIO_CONST_ATTR(in_accel_gesture_doubletap_tap2_min_delay_available,
+                     "[0.005 0.005 0.075]");
+
+static IIO_CONST_ATTR(in_accel_gesture_tap_reset_timeout_available,
+                     "[0.04 0.04 0.6]");
+
+static IIO_CONST_ATTR(in_accel_gesture_tap_value_available, "[0.0 0.002 1.99]");
+
+static IIO_CONST_ATTR(in_accel_mag_value_available, "[0.0 0.002 7.99]");
+
+static IIO_CONST_ATTR(in_accel_mag_period_available, "[0.0 0.02 162.0]");
+
+static IIO_CONST_ATTR(in_accel_mag_hysteresis_available, "[0.0 0.002 1.99]");
+
+static struct attribute *bmi323_event_attributes[] = {
+       &iio_const_attr_in_accel_gesture_tap_value_available.dev_attr.attr,
+       &iio_const_attr_in_accel_gesture_tap_reset_timeout_available.dev_attr.attr,
+       &iio_const_attr_in_accel_gesture_doubletap_tap2_min_delay_available.dev_attr.attr,
+       &iio_const_attr_in_accel_gesture_tap_wait_dur_available.dev_attr.attr,
+       &iio_dev_attr_in_accel_gesture_tap_wait_timeout.dev_attr.attr,
+       &iio_dev_attr_in_accel_gesture_tap_wait_dur.dev_attr.attr,
+       &iio_const_attr_in_accel_mag_value_available.dev_attr.attr,
+       &iio_const_attr_in_accel_mag_period_available.dev_attr.attr,
+       &iio_const_attr_in_accel_mag_hysteresis_available.dev_attr.attr,
+       NULL
+};
+
+static const struct attribute_group bmi323_event_attribute_group = {
+       .attrs = bmi323_event_attributes,
+};
+
+/*
+ * IIO write_event_config callback: dispatch enable/disable requests by
+ * event type (MAG -> motion, GESTURE -> tap, CHANGE -> step watermark).
+ */
+static int bmi323_write_event_config(struct iio_dev *indio_dev,
+                                    const struct iio_chan_spec *chan,
+                                    enum iio_event_type type,
+                                    enum iio_event_direction dir, int state)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+
+       switch (type) {
+       case IIO_EV_TYPE_MAG:
+               return bmi323_motion_event_en(data, dir, state);
+       case IIO_EV_TYPE_GESTURE:
+               return bmi323_tap_event_en(data, dir, state);
+       case IIO_EV_TYPE_CHANGE:
+               return bmi323_step_wtrmrk_en(data, state);
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * IIO read_event_config callback: report whether an event is enabled.
+ *
+ * Accelerometer events (tap, motion, no-motion) are answered from the
+ * cached feature_events bitmap; the step watermark state is read back
+ * from the interrupt mapping register.
+ */
+static int bmi323_read_event_config(struct iio_dev *indio_dev,
+                                   const struct iio_chan_spec *chan,
+                                   enum iio_event_type type,
+                                   enum iio_event_direction dir)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+       int ret, value, reg_val;
+
+       guard(mutex)(&data->mutex);
+
+       switch (chan->type) {
+       case IIO_ACCEL:
+               switch (dir) {
+               case IIO_EV_DIR_SINGLETAP:
+                       ret = FIELD_GET(BMI323_FEAT_IO0_S_TAP_MSK,
+                                       data->feature_events);
+                       break;
+               case IIO_EV_DIR_DOUBLETAP:
+                       ret = FIELD_GET(BMI323_FEAT_IO0_D_TAP_MSK,
+                                       data->feature_events);
+                       break;
+               case IIO_EV_DIR_RISING:
+                       /* Rising direction maps to the motion event */
+                       value = FIELD_GET(BMI323_FEAT_IO0_XYZ_MOTION_MSK,
+                                         data->feature_events);
+                       ret = value ? 1 : 0;
+                       break;
+               case IIO_EV_DIR_FALLING:
+                       /* Falling direction maps to the no-motion event */
+                       value = FIELD_GET(BMI323_FEAT_IO0_XYZ_NOMOTION_MSK,
+                                         data->feature_events);
+                       ret = value ? 1 : 0;
+                       break;
+               default:
+                       ret = -EINVAL;
+                       break;
+               }
+               return ret;
+       case IIO_STEPS:
+               /* Enabled iff the step counter interrupt is routed */
+               ret = regmap_read(data->regmap, BMI323_INT_MAP1_REG, &reg_val);
+               if (ret)
+                       return ret;
+
+               return FIELD_GET(BMI323_STEP_CNT_MSK, reg_val) ? 1 : 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * IIO write_event_value callback.
+ *
+ * Values arrive as IIO fixed-point (val + val2 micro) pairs and are
+ * converted to raw register fields via BMI323_INT_MICRO_TO_RAW(). The
+ * in_range() checks bound inputs to what the hardware fields accept
+ * (matching the *_available attributes exposed to userspace).
+ */
+static int bmi323_write_event_value(struct iio_dev *indio_dev,
+                                   const struct iio_chan_spec *chan,
+                                   enum iio_event_type type,
+                                   enum iio_event_direction dir,
+                                   enum iio_event_info info,
+                                   int val, int val2)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+       unsigned int raw;
+       int reg;
+
+       guard(mutex)(&data->mutex);
+
+       switch (type) {
+       case IIO_EV_TYPE_GESTURE:
+               switch (info) {
+               case IIO_EV_INFO_VALUE:
+                       /* Tap threshold */
+                       if (!in_range(val, 0, 2))
+                               return -EINVAL;
+
+                       raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+                                                     BMI323_TAP_THRES_SCALE);
+
+                       return bmi323_update_ext_reg(data, BMI323_TAP2_REG,
+                                                    BMI323_TAP2_THRES_MSK,
+                                                    FIELD_PREP(BMI323_TAP2_THRES_MSK,
+                                                               raw));
+               case IIO_EV_INFO_RESET_TIMEOUT:
+                       /* Quiet time after gesture: 40ms..560ms, val must be 0 */
+                       if (val || !in_range(val2, 40000, 560001))
+                               return -EINVAL;
+
+                       raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+                                                     BMI323_QUITE_TIM_GES_SCALE);
+
+                       return bmi323_update_ext_reg(data, BMI323_TAP3_REG,
+                                                    BMI323_TAP3_QT_AFT_GES_MSK,
+                                                    FIELD_PREP(BMI323_TAP3_QT_AFT_GES_MSK,
+                                                               raw));
+               case IIO_EV_INFO_TAP2_MIN_DELAY:
+                       /* Quiet time between taps: 5ms..70ms, val must be 0 */
+                       if (val || !in_range(val2, 5000, 70001))
+                               return -EINVAL;
+
+                       raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+                                                     BMI323_DUR_BW_TAP_SCALE);
+
+                       return bmi323_update_ext_reg(data, BMI323_TAP3_REG,
+                                                    BMI323_TAP3_QT_BW_TAP_MSK,
+                                                    FIELD_PREP(BMI323_TAP3_QT_BW_TAP_MSK,
+                                                               raw));
+               default:
+                       return -EINVAL;
+               }
+       case IIO_EV_TYPE_MAG:
+               /* Motion/no-motion config register depends on direction */
+               reg = bmi323_motion_config_reg(dir);
+               if (reg < 0)
+                       return -EINVAL;
+
+               switch (info) {
+               case IIO_EV_INFO_VALUE:
+                       /* Motion slope threshold */
+                       if (!in_range(val, 0, 8))
+                               return -EINVAL;
+
+                       raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+                                                     BMI323_MOTION_THRES_SCALE);
+
+                       return bmi323_update_ext_reg(data, reg,
+                                                    BMI323_MO1_SLOPE_TH_MSK,
+                                                    FIELD_PREP(BMI323_MO1_SLOPE_TH_MSK,
+                                                               raw));
+               case IIO_EV_INFO_PERIOD:
+                       /* Motion duration */
+                       if (!in_range(val, 0, 163))
+                               return -EINVAL;
+
+                       raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+                                                     BMI323_MOTION_DURAT_SCALE);
+
+                       return bmi323_update_ext_reg(data,
+                                                    reg + BMI323_MO3_OFFSET,
+                                                    BMI323_MO3_DURA_MSK,
+                                                    FIELD_PREP(BMI323_MO3_DURA_MSK,
+                                                               raw));
+               case IIO_EV_INFO_HYSTERESIS:
+                       if (!in_range(val, 0, 2))
+                               return -EINVAL;
+
+                       raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+                                                     BMI323_MOTION_HYSTR_SCALE);
+
+                       return bmi323_update_ext_reg(data,
+                                                    reg + BMI323_MO2_OFFSET,
+                                                    BMI323_MO2_HYSTR_MSK,
+                                                    FIELD_PREP(BMI323_MO2_HYSTR_MSK,
+                                                               raw));
+               default:
+                       return -EINVAL;
+               }
+       case IIO_EV_TYPE_CHANGE:
+               /* Step watermark is programmed in units of 20 steps */
+               if (!in_range(val, 0, 20461))
+                       return -EINVAL;
+
+               raw = val / 20;
+               return bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG,
+                                            BMI323_STEP_SC1_WTRMRK_MSK,
+                                            FIELD_PREP(BMI323_STEP_SC1_WTRMRK_MSK,
+                                                       raw));
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * IIO read_event_value callback: read back event parameters, converting
+ * raw register fields to IIO fixed-point (val + val2 micro) pairs.
+ * Mirrors the conversions used in bmi323_write_event_value().
+ */
+static int bmi323_read_event_value(struct iio_dev *indio_dev,
+                                  const struct iio_chan_spec *chan,
+                                  enum iio_event_type type,
+                                  enum iio_event_direction dir,
+                                  enum iio_event_info info,
+                                  int *val, int *val2)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+       unsigned int raw, reg_value;
+       int ret, reg;
+
+       guard(mutex)(&data->mutex);
+
+       switch (type) {
+       case IIO_EV_TYPE_GESTURE:
+               switch (info) {
+               case IIO_EV_INFO_VALUE:
+                       /* Tap threshold */
+                       ret = bmi323_read_ext_reg(data, BMI323_TAP2_REG,
+                                                 &reg_value);
+                       if (ret)
+                               return ret;
+
+                       raw = FIELD_GET(BMI323_TAP2_THRES_MSK, reg_value);
+                       *val = raw / BMI323_TAP_THRES_SCALE;
+                       *val2 = BMI323_RAW_TO_MICRO(raw, BMI323_TAP_THRES_SCALE);
+                       return IIO_VAL_INT_PLUS_MICRO;
+               case IIO_EV_INFO_RESET_TIMEOUT:
+                       /* Quiet time after gesture; always sub-second */
+                       ret = bmi323_read_ext_reg(data, BMI323_TAP3_REG,
+                                                 &reg_value);
+                       if (ret)
+                               return ret;
+
+                       raw = FIELD_GET(BMI323_TAP3_QT_AFT_GES_MSK, reg_value);
+                       *val = 0;
+                       *val2 = BMI323_RAW_TO_MICRO(raw,
+                                                   BMI323_QUITE_TIM_GES_SCALE);
+                       return IIO_VAL_INT_PLUS_MICRO;
+               case IIO_EV_INFO_TAP2_MIN_DELAY:
+                       /* Quiet time between taps; always sub-second */
+                       ret = bmi323_read_ext_reg(data, BMI323_TAP3_REG,
+                                                 &reg_value);
+                       if (ret)
+                               return ret;
+
+                       raw = FIELD_GET(BMI323_TAP3_QT_BW_TAP_MSK, reg_value);
+                       *val = 0;
+                       *val2 = BMI323_RAW_TO_MICRO(raw,
+                                                   BMI323_DUR_BW_TAP_SCALE);
+                       return IIO_VAL_INT_PLUS_MICRO;
+               default:
+                       return -EINVAL;
+               }
+       case IIO_EV_TYPE_MAG:
+               /* Motion/no-motion config register depends on direction */
+               reg = bmi323_motion_config_reg(dir);
+               if (reg < 0)
+                       return -EINVAL;
+
+               switch (info) {
+               case IIO_EV_INFO_VALUE:
+                       ret = bmi323_read_ext_reg(data, reg, &reg_value);
+                       if (ret)
+                               return ret;
+
+                       raw = FIELD_GET(BMI323_MO1_SLOPE_TH_MSK, reg_value);
+                       *val = raw / BMI323_MOTION_THRES_SCALE;
+                       *val2 = BMI323_RAW_TO_MICRO(raw,
+                                                   BMI323_MOTION_THRES_SCALE);
+                       return IIO_VAL_INT_PLUS_MICRO;
+               case IIO_EV_INFO_PERIOD:
+                       ret = bmi323_read_ext_reg(data,
+                                                 reg + BMI323_MO3_OFFSET,
+                                                 &reg_value);
+                       if (ret)
+                               return ret;
+
+                       raw = FIELD_GET(BMI323_MO3_DURA_MSK, reg_value);
+                       *val = raw / BMI323_MOTION_DURAT_SCALE;
+                       *val2 = BMI323_RAW_TO_MICRO(raw,
+                                                   BMI323_MOTION_DURAT_SCALE);
+                       return IIO_VAL_INT_PLUS_MICRO;
+               case IIO_EV_INFO_HYSTERESIS:
+                       ret = bmi323_read_ext_reg(data,
+                                                 reg + BMI323_MO2_OFFSET,
+                                                 &reg_value);
+                       if (ret)
+                               return ret;
+
+                       raw = FIELD_GET(BMI323_MO2_HYSTR_MSK, reg_value);
+                       *val = raw / BMI323_MOTION_HYSTR_SCALE;
+                       *val2 = BMI323_RAW_TO_MICRO(raw,
+                                                   BMI323_MOTION_HYSTR_SCALE);
+                       return IIO_VAL_INT_PLUS_MICRO;
+               default:
+                       return -EINVAL;
+               }
+       case IIO_EV_TYPE_CHANGE:
+               /* Step watermark is stored in units of 20 steps */
+               ret = bmi323_read_ext_reg(data, BMI323_STEP_SC1_REG,
+                                         &reg_value);
+               if (ret)
+                       return ret;
+
+               raw = FIELD_GET(BMI323_STEP_SC1_WTRMRK_MSK, reg_value);
+               *val = raw * 20;
+               return IIO_VAL_INT;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * Drain the hardware FIFO into the IIO buffer.
+ *
+ * Reads the fill level, clamps it to the FIFO capacity, bulk-reads the
+ * data and pushes one buffer entry per complete frame, demuxed by the
+ * active scan mask. Returns the number of frames pushed, or a negative
+ * errno. Per-sample timestamps are interpolated (see comment below).
+ */
+static int __bmi323_fifo_flush(struct iio_dev *indio_dev)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+       int i, ret, fifo_lvl, frame_count, bit, index;
+       __le16 *frame, *pchannels;
+       u64 sample_period;
+       s64 tstamp;
+
+       guard(mutex)(&data->mutex);
+       ret = regmap_read(data->regmap, BMI323_FIFO_FILL_LEVEL_REG, &fifo_lvl);
+       if (ret)
+               return ret;
+
+       fifo_lvl = min(fifo_lvl, BMI323_FIFO_FULL_IN_WORDS);
+
+       frame_count = fifo_lvl / BMI323_FIFO_FRAME_LENGTH;
+       if (!frame_count)
+               return -EINVAL;
+
+       if (fifo_lvl % BMI323_FIFO_FRAME_LENGTH)
+               dev_warn(data->dev, "Bad FIFO alignment\n");
+
+       /*
+        * Approximate timestamps for each of the sample based on the sampling
+        * frequency, timestamp for last sample and number of samples.
+        */
+       if (data->old_fifo_tstamp) {
+               sample_period = data->fifo_tstamp - data->old_fifo_tstamp;
+               do_div(sample_period, frame_count);
+       } else {
+               /* First flush: fall back to the configured accel period */
+               sample_period = data->odrns[BMI323_ACCEL];
+       }
+
+       tstamp = data->fifo_tstamp - (frame_count - 1) * sample_period;
+
+       ret = regmap_noinc_read(data->regmap, BMI323_FIFO_DATA_REG,
+                               &data->fifo_buff[0],
+                               fifo_lvl * BMI323_BYTES_PER_SAMPLE);
+       if (ret)
+               return ret;
+
+       /* Demux each frame according to the active scan mask */
+       for (i = 0; i < frame_count; i++) {
+               frame = &data->fifo_buff[i * BMI323_FIFO_FRAME_LENGTH];
+               pchannels = &data->buffer.channels[0];
+
+               index = 0;
+               for_each_set_bit(bit, indio_dev->active_scan_mask,
+                                BMI323_CHAN_MAX)
+                       pchannels[index++] = frame[bit];
+
+               iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
+                                                  tstamp);
+
+               tstamp += sample_period;
+       }
+
+       return frame_count;
+}
+
+/*
+ * Cache the requested FIFO watermark (in frames), clamped to the FIFO
+ * capacity; the hardware is programmed later by bmi323_update_watermark().
+ */
+static int bmi323_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+
+       val = min(val, (u32)BMI323_FIFO_FULL_IN_FRAMES);
+
+       guard(mutex)(&data->mutex);
+       data->watermark = val;
+
+       return 0;
+}
+
+/*
+ * Tear down FIFO buffering: disable the FIFO, unroute the watermark
+ * interrupt and reset the timestamp tracking and driver state.
+ */
+static int bmi323_fifo_disable(struct bmi323_data *data)
+{
+       int ret;
+
+       guard(mutex)(&data->mutex);
+       ret = regmap_write(data->regmap, BMI323_FIFO_CONF_REG, 0);
+       if (ret)
+               return ret;
+
+       ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG,
+                                BMI323_FIFO_WTRMRK_MSK,
+                                FIELD_PREP(BMI323_FIFO_WTRMRK_MSK, 0));
+       if (ret)
+               return ret;
+
+       /* Forget the last flush time so the next enable starts fresh */
+       data->fifo_tstamp = 0;
+       data->state = BMI323_IDLE;
+
+       return 0;
+}
+
+/*
+ * Buffer predisable: in triggered mode the trigger path owns the device
+ * and there is nothing to do; otherwise tear down the hardware FIFO.
+ */
+static int bmi323_buffer_predisable(struct iio_dev *indio_dev)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+
+       if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
+               return 0;
+
+       return bmi323_fifo_disable(data);
+}
+
+/*
+ * Program the cached watermark into the hardware, converting the frame
+ * count with BMI323_FIFO_FRAME_LENGTH.
+ */
+static int bmi323_update_watermark(struct bmi323_data *data)
+{
+       int wtrmrk;
+
+       wtrmrk = data->watermark * BMI323_FIFO_FRAME_LENGTH;
+
+       return regmap_write(data->regmap, BMI323_FIFO_WTRMRK_REG, wtrmrk);
+}
+
+/*
+ * Set up FIFO buffering: enable accel+gyro FIFO, route the watermark
+ * interrupt to the configured IRQ pin, program the watermark and flush
+ * stale contents before marking the driver state as FIFO mode.
+ */
+static int bmi323_fifo_enable(struct bmi323_data *data)
+{
+       int ret;
+
+       guard(mutex)(&data->mutex);
+       ret = regmap_update_bits(data->regmap, BMI323_FIFO_CONF_REG,
+                                BMI323_FIFO_CONF_ACC_GYR_EN_MSK,
+                                FIELD_PREP(BMI323_FIFO_CONF_ACC_GYR_EN_MSK,
+                                           BMI323_FIFO_ACC_GYR_MSK));
+       if (ret)
+               return ret;
+
+       ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG,
+                                BMI323_FIFO_WTRMRK_MSK,
+                                FIELD_PREP(BMI323_FIFO_WTRMRK_MSK,
+                                           data->irq_pin));
+       if (ret)
+               return ret;
+
+       ret = bmi323_update_watermark(data);
+       if (ret)
+               return ret;
+
+       /* Discard anything buffered before this point */
+       ret = regmap_write(data->regmap, BMI323_FIFO_CTRL_REG,
+                          BMI323_FIFO_FLUSH_MSK);
+       if (ret)
+               return ret;
+
+       data->state = BMI323_BUFFER_FIFO;
+
+       return 0;
+}
+
+/*
+ * Buffer preenable: refuse buffered capture unless the accelerometer and
+ * gyroscope ODRs match (see the rationale below).
+ */
+static int bmi323_buffer_preenable(struct iio_dev *indio_dev)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+
+       guard(mutex)(&data->mutex);
+       /*
+        * When the ODR of the accelerometer and gyroscope do not match, the
+        * maximum ODR value between the accelerometer and gyroscope is used
+        * for FIFO and the signal with lower ODR will insert dummy frame.
+        * So allow buffer read only when ODR's of accelero and gyro are equal.
+        * See datasheet section 5.7 "FIFO Data Buffering".
+        */
+       if (data->odrns[BMI323_ACCEL] != data->odrns[BMI323_GYRO]) {
+               dev_err(data->dev, "Accelero and Gyro ODR doesn't match\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Buffer postenable: in triggered mode the trigger path drives capture,
+ * so only set up the hardware FIFO when not triggered.
+ */
+static int bmi323_buffer_postenable(struct iio_dev *indio_dev)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+
+       if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
+               return 0;
+
+       return bmi323_fifo_enable(data);
+}
+
+/* sysfs: report the currently cached FIFO watermark (in frames) */
+static ssize_t hwfifo_watermark_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct bmi323_data *data = iio_priv(indio_dev);
+       int wm;
+
+       scoped_guard(mutex, &data->mutex)
+               wm = data->watermark;
+
+       return sysfs_emit(buf, "%d\n", wm);
+}
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark, 0);
+
+/* sysfs: report whether the hardware FIFO is currently in use */
+static ssize_t hwfifo_enabled_show(struct device *dev,
+                                  struct device_attribute *attr,
+                                  char *buf)
+{
+       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct bmi323_data *data = iio_priv(indio_dev);
+       bool state;
+
+       scoped_guard(mutex, &data->mutex)
+               state = data->state == BMI323_BUFFER_FIFO;
+
+       return sysfs_emit(buf, "%d\n", state);
+}
+static IIO_DEVICE_ATTR_RO(hwfifo_enabled, 0);
+
+static const struct iio_dev_attr *bmi323_fifo_attributes[] = {
+       &iio_dev_attr_hwfifo_watermark,
+       &iio_dev_attr_hwfifo_enabled,
+       NULL
+};
+
+static const struct iio_buffer_setup_ops bmi323_buffer_ops = {
+       .preenable = bmi323_buffer_preenable,
+       .postenable = bmi323_buffer_postenable,
+       .predisable = bmi323_buffer_predisable,
+};
+
+/*
+ * Threaded IRQ handler: read the status register for the active IRQ pin
+ * and fan out to the FIFO flush, the data-ready trigger and IIO event
+ * delivery (motion, no-motion, step watermark, single/double tap).
+ */
+static irqreturn_t bmi323_irq_thread_handler(int irq, void *private)
+{
+       struct iio_dev *indio_dev = private;
+       struct bmi323_data *data = iio_priv(indio_dev);
+       unsigned int status_addr, status, feature_event;
+       s64 timestamp = iio_get_time_ns(indio_dev);
+       int ret;
+
+       if (data->irq_pin == BMI323_IRQ_INT1)
+               status_addr = BMI323_STATUS_INT1_REG;
+       else
+               status_addr = BMI323_STATUS_INT2_REG;
+
+       scoped_guard(mutex, &data->mutex) {
+               ret = regmap_read(data->regmap, status_addr, &status);
+               if (ret)
+                       return IRQ_NONE;
+       }
+
+       /* Nothing pending, or the device reports an error condition */
+       if (!status || FIELD_GET(BMI323_STATUS_ERROR_MSK, status))
+               return IRQ_NONE;
+
+       if (FIELD_GET(BMI323_STATUS_FIFO_WTRMRK_MSK, status)) {
+               /* Remember the previous flush time for timestamp interpolation */
+               data->old_fifo_tstamp = data->fifo_tstamp;
+               data->fifo_tstamp = iio_get_time_ns(indio_dev);
+               ret = __bmi323_fifo_flush(indio_dev);
+               if (ret < 0)
+                       return IRQ_NONE;
+       }
+
+       if (FIELD_GET(BMI323_STATUS_ACC_GYR_DRDY_MSK, status))
+               iio_trigger_poll_nested(data->trig);
+
+       if (FIELD_GET(BMI323_STATUS_MOTION_MSK, status))
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+                                                            IIO_MOD_X_OR_Y_OR_Z,
+                                                            IIO_EV_TYPE_MAG,
+                                                            IIO_EV_DIR_RISING),
+                              timestamp);
+
+       if (FIELD_GET(BMI323_STATUS_NOMOTION_MSK, status))
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+                                                            IIO_MOD_X_OR_Y_OR_Z,
+                                                            IIO_EV_TYPE_MAG,
+                                                            IIO_EV_DIR_FALLING),
+                              timestamp);
+
+       if (FIELD_GET(BMI323_STATUS_STP_WTR_MSK, status))
+               iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_STEPS, 0,
+                                                            IIO_NO_MOD,
+                                                            IIO_EV_TYPE_CHANGE,
+                                                            IIO_EV_DIR_NONE),
+                              timestamp);
+
+       if (FIELD_GET(BMI323_STATUS_TAP_MSK, status)) {
+               /* An extra register read tells single from double tap */
+               scoped_guard(mutex, &data->mutex) {
+                       ret = regmap_read(data->regmap,
+                                         BMI323_FEAT_EVNT_EXT_REG,
+                                         &feature_event);
+                       if (ret)
+                               return IRQ_NONE;
+               }
+
+               if (FIELD_GET(BMI323_FEAT_EVNT_EXT_S_MSK, feature_event)) {
+                       iio_push_event(indio_dev,
+                                      IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+                                                         IIO_MOD_X_OR_Y_OR_Z,
+                                                         IIO_EV_TYPE_GESTURE,
+                                                         IIO_EV_DIR_SINGLETAP),
+                                      timestamp);
+               }
+
+               if (FIELD_GET(BMI323_FEAT_EVNT_EXT_D_MSK, feature_event))
+                       iio_push_event(indio_dev,
+                                      IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+                                                         IIO_MOD_X_OR_Y_OR_Z,
+                                                         IIO_EV_TYPE_GESTURE,
+                                                         IIO_EV_DIR_DOUBLETAP),
+                                      timestamp);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Route (or unroute, via BMI323_IRQ_DISABLED) both the gyroscope and
+ * accelerometer data-ready interrupts to the given IRQ pin.
+ */
+static int bmi323_set_drdy_irq(struct bmi323_data *data,
+                              enum bmi323_irq_pin irq_pin)
+{
+       int ret;
+
+       ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG,
+                                BMI323_GYR_DRDY_MSK,
+                                FIELD_PREP(BMI323_GYR_DRDY_MSK, irq_pin));
+       if (ret)
+               return ret;
+
+       return regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG,
+                                 BMI323_ACC_DRDY_MSK,
+                                 FIELD_PREP(BMI323_ACC_DRDY_MSK, irq_pin));
+}
+
+/*
+ * Enable/disable the data-ready trigger. Trigger mode is mutually
+ * exclusive with FIFO mode, so the request is refused while the FIFO
+ * is in use.
+ */
+static int bmi323_data_rdy_trigger_set_state(struct iio_trigger *trig,
+                                            bool state)
+{
+       struct bmi323_data *data = iio_trigger_get_drvdata(trig);
+       enum bmi323_irq_pin irq_pin;
+
+       guard(mutex)(&data->mutex);
+
+       if (data->state == BMI323_BUFFER_FIFO) {
+               dev_warn(data->dev, "Can't set trigger when FIFO enabled\n");
+               return -EBUSY;
+       }
+
+       if (state) {
+               data->state = BMI323_BUFFER_DRDY_TRIGGERED;
+               irq_pin = data->irq_pin;
+       } else {
+               data->state = BMI323_IDLE;
+               irq_pin = BMI323_IRQ_DISABLED;
+       }
+
+       return bmi323_set_drdy_irq(data, irq_pin);
+}
+
+static const struct iio_trigger_ops bmi323_trigger_ops = {
+       .set_trigger_state = &bmi323_data_rdy_trigger_set_state,
+};
+
+/*
+ * Poll-function bottom half: read the enabled channels (one bulk read
+ * when every channel is active, per-channel reads otherwise) and push
+ * the sample with a timestamp.
+ */
+static irqreturn_t bmi323_trigger_handler(int irq, void *p)
+{
+       struct iio_poll_func *pf = p;
+       struct iio_dev *indio_dev = pf->indio_dev;
+       struct bmi323_data *data = iio_priv(indio_dev);
+       int ret, bit, index = 0;
+
+       /* Lock to protect the data->buffer */
+       guard(mutex)(&data->mutex);
+
+       if (*indio_dev->active_scan_mask == BMI323_ALL_CHAN_MSK) {
+               ret = regmap_bulk_read(data->regmap, BMI323_ACCEL_X_REG,
+                                      &data->buffer.channels,
+                                      ARRAY_SIZE(data->buffer.channels));
+               if (ret)
+                       return IRQ_NONE;
+       } else {
+               /* Sparse scan mask: fetch only the enabled channels */
+               for_each_set_bit(bit, indio_dev->active_scan_mask,
+                                BMI323_CHAN_MAX) {
+                       ret = regmap_raw_read(data->regmap,
+                                             BMI323_ACCEL_X_REG + bit,
+                                             &data->buffer.channels[index++],
+                                             BMI323_BYTES_PER_SAMPLE);
+                       if (ret)
+                               return IRQ_NONE;
+               }
+       }
+
+       iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
+                                          iio_get_time_ns(indio_dev));
+
+       iio_trigger_notify_done(indio_dev->trig);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Set the averaging (oversampling) factor for @sensor. @avg must be one
+ * of the values in bmi323_accel_gyro_avrg; its table index is the raw
+ * register encoding.
+ */
+static int bmi323_set_average(struct bmi323_data *data,
+                             enum bmi323_sensor_type sensor, int avg)
+{
+       int raw = ARRAY_SIZE(bmi323_accel_gyro_avrg);
+
+       /* Reverse lookup; raw ends at -1 when @avg is not in the table */
+       while (raw--)
+               if (avg == bmi323_accel_gyro_avrg[raw])
+                       break;
+       if (raw < 0)
+               return -EINVAL;
+
+       guard(mutex)(&data->mutex);
+       return regmap_update_bits(data->regmap, bmi323_hw[sensor].config,
+                                BMI323_ACC_GYRO_CONF_AVG_MSK,
+                                FIELD_PREP(BMI323_ACC_GYRO_CONF_AVG_MSK,
+                                           raw));
+}
+
+/*
+ * Read back the averaging factor for @sensor by translating the raw
+ * register field through bmi323_accel_gyro_avrg.
+ */
+static int bmi323_get_average(struct bmi323_data *data,
+                             enum bmi323_sensor_type sensor, int *avg)
+{
+       int ret, value, raw;
+
+       scoped_guard(mutex, &data->mutex) {
+               ret = regmap_read(data->regmap, bmi323_hw[sensor].config, &value);
+               if (ret)
+                       return ret;
+       }
+
+       raw = FIELD_GET(BMI323_ACC_GYRO_CONF_AVG_MSK, value);
+       *avg = bmi323_accel_gyro_avrg[raw];
+
+       return IIO_VAL_INT;
+}
+
+/*
+ * Enable (val != 0) or disable the step counter feature. The feature
+ * engine requires the accelerometer ODR to be at least 200 Hz.
+ */
+static int bmi323_enable_steps(struct bmi323_data *data, int val)
+{
+       int ret;
+
+       guard(mutex)(&data->mutex);
+       if (data->odrhz[BMI323_ACCEL] < 200) {
+               dev_err(data->dev, "Invalid accelerometer parameter\n");
+               return -EINVAL;
+       }
+
+       ret = bmi323_feature_engine_events(data, BMI323_FEAT_IO0_STP_CNT_MSK,
+                                          val ? 1 : 0);
+       if (ret)
+               return ret;
+
+       /* Mirror the hardware state in the cached feature_events bitmap */
+       set_mask_bits(&data->feature_events, BMI323_FEAT_IO0_STP_CNT_MSK,
+                     FIELD_PREP(BMI323_FEAT_IO0_STP_CNT_MSK, val ? 1 : 0));
+
+       return 0;
+}
+
+/*
+ * Read the 32-bit step count (little-endian, spread over two 16-bit
+ * feature I/O registers). Fails if the step counter is not enabled.
+ */
+static int bmi323_read_steps(struct bmi323_data *data, int *val)
+{
+       int ret;
+
+       guard(mutex)(&data->mutex);
+       if (!FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK, data->feature_events))
+               return -EINVAL;
+
+       ret = regmap_bulk_read(data->regmap, BMI323_FEAT_IO2_REG,
+                              data->steps_count,
+                              ARRAY_SIZE(data->steps_count));
+       if (ret)
+               return ret;
+
+       *val = get_unaligned_le32(data->steps_count);
+
+       return IIO_VAL_INT;
+}
+
+/*
+ * Read one raw accel/gyro axis sample. The register address is derived
+ * from the sensor's data base register plus the channel modifier offset
+ * (X/Y/Z are consecutive registers).
+ */
+static int bmi323_read_axis(struct bmi323_data *data,
+                           struct iio_chan_spec const *chan, int *val)
+{
+       enum bmi323_sensor_type sensor;
+       unsigned int value;
+       u8 addr;
+       int ret;
+
+       ret = bmi323_get_error_status(data);
+       if (ret)
+               return -EINVAL;
+
+       sensor = bmi323_iio_to_sensor(chan->type);
+       addr = bmi323_hw[sensor].data + (chan->channel2 - IIO_MOD_X);
+
+       scoped_guard(mutex, &data->mutex) {
+               ret = regmap_read(data->regmap, addr, &value);
+               if (ret)
+                       return ret;
+       }
+
+       /* Samples are signed; realbits comes from the channel spec */
+       *val = sign_extend32(value, chan->scan_type.realbits - 1);
+
+       return IIO_VAL_INT;
+}
+
+/* Read the raw 16-bit signed temperature sample. */
+static int bmi323_get_temp_data(struct bmi323_data *data, int *val)
+{
+       unsigned int value;
+       int ret;
+
+       ret = bmi323_get_error_status(data);
+       if (ret)
+               return -EINVAL;
+
+       scoped_guard(mutex, &data->mutex) {
+               ret = regmap_read(data->regmap, BMI323_TEMP_REG, &value);
+               if (ret)
+                       return ret;
+       }
+
+       *val = sign_extend32(value, 15);
+
+       return IIO_VAL_INT;
+}
+
+/*
+ * Read back the output data rate for @sensor as an integer + micro pair.
+ * The register encodes the rate as table index + 1 (see bmi323_set_odr),
+ * hence the "- 1" below.
+ *
+ * NOTE(review): a raw ODR field of 0 would index bmi323_acc_gyro_odr[-1];
+ * this presumably cannot happen because bmi323_set_odr always writes a
+ * value >= 1 — confirm against the reset value of the config register.
+ */
+static int bmi323_get_odr(struct bmi323_data *data,
+                         enum bmi323_sensor_type sensor, int *odr, int *uodr)
+{
+       int ret, value, odr_raw;
+
+       scoped_guard(mutex, &data->mutex) {
+               ret = regmap_read(data->regmap, bmi323_hw[sensor].config, &value);
+               if (ret)
+                       return ret;
+       }
+
+       odr_raw = FIELD_GET(BMI323_ACC_GYRO_CONF_ODR_MSK, value);
+       *odr = bmi323_acc_gyro_odr[odr_raw - 1][0];
+       *uodr = bmi323_acc_gyro_odr[odr_raw - 1][1];
+
+       return IIO_VAL_INT_PLUS_MICRO;
+}
+
+/*
+ * Select the operating mode matching the requested ODR table entry:
+ * rates above 25 Hz need continuous operation, slower rates can
+ * duty-cycle the sensor.
+ */
+static int bmi323_configure_power_mode(struct bmi323_data *data,
+                                      enum bmi323_sensor_type sensor,
+                                      int odr_index)
+{
+       bool fast = bmi323_acc_gyro_odr[odr_index][0] > 25;
+
+       return bmi323_set_mode(data, sensor,
+                              fast ? ACC_GYRO_MODE_CONTINOUS :
+                                     ACC_GYRO_MODE_DUTYCYCLE);
+}
+
+/*
+ * Set the output data rate for @sensor. (@odr, @uodr) must match an
+ * entry of bmi323_acc_gyro_odr; the register encoding is that table
+ * index plus one.
+ */
+static int bmi323_set_odr(struct bmi323_data *data,
+                         enum bmi323_sensor_type sensor, int odr, int uodr)
+{
+       int odr_raw, ret;
+
+       odr_raw = ARRAY_SIZE(bmi323_acc_gyro_odr);
+
+       /* Reverse lookup; odr_raw ends at -1 when the rate is unknown */
+       while (odr_raw--)
+               if (odr == bmi323_acc_gyro_odr[odr_raw][0] &&
+                   uodr == bmi323_acc_gyro_odr[odr_raw][1])
+                       break;
+       if (odr_raw < 0)
+               return -EINVAL;
+
+       ret = bmi323_configure_power_mode(data, sensor, odr_raw);
+       if (ret)
+               return -EINVAL;
+
+       guard(mutex)(&data->mutex);
+       /* Cache the rate in Hz and the sample period in ns */
+       data->odrhz[sensor] = bmi323_acc_gyro_odr[odr_raw][0];
+       data->odrns[sensor] = bmi323_acc_gyro_odrns[odr_raw];
+
+       /* Hardware field is 1-based: table index -> register value */
+       odr_raw++;
+
+       return regmap_update_bits(data->regmap, bmi323_hw[sensor].config,
+                                 BMI323_ACC_GYRO_CONF_ODR_MSK,
+                                 FIELD_PREP(BMI323_ACC_GYRO_CONF_ODR_MSK,
+                                            odr_raw));
+}
+
+/*
+ * Read back the scale (full-scale range) for @sensor; only the micro
+ * part is returned, the integer part is always 0 for accel/gyro.
+ */
+static int bmi323_get_scale(struct bmi323_data *data,
+                           enum bmi323_sensor_type sensor, int *val2)
+{
+       int ret, value, scale_raw;
+
+       scoped_guard(mutex, &data->mutex) {
+               ret = regmap_read(data->regmap, bmi323_hw[sensor].config,
+                                 &value);
+               if (ret)
+                       return ret;
+       }
+
+       scale_raw = FIELD_GET(BMI323_ACC_GYRO_CONF_SCL_MSK, value);
+       *val2 = bmi323_hw[sensor].scale_table[scale_raw][1];
+
+       return IIO_VAL_INT_PLUS_MICRO;
+}
+
+/*
+ * Set the scale for @sensor. (@val, @val2) must match an entry of the
+ * per-sensor scale table; the table index is the register encoding.
+ */
+static int bmi323_set_scale(struct bmi323_data *data,
+                           enum bmi323_sensor_type sensor, int val, int val2)
+{
+       int scale_raw;
+
+       scale_raw = bmi323_hw[sensor].scale_table_len;
+
+       /* Reverse lookup; scale_raw ends at -1 when no entry matches */
+       while (scale_raw--)
+               if (val == bmi323_hw[sensor].scale_table[scale_raw][0] &&
+                   val2 == bmi323_hw[sensor].scale_table[scale_raw][1])
+                       break;
+       if (scale_raw < 0)
+               return -EINVAL;
+
+       guard(mutex)(&data->mutex);
+       return regmap_update_bits(data->regmap, bmi323_hw[sensor].config,
+                                 BMI323_ACC_GYRO_CONF_SCL_MSK,
+                                 FIELD_PREP(BMI323_ACC_GYRO_CONF_SCL_MSK,
+                                            scale_raw));
+}
+
+/*
+ * Report the discrete value lists for sampling frequency, scale and
+ * oversampling ratio. The "* 2" lengths account for the {int, micro}
+ * pairs in the frequency/scale tables.
+ */
+static int bmi323_read_avail(struct iio_dev *indio_dev,
+                            struct iio_chan_spec const *chan,
+                            const int **vals, int *type, int *length,
+                            long mask)
+{
+       enum bmi323_sensor_type sensor;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_SAMP_FREQ:
+               *type = IIO_VAL_INT_PLUS_MICRO;
+               *vals = (const int *)bmi323_acc_gyro_odr;
+               *length = ARRAY_SIZE(bmi323_acc_gyro_odr) * 2;
+               return IIO_AVAIL_LIST;
+       case IIO_CHAN_INFO_SCALE:
+               sensor = bmi323_iio_to_sensor(chan->type);
+               *type = IIO_VAL_INT_PLUS_MICRO;
+               *vals = (const int *)bmi323_hw[sensor].scale_table;
+               *length = bmi323_hw[sensor].scale_table_len * 2;
+               return IIO_AVAIL_LIST;
+       case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+               *type = IIO_VAL_INT;
+               *vals = (const int *)bmi323_accel_gyro_avrg;
+               *length = ARRAY_SIZE(bmi323_accel_gyro_avrg);
+               return IIO_AVAIL_LIST;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * IIO write_raw callback. Frequency/scale/oversampling writes require
+ * direct mode (no buffer active); ENABLE toggles the step counter and
+ * writing 0 to PROCESSED resets the step count.
+ */
+static int bmi323_write_raw(struct iio_dev *indio_dev,
+                           struct iio_chan_spec const *chan, int val,
+                           int val2, long mask)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+       int ret;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_SAMP_FREQ:
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
+
+               ret = bmi323_set_odr(data, bmi323_iio_to_sensor(chan->type),
+                                    val, val2);
+               iio_device_release_direct_mode(indio_dev);
+               return ret;
+       case IIO_CHAN_INFO_SCALE:
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
+
+               ret = bmi323_set_scale(data, bmi323_iio_to_sensor(chan->type),
+                                      val, val2);
+               iio_device_release_direct_mode(indio_dev);
+               return ret;
+       case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
+
+               ret = bmi323_set_average(data, bmi323_iio_to_sensor(chan->type),
+                                        val);
+
+               iio_device_release_direct_mode(indio_dev);
+               return ret;
+       case IIO_CHAN_INFO_ENABLE:
+               return bmi323_enable_steps(data, val);
+       case IIO_CHAN_INFO_PROCESSED:
+               scoped_guard(mutex, &data->mutex) {
+                       /* Only value 0 is accepted, and only with the
+                        * step counter enabled.
+                        */
+                       if (val || !FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK,
+                                             data->feature_events))
+                               return -EINVAL;
+
+                       /* Clear step counter value */
+                       ret = bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG,
+                                                   BMI323_STEP_SC1_RST_CNT_MSK,
+                                                   FIELD_PREP(BMI323_STEP_SC1_RST_CNT_MSK,
+                                                              1));
+               }
+               return ret;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * IIO read_raw callback. Raw accel/gyro reads require direct mode;
+ * PROCESSED returns the step count, ENABLE reflects the cached step
+ * counter enable bit.
+ */
+static int bmi323_read_raw(struct iio_dev *indio_dev,
+                          struct iio_chan_spec const *chan, int *val,
+                          int *val2, long mask)
+{
+       struct bmi323_data *data = iio_priv(indio_dev);
+       int ret;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_PROCESSED:
+               return bmi323_read_steps(data, val);
+       case IIO_CHAN_INFO_RAW:
+               switch (chan->type) {
+               case IIO_ACCEL:
+               case IIO_ANGL_VEL:
+                       ret = iio_device_claim_direct_mode(indio_dev);
+                       if (ret)
+                               return ret;
+
+                       ret = bmi323_read_axis(data, chan, val);
+
+                       iio_device_release_direct_mode(indio_dev);
+                       return ret;
+               case IIO_TEMP:
+                       return bmi323_get_temp_data(data, val);
+               default:
+                       return -EINVAL;
+               }
+       case IIO_CHAN_INFO_SAMP_FREQ:
+               return bmi323_get_odr(data, bmi323_iio_to_sensor(chan->type),
+                                     val, val2);
+       case IIO_CHAN_INFO_SCALE:
+               switch (chan->type) {
+               case IIO_ACCEL:
+               case IIO_ANGL_VEL:
+                       /* Accel/gyro scales are pure fractions (< 1) */
+                       *val = 0;
+                       return bmi323_get_scale(data,
+                                               bmi323_iio_to_sensor(chan->type),
+                                               val2);
+               case IIO_TEMP:
+                       *val = BMI323_TEMP_SCALE / MEGA;
+                       *val2 = BMI323_TEMP_SCALE % MEGA;
+                       return IIO_VAL_INT_PLUS_MICRO;
+               default:
+                       return -EINVAL;
+               }
+       case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+               return bmi323_get_average(data,
+                                         bmi323_iio_to_sensor(chan->type),
+                                         val);
+       case IIO_CHAN_INFO_OFFSET:
+               switch (chan->type) {
+               case IIO_TEMP:
+                       *val = BMI323_TEMP_OFFSET;
+                       return IIO_VAL_INT;
+               default:
+                       return -EINVAL;
+               }
+       case IIO_CHAN_INFO_ENABLE:
+               scoped_guard(mutex, &data->mutex)
+                       *val = FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK,
+                                        data->feature_events);
+               return IIO_VAL_INT;
+       default:
+               return -EINVAL;
+       }
+}
+
+/* IIO core callbacks for the BMI323 device */
+static const struct iio_info bmi323_info = {
+       .read_raw = bmi323_read_raw,
+       .write_raw = bmi323_write_raw,
+       .read_avail = bmi323_read_avail,
+       .hwfifo_set_watermark = bmi323_set_watermark,
+       .write_event_config = bmi323_write_event_config,
+       .read_event_config = bmi323_read_event_config,
+       .write_event_value = bmi323_write_event_value,
+       .read_event_value = bmi323_read_event_value,
+       .event_attrs = &bmi323_event_attribute_group,
+};
+
+#define BMI323_SCAN_MASK_ACCEL_3AXIS           \
+       (BIT(BMI323_ACCEL_X) | BIT(BMI323_ACCEL_Y) | BIT(BMI323_ACCEL_Z))
+
+#define BMI323_SCAN_MASK_GYRO_3AXIS            \
+       (BIT(BMI323_GYRO_X) | BIT(BMI323_GYRO_Y) | BIT(BMI323_GYRO_Z))
+
+/*
+ * Allowed scan configurations; each sensor is captured all-axes-or-none.
+ * Zero-terminated as required by the IIO core.
+ */
+static const unsigned long bmi323_avail_scan_masks[] = {
+       /* 3-axis accel */
+       BMI323_SCAN_MASK_ACCEL_3AXIS,
+       /* 3-axis gyro */
+       BMI323_SCAN_MASK_GYRO_3AXIS,
+       /* 3-axis accel + 3-axis gyro */
+       BMI323_SCAN_MASK_ACCEL_3AXIS | BMI323_SCAN_MASK_GYRO_3AXIS,
+       0
+};
+
+/*
+ * Configure the electrical behavior of the selected interrupt pin:
+ * latched vs pulsed, polarity, push-pull vs open-drain, and enable
+ * the pin's output stage.
+ */
+static int bmi323_int_pin_config(struct bmi323_data *data,
+                                enum bmi323_irq_pin irq_pin,
+                                bool active_high, bool open_drain, bool latch)
+{
+       unsigned int mask, field_value;
+       int ret;
+
+       ret = regmap_update_bits(data->regmap, BMI323_IO_INT_CONF_REG,
+                                BMI323_IO_INT_LTCH_MSK,
+                                FIELD_PREP(BMI323_IO_INT_LTCH_MSK, latch));
+       if (ret)
+               return ret;
+
+       /* No extra hold duration on generic interrupts */
+       ret = bmi323_update_ext_reg(data, BMI323_GEN_SET1_REG,
+                                   BMI323_GEN_HOLD_DUR_MSK,
+                                   FIELD_PREP(BMI323_GEN_HOLD_DUR_MSK, 0));
+       if (ret)
+               return ret;
+
+       switch (irq_pin) {
+       case BMI323_IRQ_INT1:
+               mask = BMI323_IO_INT1_LVL_OD_OP_MSK;
+
+               field_value = FIELD_PREP(BMI323_IO_INT1_LVL_MSK, active_high) |
+                             FIELD_PREP(BMI323_IO_INT1_OD_MSK, open_drain) |
+                             FIELD_PREP(BMI323_IO_INT1_OP_EN_MSK, 1);
+               break;
+       case BMI323_IRQ_INT2:
+               mask = BMI323_IO_INT2_LVL_OD_OP_MSK;
+
+               field_value = FIELD_PREP(BMI323_IO_INT2_LVL_MSK, active_high) |
+                             FIELD_PREP(BMI323_IO_INT2_OD_MSK, open_drain) |
+                             FIELD_PREP(BMI323_IO_INT2_OP_EN_MSK, 1);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return regmap_update_bits(data->regmap, BMI323_IO_INT_CTR_REG, mask,
+                                 field_value);
+}
+
+/*
+ * Optional interrupt/trigger setup. Looks for an "INT1" or "INT2"
+ * interrupt in firmware; when neither is present the driver still works
+ * in polled/direct mode (returns 0 without registering a trigger).
+ *
+ * Edge-triggered IRQs use pulsed interrupts, level-triggered IRQs use
+ * latched interrupts, matching the pin polarity to the IRQ type.
+ */
+static int bmi323_trigger_probe(struct bmi323_data *data,
+                               struct iio_dev *indio_dev)
+{
+       bool open_drain, active_high, latch;
+       struct fwnode_handle *fwnode;
+       enum bmi323_irq_pin irq_pin;
+       int ret, irq, irq_type;
+       struct irq_data *desc;
+
+       fwnode = dev_fwnode(data->dev);
+       if (!fwnode)
+               return -ENODEV;
+
+       irq = fwnode_irq_get_byname(fwnode, "INT1");
+       if (irq > 0) {
+               irq_pin = BMI323_IRQ_INT1;
+       } else {
+               /*
+                * NOTE(review): the INT2 branch treats irq == 0 as valid
+                * while the INT1 branch does not; presumably
+                * fwnode_irq_get_byname() never returns 0 — confirm.
+                */
+               irq = fwnode_irq_get_byname(fwnode, "INT2");
+               if (irq < 0)
+                       return 0;
+
+               irq_pin = BMI323_IRQ_INT2;
+       }
+
+       desc = irq_get_irq_data(irq);
+       if (!desc)
+               return dev_err_probe(data->dev, -EINVAL,
+                                    "Could not find IRQ %d\n", irq);
+
+       irq_type = irqd_get_trigger_type(desc);
+       switch (irq_type) {
+       case IRQF_TRIGGER_RISING:
+               latch = false;
+               active_high = true;
+               break;
+       case IRQF_TRIGGER_HIGH:
+               latch = true;
+               active_high = true;
+               break;
+       case IRQF_TRIGGER_FALLING:
+               latch = false;
+               active_high = false;
+               break;
+       case IRQF_TRIGGER_LOW:
+               latch = true;
+               active_high = false;
+               break;
+       default:
+               return dev_err_probe(data->dev, -EINVAL,
+                                    "Invalid interrupt type 0x%x specified\n",
+                                    irq_type);
+       }
+
+       open_drain = fwnode_property_read_bool(fwnode, "drive-open-drain");
+
+       ret = bmi323_int_pin_config(data, irq_pin, active_high, open_drain,
+                                   latch);
+       if (ret)
+               return dev_err_probe(data->dev, ret,
+                                    "Failed to configure irq line\n");
+
+       data->trig = devm_iio_trigger_alloc(data->dev, "%s-trig-%d",
+                                           indio_dev->name, irq_pin);
+       if (!data->trig)
+               return -ENOMEM;
+
+       data->trig->ops = &bmi323_trigger_ops;
+       iio_trigger_set_drvdata(data->trig, data);
+
+       ret = devm_request_threaded_irq(data->dev, irq, NULL,
+                                       bmi323_irq_thread_handler,
+                                       IRQF_ONESHOT, "bmi323-int", indio_dev);
+       if (ret)
+               return dev_err_probe(data->dev, ret, "Failed to request IRQ\n");
+
+       ret = devm_iio_trigger_register(data->dev, data->trig);
+       if (ret)
+               return dev_err_probe(data->dev, ret,
+                                    "Trigger registration failed\n");
+
+       /* Remember which pin carries our interrupts for later routing */
+       data->irq_pin = irq_pin;
+
+       return 0;
+}
+
+/*
+ * Start (or stop, when @en is false) the on-chip feature engine and
+ * wait for it to report readiness.
+ */
+static int bmi323_feature_engine_enable(struct bmi323_data *data, bool en)
+{
+       unsigned int feature_status;
+       int ret;
+
+       if (!en)
+               return regmap_write(data->regmap, BMI323_FEAT_CTRL_REG, 0);
+
+       /*
+        * NOTE(review): 0x012c is the vendor-specified feature engine
+        * startup value — presumably from the datasheet init sequence;
+        * confirm and name the constant.
+        */
+       ret = regmap_write(data->regmap, BMI323_FEAT_IO2_REG, 0x012c);
+       if (ret)
+               return ret;
+
+       ret = regmap_write(data->regmap, BMI323_FEAT_IO_STATUS_REG,
+                          BMI323_FEAT_IO_STATUS_MSK);
+       if (ret)
+               return ret;
+
+       ret = regmap_write(data->regmap, BMI323_FEAT_CTRL_REG,
+                          BMI323_FEAT_ENG_EN_MSK);
+       if (ret)
+               return ret;
+
+       /*
+        * It takes around 4 msec to enable the Feature engine, so check
+        * the status of the feature engine every 2 msec for a maximum
+        * of 5 trials.
+        */
+       ret = regmap_read_poll_timeout(data->regmap, BMI323_FEAT_IO1_REG,
+                                      feature_status,
+                                      FIELD_GET(BMI323_FEAT_IO1_ERR_MSK,
+                                                feature_status) == 1,
+                                      BMI323_FEAT_ENG_POLL,
+                                      BMI323_FEAT_ENG_TIMEOUT);
+       if (ret)
+               return dev_err_probe(data->dev, -EINVAL,
+                               "Failed to enable feature engine\n");
+
+       return 0;
+}
+
+/* devm teardown action: power down both sensing units. */
+static void bmi323_disable(void *data_ptr)
+{
+       static const enum bmi323_sensor_type sensors[] = {
+               BMI323_ACCEL, BMI323_GYRO,
+       };
+       struct bmi323_data *data = data_ptr;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(sensors); i++)
+               bmi323_set_mode(data, sensors[i], ACC_GYRO_MODE_DISABLE);
+}
+
+/* Set the low-pass filter 3 dB bandwidth (expressed relative to ODR). */
+static int bmi323_set_bw(struct bmi323_data *data,
+                        enum bmi323_sensor_type sensor, enum bmi323_3db_bw bw)
+{
+       return regmap_update_bits(data->regmap, bmi323_hw[sensor].config,
+                                 BMI323_ACC_GYRO_CONF_BW_MSK,
+                                 FIELD_PREP(BMI323_ACC_GYRO_CONF_BW_MSK, bw));
+}
+
+/*
+ * Bring the chip to a known, working state: soft reset, identity check,
+ * feature engine start, and default bandwidth/ODR configuration.
+ * Registers a devm action that powers the sensors down on teardown.
+ */
+static int bmi323_init(struct bmi323_data *data)
+{
+       int ret, val;
+
+       /*
+        * Perform soft reset to make sure the device is in a known state after
+        * start up. A delay of 1.5 ms is required after reset.
+        * See datasheet section 5.17 "Soft Reset".
+        */
+       ret = regmap_write(data->regmap, BMI323_CMD_REG, BMI323_RST_VAL);
+       if (ret)
+               return ret;
+
+       usleep_range(1500, 2000);
+
+       /*
+        * Dummy read is required to enable SPI interface after reset.
+        * See datasheet section 7.2.1 "Protocol Selection".
+        */
+       regmap_read(data->regmap, BMI323_CHIP_ID_REG, &val);
+
+       ret = regmap_read(data->regmap, BMI323_STATUS_REG, &val);
+       if (ret)
+               return ret;
+
+       /* POR bit must be set after a successful reset */
+       if (!FIELD_GET(BMI323_STATUS_POR_MSK, val))
+               return dev_err_probe(data->dev, -EINVAL,
+                                    "Sensor initialization error\n");
+
+       ret = regmap_read(data->regmap, BMI323_CHIP_ID_REG, &val);
+       if (ret)
+               return ret;
+
+       if (FIELD_GET(BMI323_CHIP_ID_MSK, val) != BMI323_CHIP_ID_VAL)
+               return dev_err_probe(data->dev, -EINVAL, "Chip ID mismatch\n");
+
+       ret = bmi323_feature_engine_enable(data, true);
+       if (ret)
+               return ret;
+
+       ret = regmap_read(data->regmap, BMI323_ERR_REG, &val);
+       if (ret)
+               return ret;
+
+       if (val)
+               return dev_err_probe(data->dev, -EINVAL,
+                                    "Sensor power error = 0x%x\n", val);
+
+       /*
+        * Set the Bandwidth coefficient which defines the 3 dB cutoff
+        * frequency in relation to the ODR.
+        */
+       ret = bmi323_set_bw(data, BMI323_ACCEL, BMI323_BW_ODR_BY_2);
+       if (ret)
+               return ret;
+
+       ret = bmi323_set_bw(data, BMI323_GYRO, BMI323_BW_ODR_BY_2);
+       if (ret)
+               return ret;
+
+       /* Conservative 25 Hz default rate for both sensors */
+       ret = bmi323_set_odr(data, BMI323_ACCEL, 25, 0);
+       if (ret)
+               return ret;
+
+       ret = bmi323_set_odr(data, BMI323_GYRO, 25, 0);
+       if (ret)
+               return ret;
+
+       return devm_add_action_or_reset(data->dev, bmi323_disable, data);
+}
+
+/*
+ * bmi323_core_probe - bus-independent probe for the BMI323 IMU
+ * @dev: bus device whose glue driver already registered a regmap
+ *
+ * Powers and initializes the chip, then registers the IIO device with
+ * optional data-ready trigger and FIFO buffer support. All resources
+ * are devm-managed; returns 0 on success or a negative errno.
+ */
+int bmi323_core_probe(struct device *dev)
+{
+       static const char * const regulator_names[] = { "vdd", "vddio" };
+       struct iio_dev *indio_dev;
+       struct bmi323_data *data;
+       struct regmap *regmap;
+       int ret;
+
+       regmap = dev_get_regmap(dev, NULL);
+       if (!regmap)
+               return dev_err_probe(dev, -ENODEV, "Failed to get regmap\n");
+
+       indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+       if (!indio_dev)
+               return dev_err_probe(dev, -ENOMEM,
+                                    "Failed to allocate device\n");
+
+       ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
+                                            regulator_names);
+       if (ret)
+               return dev_err_probe(dev, ret, "Failed to enable regulators\n");
+
+       data = iio_priv(indio_dev);
+       data->dev = dev;
+       data->regmap = regmap;
+       mutex_init(&data->mutex);
+
+       /* Propagate the callee's error code instead of flattening to -EINVAL */
+       ret = bmi323_init(data);
+       if (ret)
+               return ret;
+
+       ret = iio_read_mount_matrix(dev, &data->orientation);
+       if (ret)
+               return ret;
+
+       indio_dev->name = "bmi323-imu";
+       indio_dev->info = &bmi323_info;
+       indio_dev->channels = bmi323_channels;
+       indio_dev->num_channels = ARRAY_SIZE(bmi323_channels);
+       indio_dev->available_scan_masks = bmi323_avail_scan_masks;
+       indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+       dev_set_drvdata(data->dev, indio_dev);
+
+       ret = bmi323_trigger_probe(data, indio_dev);
+       if (ret)
+               return ret;
+
+       ret = devm_iio_triggered_buffer_setup_ext(data->dev, indio_dev,
+                                                 &iio_pollfunc_store_time,
+                                                 bmi323_trigger_handler,
+                                                 IIO_BUFFER_DIRECTION_IN,
+                                                 &bmi323_buffer_ops,
+                                                 bmi323_fifo_attributes);
+       if (ret)
+               return dev_err_probe(data->dev, ret,
+                                    "Failed to setup trigger buffer\n");
+
+       ret = devm_iio_device_register(data->dev, indio_dev);
+       if (ret)
+               return dev_err_probe(data->dev, ret,
+                                    "Unable to register iio device\n");
+
+       return 0;
+}
+EXPORT_SYMBOL_NS_GPL(bmi323_core_probe, IIO_BMI323);
+
+MODULE_DESCRIPTION("Bosch BMI323 IMU driver");
+MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bmi323/bmi323_i2c.c b/drivers/iio/imu/bmi323/bmi323_i2c.c
new file mode 100644 (file)
index 0000000..20a8001
--- /dev/null
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I2C driver for Bosch BMI323 6-Axis IMU.
+ *
+ * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bmi323.h"
+
+/*
+ * Per-device I2C glue state. The RX scratch buffer is sized for the
+ * largest transfer (full FIFO) plus the two leading dummy bytes.
+ */
+struct bmi323_i2c_priv {
+       struct i2c_client *i2c;
+       u8 i2c_rx_buffer[BMI323_FIFO_LENGTH_IN_BYTES + BMI323_I2C_DUMMY];
+};
+
+/*
+ * From BMI323 datasheet section 4: Notes on the Serial Interface Support.
+ * Each I2C register read operation requires to read two dummy bytes before
+ * the actual payload.
+ */
+/*
+ * From BMI323 datasheet section 4: Notes on the Serial Interface Support.
+ * Each I2C register read operation requires to read two dummy bytes before
+ * the actual payload.
+ */
+static int bmi323_regmap_i2c_read(void *context, const void *reg_buf,
+                                 size_t reg_size, void *val_buf,
+                                 size_t val_size)
+{
+       struct bmi323_i2c_priv *priv = context;
+       struct i2c_msg msgs[2];
+       int ret;
+
+       /* Write the register address, then read dummy bytes + payload */
+       msgs[0].addr = priv->i2c->addr;
+       msgs[0].flags = priv->i2c->flags;
+       msgs[0].len = reg_size;
+       msgs[0].buf = (u8 *)reg_buf;
+
+       msgs[1].addr = priv->i2c->addr;
+       msgs[1].len = val_size + BMI323_I2C_DUMMY;
+       msgs[1].buf = priv->i2c_rx_buffer;
+       msgs[1].flags = priv->i2c->flags | I2C_M_RD;
+
+       ret = i2c_transfer(priv->i2c->adapter, msgs, ARRAY_SIZE(msgs));
+       if (ret < 0)
+               return -EIO;
+
+       /* Strip the dummy bytes before handing the data back to regmap */
+       memcpy(val_buf, priv->i2c_rx_buffer + BMI323_I2C_DUMMY, val_size);
+
+       return 0;
+}
+
+/*
+ * regmap bus write: first byte of @data is the register address, the
+ * rest is the payload.
+ */
+static int bmi323_regmap_i2c_write(void *context, const void *data,
+                                  size_t count)
+{
+       struct bmi323_i2c_priv *priv = context;
+       u8 reg;
+
+       reg = *(u8 *)data;
+       return i2c_smbus_write_i2c_block_data(priv->i2c, reg,
+                                             count - sizeof(u8),
+                                             data + sizeof(u8));
+}
+
+static struct regmap_bus bmi323_regmap_bus = {
+       .read = bmi323_regmap_i2c_read,
+       .write = bmi323_regmap_i2c_write,
+};
+
+/* 8-bit registers holding 16-bit little-endian values */
+static const struct regmap_config bmi323_i2c_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+       .max_register = BMI323_CFG_RES_REG,
+       .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+/* I2C glue probe: register the custom regmap, then defer to the core. */
+static int bmi323_i2c_probe(struct i2c_client *i2c)
+{
+       struct device *dev = &i2c->dev;
+       struct bmi323_i2c_priv *priv;
+       struct regmap *regmap;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->i2c = i2c;
+       regmap = devm_regmap_init(dev, &bmi323_regmap_bus, priv,
+                                 &bmi323_i2c_regmap_config);
+       if (IS_ERR(regmap))
+               return dev_err_probe(dev, PTR_ERR(regmap),
+                                    "Failed to initialize I2C Regmap\n");
+
+       /* The core retrieves the regmap via dev_get_regmap() */
+       return bmi323_core_probe(dev);
+}
+
+static const struct i2c_device_id bmi323_i2c_ids[] = {
+       { "bmi323" },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, bmi323_i2c_ids);
+
+static const struct of_device_id bmi323_of_i2c_match[] = {
+       { .compatible = "bosch,bmi323" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, bmi323_of_i2c_match);
+
+static struct i2c_driver bmi323_i2c_driver = {
+       .driver = {
+               .name = "bmi323",
+               .of_match_table = bmi323_of_i2c_match,
+       },
+       .probe = bmi323_i2c_probe,
+       .id_table = bmi323_i2c_ids,
+};
+module_i2c_driver(bmi323_i2c_driver);
+
+MODULE_DESCRIPTION("Bosch BMI323 IMU driver");
+MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_BMI323);
diff --git a/drivers/iio/imu/bmi323/bmi323_spi.c b/drivers/iio/imu/bmi323/bmi323_spi.c
new file mode 100644 (file)
index 0000000..7b1e812
--- /dev/null
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPI driver for Bosch BMI323 6-Axis IMU.
+ *
+ * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "bmi323.h"
+
+/*
+ * From BMI323 datasheet section 4: Notes on the Serial Interface Support.
+ * Each SPI register read operation requires reading one dummy byte before
+ * the actual payload.
+ */
+static int bmi323_regmap_spi_read(void *context, const void *reg_buf,
+                                 size_t reg_size, void *val_buf,
+                                 size_t val_size)
+{
+       struct spi_device *spi = context;
+
+       return spi_write_then_read(spi, reg_buf, reg_size, val_buf, val_size);
+}
+
+static int bmi323_regmap_spi_write(void *context, const void *data,
+                                  size_t count)
+{
+       struct spi_device *spi = context;
+       u8 *data_buff = (u8 *)data;
+
+       data_buff[1] = data_buff[0];
+       return spi_write(spi, data_buff + 1, count - 1);
+}
+
+static struct regmap_bus bmi323_regmap_bus = {
+       .read = bmi323_regmap_spi_read,
+       .write = bmi323_regmap_spi_write,
+};
+
+static const struct regmap_config bmi323_spi_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+       .pad_bits = 8,
+       .read_flag_mask = BIT(7),
+       .max_register = BMI323_CFG_RES_REG,
+       .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int bmi323_spi_probe(struct spi_device *spi)
+{
+       struct device *dev = &spi->dev;
+       struct regmap *regmap;
+
+       regmap = devm_regmap_init(dev, &bmi323_regmap_bus, dev,
+                                 &bmi323_spi_regmap_config);
+       if (IS_ERR(regmap))
+               return dev_err_probe(dev, PTR_ERR(regmap),
+                                    "Failed to initialize SPI Regmap\n");
+
+       return bmi323_core_probe(dev);
+}
+
+static const struct spi_device_id bmi323_spi_ids[] = {
+       { "bmi323" },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, bmi323_spi_ids);
+
+static const struct of_device_id bmi323_of_spi_match[] = {
+       { .compatible = "bosch,bmi323" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, bmi323_of_spi_match);
+
+static struct spi_driver bmi323_spi_driver = {
+       .driver = {
+               .name = "bmi323",
+               .of_match_table = bmi323_of_spi_match,
+       },
+       .probe = bmi323_spi_probe,
+       .id_table = bmi323_spi_ids,
+};
+module_spi_driver(bmi323_spi_driver);
+
+MODULE_DESCRIPTION("Bosch BMI323 IMU driver");
+MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_BMI323);
index 57728a568471560a18dc94d6c06eb05359a6cd39..5677bdf4f846ebd0405fbba43d1b9ad108033a3a 100644 (file)
@@ -378,12 +378,12 @@ static void bno055_ser_handle_rx(struct bno055_ser_priv *priv, int status)
  * Also, we assume to RX one pkt per time (i.e. the HW doesn't send anything
  * unless we require to AND we don't queue more than one request per time).
  */
-static int bno055_ser_receive_buf(struct serdev_device *serdev,
-                                 const unsigned char *buf, size_t size)
+static ssize_t bno055_ser_receive_buf(struct serdev_device *serdev,
+                                     const u8 *buf, size_t size)
 {
        int status;
        struct bno055_ser_priv *priv = serdev_device_get_drvdata(serdev);
-       int remaining = size;
+       size_t remaining = size;
 
        if (size == 0)
                return 0;
index b1e4fde27d25609d27a359a228c84e7f31b2881f..f67bd5a39beb3441114ec5f500bf0bd086917872 100644 (file)
@@ -137,10 +137,7 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
 out_unlock:
        mutex_unlock(&st->lock);
        /* sleep maximum required time */
-       if (sleep_accel > sleep_temp)
-               sleep = sleep_accel;
-       else
-               sleep = sleep_temp;
+       sleep = max(sleep_accel, sleep_temp);
        if (sleep)
                msleep(sleep);
        return ret;
index 6ef1df9d60b77de585b8cc30307a0d32576e2cb4..b52f328fd26ce7ab130b40a21c5ba8c317920fbb 100644 (file)
@@ -424,10 +424,7 @@ out_unlock:
        mutex_unlock(&st->lock);
 
        /* sleep maximum required time */
-       if (sleep_sensor > sleep_temp)
-               sleep = sleep_sensor;
-       else
-               sleep = sleep_temp;
+       sleep = max(sleep_sensor, sleep_temp);
        if (sleep)
                msleep(sleep);
 
index 3bf946e56e1dfd4f4b766d2b289caee4cd468072..3df0a715e8856039fc185f69ed193ae9dcbabb0a 100644 (file)
@@ -137,10 +137,7 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
 out_unlock:
        mutex_unlock(&st->lock);
        /* sleep maximum required time */
-       if (sleep_gyro > sleep_temp)
-               sleep = sleep_gyro;
-       else
-               sleep = sleep_temp;
+       sleep = max(sleep_gyro, sleep_temp);
        if (sleep)
                msleep(sleep);
        return ret;
index 6b034dccc3b17ee4e7898ca9bbda08243a8cc544..0e94e5335e9371969f145f41c2c6f345ca90f0ac 100644 (file)
@@ -567,15 +567,12 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
 static int inv_mpu6050_sensor_set(struct inv_mpu6050_state  *st, int reg,
                                int axis, int val)
 {
-       int ind, result;
+       int ind;
        __be16 d = cpu_to_be16(val);
 
        ind = (axis - IIO_MOD_X) * 2;
-       result = regmap_bulk_write(st->map, reg + ind, &d, sizeof(d));
-       if (result)
-               return -EINVAL;
 
-       return 0;
+       return regmap_bulk_write(st->map, reg + ind, &d, sizeof(d));
 }
 
 static int inv_mpu6050_sensor_show(struct inv_mpu6050_state  *st, int reg,
@@ -587,7 +584,7 @@ static int inv_mpu6050_sensor_show(struct inv_mpu6050_state  *st, int reg,
        ind = (axis - IIO_MOD_X) * 2;
        result = regmap_bulk_read(st->map, reg + ind, &d, sizeof(d));
        if (result)
-               return -EINVAL;
+               return result;
        *val = (short)be16_to_cpup(&d);
 
        return IIO_VAL_INT;
index 176d31d9f9d807ed2b8d83ab706c085cf2e56590..b581a7e805662134cd4bc8a0c80e9a44f30ee325 100644 (file)
@@ -413,6 +413,22 @@ static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
 {
        if (bitmap_empty(mask, masklength))
                return NULL;
+       /*
+        * The condition here does not handle multi-long masks correctly.
+        * It only checks the first long to be zero, and will use such a mask
+        * as a terminator even if there were bits set after the first long.
+        *
+        * Correct check would require using:
+        * while (!bitmap_empty(av_masks, masklength))
+        * instead. This is potentially hazardous because the
+        * available_scan_masks is a zero-terminated array of longs - and
+        * using the proper bitmap_empty() check for multi-long wide masks
+        * would require the array to be terminated with multiple zero longs -
+        * which is not such a usual pattern.
+        *
+        * As of this writing, no multi-long wide masks were found in-tree, so
+        * the simple while (*av_masks) check is working.
+        */
        while (*av_masks) {
                if (strict) {
                        if (bitmap_equal(mask, av_masks, masklength))
@@ -600,7 +616,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
-                                    0,
+                                    IIO_SEPARATE,
                                     &indio_dev->dev,
                                     buffer,
                                     &buffer->buffer_attr_list);
@@ -613,7 +629,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
-                                            0,
+                                            IIO_SEPARATE,
                                             &indio_dev->dev,
                                             buffer,
                                             &buffer->buffer_attr_list);
@@ -623,7 +639,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
-                                            0,
+                                            IIO_SEPARATE,
                                             &indio_dev->dev,
                                             buffer,
                                             &buffer->buffer_attr_list);
index c77745b594bd6ecddfd483d255a308efc2c5cd1c..9a85752124ddc43b10ecb12ed2c48f605395b170 100644 (file)
@@ -117,6 +117,8 @@ static const char * const iio_modifier_names[] = {
        [IIO_MOD_LIGHT_GREEN] = "green",
        [IIO_MOD_LIGHT_BLUE] = "blue",
        [IIO_MOD_LIGHT_UV] = "uv",
+       [IIO_MOD_LIGHT_UVA] = "uva",
+       [IIO_MOD_LIGHT_UVB] = "uvb",
        [IIO_MOD_LIGHT_DUV] = "duv",
        [IIO_MOD_QUATERNION] = "quaternion",
        [IIO_MOD_TEMP_AMBIENT] = "ambient",
@@ -182,6 +184,7 @@ static const char * const iio_chan_info_postfix[] = {
        [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
        [IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
        [IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
+       [IIO_CHAN_INFO_TROUGH] = "trough_raw",
 };
 /**
  * iio_device_id() - query the unique ID for the device
@@ -1896,6 +1899,66 @@ static int iio_check_extended_name(const struct iio_dev *indio_dev)
 
 static const struct iio_buffer_setup_ops noop_ring_setup_ops;
 
+static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
+{
+       unsigned int num_masks, masklength, longs_per_mask;
+       const unsigned long *av_masks;
+       int i;
+
+       av_masks = indio_dev->available_scan_masks;
+       masklength = indio_dev->masklength;
+       longs_per_mask = BITS_TO_LONGS(masklength);
+
+       /*
+        * The code determining how many available_scan_masks is in the array
+        * will be assuming the end of masks when first long with all bits
+        * zeroed is encountered. This is incorrect for masks where mask
+        * consists of more than one long, and where some of the available masks
+        * has long worth of bits zeroed (but has subsequent bit(s) set). This
+        * is a safety measure against bug where array of masks is terminated by
+        * a single zero while mask width is greater than width of a long.
+        */
+       if (longs_per_mask > 1)
+               dev_warn(indio_dev->dev.parent,
+                        "multi long available scan masks not fully supported\n");
+
+       if (bitmap_empty(av_masks, masklength))
+               dev_warn(indio_dev->dev.parent, "empty scan mask\n");
+
+       for (num_masks = 0; *av_masks; num_masks++)
+               av_masks += longs_per_mask;
+
+       if (num_masks < 2)
+               return;
+
+       av_masks = indio_dev->available_scan_masks;
+
+       /*
+        * Go through all the masks from first to one before the last, and see
+        * that no mask found later from the available_scan_masks array is a
+        * subset of mask found earlier. If this happens, then the mask found
+        * later will never get used because scanning the array is stopped when
+        * the first suitable mask is found. Drivers should order the array of
+        * available masks in the order of preference (presumably the least
+        * costly to access masks first).
+        */
+       for (i = 0; i < num_masks - 1; i++) {
+               const unsigned long *mask1;
+               int j;
+
+               mask1 = av_masks + i * longs_per_mask;
+               for (j = i + 1; j < num_masks; j++) {
+                       const unsigned long *mask2;
+
+                       mask2 = av_masks + j * longs_per_mask;
+                       if (bitmap_subset(mask2, mask1, masklength))
+                               dev_warn(indio_dev->dev.parent,
+                                        "available_scan_mask %d subset of %d. Never used\n",
+                                        j, i);
+               }
+       }
+}
+
 int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
 {
        struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
@@ -1934,6 +1997,9 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
                goto error_unreg_debugfs;
        }
 
+       if (indio_dev->available_scan_masks)
+               iio_sanity_check_avail_scan_masks(indio_dev);
+
        ret = iio_device_register_sysfs(indio_dev);
        if (ret) {
                dev_err(indio_dev->dev.parent,
index 45edba797e4c7e9dfd89e43a250ac5dddffa215c..143003232d1c2ae1d0eeca6eb69610ff641ccc28 100644 (file)
@@ -252,6 +252,21 @@ config ISL29125
          To compile this driver as a module, choose M here: the module will be
          called isl29125.
 
+config ISL76682
+       tristate "Intersil ISL76682 Light Sensor"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         Say Y here if you want to build a driver for the Intersil ISL76682
+         Ambient Light Sensor and IR Intensity sensor. This driver provides
+         the readouts via standard IIO sysfs and device interface. Both ALS
+         illuminance and IR illuminance are provided raw with separate scale
+         setting which can be configured via sysfs, the default scale is 1000
+         lux, other options are 4000/16000/64000 lux.
+
+         To compile this driver as a module, choose M here: the module will be
+         called isl76682.
+
 config HID_SENSOR_ALS
        depends on HID_SENSOR_HUB
        select IIO_BUFFER
@@ -347,6 +362,17 @@ config SENSORS_LM3533
          changes. The ALS-control output values can be set per zone for the
          three current output channels.
 
+config LTR390
+       tristate "LTR-390UV-01 ambient light and UV sensor"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         If you say yes here you get support for the Lite-On LTR-390UV-01
+         ambient light and UV sensor.
+
+         This driver can also be built as a module.  If so, the module
+         will be called ltr390.
+
 config LTR501
        tristate "LTR-501ALS-01 light sensor"
        depends on I2C
@@ -637,6 +663,17 @@ config VEML6070
          To compile this driver as a module, choose M here: the
          module will be called veml6070.
 
+config VEML6075
+       tristate "VEML6075 UVA and UVB light sensor"
+       select REGMAP_I2C
+       depends on I2C
+       help
+         Say Y here if you want to build a driver for the Vishay VEML6075 UVA
+         and UVB light sensor.
+
+         To compile this driver as a module, choose M here: the
+         module will be called veml6075.
+
 config VL6180
        tristate "VL6180 ALS, range and proximity sensor"
        depends on I2C
index c0db4c4c36ec9f1ed9aefeb02fb583b94e361874..2e5fdb33e0e9795ed2e7931e05963e5e0fa5009e 100644 (file)
@@ -28,8 +28,10 @@ obj-$(CONFIG_IQS621_ALS)     += iqs621-als.o
 obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
 obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
 obj-$(CONFIG_ISL29125)         += isl29125.o
+obj-$(CONFIG_ISL76682)         += isl76682.o
 obj-$(CONFIG_JSA1212)          += jsa1212.o
 obj-$(CONFIG_SENSORS_LM3533)   += lm3533-als.o
+obj-$(CONFIG_LTR390)           += ltr390.o
 obj-$(CONFIG_LTR501)           += ltr501.o
 obj-$(CONFIG_LTRF216A)         += ltrf216a.o
 obj-$(CONFIG_LV0104CS)         += lv0104cs.o
@@ -60,5 +62,6 @@ obj-$(CONFIG_VCNL4000)                += vcnl4000.o
 obj-$(CONFIG_VCNL4035)         += vcnl4035.o
 obj-$(CONFIG_VEML6030)         += veml6030.o
 obj-$(CONFIG_VEML6070)         += veml6070.o
+obj-$(CONFIG_VEML6075)         += veml6075.o
 obj-$(CONFIG_VL6180)           += vl6180.o
 obj-$(CONFIG_ZOPT2201)         += zopt2201.o
diff --git a/drivers/iio/light/isl76682.c b/drivers/iio/light/isl76682.c
new file mode 100644 (file)
index 0000000..cf6ddee
--- /dev/null
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IIO driver for the light sensor ISL76682.
+ * ISL76682 is Ambient Light Sensor
+ *
+ * Copyright (c) 2023 Marek Vasut <marex@denx.de>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <linux/iio/iio.h>
+
+#define ISL76682_REG_COMMAND                   0x00
+
+#define ISL76682_COMMAND_EN                    BIT(7)
+#define ISL76682_COMMAND_MODE_CONTINUOUS       BIT(6)
+#define ISL76682_COMMAND_LIGHT_IR              BIT(5)
+
+#define ISL76682_COMMAND_RANGE_LUX_1K          0x0
+#define ISL76682_COMMAND_RANGE_LUX_4K          0x1
+#define ISL76682_COMMAND_RANGE_LUX_16K         0x2
+#define ISL76682_COMMAND_RANGE_LUX_64K         0x3
+#define ISL76682_COMMAND_RANGE_LUX_MASK                GENMASK(1, 0)
+
+#define ISL76682_REG_ALSIR_L                   0x01
+
+#define ISL76682_REG_ALSIR_U                   0x02
+
+#define ISL76682_NUM_REGS                      (ISL76682_REG_ALSIR_U + 1)
+
+#define ISL76682_CONV_TIME_MS                  100
+#define ISL76682_INT_TIME_US                   90000
+
+#define ISL76682_ADC_MAX                       (BIT(16) - 1)
+
+struct isl76682_chip {
+       /*
+        * Lock to synchronize access to device command register
+        * and the content of range variable below.
+        */
+       struct mutex                    lock;
+       struct regmap                   *regmap;
+       u8                              range;
+       u8                              command;
+};
+
+struct isl76682_range {
+       u8                              range;
+       u32                             als;
+       u32                             ir;
+};
+
+static struct isl76682_range isl76682_range_table[] = {
+       { ISL76682_COMMAND_RANGE_LUX_1K, 15000, 10500 },
+       { ISL76682_COMMAND_RANGE_LUX_4K, 60000, 42000 },
+       { ISL76682_COMMAND_RANGE_LUX_16K, 240000, 168000 },
+       { ISL76682_COMMAND_RANGE_LUX_64K, 960000, 673000 }
+};
+
+static int isl76682_get(struct isl76682_chip *chip, bool mode_ir, int *data)
+{
+       u8 command;
+       int ret;
+
+       command = ISL76682_COMMAND_EN | ISL76682_COMMAND_MODE_CONTINUOUS |
+                 chip->range;
+
+       if (mode_ir)
+               command |= ISL76682_COMMAND_LIGHT_IR;
+
+       if (command != chip->command) {
+               ret = regmap_write(chip->regmap, ISL76682_REG_COMMAND, command);
+               if (ret)
+                       return ret;
+
+               /* Need to wait for conversion time if ALS/IR mode enabled */
+               msleep(ISL76682_CONV_TIME_MS);
+
+               chip->command = command;
+       }
+
+       ret = regmap_bulk_read(chip->regmap, ISL76682_REG_ALSIR_L, data, 2);
+       *data &= ISL76682_ADC_MAX;
+       return ret;
+}
+
+static int isl76682_write_raw(struct iio_dev *indio_dev,
+                             struct iio_chan_spec const *chan,
+                             int val, int val2, long mask)
+{
+       struct isl76682_chip *chip = iio_priv(indio_dev);
+       int i;
+
+       if (mask != IIO_CHAN_INFO_SCALE)
+               return -EINVAL;
+
+       if (val != 0)
+               return -EINVAL;
+
+       for (i = 0; i < ARRAY_SIZE(isl76682_range_table); i++) {
+               if (chan->type == IIO_LIGHT && val2 != isl76682_range_table[i].als)
+                       continue;
+               if (chan->type == IIO_INTENSITY && val2 != isl76682_range_table[i].ir)
+                       continue;
+
+               scoped_guard(mutex, &chip->lock)
+                       chip->range = isl76682_range_table[i].range;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static int isl76682_read_raw(struct iio_dev *indio_dev,
+                            struct iio_chan_spec const *chan,
+                            int *val, int *val2, long mask)
+{
+       struct isl76682_chip *chip = iio_priv(indio_dev);
+       int ret;
+       int i;
+
+       guard(mutex)(&chip->lock);
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               switch (chan->type) {
+               case IIO_LIGHT:
+                       ret = isl76682_get(chip, false, val);
+                       return (ret < 0) ? ret : IIO_VAL_INT;
+               case IIO_INTENSITY:
+                       ret = isl76682_get(chip, true, val);
+                       return (ret < 0) ? ret : IIO_VAL_INT;
+               default:
+                       return -EINVAL;
+               }
+       case IIO_CHAN_INFO_SCALE:
+               for (i = 0; i < ARRAY_SIZE(isl76682_range_table); i++) {
+                       if (chip->range != isl76682_range_table[i].range)
+                               continue;
+
+                       *val = 0;
+                       switch (chan->type) {
+                       case IIO_LIGHT:
+                               *val2 = isl76682_range_table[i].als;
+                               return IIO_VAL_INT_PLUS_MICRO;
+                       case IIO_INTENSITY:
+                               *val2 = isl76682_range_table[i].ir;
+                               return IIO_VAL_INT_PLUS_MICRO;
+                       default:
+                               return -EINVAL;
+                       }
+               }
+               return -EINVAL;
+       case IIO_CHAN_INFO_INT_TIME:
+               *val = 0;
+               *val2 = ISL76682_INT_TIME_US;
+               return IIO_VAL_INT_PLUS_MICRO;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int illuminance_scale_available[] = {
+       0, 15000,
+       0, 60000,
+       0, 240000,
+       0, 960000,
+};
+
+static int intensity_scale_available[] = {
+       0, 10500,
+       0, 42000,
+       0, 168000,
+       0, 673000,
+};
+
+static int isl76682_read_avail(struct iio_dev *indio_dev,
+                              struct iio_chan_spec const *chan,
+                              const int **vals, int *type,
+                              int *length, long mask)
+{
+       switch (mask) {
+       case IIO_CHAN_INFO_SCALE:
+               switch (chan->type) {
+               case IIO_LIGHT:
+                       *vals = illuminance_scale_available;
+                       *length = ARRAY_SIZE(illuminance_scale_available);
+                       *type = IIO_VAL_INT_PLUS_MICRO;
+                       return IIO_AVAIL_LIST;
+               case IIO_INTENSITY:
+                       *vals = intensity_scale_available;
+                       *length = ARRAY_SIZE(intensity_scale_available);
+                       *type = IIO_VAL_INT_PLUS_MICRO;
+                       return IIO_AVAIL_LIST;
+               default:
+                       return -EINVAL;
+               }
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct iio_chan_spec isl76682_channels[] = {
+       {
+               .type = IIO_LIGHT,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+                                     BIT(IIO_CHAN_INFO_SCALE),
+               .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
+               .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+       }, {
+               .type = IIO_INTENSITY,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+                                     BIT(IIO_CHAN_INFO_SCALE),
+               .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
+               .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+       }
+};
+
+static const struct iio_info isl76682_info = {
+       .read_avail     = isl76682_read_avail,
+       .read_raw       = isl76682_read_raw,
+       .write_raw      = isl76682_write_raw,
+};
+
+static int isl76682_clear_configure_reg(struct isl76682_chip *chip)
+{
+       struct device *dev = regmap_get_device(chip->regmap);
+       int ret;
+
+       ret = regmap_write(chip->regmap, ISL76682_REG_COMMAND, 0x0);
+       if (ret < 0)
+               dev_err(dev, "Error %d clearing the CONFIGURE register\n", ret);
+
+       /*
+        * In the success case, the command register was zeroed out.
+        *
+        * In the error case, we do not know in which state the command
+        * register is, so we assume it is zeroed out, so that it would
+        * be reprogrammed at the next data read out, and at that time
+        * we hope it would be reprogrammed successfully. That is very
+        * much a best effort approach.
+        */
+       chip->command = 0;
+
+       return ret;
+}
+
+static void isl76682_reset_action(void *chip)
+{
+       isl76682_clear_configure_reg(chip);
+}
+
+static bool isl76682_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case ISL76682_REG_ALSIR_L:
+       case ISL76682_REG_ALSIR_U:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static const struct regmap_config isl76682_regmap_config = {
+       .reg_bits               = 8,
+       .val_bits               = 8,
+       .volatile_reg           = isl76682_is_volatile_reg,
+       .max_register           = ISL76682_NUM_REGS - 1,
+       .num_reg_defaults_raw   = ISL76682_NUM_REGS,
+       .cache_type             = REGCACHE_FLAT,
+};
+
+static int isl76682_probe(struct i2c_client *client)
+{
+       struct device *dev = &client->dev;
+       struct isl76682_chip *chip;
+       struct iio_dev *indio_dev;
+       int ret;
+
+       indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+       if (!indio_dev)
+               return -ENOMEM;
+
+       chip = iio_priv(indio_dev);
+
+       mutex_init(&chip->lock);
+
+       chip->regmap = devm_regmap_init_i2c(client, &isl76682_regmap_config);
+       ret = PTR_ERR_OR_ZERO(chip->regmap);
+       if (ret)
+               return dev_err_probe(dev, ret, "Error initializing regmap\n");
+
+       chip->range = ISL76682_COMMAND_RANGE_LUX_1K;
+
+       ret = isl76682_clear_configure_reg(chip);
+       if (ret < 0)
+               return ret;
+
+       ret = devm_add_action_or_reset(dev, isl76682_reset_action, chip);
+       if (ret)
+               return ret;
+
+       indio_dev->info = &isl76682_info;
+       indio_dev->channels = isl76682_channels;
+       indio_dev->num_channels = ARRAY_SIZE(isl76682_channels);
+       indio_dev->name = "isl76682";
+       indio_dev->modes = INDIO_DIRECT_MODE;
+
+       return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id isl76682_id[] = {
+       { "isl76682" },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, isl76682_id);
+
+static const struct of_device_id isl76682_of_match[] = {
+       { .compatible = "isil,isl76682" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, isl76682_of_match);
+
+static struct i2c_driver isl76682_driver = {
+       .driver  = {
+               .name           = "isl76682",
+               .of_match_table = isl76682_of_match,
+       },
+       .probe          = isl76682_probe,
+       .id_table       = isl76682_id,
+};
+module_i2c_driver(isl76682_driver);
+
+MODULE_DESCRIPTION("ISL76682 Ambient Light Sensor driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
diff --git a/drivers/iio/light/ltr390.c b/drivers/iio/light/ltr390.c
new file mode 100644 (file)
index 0000000..fff1e89
--- /dev/null
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IIO driver for Lite-On LTR390 ALS and UV sensor
+ * (7-bit I2C slave address 0x53)
+ *
+ * Based on the work of:
+ *   Shreeya Patel and Shi Zhigang (LTRF216 Driver)
+ *
+ * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com>
+ *
+ * Datasheet:
+ *   https://optoelectronics.liteon.com/upload/download/DS86-2015-0004/LTR-390UV_Final_%20DS_V1%201.pdf
+ *
+ * TODO:
+ *   - Support for configurable gain and resolution
+ *   - Sensor suspend/resume support
+ *   - Add support for reading the ALS
+ *   - Interrupt support
+ */
+
+#include <linux/i2c.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+
+#include <asm/unaligned.h>
+
+#define LTR390_MAIN_CTRL      0x00
+#define LTR390_PART_ID       0x06
+#define LTR390_UVS_DATA              0x10
+
+#define LTR390_SW_RESET              BIT(4)
+#define LTR390_UVS_MODE              BIT(3)
+#define LTR390_SENSOR_ENABLE  BIT(1)
+
+#define LTR390_PART_NUMBER_ID 0xb
+
+/*
+ * At 20-bit resolution (integration time: 400ms) and 18x gain, 2300 counts of
+ * the sensor are equal to 1 UV Index [Datasheet Page#8].
+ *
+ * For the default resolution of 18-bit (integration time: 100ms) and default
+ * gain of 3x, the counts/uvi are calculated as follows:
+ * 2300 / ((3/18) * (100/400)) = 95.83
+ */
+#define LTR390_COUNTS_PER_UVI 96
+
+/*
+ * Window Factor is needed when the device is under window glass with coated
+ * tinted ink. This is to compensate for the light loss due to the lower
+ * transmission rate of the window glass and helps in calculating lux.
+ */
+#define LTR390_WINDOW_FACTOR 1
+
+struct ltr390_data {
+       struct regmap *regmap;
+       struct i2c_client *client;
+       /* Protects device from simultaneous reads */
+       struct mutex lock;
+};
+
+static const struct regmap_config ltr390_regmap_config = {
+       .name = "ltr390",
+       .reg_bits = 8,
+       .reg_stride = 1,
+       .val_bits = 8,
+};
+
+static int ltr390_register_read(struct ltr390_data *data, u8 register_address)
+{
+       struct device *dev = &data->client->dev;
+       int ret;
+       u8 recieve_buffer[3];
+
+       guard(mutex)(&data->lock);
+
+       ret = regmap_bulk_read(data->regmap, register_address, recieve_buffer,
+                              sizeof(recieve_buffer));
+       if (ret) {
+               dev_err(dev, "failed to read measurement data");
+               return ret;
+       }
+
+       return get_unaligned_le24(recieve_buffer);
+}
+
+static int ltr390_read_raw(struct iio_dev *iio_device,
+                          struct iio_chan_spec const *chan, int *val,
+                          int *val2, long mask)
+{
+       int ret;
+       struct ltr390_data *data = iio_priv(iio_device);
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               ret = ltr390_register_read(data, LTR390_UVS_DATA);
+               if (ret < 0)
+                       return ret;
+               *val = ret;
+               return IIO_VAL_INT;
+       case IIO_CHAN_INFO_SCALE:
+               *val = LTR390_WINDOW_FACTOR;
+               *val2 = LTR390_COUNTS_PER_UVI;
+               return IIO_VAL_FRACTIONAL;
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct iio_info ltr390_info = {
+       .read_raw = ltr390_read_raw,
+};
+
+static const struct iio_chan_spec ltr390_channel = {
+       .type = IIO_UVINDEX,
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)
+};
+
+static int ltr390_probe(struct i2c_client *client)
+{
+       struct ltr390_data *data;
+       struct iio_dev *indio_dev;
+       struct device *dev;
+       int ret, part_number;
+
+       dev = &client->dev;
+       indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+       if (!indio_dev)
+               return -ENOMEM;
+
+       data = iio_priv(indio_dev);
+
+       data->regmap = devm_regmap_init_i2c(client, &ltr390_regmap_config);
+       if (IS_ERR(data->regmap))
+               return dev_err_probe(dev, PTR_ERR(data->regmap),
+                                    "regmap initialization failed\n");
+
+       data->client = client;
+       mutex_init(&data->lock);
+
+       indio_dev->info = &ltr390_info;
+       indio_dev->channels = &ltr390_channel;
+       indio_dev->num_channels = 1;
+       indio_dev->name = "ltr390";
+
+       ret = regmap_read(data->regmap, LTR390_PART_ID, &part_number);
+       if (ret)
+               return dev_err_probe(dev, ret,
+                                    "failed to get sensor's part id\n");
+       /* Lower 4 bits of `part_number` change with hardware revisions */
+       if (part_number >> 4 != LTR390_PART_NUMBER_ID)
+               dev_info(dev, "received invalid product id: 0x%x", part_number);
+       dev_dbg(dev, "LTR390, product id: 0x%x\n", part_number);
+
+       /* reset sensor, chip fails to respond to this, so ignore any errors */
+       regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SW_RESET);
+
+       /* Wait for the registers to reset before proceeding */
+       usleep_range(1000, 2000);
+
+       ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL,
+                             LTR390_SENSOR_ENABLE | LTR390_UVS_MODE);
+       if (ret)
+               return dev_err_probe(dev, ret, "failed to enable the sensor\n");
+
+       return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id ltr390_id[] = {
+       { "ltr390" },
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ltr390_id);
+
+static const struct of_device_id ltr390_of_table[] = {
+       { .compatible = "liteon,ltr390" },
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ltr390_of_table);
+
+static struct i2c_driver ltr390_driver = {
+       .driver = {
+               .name = "ltr390",
+               .of_match_table = ltr390_of_table,
+       },
+       .probe = ltr390_probe,
+       .id_table = ltr390_id,
+};
+module_i2c_driver(ltr390_driver);
+
+MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>");
+MODULE_DESCRIPTION("Lite-On LTR390 ALS and UV sensor Driver");
+MODULE_LICENSE("GPL");
index 8de4dd849936d403684159eb4a5e26698963e666..68dc48420a886aab5088f04f7a67b2a470867e8f 100644 (file)
@@ -234,7 +234,7 @@ static int ltrf216a_read_data(struct ltrf216a_data *data, u8 addr)
 static int ltrf216a_get_lux(struct ltrf216a_data *data)
 {
        int ret, greendata;
-       u64 lux, div;
+       u64 lux;
 
        ret = ltrf216a_set_power_state(data, true);
        if (ret)
@@ -246,10 +246,9 @@ static int ltrf216a_get_lux(struct ltrf216a_data *data)
 
        ltrf216a_set_power_state(data, false);
 
-       lux = greendata * 45 * LTRF216A_WIN_FAC * 100;
-       div = data->als_gain_fac * data->int_time_fac * 100;
+       lux = greendata * 45 * LTRF216A_WIN_FAC;
 
-       return div_u64(lux, div);
+       return lux;
 }
 
 static int ltrf216a_read_raw(struct iio_dev *indio_dev,
@@ -279,7 +278,8 @@ static int ltrf216a_read_raw(struct iio_dev *indio_dev,
                if (ret < 0)
                        return ret;
                *val = ret;
-               return IIO_VAL_INT;
+               *val2 = data->als_gain_fac * data->int_time_fac;
+               return IIO_VAL_FRACTIONAL;
        case IIO_CHAN_INFO_INT_TIME:
                mutex_lock(&data->lock);
                ret = ltrf216a_get_int_time(data, val, val2);
index ed241598aefbe41c7b70f648e8df8c13e01140a3..636432c45651d2aadd5133a14d5b952735efe57c 100644 (file)
@@ -472,7 +472,7 @@ static struct i2c_driver pa12203001_driver = {
        .driver = {
                .name = PA12203001_DRIVER_NAME,
                .pm = &pa12203001_pm_ops,
-               .acpi_match_table = ACPI_PTR(pa12203001_acpi_match),
+               .acpi_match_table = pa12203001_acpi_match,
        },
        .probe = pa12203001_probe,
        .remove = pa12203001_remove,
index 6a6d778050911eebfd4823468f65c93571231697..0f010eff1981fbc4f7271d3329b12ecd3a0a8bfd 100644 (file)
  * @BU27008_BLUE:      Blue channel. Via data2 (when used).
  * @BU27008_CLEAR:     Clear channel. Via data2 or data3 (when used).
  * @BU27008_IR:                IR channel. Via data3 (when used).
+ * @BU27008_LUX:       Illuminance channel, computed using RGB and IR.
  * @BU27008_NUM_CHANS: Number of channel types.
  */
 enum bu27008_chan_type {
@@ -138,6 +139,7 @@ enum bu27008_chan_type {
        BU27008_BLUE,
        BU27008_CLEAR,
        BU27008_IR,
+       BU27008_LUX,
        BU27008_NUM_CHANS
 };
 
@@ -172,6 +174,8 @@ static const unsigned long bu27008_scan_masks[] = {
        ALWAYS_SCANNABLE | BIT(BU27008_CLEAR) | BIT(BU27008_IR),
        /* buffer is R, G, B, IR */
        ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR),
+       /* buffer is R, G, B, IR, LUX */
+       ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR) | BIT(BU27008_LUX),
        0
 };
 
@@ -331,6 +335,19 @@ static const struct iio_chan_spec bu27008_channels[] = {
         * Hence we don't advertise available ones either.
         */
        BU27008_CHAN(IR, DATA3, 0),
+       {
+               .type = IIO_LIGHT,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+                                     BIT(IIO_CHAN_INFO_SCALE),
+               .channel = BU27008_LUX,
+               .scan_index = BU27008_LUX,
+               .scan_type = {
+                       .sign = 'u',
+                       .realbits = 64,
+                       .storagebits = 64,
+                       .endianness = IIO_CPU,
+               },
+       },
        IIO_CHAN_SOFT_TIMESTAMP(BU27008_NUM_CHANS),
 };
 
@@ -1004,6 +1021,169 @@ static int bu27008_read_one(struct bu27008_data *data, struct iio_dev *idev,
        return ret;
 }
 
+#define BU27008_LUX_DATA_RED   0
+#define BU27008_LUX_DATA_GREEN 1
+#define BU27008_LUX_DATA_BLUE  2
+#define BU27008_LUX_DATA_IR    3
+#define LUX_DATA_SIZE (BU27008_NUM_HW_CHANS * sizeof(__le16))
+
+static int bu27008_read_lux_chans(struct bu27008_data *data, unsigned int time,
+                                 __le16 *chan_data)
+{
+       int ret, chan_sel, tmpret, valid;
+
+       chan_sel = BU27008_BLUE2_IR3 << (ffs(data->cd->chan_sel_mask) - 1);
+
+       ret = regmap_update_bits(data->regmap, data->cd->chan_sel_reg,
+                                data->cd->chan_sel_mask, chan_sel);
+       if (ret)
+               return ret;
+
+       ret = bu27008_meas_set(data, true);
+       if (ret)
+               return ret;
+
+       msleep(time / USEC_PER_MSEC);
+
+       ret = regmap_read_poll_timeout(data->regmap, data->cd->valid_reg,
+                                      valid, (valid & BU27008_MASK_VALID),
+                                      BU27008_VALID_RESULT_WAIT_QUANTA_US,
+                                      BU27008_MAX_VALID_RESULT_WAIT_US);
+       if (ret)
+               goto out;
+
+       ret = regmap_bulk_read(data->regmap, BU27008_REG_DATA0_LO, chan_data,
+                              LUX_DATA_SIZE);
+       if (ret)
+               goto out;
+out:
+       tmpret = bu27008_meas_set(data, false);
+       if (tmpret)
+               dev_warn(data->dev, "Stopping measurement failed\n");
+
+       return ret;
+}
+
+/*
+ * Following equation for computing lux out of register values was given by
+ * ROHM HW colleagues;
+ *
+ * Red = RedData*1024 / Gain * 20 / meas_mode
+ * Green = GreenData* 1024 / Gain * 20 / meas_mode
+ * Blue = BlueData* 1024 / Gain * 20 / meas_mode
+ * IR = IrData* 1024 / Gain * 20 / meas_mode
+ *
+ * where meas_mode is the integration time in mS / 10
+ *
+ * IRratio = (IR > 0.18 * Green) ? 0 : 1
+ *
+ * Lx = max(c1*Red + c2*Green + c3*Blue,0)
+ *
+ * for
+ * IRratio 0: c1 = -0.00002237, c2 = 0.0003219, c3 = -0.000120371
+ * IRratio 1: c1 = -0.00001074, c2 = 0.000305415, c3 = -0.000129367
+ */
+
+/*
+ * The max chan data is 0xffff. When we multiply it by 1024 * 20, we'll get
+ * 0x4FFFB000 which still fits in 32-bit integer. This won't overflow.
+ */
+#define NORM_CHAN_DATA_FOR_LX_CALC(chan, gain, time) (le16_to_cpu(chan) * \
+                                  1024 * 20 / (gain) / (time))
+static u64 bu27008_calc_nlux(struct bu27008_data *data, __le16 *lux_data,
+               unsigned int gain, unsigned int gain_ir, unsigned int time)
+{
+       unsigned int red, green, blue, ir;
+       s64 c1, c2, c3, nlux;
+
+       time /= 10000;
+       ir = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_IR], gain_ir, time);
+       red = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_RED], gain, time);
+       green = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_GREEN], gain, time);
+       blue = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_BLUE], gain, time);
+
+       if ((u64)ir * 100LLU > (u64)green * 18LLU) {
+               c1 = -22370;
+               c2 = 321900;
+               c3 = -120371;
+       } else {
+               c1 = -10740;
+               c2 = 305415;
+               c3 = -129367;
+       }
+       nlux = c1 * red + c2 * green + c3 * blue;
+
+       return max_t(s64, 0, nlux);
+}
+
+static int bu27008_get_time_n_gains(struct bu27008_data *data,
+               unsigned int *gain, unsigned int *gain_ir, unsigned int *time)
+{
+       int ret;
+
+       ret = bu27008_get_gain(data, &data->gts, gain);
+       if (ret < 0)
+               return ret;
+
+       ret = bu27008_get_gain(data, &data->gts_ir, gain_ir);
+       if (ret < 0)
+               return ret;
+
+       ret = bu27008_get_int_time_us(data);
+       if (ret < 0)
+               return ret;
+
+       /* Max integration time is 400000. Fits in signed int. */
+       *time = ret;
+
+       return 0;
+}
+
+struct bu27008_buf {
+       __le16 chan[BU27008_NUM_HW_CHANS];
+       u64 lux __aligned(8);
+       s64 ts __aligned(8);
+};
+
+static int bu27008_buffer_fill_lux(struct bu27008_data *data,
+                                  struct bu27008_buf *raw)
+{
+       unsigned int gain, gain_ir, time;
+       int ret;
+
+       ret = bu27008_get_time_n_gains(data, &gain, &gain_ir, &time);
+       if (ret)
+               return ret;
+
+       raw->lux = bu27008_calc_nlux(data, raw->chan, gain, gain_ir, time);
+
+       return 0;
+}
+
+static int bu27008_read_lux(struct bu27008_data *data, struct iio_dev *idev,
+                           struct iio_chan_spec const *chan,
+                           int *val, int *val2)
+{
+       __le16 lux_data[BU27008_NUM_HW_CHANS];
+       unsigned int gain, gain_ir, time;
+       u64 nlux;
+       int ret;
+
+       ret = bu27008_get_time_n_gains(data, &gain, &gain_ir, &time);
+       if (ret)
+               return ret;
+
+       ret = bu27008_read_lux_chans(data, time, lux_data);
+       if (ret)
+               return ret;
+
+       nlux = bu27008_calc_nlux(data, lux_data, gain, gain_ir, time);
+       *val = (int)nlux;
+       *val2 = nlux >> 32LLU;
+
+       return IIO_VAL_INT_64;
+}
+
 static int bu27008_read_raw(struct iio_dev *idev,
                           struct iio_chan_spec const *chan,
                           int *val, int *val2, long mask)
@@ -1018,7 +1198,10 @@ static int bu27008_read_raw(struct iio_dev *idev,
                        return -EBUSY;
 
                mutex_lock(&data->mutex);
-               ret = bu27008_read_one(data, idev, chan, val, val2);
+               if (chan->type == IIO_LIGHT)
+                       ret = bu27008_read_lux(data, idev, chan, val, val2);
+               else
+                       ret = bu27008_read_one(data, idev, chan, val, val2);
                mutex_unlock(&data->mutex);
 
                iio_device_release_direct_mode(idev);
@@ -1026,6 +1209,11 @@ static int bu27008_read_raw(struct iio_dev *idev,
                return ret;
 
        case IIO_CHAN_INFO_SCALE:
+               if (chan->type == IIO_LIGHT) {
+                       *val = 0;
+                       *val2 = 1;
+                       return IIO_VAL_INT_PLUS_NANO;
+               }
                ret = bu27008_get_scale(data, chan->scan_index == BU27008_IR,
                                        val, val2);
                if (ret)
@@ -1236,10 +1424,7 @@ static irqreturn_t bu27008_trigger_handler(int irq, void *p)
        struct iio_poll_func *pf = p;
        struct iio_dev *idev = pf->indio_dev;
        struct bu27008_data *data = iio_priv(idev);
-       struct {
-               __le16 chan[BU27008_NUM_HW_CHANS];
-               s64 ts __aligned(8);
-       } raw;
+       struct bu27008_buf raw;
        int ret, dummy;
 
        memset(&raw, 0, sizeof(raw));
@@ -1257,6 +1442,12 @@ static irqreturn_t bu27008_trigger_handler(int irq, void *p)
        if (ret < 0)
                goto err_read;
 
+       if (test_bit(BU27008_LUX, idev->active_scan_mask)) {
+               ret = bu27008_buffer_fill_lux(data, &raw);
+               if (ret)
+                       goto err_read;
+       }
+
        iio_push_to_buffers_with_timestamp(idev, &raw, pf->timestamp);
 err_read:
        iio_trigger_notify_done(idev->trig);
diff --git a/drivers/iio/light/veml6075.c b/drivers/iio/light/veml6075.c
new file mode 100644 (file)
index 0000000..05d4c0e
--- /dev/null
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Vishay VEML6075 UVA and UVB light sensor
+ *
+ * Copyright 2023 Javier Carrasco <javier.carrasco.cruz@gmail.com>
+ *
+ * 7-bit I2C slave, address 0x10
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#include <linux/iio/iio.h>
+
+#define VEML6075_CMD_CONF      0x00 /* configuration register */
+#define VEML6075_CMD_UVA       0x07 /* UVA channel */
+#define VEML6075_CMD_UVB       0x09 /* UVB channel */
+#define VEML6075_CMD_COMP1     0x0A /* visible light compensation */
+#define VEML6075_CMD_COMP2     0x0B /* infrared light compensation */
+#define VEML6075_CMD_ID                0x0C /* device ID */
+
+#define VEML6075_CONF_IT       GENMASK(6, 4) /* integration time */
+#define VEML6075_CONF_HD       BIT(3) /* dynamic setting */
+#define VEML6075_CONF_TRIG     BIT(2) /* trigger */
+#define VEML6075_CONF_AF       BIT(1) /* active force enable */
+#define VEML6075_CONF_SD       BIT(0) /* shutdown */
+
+#define VEML6075_IT_50_MS      0x00
+#define VEML6075_IT_100_MS     0x01
+#define VEML6075_IT_200_MS     0x02
+#define VEML6075_IT_400_MS     0x03
+#define VEML6075_IT_800_MS     0x04
+
+#define VEML6075_AF_DISABLE    0x00
+#define VEML6075_AF_ENABLE     0x01
+
+#define VEML6075_SD_DISABLE    0x00
+#define VEML6075_SD_ENABLE     0x01
+
+/* Open-air coefficients and responsivity */
+#define VEML6075_A_COEF                2220
+#define VEML6075_B_COEF                1330
+#define VEML6075_C_COEF                2950
+#define VEML6075_D_COEF                1740
+#define VEML6075_UVA_RESP      1461
+#define VEML6075_UVB_RESP      2591
+
+static const int veml6075_it_ms[] = { 50, 100, 200, 400, 800 };
+
+struct veml6075_data {
+       struct i2c_client *client;
+       struct regmap *regmap;
+       /*
+        * prevent integration time modification and triggering
+        * measurements while a measurement is underway.
+        */
+       struct mutex lock;
+};
+
+/* channel number */
+enum veml6075_chan {
+       CH_UVA,
+       CH_UVB,
+};
+
+static const struct iio_chan_spec veml6075_channels[] = {
+       {
+               .type = IIO_INTENSITY,
+               .channel = CH_UVA,
+               .modified = 1,
+               .channel2 = IIO_MOD_LIGHT_UVA,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+                       BIT(IIO_CHAN_INFO_SCALE),
+               .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+               .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME),
+       },
+       {
+               .type = IIO_INTENSITY,
+               .channel = CH_UVB,
+               .modified = 1,
+               .channel2 = IIO_MOD_LIGHT_UVB,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+                       BIT(IIO_CHAN_INFO_SCALE),
+               .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+               .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME),
+       },
+       {
+               .type = IIO_UVINDEX,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+               .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+               .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME),
+       },
+};
+
+static int veml6075_request_measurement(struct veml6075_data *data)
+{
+       int ret, conf, int_time;
+
+       ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf);
+       if (ret < 0)
+               return ret;
+
+       /* disable shutdown and trigger measurement */
+       ret = regmap_write(data->regmap, VEML6075_CMD_CONF,
+                          (conf | VEML6075_CONF_TRIG) & ~VEML6075_CONF_SD);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * A measurement requires between 1.30 and 1.40 times the integration
+        * time for all possible configurations. Using a 1.50 factor simplifies
+        * operations and ensures reliability under all circumstances.
+        */
+       int_time = veml6075_it_ms[FIELD_GET(VEML6075_CONF_IT, conf)];
+       msleep(int_time + (int_time / 2));
+
+       /* shutdown again, data registers are still accessible */
+       return regmap_update_bits(data->regmap, VEML6075_CMD_CONF,
+                                 VEML6075_CONF_SD, VEML6075_CONF_SD);
+}
+
+static int veml6075_uva_comp(int raw_uva, int comp1, int comp2)
+{
+       int comp1a_c, comp2a_c, uva_comp;
+
+       comp1a_c = (comp1 * VEML6075_A_COEF) / 1000U;
+       comp2a_c = (comp2 * VEML6075_B_COEF) / 1000U;
+       uva_comp = raw_uva - comp1a_c - comp2a_c;
+
+       return clamp_val(uva_comp, 0, U16_MAX);
+}
+
+static int veml6075_uvb_comp(int raw_uvb, int comp1, int comp2)
+{
+       int comp1b_c, comp2b_c, uvb_comp;
+
+       comp1b_c = (comp1 * VEML6075_C_COEF) / 1000U;
+       comp2b_c = (comp2 * VEML6075_D_COEF) / 1000U;
+       uvb_comp = raw_uvb - comp1b_c - comp2b_c;
+
+       return clamp_val(uvb_comp, 0, U16_MAX);
+}
+
+static int veml6075_read_comp(struct veml6075_data *data, int *c1, int *c2)
+{
+       int ret;
+
+       ret = regmap_read(data->regmap, VEML6075_CMD_COMP1, c1);
+       if (ret < 0)
+               return ret;
+
+       return regmap_read(data->regmap, VEML6075_CMD_COMP2, c2);
+}
+
+static int veml6075_read_uv_direct(struct veml6075_data *data, int chan,
+                                  int *val)
+{
+       int c1, c2, ret;
+
+       guard(mutex)(&data->lock);
+
+       ret = veml6075_request_measurement(data);
+       if (ret < 0)
+               return ret;
+
+       ret = veml6075_read_comp(data, &c1, &c2);
+       if (ret < 0)
+               return ret;
+
+       switch (chan) {
+       case CH_UVA:
+               ret = regmap_read(data->regmap, VEML6075_CMD_UVA, val);
+               if (ret < 0)
+                       return ret;
+
+               *val = veml6075_uva_comp(*val, c1, c2);
+               return IIO_VAL_INT;
+       case CH_UVB:
+               ret = regmap_read(data->regmap, VEML6075_CMD_UVB, val);
+               if (ret < 0)
+                       return ret;
+
+               *val = veml6075_uvb_comp(*val, c1, c2);
+               return IIO_VAL_INT;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int veml6075_read_int_time_index(struct veml6075_data *data)
+{
+       int ret, conf;
+
+       ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf);
+       if (ret < 0)
+               return ret;
+
+       return FIELD_GET(VEML6075_CONF_IT, conf);
+}
+
+static int veml6075_read_int_time_ms(struct veml6075_data *data, int *val)
+{
+       int int_index;
+
+       guard(mutex)(&data->lock);
+       int_index = veml6075_read_int_time_index(data);
+       if (int_index < 0)
+               return int_index;
+
+       *val = veml6075_it_ms[int_index];
+
+       return IIO_VAL_INT;
+}
+
+static int veml6075_get_uvi_micro(struct veml6075_data *data, int uva_comp,
+                                 int uvb_comp)
+{
+       int uvia_micro = uva_comp * VEML6075_UVA_RESP;
+       int uvib_micro = uvb_comp * VEML6075_UVB_RESP;
+       int int_index;
+
+       int_index = veml6075_read_int_time_index(data);
+       if (int_index < 0)
+               return int_index;
+
+       switch (int_index) {
+       case VEML6075_IT_50_MS:
+               return uvia_micro + uvib_micro;
+       case VEML6075_IT_100_MS:
+       case VEML6075_IT_200_MS:
+       case VEML6075_IT_400_MS:
+       case VEML6075_IT_800_MS:
+               return (uvia_micro + uvib_micro) / (2 << int_index);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int veml6075_read_uvi(struct veml6075_data *data, int *val, int *val2)
+{
+       int ret, c1, c2, uva, uvb, uvi_micro;
+
+       guard(mutex)(&data->lock);
+
+       ret = veml6075_request_measurement(data);
+       if (ret < 0)
+               return ret;
+
+       ret = veml6075_read_comp(data, &c1, &c2);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_read(data->regmap, VEML6075_CMD_UVA, &uva);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_read(data->regmap, VEML6075_CMD_UVB, &uvb);
+       if (ret < 0)
+               return ret;
+
+       uvi_micro = veml6075_get_uvi_micro(data, veml6075_uva_comp(uva, c1, c2),
+                                          veml6075_uvb_comp(uvb, c1, c2));
+       if (uvi_micro < 0)
+               return uvi_micro;
+
+       *val = uvi_micro / MICRO;
+       *val2 = uvi_micro % MICRO;
+
+       return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6075_read_responsivity(int chan, int *val, int *val2)
+{
+       /* scale = 1 / resp */
+       switch (chan) {
+       case CH_UVA:
+               /* resp = 0.93 c/uW/cm2: scale = 1.75268817 */
+               *val = 1;
+               *val2 = 75268817;
+               return IIO_VAL_INT_PLUS_NANO;
+       case CH_UVB:
+               /* resp = 2.1 c/uW/cm2: scale = 0.476190476 */
+               *val = 0;
+               *val2 = 476190476;
+               return IIO_VAL_INT_PLUS_NANO;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int veml6075_read_avail(struct iio_dev *indio_dev,
+                              struct iio_chan_spec const *chan,
+                              const int **vals, int *type, int *length,
+                              long mask)
+{
+       switch (mask) {
+       case IIO_CHAN_INFO_INT_TIME:
+               *length = ARRAY_SIZE(veml6075_it_ms);
+               *vals = veml6075_it_ms;
+               *type = IIO_VAL_INT;
+               return IIO_AVAIL_LIST;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static int veml6075_read_raw(struct iio_dev *indio_dev,
+                            struct iio_chan_spec const *chan,
+                            int *val, int *val2, long mask)
+{
+       struct veml6075_data *data = iio_priv(indio_dev);
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               return veml6075_read_uv_direct(data, chan->channel, val);
+       case IIO_CHAN_INFO_PROCESSED:
+               return veml6075_read_uvi(data, val, val2);
+       case IIO_CHAN_INFO_INT_TIME:
+               return veml6075_read_int_time_ms(data, val);
+       case IIO_CHAN_INFO_SCALE:
+               return veml6075_read_responsivity(chan->channel, val, val2);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int veml6075_write_int_time_ms(struct veml6075_data *data, int val)
+{
+       int i = ARRAY_SIZE(veml6075_it_ms);
+
+       guard(mutex)(&data->lock);
+
+       while (i-- > 0) {
+               if (val == veml6075_it_ms[i])
+                       break;
+       }
+       if (i < 0)
+               return -EINVAL;
+
+       return regmap_update_bits(data->regmap, VEML6075_CMD_CONF,
+                                 VEML6075_CONF_IT,
+                                 FIELD_PREP(VEML6075_CONF_IT, i));
+}
+
+static int veml6075_write_raw(struct iio_dev *indio_dev,
+                             struct iio_chan_spec const *chan,
+                             int val, int val2, long mask)
+{
+       struct veml6075_data *data = iio_priv(indio_dev);
+
+       switch (mask) {
+       case IIO_CHAN_INFO_INT_TIME:
+               return veml6075_write_int_time_ms(data, val);
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct iio_info veml6075_info = {
+       .read_avail = veml6075_read_avail,
+       .read_raw = veml6075_read_raw,
+       .write_raw = veml6075_write_raw,
+};
+
+static bool veml6075_readable_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case VEML6075_CMD_CONF:
+       case VEML6075_CMD_UVA:
+       case VEML6075_CMD_UVB:
+       case VEML6075_CMD_COMP1:
+       case VEML6075_CMD_COMP2:
+       case VEML6075_CMD_ID:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool veml6075_writable_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case VEML6075_CMD_CONF:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static const struct regmap_config veml6075_regmap_config = {
+       .name = "veml6075",
+       .reg_bits = 8,
+       .val_bits = 16,
+       .max_register = VEML6075_CMD_ID,
+       .readable_reg = veml6075_readable_reg,
+       .writeable_reg = veml6075_writable_reg,
+       .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int veml6075_probe(struct i2c_client *client)
+{
+       struct veml6075_data *data;
+       struct iio_dev *indio_dev;
+       struct regmap *regmap;
+       int config, ret;
+
+       indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+       if (!indio_dev)
+               return -ENOMEM;
+
+       regmap = devm_regmap_init_i2c(client, &veml6075_regmap_config);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       data = iio_priv(indio_dev);
+       data->client = client;
+       data->regmap = regmap;
+
+       mutex_init(&data->lock);
+
+       indio_dev->name = "veml6075";
+       indio_dev->info = &veml6075_info;
+       indio_dev->channels = veml6075_channels;
+       indio_dev->num_channels = ARRAY_SIZE(veml6075_channels);
+       indio_dev->modes = INDIO_DIRECT_MODE;
+
+       ret = devm_regulator_get_enable(&client->dev, "vdd");
+       if (ret < 0)
+               return ret;
+
+       /* default: 100ms integration time, active force enable, shutdown */
+       config = FIELD_PREP(VEML6075_CONF_IT, VEML6075_IT_100_MS) |
+               FIELD_PREP(VEML6075_CONF_AF, VEML6075_AF_ENABLE) |
+               FIELD_PREP(VEML6075_CONF_SD, VEML6075_SD_ENABLE);
+       ret = regmap_write(data->regmap, VEML6075_CMD_CONF, config);
+       if (ret < 0)
+               return ret;
+
+       return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id veml6075_id[] = {
+       { "veml6075" },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, veml6075_id);
+
+static const struct of_device_id veml6075_of_match[] = {
+       { .compatible = "vishay,veml6075" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, veml6075_of_match);
+
+static struct i2c_driver veml6075_driver = {
+       .driver = {
+               .name   = "veml6075",
+               .of_match_table = veml6075_of_match,
+       },
+       .probe = veml6075_probe,
+       .id_table = veml6075_id,
+};
+
+module_i2c_driver(veml6075_driver);
+
+MODULE_AUTHOR("Javier Carrasco <javier.carrasco.cruz@gmail.com>");
+MODULE_DESCRIPTION("Vishay VEML6075 UVA and UVB light sensor driver");
+MODULE_LICENSE("GPL");
index e8c4ca142d21d6638f6fbfe92e98e212e9d6915a..218b1ce076c19ddef897ac473781a15180f0e4d5 100644 (file)
@@ -497,17 +497,13 @@ static int tmag5273_set_operating_mode(struct tmag5273_data *data,
 static void tmag5273_read_device_property(struct tmag5273_data *data)
 {
        struct device *dev = data->dev;
-       const char *str;
        int ret;
 
        data->angle_measurement = TMAG5273_ANGLE_EN_X_Y;
 
-       ret = device_property_read_string(dev, "ti,angle-measurement", &str);
-       if (ret)
-               return;
-
-       ret = match_string(tmag5273_angle_names,
-                          ARRAY_SIZE(tmag5273_angle_names), str);
+       ret = device_property_match_property_string(dev, "ti,angle-measurement",
+                                                   tmag5273_angle_names,
+                                                   ARRAY_SIZE(tmag5273_angle_names));
        if (ret >= 0)
                data->angle_measurement = ret;
 }
index 95efa32e4289b96f6083f7013ed10cf02edc975c..79adfd059c3a7e15a95b129d4fc686f8756f0e64 100644 (file)
@@ -109,6 +109,28 @@ config HP03
          To compile this driver as a module, choose M here: the module
          will be called hp03.
 
+config HSC030PA
+       tristate "Honeywell HSC/SSC TruStability pressure sensor series"
+       depends on (I2C || SPI_MASTER)
+       select HSC030PA_I2C if I2C
+       select HSC030PA_SPI if SPI_MASTER
+       help
+         Say Y here to build support for the Honeywell TruStability
+         HSC and SSC pressure and temperature sensor series.
+
+         To compile this driver as a module, choose M here: the module
+         will be called hsc030pa.
+
+config HSC030PA_I2C
+       tristate
+       depends on HSC030PA
+       depends on I2C
+
+config HSC030PA_SPI
+       tristate
+       depends on HSC030PA
+       depends on SPI_MASTER
+
 config ICP10100
        tristate "InvenSense ICP-101xx pressure and temperature sensor"
        depends on I2C
index 436aec7e65f3ac644b89883673f37ed1ee2067b6..b0f8b94662f2094e1a9a503381ea401c1ea1460b 100644 (file)
@@ -15,6 +15,9 @@ obj-$(CONFIG_DPS310) += dps310.o
 obj-$(CONFIG_IIO_CROS_EC_BARO) += cros_ec_baro.o
 obj-$(CONFIG_HID_SENSOR_PRESS)   += hid-sensor-press.o
 obj-$(CONFIG_HP03) += hp03.o
+obj-$(CONFIG_HSC030PA) += hsc030pa.o
+obj-$(CONFIG_HSC030PA_I2C) += hsc030pa_i2c.o
+obj-$(CONFIG_HSC030PA_SPI) += hsc030pa_spi.o
 obj-$(CONFIG_ICP10100) += icp10100.o
 obj-$(CONFIG_MPL115) += mpl115.o
 obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
index a2ef1373a274e27bb323eb9e4a4bf3291e73334b..fe8734468ed352589a0d09ddc9c3a3509b3d3fad 100644 (file)
@@ -13,6 +13,7 @@
  * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp280-ds001.pdf
  * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bme280-ds002.pdf
  * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp388-ds001.pdf
+ * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp390-ds002.pdf
  * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp581-ds004.pdf
  *
  * Notice:
@@ -794,10 +795,12 @@ static int bmp280_chip_config(struct bmp280_data *data)
 }
 
 static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
+static const u8 bmp280_chip_ids[] = { BMP280_CHIP_ID };
 
 const struct bmp280_chip_info bmp280_chip_info = {
        .id_reg = BMP280_REG_ID,
-       .chip_id = BMP280_CHIP_ID,
+       .chip_id = bmp280_chip_ids,
+       .num_chip_id = ARRAY_SIZE(bmp280_chip_ids),
        .regmap_config = &bmp280_regmap_config,
        .start_up_time = 2000,
        .channels = bmp280_channels,
@@ -846,9 +849,12 @@ static int bme280_chip_config(struct bmp280_data *data)
        return bmp280_chip_config(data);
 }
 
+static const u8 bme280_chip_ids[] = { BME280_CHIP_ID };
+
 const struct bmp280_chip_info bme280_chip_info = {
        .id_reg = BMP280_REG_ID,
-       .chip_id = BME280_CHIP_ID,
+       .chip_id = bme280_chip_ids,
+       .num_chip_id = ARRAY_SIZE(bme280_chip_ids),
        .regmap_config = &bmp280_regmap_config,
        .start_up_time = 2000,
        .channels = bmp280_channels,
@@ -920,7 +926,7 @@ static int bmp380_cmd(struct bmp280_data *data, u8 cmd)
 }
 
 /*
- * Returns temperature in Celsius dregrees, resolution is 0.01º C. Output value of
+ * Returns temperature in Celsius degrees, resolution is 0.01º C. Output value of
  * "5123" equals 51.2º C. t_fine carries fine temperature as global value.
  *
  * Taken from datasheet, Section Appendix 9, "Compensation formula" and repo
@@ -1220,10 +1226,12 @@ static int bmp380_chip_config(struct bmp280_data *data)
 
 static const int bmp380_oversampling_avail[] = { 1, 2, 4, 8, 16, 32 };
 static const int bmp380_iir_filter_coeffs_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128};
+static const u8 bmp380_chip_ids[] = { BMP380_CHIP_ID, BMP390_CHIP_ID };
 
 const struct bmp280_chip_info bmp380_chip_info = {
        .id_reg = BMP380_REG_ID,
-       .chip_id = BMP380_CHIP_ID,
+       .chip_id = bmp380_chip_ids,
+       .num_chip_id = ARRAY_SIZE(bmp380_chip_ids),
        .regmap_config = &bmp380_regmap_config,
        .start_up_time = 2000,
        .channels = bmp380_channels,
@@ -1385,7 +1393,7 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
 
        /*
         * Temperature is returned in Celsius degrees in fractional
-        * form down 2^16. We reescale by x1000 to return milli Celsius
+        * form down 2^16. We rescale by x1000 to return milli Celsius
         * to respect IIO ABI.
         */
        *val = raw_temp * 1000;
@@ -1412,7 +1420,7 @@ static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
        }
        /*
         * Pressure is returned in Pascals in fractional form down 2^16.
-        * We reescale /1000 to convert to kilopascal to respect IIO ABI.
+        * We rescale /1000 to convert to kilopascal to respect IIO ABI.
         */
        *val = raw_press;
        *val2 = 64000; /* 2^6 * 1000 */
@@ -1720,10 +1728,12 @@ static int bmp580_chip_config(struct bmp280_data *data)
 }
 
 static const int bmp580_oversampling_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128 };
+static const u8 bmp580_chip_ids[] = { BMP580_CHIP_ID, BMP580_CHIP_ID_ALT };
 
 const struct bmp280_chip_info bmp580_chip_info = {
        .id_reg = BMP580_REG_CHIP_ID,
-       .chip_id = BMP580_CHIP_ID,
+       .chip_id = bmp580_chip_ids,
+       .num_chip_id = ARRAY_SIZE(bmp580_chip_ids),
        .regmap_config = &bmp580_regmap_config,
        .start_up_time = 2000,
        .channels = bmp380_channels,
@@ -1983,10 +1993,12 @@ static int bmp180_chip_config(struct bmp280_data *data)
 
 static const int bmp180_oversampling_temp_avail[] = { 1 };
 static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
+static const u8 bmp180_chip_ids[] = { BMP180_CHIP_ID };
 
 const struct bmp280_chip_info bmp180_chip_info = {
        .id_reg = BMP280_REG_ID,
-       .chip_id = BMP180_CHIP_ID,
+       .chip_id = bmp180_chip_ids,
+       .num_chip_id = ARRAY_SIZE(bmp180_chip_ids),
        .regmap_config = &bmp180_regmap_config,
        .start_up_time = 2000,
        .channels = bmp280_channels,
@@ -2077,6 +2089,7 @@ int bmp280_common_probe(struct device *dev,
        struct bmp280_data *data;
        struct gpio_desc *gpiod;
        unsigned int chip_id;
+       unsigned int i;
        int ret;
 
        indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
@@ -2142,12 +2155,17 @@ int bmp280_common_probe(struct device *dev,
        ret = regmap_read(regmap, data->chip_info->id_reg, &chip_id);
        if (ret < 0)
                return ret;
-       if (chip_id != data->chip_info->chip_id) {
-               dev_err(dev, "bad chip id: expected %x got %x\n",
-                       data->chip_info->chip_id, chip_id);
-               return -EINVAL;
+
+       for (i = 0; i < data->chip_info->num_chip_id; i++) {
+               if (chip_id == data->chip_info->chip_id[i]) {
+                       dev_info(dev, "0x%x is a known chip id for %s\n", chip_id, name);
+                       break;
+               }
        }
 
+       if (i == data->chip_info->num_chip_id)
+               dev_warn(dev, "bad chip id: 0x%x is not a known chip id\n", chip_id);
+
        if (data->chip_info->preinit) {
                ret = data->chip_info->preinit(data);
                if (ret)
index dbe630ad05b5a8c820230806b46caecf53904d56..34e3bc758493ea605facae8bf28546bfa904a0b4 100644 (file)
@@ -7,13 +7,11 @@
 
 static int bmp280_i2c_probe(struct i2c_client *client)
 {
-       struct regmap *regmap;
-       const struct bmp280_chip_info *chip_info;
        const struct i2c_device_id *id = i2c_client_get_device_id(client);
+       const struct bmp280_chip_info *chip_info;
+       struct regmap *regmap;
 
-       chip_info = device_get_match_data(&client->dev);
-       if (!chip_info)
-               chip_info = (const struct bmp280_chip_info *) id->driver_data;
+       chip_info = i2c_get_match_data(client);
 
        regmap = devm_regmap_init_i2c(client, chip_info->regmap_config);
        if (IS_ERR(regmap)) {
index 1dff9bb7c4e90621cec10c216b6bb5815a857b9a..433d6fac83c4cd95f698e1063a78c36dde79b374 100644 (file)
@@ -14,8 +14,7 @@
 static int bmp280_regmap_spi_write(void *context, const void *data,
                                    size_t count)
 {
-       struct device *dev = context;
-       struct spi_device *spi = to_spi_device(dev);
+       struct spi_device *spi = to_spi_device(context);
        u8 buf[2];
 
        memcpy(buf, data, 2);
@@ -31,8 +30,7 @@ static int bmp280_regmap_spi_write(void *context, const void *data,
 static int bmp280_regmap_spi_read(void *context, const void *reg,
                                   size_t reg_size, void *val, size_t val_size)
 {
-       struct device *dev = context;
-       struct spi_device *spi = to_spi_device(dev);
+       struct spi_device *spi = to_spi_device(context);
 
        return spi_write_then_read(spi, reg, reg_size, val, val_size);
 }
@@ -58,9 +56,7 @@ static int bmp280_spi_probe(struct spi_device *spi)
                return ret;
        }
 
-       chip_info = device_get_match_data(&spi->dev);
-       if (!chip_info)
-               chip_info = (const struct bmp280_chip_info *) id->driver_data;
+       chip_info = spi_get_device_match_data(spi);
 
        regmap = devm_regmap_init(&spi->dev,
                                  &bmp280_regmap_bus,
index 5c0563ce7572518bec64667b7c40f3f6e86d0cb4..4012387d79565631e9c8fdd0152f859c80eac485 100644 (file)
 #define BMP580_CHIP_ID_ALT             0x51
 #define BMP180_CHIP_ID                 0x55
 #define BMP280_CHIP_ID                 0x58
+#define BMP390_CHIP_ID                 0x60
 #define BME280_CHIP_ID                 0x60
 #define BMP280_SOFT_RESET_VAL          0xB6
 
@@ -410,7 +411,7 @@ struct bmp280_data {
                __le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2];
                __be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2];
                u8 bmp380_cal_buf[BMP380_CALIB_REG_COUNT];
-               /* Miscellaneous, endianess-aware data buffers */
+               /* Miscellaneous, endianness-aware data buffers */
                __le16 le16;
                __be16 be16;
        } __aligned(IIO_DMA_MINALIGN);
@@ -418,7 +419,8 @@ struct bmp280_data {
 
 struct bmp280_chip_info {
        unsigned int id_reg;
-       const unsigned int chip_id;
+       const u8 *chip_id;
+       int num_chip_id;
 
        const struct regmap_config *regmap_config;
 
diff --git a/drivers/iio/pressure/hsc030pa.c b/drivers/iio/pressure/hsc030pa.c
new file mode 100644 (file)
index 0000000..d6a51f0
--- /dev/null
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Honeywell TruStability HSC Series pressure/temperature sensor
+ *
+ * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
+ *
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/init.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <asm/unaligned.h>
+
+#include "hsc030pa.h"
+
+/*
+ * HSC_PRESSURE_TRIPLET_LEN - length for the string that defines the
+ * pressure range, measurement unit and type as per the part nomenclature.
+ * Consult honeywell,pressure-triplet in the bindings file for details.
+ */
+#define HSC_PRESSURE_TRIPLET_LEN 6
+#define HSC_STATUS_MASK          GENMASK(7, 6)
+#define HSC_TEMPERATURE_MASK     GENMASK(15, 5)
+#define HSC_PRESSURE_MASK        GENMASK(29, 16)
+
+struct hsc_func_spec {
+       u32 output_min;
+       u32 output_max;
+};
+
+/*
+ * function A: 10% - 90% of 2^14
+ * function B:  5% - 95% of 2^14
+ * function C:  5% - 85% of 2^14
+ * function F:  4% - 94% of 2^14
+ */
+static const struct hsc_func_spec hsc_func_spec[] = {
+       [HSC_FUNCTION_A] = { .output_min = 1638, .output_max = 14746 },
+       [HSC_FUNCTION_B] = { .output_min =  819, .output_max = 15565 },
+       [HSC_FUNCTION_C] = { .output_min =  819, .output_max = 13926 },
+       [HSC_FUNCTION_F] = { .output_min =  655, .output_max = 15401 },
+};
+
+enum hsc_variants {
+       HSC001BA = 0x00, HSC1_6BA = 0x01, HSC2_5BA = 0x02, HSC004BA = 0x03,
+       HSC006BA = 0x04, HSC010BA = 0x05, HSC1_6MD = 0x06, HSC2_5MD = 0x07,
+       HSC004MD = 0x08, HSC006MD = 0x09, HSC010MD = 0x0a, HSC016MD = 0x0b,
+       HSC025MD = 0x0c, HSC040MD = 0x0d, HSC060MD = 0x0e, HSC100MD = 0x0f,
+       HSC160MD = 0x10, HSC250MD = 0x11, HSC400MD = 0x12, HSC600MD = 0x13,
+       HSC001BD = 0x14, HSC1_6BD = 0x15, HSC2_5BD = 0x16, HSC004BD = 0x17,
+       HSC2_5MG = 0x18, HSC004MG = 0x19, HSC006MG = 0x1a, HSC010MG = 0x1b,
+       HSC016MG = 0x1c, HSC025MG = 0x1d, HSC040MG = 0x1e, HSC060MG = 0x1f,
+       HSC100MG = 0x20, HSC160MG = 0x21, HSC250MG = 0x22, HSC400MG = 0x23,
+       HSC600MG = 0x24, HSC001BG = 0x25, HSC1_6BG = 0x26, HSC2_5BG = 0x27,
+       HSC004BG = 0x28, HSC006BG = 0x29, HSC010BG = 0x2a, HSC100KA = 0x2b,
+       HSC160KA = 0x2c, HSC250KA = 0x2d, HSC400KA = 0x2e, HSC600KA = 0x2f,
+       HSC001GA = 0x30, HSC160LD = 0x31, HSC250LD = 0x32, HSC400LD = 0x33,
+       HSC600LD = 0x34, HSC001KD = 0x35, HSC1_6KD = 0x36, HSC2_5KD = 0x37,
+       HSC004KD = 0x38, HSC006KD = 0x39, HSC010KD = 0x3a, HSC016KD = 0x3b,
+       HSC025KD = 0x3c, HSC040KD = 0x3d, HSC060KD = 0x3e, HSC100KD = 0x3f,
+       HSC160KD = 0x40, HSC250KD = 0x41, HSC400KD = 0x42, HSC250LG = 0x43,
+       HSC400LG = 0x44, HSC600LG = 0x45, HSC001KG = 0x46, HSC1_6KG = 0x47,
+       HSC2_5KG = 0x48, HSC004KG = 0x49, HSC006KG = 0x4a, HSC010KG = 0x4b,
+       HSC016KG = 0x4c, HSC025KG = 0x4d, HSC040KG = 0x4e, HSC060KG = 0x4f,
+       HSC100KG = 0x50, HSC160KG = 0x51, HSC250KG = 0x52, HSC400KG = 0x53,
+       HSC600KG = 0x54, HSC001GG = 0x55, HSC015PA = 0x56, HSC030PA = 0x57,
+       HSC060PA = 0x58, HSC100PA = 0x59, HSC150PA = 0x5a, HSC0_5ND = 0x5b,
+       HSC001ND = 0x5c, HSC002ND = 0x5d, HSC004ND = 0x5e, HSC005ND = 0x5f,
+       HSC010ND = 0x60, HSC020ND = 0x61, HSC030ND = 0x62, HSC001PD = 0x63,
+       HSC005PD = 0x64, HSC015PD = 0x65, HSC030PD = 0x66, HSC060PD = 0x67,
+       HSC001NG = 0x68, HSC002NG = 0x69, HSC004NG = 0x6a, HSC005NG = 0x6b,
+       HSC010NG = 0x6c, HSC020NG = 0x6d, HSC030NG = 0x6e, HSC001PG = 0x6f,
+       HSC005PG = 0x70, HSC015PG = 0x71, HSC030PG = 0x72, HSC060PG = 0x73,
+       HSC100PG = 0x74, HSC150PG = 0x75, HSC_VARIANTS_MAX
+};
+
+static const char * const hsc_triplet_variants[HSC_VARIANTS_MAX] = {
+       [HSC001BA] = "001BA", [HSC1_6BA] = "1.6BA", [HSC2_5BA] = "2.5BA",
+       [HSC004BA] = "004BA", [HSC006BA] = "006BA", [HSC010BA] = "010BA",
+       [HSC1_6MD] = "1.6MD", [HSC2_5MD] = "2.5MD", [HSC004MD] = "004MD",
+       [HSC006MD] = "006MD", [HSC010MD] = "010MD", [HSC016MD] = "016MD",
+       [HSC025MD] = "025MD", [HSC040MD] = "040MD", [HSC060MD] = "060MD",
+       [HSC100MD] = "100MD", [HSC160MD] = "160MD", [HSC250MD] = "250MD",
+       [HSC400MD] = "400MD", [HSC600MD] = "600MD", [HSC001BD] = "001BD",
+       [HSC1_6BD] = "1.6BD", [HSC2_5BD] = "2.5BD", [HSC004BD] = "004BD",
+       [HSC2_5MG] = "2.5MG", [HSC004MG] = "004MG", [HSC006MG] = "006MG",
+       [HSC010MG] = "010MG", [HSC016MG] = "016MG", [HSC025MG] = "025MG",
+       [HSC040MG] = "040MG", [HSC060MG] = "060MG", [HSC100MG] = "100MG",
+       [HSC160MG] = "160MG", [HSC250MG] = "250MG", [HSC400MG] = "400MG",
+       [HSC600MG] = "600MG", [HSC001BG] = "001BG", [HSC1_6BG] = "1.6BG",
+       [HSC2_5BG] = "2.5BG", [HSC004BG] = "004BG", [HSC006BG] = "006BG",
+       [HSC010BG] = "010BG", [HSC100KA] = "100KA", [HSC160KA] = "160KA",
+       [HSC250KA] = "250KA", [HSC400KA] = "400KA", [HSC600KA] = "600KA",
+       [HSC001GA] = "001GA", [HSC160LD] = "160LD", [HSC250LD] = "250LD",
+       [HSC400LD] = "400LD", [HSC600LD] = "600LD", [HSC001KD] = "001KD",
+       [HSC1_6KD] = "1.6KD", [HSC2_5KD] = "2.5KD", [HSC004KD] = "004KD",
+       [HSC006KD] = "006KD", [HSC010KD] = "010KD", [HSC016KD] = "016KD",
+       [HSC025KD] = "025KD", [HSC040KD] = "040KD", [HSC060KD] = "060KD",
+       [HSC100KD] = "100KD", [HSC160KD] = "160KD", [HSC250KD] = "250KD",
+       [HSC400KD] = "400KD", [HSC250LG] = "250LG", [HSC400LG] = "400LG",
+       [HSC600LG] = "600LG", [HSC001KG] = "001KG", [HSC1_6KG] = "1.6KG",
+       [HSC2_5KG] = "2.5KG", [HSC004KG] = "004KG", [HSC006KG] = "006KG",
+       [HSC010KG] = "010KG", [HSC016KG] = "016KG", [HSC025KG] = "025KG",
+       [HSC040KG] = "040KG", [HSC060KG] = "060KG", [HSC100KG] = "100KG",
+       [HSC160KG] = "160KG", [HSC250KG] = "250KG", [HSC400KG] = "400KG",
+       [HSC600KG] = "600KG", [HSC001GG] = "001GG", [HSC015PA] = "015PA",
+       [HSC030PA] = "030PA", [HSC060PA] = "060PA", [HSC100PA] = "100PA",
+       [HSC150PA] = "150PA", [HSC0_5ND] = "0.5ND", [HSC001ND] = "001ND",
+       [HSC002ND] = "002ND", [HSC004ND] = "004ND", [HSC005ND] = "005ND",
+       [HSC010ND] = "010ND", [HSC020ND] = "020ND", [HSC030ND] = "030ND",
+       [HSC001PD] = "001PD", [HSC005PD] = "005PD", [HSC015PD] = "015PD",
+       [HSC030PD] = "030PD", [HSC060PD] = "060PD", [HSC001NG] = "001NG",
+       [HSC002NG] = "002NG", [HSC004NG] = "004NG", [HSC005NG] = "005NG",
+       [HSC010NG] = "010NG", [HSC020NG] = "020NG", [HSC030NG] = "030NG",
+       [HSC001PG] = "001PG", [HSC005PG] = "005PG", [HSC015PG] = "015PG",
+       [HSC030PG] = "030PG", [HSC060PG] = "060PG", [HSC100PG] = "100PG",
+       [HSC150PG] = "150PG",
+};
+
+/**
+ * struct hsc_range_config - list of pressure ranges based on nomenclature
+ * @pmin: lowest pressure that can be measured
+ * @pmax: highest pressure that can be measured
+ */
+struct hsc_range_config {
+       const s32 pmin;
+       const s32 pmax;
+};
+
+/* All min max limits have been converted to pascals */
+static const struct hsc_range_config hsc_range_config[HSC_VARIANTS_MAX] = {
+       [HSC001BA] = { .pmin =       0, .pmax =  100000 },
+       [HSC1_6BA] = { .pmin =       0, .pmax =  160000 },
+       [HSC2_5BA] = { .pmin =       0, .pmax =  250000 },
+       [HSC004BA] = { .pmin =       0, .pmax =  400000 },
+       [HSC006BA] = { .pmin =       0, .pmax =  600000 },
+       [HSC010BA] = { .pmin =       0, .pmax = 1000000 },
+       [HSC1_6MD] = { .pmin =    -160, .pmax =     160 },
+       [HSC2_5MD] = { .pmin =    -250, .pmax =     250 },
+       [HSC004MD] = { .pmin =    -400, .pmax =     400 },
+       [HSC006MD] = { .pmin =    -600, .pmax =     600 },
+       [HSC010MD] = { .pmin =   -1000, .pmax =    1000 },
+       [HSC016MD] = { .pmin =   -1600, .pmax =    1600 },
+       [HSC025MD] = { .pmin =   -2500, .pmax =    2500 },
+       [HSC040MD] = { .pmin =   -4000, .pmax =    4000 },
+       [HSC060MD] = { .pmin =   -6000, .pmax =    6000 },
+       [HSC100MD] = { .pmin =  -10000, .pmax =   10000 },
+       [HSC160MD] = { .pmin =  -16000, .pmax =   16000 },
+       [HSC250MD] = { .pmin =  -25000, .pmax =   25000 },
+       [HSC400MD] = { .pmin =  -40000, .pmax =   40000 },
+       [HSC600MD] = { .pmin =  -60000, .pmax =   60000 },
+       [HSC001BD] = { .pmin = -100000, .pmax =  100000 },
+       [HSC1_6BD] = { .pmin = -160000, .pmax =  160000 },
+       [HSC2_5BD] = { .pmin = -250000, .pmax =  250000 },
+       [HSC004BD] = { .pmin = -400000, .pmax =  400000 },
+       [HSC2_5MG] = { .pmin =       0, .pmax =     250 },
+       [HSC004MG] = { .pmin =       0, .pmax =     400 },
+       [HSC006MG] = { .pmin =       0, .pmax =     600 },
+       [HSC010MG] = { .pmin =       0, .pmax =    1000 },
+       [HSC016MG] = { .pmin =       0, .pmax =    1600 },
+       [HSC025MG] = { .pmin =       0, .pmax =    2500 },
+       [HSC040MG] = { .pmin =       0, .pmax =    4000 },
+       [HSC060MG] = { .pmin =       0, .pmax =    6000 },
+       [HSC100MG] = { .pmin =       0, .pmax =   10000 },
+       [HSC160MG] = { .pmin =       0, .pmax =   16000 },
+       [HSC250MG] = { .pmin =       0, .pmax =   25000 },
+       [HSC400MG] = { .pmin =       0, .pmax =   40000 },
+       [HSC600MG] = { .pmin =       0, .pmax =   60000 },
+       [HSC001BG] = { .pmin =       0, .pmax =  100000 },
+       [HSC1_6BG] = { .pmin =       0, .pmax =  160000 },
+       [HSC2_5BG] = { .pmin =       0, .pmax =  250000 },
+       [HSC004BG] = { .pmin =       0, .pmax =  400000 },
+       [HSC006BG] = { .pmin =       0, .pmax =  600000 },
+       [HSC010BG] = { .pmin =       0, .pmax = 1000000 },
+       [HSC100KA] = { .pmin =       0, .pmax =  100000 },
+       [HSC160KA] = { .pmin =       0, .pmax =  160000 },
+       [HSC250KA] = { .pmin =       0, .pmax =  250000 },
+       [HSC400KA] = { .pmin =       0, .pmax =  400000 },
+       [HSC600KA] = { .pmin =       0, .pmax =  600000 },
+       [HSC001GA] = { .pmin =       0, .pmax = 1000000 },
+       [HSC160LD] = { .pmin =    -160, .pmax =     160 },
+       [HSC250LD] = { .pmin =    -250, .pmax =     250 },
+       [HSC400LD] = { .pmin =    -400, .pmax =     400 },
+       [HSC600LD] = { .pmin =    -600, .pmax =     600 },
+       [HSC001KD] = { .pmin =   -1000, .pmax =    1000 },
+       [HSC1_6KD] = { .pmin =   -1600, .pmax =    1600 },
+       [HSC2_5KD] = { .pmin =   -2500, .pmax =    2500 },
+       [HSC004KD] = { .pmin =   -4000, .pmax =    4000 },
+       [HSC006KD] = { .pmin =   -6000, .pmax =    6000 },
+       [HSC010KD] = { .pmin =  -10000, .pmax =   10000 },
+       [HSC016KD] = { .pmin =  -16000, .pmax =   16000 },
+       [HSC025KD] = { .pmin =  -25000, .pmax =   25000 },
+       [HSC040KD] = { .pmin =  -40000, .pmax =   40000 },
+       [HSC060KD] = { .pmin =  -60000, .pmax =   60000 },
+       [HSC100KD] = { .pmin = -100000, .pmax =  100000 },
+       [HSC160KD] = { .pmin = -160000, .pmax =  160000 },
+       [HSC250KD] = { .pmin = -250000, .pmax =  250000 },
+       [HSC400KD] = { .pmin = -400000, .pmax =  400000 },
+       [HSC250LG] = { .pmin =       0, .pmax =     250 },
+       [HSC400LG] = { .pmin =       0, .pmax =     400 },
+       [HSC600LG] = { .pmin =       0, .pmax =     600 },
+       [HSC001KG] = { .pmin =       0, .pmax =    1000 },
+       [HSC1_6KG] = { .pmin =       0, .pmax =    1600 },
+       [HSC2_5KG] = { .pmin =       0, .pmax =    2500 },
+       [HSC004KG] = { .pmin =       0, .pmax =    4000 },
+       [HSC006KG] = { .pmin =       0, .pmax =    6000 },
+       [HSC010KG] = { .pmin =       0, .pmax =   10000 },
+       [HSC016KG] = { .pmin =       0, .pmax =   16000 },
+       [HSC025KG] = { .pmin =       0, .pmax =   25000 },
+       [HSC040KG] = { .pmin =       0, .pmax =   40000 },
+       [HSC060KG] = { .pmin =       0, .pmax =   60000 },
+       [HSC100KG] = { .pmin =       0, .pmax =  100000 },
+       [HSC160KG] = { .pmin =       0, .pmax =  160000 },
+       [HSC250KG] = { .pmin =       0, .pmax =  250000 },
+       [HSC400KG] = { .pmin =       0, .pmax =  400000 },
+       [HSC600KG] = { .pmin =       0, .pmax =  600000 },
+       [HSC001GG] = { .pmin =       0, .pmax = 1000000 },
+       [HSC015PA] = { .pmin =       0, .pmax =  103421 },
+       [HSC030PA] = { .pmin =       0, .pmax =  206843 },
+       [HSC060PA] = { .pmin =       0, .pmax =  413685 },
+       [HSC100PA] = { .pmin =       0, .pmax =  689476 },
+       [HSC150PA] = { .pmin =       0, .pmax = 1034214 },
+       [HSC0_5ND] = { .pmin =    -125, .pmax =     125 },
+       [HSC001ND] = { .pmin =    -249, .pmax =     249 },
+       [HSC002ND] = { .pmin =    -498, .pmax =     498 },
+       [HSC004ND] = { .pmin =    -996, .pmax =     996 },
+       [HSC005ND] = { .pmin =   -1245, .pmax =    1245 },
+       [HSC010ND] = { .pmin =   -2491, .pmax =    2491 },
+       [HSC020ND] = { .pmin =   -4982, .pmax =    4982 },
+       [HSC030ND] = { .pmin =   -7473, .pmax =    7473 },
+       [HSC001PD] = { .pmin =   -6895, .pmax =    6895 },
+       [HSC005PD] = { .pmin =  -34474, .pmax =   34474 },
+       [HSC015PD] = { .pmin = -103421, .pmax =  103421 },
+       [HSC030PD] = { .pmin = -206843, .pmax =  206843 },
+       [HSC060PD] = { .pmin = -413685, .pmax =  413685 },
+       [HSC001NG] = { .pmin =       0, .pmax =     249 },
+       [HSC002NG] = { .pmin =       0, .pmax =     498 },
+       [HSC004NG] = { .pmin =       0, .pmax =     996 },
+       [HSC005NG] = { .pmin =       0, .pmax =    1245 },
+       [HSC010NG] = { .pmin =       0, .pmax =    2491 },
+       [HSC020NG] = { .pmin =       0, .pmax =    4982 },
+       [HSC030NG] = { .pmin =       0, .pmax =    7473 },
+       [HSC001PG] = { .pmin =       0, .pmax =    6895 },
+       [HSC005PG] = { .pmin =       0, .pmax =   34474 },
+       [HSC015PG] = { .pmin =       0, .pmax =  103421 },
+       [HSC030PG] = { .pmin =       0, .pmax =  206843 },
+       [HSC060PG] = { .pmin =       0, .pmax =  413685 },
+       [HSC100PG] = { .pmin =       0, .pmax =  689476 },
+       [HSC150PG] = { .pmin =       0, .pmax = 1034214 },
+};
+
+/**
+ * hsc_measurement_is_valid() - validate last conversion via status bits
+ * @data: structure containing instantiated sensor data
+ * Return: true only if both status bits are zero
+ *
+ * the two MSB from the first transferred byte contain a status code
+ *   00 - normal operation, valid data
+ *   01 - device in factory programming mode
+ *   10 - stale data
+ *   11 - diagnostic condition
+ */
+static bool hsc_measurement_is_valid(struct hsc_data *data)
+{
+       return !(data->buffer[0] & HSC_STATUS_MASK);
+}
+
+static int hsc_get_measurement(struct hsc_data *data)
+{
+       const struct hsc_chip_data *chip = data->chip;
+       int ret;
+
+       ret = data->recv_cb(data);
+       if (ret < 0)
+               return ret;
+
+       data->is_valid = chip->valid(data);
+       if (!data->is_valid)
+               return -EAGAIN;
+
+       return 0;
+}
+
+/*
+ * IIO ABI expects
+ * value = (conv + offset) * scale
+ *
+ * datasheet provides the following formula for determining the temperature
+ * temp[C] = conv * a + b
+ *   where a = 200/2047; b = -50
+ *
+ *  temp[C] = (conv + (b/a)) * a * (1000)
+ *  =>
+ *  scale = a * 1000 = .097703957 * 1000 = 97.703957
+ *  offset = b/a = -50 / .097703957 = -50000000 / 97704
+ *
+ *  based on the datasheet
+ *  pressure = (conv - Omin) * Q + Pmin =
+ *          ((conv - Omin) + Pmin/Q) * Q
+ *  =>
+ *  scale = Q = (Pmax - Pmin) / (Omax - Omin)
+ *  offset = Pmin/Q - Omin = Pmin * (Omax - Omin) / (Pmax - Pmin) - Omin
+ */
+static int hsc_read_raw(struct iio_dev *indio_dev,
+                       struct iio_chan_spec const *channel, int *val,
+                       int *val2, long mask)
+{
+       struct hsc_data *data = iio_priv(indio_dev);
+       int ret;
+       u32 recvd;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               ret = hsc_get_measurement(data);
+               if (ret)
+                       return ret;
+
+               recvd = get_unaligned_be32(data->buffer);
+               switch (channel->type) {
+               case IIO_PRESSURE:
+                       *val = FIELD_GET(HSC_PRESSURE_MASK, recvd);
+                       return IIO_VAL_INT;
+               case IIO_TEMP:
+                       *val = FIELD_GET(HSC_TEMPERATURE_MASK, recvd);
+                       return IIO_VAL_INT;
+               default:
+                       return -EINVAL;
+               }
+
+       case IIO_CHAN_INFO_SCALE:
+               switch (channel->type) {
+               case IIO_TEMP:
+                       *val = 97;
+                       *val2 = 703957;
+                       return IIO_VAL_INT_PLUS_MICRO;
+               case IIO_PRESSURE:
+                       *val = data->p_scale;
+                       *val2 = data->p_scale_dec;
+                       return IIO_VAL_INT_PLUS_NANO;
+               default:
+                       return -EINVAL;
+               }
+
+       case IIO_CHAN_INFO_OFFSET:
+               switch (channel->type) {
+               case IIO_TEMP:
+                       *val = -50000000;
+                       *val2 = 97704;
+                       return IIO_VAL_FRACTIONAL;
+               case IIO_PRESSURE:
+                       *val = data->p_offset;
+                       *val2 = data->p_offset_dec;
+                       return IIO_VAL_INT_PLUS_MICRO;
+               default:
+                       return -EINVAL;
+               }
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct iio_chan_spec hsc_channels[] = {
+       {
+               .type = IIO_PRESSURE,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+                                     BIT(IIO_CHAN_INFO_SCALE) |
+                                     BIT(IIO_CHAN_INFO_OFFSET),
+       },
+       {
+               .type = IIO_TEMP,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+                                     BIT(IIO_CHAN_INFO_SCALE) |
+                                     BIT(IIO_CHAN_INFO_OFFSET),
+       },
+};
+
+static const struct iio_info hsc_info = {
+       .read_raw = hsc_read_raw,
+};
+
+static const struct hsc_chip_data hsc_chip = {
+       .valid = hsc_measurement_is_valid,
+       .channels = hsc_channels,
+       .num_channels = ARRAY_SIZE(hsc_channels),
+};
+
+int hsc_common_probe(struct device *dev, hsc_recv_fn recv)
+{
+       struct hsc_data *hsc;
+       struct iio_dev *indio_dev;
+       const char *triplet;
+       u64 tmp;
+       int ret;
+
+       indio_dev = devm_iio_device_alloc(dev, sizeof(*hsc));
+       if (!indio_dev)
+               return -ENOMEM;
+
+       hsc = iio_priv(indio_dev);
+
+       hsc->chip = &hsc_chip;
+       hsc->recv_cb = recv;
+       hsc->dev = dev;
+
+       ret = device_property_read_u32(dev, "honeywell,transfer-function",
+                                      &hsc->function);
+       if (ret)
+               return dev_err_probe(dev, ret,
+                           "honeywell,transfer-function could not be read\n");
+       if (hsc->function > HSC_FUNCTION_F)
+               return dev_err_probe(dev, -EINVAL,
+                                    "honeywell,transfer-function %d invalid\n",
+                                    hsc->function);
+
+       ret = device_property_read_string(dev, "honeywell,pressure-triplet",
+                                         &triplet);
+       if (ret)
+               return dev_err_probe(dev, ret,
+                            "honeywell,pressure-triplet could not be read\n");
+
+       if (str_has_prefix(triplet, "NA")) {
+               ret = device_property_read_u32(dev, "honeywell,pmin-pascal",
+                                              &hsc->pmin);
+               if (ret)
+                       return dev_err_probe(dev, ret,
+                                 "honeywell,pmin-pascal could not be read\n");
+
+               ret = device_property_read_u32(dev, "honeywell,pmax-pascal",
+                                              &hsc->pmax);
+               if (ret)
+                       return dev_err_probe(dev, ret,
+                                 "honeywell,pmax-pascal could not be read\n");
+       } else {
+               ret = device_property_match_property_string(dev,
+                                                 "honeywell,pressure-triplet",
+                                                 hsc_triplet_variants,
+                                                 HSC_VARIANTS_MAX);
+               if (ret < 0)
+                       return dev_err_probe(dev, -EINVAL,
+                                   "honeywell,pressure-triplet is invalid\n");
+
+               hsc->pmin = hsc_range_config[ret].pmin;
+               hsc->pmax = hsc_range_config[ret].pmax;
+       }
+
+       if (hsc->pmin >= hsc->pmax)
+               return dev_err_probe(dev, -EINVAL,
+                                    "pressure limits are invalid\n");
+
+       ret = devm_regulator_get_enable(dev, "vdd");
+       if (ret)
+               return dev_err_probe(dev, ret, "can't get vdd supply\n");
+
+       hsc->outmin = hsc_func_spec[hsc->function].output_min;
+       hsc->outmax = hsc_func_spec[hsc->function].output_max;
+
+       tmp = div_s64(((s64)(hsc->pmax - hsc->pmin)) * MICRO,
+                     hsc->outmax - hsc->outmin);
+       hsc->p_scale = div_s64_rem(tmp, NANO, &hsc->p_scale_dec);
+       tmp = div_s64(((s64)hsc->pmin * (s64)(hsc->outmax - hsc->outmin)) * MICRO,
+                     hsc->pmax - hsc->pmin);
+       tmp -= (s64)hsc->outmin * MICRO;
+       hsc->p_offset = div_s64_rem(tmp, MICRO, &hsc->p_offset_dec);
+
+       indio_dev->name = "hsc030pa";
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->info = &hsc_info;
+       indio_dev->channels = hsc->chip->channels;
+       indio_dev->num_channels = hsc->chip->num_channels;
+
+       return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_NS(hsc_common_probe, IIO_HONEYWELL_HSC030PA);
+
+MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>");
+MODULE_DESCRIPTION("Honeywell HSC and SSC pressure sensor core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/hsc030pa.h b/drivers/iio/pressure/hsc030pa.h
new file mode 100644 (file)
index 0000000..d20420d
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Honeywell TruStability HSC Series pressure/temperature sensor
+ *
+ * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
+ */
+
+#ifndef _HSC030PA_H
+#define _HSC030PA_H
+
+#include <linux/types.h>
+
+#define HSC_REG_MEASUREMENT_RD_SIZE 4
+
+struct device;
+
+struct iio_chan_spec;
+struct iio_dev;
+
+struct hsc_data;
+struct hsc_chip_data;
+
+typedef int (*hsc_recv_fn)(struct hsc_data *);
+
+/**
+ * struct hsc_data - Honeywell HSC/SSC sensor private data
+ * @dev: current device structure
+ * @chip: structure containing chip's channel properties
+ * @recv_cb: function that implements the chip reads
+ * @is_valid: true if last transfer has been validated
+ * @pmin: minimum measurable pressure limit
+ * @pmax: maximum measurable pressure limit
+ * @outmin: minimum raw pressure in counts (based on transfer function)
+ * @outmax: maximum raw pressure in counts (based on transfer function)
+ * @function: transfer function
+ * @p_scale: pressure scale
+ * @p_scale_dec: pressure scale, decimal places
+ * @p_offset: pressure offset
+ * @p_offset_dec: pressure offset, decimal places
+ * @buffer: raw conversion data
+ */
+struct hsc_data {
+       struct device *dev;
+       const struct hsc_chip_data *chip;
+       hsc_recv_fn recv_cb;
+       bool is_valid;
+       s32 pmin;
+       s32 pmax;
+       u32 outmin;
+       u32 outmax;
+       u32 function;
+       s64 p_scale;
+       s32 p_scale_dec;
+       s64 p_offset;
+       s32 p_offset_dec;
+       u8 buffer[HSC_REG_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
+};
+
+struct hsc_chip_data {
+       bool (*valid)(struct hsc_data *data);
+       const struct iio_chan_spec *channels;
+       u8 num_channels;
+};
+
+enum hsc_func_id {
+       HSC_FUNCTION_A,
+       HSC_FUNCTION_B,
+       HSC_FUNCTION_C,
+       HSC_FUNCTION_F,
+};
+
+int hsc_common_probe(struct device *dev, hsc_recv_fn recv);
+
+#endif
diff --git a/drivers/iio/pressure/hsc030pa_i2c.c b/drivers/iio/pressure/hsc030pa_i2c.c
new file mode 100644 (file)
index 0000000..e2b524b
--- /dev/null
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Honeywell TruStability HSC Series pressure/temperature sensor
+ *
+ * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
+ *
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf [hsc]
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/common/documents/sps-siot-i2c-comms-digital-output-pressure-sensors-tn-008201-3-en-ciid-45841.pdf [i2c related]
+ */
+
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+
+#include "hsc030pa.h"
+
+static int hsc_i2c_recv(struct hsc_data *data)
+{
+       struct i2c_client *client = to_i2c_client(data->dev);
+       struct i2c_msg msg;
+       int ret;
+
+       msg.addr = client->addr;
+       msg.flags = client->flags | I2C_M_RD;
+       msg.len = HSC_REG_MEASUREMENT_RD_SIZE;
+       msg.buf = data->buffer;
+
+       ret = i2c_transfer(client->adapter, &msg, 1);
+
+       return (ret == 2) ? 0 : ret;
+}
+
+static int hsc_i2c_probe(struct i2c_client *client)
+{
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+               return -EOPNOTSUPP;
+
+       return hsc_common_probe(&client->dev, hsc_i2c_recv);
+}
+
+static const struct of_device_id hsc_i2c_match[] = {
+       { .compatible = "honeywell,hsc030pa" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, hsc_i2c_match);
+
+static const struct i2c_device_id hsc_i2c_id[] = {
+       { "hsc030pa" },
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, hsc_i2c_id);
+
+static struct i2c_driver hsc_i2c_driver = {
+       .driver = {
+               .name = "hsc030pa",
+               .of_match_table = hsc_i2c_match,
+       },
+       .probe = hsc_i2c_probe,
+       .id_table = hsc_i2c_id,
+};
+module_i2c_driver(hsc_i2c_driver);
+
+MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>");
+MODULE_DESCRIPTION("Honeywell HSC and SSC pressure sensor i2c driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HONEYWELL_HSC030PA);
diff --git a/drivers/iio/pressure/hsc030pa_spi.c b/drivers/iio/pressure/hsc030pa_spi.c
new file mode 100644 (file)
index 0000000..a719bad
--- /dev/null
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Honeywell TruStability HSC Series pressure/temperature sensor
+ *
+ * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
+ *
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/stddef.h>
+
+#include <linux/iio/iio.h>
+
+#include "hsc030pa.h"
+
+/*
+ * Fetch one raw measurement frame over SPI: a read-only transfer of
+ * HSC_REG_MEASUREMENT_RD_SIZE bytes into data->buffer (tx_buf stays NULL,
+ * nothing is transmitted).  Returns 0 on success or a negative error code
+ * from spi_sync_transfer().
+ */
+static int hsc_spi_recv(struct hsc_data *data)
+{
+       struct spi_device *spi = to_spi_device(data->dev);
+       struct spi_transfer xfer = {
+               .tx_buf = NULL,
+               .rx_buf = data->buffer,
+               .len = HSC_REG_MEASUREMENT_RD_SIZE,
+       };
+
+       return spi_sync_transfer(spi, &xfer, 1);
+}
+
+/* Bind the SPI transport to the transport-agnostic HSC core driver. */
+static int hsc_spi_probe(struct spi_device *spi)
+{
+       return hsc_common_probe(&spi->dev, hsc_spi_recv);
+}
+
+static const struct of_device_id hsc_spi_match[] = {
+       { .compatible = "honeywell,hsc030pa" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, hsc_spi_match);
+
+static const struct spi_device_id hsc_spi_id[] = {
+       { "hsc030pa" },
+       {}
+};
+MODULE_DEVICE_TABLE(spi, hsc_spi_id);
+
+static struct spi_driver hsc_spi_driver = {
+       .driver = {
+               .name = "hsc030pa",
+               .of_match_table = hsc_spi_match,
+       },
+       .probe = hsc_spi_probe,
+       .id_table = hsc_spi_id,
+};
+module_spi_driver(hsc_spi_driver);
+
+MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>");
+MODULE_DESCRIPTION("Honeywell HSC and SSC pressure sensor spi driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HONEYWELL_HSC030PA);
index bdff91f6b1a3731178605a9b1c84c43d33d23ecb..323ac6dac90e1b5918549c61009db6398b84261d 100644 (file)
@@ -7,7 +7,6 @@
 
 #include <asm/unaligned.h>
 #include <linux/bitfield.h>
-#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/regmap.h>
index 438f9c9aba6eaf9b8135bd6bb697798db72c25a4..ac2ed2da21ccc99f8f830458e7b83becef504eda 100644 (file)
@@ -888,7 +888,6 @@ sx9324_get_default_reg(struct device *dev, int idx,
        char prop[] = SX9324_PROXRAW_DEF;
        u32 start = 0, raw = 0, pos = 0;
        int ret, count, ph, pin;
-       const char *res;
 
        memcpy(reg_def, &sx9324_default_regs[idx], sizeof(*reg_def));
 
@@ -915,24 +914,21 @@ sx9324_get_default_reg(struct device *dev, int idx,
                reg_def->def = raw;
                break;
        case SX9324_REG_AFE_CTRL0:
-               ret = device_property_read_string(dev,
-                               "semtech,cs-idle-sleep", &res);
-               if (!ret)
-                       ret = match_string(sx9324_csidle, ARRAY_SIZE(sx9324_csidle), res);
+               ret = device_property_match_property_string(dev, "semtech,cs-idle-sleep",
+                                                           sx9324_csidle,
+                                                           ARRAY_SIZE(sx9324_csidle));
                if (ret >= 0) {
                        reg_def->def &= ~SX9324_REG_AFE_CTRL0_CSIDLE_MASK;
                        reg_def->def |= ret << SX9324_REG_AFE_CTRL0_CSIDLE_SHIFT;
                }
 
-               ret = device_property_read_string(dev,
-                               "semtech,int-comp-resistor", &res);
-               if (ret)
-                       break;
-               ret = match_string(sx9324_rints, ARRAY_SIZE(sx9324_rints), res);
-               if (ret < 0)
-                       break;
-               reg_def->def &= ~SX9324_REG_AFE_CTRL0_RINT_MASK;
-               reg_def->def |= ret << SX9324_REG_AFE_CTRL0_RINT_SHIFT;
+               ret = device_property_match_property_string(dev, "semtech,int-comp-resistor",
+                                                           sx9324_rints,
+                                                           ARRAY_SIZE(sx9324_rints));
+               if (ret >= 0) {
+                       reg_def->def &= ~SX9324_REG_AFE_CTRL0_RINT_MASK;
+                       reg_def->def |= ret << SX9324_REG_AFE_CTRL0_RINT_SHIFT;
+               }
                break;
        case SX9324_REG_AFE_CTRL4:
        case SX9324_REG_AFE_CTRL7:
index 1bd1b950e7cc535d3d9ab11908d47c7f4f983735..a414eef12e5e317a25f856ece7f3f7326b94f1fe 100644 (file)
@@ -141,7 +141,7 @@ struct ad2s1210_state {
        struct spi_device *sdev;
        /** GPIO pin connected to SAMPLE line. */
        struct gpio_desc *sample_gpio;
-       /** GPIO pins connected to A0 and A1 lines. */
+       /** GPIO pins connected to A0 and A1 lines (optional). */
        struct gpio_descs *mode_gpios;
        /** Used to access config registers. */
        struct regmap *regmap;
@@ -149,6 +149,8 @@ struct ad2s1210_state {
        unsigned long clkin_hz;
        /** Available raw hysteresis values based on resolution. */
        int hysteresis_available[2];
+       /* adi,fixed-mode property - only valid when mode_gpios == NULL. */
+       enum ad2s1210_mode fixed_mode;
        /** The selected resolution */
        enum ad2s1210_resolution resolution;
        /** Copy of fault register from the previous read. */
@@ -175,6 +177,9 @@ static int ad2s1210_set_mode(struct ad2s1210_state *st, enum ad2s1210_mode mode)
        struct gpio_descs *gpios = st->mode_gpios;
        DECLARE_BITMAP(bitmap, 2);
 
+       if (!gpios)
+               return mode == st->fixed_mode ? 0 : -EOPNOTSUPP;
+
        bitmap[0] = mode;
 
        return gpiod_set_array_value(gpios->ndescs, gpios->desc, gpios->info,
@@ -276,7 +281,8 @@ static int ad2s1210_regmap_reg_read(void *context, unsigned int reg,
         * parity error. The fault register is read-only and the D7 bit means
         * something else there.
         */
-       if (reg != AD2S1210_REG_FAULT && st->rx[1] & AD2S1210_ADDRESS_DATA)
+       if ((reg > AD2S1210_REG_VELOCITY_LSB && reg != AD2S1210_REG_FAULT)
+            && st->rx[1] & AD2S1210_ADDRESS_DATA)
                return -EBADMSG;
 
        *val = st->rx[1];
@@ -450,21 +456,53 @@ static int ad2s1210_single_conversion(struct iio_dev *indio_dev,
        ad2s1210_toggle_sample_line(st);
        timestamp = iio_get_time_ns(indio_dev);
 
-       switch (chan->type) {
-       case IIO_ANGL:
-               ret = ad2s1210_set_mode(st, MOD_POS);
-               break;
-       case IIO_ANGL_VEL:
-               ret = ad2s1210_set_mode(st, MOD_VEL);
-               break;
-       default:
-               return -EINVAL;
+       if (st->fixed_mode == MOD_CONFIG) {
+               unsigned int reg_val;
+
+               switch (chan->type) {
+               case IIO_ANGL:
+                       ret = regmap_bulk_read(st->regmap,
+                                              AD2S1210_REG_POSITION_MSB,
+                                              &st->sample.raw, 2);
+                       if (ret < 0)
+                               return ret;
+
+                       break;
+               case IIO_ANGL_VEL:
+                       ret = regmap_bulk_read(st->regmap,
+                                              AD2S1210_REG_VELOCITY_MSB,
+                                              &st->sample.raw, 2);
+                       if (ret < 0)
+                               return ret;
+
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               ret = regmap_read(st->regmap, AD2S1210_REG_FAULT, &reg_val);
+               if (ret < 0)
+                       return ret;
+
+               st->sample.fault = reg_val;
+       } else {
+               switch (chan->type) {
+               case IIO_ANGL:
+                       ret = ad2s1210_set_mode(st, MOD_POS);
+                       break;
+               case IIO_ANGL_VEL:
+                       ret = ad2s1210_set_mode(st, MOD_VEL);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               if (ret < 0)
+                       return ret;
+
+               ret = spi_read(st->sdev, &st->sample, 3);
+               if (ret < 0)
+                       return ret;
        }
-       if (ret < 0)
-               return ret;
-       ret = spi_read(st->sdev, &st->sample, 3);
-       if (ret < 0)
-               return ret;
 
        switch (chan->type) {
        case IIO_ANGL:
@@ -1252,27 +1290,53 @@ static irqreturn_t ad2s1210_trigger_handler(int irq, void *p)
        ad2s1210_toggle_sample_line(st);
 
        if (test_bit(0, indio_dev->active_scan_mask)) {
-               ret = ad2s1210_set_mode(st, MOD_POS);
-               if (ret < 0)
-                       goto error_ret;
-
-               ret = spi_read(st->sdev, &st->sample, 3);
-               if (ret < 0)
-                       goto error_ret;
+               if (st->fixed_mode == MOD_CONFIG) {
+                       ret = regmap_bulk_read(st->regmap,
+                                              AD2S1210_REG_POSITION_MSB,
+                                              &st->sample.raw, 2);
+                       if (ret < 0)
+                               goto error_ret;
+               } else {
+                       ret = ad2s1210_set_mode(st, MOD_POS);
+                       if (ret < 0)
+                               goto error_ret;
+
+                       ret = spi_read(st->sdev, &st->sample, 3);
+                       if (ret < 0)
+                               goto error_ret;
+               }
 
                memcpy(&st->scan.chan[chan++], &st->sample.raw, 2);
        }
 
        if (test_bit(1, indio_dev->active_scan_mask)) {
-               ret = ad2s1210_set_mode(st, MOD_VEL);
-               if (ret < 0)
-                       goto error_ret;
+               if (st->fixed_mode == MOD_CONFIG) {
+                       ret = regmap_bulk_read(st->regmap,
+                                              AD2S1210_REG_VELOCITY_MSB,
+                                              &st->sample.raw, 2);
+                       if (ret < 0)
+                               goto error_ret;
+               } else {
+                       ret = ad2s1210_set_mode(st, MOD_VEL);
+                       if (ret < 0)
+                               goto error_ret;
+
+                       ret = spi_read(st->sdev, &st->sample, 3);
+                       if (ret < 0)
+                               goto error_ret;
+               }
 
-               ret = spi_read(st->sdev, &st->sample, 3);
+               memcpy(&st->scan.chan[chan++], &st->sample.raw, 2);
+       }
+
+       if (st->fixed_mode == MOD_CONFIG) {
+               unsigned int reg_val;
+
+               ret = regmap_read(st->regmap, AD2S1210_REG_FAULT, &reg_val);
                if (ret < 0)
-                       goto error_ret;
+                       return ret;
 
-               memcpy(&st->scan.chan[chan++], &st->sample.raw, 2);
+               st->sample.fault = reg_val;
        }
 
        ad2s1210_push_events(indio_dev, st->sample.fault, pf->timestamp);
@@ -1299,9 +1363,24 @@ static const struct iio_info ad2s1210_info = {
 static int ad2s1210_setup_properties(struct ad2s1210_state *st)
 {
        struct device *dev = &st->sdev->dev;
+       const char *str_val;
        u32 val;
        int ret;
 
+       ret = device_property_read_string(dev, "adi,fixed-mode", &str_val);
+       if (ret == -EINVAL)
+               st->fixed_mode = -1;
+       else if (ret < 0)
+               return dev_err_probe(dev, ret,
+                       "failed to read adi,fixed-mode property\n");
+       else {
+               if (strcmp(str_val, "config"))
+                       return dev_err_probe(dev, -EINVAL,
+                               "only adi,fixed-mode=\"config\" is supported\n");
+
+               st->fixed_mode = MOD_CONFIG;
+       }
+
        ret = device_property_read_u32(dev, "assigned-resolution-bits", &val);
        if (ret < 0)
                return dev_err_probe(dev, ret,
@@ -1347,6 +1426,7 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
 {
        struct device *dev = &st->sdev->dev;
        struct gpio_descs *resolution_gpios;
+       struct gpio_desc *reset_gpio;
        DECLARE_BITMAP(bitmap, 2);
        int ret;
 
@@ -1357,12 +1437,21 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
                                     "failed to request sample GPIO\n");
 
        /* both pins high means that we start in config mode */
-       st->mode_gpios = devm_gpiod_get_array(dev, "mode", GPIOD_OUT_HIGH);
+       st->mode_gpios = devm_gpiod_get_array_optional(dev, "mode",
+                                                      GPIOD_OUT_HIGH);
        if (IS_ERR(st->mode_gpios))
                return dev_err_probe(dev, PTR_ERR(st->mode_gpios),
                                     "failed to request mode GPIOs\n");
 
-       if (st->mode_gpios->ndescs != 2)
+       if (!st->mode_gpios && st->fixed_mode == -1)
+               return dev_err_probe(dev, -EINVAL,
+                       "must specify either adi,fixed-mode or mode-gpios\n");
+
+       if (st->mode_gpios && st->fixed_mode != -1)
+               return dev_err_probe(dev, -EINVAL,
+                       "must specify only one of adi,fixed-mode or mode-gpios\n");
+
+       if (st->mode_gpios && st->mode_gpios->ndescs != 2)
                return dev_err_probe(dev, -EINVAL,
                                     "requires exactly 2 mode-gpios\n");
 
@@ -1393,6 +1482,17 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
                                             "failed to set resolution gpios\n");
        }
 
+       /* If the optional reset GPIO is present, toggle it to do a hard reset. */
+       reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(reset_gpio))
+               return dev_err_probe(dev, PTR_ERR(reset_gpio),
+                                    "failed to request reset GPIO\n");
+
+       if (reset_gpio) {
+               udelay(10);
+               gpiod_set_value(reset_gpio, 0);
+       }
+
        return 0;
 }
 
index ed384f33e0c76809fcf8b40c7c4c994566bf05d2..ed0e4963362f96a0d79e001efd490e03b5a6bd5c 100644 (file)
@@ -76,6 +76,18 @@ config MLX90632
          This driver can also be built as a module. If so, the module will
          be called mlx90632.
 
+config MLX90635
+       tristate "MLX90635 contact-less infrared sensor with medical accuracy"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         If you say yes here you get support for the Melexis
+         MLX90635 contact-less infrared sensor with medical accuracy
+         connected with I2C.
+
+         This driver can also be built as a module. If so, the module will
+         be called mlx90635.
+
 config TMP006
        tristate "TMP006 infrared thermopile sensor"
        depends on I2C
@@ -158,4 +170,14 @@ config MAX31865
          This driver can also be build as a module. If so, the module
          will be called max31865.
 
+config MCP9600
+       tristate "MCP9600 thermocouple EMF converter"
+       depends on I2C
+       help
+         If you say yes here you get support for MCP9600
+         thermocouple EMF converter connected via I2C.
+
+         This driver can also be built as a module. If so, the module
+         will be called mcp9600.
+
 endmenu
index dfec8c6d301934befae6125b6bed6184d4a70301..07d6e65709f7fe6e1ed51867e08112c8a68ee98a 100644 (file)
@@ -10,8 +10,10 @@ obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
 obj-$(CONFIG_MAX30208) += max30208.o
 obj-$(CONFIG_MAX31856) += max31856.o
 obj-$(CONFIG_MAX31865) += max31865.o
+obj-$(CONFIG_MCP9600) += mcp9600.o
 obj-$(CONFIG_MLX90614) += mlx90614.o
 obj-$(CONFIG_MLX90632) += mlx90632.o
+obj-$(CONFIG_MLX90632) += mlx90635.o
 obj-$(CONFIG_TMP006) += tmp006.o
 obj-$(CONFIG_TMP007) += tmp007.o
 obj-$(CONFIG_TMP117) += tmp117.o
diff --git a/drivers/iio/temperature/mcp9600.c b/drivers/iio/temperature/mcp9600.c
new file mode 100644 (file)
index 0000000..4684580
--- /dev/null
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * mcp9600.c - Support for Microchip MCP9600 thermocouple EMF converter
+ *
+ * Copyright (c) 2022 Andrew Hepp
+ * Author: <andrew.hepp@ahepp.dev>
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+
+/* MCP9600 registers */
+#define MCP9600_HOT_JUNCTION 0x0
+#define MCP9600_COLD_JUNCTION 0x2
+#define MCP9600_DEVICE_ID 0x20
+
+/* MCP9600 device id value */
+#define MCP9600_DEVICE_ID_MCP9600 0x40
+
+static const struct iio_chan_spec mcp9600_channels[] = {
+       {
+               .type = IIO_TEMP,
+               .address = MCP9600_HOT_JUNCTION,
+               .info_mask_separate =
+                       BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+       },
+       {
+               .type = IIO_TEMP,
+               .address = MCP9600_COLD_JUNCTION,
+               .channel2 = IIO_MOD_TEMP_AMBIENT,
+               .modified = 1,
+               .info_mask_separate =
+                       BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+       },
+};
+
+struct mcp9600_data {
+       struct i2c_client *client;
+};
+
+/*
+ * Read one 16-bit temperature register (hot- or cold-junction, selected by
+ * chan->address) and return its raw value via @val.  The word is read
+ * byte-swapped, i.e. the register is transmitted MSB first on the wire.
+ * Returns 0 on success or a negative SMBus error code.
+ */
+static int mcp9600_read(struct mcp9600_data *data,
+                       struct iio_chan_spec const *chan, int *val)
+{
+       int ret;
+
+       ret = i2c_smbus_read_word_swapped(data->client, chan->address);
+
+       if (ret < 0)
+               return ret;
+       *val = ret;
+
+       return 0;
+}
+
+/*
+ * IIO read_raw callback: returns the raw register value for the requested
+ * channel, or the fixed scale shared by both junction channels.
+ */
+static int mcp9600_read_raw(struct iio_dev *indio_dev,
+                           struct iio_chan_spec const *chan, int *val,
+                           int *val2, long mask)
+{
+       struct mcp9600_data *data = iio_priv(indio_dev);
+       int ret;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               ret = mcp9600_read(data, chan, val);
+               if (ret)
+                       return ret;
+               return IIO_VAL_INT;
+       case IIO_CHAN_INFO_SCALE:
+               /* Fixed scale of 62.5 per LSB for both channels */
+               *val = 62;
+               *val2 = 500000;
+               return IIO_VAL_INT_PLUS_MICRO;
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct iio_info mcp9600_info = {
+       .read_raw = mcp9600_read_raw,
+};
+
+/*
+ * Probe: check the chip ID register, then allocate and register the IIO
+ * device.  All allocations are devm-managed, so no remove callback is
+ * needed.
+ */
+static int mcp9600_probe(struct i2c_client *client)
+{
+       struct iio_dev *indio_dev;
+       struct mcp9600_data *data;
+       int ret;
+
+       ret = i2c_smbus_read_byte_data(client, MCP9600_DEVICE_ID);
+       if (ret < 0)
+               return dev_err_probe(&client->dev, ret, "Failed to read device ID\n");
+       /*
+        * An unexpected ID is only a warning, not a probe failure —
+        * presumably to keep working on compatible parts that report a
+        * different ID (TODO confirm against datasheet family).
+        */
+       if (ret != MCP9600_DEVICE_ID_MCP9600)
+               dev_warn(&client->dev, "Expected ID %x, got %x\n",
+                               MCP9600_DEVICE_ID_MCP9600, ret);
+
+       indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+       if (!indio_dev)
+               return -ENOMEM;
+
+       data = iio_priv(indio_dev);
+       data->client = client;
+
+       indio_dev->info = &mcp9600_info;
+       indio_dev->name = "mcp9600";
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->channels = mcp9600_channels;
+       indio_dev->num_channels = ARRAY_SIZE(mcp9600_channels);
+
+       return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id mcp9600_id[] = {
+       { "mcp9600" },
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, mcp9600_id);
+
+static const struct of_device_id mcp9600_of_match[] = {
+       { .compatible = "microchip,mcp9600" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, mcp9600_of_match);
+
+static struct i2c_driver mcp9600_driver = {
+       .driver = {
+               .name = "mcp9600",
+               .of_match_table = mcp9600_of_match,
+       },
+       .probe = mcp9600_probe,
+       .id_table = mcp9600_id
+};
+module_i2c_driver(mcp9600_driver);
+
+MODULE_AUTHOR("Andrew Hepp <andrew.hepp@ahepp.dev>");
+MODULE_DESCRIPTION("Microchip MCP9600 thermocouple EMF converter driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c
new file mode 100644 (file)
index 0000000..1f5c962
--- /dev/null
@@ -0,0 +1,1097 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mlx90635.c - Melexis MLX90635 contactless IR temperature sensor
+ *
+ * Copyright (c) 2023 Melexis <cmo@melexis.com>
+ *
+ * Driver for the Melexis MLX90635 I2C 16-bit IR thermopile sensor
+ */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/math64.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+
+/* Memory sections addresses */
+#define MLX90635_ADDR_RAM      0x0000 /* Start address of ram */
+#define MLX90635_ADDR_EEPROM   0x0018 /* Start address of user eeprom */
+
+/* EEPROM addresses - used at startup */
+#define MLX90635_EE_I2C_CFG    0x0018 /* I2C address register initial value */
+#define MLX90635_EE_CTRL1      0x001A /* Control register1 initial value */
+#define MLX90635_EE_CTRL2      0x001C /* Control register2 initial value */
+
+#define MLX90635_EE_Ha         0x001E /* Ha customer calib value reg 16bit */
+#define MLX90635_EE_Hb         0x0020 /* Hb customer calib value reg 16bit */
+#define MLX90635_EE_Fa         0x0026 /* Fa calibration register 32bit */
+#define MLX90635_EE_FASCALE    0x002A /* Scaling coefficient for Fa register 16bit */
+#define MLX90635_EE_Ga         0x002C /* Ga calibration register 16bit */
+#define MLX90635_EE_Fb         0x002E /* Fb calibration register 16bit */
+#define MLX90635_EE_Ea         0x0030 /* Ea calibration register 32bit */
+#define MLX90635_EE_Eb         0x0034 /* Eb calibration register 32bit */
+#define MLX90635_EE_P_G                0x0038 /* P_G calibration register 16bit */
+#define MLX90635_EE_P_O                0x003A /* P_O calibration register 16bit */
+#define MLX90635_EE_Aa         0x003C /* Aa calibration register 16bit */
+#define MLX90635_EE_VERSION    0x003E /* Version bits 4:7 and 12:15 */
+#define MLX90635_EE_Gb         0x0040 /* Gb calibration register 16bit */
+
+/* Device status register - volatile */
+#define MLX90635_REG_STATUS    0x0000
+#define   MLX90635_STAT_BUSY BIT(6) /* Device busy indicator */
+#define   MLX90635_STAT_BRST BIT(5) /* Brown out reset indicator */
+#define   MLX90635_STAT_CYCLE_POS GENMASK(4, 2) /* Data position */
+#define   MLX90635_STAT_END_CONV BIT(1) /* End of conversion indicator */
+#define   MLX90635_STAT_DATA_RDY BIT(0) /* Data ready indicator */
+
+/* EEPROM control register address - volatile */
+#define MLX90635_REG_EE                0x000C
+#define   MLX90635_EE_ACTIVE BIT(4) /* Power-on EEPROM */
+#define   MLX90635_EE_BUSY_MASK        BIT(15)
+
+#define MLX90635_REG_CMD       0x0010 /* Command register address */
+
+/* Control register1 address - volatile */
+#define MLX90635_REG_CTRL1     0x0014
+#define   MLX90635_CTRL1_REFRESH_RATE_MASK GENMASK(2, 0)
+#define   MLX90635_CTRL1_RES_CTRL_MASK GENMASK(4, 3)
+#define   MLX90635_CTRL1_TABLE_MASK BIT(15) /* Table select */
+
+/* Control register2 address - volatile */
+#define   MLX90635_REG_CTRL2   0x0016
+#define   MLX90635_CTRL2_BURST_CNT_MASK GENMASK(10, 6) /* Burst count */
+#define   MLX90635_CTRL2_MODE_MASK GENMASK(12, 11) /* Power mode */
+#define   MLX90635_CTRL2_SOB_MASK BIT(15)
+
+/* PowerModes statuses */
+#define MLX90635_PWR_STATUS_HALT 0
+#define MLX90635_PWR_STATUS_SLEEP_STEP 1
+#define MLX90635_PWR_STATUS_STEP 2
+#define MLX90635_PWR_STATUS_CONTINUOUS 3
+
+/* Measurement data addresses */
+#define MLX90635_RESULT_1   0x0002
+#define MLX90635_RESULT_2   0x0004
+#define MLX90635_RESULT_3   0x0006
+#define MLX90635_RESULT_4   0x0008
+#define MLX90635_RESULT_5   0x000A
+
+/* Timings (ms) */
+#define MLX90635_TIMING_RST_MIN 200 /* Minimum time after addressed reset command */
+#define MLX90635_TIMING_RST_MAX 250 /* Maximum time after addressed reset command */
+#define MLX90635_TIMING_POLLING 10000 /* Time between bit polling*/
+#define MLX90635_TIMING_EE_ACTIVE_MIN 100 /* Minimum time after activating the EEPROM for read */
+#define MLX90635_TIMING_EE_ACTIVE_MAX 150 /* Maximum time after activating the EEPROM for read */
+
+/* Magic constants */
+#define MLX90635_ID_DSPv1 0x01 /* EEPROM DSP version */
+#define MLX90635_RESET_CMD  0x0006 /* Reset sensor (address or global) */
+#define MLX90635_MAX_MEAS_NUM   31 /* Maximum number of measurements in list */
+#define MLX90635_PTAT_DIV 12   /* Used to divide the PTAT value in pre-processing */
+#define MLX90635_IR_DIV 24   /* Used to divide the IR value in pre-processing */
+#define MLX90635_SLEEP_DELAY_MS 6000 /* Autosleep delay */
+#define MLX90635_MEAS_MAX_TIME 2000 /* Max measurement time in ms for the lowest refresh rate */
+#define MLX90635_READ_RETRIES 100 /* Number of read retries before quitting with timeout error */
+#define MLX90635_VERSION_MASK (GENMASK(15, 12) | GENMASK(7, 4))
+#define MLX90635_DSP_VERSION(reg) (((reg & GENMASK(14, 12)) >> 9) | ((reg & GENMASK(6, 4)) >> 4))
+#define MLX90635_DSP_FIXED BIT(15)
+
+
+/**
+ * struct mlx90635_data - private data for the MLX90635 device
+ * @client: I2C client of the device
+ * @lock: Internal mutex because multiple reads are needed for single triggered
+ *       measurement to ensure data consistency
+ * @regmap: Regmap of the device registers
+ * @regmap_ee: Regmap of the device EEPROM which can be cached
+ * @emissivity: Object emissivity from 0 to 1000 where 1000 = 1
+ * @regulator: Regulator of the device
+ * @powerstatus: Current POWER status of the device
+ * @interaction_ts: Timestamp of the last temperature read that is used
+ *                 for power management in jiffies
+ */
+struct mlx90635_data {
+       struct i2c_client *client;
+       struct mutex lock;
+       struct regmap *regmap;
+       struct regmap *regmap_ee;
+       u16 emissivity;
+       struct regulator *regulator;
+       int powerstatus;
+       unsigned long interaction_ts;
+};
+
+static const struct regmap_range mlx90635_volatile_reg_range[] = {
+       regmap_reg_range(MLX90635_REG_STATUS, MLX90635_REG_STATUS),
+       regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5),
+       regmap_reg_range(MLX90635_REG_EE, MLX90635_REG_EE),
+       regmap_reg_range(MLX90635_REG_CMD, MLX90635_REG_CMD),
+       regmap_reg_range(MLX90635_REG_CTRL1, MLX90635_REG_CTRL2),
+};
+
+static const struct regmap_access_table mlx90635_volatile_regs_tbl = {
+       .yes_ranges = mlx90635_volatile_reg_range,
+       .n_yes_ranges = ARRAY_SIZE(mlx90635_volatile_reg_range),
+};
+
+static const struct regmap_range mlx90635_read_reg_range[] = {
+       regmap_reg_range(MLX90635_REG_STATUS, MLX90635_REG_STATUS),
+       regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5),
+       regmap_reg_range(MLX90635_REG_EE, MLX90635_REG_EE),
+       regmap_reg_range(MLX90635_REG_CMD, MLX90635_REG_CMD),
+       regmap_reg_range(MLX90635_REG_CTRL1, MLX90635_REG_CTRL2),
+};
+
+static const struct regmap_access_table mlx90635_readable_regs_tbl = {
+       .yes_ranges = mlx90635_read_reg_range,
+       .n_yes_ranges = ARRAY_SIZE(mlx90635_read_reg_range),
+};
+
+static const struct regmap_range mlx90635_no_write_reg_range[] = {
+       regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5),
+};
+
+static const struct regmap_access_table mlx90635_writeable_regs_tbl = {
+       .no_ranges = mlx90635_no_write_reg_range,
+       .n_no_ranges = ARRAY_SIZE(mlx90635_no_write_reg_range),
+};
+
+static const struct regmap_config mlx90635_regmap = {
+       .name = "mlx90635-registers",
+       .reg_stride = 1,
+       .reg_bits = 16,
+       .val_bits = 16,
+
+       .volatile_table = &mlx90635_volatile_regs_tbl,
+       .rd_table = &mlx90635_readable_regs_tbl,
+       .wr_table = &mlx90635_writeable_regs_tbl,
+
+       .use_single_read = true,
+       .use_single_write = true,
+       .can_multi_write = false,
+       .reg_format_endian = REGMAP_ENDIAN_BIG,
+       .val_format_endian = REGMAP_ENDIAN_BIG,
+       .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regmap_range mlx90635_read_ee_range[] = {
+       regmap_reg_range(MLX90635_EE_I2C_CFG, MLX90635_EE_CTRL2),
+       regmap_reg_range(MLX90635_EE_Ha, MLX90635_EE_Gb),
+};
+
+static const struct regmap_access_table mlx90635_readable_ees_tbl = {
+       .yes_ranges = mlx90635_read_ee_range,
+       .n_yes_ranges = ARRAY_SIZE(mlx90635_read_ee_range),
+};
+
+static const struct regmap_range mlx90635_no_write_ee_range[] = {
+       regmap_reg_range(MLX90635_ADDR_EEPROM, MLX90635_EE_Gb),
+};
+
+static const struct regmap_access_table mlx90635_writeable_ees_tbl = {
+       .no_ranges = mlx90635_no_write_ee_range,
+       .n_no_ranges = ARRAY_SIZE(mlx90635_no_write_ee_range),
+};
+
+static const struct regmap_config mlx90635_regmap_ee = {
+       .name = "mlx90635-eeprom",
+       .reg_stride = 1,
+       .reg_bits = 16,
+       .val_bits = 16,
+
+       .volatile_table = NULL,
+       .rd_table = &mlx90635_readable_ees_tbl,
+       .wr_table = &mlx90635_writeable_ees_tbl,
+
+       .use_single_read = true,
+       .use_single_write = true,
+       .can_multi_write = false,
+       .reg_format_endian = REGMAP_ENDIAN_BIG,
+       .val_format_endian = REGMAP_ENDIAN_BIG,
+       .cache_type = REGCACHE_RBTREE,
+};
+
+/**
+ * mlx90635_reset_delay() - Give the mlx90635 some time to reset properly
+ * If this is not done, the following I2C command(s) will not be accepted.
+ */
+static void mlx90635_reset_delay(void)
+{
+       usleep_range(MLX90635_TIMING_RST_MIN, MLX90635_TIMING_RST_MAX);
+}
+
+/*
+ * Switch the sensor into SLEEP_STEP power mode via CTRL2.  No-op when the
+ * cached power status already says SLEEP_STEP, avoiding a bus write.
+ * Returns 0 on success or a negative regmap error code.
+ */
+static int mlx90635_pwr_sleep_step(struct mlx90635_data *data)
+{
+       int ret;
+
+       if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP)
+               return 0;
+
+       ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2, MLX90635_CTRL2_MODE_MASK,
+                               FIELD_PREP(MLX90635_CTRL2_MODE_MASK, MLX90635_PWR_STATUS_SLEEP_STEP));
+       if (ret < 0)
+               return ret;
+
+       /* Cache the new mode so later calls can short-circuit. */
+       data->powerstatus = MLX90635_PWR_STATUS_SLEEP_STEP;
+       return 0;
+}
+
+/*
+ * Switch the sensor into CONTINUOUS power mode via CTRL2.  No-op when the
+ * cached power status already says CONTINUOUS, avoiding a bus write.
+ * Returns 0 on success or a negative regmap error code.
+ */
+static int mlx90635_pwr_continuous(struct mlx90635_data *data)
+{
+       int ret;
+
+       if (data->powerstatus == MLX90635_PWR_STATUS_CONTINUOUS)
+               return 0;
+
+       ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2, MLX90635_CTRL2_MODE_MASK,
+                               FIELD_PREP(MLX90635_CTRL2_MODE_MASK, MLX90635_PWR_STATUS_CONTINUOUS));
+       if (ret < 0)
+               return ret;
+
+       /* Cache the new mode so later calls can short-circuit. */
+       data->powerstatus = MLX90635_PWR_STATUS_CONTINUOUS;
+       return 0;
+}
+
+/*
+ * Assemble a 32-bit EEPROM constant from two consecutive 16-bit words:
+ * the word at @reg_lsb supplies the high 16 bits and the word at
+ * @reg_lsb + 2 supplies the low 16 bits.  The combined value is stored
+ * in @reg_value.  Returns 0 on success or a negative regmap error code.
+ */
+static int mlx90635_read_ee_register(struct regmap *regmap, u16 reg_lsb,
+                                    s32 *reg_value)
+{
+       unsigned int read;
+       u32 value;
+       int ret;
+
+       /* Low word first ... */
+       ret = regmap_read(regmap, reg_lsb + 2, &read);
+       if (ret < 0)
+               return ret;
+
+       value = read;
+
+       /* ... then the high word. */
+       ret = regmap_read(regmap, reg_lsb, &read);
+       if (ret < 0)
+               return ret;
+
+       *reg_value = (read << 16) | (value & 0xffff);
+
+       return 0;
+}
+
+/*
+ * mlx90635_read_ee_ambient() - read the ambient-temperature calibration
+ * constants (P_O, P_G and Gb) from the cached EEPROM regmap.
+ * @regmap: EEPROM regmap
+ * @PG: output for the P_G constant (signed 16-bit)
+ * @PO: output for the P_O constant (signed 16-bit)
+ * @Gb: output for the Gb constant (signed 16-bit)
+ *
+ * Return: 0 on success or a negative regmap error code.
+ */
+static int mlx90635_read_ee_ambient(struct regmap *regmap, s16 *PG, s16 *PO, s16 *Gb)
+{
+       unsigned int read_tmp;
+       int ret;
+
+       ret = regmap_read(regmap, MLX90635_EE_P_O, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *PO = (s16)read_tmp;
+
+       ret = regmap_read(regmap, MLX90635_EE_P_G, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *PG = (s16)read_tmp;
+
+       ret = regmap_read(regmap, MLX90635_EE_Gb, &read_tmp);
+       if (ret < 0)
+               return ret;
+       /*
+        * Gb is a signed 16-bit constant (s16 *), so cast with (s16) —
+        * the previous (u16) cast was inconsistent with the identical
+        * read in mlx90635_read_ee_object().
+        */
+       *Gb = (s16)read_tmp;
+
+       return 0;
+}
+
+/*
+ * Read the object-temperature calibration coefficients from the EEPROM
+ * regmap: Ea/Eb/Fa are 32-bit (split across two registers), the rest are
+ * 16-bit.  Returns 0 or a negative error code from the first failing read.
+ */
+static int mlx90635_read_ee_object(struct regmap *regmap, u32 *Ea, u32 *Eb, u32 *Fa, s16 *Fb,
+                                  s16 *Ga, s16 *Gb, s16 *Ha, s16 *Hb, u16 *Fa_scale)
+{
+       unsigned int read_tmp;
+       int ret;
+
+       ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Ea, Ea);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Eb, Eb);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Fa, Fa);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_read(regmap, MLX90635_EE_Ha, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *Ha = (s16)read_tmp;
+
+       ret = regmap_read(regmap, MLX90635_EE_Hb, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *Hb = (s16)read_tmp;
+
+       ret = regmap_read(regmap, MLX90635_EE_Ga, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *Ga = (s16)read_tmp;
+
+       ret = regmap_read(regmap, MLX90635_EE_Gb, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *Gb = (s16)read_tmp;
+
+       ret = regmap_read(regmap, MLX90635_EE_Fb, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *Fb = (s16)read_tmp;
+
+       ret = regmap_read(regmap, MLX90635_EE_FASCALE, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *Fa_scale = (u16)read_tmp;
+
+       return 0;
+}
+
+/*
+ * Compute the worst-case wait (in ms) until a full dataset is ready, derived
+ * from the refresh-rate field of CTRL1: twice the per-rate measurement time
+ * plus an 80 ms margin.  Returns 0 or a negative regmap error code.
+ */
+static int mlx90635_calculate_dataset_ready_time(struct mlx90635_data *data, int *refresh_time)
+{
+       unsigned int reg;
+       int ret;
+
+       ret = regmap_read(data->regmap, MLX90635_REG_CTRL1, &reg);
+       if (ret < 0)
+               return ret;
+
+       *refresh_time = 2 * (MLX90635_MEAS_MAX_TIME >> FIELD_GET(MLX90635_CTRL1_REFRESH_RATE_MASK, reg)) + 80;
+
+       return 0;
+}
+
+/*
+ * Trigger a single measurement burst in Sleep Step mode and wait for it to
+ * finish: clear the end-of-conversion flag, set the start-of-burst bit,
+ * sleep for the computed dataset-ready time, then poll the status register.
+ * Returns 0, -ETIMEDOUT if the conversion never completes, or a negative
+ * regmap error code.
+ */
+static int mlx90635_perform_measurement_burst(struct mlx90635_data *data)
+{
+       unsigned int reg_status;
+       int refresh_time;
+       int ret;
+
+       /* Writing 1 clears the end-of-conversion status bit */
+       ret = regmap_write_bits(data->regmap, MLX90635_REG_STATUS,
+                               MLX90635_STAT_END_CONV, MLX90635_STAT_END_CONV);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx90635_calculate_dataset_ready_time(data, &refresh_time);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2,
+                               FIELD_PREP(MLX90635_CTRL2_SOB_MASK, 1),
+                               FIELD_PREP(MLX90635_CTRL2_SOB_MASK, 1));
+       if (ret < 0)
+               return ret;
+
+       msleep(refresh_time); /* Wait minimum time for dataset to be ready */
+
+       ret = regmap_read_poll_timeout(data->regmap, MLX90635_REG_STATUS, reg_status,
+                                      (!(reg_status & MLX90635_STAT_END_CONV)) == 0,
+                                      MLX90635_TIMING_POLLING, MLX90635_READ_RETRIES * 10000);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "data not ready");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+/*
+ * Read the two raw ambient samples (current and previous conversion) from
+ * the result registers.  Returns 0 or a negative regmap error code.
+ */
+static int mlx90635_read_ambient_raw(struct regmap *regmap,
+                                    s16 *ambient_new_raw, s16 *ambient_old_raw)
+{
+       unsigned int read_tmp;
+       int ret;
+
+       ret = regmap_read(regmap, MLX90635_RESULT_2, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *ambient_new_raw = (s16)read_tmp;
+
+       ret = regmap_read(regmap, MLX90635_RESULT_3, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *ambient_old_raw = (s16)read_tmp;
+
+       return 0;
+}
+
+/*
+ * Read the raw object (IR) measurement: the averaged difference of the two
+ * IR result registers.  Returns 0 or a negative regmap error code.
+ */
+static int mlx90635_read_object_raw(struct regmap *regmap, s16 *object_raw)
+{
+       unsigned int read_tmp;
+       s16 read;
+       int ret;
+
+       ret = regmap_read(regmap, MLX90635_RESULT_1, &read_tmp);
+       if (ret < 0)
+               return ret;
+
+       read = (s16)read_tmp;
+
+       ret = regmap_read(regmap, MLX90635_RESULT_4, &read_tmp);
+       if (ret < 0)
+               return ret;
+       *object_raw = (read - (s16)read_tmp) / 2;
+
+       return 0;
+}
+
+/*
+ * Read ambient (new/old) and object raw samples under data->lock.  In Sleep
+ * Step mode a measurement burst is triggered first so the result registers
+ * hold fresh data.  Returns 0 or the first failing call's error code.
+ */
+static int mlx90635_read_all_channel(struct mlx90635_data *data,
+                                    s16 *ambient_new_raw, s16 *ambient_old_raw,
+                                    s16 *object_raw)
+{
+       int ret;
+
+       mutex_lock(&data->lock);
+       if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP) {
+               /* Trigger measurement in Sleep Step mode */
+               ret = mlx90635_perform_measurement_burst(data);
+               if (ret < 0)
+                       goto read_unlock;
+       }
+
+       ret = mlx90635_read_ambient_raw(data->regmap, ambient_new_raw,
+                                       ambient_old_raw);
+       if (ret < 0)
+               goto read_unlock;
+
+       ret = mlx90635_read_object_raw(data->regmap, object_raw);
+read_unlock:
+       mutex_unlock(&data->lock);
+       return ret;
+}
+
+/*
+ * Fixed-point preprocessing of the raw ambient samples into the scaled AMB
+ * value used by the temperature formulas.  Arithmetic follows the Melexis
+ * datasheet; scaling factors (1000/1000000, << 19) keep precision in s64.
+ */
+static s64 mlx90635_preprocess_temp_amb(s16 ambient_new_raw,
+                                       s16 ambient_old_raw, s16 Gb)
+{
+       s64 VR_Ta, kGb, tmp;
+
+       kGb = ((s64)Gb * 1000LL) >> 10ULL;
+       VR_Ta = (s64)ambient_old_raw * 1000000LL +
+               kGb * div64_s64(((s64)ambient_new_raw * 1000LL),
+                       (MLX90635_PTAT_DIV));
+       tmp = div64_s64(
+                        div64_s64(((s64)ambient_new_raw * 1000000000000LL),
+                                  (MLX90635_PTAT_DIV)), VR_Ta);
+       return div64_s64(tmp << 19ULL, 1000LL);
+}
+
+/*
+ * Fixed-point preprocessing of the raw object (IR) sample, normalized by the
+ * same ambient-derived reference voltage as the ambient path.  Follows the
+ * Melexis datasheet formulas in s64 arithmetic.
+ */
+static s64 mlx90635_preprocess_temp_obj(s16 object_raw,
+                                       s16 ambient_new_raw,
+                                       s16 ambient_old_raw, s16 Gb)
+{
+       s64 VR_IR, kGb, tmp;
+
+       kGb = ((s64)Gb * 1000LL) >> 10ULL;
+       VR_IR = (s64)ambient_old_raw * 1000000LL +
+               kGb * (div64_s64((s64)ambient_new_raw * 1000LL,
+                       MLX90635_PTAT_DIV));
+       tmp = div64_s64(
+                       div64_s64((s64)(object_raw * 1000000LL),
+                                  MLX90635_IR_DIV) * 1000000LL,
+                       VR_IR);
+       return div64_s64((tmp << 19ULL), 1000LL);
+}
+
+/*
+ * Compute the ambient temperature in millidegrees Celsius from the raw
+ * samples and the P_G/P_O/Gb calibration coefficients (datasheet formula,
+ * anchored at 30 degC).
+ *
+ * NOTE(review): P_G/P_O are taken as u16 here although
+ * mlx90635_read_ee_ambient() reads them as s16 — (s64)P_O zero-extends, so a
+ * negative coefficient would lose its sign; confirm against the datasheet.
+ */
+static s32 mlx90635_calc_temp_ambient(s16 ambient_new_raw, s16 ambient_old_raw,
+                                     u16 P_G, u16 P_O, s16 Gb)
+{
+       s64 kPG, kPO, AMB;
+
+       AMB = mlx90635_preprocess_temp_amb(ambient_new_raw, ambient_old_raw,
+                                          Gb);
+       kPG = ((s64)P_G * 1000000LL) >> 9ULL;
+       kPO = AMB - (((s64)P_O * 1000LL) >> 1ULL);
+
+       return 30 * 1000LL + div64_s64(kPO * 1000000LL, kPG);
+}
+
+/*
+ * One iteration of the object-temperature solver: refine the previous
+ * estimate using the calibration coefficients, the emissivity (per mille)
+ * and the precomputed TAdut^4 term, then take the fourth root via two
+ * int_sqrt64() calls.  Result is in millidegrees Celsius.
+ */
+static s32 mlx90635_calc_temp_object_iteration(s32 prev_object_temp, s64 object,
+                                              s64 TAdut, s64 TAdut4, s16 Ga,
+                                              u32 Fa, u16 Fa_scale, s16 Fb,
+                                              s16 Ha, s16 Hb, u16 emissivity)
+{
+       s64 calcedGa, calcedGb, calcedFa, Alpha_corr;
+       s64 Ha_customer, Hb_customer;
+
+       Ha_customer = ((s64)Ha * 1000000LL) >> 14ULL;
+       Hb_customer = ((s64)Hb * 100) >> 10ULL;
+
+       calcedGa = ((s64)((s64)Ga * (prev_object_temp - 35 * 1000LL)
+                            * 1000LL)) >> 24LL;
+       calcedGb = ((s64)(Fb * (TAdut - 30 * 1000000LL))) >> 24LL;
+
+       Alpha_corr = ((s64)((s64)Fa * Ha_customer * 10000LL) >> Fa_scale);
+       Alpha_corr *= ((s64)(1 * 1000000LL + calcedGa + calcedGb));
+
+       Alpha_corr = div64_s64(Alpha_corr, 1000LL);
+       Alpha_corr *= emissivity;
+       Alpha_corr = div64_s64(Alpha_corr, 100LL);
+       calcedFa = div64_s64((s64)object * 100000000000LL, Alpha_corr);
+
+       /* 27315 converts between Kelvin*100 and Celsius*100 */
+       return (int_sqrt64(int_sqrt64(calcedFa * 100000000LL + TAdut4))
+               - 27315 - Hb_customer) * 10;
+}
+
+/* (TAdut/scale + 273.15K in centi-degrees) raised to the fourth power. */
+static s64 mlx90635_calc_ta4(s64 TAdut, s64 scale)
+{
+       return (div64_s64(TAdut, scale) + 27315) *
+               (div64_s64(TAdut, scale) + 27315) *
+               (div64_s64(TAdut, scale) + 27315) *
+               (div64_s64(TAdut, scale) + 27315);
+}
+
+/*
+ * Solve for the object temperature (millidegrees Celsius) by fixed-point
+ * iteration, starting from 35 degC and refining five times as the datasheet
+ * prescribes.  @tmp_emi is emissivity in per mille.
+ */
+static s32 mlx90635_calc_temp_object(s64 object, s64 ambient, u32 Ea, u32 Eb,
+                                    s16 Ga, u32 Fa, u16 Fa_scale, s16 Fb, s16 Ha, s16 Hb,
+                                    u16 tmp_emi)
+{
+       s64 kTA, kTA0, TAdut, TAdut4;
+       s64 temp = 35000;
+       s8 i;
+
+       kTA = (Ea * 1000LL) >> 16LL;
+       kTA0 = (Eb * 1000LL) >> 8LL;
+       TAdut = div64_s64(((ambient - kTA0) * 1000000LL), kTA) + 30 * 1000000LL;
+       TAdut4 = mlx90635_calc_ta4(TAdut, 10000LL);
+
+       /* Iterations of calculation as described in datasheet */
+       for (i = 0; i < 5; ++i) {
+               temp = mlx90635_calc_temp_object_iteration(temp, object, TAdut, TAdut4,
+                                                          Ga, Fa, Fa_scale, Fb, Ha, Hb,
+                                                          tmp_emi);
+       }
+       return temp;
+}
+
+/*
+ * Full object-temperature pipeline: fetch calibration coefficients from the
+ * (cached) EEPROM regmap, read raw samples, preprocess, then run the solver.
+ * Writes the temperature to *val; returns 0 or a negative error code.
+ */
+static int mlx90635_calc_object(struct mlx90635_data *data, int *val)
+{
+       s16 ambient_new_raw, ambient_old_raw, object_raw;
+       s16 Fb, Ga, Gb, Ha, Hb;
+       s64 object, ambient;
+       u32 Ea, Eb, Fa;
+       u16 Fa_scale;
+       int ret;
+
+       ret = mlx90635_read_ee_object(data->regmap_ee, &Ea, &Eb, &Fa, &Fb, &Ga, &Gb, &Ha, &Hb, &Fa_scale);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx90635_read_all_channel(data,
+                                       &ambient_new_raw, &ambient_old_raw,
+                                       &object_raw);
+       if (ret < 0)
+               return ret;
+
+       ambient = mlx90635_preprocess_temp_amb(ambient_new_raw,
+                                              ambient_old_raw, Gb);
+       object = mlx90635_preprocess_temp_obj(object_raw,
+                                             ambient_new_raw,
+                                             ambient_old_raw, Gb);
+
+       *val = mlx90635_calc_temp_object(object, ambient, Ea, Eb, Ga, Fa, Fa_scale, Fb,
+                                        Ha, Hb, data->emissivity);
+       return 0;
+}
+
+/*
+ * Full ambient-temperature pipeline: fetch coefficients, read the raw
+ * ambient samples under the lock (triggering a burst in Sleep Step mode),
+ * then compute the temperature into *val.  Returns 0 or a negative error.
+ */
+static int mlx90635_calc_ambient(struct mlx90635_data *data, int *val)
+{
+       s16 ambient_new_raw, ambient_old_raw;
+       s16 PG, PO, Gb;
+       int ret;
+
+       ret = mlx90635_read_ee_ambient(data->regmap_ee, &PG, &PO, &Gb);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&data->lock);
+       if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP) {
+               ret = mlx90635_perform_measurement_burst(data);
+               if (ret < 0)
+                       goto read_ambient_unlock;
+       }
+
+       ret = mlx90635_read_ambient_raw(data->regmap, &ambient_new_raw,
+                                       &ambient_old_raw);
+read_ambient_unlock:
+       mutex_unlock(&data->lock);
+       if (ret < 0)
+               return ret;
+
+       *val = mlx90635_calc_temp_ambient(ambient_new_raw, ambient_old_raw,
+                                         PG, PO, Gb);
+       return ret;
+}
+
+/*
+ * Extract the refresh-rate field from CTRL1 into *refresh_rate; the value
+ * is an index into mlx90635_freqs[].  Returns 0 or a negative error code.
+ */
+static int mlx90635_get_refresh_rate(struct mlx90635_data *data,
+                                    unsigned int *refresh_rate)
+{
+       unsigned int reg;
+       int ret;
+
+       ret = regmap_read(data->regmap, MLX90635_REG_CTRL1, &reg);
+       if (ret < 0)
+               return ret;
+
+       *refresh_rate = FIELD_GET(MLX90635_CTRL1_REFRESH_RATE_MASK, reg);
+
+       return 0;
+}
+
+/*
+ * Sampling frequencies selectable via the CTRL1 refresh-rate field; the
+ * array index is the register value.  Each entry is val + val2/1e6 Hz
+ * (IIO_VAL_INT_PLUS_MICRO), i.e. 0.2 Hz up to 8.9 Hz.
+ */
+static const struct {
+       int val;
+       int val2;
+} mlx90635_freqs[] = {
+       { 0, 200000 },
+       { 0, 500000 },
+       { 0, 900000 },
+       { 1, 700000 },
+       { 3, 0 },
+       { 4, 800000 },
+       { 6, 900000 },
+       { 8, 900000 }
+};
+
+/**
+ * mlx90635_pm_interaction_wakeup() - Measure time between user interactions to change powermode
+ * @data: pointer to mlx90635_data object containing interaction_ts information
+ *
+ * Switch to continuous mode when interaction is faster than MLX90635_MEAS_MAX_TIME. Update the
+ * interaction_ts for each function call with the jiffies to enable measurement between function
+ * calls. Initial value of the interaction_ts needs to be set before this function call.
+ *
+ * Return: 0 on success, negative error code from the mode switch otherwise.
+ */
+static int mlx90635_pm_interaction_wakeup(struct mlx90635_data *data)
+{
+       unsigned long now;
+       int ret;
+
+       now = jiffies;
+       /* Two reads within the window => user is polling fast; stay awake */
+       if (time_in_range(now, data->interaction_ts,
+                         data->interaction_ts +
+                         msecs_to_jiffies(MLX90635_MEAS_MAX_TIME + 100))) {
+               ret = mlx90635_pwr_continuous(data);
+               if (ret < 0)
+                       return ret;
+       }
+
+       data->interaction_ts = now;
+
+       return 0;
+}
+
+/*
+ * IIO read_raw callback: processed ambient/object temperature, emissivity
+ * (as 0..1 fraction) and sampling frequency.  Wakes the device via runtime
+ * PM for the duration of the read.
+ */
+static int mlx90635_read_raw(struct iio_dev *indio_dev,
+                            struct iio_chan_spec const *channel, int *val,
+                            int *val2, long mask)
+{
+       struct mlx90635_data *data = iio_priv(indio_dev);
+       int ret;
+       int cr;
+
+       /* NOTE(review): pm_runtime_get_sync() return value is ignored — confirm intended */
+       pm_runtime_get_sync(&data->client->dev);
+       ret = mlx90635_pm_interaction_wakeup(data);
+       if (ret < 0)
+               goto mlx90635_read_raw_pm;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_PROCESSED:
+               switch (channel->channel2) {
+               case IIO_MOD_TEMP_AMBIENT:
+                       ret = mlx90635_calc_ambient(data, val);
+                       if (ret < 0)
+                               goto mlx90635_read_raw_pm;
+
+                       ret = IIO_VAL_INT;
+                       break;
+               case IIO_MOD_TEMP_OBJECT:
+                       ret = mlx90635_calc_object(data, val);
+                       if (ret < 0)
+                               goto mlx90635_read_raw_pm;
+
+                       ret = IIO_VAL_INT;
+                       break;
+               default:
+                       ret = -EINVAL;
+                       break;
+               }
+               break;
+       case IIO_CHAN_INFO_CALIBEMISSIVITY:
+               /* emissivity is stored in per mille (1000 == 1.0) */
+               if (data->emissivity == 1000) {
+                       *val = 1;
+                       *val2 = 0;
+               } else {
+                       *val = 0;
+                       *val2 = data->emissivity * 1000;
+               }
+               ret = IIO_VAL_INT_PLUS_MICRO;
+               break;
+       case IIO_CHAN_INFO_SAMP_FREQ:
+               ret = mlx90635_get_refresh_rate(data, &cr);
+               if (ret < 0)
+                       goto mlx90635_read_raw_pm;
+
+               *val = mlx90635_freqs[cr].val;
+               *val2 = mlx90635_freqs[cr].val2;
+               ret = IIO_VAL_INT_PLUS_MICRO;
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+mlx90635_read_raw_pm:
+       pm_runtime_mark_last_busy(&data->client->dev);
+       pm_runtime_put_autosuspend(&data->client->dev);
+       return ret;
+}
+
+/*
+ * IIO write_raw callback: set emissivity (0..1 as INT_PLUS_MICRO, stored in
+ * per mille) or the sampling frequency (matched against mlx90635_freqs[]
+ * and written as the CTRL1 refresh-rate index).
+ */
+static int mlx90635_write_raw(struct iio_dev *indio_dev,
+                             struct iio_chan_spec const *channel, int val,
+                             int val2, long mask)
+{
+       struct mlx90635_data *data = iio_priv(indio_dev);
+       int ret;
+       int i;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_CALIBEMISSIVITY:
+               /* Confirm we are within 0 and 1.0 */
+               if (val < 0 || val2 < 0 || val > 1 ||
+                   (val == 1 && val2 != 0))
+                       return -EINVAL;
+               data->emissivity = val * 1000 + val2 / 1000;
+               return 0;
+       case IIO_CHAN_INFO_SAMP_FREQ:
+               for (i = 0; i < ARRAY_SIZE(mlx90635_freqs); i++) {
+                       if (val == mlx90635_freqs[i].val &&
+                           val2 == mlx90635_freqs[i].val2)
+                               break;
+               }
+               if (i == ARRAY_SIZE(mlx90635_freqs))
+                       return -EINVAL;
+
+               /*
+                * NOTE(review): i is written without FIELD_PREP into the
+                * refresh-rate mask — correct only if the field starts at
+                * bit 0; confirm against the register layout.
+                */
+               ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL1,
+                                       MLX90635_CTRL1_REFRESH_RATE_MASK, i);
+
+               return ret;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * IIO read_avail callback: expose the sampling-frequency table.  The
+ * {val, val2} struct array is laid out as pairs of ints, which is exactly
+ * the flat int list IIO_AVAIL_LIST expects.
+ */
+static int mlx90635_read_avail(struct iio_dev *indio_dev,
+                              struct iio_chan_spec const *chan,
+                              const int **vals, int *type, int *length,
+                              long mask)
+{
+       switch (mask) {
+       case IIO_CHAN_INFO_SAMP_FREQ:
+               *vals = (int *)mlx90635_freqs;
+               *type = IIO_VAL_INT_PLUS_MICRO;
+               *length = 2 * ARRAY_SIZE(mlx90635_freqs);
+               return IIO_AVAIL_LIST;
+       default:
+               return -EINVAL;
+       }
+}
+
+/* Two temperature channels: ambient, and object (with per-channel emissivity). */
+static const struct iio_chan_spec mlx90635_channels[] = {
+       {
+               .type = IIO_TEMP,
+               .modified = 1,
+               .channel2 = IIO_MOD_TEMP_AMBIENT,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+               .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+               .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+       },
+       {
+               .type = IIO_TEMP,
+               .modified = 1,
+               .channel2 = IIO_MOD_TEMP_OBJECT,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+                       BIT(IIO_CHAN_INFO_CALIBEMISSIVITY),
+               .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+               .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+       },
+};
+
+/* IIO callback table for this driver. */
+static const struct iio_info mlx90635_info = {
+       .read_raw = mlx90635_read_raw,
+       .write_raw = mlx90635_write_raw,
+       .read_avail = mlx90635_read_avail,
+};
+
+/* devm cleanup action: drop the device into low-power Sleep Step mode. */
+static void mlx90635_sleep(void *_data)
+{
+       struct mlx90635_data *data = _data;
+
+       mlx90635_pwr_sleep_step(data);
+}
+
+/* Enter Sleep Step mode for system suspend. */
+static int mlx90635_suspend(struct mlx90635_data *data)
+{
+       return mlx90635_pwr_sleep_step(data);
+}
+
+/*
+ * Bring the device up after power-on or resume: re-enable regmap access,
+ * switch to continuous mode, power the EEPROM, resync the EEPROM regcache,
+ * prime the cache by reading all calibration coefficients plus the version,
+ * then pin the EEPROM regmap to cache-only (further EEPROM reads are served
+ * from cache).  Returns 0 or the first failing step's error code.
+ */
+static int mlx90635_wakeup(struct mlx90635_data *data)
+{
+       s16 Fb, Ga, Gb, Ha, Hb, PG, PO;
+       unsigned int dsp_version;
+       u32 Ea, Eb, Fa;
+       u16 Fa_scale;
+       int ret;
+
+       regcache_cache_bypass(data->regmap_ee, false);
+       regcache_cache_only(data->regmap_ee, false);
+       regcache_cache_only(data->regmap, false);
+
+       ret = mlx90635_pwr_continuous(data);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "Switch to continuous mode failed\n");
+               return ret;
+       }
+       ret = regmap_write_bits(data->regmap, MLX90635_REG_EE,
+                               MLX90635_EE_ACTIVE, MLX90635_EE_ACTIVE);
+       if (ret < 0) {
+               dev_err(&data->client->dev, "Powering EEPROM failed\n");
+               return ret;
+       }
+       /* EEPROM needs time to become accessible after being powered */
+       usleep_range(MLX90635_TIMING_EE_ACTIVE_MIN, MLX90635_TIMING_EE_ACTIVE_MAX);
+
+       regcache_mark_dirty(data->regmap_ee);
+
+       ret = regcache_sync(data->regmap_ee);
+       if (ret < 0) {
+               dev_err(&data->client->dev,
+                       "Failed to sync cache: %d\n", ret);
+               return ret;
+       }
+
+       /* The reads below populate the regcache; the values themselves are discarded */
+       ret = mlx90635_read_ee_ambient(data->regmap_ee, &PG, &PO, &Gb);
+       if (ret < 0) {
+               dev_err(&data->client->dev,
+                       "Failed to read to cache Ambient coefficients EEPROM region: %d\n", ret);
+               return ret;
+       }
+
+       ret = mlx90635_read_ee_object(data->regmap_ee, &Ea, &Eb, &Fa, &Fb, &Ga, &Gb, &Ha, &Hb, &Fa_scale);
+       if (ret < 0) {
+               dev_err(&data->client->dev,
+                       "Failed to read to cache Object coefficients EEPROM region: %d\n", ret);
+               return ret;
+       }
+
+       ret = regmap_read(data->regmap_ee, MLX90635_EE_VERSION, &dsp_version);
+       if (ret < 0) {
+               dev_err(&data->client->dev,
+                       "Failed to read to cache of EEPROM version: %d\n", ret);
+               return ret;
+       }
+
+       regcache_cache_only(data->regmap_ee, true);
+
+       return ret;
+}
+
+/* devm cleanup action: disable the vdd regulator, logging (not propagating) failure. */
+static void mlx90635_disable_regulator(void *_data)
+{
+       struct mlx90635_data *data = _data;
+       int ret;
+
+       ret = regulator_disable(data->regulator);
+       if (ret < 0)
+               dev_err(regmap_get_device(data->regmap),
+                       "Failed to disable power regulator: %d\n", ret);
+}
+
+/*
+ * Enable the vdd regulator and wait the post-reset settling time so the
+ * device is ready for I2C traffic.  Returns 0 or a regulator error code.
+ */
+static int mlx90635_enable_regulator(struct mlx90635_data *data)
+{
+       int ret;
+
+       ret = regulator_enable(data->regulator);
+       if (ret < 0) {
+               dev_err(regmap_get_device(data->regmap), "Failed to enable power regulator!\n");
+               return ret;
+       }
+
+       mlx90635_reset_delay();
+
+       return ret;
+}
+
+/*
+ * I2C probe: allocate the IIO device and the two regmaps (device registers
+ * and EEPROM), power and wake the sensor, validate the EEPROM/DSP version,
+ * then set up runtime PM with autosuspend and register the IIO device.
+ * All teardown is devm-managed.  Returns 0 or a negative error code.
+ */
+static int mlx90635_probe(struct i2c_client *client)
+{
+       struct mlx90635_data *mlx90635;
+       struct iio_dev *indio_dev;
+       unsigned int dsp_version;
+       struct regmap *regmap;
+       struct regmap *regmap_ee;
+       int ret;
+
+       indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mlx90635));
+       if (!indio_dev)
+               return dev_err_probe(&client->dev, -ENOMEM, "failed to allocate device\n");
+
+       regmap = devm_regmap_init_i2c(client, &mlx90635_regmap);
+       if (IS_ERR(regmap))
+               return dev_err_probe(&client->dev, PTR_ERR(regmap),
+                                    "failed to allocate regmap\n");
+
+       regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee);
+       /* Check the EEPROM regmap, not the (already validated) device regmap */
+       if (IS_ERR(regmap_ee))
+               return dev_err_probe(&client->dev, PTR_ERR(regmap_ee),
+                                    "failed to allocate regmap\n");
+
+       mlx90635 = iio_priv(indio_dev);
+       i2c_set_clientdata(client, indio_dev);
+       mlx90635->client = client;
+       mlx90635->regmap = regmap;
+       mlx90635->regmap_ee = regmap_ee;
+       mlx90635->powerstatus = MLX90635_PWR_STATUS_SLEEP_STEP;
+
+       mutex_init(&mlx90635->lock);
+       indio_dev->name = "mlx90635";
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->info = &mlx90635_info;
+       indio_dev->channels = mlx90635_channels;
+       indio_dev->num_channels = ARRAY_SIZE(mlx90635_channels);
+
+       mlx90635->regulator = devm_regulator_get(&client->dev, "vdd");
+       if (IS_ERR(mlx90635->regulator))
+               return dev_err_probe(&client->dev, PTR_ERR(mlx90635->regulator),
+                                    "failed to get vdd regulator");
+
+       ret = mlx90635_enable_regulator(mlx90635);
+       if (ret < 0)
+               return ret;
+
+       ret = devm_add_action_or_reset(&client->dev, mlx90635_disable_regulator,
+                                      mlx90635);
+       if (ret < 0)
+               return dev_err_probe(&client->dev, ret,
+                                    "failed to setup regulator cleanup action\n");
+
+       ret = mlx90635_wakeup(mlx90635);
+       if (ret < 0)
+               return dev_err_probe(&client->dev, ret, "wakeup failed\n");
+
+       ret = devm_add_action_or_reset(&client->dev, mlx90635_sleep, mlx90635);
+       if (ret < 0)
+               return dev_err_probe(&client->dev, ret,
+                                    "failed to setup low power cleanup\n");
+
+       /* Served from the regcache primed by mlx90635_wakeup() */
+       ret = regmap_read(mlx90635->regmap_ee, MLX90635_EE_VERSION, &dsp_version);
+       if (ret < 0)
+               return dev_err_probe(&client->dev, ret, "read of version failed\n");
+
+       dsp_version = dsp_version & MLX90635_VERSION_MASK;
+
+       if (FIELD_GET(MLX90635_DSP_FIXED, dsp_version)) {
+               if (MLX90635_DSP_VERSION(dsp_version) == MLX90635_ID_DSPv1) {
+                       dev_dbg(&client->dev,
+                               "Detected DSP v1 calibration %x\n", dsp_version);
+               } else {
+                       dev_dbg(&client->dev,
+                               "Detected Unknown EEPROM calibration %lx\n",
+                               MLX90635_DSP_VERSION(dsp_version));
+               }
+       } else {
+               return dev_err_probe(&client->dev, -EPROTONOSUPPORT,
+                       "Wrong fixed top bit %x (expected 0x8X0X)\n",
+                       dsp_version);
+       }
+
+       mlx90635->emissivity = 1000;
+       mlx90635->interaction_ts = jiffies; /* Set initial value */
+
+       /* Balance the initial active state against the autosuspend put below */
+       pm_runtime_get_noresume(&client->dev);
+       pm_runtime_set_active(&client->dev);
+
+       ret = devm_pm_runtime_enable(&client->dev);
+       if (ret)
+               return dev_err_probe(&client->dev, ret,
+                                    "failed to enable powermanagement\n");
+
+       pm_runtime_set_autosuspend_delay(&client->dev, MLX90635_SLEEP_DELAY_MS);
+       pm_runtime_use_autosuspend(&client->dev);
+       pm_runtime_put_autosuspend(&client->dev);
+
+       return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+/* I2C and OF match tables for module autoloading. */
+static const struct i2c_device_id mlx90635_id[] = {
+       { "mlx90635" },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, mlx90635_id);
+
+static const struct of_device_id mlx90635_of_match[] = {
+       { .compatible = "melexis,mlx90635" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mlx90635_of_match);
+
+/*
+ * System suspend: put the sensor in Sleep Step, then cut its supply.
+ * A regulator_disable() failure is logged and propagated.
+ */
+static int mlx90635_pm_suspend(struct device *dev)
+{
+       struct mlx90635_data *data = iio_priv(dev_get_drvdata(dev));
+       int ret;
+
+       ret = mlx90635_suspend(data);
+       if (ret < 0)
+               return ret;
+
+       ret = regulator_disable(data->regulator);
+       if (ret < 0)
+               dev_err(regmap_get_device(data->regmap),
+                       "Failed to disable power regulator: %d\n", ret);
+
+       return ret;
+}
+
+/* System resume: restore power, then re-run the full wakeup sequence. */
+static int mlx90635_pm_resume(struct device *dev)
+{
+       struct mlx90635_data *data = iio_priv(dev_get_drvdata(dev));
+       int ret;
+
+       ret = mlx90635_enable_regulator(data);
+       if (ret < 0)
+               return ret;
+
+       return mlx90635_wakeup(data);
+}
+
+/* Runtime suspend: drop into Sleep Step; power stays on so no resume hook is needed. */
+static int mlx90635_pm_runtime_suspend(struct device *dev)
+{
+       struct mlx90635_data *data = iio_priv(dev_get_drvdata(dev));
+
+       return mlx90635_pwr_sleep_step(data);
+}
+
+/* System-sleep and runtime PM callbacks (no runtime resume: handled lazily on read). */
+static const struct dev_pm_ops mlx90635_pm_ops = {
+       SYSTEM_SLEEP_PM_OPS(mlx90635_pm_suspend, mlx90635_pm_resume)
+       RUNTIME_PM_OPS(mlx90635_pm_runtime_suspend, NULL, NULL)
+};
+
+static struct i2c_driver mlx90635_driver = {
+       .driver = {
+               .name   = "mlx90635",
+               .of_match_table = mlx90635_of_match,
+               .pm     = pm_ptr(&mlx90635_pm_ops),
+       },
+       .probe = mlx90635_probe,
+       .id_table = mlx90635_id,
+};
+module_i2c_driver(mlx90635_driver);
+
+MODULE_AUTHOR("Crt Mori <cmo@melexis.com>");
+MODULE_DESCRIPTION("Melexis MLX90635 contactless Infra Red temperature sensor driver");
+MODULE_LICENSE("GPL");
index 8c5fdb0f858ab5102926c0f6858f6a0f022c44b2..f71ea4fb173fdd2950cd6a271e1975e930578ce1 100644 (file)
@@ -1365,8 +1365,8 @@ static ssize_t input_dev_show_##name(struct device *dev,          \
 {                                                                      \
        struct input_dev *input_dev = to_input_dev(dev);                \
                                                                        \
-       return scnprintf(buf, PAGE_SIZE, "%s\n",                        \
-                        input_dev->name ? input_dev->name : "");       \
+       return sysfs_emit(buf, "%s\n",                                  \
+                         input_dev->name ? input_dev->name : "");      \
 }                                                                      \
 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
 
@@ -1458,7 +1458,7 @@ static ssize_t inhibited_show(struct device *dev,
 {
        struct input_dev *input_dev = to_input_dev(dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%d\n", input_dev->inhibited);
+       return sysfs_emit(buf, "%d\n", input_dev->inhibited);
 }
 
 static ssize_t inhibited_store(struct device *dev,
@@ -1505,7 +1505,7 @@ static ssize_t input_dev_show_id_##name(struct device *dev,               \
                                        char *buf)                      \
 {                                                                      \
        struct input_dev *input_dev = to_input_dev(dev);                \
-       return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \
+       return sysfs_emit(buf, "%04x\n", input_dev->id.name);           \
 }                                                                      \
 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
 
index ac6925ce836670ead078f44816e97fc2fe7008ab..7755e5b454d2cb8d1b8ec8295647c4a47d787d8c 100644 (file)
@@ -412,4 +412,14 @@ config JOYSTICK_SENSEHAT
          To compile this driver as a module, choose M here: the
          module will be called sensehat_joystick.
 
+config JOYSTICK_SEESAW
+       tristate "Adafruit Mini I2C Gamepad with Seesaw"
+       depends on I2C
+       select INPUT_SPARSEKMAP
+       help
+         Say Y here if you want to use the Adafruit Mini I2C Gamepad.
+
+         To compile this driver as a module, choose M here: the module will be
+         called adafruit-seesaw.
+
 endif
index 3937535f00981e52052bea22e449326c3c88716e..9976f596a92085bda1bb28d7ca33e934f1be0ba3 100644 (file)
@@ -28,6 +28,7 @@ obj-$(CONFIG_JOYSTICK_N64)            += n64joy.o
 obj-$(CONFIG_JOYSTICK_PSXPAD_SPI)      += psxpad-spi.o
 obj-$(CONFIG_JOYSTICK_PXRC)            += pxrc.o
 obj-$(CONFIG_JOYSTICK_QWIIC)           += qwiic-joystick.o
+obj-$(CONFIG_JOYSTICK_SEESAW)          += adafruit-seesaw.o
 obj-$(CONFIG_JOYSTICK_SENSEHAT)        += sensehat-joystick.o
 obj-$(CONFIG_JOYSTICK_SIDEWINDER)      += sidewinder.o
 obj-$(CONFIG_JOYSTICK_SPACEBALL)       += spaceball.o
diff --git a/drivers/input/joystick/adafruit-seesaw.c b/drivers/input/joystick/adafruit-seesaw.c
new file mode 100644 (file)
index 0000000..1b9279f
--- /dev/null
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com>
+ *
+ * Driver for Adafruit Mini I2C Gamepad
+ *
+ * Based on the work of:
+ *     Oleh Kravchenko (Sparkfun Qwiic Joystick driver)
+ *
+ * Datasheet: https://cdn-learn.adafruit.com/downloads/pdf/gamepad-qt.pdf
+ * Product page: https://www.adafruit.com/product/5743
+ * Firmware and hardware sources: https://github.com/adafruit/Adafruit_Seesaw
+ *
+ * TODO:
+ *     - Add interrupt support
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define SEESAW_DEVICE_NAME             "seesaw-gamepad"
+
+#define SEESAW_ADC_BASE                        0x0900
+
+#define SEESAW_GPIO_DIRCLR_BULK                0x0103
+#define SEESAW_GPIO_BULK               0x0104
+#define SEESAW_GPIO_BULK_SET           0x0105
+#define SEESAW_GPIO_PULLENSET          0x010b
+
+#define SEESAW_STATUS_HW_ID            0x0001
+#define SEESAW_STATUS_SWRST            0x007f
+
+#define SEESAW_ADC_OFFSET              0x07
+
+#define SEESAW_BUTTON_A                        0x05
+#define SEESAW_BUTTON_B                        0x01
+#define SEESAW_BUTTON_X                        0x06
+#define SEESAW_BUTTON_Y                        0x02
+#define SEESAW_BUTTON_START            0x10
+#define SEESAW_BUTTON_SELECT           0x00
+
+#define SEESAW_ANALOG_X                        0x0e
+#define SEESAW_ANALOG_Y                        0x0f
+
+#define SEESAW_JOYSTICK_MAX_AXIS       1023
+#define SEESAW_JOYSTICK_FUZZ           2
+#define SEESAW_JOYSTICK_FLAT           4
+
+#define SEESAW_GAMEPAD_POLL_INTERVAL_MS        16
+#define SEESAW_GAMEPAD_POLL_MIN                8
+#define SEESAW_GAMEPAD_POLL_MAX                32
+
+/* Bitmask of the seesaw GPIO lines wired to the six gamepad buttons. */
+static const unsigned long SEESAW_BUTTON_MASK =
+       BIT(SEESAW_BUTTON_A) | BIT(SEESAW_BUTTON_B) | BIT(SEESAW_BUTTON_X) |
+       BIT(SEESAW_BUTTON_Y) | BIT(SEESAW_BUTTON_START) |
+       BIT(SEESAW_BUTTON_SELECT);
+
+/* Per-device driver state, allocated in probe. */
+struct seesaw_gamepad {
+       struct input_dev *input_dev;
+       struct i2c_client *i2c_client;
+};
+
+/* One complete snapshot of the gamepad: both axes plus the button bank. */
+struct seesaw_data {
+       u16 x;
+       u16 y;
+       u32 button_state;       /* bit N set = button on GPIO N pressed */
+};
+
+/* Sparse keymap translating seesaw GPIO numbers to input key codes. */
+static const struct key_entry seesaw_buttons_new[] = {
+       { KE_KEY, SEESAW_BUTTON_A, .keycode = BTN_SOUTH },
+       { KE_KEY, SEESAW_BUTTON_B, .keycode = BTN_EAST },
+       { KE_KEY, SEESAW_BUTTON_X, .keycode = BTN_NORTH },
+       { KE_KEY, SEESAW_BUTTON_Y, .keycode = BTN_WEST },
+       { KE_KEY, SEESAW_BUTTON_START, .keycode = BTN_START },
+       { KE_KEY, SEESAW_BUTTON_SELECT, .keycode = BTN_SELECT },
+       { KE_END, 0 }
+};
+
+/*
+ * Read @count bytes from the 16-bit big-endian seesaw register @reg into
+ * @buf, using a combined address-write + data-read I2C transaction.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int seesaw_register_read(struct i2c_client *client, u16 reg, void *buf,
+                               int count)
+{
+       __be16 register_buf = cpu_to_be16(reg);
+       struct i2c_msg message_buf[2] = {
+               {
+                       .addr = client->addr,
+                       .flags = client->flags,
+                       .len = sizeof(register_buf),
+                       .buf = (u8 *)&register_buf,
+               },
+               {
+                       .addr = client->addr,
+                       .flags = client->flags | I2C_M_RD,
+                       .len = count,
+                       .buf = (u8 *)buf,
+               },
+       };
+       int ret;
+
+       ret = i2c_transfer(client->adapter, message_buf,
+                          ARRAY_SIZE(message_buf));
+       if (ret < 0)
+               return ret;
+
+       /*
+        * i2c_transfer() returns the number of messages completed; fewer
+        * than both messages means the read data is not valid.
+        */
+       if (ret != ARRAY_SIZE(message_buf))
+               return -EIO;
+
+       return 0;
+}
+
+/*
+ * Write the single byte @value to the 16-bit seesaw register @reg.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int seesaw_register_write_u8(struct i2c_client *client, u16 reg,
+                                   u8 value)
+{
+       u8 write_buf[sizeof(reg) + sizeof(value)];
+       int ret;
+
+       /* Register address goes out big-endian, immediately followed by data. */
+       put_unaligned_be16(reg, write_buf);
+       write_buf[sizeof(reg)] = value;
+
+       ret = i2c_master_send(client, write_buf, sizeof(write_buf));
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * Write the 32-bit big-endian @value to the 16-bit seesaw register @reg.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int seesaw_register_write_u32(struct i2c_client *client, u16 reg,
+                                    u32 value)
+{
+       u8 write_buf[sizeof(reg) + sizeof(value)];
+       int ret;
+
+       /* Register address first, then the payload, all big-endian. */
+       put_unaligned_be16(reg, write_buf);
+       put_unaligned_be32(value, write_buf + sizeof(reg));
+       ret = i2c_master_send(client, write_buf, sizeof(write_buf));
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * Read a complete gamepad snapshot (GPIO button bank plus both analog
+ * axes) into @data.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int seesaw_read_data(struct i2c_client *client, struct seesaw_data *data)
+{
+       __be16 adc_data;
+       __be32 read_buf;
+       int err;
+
+       err = seesaw_register_read(client, SEESAW_GPIO_BULK,
+                                  &read_buf, sizeof(read_buf));
+       if (err)
+               return err;
+
+       /* Buttons are active-low (pull-ups enabled in probe), so invert. */
+       data->button_state = ~be32_to_cpu(read_buf);
+
+       err = seesaw_register_read(client,
+                                  SEESAW_ADC_BASE |
+                                       (SEESAW_ADC_OFFSET + SEESAW_ANALOG_X),
+                                  &adc_data, sizeof(adc_data));
+       if (err)
+               return err;
+       /*
+        * ADC reads left as max and right as 0, must be reversed since kernel
+        * expects reports in opposite order.
+        */
+       data->x = SEESAW_JOYSTICK_MAX_AXIS - be16_to_cpu(adc_data);
+
+       err = seesaw_register_read(client,
+                                  SEESAW_ADC_BASE |
+                                       (SEESAW_ADC_OFFSET + SEESAW_ANALOG_Y),
+                                  &adc_data, sizeof(adc_data));
+       if (err)
+               return err;
+
+       data->y = be16_to_cpu(adc_data);
+
+       return 0;
+}
+
+/*
+ * Polled-input callback: fetch the current device state over I2C and
+ * forward axes and button events to the input core.
+ */
+static void seesaw_poll(struct input_dev *input)
+{
+       struct seesaw_gamepad *private = input_get_drvdata(input);
+       struct seesaw_data data;
+       int err, i;
+
+       err = seesaw_read_data(private->i2c_client, &data);
+       if (err) {
+               dev_err_ratelimited(&input->dev,
+                                   "failed to read joystick state: %d\n", err);
+               return;
+       }
+
+       input_report_abs(input, ABS_X, data.x);
+       input_report_abs(input, ABS_Y, data.y);
+
+       /* Report each wired button through the sparse keymap. */
+       for_each_set_bit(i, &SEESAW_BUTTON_MASK,
+                        BITS_PER_TYPE(SEESAW_BUTTON_MASK)) {
+               if (!sparse_keymap_report_event(input, i,
+                                               data.button_state & BIT(i),
+                                               false))
+                       dev_err_ratelimited(&input->dev,
+                                           "failed to report keymap event\n");
+       }
+
+       input_sync(input);
+}
+
+/*
+ * Probe: reset the seesaw firmware, configure the button GPIOs as
+ * pulled-up inputs, and register a polled input device.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int seesaw_probe(struct i2c_client *client)
+{
+       struct seesaw_gamepad *seesaw;
+       u8 hardware_id;
+       int err;
+
+       /* Software-reset the seesaw MCU into a known register state. */
+       err = seesaw_register_write_u8(client, SEESAW_STATUS_SWRST, 0xFF);
+       if (err)
+               return err;
+
+       /* Wait for the registers to reset before proceeding */
+       usleep_range(10000, 15000);
+
+       seesaw = devm_kzalloc(&client->dev, sizeof(*seesaw), GFP_KERNEL);
+       if (!seesaw)
+               return -ENOMEM;
+
+       err = seesaw_register_read(client, SEESAW_STATUS_HW_ID,
+                                  &hardware_id, sizeof(hardware_id));
+       if (err)
+               return err;
+
+       dev_dbg(&client->dev, "Adafruit Seesaw Gamepad, Hardware ID: %02x\n",
+               hardware_id);
+
+       /* Set Pin Mode to input and enable pull-up resistors */
+       err = seesaw_register_write_u32(client, SEESAW_GPIO_DIRCLR_BULK,
+                                       SEESAW_BUTTON_MASK);
+       if (err)
+               return err;
+       err = seesaw_register_write_u32(client, SEESAW_GPIO_PULLENSET,
+                                       SEESAW_BUTTON_MASK);
+       if (err)
+               return err;
+       err = seesaw_register_write_u32(client, SEESAW_GPIO_BULK_SET,
+                                       SEESAW_BUTTON_MASK);
+       if (err)
+               return err;
+
+       seesaw->i2c_client = client;
+       seesaw->input_dev = devm_input_allocate_device(&client->dev);
+       if (!seesaw->input_dev)
+               return -ENOMEM;
+
+       seesaw->input_dev->id.bustype = BUS_I2C;
+       seesaw->input_dev->name = "Adafruit Seesaw Gamepad";
+       seesaw->input_dev->phys = "i2c/" SEESAW_DEVICE_NAME;
+       input_set_drvdata(seesaw->input_dev, seesaw);
+       /* Both axes report 0..1023 with small fuzz/flat for the analog stick. */
+       input_set_abs_params(seesaw->input_dev, ABS_X,
+                            0, SEESAW_JOYSTICK_MAX_AXIS,
+                            SEESAW_JOYSTICK_FUZZ, SEESAW_JOYSTICK_FLAT);
+       input_set_abs_params(seesaw->input_dev, ABS_Y,
+                            0, SEESAW_JOYSTICK_MAX_AXIS,
+                            SEESAW_JOYSTICK_FUZZ, SEESAW_JOYSTICK_FLAT);
+
+       err = sparse_keymap_setup(seesaw->input_dev, seesaw_buttons_new, NULL);
+       if (err) {
+               dev_err(&client->dev,
+                       "failed to set up input device keymap: %d\n", err);
+               return err;
+       }
+
+       /* No interrupt line is used (see TODO above); poll instead. */
+       err = input_setup_polling(seesaw->input_dev, seesaw_poll);
+       if (err) {
+               dev_err(&client->dev, "failed to set up polling: %d\n", err);
+               return err;
+       }
+
+       input_set_poll_interval(seesaw->input_dev,
+                               SEESAW_GAMEPAD_POLL_INTERVAL_MS);
+       input_set_max_poll_interval(seesaw->input_dev, SEESAW_GAMEPAD_POLL_MAX);
+       input_set_min_poll_interval(seesaw->input_dev, SEESAW_GAMEPAD_POLL_MIN);
+
+       err = input_register_device(seesaw->input_dev);
+       if (err) {
+               dev_err(&client->dev, "failed to register joystick: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+/* Legacy I2C device-ID match table. */
+static const struct i2c_device_id seesaw_id_table[] = {
+       { SEESAW_DEVICE_NAME },
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, seesaw_id_table);
+
+/* Devicetree match table. */
+static const struct of_device_id seesaw_of_table[] = {
+       { .compatible = "adafruit,seesaw-gamepad"},
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, seesaw_of_table);
+
+static struct i2c_driver seesaw_driver = {
+       .driver = {
+               .name = SEESAW_DEVICE_NAME,
+               .of_match_table = seesaw_of_table,
+       },
+       .id_table = seesaw_id_table,
+       .probe = seesaw_probe,
+};
+module_i2c_driver(seesaw_driver);
+
+MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>");
+MODULE_DESCRIPTION("Adafruit Mini I2C Gamepad driver");
+MODULE_LICENSE("GPL");
index bf8b1cc0ea9c7681aab5feb4d968607866c6f70f..f1822c19a289d95832ca1390c205853da40d4fff 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/input.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/input/as5011.h>
 #include <linux/slab.h>
@@ -61,7 +61,7 @@ MODULE_LICENSE("GPL");
 struct as5011_device {
        struct input_dev *input_dev;
        struct i2c_client *i2c_client;
-       unsigned int button_gpio;
+       struct gpio_desc *button_gpiod;
        unsigned int button_irq;
        unsigned int axis_irq;
 };
@@ -114,7 +114,7 @@ static int as5011_i2c_read(struct i2c_client *client,
 static irqreturn_t as5011_button_interrupt(int irq, void *dev_id)
 {
        struct as5011_device *as5011 = dev_id;
-       int val = gpio_get_value_cansleep(as5011->button_gpio);
+       int val = gpiod_get_value_cansleep(as5011->button_gpiod);
 
        input_report_key(as5011->input_dev, BTN_JOYSTICK, !val);
        input_sync(as5011->input_dev);
@@ -248,7 +248,6 @@ static int as5011_probe(struct i2c_client *client)
 
        as5011->i2c_client = client;
        as5011->input_dev = input_dev;
-       as5011->button_gpio = plat_data->button_gpio;
        as5011->axis_irq = plat_data->axis_irq;
 
        input_dev->name = "Austria Microsystem as5011 joystick";
@@ -262,18 +261,20 @@ static int as5011_probe(struct i2c_client *client)
        input_set_abs_params(as5011->input_dev, ABS_Y,
                AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
 
-       error = gpio_request(as5011->button_gpio, "AS5011 button");
-       if (error < 0) {
-               dev_err(&client->dev, "Failed to request button gpio\n");
+       as5011->button_gpiod = devm_gpiod_get(&client->dev, NULL, GPIOD_IN);
+       if (IS_ERR(as5011->button_gpiod)) {
+               error = PTR_ERR(as5011->button_gpiod);
+               dev_err(&client->dev, "Failed to request button GPIO\n");
                goto err_free_mem;
        }
+       gpiod_set_consumer_name(as5011->button_gpiod, "AS5011 button");
 
-       irq = gpio_to_irq(as5011->button_gpio);
+       irq = gpiod_to_irq(as5011->button_gpiod);
        if (irq < 0) {
                dev_err(&client->dev,
                        "Failed to get irq number for button gpio\n");
                error = irq;
-               goto err_free_button_gpio;
+               goto err_free_mem;
        }
 
        as5011->button_irq = irq;
@@ -286,7 +287,7 @@ static int as5011_probe(struct i2c_client *client)
        if (error < 0) {
                dev_err(&client->dev,
                        "Can't allocate button irq %d\n", as5011->button_irq);
-               goto err_free_button_gpio;
+               goto err_free_mem;
        }
 
        error = as5011_configure_chip(as5011, plat_data);
@@ -317,8 +318,6 @@ err_free_axis_irq:
        free_irq(as5011->axis_irq, as5011);
 err_free_button_irq:
        free_irq(as5011->button_irq, as5011);
-err_free_button_gpio:
-       gpio_free(as5011->button_gpio);
 err_free_mem:
        input_free_device(input_dev);
        kfree(as5011);
@@ -332,7 +331,6 @@ static void as5011_remove(struct i2c_client *client)
 
        free_irq(as5011->axis_irq, as5011);
        free_irq(as5011->button_irq, as5011);
-       gpio_free(as5011->button_gpio);
 
        input_unregister_device(as5011->input_dev);
        kfree(as5011);
index ea2bf5951d67716d332c6a8b95d6c2d23dd7b88e..52d9eab667b7acbd9d355af7ee4755d12100a806 100644 (file)
@@ -5,15 +5,17 @@
  * Copyright (C) 2018 Marcus Folkesson <marcus.folkesson@gmail.com>
  */
 
-#include <linux/kernel.h>
+#include <linux/cleanup.h>
 #include <linux/errno.h>
-#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/uaccess.h>
+
 #include <linux/usb.h>
 #include <linux/usb/input.h>
-#include <linux/mutex.h>
-#include <linux/input.h>
 
 #define PXRC_VENDOR_ID         0x1781
 #define PXRC_PRODUCT_ID                0x0898
@@ -81,33 +83,28 @@ exit:
 static int pxrc_open(struct input_dev *input)
 {
        struct pxrc *pxrc = input_get_drvdata(input);
-       int retval;
+       int error;
 
-       mutex_lock(&pxrc->pm_mutex);
-       retval = usb_submit_urb(pxrc->urb, GFP_KERNEL);
-       if (retval) {
+       guard(mutex)(&pxrc->pm_mutex);
+       error = usb_submit_urb(pxrc->urb, GFP_KERNEL);
+       if (error) {
                dev_err(&pxrc->intf->dev,
                        "%s - usb_submit_urb failed, error: %d\n",
-                       __func__, retval);
-               retval = -EIO;
-               goto out;
+                       __func__, error);
+               return -EIO;
        }
 
        pxrc->is_open = true;
-
-out:
-       mutex_unlock(&pxrc->pm_mutex);
-       return retval;
+       return 0;
 }
 
 static void pxrc_close(struct input_dev *input)
 {
        struct pxrc *pxrc = input_get_drvdata(input);
 
-       mutex_lock(&pxrc->pm_mutex);
+       guard(mutex)(&pxrc->pm_mutex);
        usb_kill_urb(pxrc->urb);
        pxrc->is_open = false;
-       mutex_unlock(&pxrc->pm_mutex);
 }
 
 static void pxrc_free_urb(void *_pxrc)
@@ -208,10 +205,9 @@ static int pxrc_suspend(struct usb_interface *intf, pm_message_t message)
 {
        struct pxrc *pxrc = usb_get_intfdata(intf);
 
-       mutex_lock(&pxrc->pm_mutex);
+       guard(mutex)(&pxrc->pm_mutex);
        if (pxrc->is_open)
                usb_kill_urb(pxrc->urb);
-       mutex_unlock(&pxrc->pm_mutex);
 
        return 0;
 }
@@ -219,14 +215,12 @@ static int pxrc_suspend(struct usb_interface *intf, pm_message_t message)
 static int pxrc_resume(struct usb_interface *intf)
 {
        struct pxrc *pxrc = usb_get_intfdata(intf);
-       int retval = 0;
 
-       mutex_lock(&pxrc->pm_mutex);
+       guard(mutex)(&pxrc->pm_mutex);
        if (pxrc->is_open && usb_submit_urb(pxrc->urb, GFP_KERNEL) < 0)
-               retval = -EIO;
+               return -EIO;
 
-       mutex_unlock(&pxrc->pm_mutex);
-       return retval;
+       return 0;
 }
 
 static int pxrc_pre_reset(struct usb_interface *intf)
index e2c1848182de9a44683a49f673d0ff3ff3a41999..7c4b2a5cc1b54a1c98a92b38076df6a7b0424b49 100644 (file)
@@ -294,6 +294,7 @@ static const struct xpad_device {
        { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
        { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
        { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+       { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
        { 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
        { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
        { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -491,6 +492,7 @@ static const struct usb_device_id xpad_table[] = {
        XPAD_XBOX360_VENDOR(0x15e4),            /* Numark Xbox 360 controllers */
        XPAD_XBOX360_VENDOR(0x162e),            /* Joytech Xbox 360 controllers */
        XPAD_XBOX360_VENDOR(0x1689),            /* Razer Onza */
+       XPAD_XBOX360_VENDOR(0x17ef),            /* Lenovo */
        XPAD_XBOX360_VENDOR(0x1949),            /* Amazon controllers */
        XPAD_XBOX360_VENDOR(0x1bad),            /* Harmonix Rock Band guitar and drums */
        XPAD_XBOX360_VENDOR(0x20d6),            /* PowerA controllers */
@@ -1670,7 +1672,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
        if (!led)
                return -ENOMEM;
 
-       xpad->pad_nr = ida_simple_get(&xpad_pad_seq, 0, 0, GFP_KERNEL);
+       xpad->pad_nr = ida_alloc(&xpad_pad_seq, GFP_KERNEL);
        if (xpad->pad_nr < 0) {
                error = xpad->pad_nr;
                goto err_free_mem;
@@ -1693,7 +1695,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
        return 0;
 
 err_free_id:
-       ida_simple_remove(&xpad_pad_seq, xpad->pad_nr);
+       ida_free(&xpad_pad_seq, xpad->pad_nr);
 err_free_mem:
        kfree(led);
        xpad->led = NULL;
@@ -1706,7 +1708,7 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
 
        if (xpad_led) {
                led_classdev_unregister(&xpad_led->led_cdev);
-               ida_simple_remove(&xpad_pad_seq, xpad->pad_nr);
+               ida_free(&xpad_pad_seq, xpad->pad_nr);
                kfree(xpad_led);
        }
 }
index 786f00f6b7fd8be3e876598d30a2ba8e6615ce10..7f67f9f2946b484317575d529ee35a385fc2882e 100644 (file)
@@ -791,9 +791,9 @@ static bool atkbd_is_portable_device(void)
  * not work. So in this case simply assume a keyboard is connected to avoid
  * confusing some laptop keyboards.
  *
- * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using a fake id is
- * ok in translated mode, only atkbd_select_set() checks atkbd->id and in
- * translated mode that is a no-op.
+ * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using the standard
+ * 0xab83 id is ok in translated mode, only atkbd_select_set() checks atkbd->id
+ * and in translated mode that is a no-op.
  */
 static bool atkbd_skip_getid(struct atkbd *atkbd)
 {
@@ -824,6 +824,11 @@ static int atkbd_probe(struct atkbd *atkbd)
                                 "keyboard reset failed on %s\n",
                                 ps2dev->serio->phys);
 
+       if (atkbd_skip_getid(atkbd)) {
+               atkbd->id = 0xab83;
+               goto deactivate_kbd;
+       }
+
 /*
  * Then we check the keyboard ID. We should get 0xab83 under normal conditions.
  * Some keyboards report different values, but the first byte is always 0xab or
@@ -832,10 +837,10 @@ static int atkbd_probe(struct atkbd *atkbd)
  */
 
        param[0] = param[1] = 0xa5;     /* initialize with invalid values */
-       if (atkbd_skip_getid(atkbd) || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+       if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
 
 /*
- * If the get ID command was skipped or failed, we check if we can at least set
+ * If the get ID command failed, we check if we can at least set
  * the LEDs on the keyboard. This should work on every keyboard out there.
  * It also turns the LEDs off, which we want anyway.
  */
@@ -858,6 +863,7 @@ static int atkbd_probe(struct atkbd *atkbd)
                return -1;
        }
 
+deactivate_kbd:
 /*
  * Make sure nothing is coming from the keyboard and disturbs our
  * internal state.
index 1b4937dce6725f8a94da1eaec79d1987dc6920a6..52fba9ee7c1d86275efa779f0980d0283ccd5265 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/input.h>
 #include <linux/leds.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/i2c.h>
 #include <linux/gpio/consumer.h>
+#include <linux/bitfield.h>
 
 #define CAP11XX_REG_MAIN_CONTROL       0x00
 #define CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT    (6)
@@ -24,6 +25,7 @@
 #define CAP11XX_REG_NOISE_FLAG_STATUS  0x0a
 #define CAP11XX_REG_SENOR_DELTA(X)     (0x10 + (X))
 #define CAP11XX_REG_SENSITIVITY_CONTROL        0x1f
+#define CAP11XX_REG_SENSITIVITY_CONTROL_DELTA_SENSE_MASK       0x70
 #define CAP11XX_REG_CONFIG             0x20
 #define CAP11XX_REG_SENSOR_ENABLE      0x21
 #define CAP11XX_REG_SENSOR_CONFIG      0x22
@@ -32,6 +34,7 @@
 #define CAP11XX_REG_CALIBRATION                0x26
 #define CAP11XX_REG_INT_ENABLE         0x27
 #define CAP11XX_REG_REPEAT_RATE                0x28
+#define CAP11XX_REG_SIGNAL_GUARD_ENABLE        0x29
 #define CAP11XX_REG_MT_CONFIG          0x2a
 #define CAP11XX_REG_MT_PATTERN_CONFIG  0x2b
 #define CAP11XX_REG_MT_PATTERN         0x2d
@@ -47,6 +50,8 @@
 #define CAP11XX_REG_SENSOR_BASE_CNT(X) (0x50 + (X))
 #define CAP11XX_REG_LED_POLARITY       0x73
 #define CAP11XX_REG_LED_OUTPUT_CONTROL 0x74
+#define CAP11XX_REG_CALIB_SENSITIVITY_CONFIG   0x80
+#define CAP11XX_REG_CALIB_SENSITIVITY_CONFIG2  0x81
 
 #define CAP11XX_REG_LED_DUTY_CYCLE_1   0x90
 #define CAP11XX_REG_LED_DUTY_CYCLE_2   0x91
@@ -78,12 +83,20 @@ struct cap11xx_led {
 
 struct cap11xx_priv {
        struct regmap *regmap;
+       struct device *dev;
        struct input_dev *idev;
+       const struct cap11xx_hw_model *model;
+       u8 id;
 
        struct cap11xx_led *leds;
        int num_leds;
 
        /* config */
+       u8 analog_gain;
+       u8 sensitivity_delta_sense;
+       u8 signal_guard_inputs_mask;
+       u32 thresholds[8];
+       u32 calib_sensitivities[8];
        u32 keycodes[];
 };
 
@@ -160,9 +173,6 @@ static bool cap11xx_volatile_reg(struct device *dev, unsigned int reg)
        case CAP11XX_REG_SENOR_DELTA(3):
        case CAP11XX_REG_SENOR_DELTA(4):
        case CAP11XX_REG_SENOR_DELTA(5):
-       case CAP11XX_REG_PRODUCT_ID:
-       case CAP11XX_REG_MANUFACTURER_ID:
-       case CAP11XX_REG_REVISION:
                return true;
        }
 
@@ -177,10 +187,179 @@ static const struct regmap_config cap11xx_regmap_config = {
        .reg_defaults = cap11xx_reg_defaults,
 
        .num_reg_defaults = ARRAY_SIZE(cap11xx_reg_defaults),
-       .cache_type = REGCACHE_RBTREE,
+       .cache_type = REGCACHE_MAPLE,
        .volatile_reg = cap11xx_volatile_reg,
 };
 
+/*
+ * Program calibration sensitivity for channels 1-4: two bits per
+ * channel, channel 1 in the least-significant bits.
+ */
+static int cap11xx_write_calib_sens_config_1(struct cap11xx_priv *priv)
+{
+       return regmap_write(priv->regmap,
+                           CAP11XX_REG_CALIB_SENSITIVITY_CONFIG,
+                           (priv->calib_sensitivities[3] << 6) |
+                           (priv->calib_sensitivities[2] << 4) |
+                           (priv->calib_sensitivities[1] << 2) |
+                           priv->calib_sensitivities[0]);
+}
+
+/*
+ * Program calibration sensitivity for channels 5-8: two bits per
+ * channel, channel 5 in the least-significant bits.
+ */
+static int cap11xx_write_calib_sens_config_2(struct cap11xx_priv *priv)
+{
+       return regmap_write(priv->regmap,
+                           CAP11XX_REG_CALIB_SENSITIVITY_CONFIG2,
+                           (priv->calib_sensitivities[7] << 6) |
+                           (priv->calib_sensitivities[6] << 4) |
+                           (priv->calib_sensitivities[5] << 2) |
+                           priv->calib_sensitivities[4]);
+}
+
+/*
+ * Parse the devicetree key-related properties and apply them to the chip:
+ * sensor gain, IRQ polarity, delta-sense sensitivity, per-channel input
+ * thresholds, calibration sensitivities, signal-guard inputs and keycodes.
+ *
+ * Returns 0 on success or a negative errno on failure; properties not
+ * supported by the detected model are warned about and skipped.
+ */
+static int cap11xx_init_keys(struct cap11xx_priv *priv)
+{
+       struct device_node *node = priv->dev->of_node;
+       struct device *dev = priv->dev;
+       int i, error;
+       u32 u32_val;
+
+       if (!node) {
+               dev_err(dev, "Corresponding DT entry is not available\n");
+               return -ENODEV;
+       }
+
+       if (!of_property_read_u32(node, "microchip,sensor-gain", &u32_val)) {
+               if (priv->model->no_gain) {
+                       dev_warn(dev,
+                                "This model doesn't support 'sensor-gain'\n");
+               } else if (is_power_of_2(u32_val) && u32_val <= 8) {
+                       /* Gain is stored as log2 of the DT value (1/2/4/8). */
+                       priv->analog_gain = (u8)ilog2(u32_val);
+
+                       error = regmap_update_bits(priv->regmap,
+                               CAP11XX_REG_MAIN_CONTROL,
+                               CAP11XX_REG_MAIN_CONTROL_GAIN_MASK,
+                               priv->analog_gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT);
+                       if (error)
+                               return error;
+               } else {
+                       dev_err(dev, "Invalid sensor-gain value %u\n", u32_val);
+                       return -EINVAL;
+               }
+       }
+
+       if (of_property_read_bool(node, "microchip,irq-active-high")) {
+               if (priv->id == CAP1106 ||
+                   priv->id == CAP1126 ||
+                   priv->id == CAP1188) {
+                       error = regmap_update_bits(priv->regmap,
+                                                  CAP11XX_REG_CONFIG2,
+                                                  CAP11XX_REG_CONFIG2_ALT_POL,
+                                                  0);
+                       if (error)
+                               return error;
+               } else {
+                       dev_warn(dev,
+                                "This model doesn't support 'irq-active-high'\n");
+               }
+       }
+
+       if (!of_property_read_u32(node, "microchip,sensitivity-delta-sense", &u32_val)) {
+               if (!is_power_of_2(u32_val) || u32_val > 128) {
+                       dev_err(dev, "Invalid sensitivity-delta-sense value %u\n", u32_val);
+                       return -EINVAL;
+               }
+
+               /* Register encodes 128/value, hence the inverted field. */
+               priv->sensitivity_delta_sense = (u8)ilog2(u32_val);
+               u32_val = ~(FIELD_PREP(CAP11XX_REG_SENSITIVITY_CONTROL_DELTA_SENSE_MASK,
+                                       priv->sensitivity_delta_sense));
+
+               error = regmap_update_bits(priv->regmap,
+                                          CAP11XX_REG_SENSITIVITY_CONTROL,
+                                          CAP11XX_REG_SENSITIVITY_CONTROL_DELTA_SENSE_MASK,
+                                          u32_val);
+               if (error)
+                       return error;
+       }
+
+       if (!of_property_read_u32_array(node, "microchip,input-threshold",
+                                       priv->thresholds, priv->model->num_channels)) {
+               for (i = 0; i < priv->model->num_channels; i++) {
+                       if (priv->thresholds[i] > 127) {
+                               dev_err(dev, "Invalid input-threshold value %u\n",
+                                       priv->thresholds[i]);
+                               return -EINVAL;
+                       }
+
+                       error = regmap_write(priv->regmap,
+                                            CAP11XX_REG_SENSOR_THRESH(i),
+                                            priv->thresholds[i]);
+                       if (error)
+                               return error;
+               }
+       }
+
+       if (!of_property_read_u32_array(node, "microchip,calib-sensitivity",
+                                       priv->calib_sensitivities,
+                                       priv->model->num_channels)) {
+               /* Only CAP1293/CAP1298 have the calibration registers. */
+               if (priv->id == CAP1293 || priv->id == CAP1298) {
+                       for (i = 0; i < priv->model->num_channels; i++) {
+                               if (!is_power_of_2(priv->calib_sensitivities[i]) ||
+                                   priv->calib_sensitivities[i] > 4) {
+                                       dev_err(dev, "Invalid calib-sensitivity value %u\n",
+                                               priv->calib_sensitivities[i]);
+                                       return -EINVAL;
+                               }
+                               priv->calib_sensitivities[i] = ilog2(priv->calib_sensitivities[i]);
+                       }
+
+                       error = cap11xx_write_calib_sens_config_1(priv);
+                       if (error)
+                               return error;
+
+                       /* CAP1298 has eight channels, needing the second register. */
+                       if (priv->id == CAP1298) {
+                               error = cap11xx_write_calib_sens_config_2(priv);
+                               if (error)
+                                       return error;
+                       }
+               } else {
+                       dev_warn(dev,
+                                "This model doesn't support 'calib-sensitivity'\n");
+               }
+       }
+
+       for (i = 0; i < priv->model->num_channels; i++) {
+               if (!of_property_read_u32_index(node, "microchip,signal-guard",
+                                               i, &u32_val)) {
+                       if (u32_val > 1)
+                               return -EINVAL;
+                       if (u32_val)
+                               priv->signal_guard_inputs_mask |= 0x01 << i;
+               }
+       }
+
+       if (priv->signal_guard_inputs_mask) {
+               if (priv->id == CAP1293 || priv->id == CAP1298) {
+                       error = regmap_write(priv->regmap,
+                                            CAP11XX_REG_SIGNAL_GUARD_ENABLE,
+                                            priv->signal_guard_inputs_mask);
+                       if (error)
+                               return error;
+               } else {
+                       dev_warn(dev,
+                                "This model doesn't support 'signal-guard'\n");
+               }
+       }
+
+       /* Provide some useful defaults */
+       for (i = 0; i < priv->model->num_channels; i++)
+               priv->keycodes[i] = KEY_A + i;
+
+       of_property_read_u32_array(node, "linux,keycodes",
+                                  priv->keycodes, priv->model->num_channels);
+
+       /* Disable autorepeat. The Linux input system has its own handling. */
+       error = regmap_write(priv->regmap, CAP11XX_REG_REPEAT_RATE, 0);
+       if (error)
+               return error;
+
+       return 0;
+}
+
 static irqreturn_t cap11xx_thread_func(int irq_num, void *data)
 {
        struct cap11xx_priv *priv = data;
@@ -332,11 +511,9 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client)
        const struct i2c_device_id *id = i2c_client_get_device_id(i2c_client);
        struct device *dev = &i2c_client->dev;
        struct cap11xx_priv *priv;
-       struct device_node *node;
        const struct cap11xx_hw_model *cap;
-       int i, error, irq, gain = 0;
+       int i, error;
        unsigned int val, rev;
-       u32 gain32;
 
        if (id->driver_data >= ARRAY_SIZE(cap11xx_devices)) {
                dev_err(dev, "Invalid device ID %lu\n", id->driver_data);
@@ -355,6 +532,8 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client)
        if (!priv)
                return -ENOMEM;
 
+       priv->dev = dev;
+
        priv->regmap = devm_regmap_init_i2c(i2c_client, &cap11xx_regmap_config);
        if (IS_ERR(priv->regmap))
                return PTR_ERR(priv->regmap);
@@ -384,50 +563,15 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client)
                return error;
 
        dev_info(dev, "CAP11XX detected, model %s, revision 0x%02x\n",
-                id->name, rev);
-       node = dev->of_node;
-
-       if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) {
-               if (cap->no_gain)
-                       dev_warn(dev,
-                                "This version doesn't support sensor gain\n");
-               else if (is_power_of_2(gain32) && gain32 <= 8)
-                       gain = ilog2(gain32);
-               else
-                       dev_err(dev, "Invalid sensor-gain value %d\n", gain32);
-       }
+                        id->name, rev);
 
-       if (id->driver_data == CAP1106 ||
-           id->driver_data == CAP1126 ||
-           id->driver_data == CAP1188) {
-               if (of_property_read_bool(node, "microchip,irq-active-high")) {
-                       error = regmap_update_bits(priv->regmap,
-                                                  CAP11XX_REG_CONFIG2,
-                                                  CAP11XX_REG_CONFIG2_ALT_POL,
-                                                  0);
-                       if (error)
-                               return error;
-               }
-       }
-
-       /* Provide some useful defaults */
-       for (i = 0; i < cap->num_channels; i++)
-               priv->keycodes[i] = KEY_A + i;
-
-       of_property_read_u32_array(node, "linux,keycodes",
-                                  priv->keycodes, cap->num_channels);
+       priv->model = cap;
+       priv->id = id->driver_data;
 
-       if (!cap->no_gain) {
-               error = regmap_update_bits(priv->regmap,
-                               CAP11XX_REG_MAIN_CONTROL,
-                               CAP11XX_REG_MAIN_CONTROL_GAIN_MASK,
-                               gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT);
-               if (error)
-                       return error;
-       }
+       dev_info(dev, "CAP11XX device detected, model %s, revision 0x%02x\n",
+                id->name, rev);
 
-       /* Disable autorepeat. The Linux input system has its own handling. */
-       error = regmap_write(priv->regmap, CAP11XX_REG_REPEAT_RATE, 0);
+       error = cap11xx_init_keys(priv);
        if (error)
                return error;
 
@@ -439,7 +583,7 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client)
        priv->idev->id.bustype = BUS_I2C;
        priv->idev->evbit[0] = BIT_MASK(EV_KEY);
 
-       if (of_property_read_bool(node, "autorepeat"))
+       if (of_property_read_bool(dev->of_node, "autorepeat"))
                __set_bit(EV_REP, priv->idev->evbit);
 
        for (i = 0; i < cap->num_channels; i++)
@@ -474,13 +618,8 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client)
        if (error)
                return error;
 
-       irq = irq_of_parse_and_map(node, 0);
-       if (!irq) {
-               dev_err(dev, "Unable to parse or map IRQ\n");
-               return -ENXIO;
-       }
-
-       error = devm_request_threaded_irq(dev, irq, NULL, cap11xx_thread_func,
+       error = devm_request_threaded_irq(dev, i2c_client->irq,
+                                         NULL, cap11xx_thread_func,
                                          IRQF_ONESHOT, dev_name(dev), priv);
        if (error)
                return error;
index 2e7c2c046e675f8ae21a6fe668e9b7dfa23c1a9a..9f3bcd41cf67da810c3aad9fe535feea6a89ea48 100644 (file)
@@ -45,7 +45,9 @@ struct gpio_button_data {
        unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
 
        unsigned int irq;
+       unsigned int wakeirq;
        unsigned int wakeup_trigger_type;
+
        spinlock_t lock;
        bool disabled;
        bool key_pressed;
@@ -511,6 +513,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
        struct gpio_button_data *bdata = &ddata->data[idx];
        irq_handler_t isr;
        unsigned long irqflags;
+       const char *wakedesc;
        int irq;
        int error;
 
@@ -575,15 +578,23 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
                                        !gpiod_cansleep(bdata->gpiod);
                }
 
+               /*
+                * If an interrupt was specified, use it instead of the gpio
+                * interrupt and use the gpio for reading the state. A separate
+                * interrupt may be used as the main button interrupt for
+                * runtime PM to detect events also in deeper idle states. If a
+                * dedicated wakeirq is used for system suspend only, see below
+                * for bdata->wakeirq setup.
+                */
                if (button->irq) {
                        bdata->irq = button->irq;
                } else {
                        irq = gpiod_to_irq(bdata->gpiod);
                        if (irq < 0) {
                                error = irq;
-                               dev_err(dev,
-                                       "Unable to get irq number for GPIO %d, error %d\n",
-                                       button->gpio, error);
+                               dev_err_probe(dev, error,
+                                             "Unable to get irq number for GPIO %d\n",
+                                             button->gpio);
                                return error;
                        }
                        bdata->irq = irq;
@@ -672,6 +683,36 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
                return error;
        }
 
+       if (!button->wakeirq)
+               return 0;
+
+       /* Use :wakeup suffix like drivers/base/power/wakeirq.c does */
+       wakedesc = devm_kasprintf(dev, GFP_KERNEL, "%s:wakeup", desc);
+       if (!wakedesc)
+               return -ENOMEM;
+
+       bdata->wakeirq = button->wakeirq;
+       irqflags |= IRQF_NO_SUSPEND;
+
+       /*
+        * Wakeirq shares the handler with the main interrupt, it's only
+        * active during system suspend. See gpio_keys_button_enable_wakeup()
+        * and gpio_keys_button_disable_wakeup().
+        */
+       error = devm_request_any_context_irq(dev, bdata->wakeirq, isr,
+                                            irqflags, wakedesc, bdata);
+       if (error < 0) {
+               dev_err(dev, "Unable to claim wakeirq %d; error %d\n",
+                       bdata->irq, error);
+               return error;
+       }
+
+       /*
+        * Disable wakeirq until suspend. IRQF_NO_AUTOEN won't work if
+        * IRQF_SHARED was set based on !button->can_disable.
+        */
+       disable_irq(bdata->wakeirq);
+
        return 0;
 }
 
@@ -728,7 +769,7 @@ gpio_keys_get_devtree_pdata(struct device *dev)
        struct gpio_keys_platform_data *pdata;
        struct gpio_keys_button *button;
        struct fwnode_handle *child;
-       int nbuttons;
+       int nbuttons, irq;
 
        nbuttons = device_get_child_node_count(dev);
        if (nbuttons == 0)
@@ -750,9 +791,19 @@ gpio_keys_get_devtree_pdata(struct device *dev)
        device_property_read_string(dev, "label", &pdata->name);
 
        device_for_each_child_node(dev, child) {
-               if (is_of_node(child))
-                       button->irq =
-                               irq_of_parse_and_map(to_of_node(child), 0);
+               if (is_of_node(child)) {
+                       irq = of_irq_get_byname(to_of_node(child), "irq");
+                       if (irq > 0)
+                               button->irq = irq;
+
+                       irq = of_irq_get_byname(to_of_node(child), "wakeup");
+                       if (irq > 0)
+                               button->wakeirq = irq;
+
+                       if (!button->irq && !button->wakeirq)
+                               button->irq =
+                                       irq_of_parse_and_map(to_of_node(child), 0);
+               }
 
                if (fwnode_property_read_u32(child, "linux,code",
                                             &button->code)) {
@@ -921,6 +972,11 @@ gpio_keys_button_enable_wakeup(struct gpio_button_data *bdata)
                }
        }
 
+       if (bdata->wakeirq) {
+               enable_irq(bdata->wakeirq);
+               disable_irq(bdata->irq);
+       }
+
        return 0;
 }
 
@@ -929,6 +985,11 @@ gpio_keys_button_disable_wakeup(struct gpio_button_data *bdata)
 {
        int error;
 
+       if (bdata->wakeirq) {
+               enable_irq(bdata->irq);
+               disable_irq(bdata->wakeirq);
+       }
+
        /*
         * The trigger type is always both edges for gpio-based keys and we do
         * not support changing wakeup trigger for interrupt-based keys.
index 454fb8675657302ca1281c211a2be027fc520163..16f936db73058e948505f4479849b2c753515250 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/mutex.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/gpio.h>
 #include <linux/platform_data/gpio-omap.h>
 #include <linux/platform_data/keypad-omap.h>
 #include <linux/soc/ti/omap1-io.h>
@@ -49,9 +48,6 @@ struct omap_kp {
 
 static DECLARE_TASKLET_DISABLED_OLD(kp_tasklet, omap_kp_tasklet);
 
-static unsigned int *row_gpios;
-static unsigned int *col_gpios;
-
 static irqreturn_t omap_kp_interrupt(int irq, void *dev_id)
 {
        /* disable keyboard interrupt and schedule for handling */
@@ -180,7 +176,7 @@ static int omap_kp_probe(struct platform_device *pdev)
        struct omap_kp *omap_kp;
        struct input_dev *input_dev;
        struct omap_kp_platform_data *pdata = dev_get_platdata(&pdev->dev);
-       int i, col_idx, row_idx, ret;
+       int ret;
        unsigned int row_shift, keycodemax;
 
        if (!pdata->rows || !pdata->cols || !pdata->keymap_data) {
@@ -209,17 +205,9 @@ static int omap_kp_probe(struct platform_device *pdev)
        if (pdata->delay)
                omap_kp->delay = pdata->delay;
 
-       if (pdata->row_gpios && pdata->col_gpios) {
-               row_gpios = pdata->row_gpios;
-               col_gpios = pdata->col_gpios;
-       }
-
        omap_kp->rows = pdata->rows;
        omap_kp->cols = pdata->cols;
 
-       col_idx = 0;
-       row_idx = 0;
-
        timer_setup(&omap_kp->timer, omap_kp_timer, 0);
 
        /* get the irq and init timer*/
@@ -276,11 +264,6 @@ err4:
 err3:
        device_remove_file(&pdev->dev, &dev_attr_enable);
 err2:
-       for (i = row_idx - 1; i >= 0; i--)
-               gpio_free(row_gpios[i]);
-       for (i = col_idx - 1; i >= 0; i--)
-               gpio_free(col_gpios[i]);
-
        kfree(omap_kp);
        input_free_device(input_dev);
 
index d3f8688fdd9c3ebe03a0c97fb32283f4cd4190ac..040b340995d89d4bae21630f6218f0bd17337684 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/clk.h>
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -83,6 +84,7 @@ struct omap4_keypad {
        bool no_autorepeat;
        u64 keys;
        unsigned short *keymap;
+       struct clk *fck;
 };
 
 static int kbd_readl(struct omap4_keypad *keypad_data, u32 offset)
@@ -209,6 +211,10 @@ static int omap4_keypad_open(struct input_dev *input)
        if (error)
                return error;
 
+       error = clk_prepare_enable(keypad_data->fck);
+       if (error)
+               goto out;
+
        disable_irq(keypad_data->irq);
 
        kbd_writel(keypad_data, OMAP4_KBD_CTRL,
@@ -226,10 +232,11 @@ static int omap4_keypad_open(struct input_dev *input)
 
        enable_irq(keypad_data->irq);
 
+out:
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
 
-       return 0;
+       return error;
 }
 
 static void omap4_keypad_stop(struct omap4_keypad *keypad_data)
@@ -258,6 +265,7 @@ static void omap4_keypad_close(struct input_dev *input)
        disable_irq(keypad_data->irq);
        omap4_keypad_stop(keypad_data);
        enable_irq(keypad_data->irq);
+       clk_disable_unprepare(keypad_data->fck);
 
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
@@ -356,6 +364,11 @@ static int omap4_keypad_probe(struct platform_device *pdev)
        }
 
        keypad_data->irq = irq;
+       keypad_data->fck = devm_clk_get(&pdev->dev, "fck");
+       if (IS_ERR(keypad_data->fck))
+               return dev_err_probe(&pdev->dev, PTR_ERR(keypad_data->fck),
+                                    "unable to get fck");
+
        mutex_init(&keypad_data->lock);
        platform_set_drvdata(pdev, keypad_data);
 
index 6953097db4456f27cc698ce208841280762f37ca..b51dfcd76038623c8ce321901c8201a05416d347 100644 (file)
@@ -213,7 +213,7 @@ static struct regmap_config qt1050_regmap_config = {
        .val_bits = 8,
        .max_register = QT1050_RES_CAL,
 
-       .cache_type = REGCACHE_RBTREE,
+       .cache_type = REGCACHE_MAPLE,
 
        .wr_table = &qt1050_writeable_table,
        .rd_table = &qt1050_readable_table,
index 8af59ced1ec2eda1365f5636ac91d48984da29da..677bc4baa5d19521e010f68f388c780917c9c9c9 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
-#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/tca6416_keypad.h>
index ce82548916bbc688470812465f3c3c3baf0e1fc8..c1fa75c0f970addfc4b1a991508ca83c132d34ae 100644 (file)
@@ -352,7 +352,7 @@ static int da7280_haptic_set_pwm(struct da7280_haptic *haptics, bool enabled)
                state.duty_cycle = period_mag_multi;
        }
 
-       error = pwm_apply_state(haptics->pwm_dev, &state);
+       error = pwm_apply_might_sleep(haptics->pwm_dev, &state);
        if (error)
                dev_err(haptics->dev, "Failed to apply pwm state: %d\n", error);
 
@@ -1175,7 +1175,7 @@ static int da7280_probe(struct i2c_client *client)
                /* Sync up PWM state and ensure it is off. */
                pwm_init_state(haptics->pwm_dev, &state);
                state.enabled = false;
-               error = pwm_apply_state(haptics->pwm_dev, &state);
+               error = pwm_apply_might_sleep(haptics->pwm_dev, &state);
                if (error) {
                        dev_err(dev, "Failed to apply PWM state: %d\n", error);
                        return error;
index 74808bae326a710779cdfa455ff5781739bf6948..c338765e0ecd032ab541ad70020cec84bea9c296 100644 (file)
@@ -9,11 +9,12 @@
 #include <linux/errno.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
 #include <linux/platform_device.h>
 #include <linux/pm_wakeirq.h>
+#include <linux/property.h>
 #include <linux/workqueue.h>
 #include <linux/regmap.h>
-#include <linux/of.h>
 #include <linux/mfd/da9063/core.h>
 #include <linux/mfd/da9063/registers.h>
 #include <linux/mfd/da9062/core.h>
@@ -74,13 +75,6 @@ static const struct da906x_chip_config da9062_regs = {
        .name = "da9062-onkey",
 };
 
-static const struct of_device_id da9063_compatible_reg_id_table[] = {
-       { .compatible = "dlg,da9063-onkey", .data = &da9063_regs },
-       { .compatible = "dlg,da9062-onkey", .data = &da9062_regs },
-       { },
-};
-MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
-
 static void da9063_poll_on(struct work_struct *work)
 {
        struct da9063_onkey *onkey = container_of(work,
@@ -187,56 +181,43 @@ static irqreturn_t da9063_onkey_irq_handler(int irq, void *data)
 static int da9063_onkey_probe(struct platform_device *pdev)
 {
        struct da9063_onkey *onkey;
-       const struct of_device_id *match;
-       int irq;
        int error;
-
-       match = of_match_node(da9063_compatible_reg_id_table,
-                             pdev->dev.of_node);
-       if (!match)
-               return -ENXIO;
+       int irq;
 
        onkey = devm_kzalloc(&pdev->dev, sizeof(struct da9063_onkey),
                             GFP_KERNEL);
-       if (!onkey) {
-               dev_err(&pdev->dev, "Failed to allocate memory.\n");
+       if (!onkey)
                return -ENOMEM;
-       }
 
-       onkey->config = match->data;
+       onkey->config = device_get_match_data(&pdev->dev);
+       if (!onkey->config)
+               return -ENXIO;
+
        onkey->dev = &pdev->dev;
 
        onkey->regmap = dev_get_regmap(pdev->dev.parent, NULL);
-       if (!onkey->regmap) {
-               dev_err(&pdev->dev, "Parent regmap unavailable.\n");
-               return -ENXIO;
-       }
+       if (!onkey->regmap)
+               return dev_err_probe(&pdev->dev, -ENXIO,
+                                    "Parent regmap unavailable.\n");
 
-       onkey->key_power = !of_property_read_bool(pdev->dev.of_node,
-                                                 "dlg,disable-key-power");
+       onkey->key_power = !device_property_read_bool(&pdev->dev,
+                                                     "dlg,disable-key-power");
 
        onkey->input = devm_input_allocate_device(&pdev->dev);
-       if (!onkey->input) {
-               dev_err(&pdev->dev, "Failed to allocated input device.\n");
+       if (!onkey->input)
                return -ENOMEM;
-       }
 
        onkey->input->name = onkey->config->name;
        snprintf(onkey->phys, sizeof(onkey->phys), "%s/input0",
                 onkey->config->name);
        onkey->input->phys = onkey->phys;
-       onkey->input->dev.parent = &pdev->dev;
 
        input_set_capability(onkey->input, EV_KEY, KEY_POWER);
 
        error = devm_delayed_work_autocancel(&pdev->dev, &onkey->work,
                                             da9063_poll_on);
-       if (error) {
-               dev_err(&pdev->dev,
-                       "Failed to add cancel poll action: %d\n",
-                       error);
+       if (error)
                return error;
-       }
 
        irq = platform_get_irq_byname(pdev, "ONKEY");
        if (irq < 0)
@@ -246,11 +227,9 @@ static int da9063_onkey_probe(struct platform_device *pdev)
                                          NULL, da9063_onkey_irq_handler,
                                          IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                          "ONKEY", onkey);
-       if (error) {
-               dev_err(&pdev->dev,
-                       "Failed to request IRQ %d: %d\n", irq, error);
-               return error;
-       }
+       if (error)
+               return dev_err_probe(&pdev->dev, error,
+                                    "Failed to allocate onkey IRQ\n");
 
        error = dev_pm_set_wake_irq(&pdev->dev, irq);
        if (error)
@@ -261,15 +240,19 @@ static int da9063_onkey_probe(struct platform_device *pdev)
                device_init_wakeup(&pdev->dev, true);
 
        error = input_register_device(onkey->input);
-       if (error) {
-               dev_err(&pdev->dev,
-                       "Failed to register input device: %d\n", error);
+       if (error)
                return error;
-       }
 
        return 0;
 }
 
+static const struct of_device_id da9063_compatible_reg_id_table[] = {
+       { .compatible = "dlg,da9063-onkey", .data = &da9063_regs },
+       { .compatible = "dlg,da9062-onkey", .data = &da9062_regs },
+       { }
+};
+MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
+
 static struct platform_driver da9063_onkey_driver = {
        .probe  = da9063_onkey_probe,
        .driver = {
index b2f1292e27ef7d5e9d6c0160c05479bd8f144e1d..6e8cc28debd979eb9b0b276811c615d9a59be850 100644 (file)
@@ -1050,7 +1050,7 @@ static ssize_t ims_pcu_attribute_show(struct device *dev,
                        container_of(dattr, struct ims_pcu_attribute, dattr);
        char *field = (char *)pcu + attr->field_offset;
 
-       return scnprintf(buf, PAGE_SIZE, "%.*s\n", attr->field_length, field);
+       return sysfs_emit(buf, "%.*s\n", attr->field_length, field);
 }
 
 static ssize_t ims_pcu_attribute_store(struct device *dev,
@@ -1206,7 +1206,7 @@ ims_pcu_update_firmware_status_show(struct device *dev,
        struct usb_interface *intf = to_usb_interface(dev);
        struct ims_pcu *pcu = usb_get_intfdata(intf);
 
-       return scnprintf(buf, PAGE_SIZE, "%d\n", pcu->update_firmware_status);
+       return sysfs_emit(buf, "%d\n", pcu->update_firmware_status);
 }
 
 static DEVICE_ATTR(update_firmware_status, S_IRUGO,
@@ -1309,7 +1309,7 @@ static ssize_t ims_pcu_ofn_reg_data_show(struct device *dev,
        if (error)
                return error;
 
-       return scnprintf(buf, PAGE_SIZE, "%x\n", data);
+       return sysfs_emit(buf, "%x\n", data);
 }
 
 static ssize_t ims_pcu_ofn_reg_data_store(struct device *dev,
@@ -1344,7 +1344,7 @@ static ssize_t ims_pcu_ofn_reg_addr_show(struct device *dev,
        int error;
 
        mutex_lock(&pcu->cmd_mutex);
-       error = scnprintf(buf, PAGE_SIZE, "%x\n", pcu->ofn_reg_addr);
+       error = sysfs_emit(buf, "%x\n", pcu->ofn_reg_addr);
        mutex_unlock(&pcu->cmd_mutex);
 
        return error;
@@ -1397,7 +1397,7 @@ static ssize_t ims_pcu_ofn_bit_show(struct device *dev,
        if (error)
                return error;
 
-       return scnprintf(buf, PAGE_SIZE, "%d\n", !!(data & (1 << attr->nr)));
+       return sysfs_emit(buf, "%d\n", !!(data & (1 << attr->nr)));
 }
 
 static ssize_t ims_pcu_ofn_bit_store(struct device *dev,
index 3c636c75e8a1f1520ab2c5c3bec4004e023080c8..cd14ff9f57cf239819d76cf3bae0da8c13524655 100644 (file)
@@ -9,6 +9,7 @@
  * axial sliders presented by the device.
  */
 
+#include <linux/bits.h>
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -26,6 +27,8 @@
 
 #define IQS269_VER_INFO                                0x00
 #define IQS269_VER_INFO_PROD_NUM               0x4F
+#define IQS269_VER_INFO_FW_NUM_2               0x03
+#define IQS269_VER_INFO_FW_NUM_3               0x10
 
 #define IQS269_SYS_FLAGS                       0x02
 #define IQS269_SYS_FLAGS_SHOW_RESET            BIT(15)
@@ -53,6 +56,7 @@
 #define IQS269_SYS_SETTINGS_ULP_UPDATE_MASK    GENMASK(10, 8)
 #define IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT   8
 #define IQS269_SYS_SETTINGS_ULP_UPDATE_MAX     7
+#define IQS269_SYS_SETTINGS_SLIDER_SWIPE       BIT(7)
 #define IQS269_SYS_SETTINGS_RESEED_OFFSET      BIT(6)
 #define IQS269_SYS_SETTINGS_EVENT_MODE         BIT(5)
 #define IQS269_SYS_SETTINGS_EVENT_MODE_LP      BIT(4)
@@ -69,6 +73,7 @@
 #define IQS269_FILT_STR_MAX                    3
 
 #define IQS269_EVENT_MASK_SYS                  BIT(6)
+#define IQS269_EVENT_MASK_GESTURE              BIT(3)
 #define IQS269_EVENT_MASK_DEEP                 BIT(2)
 #define IQS269_EVENT_MASK_TOUCH                        BIT(1)
 #define IQS269_EVENT_MASK_PROX                 BIT(0)
 #define IQS269_MISC_B_TRACKING_UI_ENABLE       BIT(4)
 #define IQS269_MISC_B_FILT_STR_SLIDER          GENMASK(1, 0)
 
+#define IQS269_TOUCH_HOLD_SLIDER_SEL           0x89
+#define IQS269_TOUCH_HOLD_DEFAULT              0x14
+#define IQS269_TOUCH_HOLD_MS_MIN               256
+#define IQS269_TOUCH_HOLD_MS_MAX               65280
+
+#define IQS269_TIMEOUT_TAP_MS_MAX              4080
+#define IQS269_TIMEOUT_SWIPE_MS_MAX            4080
+#define IQS269_THRESH_SWIPE_MAX                        255
+
 #define IQS269_CHx_ENG_A_MEAS_CAP_SIZE         BIT(15)
 #define IQS269_CHx_ENG_A_RX_GND_INACTIVE       BIT(13)
 #define IQS269_CHx_ENG_A_LOCAL_CAP_SIZE                BIT(12)
 
 #define IQS269_MAX_REG                         0xFF
 
+#define IQS269_OTP_OPTION_DEFAULT              0x00
+#define IQS269_OTP_OPTION_TWS                  0xD0
+#define IQS269_OTP_OPTION_HOLD                 BIT(7)
+
 #define IQS269_NUM_CH                          8
 #define IQS269_NUM_SL                          2
 
@@ -175,6 +193,20 @@ enum iqs269_event_id {
        IQS269_EVENT_DEEP_UP,
 };
 
+enum iqs269_slider_id {
+       IQS269_SLIDER_NONE,
+       IQS269_SLIDER_KEY,
+       IQS269_SLIDER_RAW,
+};
+
+enum iqs269_gesture_id {
+       IQS269_GESTURE_TAP,
+       IQS269_GESTURE_HOLD,
+       IQS269_GESTURE_FLICK_POS,
+       IQS269_GESTURE_FLICK_NEG,
+       IQS269_NUM_GESTURES,
+};
+
 struct iqs269_switch_desc {
        unsigned int code;
        bool enabled;
@@ -234,7 +266,7 @@ struct iqs269_ver_info {
        u8 prod_num;
        u8 sw_num;
        u8 hw_num;
-       u8 padding;
+       u8 fw_num;
 } __packed;
 
 struct iqs269_ch_reg {
@@ -285,16 +317,42 @@ struct iqs269_private {
        struct regmap *regmap;
        struct mutex lock;
        struct iqs269_switch_desc switches[ARRAY_SIZE(iqs269_events)];
+       struct iqs269_ver_info ver_info;
        struct iqs269_sys_reg sys_reg;
        struct completion ati_done;
        struct input_dev *keypad;
        struct input_dev *slider[IQS269_NUM_SL];
        unsigned int keycode[ARRAY_SIZE(iqs269_events) * IQS269_NUM_CH];
+       unsigned int sl_code[IQS269_NUM_SL][IQS269_NUM_GESTURES];
+       unsigned int otp_option;
        unsigned int ch_num;
        bool hall_enable;
        bool ati_current;
 };
 
+static enum iqs269_slider_id iqs269_slider_type(struct iqs269_private *iqs269,
+                                               int slider_num)
+{
+       int i;
+
+       /*
+        * Slider 1 is unavailable if the touch-and-hold option is enabled via
+        * OTP. In that case, the channel selection register is repurposed for
+        * the touch-and-hold timer ceiling.
+        */
+       if (slider_num && (iqs269->otp_option & IQS269_OTP_OPTION_HOLD))
+               return IQS269_SLIDER_NONE;
+
+       if (!iqs269->sys_reg.slider_select[slider_num])
+               return IQS269_SLIDER_NONE;
+
+       for (i = 0; i < IQS269_NUM_GESTURES; i++)
+               if (iqs269->sl_code[slider_num][i] != KEY_RESERVED)
+                       return IQS269_SLIDER_KEY;
+
+       return IQS269_SLIDER_RAW;
+}
+
 static int iqs269_ati_mode_set(struct iqs269_private *iqs269,
                               unsigned int ch_num, unsigned int mode)
 {
@@ -525,7 +583,8 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269,
        if (fwnode_property_present(ch_node, "azoteq,slider0-select"))
                iqs269->sys_reg.slider_select[0] |= BIT(reg);
 
-       if (fwnode_property_present(ch_node, "azoteq,slider1-select"))
+       if (fwnode_property_present(ch_node, "azoteq,slider1-select") &&
+           !(iqs269->otp_option & IQS269_OTP_OPTION_HOLD))
                iqs269->sys_reg.slider_select[1] |= BIT(reg);
 
        ch_reg = &iqs269->sys_reg.ch_reg[reg];
@@ -950,7 +1009,43 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
        sys_reg->blocking = 0;
 
        sys_reg->slider_select[0] = 0;
-       sys_reg->slider_select[1] = 0;
+
+       /*
+        * If configured via OTP to do so, the device asserts a pulse on the
+        * GPIO4 pin for approximately 60 ms once a selected channel is held
+        * in a state of touch for a configurable length of time.
+        *
+        * In that case, the register used for slider 1 channel selection is
+        * repurposed for the touch-and-hold timer ceiling.
+        */
+       if (iqs269->otp_option & IQS269_OTP_OPTION_HOLD) {
+               if (!device_property_read_u32(&client->dev,
+                                             "azoteq,touch-hold-ms", &val)) {
+                       if (val < IQS269_TOUCH_HOLD_MS_MIN ||
+                           val > IQS269_TOUCH_HOLD_MS_MAX) {
+                               dev_err(&client->dev,
+                                       "Invalid touch-and-hold ceiling: %u\n",
+                                       val);
+                               return -EINVAL;
+                       }
+
+                       sys_reg->slider_select[1] = val / 256;
+               } else if (iqs269->ver_info.fw_num < IQS269_VER_INFO_FW_NUM_3) {
+                       /*
+                        * The default touch-and-hold timer ceiling initially
+                        * read from early revisions of silicon is invalid if
+                        * the device experienced a soft reset between power-
+                        * on and the read operation.
+                        *
+                        * To protect against this case, explicitly cache the
+                        * default value so that it is restored each time the
+                        * device is re-initialized.
+                        */
+                       sys_reg->slider_select[1] = IQS269_TOUCH_HOLD_DEFAULT;
+               }
+       } else {
+               sys_reg->slider_select[1] = 0;
+       }
 
        sys_reg->event_mask = ~((u8)IQS269_EVENT_MASK_SYS);
 
@@ -1004,6 +1099,76 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
                general |= (val << IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT);
        }
 
+       if (device_property_present(&client->dev, "linux,keycodes")) {
+               int scale = 1;
+               int count = device_property_count_u32(&client->dev,
+                                                     "linux,keycodes");
+               if (count > IQS269_NUM_GESTURES * IQS269_NUM_SL) {
+                       dev_err(&client->dev, "Too many keycodes present\n");
+                       return -EINVAL;
+               } else if (count < 0) {
+                       dev_err(&client->dev, "Failed to count keycodes: %d\n",
+                               count);
+                       return count;
+               }
+
+               error = device_property_read_u32_array(&client->dev,
+                                                      "linux,keycodes",
+                                                      *iqs269->sl_code, count);
+               if (error) {
+                       dev_err(&client->dev, "Failed to read keycodes: %d\n",
+                               error);
+                       return error;
+               }
+
+               if (device_property_present(&client->dev,
+                                           "azoteq,gesture-swipe"))
+                       general |= IQS269_SYS_SETTINGS_SLIDER_SWIPE;
+
+               /*
+                * Early revisions of silicon use a more granular step size for
+                * tap and swipe gesture timeouts; scale them appropriately.
+                */
+               if (iqs269->ver_info.fw_num < IQS269_VER_INFO_FW_NUM_3)
+                       scale = 4;
+
+               if (!device_property_read_u32(&client->dev,
+                                             "azoteq,timeout-tap-ms", &val)) {
+                       if (val > IQS269_TIMEOUT_TAP_MS_MAX / scale) {
+                               dev_err(&client->dev, "Invalid timeout: %u\n",
+                                       val);
+                               return -EINVAL;
+                       }
+
+                       sys_reg->timeout_tap = val / (16 / scale);
+               }
+
+               if (!device_property_read_u32(&client->dev,
+                                             "azoteq,timeout-swipe-ms",
+                                             &val)) {
+                       if (val > IQS269_TIMEOUT_SWIPE_MS_MAX / scale) {
+                               dev_err(&client->dev, "Invalid timeout: %u\n",
+                                       val);
+                               return -EINVAL;
+                       }
+
+                       sys_reg->timeout_swipe = val / (16 / scale);
+               }
+
+               if (!device_property_read_u32(&client->dev,
+                                             "azoteq,thresh-swipe", &val)) {
+                       if (val > IQS269_THRESH_SWIPE_MAX) {
+                               dev_err(&client->dev, "Invalid threshold: %u\n",
+                                       val);
+                               return -EINVAL;
+                       }
+
+                       sys_reg->thresh_swipe = val;
+               }
+
+               sys_reg->event_mask &= ~IQS269_EVENT_MASK_GESTURE;
+       }
+
        general &= ~IQS269_SYS_SETTINGS_RESEED_OFFSET;
        if (device_property_present(&client->dev, "azoteq,reseed-offset"))
                general |= IQS269_SYS_SETTINGS_RESEED_OFFSET;
@@ -1012,10 +1177,11 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
 
        /*
         * As per the datasheet, enable streaming during normal-power mode if
-        * either slider is in use. In that case, the device returns to event
-        * mode during low-power mode.
+        * raw coordinates will be read from either slider. In that case, the
+        * device returns to event mode during low-power mode.
         */
-       if (sys_reg->slider_select[0] || sys_reg->slider_select[1])
+       if (iqs269_slider_type(iqs269, 0) == IQS269_SLIDER_RAW ||
+           iqs269_slider_type(iqs269, 1) == IQS269_SLIDER_RAW)
                general |= IQS269_SYS_SETTINGS_EVENT_MODE_LP;
 
        general |= IQS269_SYS_SETTINGS_REDO_ATI;
@@ -1026,12 +1192,30 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
        return 0;
 }
 
+static const struct reg_sequence iqs269_tws_init[] = {
+       { IQS269_TOUCH_HOLD_SLIDER_SEL, IQS269_TOUCH_HOLD_DEFAULT },
+       { 0xF0, 0x580F },
+       { 0xF0, 0x59EF },
+};
+
 static int iqs269_dev_init(struct iqs269_private *iqs269)
 {
        int error;
 
        mutex_lock(&iqs269->lock);
 
+       /*
+        * Early revisions of silicon require the following workaround in order
+        * to restore any OTP-enabled functionality after a soft reset.
+        */
+       if (iqs269->otp_option == IQS269_OTP_OPTION_TWS &&
+           iqs269->ver_info.fw_num < IQS269_VER_INFO_FW_NUM_3) {
+               error = regmap_multi_reg_write(iqs269->regmap, iqs269_tws_init,
+                                              ARRAY_SIZE(iqs269_tws_init));
+               if (error)
+                       goto err_mutex;
+       }
+
        error = regmap_update_bits(iqs269->regmap, IQS269_HALL_UI,
                                   IQS269_HALL_UI_ENABLE,
                                   iqs269->hall_enable ? ~0 : 0);
@@ -1106,19 +1290,37 @@ static int iqs269_input_init(struct iqs269_private *iqs269)
        }
 
        for (i = 0; i < IQS269_NUM_SL; i++) {
-               if (!iqs269->sys_reg.slider_select[i])
+               if (iqs269_slider_type(iqs269, i) == IQS269_SLIDER_NONE)
                        continue;
 
                iqs269->slider[i] = devm_input_allocate_device(&client->dev);
                if (!iqs269->slider[i])
                        return -ENOMEM;
 
+               iqs269->slider[i]->keycodemax = ARRAY_SIZE(iqs269->sl_code[i]);
+               iqs269->slider[i]->keycode = iqs269->sl_code[i];
+               iqs269->slider[i]->keycodesize = sizeof(**iqs269->sl_code);
+
                iqs269->slider[i]->name = i ? "iqs269a_slider_1"
                                            : "iqs269a_slider_0";
                iqs269->slider[i]->id.bustype = BUS_I2C;
 
-               input_set_capability(iqs269->slider[i], EV_KEY, BTN_TOUCH);
-               input_set_abs_params(iqs269->slider[i], ABS_X, 0, 255, 0, 0);
+               for (j = 0; j < IQS269_NUM_GESTURES; j++)
+                       if (iqs269->sl_code[i][j] != KEY_RESERVED)
+                               input_set_capability(iqs269->slider[i], EV_KEY,
+                                                    iqs269->sl_code[i][j]);
+
+               /*
+                * Present the slider as a narrow trackpad if one or more chan-
+                * nels have been selected to participate, but no gestures have
+                * been mapped to a keycode.
+                */
+               if (iqs269_slider_type(iqs269, i) == IQS269_SLIDER_RAW) {
+                       input_set_capability(iqs269->slider[i],
+                                            EV_KEY, BTN_TOUCH);
+                       input_set_abs_params(iqs269->slider[i],
+                                            ABS_X, 0, 255, 0, 0);
+               }
 
                error = input_register_device(iqs269->slider[i]);
                if (error) {
@@ -1167,28 +1369,62 @@ static int iqs269_report(struct iqs269_private *iqs269)
        if (be16_to_cpu(flags.system) & IQS269_SYS_FLAGS_IN_ATI)
                return 0;
 
-       error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X, slider_x,
-                               sizeof(slider_x));
-       if (error) {
-               dev_err(&client->dev, "Failed to read slider position: %d\n",
-                       error);
-               return error;
+       if (iqs269_slider_type(iqs269, 0) == IQS269_SLIDER_RAW ||
+           iqs269_slider_type(iqs269, 1) == IQS269_SLIDER_RAW) {
+               error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X,
+                                       slider_x, sizeof(slider_x));
+               if (error) {
+                       dev_err(&client->dev,
+                               "Failed to read slider position: %d\n", error);
+                       return error;
+               }
        }
 
        for (i = 0; i < IQS269_NUM_SL; i++) {
-               if (!iqs269->sys_reg.slider_select[i])
+               flags.gesture >>= (i * IQS269_NUM_GESTURES);
+
+               switch (iqs269_slider_type(iqs269, i)) {
+               case IQS269_SLIDER_NONE:
                        continue;
 
-               /*
-                * Report BTN_TOUCH if any channel that participates in the
-                * slider is in a state of touch.
-                */
-               if (flags.states[IQS269_ST_OFFS_TOUCH] &
-                   iqs269->sys_reg.slider_select[i]) {
-                       input_report_key(iqs269->slider[i], BTN_TOUCH, 1);
-                       input_report_abs(iqs269->slider[i], ABS_X, slider_x[i]);
-               } else {
-                       input_report_key(iqs269->slider[i], BTN_TOUCH, 0);
+               case IQS269_SLIDER_KEY:
+                       for (j = 0; j < IQS269_NUM_GESTURES; j++)
+                               input_report_key(iqs269->slider[i],
+                                                iqs269->sl_code[i][j],
+                                                flags.gesture & BIT(j));
+
+                       if (!(flags.gesture & (BIT(IQS269_GESTURE_FLICK_NEG) |
+                                              BIT(IQS269_GESTURE_FLICK_POS) |
+                                              BIT(IQS269_GESTURE_TAP))))
+                               break;
+
+                       input_sync(iqs269->slider[i]);
+
+                       /*
+                        * Momentary gestures are followed by a complementary
+                        * release cycle so as to emulate a full keystroke.
+                        */
+                       for (j = 0; j < IQS269_NUM_GESTURES; j++)
+                               if (j != IQS269_GESTURE_HOLD)
+                                       input_report_key(iqs269->slider[i],
+                                                        iqs269->sl_code[i][j],
+                                                        0);
+                       break;
+
+               case IQS269_SLIDER_RAW:
+                       /*
+                        * The slider is considered to be in a state of touch
+                        * if any selected channels are in a state of touch.
+                        */
+                       state = flags.states[IQS269_ST_OFFS_TOUCH];
+                       state &= iqs269->sys_reg.slider_select[i];
+
+                       input_report_key(iqs269->slider[i], BTN_TOUCH, state);
+
+                       if (state)
+                               input_report_abs(iqs269->slider[i],
+                                                ABS_X, slider_x[i]);
+                       break;
                }
 
                input_sync(iqs269->slider[i]);
@@ -1286,7 +1522,7 @@ static ssize_t counts_show(struct device *dev,
        if (error)
                return error;
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n", le16_to_cpu(counts));
+       return sysfs_emit(buf, "%u\n", le16_to_cpu(counts));
 }
 
 static ssize_t hall_bin_show(struct device *dev,
@@ -1324,7 +1560,7 @@ static ssize_t hall_bin_show(struct device *dev,
                return -EINVAL;
        }
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+       return sysfs_emit(buf, "%u\n", val);
 }
 
 static ssize_t hall_enable_show(struct device *dev,
@@ -1332,7 +1568,7 @@ static ssize_t hall_enable_show(struct device *dev,
 {
        struct iqs269_private *iqs269 = dev_get_drvdata(dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->hall_enable);
+       return sysfs_emit(buf, "%u\n", iqs269->hall_enable);
 }
 
 static ssize_t hall_enable_store(struct device *dev,
@@ -1362,7 +1598,7 @@ static ssize_t ch_number_show(struct device *dev,
 {
        struct iqs269_private *iqs269 = dev_get_drvdata(dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ch_num);
+       return sysfs_emit(buf, "%u\n", iqs269->ch_num);
 }
 
 static ssize_t ch_number_store(struct device *dev,
@@ -1391,8 +1627,7 @@ static ssize_t rx_enable_show(struct device *dev,
        struct iqs269_private *iqs269 = dev_get_drvdata(dev);
        struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg;
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n",
-                        ch_reg[iqs269->ch_num].rx_enable);
+       return sysfs_emit(buf, "%u\n", ch_reg[iqs269->ch_num].rx_enable);
 }
 
 static ssize_t rx_enable_store(struct device *dev,
@@ -1432,7 +1667,7 @@ static ssize_t ati_mode_show(struct device *dev,
        if (error)
                return error;
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+       return sysfs_emit(buf, "%u\n", val);
 }
 
 static ssize_t ati_mode_store(struct device *dev,
@@ -1465,7 +1700,7 @@ static ssize_t ati_base_show(struct device *dev,
        if (error)
                return error;
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+       return sysfs_emit(buf, "%u\n", val);
 }
 
 static ssize_t ati_base_store(struct device *dev,
@@ -1498,7 +1733,7 @@ static ssize_t ati_target_show(struct device *dev,
        if (error)
                return error;
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+       return sysfs_emit(buf, "%u\n", val);
 }
 
 static ssize_t ati_target_store(struct device *dev,
@@ -1525,9 +1760,9 @@ static ssize_t ati_trigger_show(struct device *dev,
 {
        struct iqs269_private *iqs269 = dev_get_drvdata(dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n",
-                        iqs269->ati_current &&
-                        completion_done(&iqs269->ati_done));
+       return sysfs_emit(buf, "%u\n",
+                         iqs269->ati_current &&
+                         completion_done(&iqs269->ati_done));
 }
 
 static ssize_t ati_trigger_store(struct device *dev,
@@ -1596,7 +1831,6 @@ static const struct regmap_config iqs269_regmap_config = {
 
 static int iqs269_probe(struct i2c_client *client)
 {
-       struct iqs269_ver_info ver_info;
        struct iqs269_private *iqs269;
        int error;
 
@@ -1618,14 +1852,16 @@ static int iqs269_probe(struct i2c_client *client)
        mutex_init(&iqs269->lock);
        init_completion(&iqs269->ati_done);
 
-       error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO, &ver_info,
-                               sizeof(ver_info));
+       iqs269->otp_option = (uintptr_t)device_get_match_data(&client->dev);
+
+       error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO,
+                               &iqs269->ver_info, sizeof(iqs269->ver_info));
        if (error)
                return error;
 
-       if (ver_info.prod_num != IQS269_VER_INFO_PROD_NUM) {
+       if (iqs269->ver_info.prod_num != IQS269_VER_INFO_PROD_NUM) {
                dev_err(&client->dev, "Unrecognized product number: 0x%02X\n",
-                       ver_info.prod_num);
+                       iqs269->ver_info.prod_num);
                return -EINVAL;
        }
 
@@ -1728,7 +1964,18 @@ static int iqs269_resume(struct device *dev)
 static DEFINE_SIMPLE_DEV_PM_OPS(iqs269_pm, iqs269_suspend, iqs269_resume);
 
 static const struct of_device_id iqs269_of_match[] = {
-       { .compatible = "azoteq,iqs269a" },
+       {
+               .compatible = "azoteq,iqs269a",
+               .data = (void *)IQS269_OTP_OPTION_DEFAULT,
+       },
+       {
+               .compatible = "azoteq,iqs269a-00",
+               .data = (void *)IQS269_OTP_OPTION_DEFAULT,
+       },
+       {
+               .compatible = "azoteq,iqs269a-d0",
+               .data = (void *)IQS269_OTP_OPTION_TWS,
+       },
        { }
 };
 MODULE_DEVICE_TABLE(of, iqs269_of_match);
index 80f4416ffe2fe98ecc8032527cda68e1b57b019e..0e646f1b257b8fefd21057c31806d82150b4f4db 100644 (file)
@@ -307,7 +307,7 @@ static int max77693_haptic_probe(struct platform_device *pdev)
        haptic->suspend_state = false;
 
        /* Variant-specific init */
-       haptic->dev_type = platform_get_device_id(pdev)->driver_data;
+       haptic->dev_type = max77693->type;
        switch (haptic->dev_type) {
        case TYPE_MAX77693:
                haptic->regmap_haptic = max77693->regmap_haptic;
@@ -406,16 +406,24 @@ static DEFINE_SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
                                max77693_haptic_resume);
 
 static const struct platform_device_id max77693_haptic_id[] = {
-       { "max77693-haptic", TYPE_MAX77693 },
-       { "max77843-haptic", TYPE_MAX77843 },
+       { "max77693-haptic", },
+       { "max77843-haptic", },
        {},
 };
 MODULE_DEVICE_TABLE(platform, max77693_haptic_id);
 
+static const struct of_device_id of_max77693_haptic_dt_match[] = {
+       { .compatible = "maxim,max77693-haptic", },
+       { .compatible = "maxim,max77843-haptic", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, of_max77693_haptic_dt_match);
+
 static struct platform_driver max77693_haptic_driver = {
        .driver         = {
                .name   = "max77693-haptic",
                .pm     = pm_sleep_ptr(&max77693_haptic_pm_ops),
+               .of_match_table = of_max77693_haptic_dt_match,
        },
        .probe          = max77693_haptic_probe,
        .id_table       = max77693_haptic_id,
index 1e731d8397c6f525c283b8858d45fac7c34eab6d..5b9aedf4362f49c756dec9729667227b81d787c5 100644 (file)
@@ -39,7 +39,7 @@ static int pwm_beeper_on(struct pwm_beeper *beeper, unsigned long period)
        state.period = period;
        pwm_set_relative_duty_cycle(&state, 50, 100);
 
-       error = pwm_apply_state(beeper->pwm, &state);
+       error = pwm_apply_might_sleep(beeper->pwm, &state);
        if (error)
                return error;
 
@@ -138,7 +138,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
        /* Sync up PWM state and ensure it is off. */
        pwm_init_state(beeper->pwm, &state);
        state.enabled = false;
-       error = pwm_apply_state(beeper->pwm, &state);
+       error = pwm_apply_might_sleep(beeper->pwm, &state);
        if (error) {
                dev_err(dev, "failed to apply initial PWM state: %d\n",
                        error);
index acac79c488aa1531fa421ea6d677469652d4c406..3e5ed685ed8f507c8cb76d2b6033fa7c713342a9 100644 (file)
@@ -56,7 +56,7 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
        pwm_set_relative_duty_cycle(&state, vibrator->level, 0xffff);
        state.enabled = true;
 
-       err = pwm_apply_state(vibrator->pwm, &state);
+       err = pwm_apply_might_sleep(vibrator->pwm, &state);
        if (err) {
                dev_err(pdev, "failed to apply pwm state: %d\n", err);
                return err;
@@ -67,7 +67,7 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
                state.duty_cycle = vibrator->direction_duty_cycle;
                state.enabled = true;
 
-               err = pwm_apply_state(vibrator->pwm_dir, &state);
+               err = pwm_apply_might_sleep(vibrator->pwm_dir, &state);
                if (err) {
                        dev_err(pdev, "failed to apply dir-pwm state: %d\n", err);
                        pwm_disable(vibrator->pwm);
@@ -160,7 +160,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
        /* Sync up PWM state and ensure it is off. */
        pwm_init_state(vibrator->pwm, &state);
        state.enabled = false;
-       err = pwm_apply_state(vibrator->pwm, &state);
+       err = pwm_apply_might_sleep(vibrator->pwm, &state);
        if (err) {
                dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
                        err);
@@ -174,7 +174,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
                /* Sync up PWM state and ensure it is off. */
                pwm_init_state(vibrator->pwm_dir, &state);
                state.enabled = false;
-               err = pwm_apply_state(vibrator->pwm_dir, &state);
+               err = pwm_apply_might_sleep(vibrator->pwm_dir, &state);
                if (err) {
                        dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
                                err);
index ca150618d32f1863795f390b4ebf4687ea0e36c1..953992b458e9f2c46900204e926da7c665468709 100644 (file)
@@ -19,6 +19,7 @@
  * Copyright (C) 2006     Nicolas Boichat (nicolas@boichat.ch)
  */
 
+#include "linux/usb.h"
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
@@ -193,6 +194,8 @@ enum tp_type {
 
 /* list of device capability bits */
 #define HAS_INTEGRATED_BUTTON  1
+/* maximum number of supported endpoints (currently trackpad and button) */
+#define MAX_ENDPOINTS  2
 
 /* trackpad finger data block size */
 #define FSIZE_TYPE1            (14 * sizeof(__le16))
@@ -891,6 +894,18 @@ static int bcm5974_resume(struct usb_interface *iface)
        return error;
 }
 
+static bool bcm5974_check_endpoints(struct usb_interface *iface,
+                                   const struct bcm5974_config *cfg)
+{
+       u8 ep_addr[MAX_ENDPOINTS + 1] = {0};
+
+       ep_addr[0] = cfg->tp_ep;
+       if (cfg->tp_type == TYPE1)
+               ep_addr[1] = cfg->bt_ep;
+
+       return usb_check_int_endpoints(iface, ep_addr);
+}
+
 static int bcm5974_probe(struct usb_interface *iface,
                         const struct usb_device_id *id)
 {
@@ -903,6 +918,11 @@ static int bcm5974_probe(struct usb_interface *iface,
        /* find the product index */
        cfg = bcm5974_get_config(udev);
 
+       if (!bcm5974_check_endpoints(iface, cfg)) {
+               dev_err(&iface->dev, "Unexpected non-int endpoint\n");
+               return -ENODEV;
+       }
+
        /* allocate memory for our device state and initialize it */
        dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL);
        input_dev = input_allocate_device();
index a84098448f5b94d607dc5b86ceb5f04618783179..5979deabe23d192f47debae819d1ddc488d8edc6 100644 (file)
@@ -756,16 +756,16 @@ static ssize_t cyapa_show_suspend_scanrate(struct device *dev,
 
        switch (pwr_cmd) {
        case PWR_MODE_BTN_ONLY:
-               len = scnprintf(buf, PAGE_SIZE, "%s\n", BTN_ONLY_MODE_NAME);
+               len = sysfs_emit(buf, "%s\n", BTN_ONLY_MODE_NAME);
                break;
 
        case PWR_MODE_OFF:
-               len = scnprintf(buf, PAGE_SIZE, "%s\n", OFF_MODE_NAME);
+               len = sysfs_emit(buf, "%s\n", OFF_MODE_NAME);
                break;
 
        default:
-               len = scnprintf(buf, PAGE_SIZE, "%u\n",
-                               cyapa->gen == CYAPA_GEN3 ?
+               len = sysfs_emit(buf, "%u\n",
+                                cyapa->gen == CYAPA_GEN3 ?
                                        cyapa_pwr_cmd_to_sleep_time(pwr_cmd) :
                                        sleep_time);
                break;
@@ -877,8 +877,8 @@ static ssize_t cyapa_show_rt_suspend_scanrate(struct device *dev,
 
        mutex_unlock(&cyapa->state_sync_lock);
 
-       return scnprintf(buf, PAGE_SIZE, "%u\n",
-                        cyapa->gen == CYAPA_GEN3 ?
+       return sysfs_emit(buf, "%u\n",
+                         cyapa->gen == CYAPA_GEN3 ?
                                cyapa_pwr_cmd_to_sleep_time(pwr_cmd) :
                                sleep_time);
 }
@@ -988,8 +988,8 @@ static ssize_t cyapa_show_fm_ver(struct device *dev,
        error = mutex_lock_interruptible(&cyapa->state_sync_lock);
        if (error)
                return error;
-       error = scnprintf(buf, PAGE_SIZE, "%d.%d\n", cyapa->fw_maj_ver,
-                        cyapa->fw_min_ver);
+       error = sysfs_emit(buf, "%d.%d\n",
+                          cyapa->fw_maj_ver, cyapa->fw_min_ver);
        mutex_unlock(&cyapa->state_sync_lock);
        return error;
 }
@@ -1004,7 +1004,7 @@ static ssize_t cyapa_show_product_id(struct device *dev,
        error = mutex_lock_interruptible(&cyapa->state_sync_lock);
        if (error)
                return error;
-       size = scnprintf(buf, PAGE_SIZE, "%s\n", cyapa->product_id);
+       size = sysfs_emit(buf, "%s\n", cyapa->product_id);
        mutex_unlock(&cyapa->state_sync_lock);
        return size;
 }
@@ -1209,8 +1209,8 @@ static ssize_t cyapa_show_mode(struct device *dev,
        if (error)
                return error;
 
-       size = scnprintf(buf, PAGE_SIZE, "gen%d %s\n",
-                       cyapa->gen, cyapa_state_to_string(cyapa));
+       size = sysfs_emit(buf, "gen%d %s\n",
+                         cyapa->gen, cyapa_state_to_string(cyapa));
 
        mutex_unlock(&cyapa->state_sync_lock);
        return size;
index a97f4acb64526248178715e035b89dd9a93eef37..60c83bc71d84e662d15af2654c54ba2cb39e4147 100644 (file)
@@ -860,7 +860,7 @@ static ssize_t cyapa_gen3_show_baseline(struct device *dev,
 
        dev_dbg(dev, "Baseline report successful. Max: %d Min: %d\n",
                max_baseline, min_baseline);
-       ret = scnprintf(buf, PAGE_SIZE, "%d %d\n", max_baseline, min_baseline);
+       ret = sysfs_emit(buf, "%d %d\n", max_baseline, min_baseline);
 
 out:
        return ret;
index abf42f77b4c593d1b3f8406832d2a67684780b8d..2e6bcb07257ed7374ac69e288d21fc4133cddb85 100644 (file)
@@ -2418,12 +2418,12 @@ resume_scanning:
                return resume_error ? resume_error : error;
 
        /* 12. Output data strings */
-       size = scnprintf(buf, PAGE_SIZE, "%d %d %d %d %d %d %d %d %d %d %d ",
+       size = sysfs_emit(buf, "%d %d %d %d %d %d %d %d %d %d %d ",
                gidac_mutual_min, gidac_mutual_max, gidac_mutual_ave,
                lidac_mutual_min, lidac_mutual_max, lidac_mutual_ave,
                gidac_self_rx, gidac_self_tx,
                lidac_self_min, lidac_self_max, lidac_self_ave);
-       size += scnprintf(buf + size, PAGE_SIZE - size,
+       size += sysfs_emit_at(buf, size,
                "%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n",
                raw_cap_mutual_min, raw_cap_mutual_max, raw_cap_mutual_ave,
                raw_cap_self_min, raw_cap_self_max, raw_cap_self_ave,
index 0caaf3e64215d00c57196967f229d7892afe8acc..4ffe08fee10c8a465cb3e4042de08b335ffa6f0b 100644 (file)
@@ -629,14 +629,14 @@ static ssize_t cyapa_gen6_show_baseline(struct device *dev,
        if (error)
                goto resume_scanning;
 
-       size = scnprintf(buf, PAGE_SIZE, "%d %d %d %d %d %d ",
-                       data[0],  /* RX Attenuator Mutual */
-                       data[1],  /* IDAC Mutual */
-                       data[2],  /* RX Attenuator Self RX */
-                       data[3],  /* IDAC Self RX */
-                       data[4],  /* RX Attenuator Self TX */
-                       data[5]   /* IDAC Self TX */
-                       );
+       size = sysfs_emit(buf, "%d %d %d %d %d %d ",
+                         data[0],  /* RX Attenuator Mutual */
+                         data[1],  /* IDAC Mutual */
+                         data[2],  /* RX Attenuator Self RX */
+                         data[3],  /* IDAC Self RX */
+                         data[4],  /* RX Attenuator Self TX */
+                         data[5]   /* IDAC Self TX */
+                        );
 
        /* 3. Read Attenuator Trim. */
        data_len = sizeof(data);
@@ -648,8 +648,8 @@ static ssize_t cyapa_gen6_show_baseline(struct device *dev,
 
        /* set attenuator trim values. */
        for (i = 0; i < data_len; i++)
-               size += scnprintf(buf + size, PAGE_SIZE - size, "%d ", data[i]);
-       size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+               size += sysfs_emit_at(buf, size, "%d ", data[i]);
+       size += sysfs_emit_at(buf, size, "\n");
 
 resume_scanning:
        /* 4. Resume Scanning*/
index 148a601396f92cd5293405fe0355f57672154e4e..8a72c200ccb5d127d1dcd209b8eda29f4b619fef 100644 (file)
@@ -572,7 +572,7 @@ static ssize_t elan_sysfs_read_fw_checksum(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct elan_tp_data *data = i2c_get_clientdata(client);
 
-       return sprintf(buf, "0x%04x\n", data->fw_checksum);
+       return sysfs_emit(buf, "0x%04x\n", data->fw_checksum);
 }
 
 static ssize_t elan_sysfs_read_product_id(struct device *dev,
@@ -582,8 +582,8 @@ static ssize_t elan_sysfs_read_product_id(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct elan_tp_data *data = i2c_get_clientdata(client);
 
-       return sprintf(buf, ETP_PRODUCT_ID_FORMAT_STRING "\n",
-                      data->product_id);
+       return sysfs_emit(buf, ETP_PRODUCT_ID_FORMAT_STRING "\n",
+                         data->product_id);
 }
 
 static ssize_t elan_sysfs_read_fw_ver(struct device *dev,
@@ -593,7 +593,7 @@ static ssize_t elan_sysfs_read_fw_ver(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct elan_tp_data *data = i2c_get_clientdata(client);
 
-       return sprintf(buf, "%d.0\n", data->fw_version);
+       return sysfs_emit(buf, "%d.0\n", data->fw_version);
 }
 
 static ssize_t elan_sysfs_read_sm_ver(struct device *dev,
@@ -603,7 +603,7 @@ static ssize_t elan_sysfs_read_sm_ver(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct elan_tp_data *data = i2c_get_clientdata(client);
 
-       return sprintf(buf, "%d.0\n", data->sm_version);
+       return sysfs_emit(buf, "%d.0\n", data->sm_version);
 }
 
 static ssize_t elan_sysfs_read_iap_ver(struct device *dev,
@@ -613,7 +613,7 @@ static ssize_t elan_sysfs_read_iap_ver(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct elan_tp_data *data = i2c_get_clientdata(client);
 
-       return sprintf(buf, "%d.0\n", data->iap_version);
+       return sysfs_emit(buf, "%d.0\n", data->iap_version);
 }
 
 static ssize_t elan_sysfs_update_fw(struct device *dev,
@@ -754,7 +754,7 @@ static ssize_t elan_sysfs_read_mode(struct device *dev,
        if (error)
                return error;
 
-       return sprintf(buf, "%d\n", (int)mode);
+       return sysfs_emit(buf, "%d\n", (int)mode);
 }
 
 static DEVICE_ATTR(product_id, S_IRUGO, elan_sysfs_read_product_id, NULL);
@@ -858,7 +858,7 @@ static ssize_t min_show(struct device *dev,
                goto out;
        }
 
-       retval = snprintf(buf, PAGE_SIZE, "%d", data->min_baseline);
+       retval = sysfs_emit(buf, "%d", data->min_baseline);
 
 out:
        mutex_unlock(&data->sysfs_mutex);
@@ -881,7 +881,7 @@ static ssize_t max_show(struct device *dev,
                goto out;
        }
 
-       retval = snprintf(buf, PAGE_SIZE, "%d", data->max_baseline);
+       retval = sysfs_emit(buf, "%d", data->max_baseline);
 
 out:
        mutex_unlock(&data->sysfs_mutex);
index c00dc1275da23daebb21110749f3c3f75431d075..ba757783c258aaddef4d6b882282451df70e2528 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/input.h>
 #include <linux/input/navpoint.h>
 #include <linux/interrupt.h>
@@ -32,7 +32,7 @@ struct navpoint {
        struct ssp_device       *ssp;
        struct input_dev        *input;
        struct device           *dev;
-       int                     gpio;
+       struct gpio_desc        *gpiod;
        int                     index;
        u8                      data[1 + HEADER_LENGTH(0xff)];
 };
@@ -170,16 +170,14 @@ static void navpoint_up(struct navpoint *navpoint)
                dev_err(navpoint->dev,
                        "timeout waiting for SSSR[CSS] to clear\n");
 
-       if (gpio_is_valid(navpoint->gpio))
-               gpio_set_value(navpoint->gpio, 1);
+       gpiod_set_value(navpoint->gpiod, 1);
 }
 
 static void navpoint_down(struct navpoint *navpoint)
 {
        struct ssp_device *ssp = navpoint->ssp;
 
-       if (gpio_is_valid(navpoint->gpio))
-               gpio_set_value(navpoint->gpio, 0);
+       gpiod_set_value(navpoint->gpiod, 0);
 
        pxa_ssp_write_reg(ssp, SSCR0, 0);
 
@@ -216,18 +214,9 @@ static int navpoint_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       if (gpio_is_valid(pdata->gpio)) {
-               error = gpio_request_one(pdata->gpio, GPIOF_OUT_INIT_LOW,
-                                        "SYNAPTICS_ON");
-               if (error)
-                       return error;
-       }
-
        ssp = pxa_ssp_request(pdata->port, pdev->name);
-       if (!ssp) {
-               error = -ENODEV;
-               goto err_free_gpio;
-       }
+       if (!ssp)
+               return -ENODEV;
 
        /* HaRET does not disable devices before jumping into Linux */
        if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) {
@@ -242,10 +231,18 @@ static int navpoint_probe(struct platform_device *pdev)
                goto err_free_mem;
        }
 
+       navpoint->gpiod = gpiod_get_optional(&pdev->dev,
+                                            NULL, GPIOD_OUT_LOW);
+       if (IS_ERR(navpoint->gpiod)) {
+               error = PTR_ERR(navpoint->gpiod);
+               dev_err(&pdev->dev, "error getting GPIO\n");
+               goto err_free_mem;
+       }
+       gpiod_set_consumer_name(navpoint->gpiod, "SYNAPTICS_ON");
+
        navpoint->ssp = ssp;
        navpoint->input = input;
        navpoint->dev = &pdev->dev;
-       navpoint->gpio = pdata->gpio;
 
        input->name = pdev->name;
        input->dev.parent = &pdev->dev;
@@ -288,17 +285,12 @@ err_free_mem:
        input_free_device(input);
        kfree(navpoint);
        pxa_ssp_free(ssp);
-err_free_gpio:
-       if (gpio_is_valid(pdata->gpio))
-               gpio_free(pdata->gpio);
 
        return error;
 }
 
 static void navpoint_remove(struct platform_device *pdev)
 {
-       const struct navpoint_platform_data *pdata =
-                                       dev_get_platdata(&pdev->dev);
        struct navpoint *navpoint = platform_get_drvdata(pdev);
        struct ssp_device *ssp = navpoint->ssp;
 
@@ -308,9 +300,6 @@ static void navpoint_remove(struct platform_device *pdev)
        kfree(navpoint);
 
        pxa_ssp_free(ssp);
-
-       if (gpio_is_valid(pdata->gpio))
-               gpio_free(pdata->gpio);
 }
 
 static int navpoint_suspend(struct device *dev)
index d7603c50f864b329e1ef4dc9c75df885113e8cbb..cc1d4b424640ea3902cf726329b69c01aa23c4ba 100644 (file)
@@ -267,8 +267,7 @@ static ssize_t rmi_driver_manufacturer_id_show(struct device *dev,
        struct rmi_driver_data *data = dev_get_drvdata(dev);
        struct f01_data *f01 = dev_get_drvdata(&data->f01_container->dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%d\n",
-                        f01->properties.manufacturer_id);
+       return sysfs_emit(buf, "%d\n", f01->properties.manufacturer_id);
 }
 
 static DEVICE_ATTR(manufacturer_id, 0444,
@@ -280,7 +279,7 @@ static ssize_t rmi_driver_dom_show(struct device *dev,
        struct rmi_driver_data *data = dev_get_drvdata(dev);
        struct f01_data *f01 = dev_get_drvdata(&data->f01_container->dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%s\n", f01->properties.dom);
+       return sysfs_emit(buf, "%s\n", f01->properties.dom);
 }
 
 static DEVICE_ATTR(date_of_manufacture, 0444, rmi_driver_dom_show, NULL);
@@ -292,7 +291,7 @@ static ssize_t rmi_driver_product_id_show(struct device *dev,
        struct rmi_driver_data *data = dev_get_drvdata(dev);
        struct f01_data *f01 = dev_get_drvdata(&data->f01_container->dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%s\n", f01->properties.product_id);
+       return sysfs_emit(buf, "%s\n", f01->properties.product_id);
 }
 
 static DEVICE_ATTR(product_id, 0444, rmi_driver_product_id_show, NULL);
@@ -304,7 +303,7 @@ static ssize_t rmi_driver_firmware_id_show(struct device *dev,
        struct rmi_driver_data *data = dev_get_drvdata(dev);
        struct f01_data *f01 = dev_get_drvdata(&data->f01_container->dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%d\n", f01->properties.firmware_id);
+       return sysfs_emit(buf, "%d\n", f01->properties.firmware_id);
 }
 
 static DEVICE_ATTR(firmware_id, 0444, rmi_driver_firmware_id_show, NULL);
@@ -318,8 +317,8 @@ static ssize_t rmi_driver_package_id_show(struct device *dev,
 
        u32 package_id = f01->properties.package_id;
 
-       return scnprintf(buf, PAGE_SIZE, "%04x.%04x\n",
-                        package_id & 0xffff, (package_id >> 16) & 0xffff);
+       return sysfs_emit(buf, "%04x.%04x\n",
+                         package_id & 0xffff, (package_id >> 16) & 0xffff);
 }
 
 static DEVICE_ATTR(package_id, 0444, rmi_driver_package_id_show, NULL);
index b585b1dab870e0725daa62d7b52d2c9ca406798a..dfc6c581873b7d45da63d88a216295a24fa2c13b 100644 (file)
@@ -634,6 +634,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
                },
                .driver_data = (void *)(SERIO_QUIRK_NOAUX)
        },
+       {
+               /* Fujitsu Lifebook U728 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U728"),
+               },
+               .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+       },
        {
                /* Gigabyte M912 */
                .matches = {
@@ -1208,6 +1216,12 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
                                        SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
                                        SERIO_QUIRK_NOPNP)
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "NS5x_7xPU"),
+               },
+               .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+       },
        {
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
index a2c4b3b87f9356602b15e4d5eaaed2cc47f453a6..542a31448c8f13249cddffa1b2c8f5f9fb9f0c02 100644 (file)
@@ -2818,8 +2818,8 @@ static ssize_t mxt_fw_version_show(struct device *dev,
 {
        struct mxt_data *data = dev_get_drvdata(dev);
        struct mxt_info *info = data->info;
-       return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n",
-                        info->version >> 4, info->version & 0xf, info->build);
+       return sysfs_emit(buf, "%u.%u.%02X\n",
+                         info->version >> 4, info->version & 0xf, info->build);
 }
 
 /* Hardware Version is returned as FamilyID.VariantID */
@@ -2828,8 +2828,7 @@ static ssize_t mxt_hw_version_show(struct device *dev,
 {
        struct mxt_data *data = dev_get_drvdata(dev);
        struct mxt_info *info = data->info;
-       return scnprintf(buf, PAGE_SIZE, "%u.%u\n",
-                        info->family_id, info->variant_id);
+       return sysfs_emit(buf, "%u.%u\n", info->family_id, info->variant_id);
 }
 
 static ssize_t mxt_show_instance(char *buf, int count,
@@ -2839,19 +2838,18 @@ static ssize_t mxt_show_instance(char *buf, int count,
        int i;
 
        if (mxt_obj_instances(object) > 1)
-               count += scnprintf(buf + count, PAGE_SIZE - count,
-                                  "Instance %u\n", instance);
+               count += sysfs_emit_at(buf, count, "Instance %u\n", instance);
 
        for (i = 0; i < mxt_obj_size(object); i++)
-               count += scnprintf(buf + count, PAGE_SIZE - count,
-                               "\t[%2u]: %02x (%d)\n", i, val[i], val[i]);
-       count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
+               count += sysfs_emit_at(buf, count, "\t[%2u]: %02x (%d)\n",
+                                      i, val[i], val[i]);
+       count += sysfs_emit_at(buf, count, "\n");
 
        return count;
 }
 
 static ssize_t mxt_object_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
+                              struct device_attribute *attr, char *buf)
 {
        struct mxt_data *data = dev_get_drvdata(dev);
        struct mxt_object *object;
@@ -2872,8 +2870,7 @@ static ssize_t mxt_object_show(struct device *dev,
                if (!mxt_object_readable(object->type))
                        continue;
 
-               count += scnprintf(buf + count, PAGE_SIZE - count,
-                               "T%u:\n", object->type);
+               count += sysfs_emit_at(buf, count, "T%u:\n", object->type);
 
                for (j = 0; j < mxt_obj_instances(object); j++) {
                        u16 size = mxt_obj_size(object);
index 3e102bcc4a1c7120428ca4c8a40d396797ae9980..2a1db1134476656e8148dd963d68f72de7d57d55 100644 (file)
@@ -431,7 +431,7 @@ static ssize_t edt_ft5x06_setting_show(struct device *dev,
                *field = val;
        }
 
-       count = scnprintf(buf, PAGE_SIZE, "%d\n", val);
+       count = sysfs_emit(buf, "%d\n", val);
 out:
        mutex_unlock(&tsdata->mutex);
        return error ?: count;
index af32fbe57b630373f6fd8b67e3129be1d54de1d1..b068ff8afbc9ad3ba62b70cbbee20feb572c3855 100644 (file)
@@ -884,7 +884,8 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
                }
        }
 
-       if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
+       /* Some devices with gpio_int_idx 0 list a third unused GPIO */
+       if ((ts->gpio_count == 2 || ts->gpio_count == 3) && ts->gpio_int_idx == 0) {
                ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
                gpio_mapping = acpi_goodix_int_first_gpios;
        } else if (ts->gpio_count == 2 && ts->gpio_int_idx == 1) {
index 0f58258306bfc594e84625f51e56cf7cd963615d..eae90676f4e551b67b2d8565665e86ce94ee74d7 100644 (file)
@@ -928,8 +928,7 @@ static ssize_t hideep_fw_version_show(struct device *dev,
        ssize_t len;
 
        mutex_lock(&ts->dev_mutex);
-       len = scnprintf(buf, PAGE_SIZE, "%04x\n",
-                       be16_to_cpu(ts->dwz_info.release_ver));
+       len = sysfs_emit(buf, "%04x\n", be16_to_cpu(ts->dwz_info.release_ver));
        mutex_unlock(&ts->dev_mutex);
 
        return len;
@@ -943,8 +942,7 @@ static ssize_t hideep_product_id_show(struct device *dev,
        ssize_t len;
 
        mutex_lock(&ts->dev_mutex);
-       len = scnprintf(buf, PAGE_SIZE, "%04x\n",
-                       be16_to_cpu(ts->dwz_info.product_id));
+       len = sysfs_emit(buf, "%04x\n", be16_to_cpu(ts->dwz_info.product_id));
        mutex_unlock(&ts->dev_mutex);
 
        return len;
index d0f257989fd6b728091ff3ee1add1d00777507de..2e01d87977c168ff618220aceaa02da0a132e59a 100644 (file)
@@ -202,7 +202,7 @@ static ssize_t hycon_hy46xx_setting_show(struct device *dev,
                *field = val;
        }
 
-       count = scnprintf(buf, PAGE_SIZE, "%d\n", val);
+       count = sysfs_emit(buf, "%d\n", val);
 
 out:
        mutex_unlock(&tsdata->mutex);
index 90c4934e750a3a3dda59e0688388f8d1f0dcf4b4..fc4e39b6651a4919bea885c5971a04c5dadeeb7f 100644 (file)
@@ -512,12 +512,12 @@ static ssize_t firmware_version_show(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct ilitek_ts_data *ts = i2c_get_clientdata(client);
 
-       return scnprintf(buf, PAGE_SIZE,
-                        "fw version: [%02X%02X.%02X%02X.%02X%02X.%02X%02X]\n",
-                        ts->firmware_ver[0], ts->firmware_ver[1],
-                        ts->firmware_ver[2], ts->firmware_ver[3],
-                        ts->firmware_ver[4], ts->firmware_ver[5],
-                        ts->firmware_ver[6], ts->firmware_ver[7]);
+       return sysfs_emit(buf,
+                         "fw version: [%02X%02X.%02X%02X.%02X%02X.%02X%02X]\n",
+                         ts->firmware_ver[0], ts->firmware_ver[1],
+                         ts->firmware_ver[2], ts->firmware_ver[3],
+                         ts->firmware_ver[4], ts->firmware_ver[5],
+                         ts->firmware_ver[6], ts->firmware_ver[7]);
 }
 static DEVICE_ATTR_RO(firmware_version);
 
@@ -527,8 +527,8 @@ static ssize_t product_id_show(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct ilitek_ts_data *ts = i2c_get_clientdata(client);
 
-       return scnprintf(buf, PAGE_SIZE, "product id: [%04X], module: [%s]\n",
-                        ts->mcu_ver, ts->product_id);
+       return sysfs_emit(buf, "product id: [%04X], module: [%s]\n",
+                         ts->mcu_ver, ts->product_id);
 }
 static DEVICE_ATTR_RO(product_id);
 
index a3f4fb85bee58bf1e5f73861bd59637eed4e565f..4d226118f3cc2986efd2a8efce6484733c7dc206 100644 (file)
@@ -943,12 +943,12 @@ static ssize_t fw_info_show(struct device *dev,
        if (!iqs5xx->dev_id_info.bl_status)
                return -ENODATA;
 
-       return scnprintf(buf, PAGE_SIZE, "%u.%u.%u.%u:%u.%u\n",
-                        be16_to_cpu(iqs5xx->dev_id_info.prod_num),
-                        be16_to_cpu(iqs5xx->dev_id_info.proj_num),
-                        iqs5xx->dev_id_info.major_ver,
-                        iqs5xx->dev_id_info.minor_ver,
-                        iqs5xx->exp_file[0], iqs5xx->exp_file[1]);
+       return sysfs_emit(buf, "%u.%u.%u.%u:%u.%u\n",
+                         be16_to_cpu(iqs5xx->dev_id_info.prod_num),
+                         be16_to_cpu(iqs5xx->dev_id_info.proj_num),
+                         iqs5xx->dev_id_info.major_ver,
+                         iqs5xx->dev_id_info.minor_ver,
+                         iqs5xx->exp_file[0], iqs5xx->exp_file[1]);
 }
 
 static DEVICE_ATTR_WO(fw_file);
index dc084f873762005b3c129cdf154f1feaa5b6758d..f0a56cde899e48054d5aaf6ec3daef02104fa6c5 100644 (file)
@@ -2401,12 +2401,12 @@ static ssize_t fw_info_show(struct device *dev,
 {
        struct iqs7211_private *iqs7211 = dev_get_drvdata(dev);
 
-       return scnprintf(buf, PAGE_SIZE, "%u.%u.%u.%u:%u.%u\n",
-                        le16_to_cpu(iqs7211->ver_info.prod_num),
-                        le32_to_cpu(iqs7211->ver_info.patch),
-                        le16_to_cpu(iqs7211->ver_info.major),
-                        le16_to_cpu(iqs7211->ver_info.minor),
-                        iqs7211->exp_file[1], iqs7211->exp_file[0]);
+       return sysfs_emit(buf, "%u.%u.%u.%u:%u.%u\n",
+                         le16_to_cpu(iqs7211->ver_info.prod_num),
+                         le32_to_cpu(iqs7211->ver_info.patch),
+                         le16_to_cpu(iqs7211->ver_info.major),
+                         le16_to_cpu(iqs7211->ver_info.minor),
+                         iqs7211->exp_file[1], iqs7211->exp_file[0]);
 }
 
 static DEVICE_ATTR_RO(fw_info);
index aa325486f61825e2e30d36a432fffafd8ddb2cbb..78e1c63e530e06b4cf86801a3feef0a02785879f 100644 (file)
@@ -1336,9 +1336,9 @@ static ssize_t mip4_sysfs_read_fw_version(struct device *dev,
        /* Take lock to prevent racing with firmware update */
        mutex_lock(&ts->input->mutex);
 
-       count = snprintf(buf, PAGE_SIZE, "%04X %04X %04X %04X\n",
-                        ts->fw_version.boot, ts->fw_version.core,
-                        ts->fw_version.app, ts->fw_version.param);
+       count = sysfs_emit(buf, "%04X %04X %04X %04X\n",
+                          ts->fw_version.boot, ts->fw_version.core,
+                          ts->fw_version.app, ts->fw_version.param);
 
        mutex_unlock(&ts->input->mutex);
 
@@ -1362,8 +1362,8 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
         * product_name shows the name or version of the hardware
         * paired with current firmware in the chip.
         */
-       count = snprintf(buf, PAGE_SIZE, "%.*s\n",
-                        (int)sizeof(ts->product_name), ts->product_name);
+       count = sysfs_emit(buf, "%.*s\n",
+                          (int)sizeof(ts->product_name), ts->product_name);
 
        mutex_unlock(&ts->input->mutex);
 
@@ -1382,7 +1382,7 @@ static ssize_t mip4_sysfs_read_product_id(struct device *dev,
 
        mutex_lock(&ts->input->mutex);
 
-       count = snprintf(buf, PAGE_SIZE, "%04X\n", ts->product_id);
+       count = sysfs_emit(buf, "%04X\n", ts->product_id);
 
        mutex_unlock(&ts->input->mutex);
 
@@ -1401,8 +1401,8 @@ static ssize_t mip4_sysfs_read_ic_name(struct device *dev,
 
        mutex_lock(&ts->input->mutex);
 
-       count = snprintf(buf, PAGE_SIZE, "%.*s\n",
-                        (int)sizeof(ts->ic_name), ts->ic_name);
+       count = sysfs_emit(buf, "%.*s\n",
+                          (int)sizeof(ts->ic_name), ts->ic_name);
 
        mutex_unlock(&ts->input->mutex);
 
index d6d04b9f04fc1cb272c67f3968556d1e8024cd08..60354ebc7242493ed7625db8e647101b28142748 100644 (file)
@@ -456,8 +456,8 @@ static ssize_t mtouch_firmware_rev_show(struct device *dev,
        struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
        struct mtouch_priv *priv = usbtouch->priv;
 
-       return scnprintf(output, PAGE_SIZE, "%1x.%1x\n",
-                        priv->fw_rev_major, priv->fw_rev_minor);
+       return sysfs_emit(output, "%1x.%1x\n",
+                         priv->fw_rev_major, priv->fw_rev_minor);
 }
 static DEVICE_ATTR(firmware_rev, 0444, mtouch_firmware_rev_show, NULL);
 
index 128341a6696bc01943c1b7eb7eb42ecf93417183..32c7be54434cf26444eef7b8a3a34277c40049bd 100644 (file)
@@ -887,7 +887,7 @@ static ssize_t config_csum_show(struct device *dev,
        cfg_csum = wdt->param.xmls_id1;
        cfg_csum = (cfg_csum << 16) | wdt->param.xmls_id2;
 
-       return scnprintf(buf, PAGE_SIZE, "%x\n", cfg_csum);
+       return sysfs_emit(buf, "%x\n", cfg_csum);
 }
 
 static ssize_t fw_version_show(struct device *dev,
@@ -896,7 +896,7 @@ static ssize_t fw_version_show(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct wdt87xx_data *wdt = i2c_get_clientdata(client);
 
-       return scnprintf(buf, PAGE_SIZE, "%x\n", wdt->param.fw_id);
+       return sysfs_emit(buf, "%x\n", wdt->param.fw_id);
 }
 
 static ssize_t plat_id_show(struct device *dev,
@@ -905,7 +905,7 @@ static ssize_t plat_id_show(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct wdt87xx_data *wdt = i2c_get_clientdata(client);
 
-       return scnprintf(buf, PAGE_SIZE, "%x\n", wdt->param.plat_id);
+       return sysfs_emit(buf, "%x\n", wdt->param.plat_id);
 }
 
 static ssize_t update_config_store(struct device *dev,
index 5be5112845e1edf93808bddd3a65fa93347f4997..5680075f0bb84474713705cb43db09b5f4b90e9a 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/device.h>
 #include <linux/sysfs.h>
 #include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
 #include <linux/platform_data/zforce_ts.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of.h>
@@ -106,6 +107,7 @@ struct zforce_point {
 struct zforce_ts {
        struct i2c_client       *client;
        struct input_dev        *input;
+       struct touchscreen_properties prop;
        const struct zforce_ts_platdata *pdata;
        char                    phys[32];
 
@@ -266,7 +268,6 @@ static int zforce_setconfig(struct zforce_ts *ts, char b1)
 static int zforce_start(struct zforce_ts *ts)
 {
        struct i2c_client *client = ts->client;
-       const struct zforce_ts_platdata *pdata = ts->pdata;
        int ret;
 
        dev_dbg(&client->dev, "starting device\n");
@@ -277,7 +278,7 @@ static int zforce_start(struct zforce_ts *ts)
                return ret;
        }
 
-       ret = zforce_resolution(ts, pdata->x_max, pdata->y_max);
+       ret = zforce_resolution(ts, ts->prop.max_x, ts->prop.max_y);
        if (ret) {
                dev_err(&client->dev, "Unable to set resolution, %d\n", ret);
                goto error;
@@ -337,7 +338,6 @@ static int zforce_stop(struct zforce_ts *ts)
 static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
 {
        struct i2c_client *client = ts->client;
-       const struct zforce_ts_platdata *pdata = ts->pdata;
        struct zforce_point point;
        int count, i, num = 0;
 
@@ -355,8 +355,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
                point.coord_y =
                        payload[9 * i + 4] << 8 | payload[9 * i + 3];
 
-               if (point.coord_x > pdata->x_max ||
-                   point.coord_y > pdata->y_max) {
+               if (point.coord_x > ts->prop.max_x ||
+                   point.coord_y > ts->prop.max_y) {
                        dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
                                point.coord_x, point.coord_y);
                        point.coord_x = point.coord_y = 0;
@@ -390,10 +390,9 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
                                                point.state != STATE_UP);
 
                if (point.state != STATE_UP) {
-                       input_report_abs(ts->input, ABS_MT_POSITION_X,
-                                        point.coord_x);
-                       input_report_abs(ts->input, ABS_MT_POSITION_Y,
-                                        point.coord_y);
+                       touchscreen_report_pos(ts->input, &ts->prop,
+                                              point.coord_x, point.coord_y,
+                                              true);
                        input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
                                         point.area_major);
                        input_report_abs(ts->input, ABS_MT_TOUCH_MINOR,
@@ -719,15 +718,8 @@ static struct zforce_ts_platdata *zforce_parse_dt(struct device *dev)
                return ERR_PTR(-ENOMEM);
        }
 
-       if (of_property_read_u32(np, "x-size", &pdata->x_max)) {
-               dev_err(dev, "failed to get x-size property\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (of_property_read_u32(np, "y-size", &pdata->y_max)) {
-               dev_err(dev, "failed to get y-size property\n");
-               return ERR_PTR(-EINVAL);
-       }
+       of_property_read_u32(np, "x-size", &pdata->x_max);
+       of_property_read_u32(np, "y-size", &pdata->y_max);
 
        return pdata;
 }
@@ -856,6 +848,12 @@ static int zforce_probe(struct i2c_client *client)
        input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
                             pdata->y_max, 0, 0);
 
+       touchscreen_parse_properties(input_dev, true, &ts->prop);
+       if (ts->prop.max_x == 0 || ts->prop.max_y == 0) {
+               dev_err(&client->dev, "no size specified\n");
+               return -EINVAL;
+       }
+
        input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
                             ZFORCE_MAX_AREA, 0, 0);
        input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
index 6dae83d968067e775b20824a11180a6073c931ba..0d29ec014e2f9b8d8dc53f6ce1981cb850480995 100644 (file)
@@ -27,10 +27,10 @@ ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
                return 0;
 
        for (i = 0; i < data->num_function_row_keys; i++)
-               size += scnprintf(buf + size, PAGE_SIZE - size,
-                                 "%s%02X", size ? " " : "", physmap[i]);
+               size += sysfs_emit_at(buf, size,
+                                     "%s%02X", size ? " " : "", physmap[i]);
        if (size)
-               size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+               size += sysfs_emit_at(buf, size, "\n");
 
        return size;
 }
index b43325364aa31adbeddee934b8c6a3d480a54b55..8c40f41822634c1780fceb367da59ab79e190089 100644 (file)
@@ -86,16 +86,9 @@ static int imx8mm_icc_probe(struct platform_device *pdev)
        return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
 }
 
-static int imx8mm_icc_remove(struct platform_device *pdev)
-{
-       imx_icc_unregister(pdev);
-
-       return 0;
-}
-
 static struct platform_driver imx8mm_icc_driver = {
        .probe = imx8mm_icc_probe,
-       .remove = imx8mm_icc_remove,
+       .remove_new = imx_icc_unregister,
        .driver = {
                .name = "imx8mm-interconnect",
        },
index 8ce6d8e4bf5e9455de9addd06b942e39dcab7bd4..fa3d4f97dfa4a11e9b62940dd3152bf3a5e5d2e2 100644 (file)
@@ -75,16 +75,9 @@ static int imx8mn_icc_probe(struct platform_device *pdev)
        return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
 }
 
-static int imx8mn_icc_remove(struct platform_device *pdev)
-{
-       imx_icc_unregister(pdev);
-
-       return 0;
-}
-
 static struct platform_driver imx8mn_icc_driver = {
        .probe = imx8mn_icc_probe,
-       .remove = imx8mn_icc_remove,
+       .remove_new = imx_icc_unregister,
        .driver = {
                .name = "imx8mn-interconnect",
        },
index a66ae3638b18a27fa5f33605bd73a8db855b6504..d218bb47757a2a0700310e7a814a99873b4c5529 100644 (file)
@@ -239,16 +239,9 @@ static int imx8mp_icc_probe(struct platform_device *pdev)
        return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), noc_setting_nodes);
 }
 
-static int imx8mp_icc_remove(struct platform_device *pdev)
-{
-       imx_icc_unregister(pdev);
-
-       return 0;
-}
-
 static struct platform_driver imx8mp_icc_driver = {
        .probe = imx8mp_icc_probe,
-       .remove = imx8mp_icc_remove,
+       .remove_new = imx_icc_unregister,
        .driver = {
                .name = "imx8mp-interconnect",
        },
index b6fb71305c99b5549b4a44fb4bb59c7ce2034325..8bbd672b346e10ca446ca40dc0435768df5a21d9 100644 (file)
@@ -85,16 +85,9 @@ static int imx8mq_icc_probe(struct platform_device *pdev)
        return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
 }
 
-static int imx8mq_icc_remove(struct platform_device *pdev)
-{
-       imx_icc_unregister(pdev);
-
-       return 0;
-}
-
 static struct platform_driver imx8mq_icc_driver = {
        .probe = imx8mq_icc_probe,
-       .remove = imx8mq_icc_remove,
+       .remove_new = imx_icc_unregister,
        .driver = {
                .name = "imx8mq-interconnect",
                .sync_state = icc_sync_state,
index 62b516d38d03ff23da192c33b4a289379fdfc4cc..697f96c49f6f4b181c0a43cef3364790b0f5aa2b 100644 (file)
@@ -191,6 +191,15 @@ config INTERCONNECT_QCOM_SDX75
          This is a driver for the Qualcomm Network-on-Chip on sdx75-based
          platforms.
 
+config INTERCONNECT_QCOM_SM6115
+       tristate "Qualcomm SM6115 interconnect driver"
+       depends on INTERCONNECT_QCOM
+       depends on QCOM_SMD_RPM
+       select INTERCONNECT_QCOM_SMD_RPM
+       help
+         This is a driver for the Qualcomm Network-on-Chip on sm6115-based
+         platforms.
+
 config INTERCONNECT_QCOM_SM6350
        tristate "Qualcomm SM6350 interconnect driver"
        depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
@@ -245,5 +254,23 @@ config INTERCONNECT_QCOM_SM8550
          This is a driver for the Qualcomm Network-on-Chip on SM8550-based
          platforms.
 
+config INTERCONNECT_QCOM_SM8650
+       tristate "Qualcomm SM8650 interconnect driver"
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+       select INTERCONNECT_QCOM_RPMH
+       select INTERCONNECT_QCOM_BCM_VOTER
+       help
+         This is a driver for the Qualcomm Network-on-Chip on SM8650-based
+         platforms.
+
+config INTERCONNECT_QCOM_X1E80100
+       tristate "Qualcomm X1E80100 interconnect driver"
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+       select INTERCONNECT_QCOM_RPMH
+       select INTERCONNECT_QCOM_BCM_VOTER
+       help
+         This is a driver for the Qualcomm Network-on-Chip on X1E80100-based
+         platforms.
+
 config INTERCONNECT_QCOM_SMD_RPM
        tristate
index c5320e293960a38acf636fcc1d350d13115ce628..7048461650221934f186e537010b12ecc061a95d 100644 (file)
@@ -24,12 +24,15 @@ qnoc-sdm845-objs                    := sdm845.o
 qnoc-sdx55-objs                                := sdx55.o
 qnoc-sdx65-objs                                := sdx65.o
 qnoc-sdx75-objs                                := sdx75.o
+qnoc-sm6115-objs                       := sm6115.o
 qnoc-sm6350-objs                       := sm6350.o
 qnoc-sm8150-objs                       := sm8150.o
 qnoc-sm8250-objs                       := sm8250.o
 qnoc-sm8350-objs                       := sm8350.o
 qnoc-sm8450-objs                       := sm8450.o
 qnoc-sm8550-objs                       := sm8550.o
+qnoc-sm8650-objs                       := sm8650.o
+qnoc-x1e80100-objs                     := x1e80100.o
 icc-smd-rpm-objs                       := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
 
 obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
@@ -53,10 +56,13 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SDX75) += qnoc-sdx75.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM6115) += qnoc-sm6115.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM6350) += qnoc-sm6350.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM8450) += qnoc-sm8450.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SM8550) += qnoc-sm8550.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8650) += qnoc-sm8650.o
+obj-$(CONFIG_INTERCONNECT_QCOM_X1E80100) += qnoc-x1e80100.o
 obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
index dbacb2a7af508f5b330e8cc00c87b10ac09b55b4..a8ed435f696c67cd406092d8662b13ea7ce34273 100644 (file)
@@ -627,14 +627,12 @@ err_disable_unprepare_clk:
 }
 EXPORT_SYMBOL(qnoc_probe);
 
-int qnoc_remove(struct platform_device *pdev)
+void qnoc_remove(struct platform_device *pdev)
 {
        struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
 
        icc_provider_deregister(&qp->provider);
        icc_nodes_remove(&qp->provider);
        clk_disable_unprepare(qp->bus_clk);
-
-       return 0;
 }
 EXPORT_SYMBOL(qnoc_remove);
index a13768cfd2311ed4b7d4bb7b9827b7121deb02a9..f4883d43eae4d3c05c9d5dba619433a3b2716910 100644 (file)
@@ -161,7 +161,7 @@ extern const struct rpm_clk_resource aggre1_branch_clk;
 extern const struct rpm_clk_resource aggre2_branch_clk;
 
 int qnoc_probe(struct platform_device *pdev);
-int qnoc_remove(struct platform_device *pdev);
+void qnoc_remove(struct platform_device *pdev);
 
 bool qcom_icc_rpm_smd_available(void);
 int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val);
index 35148880b3e87de86e78977390a7bd988f52e1c3..499b1a9ac413bf52a587e81969e4f2e72e745049 100644 (file)
@@ -1344,7 +1344,7 @@ MODULE_DEVICE_TABLE(of, msm8916_noc_of_match);
 
 static struct platform_driver msm8916_noc_driver = {
        .probe = qnoc_probe,
-       .remove = qnoc_remove,
+       .remove_new = qnoc_remove,
        .driver = {
                .name = "qnoc-msm8916",
                .of_match_table = msm8916_noc_of_match,
index b52c5ac1175c3fa458007b13906bba92219368db..8ff2c23b1ca0dd8759290190931f7e294a4e2d3d 100644 (file)
@@ -1421,7 +1421,7 @@ MODULE_DEVICE_TABLE(of, msm8939_noc_of_match);
 
 static struct platform_driver msm8939_noc_driver = {
        .probe = qnoc_probe,
-       .remove = qnoc_remove,
+       .remove_new = qnoc_remove,
        .driver = {
                .name = "qnoc-msm8939",
                .of_match_table = msm8939_noc_of_match,
index 21f6c852141e3043b5ca59934c0cbeec4f7dda34..241076b5f36b460b4c1035cbf0c3422d35dc5735 100644 (file)
@@ -740,15 +740,13 @@ err_remove_nodes:
        return ret;
 }
 
-static int msm8974_icc_remove(struct platform_device *pdev)
+static void msm8974_icc_remove(struct platform_device *pdev)
 {
        struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
 
        icc_provider_deregister(&qp->provider);
        icc_nodes_remove(&qp->provider);
        clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-
-       return 0;
 }
 
 static const struct of_device_id msm8974_noc_of_match[] = {
@@ -764,7 +762,7 @@ MODULE_DEVICE_TABLE(of, msm8974_noc_of_match);
 
 static struct platform_driver msm8974_noc_driver = {
        .probe = msm8974_icc_probe,
-       .remove = msm8974_icc_remove,
+       .remove_new = msm8974_icc_remove,
        .driver = {
                .name = "qnoc-msm8974",
                .of_match_table = msm8974_noc_of_match,
index b73566c9b21f9dd275878419e030ab07163ef8dd..788131400cd132a4d963503db24efaf05ce7a46b 100644 (file)
@@ -2108,7 +2108,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
 
 static struct platform_driver qnoc_driver = {
        .probe = qnoc_probe,
-       .remove = qnoc_remove,
+       .remove_new = qnoc_remove,
        .driver = {
                .name = "qnoc-msm8996",
                .of_match_table = qnoc_of_match,
index e97478bbc282533a16e285882f4a58cd88b8ed02..61a8695a9adc73b0bf0dc47f1232c03da7a1a52d 100644 (file)
@@ -148,14 +148,12 @@ static int qcom_osm_l3_set(struct icc_node *src, struct icc_node *dst)
        return 0;
 }
 
-static int qcom_osm_l3_remove(struct platform_device *pdev)
+static void qcom_osm_l3_remove(struct platform_device *pdev)
 {
        struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
 
        icc_provider_deregister(&qp->provider);
        icc_nodes_remove(&qp->provider);
-
-       return 0;
 }
 
 static int qcom_osm_l3_probe(struct platform_device *pdev)
@@ -292,7 +290,7 @@ MODULE_DEVICE_TABLE(of, osm_l3_of_match);
 
 static struct platform_driver osm_l3_driver = {
        .probe = qcom_osm_l3_probe,
-       .remove = qcom_osm_l3_remove,
+       .remove_new = qcom_osm_l3_remove,
        .driver = {
                .name = "osm-l3",
                .of_match_table = osm_l3_of_match,
index b88cf9a022e035fe5ba1981b79e948e2472d3218..96735800b13c099b577c03ee673756503e2174b8 100644 (file)
@@ -1367,7 +1367,7 @@ MODULE_DEVICE_TABLE(of, qcm2290_noc_of_match);
 
 static struct platform_driver qcm2290_noc_driver = {
        .probe = qnoc_probe,
-       .remove = qnoc_remove,
+       .remove_new = qnoc_remove,
        .driver = {
                .name = "qnoc-qcm2290",
                .of_match_table = qcm2290_noc_of_match,
index 9fa1da70c843a2a0a5cef3d9240720875c017380..11b49a89c03d14149dd22637f0ad898bb083139e 100644 (file)
@@ -1083,7 +1083,7 @@ MODULE_DEVICE_TABLE(of, qcs404_noc_of_match);
 
 static struct platform_driver qcs404_noc_driver = {
        .probe = qnoc_probe,
-       .remove = qnoc_remove,
+       .remove_new = qnoc_remove,
        .driver = {
                .name = "qnoc-qcs404",
                .of_match_table = qcs404_noc_of_match,
index 7392bebba3344fbf65e0dc36378e8c3fe5f3ae18..ab91de446da8818e31b6b8ddd30064f2592521f1 100644 (file)
@@ -1714,7 +1714,7 @@ MODULE_DEVICE_TABLE(of, sdm660_noc_of_match);
 
 static struct platform_driver sdm660_noc_driver = {
        .probe = qnoc_probe,
-       .remove = qnoc_remove,
+       .remove_new = qnoc_remove,
        .driver = {
                .name = "qnoc-sdm660",
                .of_match_table = sdm660_noc_of_match,
diff --git a/drivers/interconnect/qcom/sm6115.c b/drivers/interconnect/qcom/sm6115.c
new file mode 100644 (file)
index 0000000..88b6763
--- /dev/null
@@ -0,0 +1,1423 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <dt-bindings/interconnect/qcom,sm6115.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "icc-rpm.h"
+
+static const char * const snoc_intf_clocks[] = {
+       "cpu_axi",
+       "ufs_axi",
+       "usb_axi",
+       "ipa", /* Required by qxm_ipa */
+};
+
+static const char * const cnoc_intf_clocks[] = {
+       "usb_axi",
+};
+
+enum {
+       SM6115_MASTER_AMPSS_M0,
+       SM6115_MASTER_ANOC_SNOC,
+       SM6115_MASTER_BIMC_SNOC,
+       SM6115_MASTER_CAMNOC_HF,
+       SM6115_MASTER_CAMNOC_SF,
+       SM6115_MASTER_CRYPTO_CORE0,
+       SM6115_MASTER_GRAPHICS_3D,
+       SM6115_MASTER_IPA,
+       SM6115_MASTER_MDP_PORT0,
+       SM6115_MASTER_PIMEM,
+       SM6115_MASTER_QDSS_BAM,
+       SM6115_MASTER_QDSS_DAP,
+       SM6115_MASTER_QDSS_ETR,
+       SM6115_MASTER_QPIC,
+       SM6115_MASTER_QUP_0,
+       SM6115_MASTER_QUP_CORE_0,
+       SM6115_MASTER_SDCC_1,
+       SM6115_MASTER_SDCC_2,
+       SM6115_MASTER_SNOC_BIMC_NRT,
+       SM6115_MASTER_SNOC_BIMC_RT,
+       SM6115_MASTER_SNOC_BIMC,
+       SM6115_MASTER_SNOC_CFG,
+       SM6115_MASTER_SNOC_CNOC,
+       SM6115_MASTER_TCU_0,
+       SM6115_MASTER_TIC,
+       SM6115_MASTER_USB3,
+       SM6115_MASTER_VIDEO_P0,
+       SM6115_MASTER_VIDEO_PROC,
+
+       SM6115_SLAVE_AHB2PHY_USB,
+       SM6115_SLAVE_ANOC_SNOC,
+       SM6115_SLAVE_APPSS,
+       SM6115_SLAVE_APSS_THROTTLE_CFG,
+       SM6115_SLAVE_BIMC_CFG,
+       SM6115_SLAVE_BIMC_SNOC,
+       SM6115_SLAVE_BOOT_ROM,
+       SM6115_SLAVE_CAMERA_CFG,
+       SM6115_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+       SM6115_SLAVE_CAMERA_RT_THROTTLE_CFG,
+       SM6115_SLAVE_CLK_CTL,
+       SM6115_SLAVE_CNOC_MSS,
+       SM6115_SLAVE_CRYPTO_0_CFG,
+       SM6115_SLAVE_DCC_CFG,
+       SM6115_SLAVE_DDR_PHY_CFG,
+       SM6115_SLAVE_DDR_SS_CFG,
+       SM6115_SLAVE_DISPLAY_CFG,
+       SM6115_SLAVE_DISPLAY_THROTTLE_CFG,
+       SM6115_SLAVE_EBI_CH0,
+       SM6115_SLAVE_GPU_CFG,
+       SM6115_SLAVE_GPU_THROTTLE_CFG,
+       SM6115_SLAVE_HWKM_CORE,
+       SM6115_SLAVE_IMEM_CFG,
+       SM6115_SLAVE_IPA_CFG,
+       SM6115_SLAVE_LPASS,
+       SM6115_SLAVE_MAPSS,
+       SM6115_SLAVE_MDSP_MPU_CFG,
+       SM6115_SLAVE_MESSAGE_RAM,
+       SM6115_SLAVE_OCIMEM,
+       SM6115_SLAVE_PDM,
+       SM6115_SLAVE_PIMEM_CFG,
+       SM6115_SLAVE_PIMEM,
+       SM6115_SLAVE_PKA_CORE,
+       SM6115_SLAVE_PMIC_ARB,
+       SM6115_SLAVE_QDSS_CFG,
+       SM6115_SLAVE_QDSS_STM,
+       SM6115_SLAVE_QM_CFG,
+       SM6115_SLAVE_QM_MPU_CFG,
+       SM6115_SLAVE_QPIC,
+       SM6115_SLAVE_QUP_0,
+       SM6115_SLAVE_QUP_CORE_0,
+       SM6115_SLAVE_RBCPR_CX_CFG,
+       SM6115_SLAVE_RBCPR_MX_CFG,
+       SM6115_SLAVE_RPM,
+       SM6115_SLAVE_SDCC_1,
+       SM6115_SLAVE_SDCC_2,
+       SM6115_SLAVE_SECURITY,
+       SM6115_SLAVE_SERVICE_CNOC,
+       SM6115_SLAVE_SERVICE_SNOC,
+       SM6115_SLAVE_SNOC_BIMC_NRT,
+       SM6115_SLAVE_SNOC_BIMC_RT,
+       SM6115_SLAVE_SNOC_BIMC,
+       SM6115_SLAVE_SNOC_CFG,
+       SM6115_SLAVE_SNOC_CNOC,
+       SM6115_SLAVE_TCSR,
+       SM6115_SLAVE_TCU,
+       SM6115_SLAVE_TLMM,
+       SM6115_SLAVE_USB3,
+       SM6115_SLAVE_VENUS_CFG,
+       SM6115_SLAVE_VENUS_THROTTLE_CFG,
+       SM6115_SLAVE_VSENSE_CTRL_CFG,
+};
+
+static const u16 slv_ebi_slv_bimc_snoc_links[] = {
+       SM6115_SLAVE_EBI_CH0,
+       SM6115_SLAVE_BIMC_SNOC,
+};
+
+static struct qcom_icc_node apps_proc = {
+       .name = "apps_proc",
+       .id = SM6115_MASTER_AMPSS_M0,
+       .channels = 1,
+       .buswidth = 16,
+       .qos.qos_port = 0,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.prio_level = 0,
+       .qos.areq_prio = 0,
+       .mas_rpm_id = 0,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(slv_ebi_slv_bimc_snoc_links),
+       .links = slv_ebi_slv_bimc_snoc_links,
+};
+
+static const u16 link_slv_ebi[] = {
+       SM6115_SLAVE_EBI_CH0,
+};
+
+static struct qcom_icc_node mas_snoc_bimc_rt = {
+       .name = "mas_snoc_bimc_rt",
+       .id = SM6115_MASTER_SNOC_BIMC_RT,
+       .channels = 1,
+       .buswidth = 16,
+       .qos.qos_port = 2,
+       .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+       .qos.areq_prio = 0,
+       .qos.prio_level = 0,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_ebi),
+       .links = link_slv_ebi,
+};
+
+static struct qcom_icc_node mas_snoc_bimc_nrt = {
+       .name = "mas_snoc_bimc_nrt",
+       .id = SM6115_MASTER_SNOC_BIMC_NRT,
+       .channels = 1,
+       .buswidth = 16,
+       .qos.qos_port = 3,
+       .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+       .qos.areq_prio = 0,
+       .qos.prio_level = 0,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_ebi),
+       .links = link_slv_ebi,
+};
+
+static struct qcom_icc_node mas_snoc_bimc = {
+       .name = "mas_snoc_bimc",
+       .id = SM6115_MASTER_SNOC_BIMC,
+       .channels = 1,
+       .buswidth = 16,
+       .qos.qos_port = 6,
+       .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+       .qos.areq_prio = 0,
+       .qos.prio_level = 0,
+       .mas_rpm_id = 3,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_ebi),
+       .links = link_slv_ebi,
+};
+
+static struct qcom_icc_node qnm_gpu = {
+       .name = "qnm_gpu",
+       .id = SM6115_MASTER_GRAPHICS_3D,
+       .channels = 1,
+       .buswidth = 32,
+       .qos.qos_port = 1,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.prio_level = 0,
+       .qos.areq_prio = 0,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(slv_ebi_slv_bimc_snoc_links),
+       .links = slv_ebi_slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node tcu_0 = {
+       .name = "tcu_0",
+       .id = SM6115_MASTER_TCU_0,
+       .channels = 1,
+       .buswidth = 8,
+       .qos.qos_port = 4,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.prio_level = 6,
+       .qos.areq_prio = 6,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(slv_ebi_slv_bimc_snoc_links),
+       .links = slv_ebi_slv_bimc_snoc_links,
+};
+
+static const u16 qup_core_0_links[] = {
+       SM6115_SLAVE_QUP_CORE_0,
+};
+
+static struct qcom_icc_node qup0_core_master = {
+       .name = "qup0_core_master",
+       .id = SM6115_MASTER_QUP_CORE_0,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = 170,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(qup_core_0_links),
+       .links = qup_core_0_links,
+};
+
+static const u16 link_slv_anoc_snoc[] = {
+       SM6115_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node crypto_c0 = {
+       .name = "crypto_c0",
+       .id = SM6115_MASTER_CRYPTO_CORE0,
+       .channels = 1,
+       .buswidth = 8,
+       .qos.qos_port = 43,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 2,
+       .mas_rpm_id = 23,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+       .links = link_slv_anoc_snoc,
+};
+
+static const u16 mas_snoc_cnoc_links[] = {
+       SM6115_SLAVE_AHB2PHY_USB,
+       SM6115_SLAVE_APSS_THROTTLE_CFG,
+       SM6115_SLAVE_BIMC_CFG,
+       SM6115_SLAVE_BOOT_ROM,
+       SM6115_SLAVE_CAMERA_CFG,
+       SM6115_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+       SM6115_SLAVE_CAMERA_RT_THROTTLE_CFG,
+       SM6115_SLAVE_CLK_CTL,
+       SM6115_SLAVE_CNOC_MSS,
+       SM6115_SLAVE_CRYPTO_0_CFG,
+       SM6115_SLAVE_DCC_CFG,
+       SM6115_SLAVE_DDR_PHY_CFG,
+       SM6115_SLAVE_DDR_SS_CFG,
+       SM6115_SLAVE_DISPLAY_CFG,
+       SM6115_SLAVE_DISPLAY_THROTTLE_CFG,
+       SM6115_SLAVE_GPU_CFG,
+       SM6115_SLAVE_GPU_THROTTLE_CFG,
+       SM6115_SLAVE_HWKM_CORE,
+       SM6115_SLAVE_IMEM_CFG,
+       SM6115_SLAVE_IPA_CFG,
+       SM6115_SLAVE_LPASS,
+       SM6115_SLAVE_MAPSS,
+       SM6115_SLAVE_MDSP_MPU_CFG,
+       SM6115_SLAVE_MESSAGE_RAM,
+       SM6115_SLAVE_PDM,
+       SM6115_SLAVE_PIMEM_CFG,
+       SM6115_SLAVE_PKA_CORE,
+       SM6115_SLAVE_PMIC_ARB,
+       SM6115_SLAVE_QDSS_CFG,
+       SM6115_SLAVE_QM_CFG,
+       SM6115_SLAVE_QM_MPU_CFG,
+       SM6115_SLAVE_QPIC,
+       SM6115_SLAVE_QUP_0,
+       SM6115_SLAVE_RBCPR_CX_CFG,
+       SM6115_SLAVE_RBCPR_MX_CFG,
+       SM6115_SLAVE_RPM,
+       SM6115_SLAVE_SDCC_1,
+       SM6115_SLAVE_SDCC_2,
+       SM6115_SLAVE_SECURITY,
+       SM6115_SLAVE_SERVICE_CNOC,
+       SM6115_SLAVE_SNOC_CFG,
+       SM6115_SLAVE_TCSR,
+       SM6115_SLAVE_TLMM,
+       SM6115_SLAVE_USB3,
+       SM6115_SLAVE_VENUS_CFG,
+       SM6115_SLAVE_VENUS_THROTTLE_CFG,
+       SM6115_SLAVE_VSENSE_CTRL_CFG,
+};
+
+static struct qcom_icc_node mas_snoc_cnoc = {
+       .name = "mas_snoc_cnoc",
+       .id = SM6115_MASTER_SNOC_CNOC,
+       .channels = 1,
+       .buswidth = 8,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
+       .links = mas_snoc_cnoc_links,
+};
+
+static struct qcom_icc_node xm_dap = {
+       .name = "xm_dap",
+       .id = SM6115_MASTER_QDSS_DAP,
+       .channels = 1,
+       .buswidth = 8,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
+       .links = mas_snoc_cnoc_links,
+};
+
+static const u16 link_slv_snoc_bimc_nrt[] = {
+       SM6115_SLAVE_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node qnm_camera_nrt = {
+       .name = "qnm_camera_nrt",
+       .id = SM6115_MASTER_CAMNOC_SF,
+       .channels = 1,
+       .buswidth = 32,
+       .qos.qos_port = 25,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 3,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_snoc_bimc_nrt),
+       .links = link_slv_snoc_bimc_nrt,
+};
+
+static struct qcom_icc_node qxm_venus0 = {
+       .name = "qxm_venus0",
+       .id = SM6115_MASTER_VIDEO_P0,
+       .channels = 1,
+       .buswidth = 16,
+       .qos.qos_port = 30,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 3,
+       .qos.urg_fwd_en = true,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_snoc_bimc_nrt),
+       .links = link_slv_snoc_bimc_nrt,
+};
+
+static struct qcom_icc_node qxm_venus_cpu = {
+       .name = "qxm_venus_cpu",
+       .id = SM6115_MASTER_VIDEO_PROC,
+       .channels = 1,
+       .buswidth = 8,
+       .qos.qos_port = 34,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_snoc_bimc_nrt),
+       .links = link_slv_snoc_bimc_nrt,
+};
+
+static const u16 link_slv_snoc_bimc_rt[] = {
+       SM6115_SLAVE_SNOC_BIMC_RT,
+};
+
+static struct qcom_icc_node qnm_camera_rt = {
+       .name = "qnm_camera_rt",
+       .id = SM6115_MASTER_CAMNOC_HF,
+       .channels = 1,
+       .buswidth = 32,
+       .qos.qos_port = 31,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 3,
+       .qos.urg_fwd_en = true,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_snoc_bimc_rt),
+       .links = link_slv_snoc_bimc_rt,
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+       .name = "qxm_mdp0",
+       .id = SM6115_MASTER_MDP_PORT0,
+       .channels = 1,
+       .buswidth = 16,
+       .qos.qos_port = 26,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 3,
+       .qos.urg_fwd_en = true,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_snoc_bimc_rt),
+       .links = link_slv_snoc_bimc_rt,
+};
+
+static const u16 slv_service_snoc_links[] = {
+       SM6115_SLAVE_SERVICE_SNOC,
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+       .name = "qhm_snoc_cfg",
+       .id = SM6115_MASTER_SNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(slv_service_snoc_links),
+       .links = slv_service_snoc_links,
+};
+
+static const u16 mas_tic_links[] = {
+       SM6115_SLAVE_APPSS,
+       SM6115_SLAVE_OCIMEM,
+       SM6115_SLAVE_PIMEM,
+       SM6115_SLAVE_QDSS_STM,
+       SM6115_SLAVE_TCU,
+       SM6115_SLAVE_SNOC_BIMC,
+       SM6115_SLAVE_SNOC_CNOC,
+};
+
+static struct qcom_icc_node qhm_tic = {
+       .name = "qhm_tic",
+       .id = SM6115_MASTER_TIC,
+       .channels = 1,
+       .buswidth = 4,
+       .qos.qos_port = 29,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 2,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(mas_tic_links),
+       .links = mas_tic_links,
+};
+
+static struct qcom_icc_node mas_anoc_snoc = {
+       .name = "mas_anoc_snoc",
+       .id = SM6115_MASTER_ANOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(mas_tic_links),
+       .links = mas_tic_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+       SM6115_SLAVE_APPSS,
+       SM6115_SLAVE_SNOC_CNOC,
+       SM6115_SLAVE_OCIMEM,
+       SM6115_SLAVE_PIMEM,
+       SM6115_SLAVE_QDSS_STM,
+       SM6115_SLAVE_TCU,
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+       .name = "mas_bimc_snoc",
+       .id = SM6115_MASTER_BIMC_SNOC,
+       .channels = 1,
+       .buswidth = 8,
+       .mas_rpm_id = 21,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+       .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_pimem_links[] = {
+       SM6115_SLAVE_OCIMEM,
+       SM6115_SLAVE_SNOC_BIMC,
+};
+
+static struct qcom_icc_node qxm_pimem = {
+       .name = "qxm_pimem",
+       .id = SM6115_MASTER_PIMEM,
+       .channels = 1,
+       .buswidth = 8,
+       .qos.qos_port = 41,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 2,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(mas_pimem_links),
+       .links = mas_pimem_links,
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+       .name = "qhm_qdss_bam",
+       .id = SM6115_MASTER_QDSS_BAM,
+       .channels = 1,
+       .buswidth = 4,
+       .qos.qos_port = 23,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 2,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+       .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node qhm_qpic = {
+       .name = "qhm_qpic",
+       .id = SM6115_MASTER_QPIC,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+       .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+       .name = "qhm_qup0",
+       .id = SM6115_MASTER_QUP_0,
+       .channels = 1,
+       .buswidth = 4,
+       .qos.qos_port = 21,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 2,
+       .mas_rpm_id = 166,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+       .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node qxm_ipa = {
+       .name = "qxm_ipa",
+       .id = SM6115_MASTER_IPA,
+       .channels = 1,
+       .buswidth = 8,
+       .qos.qos_port = 24,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 2,
+       .mas_rpm_id = 59,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+       .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+       .name = "xm_qdss_etr",
+       .id = SM6115_MASTER_QDSS_ETR,
+       .channels = 1,
+       .buswidth = 8,
+       .qos.qos_port = 33,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 2,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+       .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node xm_sdc1 = {
+       .name = "xm_sdc1",
+       .id = SM6115_MASTER_SDCC_1,
+       .channels = 1,
+       .buswidth = 8,
+       .qos.qos_port = 38,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 2,
+       .mas_rpm_id = 33,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+       .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+       .name = "xm_sdc2",
+       .id = SM6115_MASTER_SDCC_2,
+       .channels = 1,
+       .buswidth = 8,
+       .qos.qos_port = 44,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 2,
+       .mas_rpm_id = 35,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+       .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+       .name = "xm_usb3_0",
+       .id = SM6115_MASTER_USB3,
+       .channels = 1,
+       .buswidth = 8,
+       .qos.qos_port = 45,
+       .qos.qos_mode = NOC_QOS_MODE_FIXED,
+       .qos.areq_prio = 2,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+       .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node ebi = {
+       .name = "ebi",
+       .id = SM6115_SLAVE_EBI_CH0,
+       .channels = 2,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+       SM6115_MASTER_BIMC_SNOC,
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+       .name = "slv_bimc_snoc",
+       .id = SM6115_SLAVE_BIMC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = 2,
+       .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+       .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+       .name = "qup0_core_slave",
+       .id = SM6115_SLAVE_QUP_CORE_0,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_ahb2phy_usb = {
+       .name = "qhs_ahb2phy_usb",
+       .id = SM6115_SLAVE_AHB2PHY_USB,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_apss_throttle_cfg = {
+       .name = "qhs_apss_throttle_cfg",
+       .id = SM6115_SLAVE_APSS_THROTTLE_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_bimc_cfg = {
+       .name = "qhs_bimc_cfg",
+       .id = SM6115_SLAVE_BIMC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_boot_rom = {
+       .name = "qhs_boot_rom",
+       .id = SM6115_SLAVE_BOOT_ROM,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_camera_nrt_throttle_cfg = {
+       .name = "qhs_camera_nrt_throttle_cfg",
+       .id = SM6115_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
+       .name = "qhs_camera_rt_throttle_cfg",
+       .id = SM6115_SLAVE_CAMERA_RT_THROTTLE_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_camera_ss_cfg = {
+       .name = "qhs_camera_ss_cfg",
+       .id = SM6115_SLAVE_CAMERA_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+       .name = "qhs_clk_ctl",
+       .id = SM6115_SLAVE_CLK_CTL,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+       .name = "qhs_cpr_cx",
+       .id = SM6115_SLAVE_RBCPR_CX_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+       .name = "qhs_cpr_mx",
+       .id = SM6115_SLAVE_RBCPR_MX_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+       .name = "qhs_crypto0_cfg",
+       .id = SM6115_SLAVE_CRYPTO_0_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+       .name = "qhs_dcc_cfg",
+       .id = SM6115_SLAVE_DCC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_ddr_phy_cfg = {
+       .name = "qhs_ddr_phy_cfg",
+       .id = SM6115_SLAVE_DDR_PHY_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_ddr_ss_cfg = {
+       .name = "qhs_ddr_ss_cfg",
+       .id = SM6115_SLAVE_DDR_SS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_disp_ss_cfg = {
+       .name = "qhs_disp_ss_cfg",
+       .id = SM6115_SLAVE_DISPLAY_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_display_throttle_cfg = {
+       .name = "qhs_display_throttle_cfg",
+       .id = SM6115_SLAVE_DISPLAY_THROTTLE_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_gpu_cfg = {
+       .name = "qhs_gpu_cfg",
+       .id = SM6115_SLAVE_GPU_CFG,
+       .channels = 1,
+       .buswidth = 8,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_gpu_throttle_cfg = {
+       .name = "qhs_gpu_throttle_cfg",
+       .id = SM6115_SLAVE_GPU_THROTTLE_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_hwkm = {
+       .name = "qhs_hwkm",
+       .id = SM6115_SLAVE_HWKM_CORE,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+       .name = "qhs_imem_cfg",
+       .id = SM6115_SLAVE_IMEM_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_ipa_cfg = {
+       .name = "qhs_ipa_cfg",
+       .id = SM6115_SLAVE_IPA_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_lpass = {
+       .name = "qhs_lpass",
+       .id = SM6115_SLAVE_LPASS,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_mapss = {
+       .name = "qhs_mapss",
+       .id = SM6115_SLAVE_MAPSS,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_mdsp_mpu_cfg = {
+       .name = "qhs_mdsp_mpu_cfg",
+       .id = SM6115_SLAVE_MDSP_MPU_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_mesg_ram = {
+       .name = "qhs_mesg_ram",
+       .id = SM6115_SLAVE_MESSAGE_RAM,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_mss = {
+       .name = "qhs_mss",
+       .id = SM6115_SLAVE_CNOC_MSS,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+       .name = "qhs_pdm",
+       .id = SM6115_SLAVE_PDM,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+       .name = "qhs_pimem_cfg",
+       .id = SM6115_SLAVE_PIMEM_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_pka_wrapper = {
+       .name = "qhs_pka_wrapper",
+       .id = SM6115_SLAVE_PKA_CORE,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_pmic_arb = {
+       .name = "qhs_pmic_arb",
+       .id = SM6115_SLAVE_PMIC_ARB,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+       .name = "qhs_qdss_cfg",
+       .id = SM6115_SLAVE_QDSS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_qm_cfg = {
+       .name = "qhs_qm_cfg",
+       .id = SM6115_SLAVE_QM_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_qm_mpu_cfg = {
+       .name = "qhs_qm_mpu_cfg",
+       .id = SM6115_SLAVE_QM_MPU_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_qpic = {
+       .name = "qhs_qpic",
+       .id = SM6115_SLAVE_QPIC,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+       .name = "qhs_qup0",
+       .id = SM6115_SLAVE_QUP_0,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_rpm = {
+       .name = "qhs_rpm",
+       .id = SM6115_SLAVE_RPM,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_sdc1 = {
+       .name = "qhs_sdc1",
+       .id = SM6115_SLAVE_SDCC_1,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+       .name = "qhs_sdc2",
+       .id = SM6115_SLAVE_SDCC_2,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_security = {
+       .name = "qhs_security",
+       .id = SM6115_SLAVE_SECURITY,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static const u16 slv_snoc_cfg_links[] = {
+       SM6115_MASTER_SNOC_CFG,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+       .name = "qhs_snoc_cfg",
+       .id = SM6115_SLAVE_SNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(slv_snoc_cfg_links),
+       .links = slv_snoc_cfg_links,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+       .name = "qhs_tcsr",
+       .id = SM6115_SLAVE_TCSR,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+       .name = "qhs_tlmm",
+       .id = SM6115_SLAVE_TLMM,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_usb3 = {
+       .name = "qhs_usb3",
+       .id = SM6115_SLAVE_USB3,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+       .name = "qhs_venus_cfg",
+       .id = SM6115_SLAVE_VENUS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_venus_throttle_cfg = {
+       .name = "qhs_venus_throttle_cfg",
+       .id = SM6115_SLAVE_VENUS_THROTTLE_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+       .name = "qhs_vsense_ctrl_cfg",
+       .id = SM6115_SLAVE_VSENSE_CTRL_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+       .name = "srvc_cnoc",
+       .id = SM6115_SLAVE_SERVICE_CNOC,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static const u16 slv_snoc_bimc_nrt_links[] = {
+       SM6115_MASTER_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node slv_snoc_bimc_nrt = {
+       .name = "slv_snoc_bimc_nrt",
+       .id = SM6115_SLAVE_SNOC_BIMC_NRT,
+       .channels = 1,
+       .buswidth = 16,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(slv_snoc_bimc_nrt_links),
+       .links = slv_snoc_bimc_nrt_links,
+};
+
+static const u16 slv_snoc_bimc_rt_links[] = {
+       SM6115_MASTER_SNOC_BIMC_RT,
+};
+
+static struct qcom_icc_node slv_snoc_bimc_rt = {
+       .name = "slv_snoc_bimc_rt",
+       .id = SM6115_SLAVE_SNOC_BIMC_RT,
+       .channels = 1,
+       .buswidth = 16,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(slv_snoc_bimc_rt_links),
+       .links = slv_snoc_bimc_rt_links,
+};
+
+static struct qcom_icc_node qhs_apss = {
+       .name = "qhs_apss",
+       .id = SM6115_SLAVE_APPSS,
+       .channels = 1,
+       .buswidth = 8,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static const u16 slv_snoc_cnoc_links[] = {
+       SM6115_MASTER_SNOC_CNOC
+};
+
+static struct qcom_icc_node slv_snoc_cnoc = {
+       .name = "slv_snoc_cnoc",
+       .id = SM6115_SLAVE_SNOC_CNOC,
+       .channels = 1,
+       .buswidth = 8,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = 25,
+       .num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
+       .links = slv_snoc_cnoc_links,
+};
+
+static struct qcom_icc_node qxs_imem = {
+       .name = "qxs_imem",
+       .id = SM6115_SLAVE_OCIMEM,
+       .channels = 1,
+       .buswidth = 8,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = 26,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+       .name = "qxs_pimem",
+       .id = SM6115_SLAVE_PIMEM,
+       .channels = 1,
+       .buswidth = 8,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static const u16 slv_snoc_bimc_links[] = {
+       SM6115_MASTER_SNOC_BIMC,
+};
+
+static struct qcom_icc_node slv_snoc_bimc = {
+       .name = "slv_snoc_bimc",
+       .id = SM6115_SLAVE_SNOC_BIMC,
+       .channels = 1,
+       .buswidth = 16,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = 24,
+       .num_links = ARRAY_SIZE(slv_snoc_bimc_links),
+       .links = slv_snoc_bimc_links,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+       .name = "srvc_snoc",
+       .id = SM6115_SLAVE_SERVICE_SNOC,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+       .name = "xs_qdss_stm",
+       .id = SM6115_SLAVE_QDSS_STM,
+       .channels = 1,
+       .buswidth = 4,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+       .name = "xs_sys_tcu_cfg",
+       .id = SM6115_SLAVE_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+};
+
+static const u16 slv_anoc_snoc_links[] = {
+       SM6115_MASTER_ANOC_SNOC,
+};
+
+static struct qcom_icc_node slv_anoc_snoc = {
+       .name = "slv_anoc_snoc",
+       .id = SM6115_SLAVE_ANOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .mas_rpm_id = -1,
+       .slv_rpm_id = -1,
+       .num_links = ARRAY_SIZE(slv_anoc_snoc_links),
+       .links = slv_anoc_snoc_links,
+};
+
+static struct qcom_icc_node *bimc_nodes[] = {
+       [MASTER_AMPSS_M0] = &apps_proc,
+       [MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt,
+       [MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt,
+       [SNOC_BIMC_MAS] = &mas_snoc_bimc,
+       [MASTER_GRAPHICS_3D] = &qnm_gpu,
+       [MASTER_TCU_0] = &tcu_0,
+       [SLAVE_EBI_CH0] = &ebi,
+       [BIMC_SNOC_SLV] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config bimc_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x80000,
+       .fast_io        = true,
+};
+
+static const struct qcom_icc_desc sm6115_bimc = {
+       .type = QCOM_ICC_BIMC,
+       .nodes = bimc_nodes,
+       .num_nodes = ARRAY_SIZE(bimc_nodes),
+       .regmap_cfg = &bimc_regmap_config,
+       .bus_clk_desc = &bimc_clk,
+       .keep_alive = true,
+       .qos_offset = 0x8000,
+       .ab_coeff = 153,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+       [SNOC_CNOC_MAS] = &mas_snoc_cnoc,
+       [MASTER_QDSS_DAP] = &xm_dap,
+       [SLAVE_AHB2PHY_USB] = &qhs_ahb2phy_usb,
+       [SLAVE_APSS_THROTTLE_CFG] = &qhs_apss_throttle_cfg,
+       [SLAVE_BIMC_CFG] = &qhs_bimc_cfg,
+       [SLAVE_BOOT_ROM] = &qhs_boot_rom,
+       [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_throttle_cfg,
+       [SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
+       [SLAVE_CAMERA_CFG] = &qhs_camera_ss_cfg,
+       [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+       [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+       [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+       [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+       [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+       [SLAVE_DDR_PHY_CFG] = &qhs_ddr_phy_cfg,
+       [SLAVE_DDR_SS_CFG] = &qhs_ddr_ss_cfg,
+       [SLAVE_DISPLAY_CFG] = &qhs_disp_ss_cfg,
+       [SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg,
+       [SLAVE_GPU_CFG] = &qhs_gpu_cfg,
+       [SLAVE_GPU_THROTTLE_CFG] = &qhs_gpu_throttle_cfg,
+       [SLAVE_HWKM_CORE] = &qhs_hwkm,
+       [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+       [SLAVE_IPA_CFG] = &qhs_ipa_cfg,
+       [SLAVE_LPASS] = &qhs_lpass,
+       [SLAVE_MAPSS] = &qhs_mapss,
+       [SLAVE_MDSP_MPU_CFG] = &qhs_mdsp_mpu_cfg,
+       [SLAVE_MESSAGE_RAM] = &qhs_mesg_ram,
+       [SLAVE_CNOC_MSS] = &qhs_mss,
+       [SLAVE_PDM] = &qhs_pdm,
+       [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+       [SLAVE_PKA_CORE] = &qhs_pka_wrapper,
+       [SLAVE_PMIC_ARB] = &qhs_pmic_arb,
+       [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+       [SLAVE_QM_CFG] = &qhs_qm_cfg,
+       [SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg,
+       [SLAVE_QPIC] = &qhs_qpic,
+       [SLAVE_QUP_0] = &qhs_qup0,
+       [SLAVE_RPM] = &qhs_rpm,
+       [SLAVE_SDCC_1] = &qhs_sdc1,
+       [SLAVE_SDCC_2] = &qhs_sdc2,
+       [SLAVE_SECURITY] = &qhs_security,
+       [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+       [SLAVE_TCSR] = &qhs_tcsr,
+       [SLAVE_TLMM] = &qhs_tlmm,
+       [SLAVE_USB3] = &qhs_usb3,
+       [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+       [SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg,
+       [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+       [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static const struct regmap_config cnoc_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x6200,
+       .fast_io        = true,
+};
+
+static const struct qcom_icc_desc sm6115_config_noc = {
+       .type = QCOM_ICC_QNOC,
+       .nodes = config_noc_nodes,
+       .num_nodes = ARRAY_SIZE(config_noc_nodes),
+       .regmap_cfg = &cnoc_regmap_config,
+       .intf_clocks = cnoc_intf_clocks,
+       .num_intf_clocks = ARRAY_SIZE(cnoc_intf_clocks),
+       .bus_clk_desc = &bus_1_clk,
+       .keep_alive = true,
+};
+
+static struct qcom_icc_node *sys_noc_nodes[] = {
+       [MASTER_CRYPTO_CORE0] = &crypto_c0,
+       [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+       [MASTER_TIC] = &qhm_tic,
+       [MASTER_ANOC_SNOC] = &mas_anoc_snoc,
+       [BIMC_SNOC_MAS] = &mas_bimc_snoc,
+       [MASTER_PIMEM] = &qxm_pimem,
+       [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+       [MASTER_QPIC] = &qhm_qpic,
+       [MASTER_QUP_0] = &qhm_qup0,
+       [MASTER_IPA] = &qxm_ipa,
+       [MASTER_QDSS_ETR] = &xm_qdss_etr,
+       [MASTER_SDCC_1] = &xm_sdc1,
+       [MASTER_SDCC_2] = &xm_sdc2,
+       [MASTER_USB3] = &xm_usb3_0,
+       [SLAVE_APPSS] = &qhs_apss,
+       [SNOC_CNOC_SLV] = &slv_snoc_cnoc,
+       [SLAVE_OCIMEM] = &qxs_imem,
+       [SLAVE_PIMEM] = &qxs_pimem,
+       [SNOC_BIMC_SLV] = &slv_snoc_bimc,
+       [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+       [SLAVE_QDSS_STM] = &xs_qdss_stm,
+       [SLAVE_TCU] = &xs_sys_tcu_cfg,
+       [SLAVE_ANOC_SNOC] = &slv_anoc_snoc,
+};
+
+static const struct regmap_config sys_noc_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x5f080,
+       .fast_io        = true,
+};
+
+static const struct qcom_icc_desc sm6115_sys_noc = {
+       .type = QCOM_ICC_QNOC,
+       .nodes = sys_noc_nodes,
+       .num_nodes = ARRAY_SIZE(sys_noc_nodes),
+       .regmap_cfg = &sys_noc_regmap_config,
+       .intf_clocks = snoc_intf_clocks,
+       .num_intf_clocks = ARRAY_SIZE(snoc_intf_clocks),
+       .bus_clk_desc = &bus_2_clk,
+       .keep_alive = true,
+};
+
+static struct qcom_icc_node *clk_virt_nodes[] = {
+       [MASTER_QUP_CORE_0] = &qup0_core_master,
+       [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+};
+
+static const struct qcom_icc_desc sm6115_clk_virt = {
+       .type = QCOM_ICC_QNOC,
+       .nodes = clk_virt_nodes,
+       .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+       .regmap_cfg = &sys_noc_regmap_config,
+       .bus_clk_desc = &qup_clk,
+       .keep_alive = true,
+};
+
+static struct qcom_icc_node *mmnrt_virt_nodes[] = {
+       [MASTER_CAMNOC_SF] = &qnm_camera_nrt,
+       [MASTER_VIDEO_P0] = &qxm_venus0,
+       [MASTER_VIDEO_PROC] = &qxm_venus_cpu,
+       [SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt,
+};
+
+static const struct qcom_icc_desc sm6115_mmnrt_virt = {
+       .type = QCOM_ICC_QNOC,
+       .nodes = mmnrt_virt_nodes,
+       .num_nodes = ARRAY_SIZE(mmnrt_virt_nodes),
+       .regmap_cfg = &sys_noc_regmap_config,
+       .bus_clk_desc = &mmaxi_0_clk,
+       .keep_alive = true,
+       .ab_coeff = 142,
+};
+
+static struct qcom_icc_node *mmrt_virt_nodes[] = {
+       [MASTER_CAMNOC_HF] = &qnm_camera_rt,
+       [MASTER_MDP_PORT0] = &qxm_mdp0,
+       [SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt,
+};
+
+static const struct qcom_icc_desc sm6115_mmrt_virt = {
+       .type = QCOM_ICC_QNOC,
+       .nodes = mmrt_virt_nodes,
+       .num_nodes = ARRAY_SIZE(mmrt_virt_nodes),
+       .regmap_cfg = &sys_noc_regmap_config,
+       .bus_clk_desc = &mmaxi_1_clk,
+       .keep_alive = true,
+       .ab_coeff = 139,
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+       { .compatible = "qcom,sm6115-bimc", .data = &sm6115_bimc },
+       { .compatible = "qcom,sm6115-clk-virt", .data = &sm6115_clk_virt },
+       { .compatible = "qcom,sm6115-cnoc", .data = &sm6115_config_noc },
+       { .compatible = "qcom,sm6115-mmrt-virt", .data = &sm6115_mmrt_virt },
+       { .compatible = "qcom,sm6115-mmnrt-virt", .data = &sm6115_mmnrt_virt },
+       { .compatible = "qcom,sm6115-snoc", .data = &sm6115_sys_noc },
+       { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+       .probe = qnoc_probe,
+       .remove_new = qnoc_remove,
+       .driver = {
+               .name = "qnoc-sm6115",
+               .of_match_table = qnoc_of_match,
+               .sync_state = icc_sync_state,
+       },
+};
+
+static int __init qnoc_driver_init(void)
+{
+       return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+       platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("SM6115 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sm8650.c b/drivers/interconnect/qcom/sm8650.c
new file mode 100644 (file)
index 0000000..b83de54
--- /dev/null
@@ -0,0 +1,1674 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sm8650-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-common.h"
+#include "icc-rpmh.h"
+#include "sm8650.h"
+
+static struct qcom_icc_node qhm_qspi = {
+       .name = "qhm_qspi",
+       .id = SM8650_MASTER_QSPI_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+       .name = "qhm_qup1",
+       .id = SM8650_MASTER_QUP_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_qup02 = {
+       .name = "qxm_qup02",
+       .id = SM8650_MASTER_QUP_3,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+       .name = "xm_sdc4",
+       .id = SM8650_MASTER_SDCC_4,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+       .name = "xm_ufs_mem",
+       .id = SM8650_MASTER_UFS_MEM,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+       .name = "xm_usb3_0",
+       .id = SM8650_MASTER_USB3_0,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+       .name = "qhm_qdss_bam",
+       .id = SM8650_MASTER_QDSS_BAM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+       .name = "qhm_qup2",
+       .id = SM8650_MASTER_QUP_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+       .name = "qxm_crypto",
+       .id = SM8650_MASTER_CRYPTO,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+       .name = "qxm_ipa",
+       .id = SM8650_MASTER_IPA,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+       .name = "qxm_sp",
+       .id = SM8650_MASTER_SP,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+       .name = "xm_qdss_etr_0",
+       .id = SM8650_MASTER_QDSS_ETR,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+       .name = "xm_qdss_etr_1",
+       .id = SM8650_MASTER_QDSS_ETR_1,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+       .name = "xm_sdc2",
+       .id = SM8650_MASTER_SDCC_2,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+       .name = "qup0_core_master",
+       .id = SM8650_MASTER_QUP_CORE_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+       .name = "qup1_core_master",
+       .id = SM8650_MASTER_QUP_CORE_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+       .name = "qup2_core_master",
+       .id = SM8650_MASTER_QUP_CORE_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qsm_cfg = {
+       .name = "qsm_cfg",
+       .id = SM8650_MASTER_CNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 46,
+       .links = { SM8650_SLAVE_AHB2PHY_SOUTH, SM8650_SLAVE_AHB2PHY_NORTH,
+                  SM8650_SLAVE_CAMERA_CFG, SM8650_SLAVE_CLK_CTL,
+                  SM8650_SLAVE_RBCPR_CX_CFG, SM8650_SLAVE_CPR_HMX,
+                  SM8650_SLAVE_RBCPR_MMCX_CFG, SM8650_SLAVE_RBCPR_MXA_CFG,
+                  SM8650_SLAVE_RBCPR_MXC_CFG, SM8650_SLAVE_CPR_NSPCX,
+                  SM8650_SLAVE_CRYPTO_0_CFG, SM8650_SLAVE_CX_RDPM,
+                  SM8650_SLAVE_DISPLAY_CFG, SM8650_SLAVE_GFX3D_CFG,
+                  SM8650_SLAVE_I2C, SM8650_SLAVE_I3C_IBI0_CFG,
+                  SM8650_SLAVE_I3C_IBI1_CFG, SM8650_SLAVE_IMEM_CFG,
+                  SM8650_SLAVE_CNOC_MSS, SM8650_SLAVE_MX_2_RDPM,
+                  SM8650_SLAVE_MX_RDPM, SM8650_SLAVE_PCIE_0_CFG,
+                  SM8650_SLAVE_PCIE_1_CFG, SM8650_SLAVE_PCIE_RSCC,
+                  SM8650_SLAVE_PDM, SM8650_SLAVE_PRNG,
+                  SM8650_SLAVE_QDSS_CFG, SM8650_SLAVE_QSPI_0,
+                  SM8650_SLAVE_QUP_3, SM8650_SLAVE_QUP_1,
+                  SM8650_SLAVE_QUP_2, SM8650_SLAVE_SDCC_2,
+                  SM8650_SLAVE_SDCC_4, SM8650_SLAVE_SPSS_CFG,
+                  SM8650_SLAVE_TCSR, SM8650_SLAVE_TLMM,
+                  SM8650_SLAVE_UFS_MEM_CFG, SM8650_SLAVE_USB3_0,
+                  SM8650_SLAVE_VENUS_CFG, SM8650_SLAVE_VSENSE_CTRL_CFG,
+                  SM8650_SLAVE_CNOC_MNOC_CFG, SM8650_SLAVE_NSP_QTB_CFG,
+                  SM8650_SLAVE_PCIE_ANOC_CFG, SM8650_SLAVE_SERVICE_CNOC_CFG,
+                  SM8650_SLAVE_QDSS_STM, SM8650_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+       .name = "qnm_gemnoc_cnoc",
+       .id = SM8650_MASTER_GEM_NOC_CNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 9,
+       .links = { SM8650_SLAVE_AOSS, SM8650_SLAVE_IPA_CFG,
+                  SM8650_SLAVE_IPC_ROUTER_CFG, SM8650_SLAVE_TME_CFG,
+                  SM8650_SLAVE_APPSS, SM8650_SLAVE_CNOC_CFG,
+                  SM8650_SLAVE_DDRSS_CFG, SM8650_SLAVE_IMEM,
+                  SM8650_SLAVE_SERVICE_CNOC },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+       .name = "qnm_gemnoc_pcie",
+       .id = SM8650_MASTER_GEM_NOC_PCIE_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 2,
+       .links = { SM8650_SLAVE_PCIE_0, SM8650_SLAVE_PCIE_1 },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+       .name = "alm_gpu_tcu",
+       .id = SM8650_MASTER_GPU_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+       .name = "alm_sys_tcu",
+       .id = SM8650_MASTER_SYS_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_ubwc_p_tcu = {
+       .name = "alm_ubwc_p_tcu",
+       .id = SM8650_MASTER_UBWC_P_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+       .name = "chm_apps",
+       .id = SM8650_MASTER_APPSS_PROC,
+       .channels = 3,
+       .buswidth = 32,
+       .num_links = 3,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
+                  SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+       .name = "qnm_gpu",
+       .id = SM8650_MASTER_GFX3D,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpass_gemnoc = {
+       .name = "qnm_lpass_gemnoc",
+       .id = SM8650_MASTER_LPASS_GEM_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 3,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
+                  SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mdsp = {
+       .name = "qnm_mdsp",
+       .id = SM8650_MASTER_MSS_PROC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 3,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
+                  SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+       .name = "qnm_mnoc_hf",
+       .id = SM8650_MASTER_MNOC_HF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+       .name = "qnm_mnoc_sf",
+       .id = SM8650_MASTER_MNOC_SF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_nsp_gemnoc = {
+       .name = "qnm_nsp_gemnoc",
+       .id = SM8650_MASTER_COMPUTE_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 3,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
+                  SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+       .name = "qnm_pcie",
+       .id = SM8650_MASTER_ANOC_PCIE_GEM_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 2,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+       .name = "qnm_snoc_sf",
+       .id = SM8650_MASTER_SNOC_SF_MEM_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 3,
+       .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
+                  SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_ubwc_p = {
+       .name = "qnm_ubwc_p",
+       .id = SM8650_MASTER_UBWC_P,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node xm_gic = {
+       .name = "xm_gic",
+       .id = SM8650_MASTER_GIC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+       .name = "qnm_lpiaon_noc",
+       .id = SM8650_MASTER_LPIAON_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+       .name = "qnm_lpass_lpinoc",
+       .id = SM8650_MASTER_LPASS_LPINOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qxm_lpinoc_dsp_axim = {
+       .name = "qxm_lpinoc_dsp_axim",
+       .id = SM8650_MASTER_LPASS_PROC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_LPICX_NOC_LPIAON_NOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+       .name = "llcc_mc",
+       .id = SM8650_MASTER_LLCC,
+       .channels = 4,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+       .name = "qnm_camnoc_hf",
+       .id = SM8650_MASTER_CAMNOC_HF,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+       .name = "qnm_camnoc_icp",
+       .id = SM8650_MASTER_CAMNOC_ICP,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+       .name = "qnm_camnoc_sf",
+       .id = SM8650_MASTER_CAMNOC_SF,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+       .name = "qnm_mdp",
+       .id = SM8650_MASTER_MDP,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+       .name = "qnm_vapss_hcp",
+       .id = SM8650_MASTER_CDSP_HCP,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video = {
+       .name = "qnm_video",
+       .id = SM8650_MASTER_VIDEO,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+       .name = "qnm_video_cv_cpu",
+       .id = SM8650_MASTER_VIDEO_CV_PROC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+       .name = "qnm_video_cvp",
+       .id = SM8650_MASTER_VIDEO_PROC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+       .name = "qnm_video_v_cpu",
+       .id = SM8650_MASTER_VIDEO_V_PROC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+       .name = "qsm_mnoc_cfg",
+       .id = SM8650_MASTER_CNOC_MNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_nsp = {
+       .name = "qnm_nsp",
+       .id = SM8650_MASTER_CDSP_PROC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_pcie_anoc_cfg = {
+       .name = "qsm_pcie_anoc_cfg",
+       .id = SM8650_MASTER_PCIE_ANOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_SERVICE_PCIE_ANOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+       .name = "xm_pcie3_0",
+       .id = SM8650_MASTER_PCIE_0,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+       .name = "xm_pcie3_1",
+       .id = SM8650_MASTER_PCIE_1,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+       .name = "qnm_aggre1_noc",
+       .id = SM8650_MASTER_A1NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+       .name = "qnm_aggre2_noc",
+       .id = SM8650_MASTER_A2NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+       .name = "qns_a1noc_snoc",
+       .id = SM8650_SLAVE_A1NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+       .name = "qns_a2noc_snoc",
+       .id = SM8650_SLAVE_A2NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+       .name = "qup0_core_slave",
+       .id = SM8650_SLAVE_QUP_CORE_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+       .name = "qup1_core_slave",
+       .id = SM8650_SLAVE_QUP_CORE_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+       .name = "qup2_core_slave",
+       .id = SM8650_SLAVE_QUP_CORE_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+       .name = "qhs_ahb2phy0",
+       .id = SM8650_SLAVE_AHB2PHY_SOUTH,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+       .name = "qhs_ahb2phy1",
+       .id = SM8650_SLAVE_AHB2PHY_NORTH,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+       .name = "qhs_camera_cfg",
+       .id = SM8650_SLAVE_CAMERA_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+       .name = "qhs_clk_ctl",
+       .id = SM8650_SLAVE_CLK_CTL,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+       .name = "qhs_cpr_cx",
+       .id = SM8650_SLAVE_RBCPR_CX_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_hmx = {
+       .name = "qhs_cpr_hmx",
+       .id = SM8650_SLAVE_CPR_HMX,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+       .name = "qhs_cpr_mmcx",
+       .id = SM8650_SLAVE_RBCPR_MMCX_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mxa = {
+       .name = "qhs_cpr_mxa",
+       .id = SM8650_SLAVE_RBCPR_MXA_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mxc = {
+       .name = "qhs_cpr_mxc",
+       .id = SM8650_SLAVE_RBCPR_MXC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_nspcx = {
+       .name = "qhs_cpr_nspcx",
+       .id = SM8650_SLAVE_CPR_NSPCX,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+       .name = "qhs_crypto0_cfg",
+       .id = SM8650_SLAVE_CRYPTO_0_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+       .name = "qhs_cx_rdpm",
+       .id = SM8650_SLAVE_CX_RDPM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+       .name = "qhs_display_cfg",
+       .id = SM8650_SLAVE_DISPLAY_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+       .name = "qhs_gpuss_cfg",
+       .id = SM8650_SLAVE_GFX3D_CFG,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i2c = {
+       .name = "qhs_i2c",
+       .id = SM8650_SLAVE_I2C,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i3c_ibi0_cfg = {
+       .name = "qhs_i3c_ibi0_cfg",
+       .id = SM8650_SLAVE_I3C_IBI0_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i3c_ibi1_cfg = {
+       .name = "qhs_i3c_ibi1_cfg",
+       .id = SM8650_SLAVE_I3C_IBI1_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+       .name = "qhs_imem_cfg",
+       .id = SM8650_SLAVE_IMEM_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+       .name = "qhs_mss_cfg",
+       .id = SM8650_SLAVE_CNOC_MSS,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mx_2_rdpm = {
+       .name = "qhs_mx_2_rdpm",
+       .id = SM8650_SLAVE_MX_2_RDPM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+       .name = "qhs_mx_rdpm",
+       .id = SM8650_SLAVE_MX_RDPM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+       .name = "qhs_pcie0_cfg",
+       .id = SM8650_SLAVE_PCIE_0_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+       .name = "qhs_pcie1_cfg",
+       .id = SM8650_SLAVE_PCIE_1_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie_rscc = {
+       .name = "qhs_pcie_rscc",
+       .id = SM8650_SLAVE_PCIE_RSCC,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+       .name = "qhs_pdm",
+       .id = SM8650_SLAVE_PDM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+       .name = "qhs_prng",
+       .id = SM8650_SLAVE_PRNG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+       .name = "qhs_qdss_cfg",
+       .id = SM8650_SLAVE_QDSS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+       .name = "qhs_qspi",
+       .id = SM8650_SLAVE_QSPI_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup02 = {
+       .name = "qhs_qup02",
+       .id = SM8650_SLAVE_QUP_3,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+       .name = "qhs_qup1",
+       .id = SM8650_SLAVE_QUP_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+       .name = "qhs_qup2",
+       .id = SM8650_SLAVE_QUP_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+       .name = "qhs_sdc2",
+       .id = SM8650_SLAVE_SDCC_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+       .name = "qhs_sdc4",
+       .id = SM8650_SLAVE_SDCC_4,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+       .name = "qhs_spss_cfg",
+       .id = SM8650_SLAVE_SPSS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+       .name = "qhs_tcsr",
+       .id = SM8650_SLAVE_TCSR,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+       .name = "qhs_tlmm",
+       .id = SM8650_SLAVE_TLMM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+       .name = "qhs_ufs_mem_cfg",
+       .id = SM8650_SLAVE_UFS_MEM_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+       .name = "qhs_usb3_0",
+       .id = SM8650_SLAVE_USB3_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+       .name = "qhs_venus_cfg",
+       .id = SM8650_SLAVE_VENUS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+       .name = "qhs_vsense_ctrl_cfg",
+       .id = SM8650_SLAVE_VSENSE_CTRL_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+       .name = "qss_mnoc_cfg",
+       .id = SM8650_SLAVE_CNOC_MNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qss_nsp_qtb_cfg = {
+       .name = "qss_nsp_qtb_cfg",
+       .id = SM8650_SLAVE_NSP_QTB_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qss_pcie_anoc_cfg = {
+       .name = "qss_pcie_anoc_cfg",
+       .id = SM8650_SLAVE_PCIE_ANOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_MASTER_PCIE_ANOC_CFG },
+};
+
+static struct qcom_icc_node srvc_cnoc_cfg = {
+       .name = "srvc_cnoc_cfg",
+       .id = SM8650_SLAVE_SERVICE_CNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+       .name = "xs_qdss_stm",
+       .id = SM8650_SLAVE_QDSS_STM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+       .name = "xs_sys_tcu_cfg",
+       .id = SM8650_SLAVE_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+       .name = "qhs_aoss",
+       .id = SM8650_SLAVE_AOSS,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+       .name = "qhs_ipa",
+       .id = SM8650_SLAVE_IPA_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+       .name = "qhs_ipc_router",
+       .id = SM8650_SLAVE_IPC_ROUTER_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+       .name = "qhs_tme_cfg",
+       .id = SM8650_SLAVE_TME_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qss_apss = {
+       .name = "qss_apss",
+       .id = SM8650_SLAVE_APPSS,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qss_cfg = {
+       .name = "qss_cfg",
+       .id = SM8650_SLAVE_CNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { SM8650_MASTER_CNOC_CFG },
+};
+
+static struct qcom_icc_node qss_ddrss_cfg = {
+       .name = "qss_ddrss_cfg",
+       .id = SM8650_SLAVE_DDRSS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_imem = {
+       .name = "qxs_imem",
+       .id = SM8650_SLAVE_IMEM,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_cnoc_main = {
+       .name = "srvc_cnoc_main",
+       .id = SM8650_SLAVE_SERVICE_CNOC,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+       .name = "xs_pcie_0",
+       .id = SM8650_SLAVE_PCIE_0,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+       .name = "xs_pcie_1",
+       .id = SM8650_SLAVE_PCIE_1,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+       .name = "qns_gem_noc_cnoc",
+       .id = SM8650_SLAVE_GEM_NOC_CNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+       .name = "qns_llcc",
+       .id = SM8650_SLAVE_LLCC,
+       .channels = 4,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+       .name = "qns_pcie",
+       .id = SM8650_SLAVE_MEM_NOC_PCIE_SNOC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { SM8650_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+       .name = "qns_lpass_ag_noc_gemnoc",
+       .id = SM8650_SLAVE_LPASS_GEM_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_MASTER_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+       .name = "qns_lpass_aggnoc",
+       .id = SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_MASTER_LPIAON_NOC },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+       .name = "qns_lpi_aon_noc",
+       .id = SM8650_SLAVE_LPICX_NOC_LPIAON_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_MASTER_LPASS_LPINOC },
+};
+
+static struct qcom_icc_node ebi = {
+       .name = "ebi",
+       .id = SM8650_SLAVE_EBI1,
+       .channels = 4,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+       .name = "qns_mem_noc_hf",
+       .id = SM8650_SLAVE_MNOC_HF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+       .name = "qns_mem_noc_sf",
+       .id = SM8650_SLAVE_MNOC_SF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+       .name = "srvc_mnoc",
+       .id = SM8650_SLAVE_SERVICE_MNOC,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+       .name = "qns_nsp_gemnoc",
+       .id = SM8650_SLAVE_CDSP_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { SM8650_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+       .name = "qns_pcie_mem_noc",
+       .id = SM8650_SLAVE_ANOC_PCIE_GEM_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_pcie_aggre_noc = {
+       .name = "srvc_pcie_aggre_noc",
+       .id = SM8650_SLAVE_SERVICE_PCIE_ANOC,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+       .name = "qns_gemnoc_sf",
+       .id = SM8650_SLAVE_SNOC_GEM_NOC_SF,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { SM8650_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+       .name = "ACV",
+       .enable_mask = BIT(3),
+       .num_nodes = 1,
+       .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+       .name = "CE0",
+       .num_nodes = 1,
+       .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+       .name = "CN0",
+       .enable_mask = BIT(0),
+       .keepalive = true,
+       .num_nodes = 59,
+       .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+                  &qhs_ahb2phy1, &qhs_camera_cfg,
+                  &qhs_clk_ctl, &qhs_cpr_cx,
+                  &qhs_cpr_hmx, &qhs_cpr_mmcx,
+                  &qhs_cpr_mxa, &qhs_cpr_mxc,
+                  &qhs_cpr_nspcx, &qhs_crypto0_cfg,
+                  &qhs_cx_rdpm, &qhs_display_cfg,
+                  &qhs_gpuss_cfg, &qhs_i2c,
+                  &qhs_i3c_ibi0_cfg, &qhs_i3c_ibi1_cfg,
+                  &qhs_imem_cfg, &qhs_mss_cfg,
+                  &qhs_mx_2_rdpm, &qhs_mx_rdpm,
+                  &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+                  &qhs_pcie_rscc, &qhs_pdm,
+                  &qhs_prng, &qhs_qdss_cfg,
+                  &qhs_qspi, &qhs_qup02,
+                  &qhs_qup1, &qhs_qup2,
+                  &qhs_sdc2, &qhs_sdc4,
+                  &qhs_spss_cfg, &qhs_tcsr,
+                  &qhs_tlmm, &qhs_ufs_mem_cfg,
+                  &qhs_usb3_0, &qhs_venus_cfg,
+                  &qhs_vsense_ctrl_cfg, &qss_mnoc_cfg,
+                  &qss_nsp_qtb_cfg, &qss_pcie_anoc_cfg,
+                  &srvc_cnoc_cfg, &xs_qdss_stm,
+                  &xs_sys_tcu_cfg, &qnm_gemnoc_cnoc,
+                  &qnm_gemnoc_pcie, &qhs_aoss,
+                  &qhs_ipa, &qhs_ipc_router,
+                  &qhs_tme_cfg, &qss_apss,
+                  &qss_cfg, &qss_ddrss_cfg,
+                  &qxs_imem, &srvc_cnoc_main,
+                  &xs_pcie_0, &xs_pcie_1 },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+       .name = "CO0",
+       .enable_mask = BIT(0),
+       .num_nodes = 2,
+       .nodes = { &qnm_nsp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+       .name = "LP0",
+       .num_nodes = 2,
+       .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+       .name = "MC0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+       .name = "MM0",
+       .num_nodes = 1,
+       .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+       .name = "MM1",
+       .enable_mask = BIT(0),
+       .num_nodes = 8,
+       .nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
+                  &qnm_camnoc_sf, &qnm_vapss_hcp,
+                  &qnm_video_cv_cpu, &qnm_video_cvp,
+                  &qnm_video_v_cpu, &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+       .name = "QUP0",
+       .keepalive = true,
+       .vote_scale = 1,
+       .num_nodes = 1,
+       .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+       .name = "QUP1",
+       .keepalive = true,
+       .vote_scale = 1,
+       .num_nodes = 1,
+       .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+       .name = "QUP2",
+       .keepalive = true,
+       .vote_scale = 1,
+       .num_nodes = 1,
+       .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+       .name = "SH0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+       .name = "SH1",
+       .enable_mask = BIT(0),
+       .num_nodes = 15,
+       .nodes = { &alm_gpu_tcu, &alm_sys_tcu,
+                  &alm_ubwc_p_tcu, &chm_apps,
+                  &qnm_gpu, &qnm_mdsp,
+                  &qnm_mnoc_hf, &qnm_mnoc_sf,
+                  &qnm_nsp_gemnoc, &qnm_pcie,
+                  &qnm_snoc_sf, &qnm_ubwc_p,
+                  &xm_gic, &qns_gem_noc_cnoc,
+                  &qns_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+       .name = "SN0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+       .name = "SN2",
+       .num_nodes = 1,
+       .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+       .name = "SN3",
+       .num_nodes = 1,
+       .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+       .name = "SN4",
+       .num_nodes = 1,
+       .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+       [MASTER_QSPI_0] = &qhm_qspi,
+       [MASTER_QUP_1] = &qhm_qup1,
+       [MASTER_QUP_3] = &qxm_qup02,
+       [MASTER_SDCC_4] = &xm_sdc4,
+       [MASTER_UFS_MEM] = &xm_ufs_mem,
+       [MASTER_USB3_0] = &xm_usb3_0,
+       [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct qcom_icc_desc sm8650_aggre1_noc = {
+       .nodes = aggre1_noc_nodes,
+       .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+       &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+       [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+       [MASTER_QUP_2] = &qhm_qup2,
+       [MASTER_CRYPTO] = &qxm_crypto,
+       [MASTER_IPA] = &qxm_ipa,
+       [MASTER_SP] = &qxm_sp,
+       [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+       [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+       [MASTER_SDCC_2] = &xm_sdc2,
+       [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct qcom_icc_desc sm8650_aggre2_noc = {
+       .nodes = aggre2_noc_nodes,
+       .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+       .bcms = aggre2_noc_bcms,
+       .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+       &bcm_qup0,
+       &bcm_qup1,
+       &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+       [MASTER_QUP_CORE_0] = &qup0_core_master,
+       [MASTER_QUP_CORE_1] = &qup1_core_master,
+       [MASTER_QUP_CORE_2] = &qup2_core_master,
+       [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+       [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+       [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc sm8650_clk_virt = {
+       .nodes = clk_virt_nodes,
+       .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+       .bcms = clk_virt_bcms,
+       .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+       &bcm_cn0,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+       [MASTER_CNOC_CFG] = &qsm_cfg,
+       [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+       [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+       [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+       [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+       [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+       [SLAVE_CPR_HMX] = &qhs_cpr_hmx,
+       [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+       [SLAVE_RBCPR_MXA_CFG] = &qhs_cpr_mxa,
+       [SLAVE_RBCPR_MXC_CFG] = &qhs_cpr_mxc,
+       [SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx,
+       [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+       [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+       [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+       [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+       [SLAVE_I2C] = &qhs_i2c,
+       [SLAVE_I3C_IBI0_CFG] = &qhs_i3c_ibi0_cfg,
+       [SLAVE_I3C_IBI1_CFG] = &qhs_i3c_ibi1_cfg,
+       [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+       [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+       [SLAVE_MX_2_RDPM] = &qhs_mx_2_rdpm,
+       [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+       [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+       [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+       [SLAVE_PCIE_RSCC] = &qhs_pcie_rscc,
+       [SLAVE_PDM] = &qhs_pdm,
+       [SLAVE_PRNG] = &qhs_prng,
+       [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+       [SLAVE_QSPI_0] = &qhs_qspi,
+       [SLAVE_QUP_3] = &qhs_qup02,
+       [SLAVE_QUP_1] = &qhs_qup1,
+       [SLAVE_QUP_2] = &qhs_qup2,
+       [SLAVE_SDCC_2] = &qhs_sdc2,
+       [SLAVE_SDCC_4] = &qhs_sdc4,
+       [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+       [SLAVE_TCSR] = &qhs_tcsr,
+       [SLAVE_TLMM] = &qhs_tlmm,
+       [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+       [SLAVE_USB3_0] = &qhs_usb3_0,
+       [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+       [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+       [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+       [SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg,
+       [SLAVE_PCIE_ANOC_CFG] = &qss_pcie_anoc_cfg,
+       [SLAVE_SERVICE_CNOC_CFG] = &srvc_cnoc_cfg,
+       [SLAVE_QDSS_STM] = &xs_qdss_stm,
+       [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sm8650_config_noc = {
+       .nodes = config_noc_nodes,
+       .num_nodes = ARRAY_SIZE(config_noc_nodes),
+       .bcms = config_noc_bcms,
+       .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+       &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+       [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+       [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+       [SLAVE_AOSS] = &qhs_aoss,
+       [SLAVE_IPA_CFG] = &qhs_ipa,
+       [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+       [SLAVE_TME_CFG] = &qhs_tme_cfg,
+       [SLAVE_APPSS] = &qss_apss,
+       [SLAVE_CNOC_CFG] = &qss_cfg,
+       [SLAVE_DDRSS_CFG] = &qss_ddrss_cfg,
+       [SLAVE_IMEM] = &qxs_imem,
+       [SLAVE_SERVICE_CNOC] = &srvc_cnoc_main,
+       [SLAVE_PCIE_0] = &xs_pcie_0,
+       [SLAVE_PCIE_1] = &xs_pcie_1,
+};
+
+static const struct qcom_icc_desc sm8650_cnoc_main = {
+       .nodes = cnoc_main_nodes,
+       .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+       .bcms = cnoc_main_bcms,
+       .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+       &bcm_sh0,
+       &bcm_sh1,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+       [MASTER_GPU_TCU] = &alm_gpu_tcu,
+       [MASTER_SYS_TCU] = &alm_sys_tcu,
+       [MASTER_UBWC_P_TCU] = &alm_ubwc_p_tcu,
+       [MASTER_APPSS_PROC] = &chm_apps,
+       [MASTER_GFX3D] = &qnm_gpu,
+       [MASTER_LPASS_GEM_NOC] = &qnm_lpass_gemnoc,
+       [MASTER_MSS_PROC] = &qnm_mdsp,
+       [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+       [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+       [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc,
+       [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+       [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+       [MASTER_UBWC_P] = &qnm_ubwc_p,
+       [MASTER_GIC] = &xm_gic,
+       [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+       [SLAVE_LLCC] = &qns_llcc,
+       [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+};
+
+static const struct qcom_icc_desc sm8650_gem_noc = {
+       .nodes = gem_noc_nodes,
+       .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+       .bcms = gem_noc_bcms,
+       .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+       [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+       [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct qcom_icc_desc sm8650_lpass_ag_noc = {
+       .nodes = lpass_ag_noc_nodes,
+       .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+       &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+       [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+       [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct qcom_icc_desc sm8650_lpass_lpiaon_noc = {
+       .nodes = lpass_lpiaon_noc_nodes,
+       .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+       .bcms = lpass_lpiaon_noc_bcms,
+       .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+       [MASTER_LPASS_PROC] = &qxm_lpinoc_dsp_axim,
+       [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct qcom_icc_desc sm8650_lpass_lpicx_noc = {
+       .nodes = lpass_lpicx_noc_nodes,
+       .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+       &bcm_acv,
+       &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+       [MASTER_LLCC] = &llcc_mc,
+       [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sm8650_mc_virt = {
+       .nodes = mc_virt_nodes,
+       .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+       .bcms = mc_virt_bcms,
+       .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+       &bcm_mm0,
+       &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+       [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+       [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+       [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+       [MASTER_MDP] = &qnm_mdp,
+       [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+       [MASTER_VIDEO] = &qnm_video,
+       [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+       [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+       [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+       [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+       [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+       [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+       [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sm8650_mmss_noc = {
+       .nodes = mmss_noc_nodes,
+       .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+       .bcms = mmss_noc_bcms,
+       .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+       &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+       [MASTER_CDSP_PROC] = &qnm_nsp,
+       [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+};
+
+static const struct qcom_icc_desc sm8650_nsp_noc = {
+       .nodes = nsp_noc_nodes,
+       .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+       .bcms = nsp_noc_bcms,
+       .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
+       &bcm_sn4,
+};
+
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
+       [MASTER_PCIE_ANOC_CFG] = &qsm_pcie_anoc_cfg,
+       [MASTER_PCIE_0] = &xm_pcie3_0,
+       [MASTER_PCIE_1] = &xm_pcie3_1,
+       [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+       [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
+};
+
+static const struct qcom_icc_desc sm8650_pcie_anoc = {
+       .nodes = pcie_anoc_nodes,
+       .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
+       .bcms = pcie_anoc_bcms,
+       .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+       &bcm_sn0,
+       &bcm_sn2,
+       &bcm_sn3,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+       [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+       [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+       [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct qcom_icc_desc sm8650_system_noc = {
+       .nodes = system_noc_nodes,
+       .num_nodes = ARRAY_SIZE(system_noc_nodes),
+       .bcms = system_noc_bcms,
+       .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+       { .compatible = "qcom,sm8650-aggre1-noc", .data = &sm8650_aggre1_noc },
+       { .compatible = "qcom,sm8650-aggre2-noc", .data = &sm8650_aggre2_noc },
+       { .compatible = "qcom,sm8650-clk-virt", .data = &sm8650_clk_virt },
+       { .compatible = "qcom,sm8650-config-noc", .data = &sm8650_config_noc },
+       { .compatible = "qcom,sm8650-cnoc-main", .data = &sm8650_cnoc_main },
+       { .compatible = "qcom,sm8650-gem-noc", .data = &sm8650_gem_noc },
+       { .compatible = "qcom,sm8650-lpass-ag-noc", .data = &sm8650_lpass_ag_noc },
+       { .compatible = "qcom,sm8650-lpass-lpiaon-noc", .data = &sm8650_lpass_lpiaon_noc },
+       { .compatible = "qcom,sm8650-lpass-lpicx-noc", .data = &sm8650_lpass_lpicx_noc },
+       { .compatible = "qcom,sm8650-mc-virt", .data = &sm8650_mc_virt },
+       { .compatible = "qcom,sm8650-mmss-noc", .data = &sm8650_mmss_noc },
+       { .compatible = "qcom,sm8650-nsp-noc", .data = &sm8650_nsp_noc },
+       { .compatible = "qcom,sm8650-pcie-anoc", .data = &sm8650_pcie_anoc },
+       { .compatible = "qcom,sm8650-system-noc", .data = &sm8650_system_noc },
+       { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+       .probe = qcom_icc_rpmh_probe,
+       .remove_new = qcom_icc_rpmh_remove,
+       .driver = {
+               .name = "qnoc-sm8650",
+               .of_match_table = qnoc_of_match,
+               .sync_state = icc_sync_state,
+       },
+};
+
+static int __init qnoc_driver_init(void)
+{
+       return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+       platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("sm8650 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sm8650.h b/drivers/interconnect/qcom/sm8650.h
new file mode 100644 (file)
index 0000000..de35c95
--- /dev/null
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SM8650 interconnect IDs
+ *
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8650_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM8650_H
+
+#define SM8650_MASTER_A1NOC_SNOC               0
+#define SM8650_MASTER_A2NOC_SNOC               1
+#define SM8650_MASTER_ANOC_PCIE_GEM_NOC                2
+#define SM8650_MASTER_APPSS_PROC               3
+#define SM8650_MASTER_CAMNOC_HF                        4
+#define SM8650_MASTER_CAMNOC_ICP               5
+#define SM8650_MASTER_CAMNOC_SF                        6
+#define SM8650_MASTER_CDSP_HCP                 7
+#define SM8650_MASTER_CDSP_PROC                        8
+#define SM8650_MASTER_CNOC_CFG                 9
+#define SM8650_MASTER_CNOC_MNOC_CFG            10
+#define SM8650_MASTER_COMPUTE_NOC              11
+#define SM8650_MASTER_CRYPTO                   12
+#define SM8650_MASTER_GEM_NOC_CNOC             13
+#define SM8650_MASTER_GEM_NOC_PCIE_SNOC                14
+#define SM8650_MASTER_GFX3D                    15
+#define SM8650_MASTER_GIC                      16
+#define SM8650_MASTER_GPU_TCU                  17
+#define SM8650_MASTER_IPA                      18
+#define SM8650_MASTER_LLCC                     19
+#define SM8650_MASTER_LPASS_GEM_NOC            20
+#define SM8650_MASTER_LPASS_LPINOC             21
+#define SM8650_MASTER_LPASS_PROC               22
+#define SM8650_MASTER_LPIAON_NOC               23
+#define SM8650_MASTER_MDP                      24
+#define SM8650_MASTER_MNOC_HF_MEM_NOC          25
+#define SM8650_MASTER_MNOC_SF_MEM_NOC          26
+#define SM8650_MASTER_MSS_PROC                 27
+#define SM8650_MASTER_PCIE_0                   28
+#define SM8650_MASTER_PCIE_1                   29
+#define SM8650_MASTER_PCIE_ANOC_CFG            30
+#define SM8650_MASTER_QDSS_BAM                 31
+#define SM8650_MASTER_QDSS_ETR                 32
+#define SM8650_MASTER_QDSS_ETR_1               33
+#define SM8650_MASTER_QSPI_0                   34
+#define SM8650_MASTER_QUP_1                    35
+#define SM8650_MASTER_QUP_2                    36
+#define SM8650_MASTER_QUP_3                    37
+#define SM8650_MASTER_QUP_CORE_0               38
+#define SM8650_MASTER_QUP_CORE_1               39
+#define SM8650_MASTER_QUP_CORE_2               40
+#define SM8650_MASTER_SDCC_2                   41
+#define SM8650_MASTER_SDCC_4                   42
+#define SM8650_MASTER_SNOC_SF_MEM_NOC          43
+#define SM8650_MASTER_SP                       44
+#define SM8650_MASTER_SYS_TCU                  45
+#define SM8650_MASTER_UBWC_P                   46
+#define SM8650_MASTER_UBWC_P_TCU               47
+#define SM8650_MASTER_UFS_MEM                  48
+#define SM8650_MASTER_USB3_0                   49
+#define SM8650_MASTER_VIDEO                    50
+#define SM8650_MASTER_VIDEO_CV_PROC            51
+#define SM8650_MASTER_VIDEO_PROC               52
+#define SM8650_MASTER_VIDEO_V_PROC             53
+#define SM8650_SLAVE_A1NOC_SNOC                        54
+#define SM8650_SLAVE_A2NOC_SNOC                        55
+#define SM8650_SLAVE_AHB2PHY_NORTH             56
+#define SM8650_SLAVE_AHB2PHY_SOUTH             57
+#define SM8650_SLAVE_ANOC_PCIE_GEM_NOC         58
+#define SM8650_SLAVE_AOSS                      59
+#define SM8650_SLAVE_APPSS                     60
+#define SM8650_SLAVE_CAMERA_CFG                        61
+#define SM8650_SLAVE_CDSP_MEM_NOC              62
+#define SM8650_SLAVE_CLK_CTL                   63
+#define SM8650_SLAVE_CNOC_CFG                  64
+#define SM8650_SLAVE_CNOC_MNOC_CFG             65
+#define SM8650_SLAVE_CNOC_MSS                  66
+#define SM8650_SLAVE_CPR_HMX                   67
+#define SM8650_SLAVE_CPR_NSPCX                 68
+#define SM8650_SLAVE_CRYPTO_0_CFG              69
+#define SM8650_SLAVE_CX_RDPM                   70
+#define SM8650_SLAVE_DDRSS_CFG                 71
+#define SM8650_SLAVE_DISPLAY_CFG               72
+#define SM8650_SLAVE_EBI1                      73
+#define SM8650_SLAVE_GEM_NOC_CNOC              74
+#define SM8650_SLAVE_GFX3D_CFG                 75
+#define SM8650_SLAVE_I2C                       76
+#define SM8650_SLAVE_I3C_IBI0_CFG              77
+#define SM8650_SLAVE_I3C_IBI1_CFG              78
+#define SM8650_SLAVE_IMEM                      79
+#define SM8650_SLAVE_IMEM_CFG                  80
+#define SM8650_SLAVE_IPA_CFG                   81
+#define SM8650_SLAVE_IPC_ROUTER_CFG            82
+#define SM8650_SLAVE_LLCC                      83
+#define SM8650_SLAVE_LPASS_GEM_NOC             84
+#define SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC   85
+#define SM8650_SLAVE_LPICX_NOC_LPIAON_NOC      86
+#define SM8650_SLAVE_MEM_NOC_PCIE_SNOC         87
+#define SM8650_SLAVE_MNOC_HF_MEM_NOC           88
+#define SM8650_SLAVE_MNOC_SF_MEM_NOC           89
+#define SM8650_SLAVE_MX_2_RDPM                 90
+#define SM8650_SLAVE_MX_RDPM                   91
+#define SM8650_SLAVE_NSP_QTB_CFG               92
+#define SM8650_SLAVE_PCIE_0                    93
+#define SM8650_SLAVE_PCIE_1                    94
+#define SM8650_SLAVE_PCIE_0_CFG                        95
+#define SM8650_SLAVE_PCIE_1_CFG                        96
+#define SM8650_SLAVE_PCIE_ANOC_CFG             97
+#define SM8650_SLAVE_PCIE_RSCC                 98
+#define SM8650_SLAVE_PDM                       99
+#define SM8650_SLAVE_PRNG                      100
+#define SM8650_SLAVE_QDSS_CFG                  101
+#define SM8650_SLAVE_QDSS_STM                  102
+#define SM8650_SLAVE_QSPI_0                    103
+#define SM8650_SLAVE_QUP_1                     104
+#define SM8650_SLAVE_QUP_2                     105
+#define SM8650_SLAVE_QUP_3                     106
+#define SM8650_SLAVE_QUP_CORE_0                        107
+#define SM8650_SLAVE_QUP_CORE_1                        108
+#define SM8650_SLAVE_QUP_CORE_2                        109
+#define SM8650_SLAVE_RBCPR_CX_CFG              110
+#define SM8650_SLAVE_RBCPR_MMCX_CFG            111
+#define SM8650_SLAVE_RBCPR_MXA_CFG             112
+#define SM8650_SLAVE_RBCPR_MXC_CFG             113
+#define SM8650_SLAVE_SDCC_2                    114
+#define SM8650_SLAVE_SDCC_4                    115
+#define SM8650_SLAVE_SERVICE_CNOC              116
+#define SM8650_SLAVE_SERVICE_CNOC_CFG          117
+#define SM8650_SLAVE_SERVICE_MNOC              118
+#define SM8650_SLAVE_SERVICE_PCIE_ANOC         119
+#define SM8650_SLAVE_SNOC_GEM_NOC_SF           120
+#define SM8650_SLAVE_SPSS_CFG                  121
+#define SM8650_SLAVE_TCSR                      122
+#define SM8650_SLAVE_TCU                       123
+#define SM8650_SLAVE_TLMM                      124
+#define SM8650_SLAVE_TME_CFG                   125
+#define SM8650_SLAVE_UFS_MEM_CFG               126
+#define SM8650_SLAVE_USB3_0                    127
+#define SM8650_SLAVE_VENUS_CFG                 128
+#define SM8650_SLAVE_VSENSE_CTRL_CFG           129
+
+#endif
index 16a145a3c914467cea26d5c1c49a13d5e398cb3a..3816bfb4e2f39eaa31a5322146d28ba4fd604f1f 100644 (file)
@@ -63,11 +63,9 @@ int qcom_icc_rpm_set_bus_rate(const struct rpm_clk_resource *clk, int ctx, u32 r
 }
 EXPORT_SYMBOL_GPL(qcom_icc_rpm_set_bus_rate);
 
-static int qcom_icc_rpm_smd_remove(struct platform_device *pdev)
+static void qcom_icc_rpm_smd_remove(struct platform_device *pdev)
 {
        icc_smd_rpm = NULL;
-
-       return 0;
 }
 
 static int qcom_icc_rpm_smd_probe(struct platform_device *pdev)
@@ -87,7 +85,7 @@ static struct platform_driver qcom_interconnect_rpm_smd_driver = {
                .name           = "icc_smd_rpm",
        },
        .probe = qcom_icc_rpm_smd_probe,
-       .remove = qcom_icc_rpm_smd_remove,
+       .remove_new = qcom_icc_rpm_smd_remove,
 };
 module_platform_driver(qcom_interconnect_rpm_smd_driver);
 MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
new file mode 100644 (file)
index 0000000..d19501d
--- /dev/null
@@ -0,0 +1,2328 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,x1e80100-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-common.h"
+#include "icc-rpmh.h"
+#include "x1e80100.h"
+
+static struct qcom_icc_node qhm_qspi = {
+       .name = "qhm_qspi",
+       .id = X1E80100_MASTER_QSPI_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+       .name = "qhm_qup1",
+       .id = X1E80100_MASTER_QUP_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+       .name = "xm_sdc4",
+       .id = X1E80100_MASTER_SDCC_4,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+       .name = "xm_ufs_mem",
+       .id = X1E80100_MASTER_UFS_MEM,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+       .name = "qhm_qup0",
+       .id = X1E80100_MASTER_QUP_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+       .name = "qhm_qup2",
+       .id = X1E80100_MASTER_QUP_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+       .name = "qxm_crypto",
+       .id = X1E80100_MASTER_CRYPTO,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+       .name = "qxm_sp",
+       .id = X1E80100_MASTER_SP,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+       .name = "xm_qdss_etr_0",
+       .id = X1E80100_MASTER_QDSS_ETR,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+       .name = "xm_qdss_etr_1",
+       .id = X1E80100_MASTER_QDSS_ETR_1,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+       .name = "xm_sdc2",
+       .id = X1E80100_MASTER_SDCC_2,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node ddr_perf_mode_master = {
+       .name = "ddr_perf_mode_master",
+       .id = X1E80100_MASTER_DDR_PERF_MODE,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_DDR_PERF_MODE },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+       .name = "qup0_core_master",
+       .id = X1E80100_MASTER_QUP_CORE_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+       .name = "qup1_core_master",
+       .id = X1E80100_MASTER_QUP_CORE_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+       .name = "qup2_core_master",
+       .id = X1E80100_MASTER_QUP_CORE_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qsm_cfg = {
+       .name = "qsm_cfg",
+       .id = X1E80100_MASTER_CNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 47,
+       .links = { X1E80100_SLAVE_AHB2PHY_SOUTH, X1E80100_SLAVE_AHB2PHY_NORTH,
+                  X1E80100_SLAVE_AHB2PHY_2, X1E80100_SLAVE_AV1_ENC_CFG,
+                  X1E80100_SLAVE_CAMERA_CFG, X1E80100_SLAVE_CLK_CTL,
+                  X1E80100_SLAVE_CRYPTO_0_CFG, X1E80100_SLAVE_DISPLAY_CFG,
+                  X1E80100_SLAVE_GFX3D_CFG, X1E80100_SLAVE_IMEM_CFG,
+                  X1E80100_SLAVE_IPC_ROUTER_CFG, X1E80100_SLAVE_PCIE_0_CFG,
+                  X1E80100_SLAVE_PCIE_1_CFG, X1E80100_SLAVE_PCIE_2_CFG,
+                  X1E80100_SLAVE_PCIE_3_CFG, X1E80100_SLAVE_PCIE_4_CFG,
+                  X1E80100_SLAVE_PCIE_5_CFG, X1E80100_SLAVE_PCIE_6A_CFG,
+                  X1E80100_SLAVE_PCIE_6B_CFG, X1E80100_SLAVE_PCIE_RSC_CFG,
+                  X1E80100_SLAVE_PDM, X1E80100_SLAVE_PRNG,
+                  X1E80100_SLAVE_QDSS_CFG, X1E80100_SLAVE_QSPI_0,
+                  X1E80100_SLAVE_QUP_0, X1E80100_SLAVE_QUP_1,
+                  X1E80100_SLAVE_QUP_2, X1E80100_SLAVE_SDCC_2,
+                  X1E80100_SLAVE_SDCC_4, X1E80100_SLAVE_SMMUV3_CFG,
+                  X1E80100_SLAVE_TCSR, X1E80100_SLAVE_TLMM,
+                  X1E80100_SLAVE_UFS_MEM_CFG, X1E80100_SLAVE_USB2,
+                  X1E80100_SLAVE_USB3_0, X1E80100_SLAVE_USB3_1,
+                  X1E80100_SLAVE_USB3_2, X1E80100_SLAVE_USB3_MP,
+                  X1E80100_SLAVE_USB4_0, X1E80100_SLAVE_USB4_1,
+                  X1E80100_SLAVE_USB4_2, X1E80100_SLAVE_VENUS_CFG,
+                  X1E80100_SLAVE_LPASS_QTB_CFG, X1E80100_SLAVE_CNOC_MNOC_CFG,
+                  X1E80100_SLAVE_NSP_QTB_CFG, X1E80100_SLAVE_QDSS_STM,
+                  X1E80100_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+       .name = "qnm_gemnoc_cnoc",
+       .id = X1E80100_MASTER_GEM_NOC_CNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 6,
+       .links = { X1E80100_SLAVE_AOSS, X1E80100_SLAVE_TME_CFG,
+                  X1E80100_SLAVE_APPSS, X1E80100_SLAVE_CNOC_CFG,
+                  X1E80100_SLAVE_BOOT_IMEM, X1E80100_SLAVE_IMEM },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+       .name = "qnm_gemnoc_pcie",
+       .id = X1E80100_MASTER_GEM_NOC_PCIE_SNOC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 8,
+       .links = { X1E80100_SLAVE_PCIE_0, X1E80100_SLAVE_PCIE_1,
+                  X1E80100_SLAVE_PCIE_2, X1E80100_SLAVE_PCIE_3,
+                  X1E80100_SLAVE_PCIE_4, X1E80100_SLAVE_PCIE_5,
+                  X1E80100_SLAVE_PCIE_6A, X1E80100_SLAVE_PCIE_6B },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+       .name = "alm_gpu_tcu",
+       .id = X1E80100_MASTER_GPU_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_pcie_tcu = {
+       .name = "alm_pcie_tcu",
+       .id = X1E80100_MASTER_PCIE_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+       .name = "alm_sys_tcu",
+       .id = X1E80100_MASTER_SYS_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 2,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+       .name = "chm_apps",
+       .id = X1E80100_MASTER_APPSS_PROC,
+       .channels = 6,
+       .buswidth = 32,
+       .num_links = 3,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
+                  X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+       .name = "qnm_gpu",
+       .id = X1E80100_MASTER_GFX3D,
+       .channels = 4,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpass = {
+       .name = "qnm_lpass",
+       .id = X1E80100_MASTER_LPASS_GEM_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 3,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
+                  X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+       .name = "qnm_mnoc_hf",
+       .id = X1E80100_MASTER_MNOC_HF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+       .name = "qnm_mnoc_sf",
+       .id = X1E80100_MASTER_MNOC_SF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 2,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_nsp_noc = {
+       .name = "qnm_nsp_noc",
+       .id = X1E80100_MASTER_COMPUTE_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 3,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
+                  X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+       .name = "qnm_pcie",
+       .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 2,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+       .name = "qnm_snoc_sf",
+       .id = X1E80100_MASTER_SNOC_SF_MEM_NOC,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 3,
+       .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
+                  X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node xm_gic = {
+       .name = "xm_gic",
+       .id = X1E80100_MASTER_GIC2,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+       .name = "qnm_lpiaon_noc",
+       .id = X1E80100_MASTER_LPIAON_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+       .name = "qnm_lpass_lpinoc",
+       .id = X1E80100_MASTER_LPASS_LPINOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qxm_lpinoc_dsp_axim = {
+       .name = "qxm_lpinoc_dsp_axim",
+       .id = X1E80100_MASTER_LPASS_PROC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+       .name = "llcc_mc",
+       .id = X1E80100_MASTER_LLCC,
+       .channels = 8,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_av1_enc = {
+       .name = "qnm_av1_enc",
+       .id = X1E80100_MASTER_AV1_ENC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+       .name = "qnm_camnoc_hf",
+       .id = X1E80100_MASTER_CAMNOC_HF,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+       .name = "qnm_camnoc_icp",
+       .id = X1E80100_MASTER_CAMNOC_ICP,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+       .name = "qnm_camnoc_sf",
+       .id = X1E80100_MASTER_CAMNOC_SF,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_eva = {
+       .name = "qnm_eva",
+       .id = X1E80100_MASTER_EVA,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+       .name = "qnm_mdp",
+       .id = X1E80100_MASTER_MDP,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video = {
+       .name = "qnm_video",
+       .id = X1E80100_MASTER_VIDEO,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+       .name = "qnm_video_cv_cpu",
+       .id = X1E80100_MASTER_VIDEO_CV_PROC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+       .name = "qnm_video_v_cpu",
+       .id = X1E80100_MASTER_VIDEO_V_PROC,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+       .name = "qsm_mnoc_cfg",
+       .id = X1E80100_MASTER_CNOC_MNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+       .name = "qxm_nsp",
+       .id = X1E80100_MASTER_CDSP_PROC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_pcie_north_gem_noc = {
+       .name = "qnm_pcie_north_gem_noc",
+       .id = X1E80100_MASTER_PCIE_NORTH,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_pcie_south_gem_noc = {
+       .name = "qnm_pcie_south_gem_noc",
+       .id = X1E80100_MASTER_PCIE_SOUTH,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie_3 = {
+       .name = "xm_pcie_3",
+       .id = X1E80100_MASTER_PCIE_3,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_NORTH },
+};
+
+static struct qcom_icc_node xm_pcie_4 = {
+       .name = "xm_pcie_4",
+       .id = X1E80100_MASTER_PCIE_4,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_NORTH },
+};
+
+static struct qcom_icc_node xm_pcie_5 = {
+       .name = "xm_pcie_5",
+       .id = X1E80100_MASTER_PCIE_5,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_NORTH },
+};
+
+static struct qcom_icc_node xm_pcie_0 = {
+       .name = "xm_pcie_0",
+       .id = X1E80100_MASTER_PCIE_0,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node xm_pcie_1 = {
+       .name = "xm_pcie_1",
+       .id = X1E80100_MASTER_PCIE_1,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node xm_pcie_2 = {
+       .name = "xm_pcie_2",
+       .id = X1E80100_MASTER_PCIE_2,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node xm_pcie_6a = {
+       .name = "xm_pcie_6a",
+       .id = X1E80100_MASTER_PCIE_6A,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node xm_pcie_6b = {
+       .name = "xm_pcie_6b",
+       .id = X1E80100_MASTER_PCIE_6B,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+       .name = "qnm_aggre1_noc",
+       .id = X1E80100_MASTER_A1NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+       .name = "qnm_aggre2_noc",
+       .id = X1E80100_MASTER_A2NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_gic = {
+       .name = "qnm_gic",
+       .id = X1E80100_MASTER_GIC1,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_usb_anoc = {
+       .name = "qnm_usb_anoc",
+       .id = X1E80100_MASTER_USB_NOC_SNOC,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre_usb_north_snoc = {
+       .name = "qnm_aggre_usb_north_snoc",
+       .id = X1E80100_MASTER_AGGRE_USB_NORTH,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre_usb_south_snoc = {
+       .name = "qnm_aggre_usb_south_snoc",
+       .id = X1E80100_MASTER_AGGRE_USB_SOUTH,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb2_0 = {
+       .name = "xm_usb2_0",
+       .id = X1E80100_MASTER_USB2,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_AGGRE_USB_NORTH },
+};
+
+static struct qcom_icc_node xm_usb3_mp = {
+       .name = "xm_usb3_mp",
+       .id = X1E80100_MASTER_USB3_MP,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_AGGRE_USB_NORTH },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+       .name = "xm_usb3_0",
+       .id = X1E80100_MASTER_USB3_0,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+       .name = "xm_usb3_1",
+       .id = X1E80100_MASTER_USB3_1,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node xm_usb3_2 = {
+       .name = "xm_usb3_2",
+       .id = X1E80100_MASTER_USB3_2,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node xm_usb4_0 = {
+       .name = "xm_usb4_0",
+       .id = X1E80100_MASTER_USB4_0,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node xm_usb4_1 = {
+       .name = "xm_usb4_1",
+       .id = X1E80100_MASTER_USB4_1,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node xm_usb4_2 = {
+       .name = "xm_usb4_2",
+       .id = X1E80100_MASTER_USB4_2,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf_disp = {
+       .name = "qnm_mnoc_hf_disp",
+       .id = X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node qnm_pcie_disp = {
+       .name = "qnm_pcie_disp",
+       .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC_DISP,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node llcc_mc_disp = {
+       .name = "llcc_mc_disp",
+       .id = X1E80100_MASTER_LLCC_DISP,
+       .channels = 8,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_EBI1_DISP },
+};
+
+static struct qcom_icc_node qnm_mdp_disp = {
+       .name = "qnm_mdp_disp",
+       .id = X1E80100_MASTER_MDP_DISP,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qnm_pcie_pcie = {
+       .name = "qnm_pcie_pcie",
+       .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_LLCC_PCIE },
+};
+
+static struct qcom_icc_node llcc_mc_pcie = {
+       .name = "llcc_mc_pcie",
+       .id = X1E80100_MASTER_LLCC_PCIE,
+       .channels = 8,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_EBI1_PCIE },
+};
+
+static struct qcom_icc_node qnm_pcie_north_gem_noc_pcie = {
+       .name = "qnm_pcie_north_gem_noc_pcie",
+       .id = X1E80100_MASTER_PCIE_NORTH_PCIE,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE },
+};
+
+static struct qcom_icc_node qnm_pcie_south_gem_noc_pcie = {
+       .name = "qnm_pcie_south_gem_noc_pcie",
+       .id = X1E80100_MASTER_PCIE_SOUTH_PCIE,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_3_pcie = {
+       .name = "xm_pcie_3_pcie",
+       .id = X1E80100_MASTER_PCIE_3_PCIE,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_4_pcie = {
+       .name = "xm_pcie_4_pcie",
+       .id = X1E80100_MASTER_PCIE_4_PCIE,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_5_pcie = {
+       .name = "xm_pcie_5_pcie",
+       .id = X1E80100_MASTER_PCIE_5_PCIE,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_0_pcie = {
+       .name = "xm_pcie_0_pcie",
+       .id = X1E80100_MASTER_PCIE_0_PCIE,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_1_pcie = {
+       .name = "xm_pcie_1_pcie",
+       .id = X1E80100_MASTER_PCIE_1_PCIE,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_2_pcie = {
+       .name = "xm_pcie_2_pcie",
+       .id = X1E80100_MASTER_PCIE_2_PCIE,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_6a_pcie = {
+       .name = "xm_pcie_6a_pcie",
+       .id = X1E80100_MASTER_PCIE_6A_PCIE,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_6b_pcie = {
+       .name = "xm_pcie_6b_pcie",
+       .id = X1E80100_MASTER_PCIE_6B_PCIE,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+       .name = "qns_a1noc_snoc",
+       .id = X1E80100_SLAVE_A1NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+       .name = "qns_a2noc_snoc",
+       .id = X1E80100_SLAVE_A2NOC_SNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node ddr_perf_mode_slave = {
+       .name = "ddr_perf_mode_slave",
+       .id = X1E80100_SLAVE_DDR_PERF_MODE,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+       .name = "qup0_core_slave",
+       .id = X1E80100_SLAVE_QUP_CORE_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+       .name = "qup1_core_slave",
+       .id = X1E80100_SLAVE_QUP_CORE_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+       .name = "qup2_core_slave",
+       .id = X1E80100_SLAVE_QUP_CORE_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+       .name = "qhs_ahb2phy0",
+       .id = X1E80100_SLAVE_AHB2PHY_SOUTH,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+       .name = "qhs_ahb2phy1",
+       .id = X1E80100_SLAVE_AHB2PHY_NORTH,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+       .name = "qhs_ahb2phy2",
+       .id = X1E80100_SLAVE_AHB2PHY_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_av1_enc_cfg = {
+       .name = "qhs_av1_enc_cfg",
+       .id = X1E80100_SLAVE_AV1_ENC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+       .name = "qhs_camera_cfg",
+       .id = X1E80100_SLAVE_CAMERA_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+       .name = "qhs_clk_ctl",
+       .id = X1E80100_SLAVE_CLK_CTL,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+       .name = "qhs_crypto0_cfg",
+       .id = X1E80100_SLAVE_CRYPTO_0_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+       .name = "qhs_display_cfg",
+       .id = X1E80100_SLAVE_DISPLAY_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+       .name = "qhs_gpuss_cfg",
+       .id = X1E80100_SLAVE_GFX3D_CFG,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+       .name = "qhs_imem_cfg",
+       .id = X1E80100_SLAVE_IMEM_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+       .name = "qhs_ipc_router",
+       .id = X1E80100_SLAVE_IPC_ROUTER_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+       .name = "qhs_pcie0_cfg",
+       .id = X1E80100_SLAVE_PCIE_0_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+       .name = "qhs_pcie1_cfg",
+       .id = X1E80100_SLAVE_PCIE_1_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie2_cfg = {
+       .name = "qhs_pcie2_cfg",
+       .id = X1E80100_SLAVE_PCIE_2_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie3_cfg = {
+       .name = "qhs_pcie3_cfg",
+       .id = X1E80100_SLAVE_PCIE_3_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie4_cfg = {
+       .name = "qhs_pcie4_cfg",
+       .id = X1E80100_SLAVE_PCIE_4_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie5_cfg = {
+       .name = "qhs_pcie5_cfg",
+       .id = X1E80100_SLAVE_PCIE_5_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie6a_cfg = {
+       .name = "qhs_pcie6a_cfg",
+       .id = X1E80100_SLAVE_PCIE_6A_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie6b_cfg = {
+       .name = "qhs_pcie6b_cfg",
+       .id = X1E80100_SLAVE_PCIE_6B_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie_rsc_cfg = {
+       .name = "qhs_pcie_rsc_cfg",
+       .id = X1E80100_SLAVE_PCIE_RSC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+       .name = "qhs_pdm",
+       .id = X1E80100_SLAVE_PDM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+       .name = "qhs_prng",
+       .id = X1E80100_SLAVE_PRNG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+       .name = "qhs_qdss_cfg",
+       .id = X1E80100_SLAVE_QDSS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+       .name = "qhs_qspi",
+       .id = X1E80100_SLAVE_QSPI_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+       .name = "qhs_qup0",
+       .id = X1E80100_SLAVE_QUP_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+       .name = "qhs_qup1",
+       .id = X1E80100_SLAVE_QUP_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+       .name = "qhs_qup2",
+       .id = X1E80100_SLAVE_QUP_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+       .name = "qhs_sdc2",
+       .id = X1E80100_SLAVE_SDCC_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+       .name = "qhs_sdc4",
+       .id = X1E80100_SLAVE_SDCC_4,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_smmuv3_cfg = {
+       .name = "qhs_smmuv3_cfg",
+       .id = X1E80100_SLAVE_SMMUV3_CFG,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+       .name = "qhs_tcsr",
+       .id = X1E80100_SLAVE_TCSR,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+       .name = "qhs_tlmm",
+       .id = X1E80100_SLAVE_TLMM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+       .name = "qhs_ufs_mem_cfg",
+       .id = X1E80100_SLAVE_UFS_MEM_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb2_0_cfg = {
+       .name = "qhs_usb2_0_cfg",
+       .id = X1E80100_SLAVE_USB2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_0_cfg = {
+       .name = "qhs_usb3_0_cfg",
+       .id = X1E80100_SLAVE_USB3_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_1_cfg = {
+       .name = "qhs_usb3_1_cfg",
+       .id = X1E80100_SLAVE_USB3_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_2_cfg = {
+       .name = "qhs_usb3_2_cfg",
+       .id = X1E80100_SLAVE_USB3_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_mp_cfg = {
+       .name = "qhs_usb3_mp_cfg",
+       .id = X1E80100_SLAVE_USB3_MP,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb4_0_cfg = {
+       .name = "qhs_usb4_0_cfg",
+       .id = X1E80100_SLAVE_USB4_0,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb4_1_cfg = {
+       .name = "qhs_usb4_1_cfg",
+       .id = X1E80100_SLAVE_USB4_1,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb4_2_cfg = {
+       .name = "qhs_usb4_2_cfg",
+       .id = X1E80100_SLAVE_USB4_2,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+       .name = "qhs_venus_cfg",
+       .id = X1E80100_SLAVE_VENUS_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qss_lpass_qtb_cfg = {
+       .name = "qss_lpass_qtb_cfg",
+       .id = X1E80100_SLAVE_LPASS_QTB_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+       .name = "qss_mnoc_cfg",
+       .id = X1E80100_SLAVE_CNOC_MNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qss_nsp_qtb_cfg = {
+       .name = "qss_nsp_qtb_cfg",
+       .id = X1E80100_SLAVE_NSP_QTB_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+       .name = "xs_qdss_stm",
+       .id = X1E80100_SLAVE_QDSS_STM,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+       .name = "xs_sys_tcu_cfg",
+       .id = X1E80100_SLAVE_TCU,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+       .name = "qhs_aoss",
+       .id = X1E80100_SLAVE_AOSS,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+       .name = "qhs_tme_cfg",
+       .id = X1E80100_SLAVE_TME_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qns_apss = {
+       .name = "qns_apss",
+       .id = X1E80100_SLAVE_APPSS,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qss_cfg = {
+       .name = "qss_cfg",
+       .id = X1E80100_SLAVE_CNOC_CFG,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_CNOC_CFG },
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+       .name = "qxs_boot_imem",
+       .id = X1E80100_SLAVE_BOOT_IMEM,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_imem = {
+       .name = "qxs_imem",
+       .id = X1E80100_SLAVE_IMEM,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+       .name = "xs_pcie_0",
+       .id = X1E80100_SLAVE_PCIE_0,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+       .name = "xs_pcie_1",
+       .id = X1E80100_SLAVE_PCIE_1,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_2 = {
+       .name = "xs_pcie_2",
+       .id = X1E80100_SLAVE_PCIE_2,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 0,
+};
+
+/*
+ * NOTE(review): buswidth 64 is an outlier among the PCIe slave nodes
+ * (siblings use 8/16/32) — presumably PCIe 3 is the widest controller
+ * on this SoC; confirm against the X1E80100 bus topology data.
+ */
+static struct qcom_icc_node xs_pcie_3 = {
+       .name = "xs_pcie_3",
+       .id = X1E80100_SLAVE_PCIE_3,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_4 = {
+       .name = "xs_pcie_4",
+       .id = X1E80100_SLAVE_PCIE_4,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_5 = {
+       .name = "xs_pcie_5",
+       .id = X1E80100_SLAVE_PCIE_5,
+       .channels = 1,
+       .buswidth = 8,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_6a = {
+       .name = "xs_pcie_6a",
+       .id = X1E80100_SLAVE_PCIE_6A,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_6b = {
+       .name = "xs_pcie_6b",
+       .id = X1E80100_SLAVE_PCIE_6B,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+       .name = "qns_gem_noc_cnoc",
+       .id = X1E80100_SLAVE_GEM_NOC_CNOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+       .name = "qns_llcc",
+       .id = X1E80100_SLAVE_LLCC,
+       .channels = 8,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+       .name = "qns_pcie",
+       .id = X1E80100_SLAVE_MEM_NOC_PCIE_SNOC,
+       .channels = 1,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+       .name = "qns_lpass_ag_noc_gemnoc",
+       .id = X1E80100_SLAVE_LPASS_GEM_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+       .name = "qns_lpass_aggnoc",
+       .id = X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_LPIAON_NOC },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+       .name = "qns_lpi_aon_noc",
+       .id = X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC,
+       .channels = 1,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_LPASS_LPINOC },
+};
+
+static struct qcom_icc_node ebi = {
+       .name = "ebi",
+       .id = X1E80100_SLAVE_EBI1,
+       .channels = 8,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+       .name = "qns_mem_noc_hf",
+       .id = X1E80100_SLAVE_MNOC_HF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+       .name = "qns_mem_noc_sf",
+       .id = X1E80100_SLAVE_MNOC_SF_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+       .name = "srvc_mnoc",
+       .id = X1E80100_SLAVE_SERVICE_MNOC,
+       .channels = 1,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+       .name = "qns_nsp_gemnoc",
+       .id = X1E80100_SLAVE_CDSP_MEM_NOC,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+       .name = "qns_pcie_mem_noc",
+       .id = X1E80100_SLAVE_ANOC_PCIE_GEM_NOC,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qns_pcie_north_gem_noc = {
+       .name = "qns_pcie_north_gem_noc",
+       .id = X1E80100_SLAVE_PCIE_NORTH,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_PCIE_NORTH },
+};
+
+static struct qcom_icc_node qns_pcie_south_gem_noc = {
+       .name = "qns_pcie_south_gem_noc",
+       .id = X1E80100_SLAVE_PCIE_SOUTH,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+       .name = "qns_gemnoc_sf",
+       .id = X1E80100_SLAVE_SNOC_GEM_NOC_SF,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_aggre_usb_snoc = {
+       .name = "qns_aggre_usb_snoc",
+       .id = X1E80100_SLAVE_USB_NOC_SNOC,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_aggre_usb_north_snoc = {
+       .name = "qns_aggre_usb_north_snoc",
+       .id = X1E80100_SLAVE_AGGRE_USB_NORTH,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_AGGRE_USB_NORTH },
+};
+
+static struct qcom_icc_node qns_aggre_usb_south_snoc = {
+       .name = "qns_aggre_usb_south_snoc",
+       .id = X1E80100_SLAVE_AGGRE_USB_SOUTH,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node qns_llcc_disp = {
+       .name = "qns_llcc_disp",
+       .id = X1E80100_SLAVE_LLCC_DISP,
+       .channels = 8,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_LLCC_DISP },
+};
+
+static struct qcom_icc_node ebi_disp = {
+       .name = "ebi_disp",
+       .id = X1E80100_SLAVE_EBI1_DISP,
+       .channels = 8,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf_disp = {
+       .name = "qns_mem_noc_hf_disp",
+       .id = X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP,
+       .channels = 2,
+       .buswidth = 32,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qns_llcc_pcie = {
+       .name = "qns_llcc_pcie",
+       .id = X1E80100_SLAVE_LLCC_PCIE,
+       .channels = 8,
+       .buswidth = 16,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_LLCC_PCIE },
+};
+
+static struct qcom_icc_node ebi_pcie = {
+       .name = "ebi_pcie",
+       .id = X1E80100_SLAVE_EBI1_PCIE,
+       .channels = 8,
+       .buswidth = 4,
+       .num_links = 0,
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc_pcie = {
+       .name = "qns_pcie_mem_noc_pcie",
+       .id = X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE },
+};
+
+static struct qcom_icc_node qns_pcie_north_gem_noc_pcie = {
+       .name = "qns_pcie_north_gem_noc_pcie",
+       .id = X1E80100_SLAVE_PCIE_NORTH_PCIE,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_PCIE_NORTH_PCIE },
+};
+
+static struct qcom_icc_node qns_pcie_south_gem_noc_pcie = {
+       .name = "qns_pcie_south_gem_noc_pcie",
+       .id = X1E80100_SLAVE_PCIE_SOUTH_PCIE,
+       .channels = 1,
+       .buswidth = 64,
+       .num_links = 1,
+       .links = { X1E80100_MASTER_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+       .name = "ACV",
+       .num_nodes = 1,
+       .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_acv_perf = {
+       .name = "ACV_PERF",
+       .num_nodes = 1,
+       .nodes = { &ddr_perf_mode_slave },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+       .name = "CE0",
+       .num_nodes = 1,
+       .nodes = { &qxm_crypto },
+};
+
+/*
+ * CN0: keepalive BCM spanning the config-NoC slaves and the CNOC main
+ * endpoints. num_nodes (63) must equal the number of pointers listed
+ * below — keep both in sync when adding or removing nodes.
+ */
+static struct qcom_icc_bcm bcm_cn0 = {
+       .name = "CN0",
+       .keepalive = true,
+       .num_nodes = 63,
+       .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+                  &qhs_ahb2phy1, &qhs_ahb2phy2,
+                  &qhs_av1_enc_cfg, &qhs_camera_cfg,
+                  &qhs_clk_ctl, &qhs_crypto0_cfg,
+                  &qhs_gpuss_cfg, &qhs_imem_cfg,
+                  &qhs_ipc_router, &qhs_pcie0_cfg,
+                  &qhs_pcie1_cfg, &qhs_pcie2_cfg,
+                  &qhs_pcie3_cfg, &qhs_pcie4_cfg,
+                  &qhs_pcie5_cfg, &qhs_pcie6a_cfg,
+                  &qhs_pcie6b_cfg, &qhs_pcie_rsc_cfg,
+                  &qhs_pdm, &qhs_prng,
+                  &qhs_qdss_cfg, &qhs_qspi,
+                  &qhs_qup0, &qhs_qup1,
+                  &qhs_qup2, &qhs_sdc2,
+                  &qhs_sdc4, &qhs_smmuv3_cfg,
+                  &qhs_tcsr, &qhs_tlmm,
+                  &qhs_ufs_mem_cfg, &qhs_usb2_0_cfg,
+                  &qhs_usb3_0_cfg, &qhs_usb3_1_cfg,
+                  &qhs_usb3_2_cfg, &qhs_usb3_mp_cfg,
+                  &qhs_usb4_0_cfg, &qhs_usb4_1_cfg,
+                  &qhs_usb4_2_cfg, &qhs_venus_cfg,
+                  &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+                  &qss_nsp_qtb_cfg, &xs_qdss_stm,
+                  &xs_sys_tcu_cfg, &qnm_gemnoc_cnoc,
+                  &qnm_gemnoc_pcie, &qhs_aoss,
+                  &qhs_tme_cfg, &qns_apss,
+                  &qss_cfg, &qxs_boot_imem,
+                  &qxs_imem, &xs_pcie_0,
+                  &xs_pcie_1, &xs_pcie_2,
+                  &xs_pcie_3, &xs_pcie_4,
+                  &xs_pcie_5, &xs_pcie_6a,
+                  &xs_pcie_6b },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+       .name = "CN1",
+       .num_nodes = 1,
+       .nodes = { &qhs_display_cfg },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+       .name = "CO0",
+       .num_nodes = 2,
+       .nodes = { &qxm_nsp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+       .name = "LP0",
+       .num_nodes = 2,
+       .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+       .name = "MC0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+       .name = "MM0",
+       .num_nodes = 1,
+       .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+       .name = "MM1",
+       .num_nodes = 10,
+       .nodes = { &qnm_av1_enc, &qnm_camnoc_hf,
+                  &qnm_camnoc_icp, &qnm_camnoc_sf,
+                  &qnm_eva, &qnm_mdp,
+                  &qnm_video, &qnm_video_cv_cpu,
+                  &qnm_video_v_cpu, &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_pc0 = {
+       .name = "PC0",
+       .num_nodes = 1,
+       .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+       .name = "QUP0",
+       .keepalive = true,
+       .vote_scale = 1,
+       .num_nodes = 1,
+       .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+       .name = "QUP1",
+       .keepalive = true,
+       .vote_scale = 1,
+       .num_nodes = 1,
+       .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+       .name = "QUP2",
+       .keepalive = true,
+       .vote_scale = 1,
+       .num_nodes = 1,
+       .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+       .name = "SH0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+       .name = "SH1",
+       .num_nodes = 13,
+       .nodes = { &alm_gpu_tcu, &alm_pcie_tcu,
+                  &alm_sys_tcu, &chm_apps,
+                  &qnm_gpu, &qnm_lpass,
+                  &qnm_mnoc_hf, &qnm_mnoc_sf,
+                  &qnm_nsp_noc, &qnm_pcie,
+                  &xm_gic, &qns_gem_noc_cnoc,
+                  &qns_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+       .name = "SN0",
+       .keepalive = true,
+       .num_nodes = 1,
+       .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+       .name = "SN2",
+       .num_nodes = 1,
+       .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+       .name = "SN3",
+       .num_nodes = 1,
+       .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+       .name = "SN4",
+       .num_nodes = 1,
+       .nodes = { &qnm_usb_anoc },
+};
+
+static struct qcom_icc_bcm bcm_acv_disp = {
+       .name = "ACV",
+       .num_nodes = 1,
+       .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mc0_disp = {
+       .name = "MC0",
+       .num_nodes = 1,
+       .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm0_disp = {
+       .name = "MM0",
+       .num_nodes = 1,
+       .nodes = { &qns_mem_noc_hf_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm1_disp = {
+       .name = "MM1",
+       .num_nodes = 1,
+       .nodes = { &qnm_mdp_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh0_disp = {
+       .name = "SH0",
+       .num_nodes = 1,
+       .nodes = { &qns_llcc_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh1_disp = {
+       .name = "SH1",
+       .num_nodes = 2,
+       .nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp },
+};
+
+static struct qcom_icc_bcm bcm_acv_pcie = {
+       .name = "ACV",
+       .num_nodes = 1,
+       .nodes = { &ebi_pcie },
+};
+
+static struct qcom_icc_bcm bcm_mc0_pcie = {
+       .name = "MC0",
+       .num_nodes = 1,
+       .nodes = { &ebi_pcie },
+};
+
+static struct qcom_icc_bcm bcm_pc0_pcie = {
+       .name = "PC0",
+       .num_nodes = 1,
+       .nodes = { &qns_pcie_mem_noc_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sh0_pcie = {
+       .name = "SH0",
+       .num_nodes = 1,
+       .nodes = { &qns_llcc_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sh1_pcie = {
+       .name = "SH1",
+       .num_nodes = 1,
+       .nodes = { &qnm_pcie_pcie },
+};
+
+/* Empty BCM list, const-qualified to match every other *_bcms[] table. */
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+       [MASTER_QSPI_0] = &qhm_qspi,
+       [MASTER_QUP_1] = &qhm_qup1,
+       [MASTER_SDCC_4] = &xm_sdc4,
+       [MASTER_UFS_MEM] = &xm_ufs_mem,
+       [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct qcom_icc_desc x1e80100_aggre1_noc = {
+       .nodes = aggre1_noc_nodes,
+       .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+       .bcms = aggre1_noc_bcms,
+       .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+       &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+       [MASTER_QUP_0] = &qhm_qup0,
+       [MASTER_QUP_2] = &qhm_qup2,
+       [MASTER_CRYPTO] = &qxm_crypto,
+       [MASTER_SP] = &qxm_sp,
+       [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+       [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+       [MASTER_SDCC_2] = &xm_sdc2,
+       [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct qcom_icc_desc x1e80100_aggre2_noc = {
+       .nodes = aggre2_noc_nodes,
+       .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+       .bcms = aggre2_noc_bcms,
+       .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+       &bcm_acv_perf,
+       &bcm_qup0,
+       &bcm_qup1,
+       &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+       [MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master,
+       [MASTER_QUP_CORE_0] = &qup0_core_master,
+       [MASTER_QUP_CORE_1] = &qup1_core_master,
+       [MASTER_QUP_CORE_2] = &qup2_core_master,
+       [SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave,
+       [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+       [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+       [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc x1e80100_clk_virt = {
+       .nodes = clk_virt_nodes,
+       .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+       .bcms = clk_virt_bcms,
+       .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_cfg_bcms[] = {
+       &bcm_cn0,
+       &bcm_cn1,
+};
+
+static struct qcom_icc_node * const cnoc_cfg_nodes[] = {
+       [MASTER_CNOC_CFG] = &qsm_cfg,
+       [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+       [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+       [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
+       [SLAVE_AV1_ENC_CFG] = &qhs_av1_enc_cfg,
+       [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+       [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+       [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+       [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+       [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+       [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+       [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+       [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+       [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+       [SLAVE_PCIE_2_CFG] = &qhs_pcie2_cfg,
+       [SLAVE_PCIE_3_CFG] = &qhs_pcie3_cfg,
+       [SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg,
+       [SLAVE_PCIE_5_CFG] = &qhs_pcie5_cfg,
+       [SLAVE_PCIE_6A_CFG] = &qhs_pcie6a_cfg,
+       [SLAVE_PCIE_6B_CFG] = &qhs_pcie6b_cfg,
+       [SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rsc_cfg,
+       [SLAVE_PDM] = &qhs_pdm,
+       [SLAVE_PRNG] = &qhs_prng,
+       [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+       [SLAVE_QSPI_0] = &qhs_qspi,
+       [SLAVE_QUP_0] = &qhs_qup0,
+       [SLAVE_QUP_1] = &qhs_qup1,
+       [SLAVE_QUP_2] = &qhs_qup2,
+       [SLAVE_SDCC_2] = &qhs_sdc2,
+       [SLAVE_SDCC_4] = &qhs_sdc4,
+       [SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg,
+       [SLAVE_TCSR] = &qhs_tcsr,
+       [SLAVE_TLMM] = &qhs_tlmm,
+       [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+       [SLAVE_USB2] = &qhs_usb2_0_cfg,
+       [SLAVE_USB3_0] = &qhs_usb3_0_cfg,
+       [SLAVE_USB3_1] = &qhs_usb3_1_cfg,
+       [SLAVE_USB3_2] = &qhs_usb3_2_cfg,
+       [SLAVE_USB3_MP] = &qhs_usb3_mp_cfg,
+       [SLAVE_USB4_0] = &qhs_usb4_0_cfg,
+       [SLAVE_USB4_1] = &qhs_usb4_1_cfg,
+       [SLAVE_USB4_2] = &qhs_usb4_2_cfg,
+       [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+       [SLAVE_LPASS_QTB_CFG] = &qss_lpass_qtb_cfg,
+       [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+       [SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg,
+       [SLAVE_QDSS_STM] = &xs_qdss_stm,
+       [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc x1e80100_cnoc_cfg = {
+       .nodes = cnoc_cfg_nodes,
+       .num_nodes = ARRAY_SIZE(cnoc_cfg_nodes),
+       .bcms = cnoc_cfg_bcms,
+       .num_bcms = ARRAY_SIZE(cnoc_cfg_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+       &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+       [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+       [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+       [SLAVE_AOSS] = &qhs_aoss,
+       [SLAVE_TME_CFG] = &qhs_tme_cfg,
+       [SLAVE_APPSS] = &qns_apss,
+       [SLAVE_CNOC_CFG] = &qss_cfg,
+       [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+       [SLAVE_IMEM] = &qxs_imem,
+       [SLAVE_PCIE_0] = &xs_pcie_0,
+       [SLAVE_PCIE_1] = &xs_pcie_1,
+       [SLAVE_PCIE_2] = &xs_pcie_2,
+       [SLAVE_PCIE_3] = &xs_pcie_3,
+       [SLAVE_PCIE_4] = &xs_pcie_4,
+       [SLAVE_PCIE_5] = &xs_pcie_5,
+       [SLAVE_PCIE_6A] = &xs_pcie_6a,
+       [SLAVE_PCIE_6B] = &xs_pcie_6b,
+};
+
+static const struct qcom_icc_desc x1e80100_cnoc_main = {
+       .nodes = cnoc_main_nodes,
+       .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+       .bcms = cnoc_main_bcms,
+       .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+       &bcm_sh0,
+       &bcm_sh1,
+       &bcm_sh0_disp,
+       &bcm_sh1_disp,
+       &bcm_sh0_pcie,
+       &bcm_sh1_pcie,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+       [MASTER_GPU_TCU] = &alm_gpu_tcu,
+       [MASTER_PCIE_TCU] = &alm_pcie_tcu,
+       [MASTER_SYS_TCU] = &alm_sys_tcu,
+       [MASTER_APPSS_PROC] = &chm_apps,
+       [MASTER_GFX3D] = &qnm_gpu,
+       [MASTER_LPASS_GEM_NOC] = &qnm_lpass,
+       [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+       [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+       [MASTER_COMPUTE_NOC] = &qnm_nsp_noc,
+       [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+       [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+       [MASTER_GIC2] = &xm_gic,
+       [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+       [SLAVE_LLCC] = &qns_llcc,
+       [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+       [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
+       [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
+       [SLAVE_LLCC_DISP] = &qns_llcc_disp,
+       [MASTER_ANOC_PCIE_GEM_NOC_PCIE] = &qnm_pcie_pcie,
+       [SLAVE_LLCC_PCIE] = &qns_llcc_pcie,
+};
+
+static const struct qcom_icc_desc x1e80100_gem_noc = {
+       .nodes = gem_noc_nodes,
+       .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+       .bcms = gem_noc_bcms,
+       .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+/* Empty BCM list, const-qualified to match every other *_bcms[] table. */
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+       [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+       [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct qcom_icc_desc x1e80100_lpass_ag_noc = {
+       .nodes = lpass_ag_noc_nodes,
+       .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+       .bcms = lpass_ag_noc_bcms,
+       .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+       &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+       [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+       [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct qcom_icc_desc x1e80100_lpass_lpiaon_noc = {
+       .nodes = lpass_lpiaon_noc_nodes,
+       .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+       .bcms = lpass_lpiaon_noc_bcms,
+       .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_lpicx_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+       [MASTER_LPASS_PROC] = &qxm_lpinoc_dsp_axim,
+       [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct qcom_icc_desc x1e80100_lpass_lpicx_noc = {
+       .nodes = lpass_lpicx_noc_nodes,
+       .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+       .bcms = lpass_lpicx_noc_bcms,
+       .num_bcms = ARRAY_SIZE(lpass_lpicx_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+       &bcm_acv,
+       &bcm_mc0,
+       &bcm_acv_disp,
+       &bcm_mc0_disp,
+       &bcm_acv_pcie,
+       &bcm_mc0_pcie,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+       [MASTER_LLCC] = &llcc_mc,
+       [SLAVE_EBI1] = &ebi,
+       [MASTER_LLCC_DISP] = &llcc_mc_disp,
+       [SLAVE_EBI1_DISP] = &ebi_disp,
+       [MASTER_LLCC_PCIE] = &llcc_mc_pcie,
+       [SLAVE_EBI1_PCIE] = &ebi_pcie,
+};
+
+static const struct qcom_icc_desc x1e80100_mc_virt = {
+       .nodes = mc_virt_nodes,
+       .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+       .bcms = mc_virt_bcms,
+       .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+       &bcm_mm0,
+       &bcm_mm1,
+       &bcm_mm0_disp,
+       &bcm_mm1_disp,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+       [MASTER_AV1_ENC] = &qnm_av1_enc,
+       [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+       [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+       [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+       [MASTER_EVA] = &qnm_eva,
+       [MASTER_MDP] = &qnm_mdp,
+       [MASTER_VIDEO] = &qnm_video,
+       [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+       [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+       [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+       [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+       [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+       [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+       [MASTER_MDP_DISP] = &qnm_mdp_disp,
+       [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
+};
+
+static const struct qcom_icc_desc x1e80100_mmss_noc = {
+       .nodes = mmss_noc_nodes,
+       .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+       .bcms = mmss_noc_bcms,
+       .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+       &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+       [MASTER_CDSP_PROC] = &qxm_nsp,
+       [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+};
+
+static const struct qcom_icc_desc x1e80100_nsp_noc = {
+       .nodes = nsp_noc_nodes,
+       .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+       .bcms = nsp_noc_bcms,
+       .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_center_anoc_bcms[] = {
+       &bcm_pc0,
+       &bcm_pc0_pcie,
+};
+
+static struct qcom_icc_node * const pcie_center_anoc_nodes[] = {
+       [MASTER_PCIE_NORTH] = &qnm_pcie_north_gem_noc,
+       [MASTER_PCIE_SOUTH] = &qnm_pcie_south_gem_noc,
+       [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+       [MASTER_PCIE_NORTH_PCIE] = &qnm_pcie_north_gem_noc_pcie,
+       [MASTER_PCIE_SOUTH_PCIE] = &qnm_pcie_south_gem_noc_pcie,
+       [SLAVE_ANOC_PCIE_GEM_NOC_PCIE] = &qns_pcie_mem_noc_pcie,
+};
+
+static const struct qcom_icc_desc x1e80100_pcie_center_anoc = {
+       .nodes = pcie_center_anoc_nodes,
+       .num_nodes = ARRAY_SIZE(pcie_center_anoc_nodes),
+       .bcms = pcie_center_anoc_bcms,
+       .num_bcms = ARRAY_SIZE(pcie_center_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_north_anoc_bcms[] = {
+};
+
+static struct qcom_icc_node * const pcie_north_anoc_nodes[] = {
+       [MASTER_PCIE_3] = &xm_pcie_3,
+       [MASTER_PCIE_4] = &xm_pcie_4,
+       [MASTER_PCIE_5] = &xm_pcie_5,
+       [SLAVE_PCIE_NORTH] = &qns_pcie_north_gem_noc,
+       [MASTER_PCIE_3_PCIE] = &xm_pcie_3_pcie,
+       [MASTER_PCIE_4_PCIE] = &xm_pcie_4_pcie,
+       [MASTER_PCIE_5_PCIE] = &xm_pcie_5_pcie,
+       [SLAVE_PCIE_NORTH_PCIE] = &qns_pcie_north_gem_noc_pcie,
+};
+
+static const struct qcom_icc_desc x1e80100_pcie_north_anoc = {
+       .nodes = pcie_north_anoc_nodes,
+       .num_nodes = ARRAY_SIZE(pcie_north_anoc_nodes),
+       .bcms = pcie_north_anoc_bcms,
+       .num_bcms = ARRAY_SIZE(pcie_north_anoc_bcms),
+};
+
+/* Empty BCM list, const-qualified to match every other *_bcms[] table. */
+static struct qcom_icc_bcm * const pcie_south_anoc_bcms[] = {
+};
+
+static struct qcom_icc_node * const pcie_south_anoc_nodes[] = {
+       [MASTER_PCIE_0] = &xm_pcie_0,
+       [MASTER_PCIE_1] = &xm_pcie_1,
+       [MASTER_PCIE_2] = &xm_pcie_2,
+       [MASTER_PCIE_6A] = &xm_pcie_6a,
+       [MASTER_PCIE_6B] = &xm_pcie_6b,
+       [SLAVE_PCIE_SOUTH] = &qns_pcie_south_gem_noc,
+       [MASTER_PCIE_0_PCIE] = &xm_pcie_0_pcie,
+       [MASTER_PCIE_1_PCIE] = &xm_pcie_1_pcie,
+       [MASTER_PCIE_2_PCIE] = &xm_pcie_2_pcie,
+       [MASTER_PCIE_6A_PCIE] = &xm_pcie_6a_pcie,
+       [MASTER_PCIE_6B_PCIE] = &xm_pcie_6b_pcie,
+       [SLAVE_PCIE_SOUTH_PCIE] = &qns_pcie_south_gem_noc_pcie,
+};
+
+static const struct qcom_icc_desc x1e80100_pcie_south_anoc = {
+       .nodes = pcie_south_anoc_nodes,
+       .num_nodes = ARRAY_SIZE(pcie_south_anoc_nodes),
+       .bcms = pcie_south_anoc_bcms,
+       .num_bcms = ARRAY_SIZE(pcie_south_anoc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+       &bcm_sn0,
+       &bcm_sn2,
+       &bcm_sn3,
+       &bcm_sn4,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+       [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+       [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+       [MASTER_GIC1] = &qnm_gic,
+       [MASTER_USB_NOC_SNOC] = &qnm_usb_anoc,
+       [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct qcom_icc_desc x1e80100_system_noc = {
+       .nodes = system_noc_nodes,
+       .num_nodes = ARRAY_SIZE(system_noc_nodes),
+       .bcms = system_noc_bcms,
+       .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const usb_center_anoc_bcms[] = {
+};
+
+static struct qcom_icc_node * const usb_center_anoc_nodes[] = {
+       [MASTER_AGGRE_USB_NORTH] = &qnm_aggre_usb_north_snoc,
+       [MASTER_AGGRE_USB_SOUTH] = &qnm_aggre_usb_south_snoc,
+       [SLAVE_USB_NOC_SNOC] = &qns_aggre_usb_snoc,
+};
+
+static const struct qcom_icc_desc x1e80100_usb_center_anoc = {
+       .nodes = usb_center_anoc_nodes,
+       .num_nodes = ARRAY_SIZE(usb_center_anoc_nodes),
+       .bcms = usb_center_anoc_bcms,
+       .num_bcms = ARRAY_SIZE(usb_center_anoc_bcms),
+};
+
+static struct qcom_icc_bcm *usb_north_anoc_bcms[] = {
+};
+
+static struct qcom_icc_node * const usb_north_anoc_nodes[] = {
+       [MASTER_USB2] = &xm_usb2_0,
+       [MASTER_USB3_MP] = &xm_usb3_mp,
+       [SLAVE_AGGRE_USB_NORTH] = &qns_aggre_usb_north_snoc,
+};
+
+static const struct qcom_icc_desc x1e80100_usb_north_anoc = {
+       .nodes = usb_north_anoc_nodes,
+       .num_nodes = ARRAY_SIZE(usb_north_anoc_nodes),
+       .bcms = usb_north_anoc_bcms,
+       .num_bcms = ARRAY_SIZE(usb_north_anoc_bcms),
+};
+
+static struct qcom_icc_bcm *usb_south_anoc_bcms[] = {
+};
+
+static struct qcom_icc_node * const usb_south_anoc_nodes[] = {
+       [MASTER_USB3_0] = &xm_usb3_0,
+       [MASTER_USB3_1] = &xm_usb3_1,
+       [MASTER_USB3_2] = &xm_usb3_2,
+       [MASTER_USB4_0] = &xm_usb4_0,
+       [MASTER_USB4_1] = &xm_usb4_1,
+       [MASTER_USB4_2] = &xm_usb4_2,
+       [SLAVE_AGGRE_USB_SOUTH] = &qns_aggre_usb_south_snoc,
+};
+
+static const struct qcom_icc_desc x1e80100_usb_south_anoc = {
+       .nodes = usb_south_anoc_nodes,
+       .num_nodes = ARRAY_SIZE(usb_south_anoc_nodes),
+       .bcms = usb_south_anoc_bcms,
+       .num_bcms = ARRAY_SIZE(usb_south_anoc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+       { .compatible = "qcom,x1e80100-aggre1-noc", .data = &x1e80100_aggre1_noc},
+       { .compatible = "qcom,x1e80100-aggre2-noc", .data = &x1e80100_aggre2_noc},
+       { .compatible = "qcom,x1e80100-clk-virt", .data = &x1e80100_clk_virt},
+       { .compatible = "qcom,x1e80100-cnoc-cfg", .data = &x1e80100_cnoc_cfg},
+       { .compatible = "qcom,x1e80100-cnoc-main", .data = &x1e80100_cnoc_main},
+       { .compatible = "qcom,x1e80100-gem-noc", .data = &x1e80100_gem_noc},
+       { .compatible = "qcom,x1e80100-lpass-ag-noc", .data = &x1e80100_lpass_ag_noc},
+       { .compatible = "qcom,x1e80100-lpass-lpiaon-noc", .data = &x1e80100_lpass_lpiaon_noc},
+       { .compatible = "qcom,x1e80100-lpass-lpicx-noc", .data = &x1e80100_lpass_lpicx_noc},
+       { .compatible = "qcom,x1e80100-mc-virt", .data = &x1e80100_mc_virt},
+       { .compatible = "qcom,x1e80100-mmss-noc", .data = &x1e80100_mmss_noc},
+       { .compatible = "qcom,x1e80100-nsp-noc", .data = &x1e80100_nsp_noc},
+       { .compatible = "qcom,x1e80100-pcie-center-anoc", .data = &x1e80100_pcie_center_anoc},
+       { .compatible = "qcom,x1e80100-pcie-north-anoc", .data = &x1e80100_pcie_north_anoc},
+       { .compatible = "qcom,x1e80100-pcie-south-anoc", .data = &x1e80100_pcie_south_anoc},
+       { .compatible = "qcom,x1e80100-system-noc", .data = &x1e80100_system_noc},
+       { .compatible = "qcom,x1e80100-usb-center-anoc", .data = &x1e80100_usb_center_anoc},
+       { .compatible = "qcom,x1e80100-usb-north-anoc", .data = &x1e80100_usb_north_anoc},
+       { .compatible = "qcom,x1e80100-usb-south-anoc", .data = &x1e80100_usb_south_anoc},
+       { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+       .probe = qcom_icc_rpmh_probe,
+       .remove_new = qcom_icc_rpmh_remove,
+       .driver = {
+               .name = "qnoc-x1e80100",
+               .of_match_table = qnoc_of_match,
+               .sync_state = icc_sync_state,
+       },
+};
+
+static int __init qnoc_driver_init(void)
+{
+       return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+       platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("x1e80100 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/x1e80100.h b/drivers/interconnect/qcom/x1e80100.h
new file mode 100644 (file)
index 0000000..2e14264
--- /dev/null
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * X1E80100 interconnect IDs
+ *
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_X1E80100_H
+#define __DRIVERS_INTERCONNECT_QCOM_X1E80100_H
+
+#define X1E80100_MASTER_A1NOC_SNOC                     0
+#define X1E80100_MASTER_A2NOC_SNOC                     1
+#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC              2
+#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC_DISP         3
+#define X1E80100_MASTER_APPSS_PROC                     4
+#define X1E80100_MASTER_CAMNOC_HF                      5
+#define X1E80100_MASTER_CAMNOC_ICP                     6
+#define X1E80100_MASTER_CAMNOC_SF                      7
+#define X1E80100_MASTER_CDSP_PROC                      8
+#define X1E80100_MASTER_CNOC_CFG                       9
+#define X1E80100_MASTER_CNOC_MNOC_CFG                  10
+#define X1E80100_MASTER_COMPUTE_NOC                    11
+#define X1E80100_MASTER_CRYPTO                         12
+#define X1E80100_MASTER_GEM_NOC_CNOC                   13
+#define X1E80100_MASTER_GEM_NOC_PCIE_SNOC              14
+#define X1E80100_MASTER_GFX3D                          15
+#define X1E80100_MASTER_GPU_TCU                                16
+#define X1E80100_MASTER_IPA                            17
+#define X1E80100_MASTER_LLCC                           18
+#define X1E80100_MASTER_LLCC_DISP                      19
+#define X1E80100_MASTER_LPASS_GEM_NOC                  20
+#define X1E80100_MASTER_LPASS_LPINOC                   21
+#define X1E80100_MASTER_LPASS_PROC                     22
+#define X1E80100_MASTER_LPIAON_NOC                     23
+#define X1E80100_MASTER_MDP                            24
+#define X1E80100_MASTER_MDP_DISP                       25
+#define X1E80100_MASTER_MNOC_HF_MEM_NOC                        26
+#define X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP           27
+#define X1E80100_MASTER_MNOC_SF_MEM_NOC                        28
+#define X1E80100_MASTER_PCIE_0                         29
+#define X1E80100_MASTER_PCIE_1                         30
+#define X1E80100_MASTER_QDSS_ETR                       31
+#define X1E80100_MASTER_QDSS_ETR_1                     32
+#define X1E80100_MASTER_QSPI_0                         33
+#define X1E80100_MASTER_QUP_0                          34
+#define X1E80100_MASTER_QUP_1                          35
+#define X1E80100_MASTER_QUP_2                          36
+#define X1E80100_MASTER_QUP_CORE_0                     37
+#define X1E80100_MASTER_QUP_CORE_1                     38
+#define X1E80100_MASTER_SDCC_2                         39
+#define X1E80100_MASTER_SDCC_4                         40
+#define X1E80100_MASTER_SNOC_SF_MEM_NOC                        41
+#define X1E80100_MASTER_SP                             42
+#define X1E80100_MASTER_SYS_TCU                                43
+#define X1E80100_MASTER_UFS_MEM                                44
+#define X1E80100_MASTER_USB3_0                         45
+#define X1E80100_MASTER_VIDEO                          46
+#define X1E80100_MASTER_VIDEO_CV_PROC                  47
+#define X1E80100_MASTER_VIDEO_V_PROC                   48
+#define X1E80100_SLAVE_A1NOC_SNOC                      49
+#define X1E80100_SLAVE_A2NOC_SNOC                      50
+#define X1E80100_SLAVE_AHB2PHY_NORTH                   51
+#define X1E80100_SLAVE_AHB2PHY_SOUTH                   52
+#define X1E80100_SLAVE_ANOC_PCIE_GEM_NOC               53
+#define X1E80100_SLAVE_AOSS                            54
+#define X1E80100_SLAVE_APPSS                           55
+#define X1E80100_SLAVE_BOOT_IMEM                       56
+#define X1E80100_SLAVE_CAMERA_CFG                      57
+#define X1E80100_SLAVE_CDSP_MEM_NOC                    58
+#define X1E80100_SLAVE_CLK_CTL                         59
+#define X1E80100_SLAVE_CNOC_CFG                                60
+#define X1E80100_SLAVE_CNOC_MNOC_CFG                   61
+#define X1E80100_SLAVE_CRYPTO_0_CFG                    62
+#define X1E80100_SLAVE_DISPLAY_CFG                     63
+#define X1E80100_SLAVE_EBI1                            64
+#define X1E80100_SLAVE_EBI1_DISP                       65
+#define X1E80100_SLAVE_GEM_NOC_CNOC                    66
+#define X1E80100_SLAVE_GFX3D_CFG                       67
+#define X1E80100_SLAVE_IMEM                            68
+#define X1E80100_SLAVE_IMEM_CFG                                69
+#define X1E80100_SLAVE_IPC_ROUTER_CFG                  70
+#define X1E80100_SLAVE_LLCC                            71
+#define X1E80100_SLAVE_LLCC_DISP                       72
+#define X1E80100_SLAVE_LPASS_GEM_NOC                   73
+#define X1E80100_SLAVE_LPASS_QTB_CFG                   74
+#define X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC         75
+#define X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC            76
+#define X1E80100_SLAVE_MEM_NOC_PCIE_SNOC               77
+#define X1E80100_SLAVE_MNOC_HF_MEM_NOC                 78
+#define X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP            79
+#define X1E80100_SLAVE_MNOC_SF_MEM_NOC                 80
+#define X1E80100_SLAVE_NSP_QTB_CFG                     81
+#define X1E80100_SLAVE_PCIE_0                          82
+#define X1E80100_SLAVE_PCIE_0_CFG                      83
+#define X1E80100_SLAVE_PCIE_1                          84
+#define X1E80100_SLAVE_PCIE_1_CFG                      85
+#define X1E80100_SLAVE_PDM                             86
+#define X1E80100_SLAVE_PRNG                            87
+#define X1E80100_SLAVE_QDSS_CFG                                88
+#define X1E80100_SLAVE_QDSS_STM                                89
+#define X1E80100_SLAVE_QSPI_0                          90
+#define X1E80100_SLAVE_QUP_1                           91
+#define X1E80100_SLAVE_QUP_2                           92
+#define X1E80100_SLAVE_QUP_CORE_0                      93
+#define X1E80100_SLAVE_QUP_CORE_1                      94
+#define X1E80100_SLAVE_QUP_CORE_2                      95
+#define X1E80100_SLAVE_SDCC_2                          96
+#define X1E80100_SLAVE_SDCC_4                          97
+#define X1E80100_SLAVE_SERVICE_MNOC                    98
+#define X1E80100_SLAVE_SNOC_GEM_NOC_SF                 99
+#define X1E80100_SLAVE_TCSR                            100
+#define X1E80100_SLAVE_TCU                             101
+#define X1E80100_SLAVE_TLMM                            102
+#define X1E80100_SLAVE_TME_CFG                         103
+#define X1E80100_SLAVE_UFS_MEM_CFG                     104
+#define X1E80100_SLAVE_USB3_0                          105
+#define X1E80100_SLAVE_VENUS_CFG                       106
+#define X1E80100_MASTER_DDR_PERF_MODE                  107
+#define X1E80100_MASTER_QUP_CORE_2                     108
+#define X1E80100_MASTER_PCIE_TCU                       109
+#define X1E80100_MASTER_GIC2                           110
+#define X1E80100_MASTER_AV1_ENC                                111
+#define X1E80100_MASTER_EVA                            112
+#define X1E80100_MASTER_PCIE_NORTH                     113
+#define X1E80100_MASTER_PCIE_SOUTH                     114
+#define X1E80100_MASTER_PCIE_3                         115
+#define X1E80100_MASTER_PCIE_4                         116
+#define X1E80100_MASTER_PCIE_5                         117
+#define X1E80100_MASTER_PCIE_2                         118
+#define X1E80100_MASTER_PCIE_6A                                119
+#define X1E80100_MASTER_PCIE_6B                                120
+#define X1E80100_MASTER_GIC1                           121
+#define X1E80100_MASTER_USB_NOC_SNOC                   122
+#define X1E80100_MASTER_AGGRE_USB_NORTH                        123
+#define X1E80100_MASTER_AGGRE_USB_SOUTH                        124
+#define X1E80100_MASTER_USB2                           125
+#define X1E80100_MASTER_USB3_MP                                126
+#define X1E80100_MASTER_USB3_1                         127
+#define X1E80100_MASTER_USB3_2                         128
+#define X1E80100_MASTER_USB4_0                         129
+#define X1E80100_MASTER_USB4_1                         130
+#define X1E80100_MASTER_USB4_2                         131
+#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE         132
+#define X1E80100_MASTER_LLCC_PCIE                      133
+#define X1E80100_MASTER_PCIE_NORTH_PCIE                        134
+#define X1E80100_MASTER_PCIE_SOUTH_PCIE                        135
+#define X1E80100_MASTER_PCIE_3_PCIE                    136
+#define X1E80100_MASTER_PCIE_4_PCIE                    137
+#define X1E80100_MASTER_PCIE_5_PCIE                    138
+#define X1E80100_MASTER_PCIE_0_PCIE                    139
+#define X1E80100_MASTER_PCIE_1_PCIE                    140
+#define X1E80100_MASTER_PCIE_2_PCIE                    141
+#define X1E80100_MASTER_PCIE_6A_PCIE                   142
+#define X1E80100_MASTER_PCIE_6B_PCIE                   143
+#define X1E80100_SLAVE_AHB2PHY_2                       144
+#define X1E80100_SLAVE_AV1_ENC_CFG                     145
+#define X1E80100_SLAVE_PCIE_2_CFG                      146
+#define X1E80100_SLAVE_PCIE_3_CFG                      147
+#define X1E80100_SLAVE_PCIE_4_CFG                      148
+#define X1E80100_SLAVE_PCIE_5_CFG                      149
+#define X1E80100_SLAVE_PCIE_6A_CFG                     150
+#define X1E80100_SLAVE_PCIE_6B_CFG                     151
+#define X1E80100_SLAVE_PCIE_RSC_CFG                    152
+#define X1E80100_SLAVE_QUP_0                           153
+#define X1E80100_SLAVE_SMMUV3_CFG                      154
+#define X1E80100_SLAVE_USB2                            155
+#define X1E80100_SLAVE_USB3_1                          156
+#define X1E80100_SLAVE_USB3_2                          157
+#define X1E80100_SLAVE_USB3_MP                         158
+#define X1E80100_SLAVE_USB4_0                          159
+#define X1E80100_SLAVE_USB4_1                          160
+#define X1E80100_SLAVE_USB4_2                          161
+#define X1E80100_SLAVE_PCIE_2                          162
+#define X1E80100_SLAVE_PCIE_3                          163
+#define X1E80100_SLAVE_PCIE_4                          164
+#define X1E80100_SLAVE_PCIE_5                          165
+#define X1E80100_SLAVE_PCIE_6A                         166
+#define X1E80100_SLAVE_PCIE_6B                         167
+#define X1E80100_SLAVE_DDR_PERF_MODE                   168
+#define X1E80100_SLAVE_PCIE_NORTH                      169
+#define X1E80100_SLAVE_PCIE_SOUTH                      170
+#define X1E80100_SLAVE_USB_NOC_SNOC                    171
+#define X1E80100_SLAVE_AGGRE_USB_NORTH                 172
+#define X1E80100_SLAVE_AGGRE_USB_SOUTH                 173
+#define X1E80100_SLAVE_LLCC_PCIE                       174
+#define X1E80100_SLAVE_EBI1_PCIE                       175
+#define X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE          176
+#define X1E80100_SLAVE_PCIE_NORTH_PCIE                 177
+#define X1E80100_SLAVE_PCIE_SOUTH_PCIE                 178
+
+#endif
index ebf09bbf725bd117195eec5457b5495b12305105..1ba14cb45d5a2a42378802feaf293c798b893ad2 100644 (file)
@@ -93,14 +93,12 @@ static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec,
        return priv->node;
 }
 
-static int exynos_generic_icc_remove(struct platform_device *pdev)
+static void exynos_generic_icc_remove(struct platform_device *pdev)
 {
        struct exynos_icc_priv *priv = platform_get_drvdata(pdev);
 
        icc_provider_deregister(&priv->provider);
        icc_nodes_remove(&priv->provider);
-
-       return 0;
 }
 
 static int exynos_generic_icc_probe(struct platform_device *pdev)
@@ -182,7 +180,7 @@ static struct platform_driver exynos_generic_icc_driver = {
                .sync_state = icc_sync_state,
        },
        .probe = exynos_generic_icc_probe,
-       .remove = exynos_generic_icc_remove,
+       .remove_new = exynos_generic_icc_remove,
 };
 module_platform_driver(exynos_generic_icc_driver);
 
index 7673bb82945b6cbf08ee1d4d44c196dc46875c5a..9a29d742617e3d34e3c9a28d25857230b06d2a68 100644 (file)
@@ -160,6 +160,7 @@ config IOMMU_DMA
 
 # Shared Virtual Addressing
 config IOMMU_SVA
+       select IOMMU_MM_DATA
        bool
 
 config FSL_PAMU
index 86be1edd50ee9afe027addc7c0c1fcbb4f536de5..8b3601f285fd699dd4d9d4d32e1ac62c2c5e3058 100644 (file)
@@ -53,10 +53,16 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
 void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
 
 int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
 void amd_iommu_domain_update(struct protection_domain *domain);
 void amd_iommu_domain_flush_complete(struct protection_domain *domain);
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+                                 u64 address, size_t size);
 int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
 int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
                              unsigned long cr3);
index 90b7d7950a9efa032f116a448db3c96fb7570004..809d74faa1a5d1513b12e8f964575d212996a35a 100644 (file)
@@ -902,12 +902,6 @@ extern int amd_iommu_max_glx_val;
 extern u64 amd_iommu_efr;
 extern u64 amd_iommu_efr2;
 
-/*
- * This function flushes all internal caches of
- * the IOMMU used by this driver.
- */
-void iommu_flush_all_caches(struct amd_iommu *iommu);
-
 static inline int get_ioapic_devid(int id)
 {
        struct devid_map *entry;
index 64bcf3df37ee5e3813f2bc71d19c6c18e9540cd2..c83bd0c2a1c9214df007c7ac29e31641adf23702 100644 (file)
@@ -2223,7 +2223,7 @@ static int __init amd_iommu_init_pci(void)
                init_device_table_dma(pci_seg);
 
        for_each_iommu(iommu)
-               iommu_flush_all_caches(iommu);
+               amd_iommu_flush_all_caches(iommu);
 
        print_iommu_info();
 
@@ -2773,7 +2773,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
        iommu_enable_xt(iommu);
        iommu_enable_irtcachedis(iommu);
        iommu_enable(iommu);
-       iommu_flush_all_caches(iommu);
+       amd_iommu_flush_all_caches(iommu);
 }
 
 /*
@@ -2829,7 +2829,7 @@ static void early_enable_iommus(void)
                        iommu_enable_xt(iommu);
                        iommu_enable_irtcachedis(iommu);
                        iommu_set_device_table(iommu);
-                       iommu_flush_all_caches(iommu);
+                       amd_iommu_flush_all_caches(iommu);
                }
        }
 }
@@ -3293,7 +3293,7 @@ static int __init state_next(void)
                                uninit_device_table_dma(pci_seg);
 
                        for_each_iommu(iommu)
-                               iommu_flush_all_caches(iommu);
+                               amd_iommu_flush_all_caches(iommu);
                }
        }
        return ret;
index 6c0621f6f572a4c4c0fb72ea1bdb5abe9d504311..2a0d1e97e52fdfe2375c3988c698260ae520cb9f 100644 (file)
@@ -369,6 +369,8 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
        bool updated = false;
        u64 __pte, *pte;
        int ret, i, count;
+       size_t size = pgcount << __ffs(pgsize);
+       unsigned long o_iova = iova;
 
        BUG_ON(!IS_ALIGNED(iova, pgsize));
        BUG_ON(!IS_ALIGNED(paddr, pgsize));
@@ -424,8 +426,7 @@ out:
                 * Updates and flushing already happened in
                 * increase_address_space().
                 */
-               amd_iommu_domain_flush_tlb_pde(dom);
-               amd_iommu_domain_flush_complete(dom);
+               amd_iommu_domain_flush_pages(dom, o_iova, size);
                spin_unlock_irqrestore(&dom->lock, flags);
        }
 
index f818a7e254d42627ebbd2d3154e290263af37421..6d69ba60744f063462ae8205284004de78d95ed2 100644 (file)
@@ -244,7 +244,6 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
        unsigned long mapped_size = 0;
        unsigned long o_iova = iova;
        size_t size = pgcount << __ffs(pgsize);
-       int count = 0;
        int ret = 0;
        bool updated = false;
 
@@ -265,19 +264,14 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 
                *pte = set_pte_attr(paddr, map_size, prot);
 
-               count++;
                iova += map_size;
                paddr += map_size;
                mapped_size += map_size;
        }
 
 out:
-       if (updated) {
-               if (count > 1)
-                       amd_iommu_flush_tlb(&pdom->domain, 0);
-               else
-                       amd_iommu_flush_page(&pdom->domain, 0, o_iova);
-       }
+       if (updated)
+               amd_iommu_domain_flush_pages(pdom, o_iova, size);
 
        if (mapped)
                *mapped += mapped_size;
index b9a0523cbb0a5cf4de628db0f13128599513be0c..4283dd8191f053e2c879fccd136f336eef5bb962 100644 (file)
@@ -64,7 +64,7 @@ LIST_HEAD(hpet_map);
 LIST_HEAD(acpihid_map);
 
 const struct iommu_ops amd_iommu_ops;
-const struct iommu_dirty_ops amd_dirty_ops;
+static const struct iommu_dirty_ops amd_dirty_ops;
 
 int amd_iommu_max_glx_val = -1;
 
@@ -85,6 +85,11 @@ static void detach_device(struct device *dev);
  *
  ****************************************************************************/
 
+static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
+{
+       return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
+}
+
 static inline int get_acpihid_device_id(struct device *dev,
                                        struct acpihid_map_entry **entry)
 {
@@ -551,8 +556,6 @@ static void amd_iommu_uninit_device(struct device *dev)
        if (dev_data->domain)
                detach_device(dev);
 
-       dev_iommu_priv_set(dev, NULL);
-
        /*
         * We keep dev_data around for unplugged devices and reuse it when the
         * device is re-plugged - not doing so would introduce a ton of races.
@@ -1124,68 +1127,44 @@ static inline u64 build_inv_address(u64 address, size_t size)
 }
 
 static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
-                                 size_t size, u16 domid, int pde)
+                                 size_t size, u16 domid,
+                                 ioasid_t pasid, bool gn)
 {
        u64 inv_address = build_inv_address(address, size);
 
        memset(cmd, 0, sizeof(*cmd));
+
        cmd->data[1] |= domid;
        cmd->data[2]  = lower_32_bits(inv_address);
        cmd->data[3]  = upper_32_bits(inv_address);
+       /* PDE bit - we want to flush everything, not only the PTEs */
+       cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+       if (gn) {
+               cmd->data[0] |= pasid;
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+       }
        CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-       if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
-               cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 }
 
 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
-                                 u64 address, size_t size)
+                                 u64 address, size_t size,
+                                 ioasid_t pasid, bool gn)
 {
        u64 inv_address = build_inv_address(address, size);
 
        memset(cmd, 0, sizeof(*cmd));
+
        cmd->data[0]  = devid;
        cmd->data[0] |= (qdep & 0xff) << 24;
        cmd->data[1]  = devid;
        cmd->data[2]  = lower_32_bits(inv_address);
        cmd->data[3]  = upper_32_bits(inv_address);
-       CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
-}
-
-static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
-                                 u64 address, bool size)
-{
-       memset(cmd, 0, sizeof(*cmd));
-
-       address &= ~(0xfffULL);
-
-       cmd->data[0]  = pasid;
-       cmd->data[1]  = domid;
-       cmd->data[2]  = lower_32_bits(address);
-       cmd->data[3]  = upper_32_bits(address);
-       cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
-       cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
-       if (size)
-               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-       CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-}
-
-static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
-                                 int qdep, u64 address, bool size)
-{
-       memset(cmd, 0, sizeof(*cmd));
-
-       address &= ~(0xfffULL);
+       if (gn) {
+               cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
+               cmd->data[1] |= (pasid & 0xff) << 16;
+               cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+       }
 
-       cmd->data[0]  = devid;
-       cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
-       cmd->data[0] |= (qdep  & 0xff) << 24;
-       cmd->data[1]  = devid;
-       cmd->data[1] |= (pasid & 0xff) << 16;
-       cmd->data[2]  = lower_32_bits(address);
-       cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
-       cmd->data[3]  = upper_32_bits(address);
-       if (size)
-               cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
        CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
 }
 
@@ -1341,7 +1320,7 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
        for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
                struct iommu_cmd cmd;
                build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-                                     dom_id, 1);
+                                     dom_id, IOMMU_NO_PASID, false);
                iommu_queue_command(iommu, &cmd);
        }
 
@@ -1353,7 +1332,7 @@ static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
        struct iommu_cmd cmd;
 
        build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-                             dom_id, 1);
+                             dom_id, IOMMU_NO_PASID, false);
        iommu_queue_command(iommu, &cmd);
 
        iommu_completion_wait(iommu);
@@ -1392,7 +1371,7 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
        iommu_completion_wait(iommu);
 }
 
-void iommu_flush_all_caches(struct amd_iommu *iommu)
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
 {
        if (check_feature(FEATURE_IA)) {
                amd_iommu_flush_all(iommu);
@@ -1406,8 +1385,8 @@ void iommu_flush_all_caches(struct amd_iommu *iommu)
 /*
  * Command send function for flushing on-device TLB
  */
-static int device_flush_iotlb(struct iommu_dev_data *dev_data,
-                             u64 address, size_t size)
+static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
+                             size_t size, ioasid_t pasid, bool gn)
 {
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;
@@ -1418,7 +1397,8 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
        if (!iommu)
                return -EINVAL;
 
-       build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
+       build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
+                             size, pasid, gn);
 
        return iommu_queue_command(iommu, &cmd);
 }
@@ -1464,8 +1444,11 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
                        return ret;
        }
 
-       if (dev_data->ats_enabled)
-               ret = device_flush_iotlb(dev_data, 0, ~0UL);
+       if (dev_data->ats_enabled) {
+               /* Invalidate the entire contents of an IOTLB */
+               ret = device_flush_iotlb(dev_data, 0, ~0UL,
+                                        IOMMU_NO_PASID, false);
+       }
 
        return ret;
 }
@@ -1476,13 +1459,18 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
  * page. Otherwise it flushes the whole TLB of the IOMMU.
  */
 static void __domain_flush_pages(struct protection_domain *domain,
-                                u64 address, size_t size, int pde)
+                                u64 address, size_t size)
 {
        struct iommu_dev_data *dev_data;
        struct iommu_cmd cmd;
        int ret = 0, i;
+       ioasid_t pasid = IOMMU_NO_PASID;
+       bool gn = false;
+
+       if (pdom_is_v2_pgtbl_mode(domain))
+               gn = true;
 
-       build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
+       build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
 
        for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
                if (!domain->dev_iommu[i])
@@ -1500,17 +1488,21 @@ static void __domain_flush_pages(struct protection_domain *domain,
                if (!dev_data->ats_enabled)
                        continue;
 
-               ret |= device_flush_iotlb(dev_data, address, size);
+               ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
        }
 
        WARN_ON(ret);
 }
 
-static void domain_flush_pages(struct protection_domain *domain,
-                              u64 address, size_t size, int pde)
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+                                 u64 address, size_t size)
 {
        if (likely(!amd_iommu_np_cache)) {
-               __domain_flush_pages(domain, address, size, pde);
+               __domain_flush_pages(domain, address, size);
+
+               /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+               amd_iommu_domain_flush_complete(domain);
+
                return;
        }
 
@@ -1543,16 +1535,20 @@ static void domain_flush_pages(struct protection_domain *domain,
 
                flush_size = 1ul << min_alignment;
 
-               __domain_flush_pages(domain, address, flush_size, pde);
+               __domain_flush_pages(domain, address, flush_size);
                address += flush_size;
                size -= flush_size;
        }
+
+       /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+       amd_iommu_domain_flush_complete(domain);
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
+static void amd_iommu_domain_flush_all(struct protection_domain *domain)
 {
-       domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+       amd_iommu_domain_flush_pages(domain, 0,
+                                    CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
 }
 
 void amd_iommu_domain_flush_complete(struct protection_domain *domain)
@@ -1579,8 +1575,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
                unsigned long flags;
 
                spin_lock_irqsave(&domain->lock, flags);
-               domain_flush_pages(domain, iova, size, 1);
-               amd_iommu_domain_flush_complete(domain);
+               amd_iommu_domain_flush_pages(domain, iova, size);
                spin_unlock_irqrestore(&domain->lock, flags);
        }
 }
@@ -1858,11 +1853,8 @@ static void do_detach(struct iommu_dev_data *dev_data)
        /* Flush the DTE entry */
        device_flush_dte(dev_data);
 
-       /* Flush IOTLB */
-       amd_iommu_domain_flush_tlb_pde(domain);
-
-       /* Wait for the flushes to finish */
-       amd_iommu_domain_flush_complete(domain);
+       /* Flush IOTLB and wait for the flushes to finish */
+       amd_iommu_domain_flush_all(domain);
 
        /* decrease reference counters - needs to happen after the flushes */
        domain->dev_iommu[iommu->index] -= 1;
@@ -1896,15 +1888,6 @@ static int attach_device(struct device *dev,
 
        do_attach(dev_data, domain);
 
-       /*
-        * We might boot into a crash-kernel here. The crashed kernel
-        * left the caches in the IOMMU dirty. So we have to flush
-        * here to evict all dirty stuff.
-        */
-       amd_iommu_domain_flush_tlb_pde(domain);
-
-       amd_iommu_domain_flush_complete(domain);
-
 out:
        spin_unlock(&dev_data->lock);
 
@@ -2048,8 +2031,7 @@ void amd_iommu_domain_update(struct protection_domain *domain)
        amd_iommu_update_and_flush_device_table(domain);
 
        /* Flush domain TLB(s) and wait for completion */
-       amd_iommu_domain_flush_tlb_pde(domain);
-       amd_iommu_domain_flush_complete(domain);
+       amd_iommu_domain_flush_all(domain);
 }
 
 /*****************************************************************************
@@ -2482,10 +2464,9 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
        }
 
        /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
-       if (domain_flush) {
-               amd_iommu_domain_flush_tlb_pde(pdomain);
-               amd_iommu_domain_flush_complete(pdomain);
-       }
+       if (domain_flush)
+               amd_iommu_domain_flush_all(pdomain);
+
        pdomain->dirty_tracking = enable;
        spin_unlock_irqrestore(&pdomain->lock, flags);
 
@@ -2588,8 +2569,7 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
        unsigned long flags;
 
        spin_lock_irqsave(&dom->lock, flags);
-       amd_iommu_domain_flush_tlb_pde(dom);
-       amd_iommu_domain_flush_complete(dom);
+       amd_iommu_domain_flush_all(dom);
        spin_unlock_irqrestore(&dom->lock, flags);
 }
 
@@ -2600,8 +2580,8 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
        unsigned long flags;
 
        spin_lock_irqsave(&dom->lock, flags);
-       domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
-       amd_iommu_domain_flush_complete(dom);
+       amd_iommu_domain_flush_pages(dom, gather->start,
+                                    gather->end - gather->start + 1);
        spin_unlock_irqrestore(&dom->lock, flags);
 }
 
@@ -2635,7 +2615,7 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
        return true;
 }
 
-const struct iommu_dirty_ops amd_dirty_ops = {
+static const struct iommu_dirty_ops amd_dirty_ops = {
        .set_dirty_tracking = amd_iommu_set_dirty_tracking,
        .read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
 };
@@ -2666,7 +2646,7 @@ const struct iommu_ops amd_iommu_ops = {
 };
 
 static int __flush_pasid(struct protection_domain *domain, u32 pasid,
-                        u64 address, bool size)
+                        u64 address, size_t size)
 {
        struct iommu_dev_data *dev_data;
        struct iommu_cmd cmd;
@@ -2675,7 +2655,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
        if (!(domain->flags & PD_IOMMUV2_MASK))
                return -EINVAL;
 
-       build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+       build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
 
        /*
         * IOMMU TLB needs to be flushed before Device TLB to
@@ -2709,8 +2689,8 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
                iommu = rlookup_amd_iommu(dev_data->dev);
                if (!iommu)
                        continue;
-               build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
-                                     qdep, address, size);
+               build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
+                                     address, size, pasid, true);
 
                ret = iommu_queue_command(iommu, &cmd);
                if (ret != 0)
@@ -2730,7 +2710,7 @@ out:
 static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
                                  u64 address)
 {
-       return __flush_pasid(domain, pasid, address, false);
+       return __flush_pasid(domain, pasid, address, PAGE_SIZE);
 }
 
 int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
@@ -2749,8 +2729,7 @@ int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
 
 static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
 {
-       return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-                            true);
+       return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
 }
 
 int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
@@ -3111,8 +3090,8 @@ out:
        return index;
 }
 
-static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
-                         struct irte_ga *irte)
+static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+                           struct irte_ga *irte)
 {
        struct irq_remap_table *table;
        struct irte_ga *entry;
@@ -3139,6 +3118,18 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
 
        raw_spin_unlock_irqrestore(&table->lock, flags);
 
+       return 0;
+}
+
+static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+                         struct irte_ga *irte)
+{
+       bool ret;
+
+       ret = __modify_irte_ga(iommu, devid, index, irte);
+       if (ret)
+               return ret;
+
        iommu_flush_irt_and_complete(iommu, devid);
 
        return 0;
@@ -3822,8 +3813,8 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
        }
        entry->lo.fields_vapic.is_run = is_run;
 
-       return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
-                             ir_data->irq_2_irte.index, entry);
+       return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+                               ir_data->irq_2_irte.index, entry);
 }
 EXPORT_SYMBOL(amd_iommu_update_ga);
 #endif
index ee05f4824bfad1d6515fd506e9c1c2fd6760e18f..ef3ee95706dac0997d18f9de6598f529df66064c 100644 (file)
@@ -81,6 +81,7 @@
 #define DART_T8020_TCR_BYPASS_DAPF      BIT(12)
 
 #define DART_T8020_TTBR       0x200
+#define DART_T8020_USB4_TTBR  0x400
 #define DART_T8020_TTBR_VALID BIT(31)
 #define DART_T8020_TTBR_ADDR_FIELD_SHIFT 0
 #define DART_T8020_TTBR_SHIFT 12
@@ -368,12 +369,14 @@ apple_dart_t8020_hw_stream_command(struct apple_dart_stream_map *stream_map,
                             u32 command)
 {
        unsigned long flags;
-       int ret;
+       int ret, i;
        u32 command_reg;
 
        spin_lock_irqsave(&stream_map->dart->lock, flags);
 
-       writel(stream_map->sidmap[0], stream_map->dart->regs + DART_T8020_STREAM_SELECT);
+       for (i = 0; i < BITS_TO_U32(stream_map->dart->num_streams); i++)
+               writel(stream_map->sidmap[i],
+                      stream_map->dart->regs + DART_T8020_STREAM_SELECT + 4 * i);
        writel(command, stream_map->dart->regs + DART_T8020_STREAM_COMMAND);
 
        ret = readl_poll_timeout_atomic(
@@ -740,7 +743,6 @@ static void apple_dart_release_device(struct device *dev)
 {
        struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
 
-       dev_iommu_priv_set(dev, NULL);
        kfree(cfg);
 }
 
@@ -908,7 +910,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
 
                ret = apple_dart_merge_master_cfg(group_master_cfg, cfg);
                if (ret) {
-                       dev_err(dev, "Failed to merge DART IOMMU grups.\n");
+                       dev_err(dev, "Failed to merge DART IOMMU groups.\n");
                        iommu_group_put(group);
                        res = ERR_PTR(ret);
                        goto out;
@@ -1215,6 +1217,33 @@ static const struct apple_dart_hw apple_dart_hw_t8103 = {
        .ttbr_shift = DART_T8020_TTBR_SHIFT,
        .ttbr_count = 4,
 };
+
+static const struct apple_dart_hw apple_dart_hw_t8103_usb4 = {
+       .type = DART_T8020,
+       .irq_handler = apple_dart_t8020_irq,
+       .invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb,
+       .oas = 36,
+       .fmt = APPLE_DART,
+       .max_sid_count = 64,
+
+       .enable_streams = DART_T8020_STREAMS_ENABLE,
+       .lock = DART_T8020_CONFIG,
+       .lock_bit = DART_T8020_CONFIG_LOCK,
+
+       .error = DART_T8020_ERROR,
+
+       .tcr = DART_T8020_TCR,
+       .tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE,
+       .tcr_disabled = 0,
+       .tcr_bypass = 0,
+
+       .ttbr = DART_T8020_USB4_TTBR,
+       .ttbr_valid = DART_T8020_TTBR_VALID,
+       .ttbr_addr_field_shift = DART_T8020_TTBR_ADDR_FIELD_SHIFT,
+       .ttbr_shift = DART_T8020_TTBR_SHIFT,
+       .ttbr_count = 4,
+};
+
 static const struct apple_dart_hw apple_dart_hw_t6000 = {
        .type = DART_T6000,
        .irq_handler = apple_dart_t8020_irq,
@@ -1272,7 +1301,7 @@ static __maybe_unused int apple_dart_suspend(struct device *dev)
        unsigned int sid, idx;
 
        for (sid = 0; sid < dart->num_streams; sid++) {
-               dart->save_tcr[sid] = readl_relaxed(dart->regs + DART_TCR(dart, sid));
+               dart->save_tcr[sid] = readl(dart->regs + DART_TCR(dart, sid));
                for (idx = 0; idx < dart->hw->ttbr_count; idx++)
                        dart->save_ttbr[sid][idx] =
                                readl(dart->regs + DART_TTBR(dart, sid, idx));
@@ -1307,6 +1336,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dar
 
 static const struct of_device_id apple_dart_of_match[] = {
        { .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
+       { .compatible = "apple,t8103-usb4-dart", .data = &apple_dart_hw_t8103_usb4 },
        { .compatible = "apple,t8110-dart", .data = &apple_dart_hw_t8110 },
        { .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
        {},
index 353248ab18e76d3ab1f07c894cfb903f7e424b83..05722121f00e70689680ce7a45cc5e953f50210b 100644 (file)
@@ -246,7 +246,8 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
                                                    smmu_domain);
        }
 
-       arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
+       arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), start,
+                               size);
 }
 
 static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -264,10 +265,11 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
         * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
         * but disable translation.
         */
-       arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, &quiet_cd);
+       arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
+                                        &quiet_cd);
 
        arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
-       arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
+       arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
 
        smmu_mn->cleared = true;
        mutex_unlock(&sva_lock);
@@ -325,10 +327,13 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-               ret = arm_smmu_write_ctx_desc(master, mm->pasid, cd);
+               ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
+                                             cd);
                if (ret) {
-                       list_for_each_entry_from_reverse(master, &smmu_domain->devices, domain_head)
-                               arm_smmu_write_ctx_desc(master, mm->pasid, NULL);
+                       list_for_each_entry_from_reverse(
+                               master, &smmu_domain->devices, domain_head)
+                               arm_smmu_write_ctx_desc(
+                                       master, mm_get_enqcmd_pasid(mm), NULL);
                        break;
                }
        }
@@ -358,7 +363,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 
        list_del(&smmu_mn->list);
 
-       arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, NULL);
+       arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
+                                        NULL);
 
        /*
         * If we went through clear(), we've already invalidated, and no
@@ -366,7 +372,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
         */
        if (!smmu_mn->cleared) {
                arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
-               arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
+               arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0,
+                                       0);
        }
 
        /* Frees smmu_mn */
index 7445454c2af244f03b9274db12e3e4dd325e31ab..0ffb1cf17e0b2e6687b1c5ff12ff87405e2552b6 100644 (file)
@@ -1063,6 +1063,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
        bool cd_live;
        __le64 *cdptr;
        struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+       struct arm_smmu_device *smmu = master->smmu;
 
        if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
                return -E2BIG;
@@ -1077,6 +1078,8 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
        if (!cd) { /* (5) */
                val = 0;
        } else if (cd == &quiet_cd) { /* (4) */
+               if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
+                       val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);
                val |= CTXDESC_CD_0_TCR_EPD0;
        } else if (cd_live) { /* (3) */
                val &= ~CTXDESC_CD_0_ASID;
@@ -1249,7 +1252,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
 }
 
 static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
-                                     __le64 *dst)
+                                     struct arm_smmu_ste *dst)
 {
        /*
         * This is hideously complicated, but we only really care about
@@ -1267,12 +1270,12 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
         * 2. Write everything apart from dword 0, sync, write dword 0, sync
         * 3. Update Config, sync
         */
-       u64 val = le64_to_cpu(dst[0]);
+       u64 val = le64_to_cpu(dst->data[0]);
        bool ste_live = false;
-       struct arm_smmu_device *smmu = NULL;
+       struct arm_smmu_device *smmu = master->smmu;
        struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
        struct arm_smmu_s2_cfg *s2_cfg = NULL;
-       struct arm_smmu_domain *smmu_domain = NULL;
+       struct arm_smmu_domain *smmu_domain = master->domain;
        struct arm_smmu_cmdq_ent prefetch_cmd = {
                .opcode         = CMDQ_OP_PREFETCH_CFG,
                .prefetch       = {
@@ -1280,18 +1283,12 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                },
        };
 
-       if (master) {
-               smmu_domain = master->domain;
-               smmu = master->smmu;
-       }
-
        if (smmu_domain) {
                switch (smmu_domain->stage) {
                case ARM_SMMU_DOMAIN_S1:
                        cd_table = &master->cd_table;
                        break;
                case ARM_SMMU_DOMAIN_S2:
-               case ARM_SMMU_DOMAIN_NESTED:
                        s2_cfg = &smmu_domain->s2_cfg;
                        break;
                default:
@@ -1325,10 +1322,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                else
                        val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
 
-               dst[0] = cpu_to_le64(val);
-               dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+               dst->data[0] = cpu_to_le64(val);
+               dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
                                                STRTAB_STE_1_SHCFG_INCOMING));
-               dst[2] = 0; /* Nuke the VMID */
+               dst->data[2] = 0; /* Nuke the VMID */
                /*
                 * The SMMU can perform negative caching, so we must sync
                 * the STE regardless of whether the old value was live.
@@ -1343,7 +1340,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                        STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
 
                BUG_ON(ste_live);
-               dst[1] = cpu_to_le64(
+               dst->data[1] = cpu_to_le64(
                         FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
                         FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
                         FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
@@ -1352,7 +1349,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 
                if (smmu->features & ARM_SMMU_FEAT_STALLS &&
                    !master->stall_enabled)
-                       dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
+                       dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
 
                val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
                        FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
@@ -1362,7 +1359,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 
        if (s2_cfg) {
                BUG_ON(ste_live);
-               dst[2] = cpu_to_le64(
+               dst->data[2] = cpu_to_le64(
                         FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
                         FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
 #ifdef __BIG_ENDIAN
@@ -1371,18 +1368,18 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                         STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
                         STRTAB_STE_2_S2R);
 
-               dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
+               dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
 
                val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
        }
 
        if (master->ats_enabled)
-               dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
+               dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
                                                 STRTAB_STE_1_EATS_TRANS));
 
        arm_smmu_sync_ste_for_sid(smmu, sid);
        /* See comment in arm_smmu_write_ctx_desc() */
-       WRITE_ONCE(dst[0], cpu_to_le64(val));
+       WRITE_ONCE(dst->data[0], cpu_to_le64(val));
        arm_smmu_sync_ste_for_sid(smmu, sid);
 
        /* It's likely that we'll want to use the new STE soon */
@@ -1390,7 +1387,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
 }
 
-static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
+static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
+                                     unsigned int nent, bool force)
 {
        unsigned int i;
        u64 val = STRTAB_STE_0_V;
@@ -1401,11 +1399,11 @@ static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool fo
                val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
 
        for (i = 0; i < nent; ++i) {
-               strtab[0] = cpu_to_le64(val);
-               strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
-                                                  STRTAB_STE_1_SHCFG_INCOMING));
-               strtab[2] = 0;
-               strtab += STRTAB_STE_DWORDS;
+               strtab->data[0] = cpu_to_le64(val);
+               strtab->data[1] = cpu_to_le64(FIELD_PREP(
+                       STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+               strtab->data[2] = 0;
+               strtab++;
        }
 }
 
@@ -2171,7 +2169,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
                fmt = ARM_64_LPAE_S1;
                finalise_stage_fn = arm_smmu_domain_finalise_s1;
                break;
-       case ARM_SMMU_DOMAIN_NESTED:
        case ARM_SMMU_DOMAIN_S2:
                ias = smmu->ias;
                oas = smmu->oas;
@@ -2209,26 +2206,23 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
        return 0;
 }
 
-static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+static struct arm_smmu_ste *
+arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
 {
-       __le64 *step;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
        if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
-               struct arm_smmu_strtab_l1_desc *l1_desc;
-               int idx;
+               unsigned int idx1, idx2;
 
                /* Two-level walk */
-               idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
-               l1_desc = &cfg->l1_desc[idx];
-               idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
-               step = &l1_desc->l2ptr[idx];
+               idx1 = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
+               idx2 = sid & ((1 << STRTAB_SPLIT) - 1);
+               return &cfg->l1_desc[idx1].l2ptr[idx2];
        } else {
                /* Simple linear lookup */
-               step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
+               return (struct arm_smmu_ste *)&cfg
+                              ->strtab[sid * STRTAB_STE_DWORDS];
        }
-
-       return step;
 }
 
 static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
@@ -2238,7 +2232,8 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
 
        for (i = 0; i < master->num_streams; ++i) {
                u32 sid = master->streams[i].id;
-               __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+               struct arm_smmu_ste *step =
+                       arm_smmu_get_step_for_sid(smmu, sid);
 
                /* Bridged PCI devices may end up with duplicated IDs */
                for (j = 0; j < i; j++)
@@ -2649,9 +2644,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
        struct arm_smmu_master *master;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 
-       if (!fwspec || fwspec->ops != &arm_smmu_ops)
-               return ERR_PTR(-ENODEV);
-
        if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
                return ERR_PTR(-EBUSY);
 
@@ -2698,7 +2690,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
 
 err_free_master:
        kfree(master);
-       dev_iommu_priv_set(dev, NULL);
        return ERR_PTR(ret);
 }
 
@@ -2742,7 +2733,7 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain)
        if (smmu_domain->smmu)
                ret = -EPERM;
        else
-               smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+               smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
        mutex_unlock(&smmu_domain->init_mutex);
 
        return ret;
@@ -3769,7 +3760,7 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
        iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
 
        list_for_each_entry(e, &rmr_list, list) {
-               __le64 *step;
+               struct arm_smmu_ste *step;
                struct iommu_iort_rmr_data *rmr;
                int ret, i;
 
index 925ac6a47bceb7b15cd184741d478dc3675d5f09..65fb388d51734d677bf6112090c6eaf94bf5966e 100644 (file)
 #define STRTAB_L1_DESC_L2PTR_MASK      GENMASK_ULL(51, 6)
 
 #define STRTAB_STE_DWORDS              8
+
+struct arm_smmu_ste {
+       __le64 data[STRTAB_STE_DWORDS];
+};
+
 #define STRTAB_STE_0_V                 (1UL << 0)
 #define STRTAB_STE_0_CFG               GENMASK_ULL(3, 1)
 #define STRTAB_STE_0_CFG_ABORT         0
@@ -571,7 +576,7 @@ struct arm_smmu_priq {
 struct arm_smmu_strtab_l1_desc {
        u8                              span;
 
-       __le64                          *l2ptr;
+       struct arm_smmu_ste             *l2ptr;
        dma_addr_t                      l2ptr_dma;
 };
 
@@ -710,7 +715,6 @@ struct arm_smmu_master {
 enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
        ARM_SMMU_DOMAIN_S2,
-       ARM_SMMU_DOMAIN_NESTED,
        ARM_SMMU_DOMAIN_BYPASS,
 };
 
index 549ae4dba3a681b08832d00bf5057f7d803fc06c..8b04ece00420dd4ed61a63053802f4ffe8248099 100644 (file)
@@ -243,8 +243,10 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
 
 static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
        { .compatible = "qcom,adreno" },
+       { .compatible = "qcom,adreno-gmu" },
        { .compatible = "qcom,mdp4" },
        { .compatible = "qcom,mdss" },
+       { .compatible = "qcom,qcm2290-mdss" },
        { .compatible = "qcom,sc7180-mdss" },
        { .compatible = "qcom,sc7180-mss-pil" },
        { .compatible = "qcom,sc7280-mdss" },
index d6d1a2a55cc0692fb02f0f58b901ac438c71604c..68b6bc5e7c71016b8d58a6a077e921b27fb51447 100644 (file)
@@ -82,6 +82,23 @@ static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
                pm_runtime_put_autosuspend(smmu->dev);
 }
 
+static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
+{
+       /*
+        * Setup an autosuspend delay to avoid bouncing runpm state.
+        * Otherwise, if a driver for a suspended consumer device
+        * unmaps buffers, it will runpm resume/suspend for each one.
+        *
+        * For example, when used by a GPU device, when an application
+        * or game exits, it can trigger unmapping 100s or 1000s of
+        * buffers.  With a runpm cycle for each buffer, that adds up
+        * to 5-10sec worth of reprogramming the context bank, while
+        * the system appears to be locked up to the user.
+        */
+       pm_runtime_set_autosuspend_delay(smmu->dev, 20);
+       pm_runtime_use_autosuspend(smmu->dev);
+}
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
        return container_of(dom, struct arm_smmu_domain, domain);
@@ -392,8 +409,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
        u32 fsr, fsynr, cbfrsynra;
        unsigned long iova;
-       struct iommu_domain *domain = dev;
-       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+       struct arm_smmu_domain *smmu_domain = dev;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        int idx = smmu_domain->cfg.cbndx;
        int ret;
@@ -406,7 +422,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
        iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
        cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
 
-       ret = report_iommu_fault(domain, NULL, iova,
+       ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
                fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
 
        if (ret == -ENOSYS)
@@ -607,7 +623,7 @@ static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
        return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
 }
 
-static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
                                        struct arm_smmu_device *smmu,
                                        struct device *dev)
 {
@@ -616,7 +632,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        struct io_pgtable_ops *pgtbl_ops;
        struct io_pgtable_cfg pgtbl_cfg;
        enum io_pgtable_fmt fmt;
-       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+       struct iommu_domain *domain = &smmu_domain->domain;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        irqreturn_t (*context_fault)(int irq, void *dev);
 
@@ -624,12 +640,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        if (smmu_domain->smmu)
                goto out_unlock;
 
-       if (domain->type == IOMMU_DOMAIN_IDENTITY) {
-               smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
-               smmu_domain->smmu = smmu;
-               goto out_unlock;
-       }
-
        /*
         * Mapping the requested stage onto what we support is surprisingly
         * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -796,8 +806,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        else
                context_fault = arm_smmu_context_fault;
 
-       ret = devm_request_irq(smmu->dev, irq, context_fault,
-                              IRQF_SHARED, "arm-smmu-context-fault", domain);
+       ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
+                              "arm-smmu-context-fault", smmu_domain);
        if (ret < 0) {
                dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
                        cfg->irptndx, irq);
@@ -818,14 +828,13 @@ out_unlock:
        return ret;
 }
 
-static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
 {
-       struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        int ret, irq;
 
-       if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
+       if (!smmu)
                return;
 
        ret = arm_smmu_rpm_get(smmu);
@@ -841,7 +850,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 
        if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
                irq = smmu->irqs[cfg->irptndx];
-               devm_free_irq(smmu->dev, irq, domain);
+               devm_free_irq(smmu->dev, irq, smmu_domain);
        }
 
        free_io_pgtable_ops(smmu_domain->pgtbl_ops);
@@ -850,14 +859,10 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        arm_smmu_rpm_put(smmu);
 }
 
-static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
 {
        struct arm_smmu_domain *smmu_domain;
 
-       if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) {
-               if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
-                       return NULL;
-       }
        /*
         * Allocate the domain and initialise some of its data structures.
         * We can't really do anything meaningful until we've added a
@@ -870,6 +875,15 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
        mutex_init(&smmu_domain->init_mutex);
        spin_lock_init(&smmu_domain->cb_lock);
 
+       if (dev) {
+               struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+
+               if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
+                       kfree(smmu_domain);
+                       return NULL;
+               }
+       }
+
        return &smmu_domain->domain;
 }
 
@@ -881,7 +895,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
         * Free the domain resources. We assume that all devices have
         * already been detached.
         */
-       arm_smmu_destroy_domain_context(domain);
+       arm_smmu_destroy_domain_context(smmu_domain);
        kfree(smmu_domain);
 }
 
@@ -1081,21 +1095,14 @@ static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
        mutex_unlock(&smmu->stream_map_mutex);
 }
 
-static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
-                                     struct arm_smmu_master_cfg *cfg,
-                                     struct iommu_fwspec *fwspec)
+static void arm_smmu_master_install_s2crs(struct arm_smmu_master_cfg *cfg,
+                                         enum arm_smmu_s2cr_type type,
+                                         u8 cbndx, struct iommu_fwspec *fwspec)
 {
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_device *smmu = cfg->smmu;
        struct arm_smmu_s2cr *s2cr = smmu->s2crs;
-       u8 cbndx = smmu_domain->cfg.cbndx;
-       enum arm_smmu_s2cr_type type;
        int i, idx;
 
-       if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
-               type = S2CR_TYPE_BYPASS;
-       else
-               type = S2CR_TYPE_TRANS;
-
        for_each_cfg_sme(cfg, fwspec, i, idx) {
                if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
                        continue;
@@ -1105,7 +1112,6 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
                s2cr[idx].cbndx = cbndx;
                arm_smmu_write_s2cr(smmu, idx);
        }
-       return 0;
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1116,11 +1122,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
        struct arm_smmu_device *smmu;
        int ret;
 
-       if (!fwspec || fwspec->ops != &arm_smmu_ops) {
-               dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
-               return -ENXIO;
-       }
-
        /*
         * FIXME: The arch/arm DMA API code tries to attach devices to its own
         * domains between of_xlate() and probe_device() - we have no way to cope
@@ -1139,7 +1140,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                return ret;
 
        /* Ensure that the domain is finalised */
-       ret = arm_smmu_init_domain_context(domain, smmu, dev);
+       ret = arm_smmu_init_domain_context(smmu_domain, smmu, dev);
        if (ret < 0)
                goto rpm_put;
 
@@ -1153,27 +1154,66 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
        }
 
        /* Looks ok, so add the device to the domain */
-       ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
-
-       /*
-        * Setup an autosuspend delay to avoid bouncing runpm state.
-        * Otherwise, if a driver for a suspended consumer device
-        * unmaps buffers, it will runpm resume/suspend for each one.
-        *
-        * For example, when used by a GPU device, when an application
-        * or game exits, it can trigger unmapping 100s or 1000s of
-        * buffers.  With a runpm cycle for each buffer, that adds up
-        * to 5-10sec worth of reprogramming the context bank, while
-        * the system appears to be locked up to the user.
-        */
-       pm_runtime_set_autosuspend_delay(smmu->dev, 20);
-       pm_runtime_use_autosuspend(smmu->dev);
-
+       arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
+                                     smmu_domain->cfg.cbndx, fwspec);
+       arm_smmu_rpm_use_autosuspend(smmu);
 rpm_put:
        arm_smmu_rpm_put(smmu);
        return ret;
 }
 
+static int arm_smmu_attach_dev_type(struct device *dev,
+                                   enum arm_smmu_s2cr_type type)
+{
+       struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+       struct arm_smmu_device *smmu;
+       int ret;
+
+       if (!cfg)
+               return -ENODEV;
+       smmu = cfg->smmu;
+
+       ret = arm_smmu_rpm_get(smmu);
+       if (ret < 0)
+               return ret;
+
+       arm_smmu_master_install_s2crs(cfg, type, 0, fwspec);
+       arm_smmu_rpm_use_autosuspend(smmu);
+       arm_smmu_rpm_put(smmu);
+       return 0;
+}
+
+static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
+                                       struct device *dev)
+{
+       return arm_smmu_attach_dev_type(dev, S2CR_TYPE_BYPASS);
+}
+
+static const struct iommu_domain_ops arm_smmu_identity_ops = {
+       .attach_dev = arm_smmu_attach_dev_identity,
+};
+
+static struct iommu_domain arm_smmu_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &arm_smmu_identity_ops,
+};
+
+static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
+                                      struct device *dev)
+{
+       return arm_smmu_attach_dev_type(dev, S2CR_TYPE_FAULT);
+}
+
+static const struct iommu_domain_ops arm_smmu_blocked_ops = {
+       .attach_dev = arm_smmu_attach_dev_blocked,
+};
+
+static struct iommu_domain arm_smmu_blocked_domain = {
+       .type = IOMMU_DOMAIN_BLOCKED,
+       .ops = &arm_smmu_blocked_ops,
+};
+
 static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
                              phys_addr_t paddr, size_t pgsize, size_t pgcount,
                              int prot, gfp_t gfp, size_t *mapped)
@@ -1357,10 +1397,8 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
                fwspec = dev_iommu_fwspec_get(dev);
                if (ret)
                        goto out_free;
-       } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
-               smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
        } else {
-               return ERR_PTR(-ENODEV);
+               smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
        }
 
        ret = -EINVAL;
@@ -1427,7 +1465,6 @@ static void arm_smmu_release_device(struct device *dev)
 
        arm_smmu_rpm_put(cfg->smmu);
 
-       dev_iommu_priv_set(dev, NULL);
        kfree(cfg);
 }
 
@@ -1560,8 +1597,10 @@ static int arm_smmu_def_domain_type(struct device *dev)
 }
 
 static struct iommu_ops arm_smmu_ops = {
+       .identity_domain        = &arm_smmu_identity_domain,
+       .blocked_domain         = &arm_smmu_blocked_domain,
        .capable                = arm_smmu_capable,
-       .domain_alloc           = arm_smmu_domain_alloc,
+       .domain_alloc_paging    = arm_smmu_domain_alloc_paging,
        .probe_device           = arm_smmu_probe_device,
        .release_device         = arm_smmu_release_device,
        .probe_finalize         = arm_smmu_probe_finalize,
@@ -2161,7 +2200,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                return err;
        }
 
-       err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
+       err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
+                                   using_legacy_binding ? NULL : dev);
        if (err) {
                dev_err(dev, "Failed to register iommu\n");
                iommu_device_sysfs_remove(&smmu->iommu);
index 703fd5817ec11f401e9eed0286c39faa655204ee..836ed6799a801fda916207cabb8b289604352b1f 100644 (file)
@@ -361,7 +361,6 @@ enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
        ARM_SMMU_DOMAIN_S2,
        ARM_SMMU_DOMAIN_NESTED,
-       ARM_SMMU_DOMAIN_BYPASS,
 };
 
 struct arm_smmu_domain {
index 97b2122032b2371915047aa03d3118005fc49496..17a1c163fef660397f9e39f252563952cf9bd10a 100644 (file)
@@ -79,16 +79,6 @@ static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
 
 static const struct iommu_ops qcom_iommu_ops;
 
-static struct qcom_iommu_dev * to_iommu(struct device *dev)
-{
-       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
-       if (!fwspec || fwspec->ops != &qcom_iommu_ops)
-               return NULL;
-
-       return dev_iommu_priv_get(dev);
-}
-
 static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
 {
        struct qcom_iommu_dev *qcom_iommu = d->iommu;
@@ -372,7 +362,7 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
 
 static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-       struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+       struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        int ret;
 
@@ -404,7 +394,7 @@ static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain,
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct qcom_iommu_domain *qcom_domain;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-       struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+       struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
        unsigned int i;
 
        if (domain == identity_domain || !domain)
@@ -535,7 +525,7 @@ static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
 
 static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
 {
-       struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+       struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
        struct device_link *link;
 
        if (!qcom_iommu)
@@ -900,8 +890,16 @@ static void qcom_iommu_device_remove(struct platform_device *pdev)
 static int __maybe_unused qcom_iommu_resume(struct device *dev)
 {
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
+       if (ret < 0)
+               return ret;
+
+       if (dev->pm_domain)
+               return qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, 0);
 
-       return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
+       return ret;
 }
 
 static int __maybe_unused qcom_iommu_suspend(struct device *dev)
index e59f50e11ea8bbc569714c238b0436d29a8380e6..50ccc4f1ef81c8e8a3a73bceaeca66e7b4613b04 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/spinlock.h>
 #include <linux/swiotlb.h>
 #include <linux/vmalloc.h>
+#include <trace/events/swiotlb.h>
 
 #include "dma-iommu.h"
 
@@ -1156,6 +1157,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                        return DMA_MAPPING_ERROR;
                }
 
+               trace_swiotlb_bounced(dev, phys, size);
+
                aligned_size = iova_align(iovad, size);
                phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
                                              iova_mask(iovad), dir, attrs);
index dee61e513be6d44f72d5b2c7071bf4a30e696e5f..86b506af7daa1418e70fb87bded66181248a1ec4 100644 (file)
@@ -106,9 +106,6 @@ static const struct iommu_regset iommu_regs_64[] = {
        IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
        IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
        IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
-       IOMMU_REGSET_ENTRY(VCCAP),
-       IOMMU_REGSET_ENTRY(VCMD),
-       IOMMU_REGSET_ENTRY(VCRSP),
 };
 
 static struct dentry *intel_iommu_debug;
index 897159dba47de4f863f57b365bd025cda0c8cd57..6fb5f6fceea11fb7865d92d8451a5de98a655556 100644 (file)
@@ -46,9 +46,6 @@
 
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
 
-#define MAX_AGAW_WIDTH 64
-#define MAX_AGAW_PFN_WIDTH     (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
-
 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
 
 
 #define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
 
-/* page table handling */
-#define LEVEL_STRIDE           (9)
-#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
-       return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
-       return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
-}
-
-static inline int width_to_agaw(int width)
-{
-       return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
-       return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(u64 pfn, int level)
-{
-       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline u64 level_mask(int level)
-{
-       return -1ULL << level_to_offset_bits(level);
-}
-
-static inline u64 level_size(int level)
-{
-       return 1ULL << level_to_offset_bits(level);
-}
-
-static inline u64 align_to_level(u64 pfn, int level)
-{
-       return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
-static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
-{
-       return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
-}
-
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
-   are never going to work. */
-static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
-{
-       return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
-{
-       return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
-}
-static inline unsigned long page_to_dma_pfn(struct page *pg)
-{
-       return mm_to_dma_pfn_start(page_to_pfn(pg));
-}
-static inline unsigned long virt_to_dma_pfn(void *p)
-{
-       return page_to_dma_pfn(virt_to_page(p));
-}
-
 static void __init check_tylersburg_isoch(void);
 static int rwbf_quirk;
 
@@ -168,78 +97,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
        return re->hi & VTD_PAGE_MASK;
 }
 
-static inline void context_set_present(struct context_entry *context)
-{
-       context->lo |= 1;
-}
-
-static inline void context_set_fault_enable(struct context_entry *context)
-{
-       context->lo &= (((u64)-1) << 2) | 1;
-}
-
-static inline void context_set_translation_type(struct context_entry *context,
-                                               unsigned long value)
-{
-       context->lo &= (((u64)-1) << 4) | 3;
-       context->lo |= (value & 3) << 2;
-}
-
-static inline void context_set_address_root(struct context_entry *context,
-                                           unsigned long value)
-{
-       context->lo &= ~VTD_PAGE_MASK;
-       context->lo |= value & VTD_PAGE_MASK;
-}
-
-static inline void context_set_address_width(struct context_entry *context,
-                                            unsigned long value)
-{
-       context->hi |= value & 7;
-}
-
-static inline void context_set_domain_id(struct context_entry *context,
-                                        unsigned long value)
-{
-       context->hi |= (value & ((1 << 16) - 1)) << 8;
-}
-
-static inline void context_set_pasid(struct context_entry *context)
-{
-       context->lo |= CONTEXT_PASIDE;
-}
-
-static inline int context_domain_id(struct context_entry *c)
-{
-       return((c->hi >> 8) & 0xffff);
-}
-
-static inline void context_clear_entry(struct context_entry *context)
-{
-       context->lo = 0;
-       context->hi = 0;
-}
-
-static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
-       if (!iommu->copied_tables)
-               return false;
-
-       return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
-}
-
-static inline void
-set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
-       set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
-}
-
-static inline void
-clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
-       clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
-}
-
 /*
  * This domain is a statically identity mapping domain.
  *     1. This domain creats a static 1:1 mapping to all usable memory.
@@ -383,13 +240,12 @@ void free_pgtable_page(void *vaddr)
        free_page((unsigned long)vaddr);
 }
 
-static inline int domain_type_is_si(struct dmar_domain *domain)
+static int domain_type_is_si(struct dmar_domain *domain)
 {
        return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
 }
 
-static inline int domain_pfn_supported(struct dmar_domain *domain,
-                                      unsigned long pfn)
+static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
 {
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 
@@ -451,7 +307,7 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
        return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
 }
 
-static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+static bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
 {
        return sm_supported(iommu) ?
                        ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
@@ -703,7 +559,7 @@ static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
        return false;
 }
 
-struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
+static struct intel_iommu *device_lookup_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
        struct dmar_drhd_unit *drhd = NULL;
        struct pci_dev *pdev = NULL;
@@ -1574,9 +1430,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 }
 
 /* Notification for newly created mappings */
-static inline void __mapping_notify_one(struct intel_iommu *iommu,
-                                       struct dmar_domain *domain,
-                                       unsigned long pfn, unsigned int pages)
+static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain,
+                                unsigned long pfn, unsigned int pages)
 {
        /*
         * It's a non-present to present mapping. Only flush if caching mode
@@ -1843,7 +1698,7 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
        spin_unlock(&iommu->lock);
 }
 
-static inline int guestwidth_to_adjustwidth(int gaw)
+static int guestwidth_to_adjustwidth(int gaw)
 {
        int agaw;
        int r = (gaw - 12) % 9;
@@ -1877,7 +1732,7 @@ static void domain_exit(struct dmar_domain *domain)
  * Value of X in the PDTS field of a scalable mode context entry
  * indicates PASID directory with 2^(X + 7) entries.
  */
-static inline unsigned long context_get_sm_pds(struct pasid_table *table)
+static unsigned long context_get_sm_pds(struct pasid_table *table)
 {
        unsigned long pds, max_pde;
 
@@ -1889,38 +1744,6 @@ static inline unsigned long context_get_sm_pds(struct pasid_table *table)
        return pds - 7;
 }
 
-/*
- * Set the RID_PASID field of a scalable mode context entry. The
- * IOMMU hardware will use the PASID value set in this field for
- * DMA translations of DMA requests without PASID.
- */
-static inline void
-context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
-{
-       context->hi |= pasid & ((1 << 20) - 1);
-}
-
-/*
- * Set the DTE(Device-TLB Enable) field of a scalable mode context
- * entry.
- */
-static inline void context_set_sm_dte(struct context_entry *context)
-{
-       context->lo |= BIT_ULL(2);
-}
-
-/*
- * Set the PRE(Page Request Enable) field of a scalable mode context
- * entry.
- */
-static inline void context_set_sm_pre(struct context_entry *context)
-{
-       context->lo |= BIT_ULL(4);
-}
-
-/* Convert value to context PASID directory size field coding. */
-#define context_pdts(pds)      (((pds) & 0x7) << 9)
-
 static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
                                      struct pasid_table *table,
@@ -2081,14 +1904,11 @@ static int domain_context_mapping_cb(struct pci_dev *pdev,
 static int
 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
+       struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct domain_context_mapping_data data;
+       struct intel_iommu *iommu = info->iommu;
+       u8 bus = info->bus, devfn = info->devfn;
        struct pasid_table *table;
-       struct intel_iommu *iommu;
-       u8 bus, devfn;
-
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if (!iommu)
-               return -ENODEV;
 
        table = intel_pasid_get_table(dev);
 
@@ -2105,18 +1925,15 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 }
 
 /* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-                                           size_t size)
+static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
 {
        host_addr &= ~PAGE_MASK;
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
 /* Return largest possible superpage level for a given mapping */
-static inline int hardware_largepage_caps(struct dmar_domain *domain,
-                                         unsigned long iov_pfn,
-                                         unsigned long phy_pfn,
-                                         unsigned long pages)
+static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
+                                  unsigned long phy_pfn, unsigned long pages)
 {
        int support, level = 1;
        unsigned long pfnmerge;
@@ -2449,15 +2266,10 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
                                     struct device *dev)
 {
        struct device_domain_info *info = dev_iommu_priv_get(dev);
-       struct intel_iommu *iommu;
+       struct intel_iommu *iommu = info->iommu;
        unsigned long flags;
-       u8 bus, devfn;
        int ret;
 
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if (!iommu)
-               return -ENODEV;
-
        ret = domain_attach_iommu(domain, iommu);
        if (ret)
                return ret;
@@ -2470,7 +2282,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
        if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
                /* Setup the PASID entry for requests without PASID: */
                if (hw_pass_through && domain_type_is_si(domain))
-                       ret = intel_pasid_setup_pass_through(iommu, domain,
+                       ret = intel_pasid_setup_pass_through(iommu,
                                        dev, IOMMU_NO_PASID);
                else if (domain->use_first_level)
                        ret = domain_setup_first_level(iommu, domain, dev,
@@ -3615,7 +3427,7 @@ void intel_iommu_shutdown(void)
        up_write(&dmar_global_lock);
 }
 
-static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
+static struct intel_iommu *dev_to_intel_iommu(struct device *dev)
 {
        struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
 
@@ -3694,7 +3506,7 @@ const struct attribute_group *intel_iommu_groups[] = {
        NULL,
 };
 
-static inline bool has_external_pci(void)
+static bool has_external_pci(void)
 {
        struct pci_dev *pdev = NULL;
 
@@ -4119,14 +3931,11 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
 int prepare_domain_attach_device(struct iommu_domain *domain,
                                 struct device *dev)
 {
+       struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-       struct intel_iommu *iommu;
+       struct intel_iommu *iommu = info->iommu;
        int addr_width;
 
-       iommu = device_to_iommu(dev, NULL, NULL);
-       if (!iommu)
-               return -ENODEV;
-
        if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
                return -EINVAL;
 
@@ -4403,7 +4212,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
        u8 bus, devfn;
        int ret;
 
-       iommu = device_to_iommu(dev, &bus, &devfn);
+       iommu = device_lookup_iommu(dev, &bus, &devfn);
        if (!iommu || !iommu->iommu.ops)
                return ERR_PTR(-ENODEV);
 
@@ -4461,7 +4270,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
                ret = intel_pasid_alloc_table(dev);
                if (ret) {
                        dev_err(dev, "PASID table allocation failed\n");
-                       dev_iommu_priv_set(dev, NULL);
                        kfree(info);
                        return ERR_PTR(ret);
                }
@@ -4479,7 +4287,6 @@ static void intel_iommu_release_device(struct device *dev)
        dmar_remove_one_dev_info(dev);
        intel_pasid_free_table(dev);
        intel_iommu_debugfs_remove_dev(info);
-       dev_iommu_priv_set(dev, NULL);
        kfree(info);
        set_dma_ops(dev, NULL);
 }
@@ -4739,8 +4546,9 @@ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 
 static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 {
-       struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+       struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct dev_pasid_info *curr, *dev_pasid = NULL;
+       struct intel_iommu *iommu = info->iommu;
        struct dmar_domain *dmar_domain;
        struct iommu_domain *domain;
        unsigned long flags;
@@ -4811,8 +4619,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
                goto out_free;
 
        if (domain_type_is_si(dmar_domain))
-               ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
-                                                    dev, pasid);
+               ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
        else if (dmar_domain->use_first_level)
                ret = domain_setup_first_level(iommu, dmar_domain,
                                               dev, pasid);
index ce030c5b5772abadabd3f4a11fb1e443ffe80599..d02f916d8e59a914d2441fa2b81af9ac31dfbf86 100644 (file)
 #define DMAR_ECEO_REG          0x408
 #define DMAR_ECRSP_REG         0x410
 #define DMAR_ECCAP_REG         0x430
-#define DMAR_VCCAP_REG         0xe30 /* Virtual command capability register */
-#define DMAR_VCMD_REG          0xe00 /* Virtual command register */
-#define DMAR_VCRSP_REG         0xe10 /* Virtual command response register */
 
 #define DMAR_IQER_REG_IQEI(reg)                FIELD_GET(GENMASK_ULL(3, 0), reg)
 #define DMAR_IQER_REG_ITESID(reg)      FIELD_GET(GENMASK_ULL(47, 32), reg)
@@ -854,6 +851,181 @@ static inline bool context_present(struct context_entry *context)
        return (context->lo & 1);
 }
 
+#define LEVEL_STRIDE           (9)
+#define LEVEL_MASK             (((u64)1 << LEVEL_STRIDE) - 1)
+#define MAX_AGAW_WIDTH         (64)
+#define MAX_AGAW_PFN_WIDTH     (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
+
+static inline int agaw_to_level(int agaw)
+{
+       return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+       return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
+}
+
+static inline int width_to_agaw(int width)
+{
+       return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+       return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(u64 pfn, int level)
+{
+       return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline u64 level_mask(int level)
+{
+       return -1ULL << level_to_offset_bits(level);
+}
+
+static inline u64 level_size(int level)
+{
+       return 1ULL << level_to_offset_bits(level);
+}
+
+static inline u64 align_to_level(u64 pfn, int level)
+{
+       return (pfn + level_size(level) - 1) & level_mask(level);
+}
+
+static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
+{
+       return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
+}
+
+/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+   are never going to work. */
+static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
+{
+       return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
+{
+       return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
+}
+static inline unsigned long page_to_dma_pfn(struct page *pg)
+{
+       return mm_to_dma_pfn_start(page_to_pfn(pg));
+}
+static inline unsigned long virt_to_dma_pfn(void *p)
+{
+       return page_to_dma_pfn(virt_to_page(p));
+}
+
+static inline void context_set_present(struct context_entry *context)
+{
+       context->lo |= 1;
+}
+
+static inline void context_set_fault_enable(struct context_entry *context)
+{
+       context->lo &= (((u64)-1) << 2) | 1;
+}
+
+static inline void context_set_translation_type(struct context_entry *context,
+                                               unsigned long value)
+{
+       context->lo &= (((u64)-1) << 4) | 3;
+       context->lo |= (value & 3) << 2;
+}
+
+static inline void context_set_address_root(struct context_entry *context,
+                                           unsigned long value)
+{
+       context->lo &= ~VTD_PAGE_MASK;
+       context->lo |= value & VTD_PAGE_MASK;
+}
+
+static inline void context_set_address_width(struct context_entry *context,
+                                            unsigned long value)
+{
+       context->hi |= value & 7;
+}
+
+static inline void context_set_domain_id(struct context_entry *context,
+                                        unsigned long value)
+{
+       context->hi |= (value & ((1 << 16) - 1)) << 8;
+}
+
+static inline void context_set_pasid(struct context_entry *context)
+{
+       context->lo |= CONTEXT_PASIDE;
+}
+
+static inline int context_domain_id(struct context_entry *c)
+{
+       return((c->hi >> 8) & 0xffff);
+}
+
+static inline void context_clear_entry(struct context_entry *context)
+{
+       context->lo = 0;
+       context->hi = 0;
+}
+
+#ifdef CONFIG_INTEL_IOMMU
+static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+       if (!iommu->copied_tables)
+               return false;
+
+       return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+       set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+       clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+#endif /* CONFIG_INTEL_IOMMU */
+
+/*
+ * Set the RID_PASID field of a scalable mode context entry. The
+ * IOMMU hardware will use the PASID value set in this field for
+ * DMA translations of DMA requests without PASID.
+ */
+static inline void
+context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
+{
+       context->hi |= pasid & ((1 << 20) - 1);
+}
+
+/*
+ * Set the DTE(Device-TLB Enable) field of a scalable mode context
+ * entry.
+ */
+static inline void context_set_sm_dte(struct context_entry *context)
+{
+       context->lo |= BIT_ULL(2);
+}
+
+/*
+ * Set the PRE(Page Request Enable) field of a scalable mode context
+ * entry.
+ */
+static inline void context_set_sm_pre(struct context_entry *context)
+{
+       context->lo |= BIT_ULL(4);
+}
+
+/* Convert value to context PASID directory size field coding. */
+#define context_pdts(pds)      (((pds) & 0x7) << 9)
+
 struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);
 
 int dmar_enable_qi(struct intel_iommu *iommu);
@@ -900,7 +1072,6 @@ int dmar_ir_support(void);
 void *alloc_pgtable_page(int node, gfp_t gfp);
 void free_pgtable_page(void *vaddr);
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
-struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
 struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
                                               const struct iommu_user_data *user_data);
 
index b5a5563ab32c6bbb3a2e780bb010875b95dd29f4..f26c7f1c46ccaf43b0a4db5209b5c85b484277ed 100644 (file)
@@ -73,9 +73,97 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
        kfree(to_dmar_domain(domain));
 }
 
+static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
+                                  unsigned int mask)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
+       u16 sid, qdep;
+
+       spin_lock_irqsave(&domain->lock, flags);
+       list_for_each_entry(info, &domain->devices, link) {
+               if (!info->ats_enabled)
+                       continue;
+               sid = info->bus << 8 | info->devfn;
+               qdep = info->ats_qdep;
+               qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+                                  qdep, addr, mask);
+               quirk_extra_dev_tlb_flush(info, addr, mask,
+                                         IOMMU_NO_PASID, qdep);
+       }
+       spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
+                                    unsigned long npages, bool ih)
+{
+       struct iommu_domain_info *info;
+       unsigned int mask;
+       unsigned long i;
+
+       xa_for_each(&domain->iommu_array, i, info)
+               qi_flush_piotlb(info->iommu,
+                               domain_id_iommu(domain, info->iommu),
+                               IOMMU_NO_PASID, addr, npages, ih);
+
+       if (!domain->has_iotlb_device)
+               return;
+
+       if (npages == U64_MAX)
+               mask = 64 - VTD_PAGE_SHIFT;
+       else
+               mask = ilog2(__roundup_pow_of_two(npages));
+
+       nested_flush_dev_iotlb(domain, addr, mask);
+}
+
+static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
+                                             struct iommu_user_data_array *array)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       struct iommu_hwpt_vtd_s1_invalidate inv_entry;
+       u32 index, processed = 0;
+       int ret = 0;
+
+       if (array->type != IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       for (index = 0; index < array->entry_num; index++) {
+               ret = iommu_copy_struct_from_user_array(&inv_entry, array,
+                                                       IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+                                                       index, __reserved);
+               if (ret)
+                       break;
+
+               if ((inv_entry.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
+                   inv_entry.__reserved) {
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
+               if (!IS_ALIGNED(inv_entry.addr, VTD_PAGE_SIZE) ||
+                   ((inv_entry.npages == U64_MAX) && inv_entry.addr)) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               intel_nested_flush_cache(dmar_domain, inv_entry.addr,
+                                        inv_entry.npages,
+                                        inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
+               processed++;
+       }
+
+out:
+       array->entry_num = processed;
+       return ret;
+}
+
 static const struct iommu_domain_ops intel_nested_domain_ops = {
        .attach_dev             = intel_nested_attach_dev,
        .free                   = intel_nested_domain_free,
+       .cache_invalidate_user  = intel_nested_cache_invalidate_user,
 };
 
 struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
index 74e8e4c17e81430f216fa88a9575d591de203e3a..3239cefa4c337897dda048ebec7aeb1fc075a955 100644 (file)
  */
 u32 intel_pasid_max_id = PASID_MAX;
 
-int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
-{
-       unsigned long flags;
-       u8 status_code;
-       int ret = 0;
-       u64 res;
-
-       raw_spin_lock_irqsave(&iommu->register_lock, flags);
-       dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
-       IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
-                     !(res & VCMD_VRSP_IP), res);
-       raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
-
-       status_code = VCMD_VRSP_SC(res);
-       switch (status_code) {
-       case VCMD_VRSP_SC_SUCCESS:
-               *pasid = VCMD_VRSP_RESULT_PASID(res);
-               break;
-       case VCMD_VRSP_SC_NO_PASID_AVAIL:
-               pr_info("IOMMU: %s: No PASID available\n", iommu->name);
-               ret = -ENOSPC;
-               break;
-       default:
-               ret = -ENODEV;
-               pr_warn("IOMMU: %s: Unexpected error code %d\n",
-                       iommu->name, status_code);
-       }
-
-       return ret;
-}
-
-void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
-{
-       unsigned long flags;
-       u8 status_code;
-       u64 res;
-
-       raw_spin_lock_irqsave(&iommu->register_lock, flags);
-       dmar_writeq(iommu->reg + DMAR_VCMD_REG,
-                   VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
-       IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
-                     !(res & VCMD_VRSP_IP), res);
-       raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
-
-       status_code = VCMD_VRSP_SC(res);
-       switch (status_code) {
-       case VCMD_VRSP_SC_SUCCESS:
-               break;
-       case VCMD_VRSP_SC_INVALID_PASID:
-               pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
-               break;
-       default:
-               pr_warn("IOMMU: %s: Unexpected error code %d\n",
-                       iommu->name, status_code);
-       }
-}
-
 /*
  * Per device pasid table management:
  */
@@ -230,30 +173,6 @@ retry:
 /*
  * Interfaces for PASID table entry manipulation:
  */
-static inline void pasid_clear_entry(struct pasid_entry *pe)
-{
-       WRITE_ONCE(pe->val[0], 0);
-       WRITE_ONCE(pe->val[1], 0);
-       WRITE_ONCE(pe->val[2], 0);
-       WRITE_ONCE(pe->val[3], 0);
-       WRITE_ONCE(pe->val[4], 0);
-       WRITE_ONCE(pe->val[5], 0);
-       WRITE_ONCE(pe->val[6], 0);
-       WRITE_ONCE(pe->val[7], 0);
-}
-
-static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
-{
-       WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
-       WRITE_ONCE(pe->val[1], 0);
-       WRITE_ONCE(pe->val[2], 0);
-       WRITE_ONCE(pe->val[3], 0);
-       WRITE_ONCE(pe->val[4], 0);
-       WRITE_ONCE(pe->val[5], 0);
-       WRITE_ONCE(pe->val[6], 0);
-       WRITE_ONCE(pe->val[7], 0);
-}
-
 static void
 intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
 {
@@ -269,192 +188,6 @@ intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
                pasid_clear_entry(pe);
 }
 
-static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
-{
-       u64 old;
-
-       old = READ_ONCE(*ptr);
-       WRITE_ONCE(*ptr, (old & ~mask) | bits);
-}
-
-static inline u64 pasid_get_bits(u64 *ptr)
-{
-       return READ_ONCE(*ptr);
-}
-
-/*
- * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
- * PASID entry.
- */
-static inline void
-pasid_set_domain_id(struct pasid_entry *pe, u64 value)
-{
-       pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
-}
-
-/*
- * Get domain ID value of a scalable mode PASID entry.
- */
-static inline u16
-pasid_get_domain_id(struct pasid_entry *pe)
-{
-       return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
-}
-
-/*
- * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_slptr(struct pasid_entry *pe, u64 value)
-{
-       pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
-}
-
-/*
- * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
- * entry.
- */
-static inline void
-pasid_set_address_width(struct pasid_entry *pe, u64 value)
-{
-       pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
-}
-
-/*
- * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_translation_type(struct pasid_entry *pe, u64 value)
-{
-       pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
-}
-
-/*
- * Enable fault processing by clearing the FPD(Fault Processing
- * Disable) field (Bit 1) of a scalable mode PASID entry.
- */
-static inline void pasid_set_fault_enable(struct pasid_entry *pe)
-{
-       pasid_set_bits(&pe->val[0], 1 << 1, 0);
-}
-
-/*
- * Enable second level A/D bits by setting the SLADE (Second Level
- * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_set_ssade(struct pasid_entry *pe)
-{
-       pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
-}
-
-/*
- * Disable second level A/D bits by clearing the SLADE (Second Level
- * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_clear_ssade(struct pasid_entry *pe)
-{
-       pasid_set_bits(&pe->val[0], 1 << 9, 0);
-}
-
-/*
- * Checks if second level A/D bits specifically the SLADE (Second Level
- * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
- * entry is set.
- */
-static inline bool pasid_get_ssade(struct pasid_entry *pe)
-{
-       return pasid_get_bits(&pe->val[0]) & (1 << 9);
-}
-
-/*
- * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
- * scalable mode PASID entry.
- */
-static inline void pasid_set_sre(struct pasid_entry *pe)
-{
-       pasid_set_bits(&pe->val[2], 1 << 0, 1);
-}
-
-/*
- * Setup the WPE(Write Protect Enable) field (Bit 132) of a
- * scalable mode PASID entry.
- */
-static inline void pasid_set_wpe(struct pasid_entry *pe)
-{
-       pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
-}
-
-/*
- * Setup the P(Present) field (Bit 0) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_set_present(struct pasid_entry *pe)
-{
-       pasid_set_bits(&pe->val[0], 1 << 0, 1);
-}
-
-/*
- * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
-{
-       pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
-}
-
-/*
- * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
- * entry. It is required when XD bit of the first level page table
- * entry is about to be set.
- */
-static inline void pasid_set_nxe(struct pasid_entry *pe)
-{
-       pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
-}
-
-/*
- * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
- * PASID entry.
- */
-static inline void
-pasid_set_pgsnp(struct pasid_entry *pe)
-{
-       pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
-}
-
-/*
- * Setup the First Level Page table Pointer field (Bit 140~191)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_flptr(struct pasid_entry *pe, u64 value)
-{
-       pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
-}
-
-/*
- * Setup the First Level Paging Mode field (Bit 130~131) of a
- * scalable mode PASID entry.
- */
-static inline void
-pasid_set_flpm(struct pasid_entry *pe, u64 value)
-{
-       pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
-}
-
-/*
- * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
- * of a scalable mode PASID entry.
- */
-static inline void pasid_set_eafe(struct pasid_entry *pe)
-{
-       pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
-}
-
 static void
 pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
                                    u16 did, u32 pasid)
@@ -613,9 +346,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
  * Skip top levels of page tables for iommu which has less agaw
  * than default. Unnecessary for PT mode.
  */
-static inline int iommu_skip_agaw(struct dmar_domain *domain,
-                                 struct intel_iommu *iommu,
-                                 struct dma_pte **pgd)
+static int iommu_skip_agaw(struct dmar_domain *domain,
+                          struct intel_iommu *iommu,
+                          struct dma_pte **pgd)
 {
        int agaw;
 
@@ -767,7 +500,6 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
  * Set up the scalable mode pasid entry for passthrough translation type.
  */
 int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
-                                  struct dmar_domain *domain,
                                   struct device *dev, u32 pasid)
 {
        u16 did = FLPT_DEFAULT_DID;
index dd37611175cc1b9e4009aad7d0c09522147128eb..8d40d4c66e3198a7ce90c83168a3f86491d79f71 100644 (file)
 #define is_pasid_enabled(entry)                (((entry)->lo >> 3) & 0x1)
 #define get_pasid_dir_size(entry)      (1 << ((((entry)->lo >> 9) & 0x7) + 7))
 
-/* Virtual command interface for enlightened pasid management. */
-#define VCMD_CMD_ALLOC                 0x1
-#define VCMD_CMD_FREE                  0x2
-#define VCMD_VRSP_IP                   0x1
-#define VCMD_VRSP_SC(e)                        (((e) & 0xff) >> 1)
-#define VCMD_VRSP_SC_SUCCESS           0
-#define VCMD_VRSP_SC_NO_PASID_AVAIL    16
-#define VCMD_VRSP_SC_INVALID_PASID     16
-#define VCMD_VRSP_RESULT_PASID(e)      (((e) >> 16) & 0xfffff)
-#define VCMD_CMD_OPERAND(e)            ((e) << 16)
 /*
  * Domain ID reserved for pasid entries programmed for first-level
  * only and pass-through transfer modes.
@@ -96,6 +86,216 @@ static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
        return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
 }
 
+static inline void pasid_clear_entry(struct pasid_entry *pe)
+{
+       WRITE_ONCE(pe->val[0], 0);
+       WRITE_ONCE(pe->val[1], 0);
+       WRITE_ONCE(pe->val[2], 0);
+       WRITE_ONCE(pe->val[3], 0);
+       WRITE_ONCE(pe->val[4], 0);
+       WRITE_ONCE(pe->val[5], 0);
+       WRITE_ONCE(pe->val[6], 0);
+       WRITE_ONCE(pe->val[7], 0);
+}
+
+static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
+{
+       WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
+       WRITE_ONCE(pe->val[1], 0);
+       WRITE_ONCE(pe->val[2], 0);
+       WRITE_ONCE(pe->val[3], 0);
+       WRITE_ONCE(pe->val[4], 0);
+       WRITE_ONCE(pe->val[5], 0);
+       WRITE_ONCE(pe->val[6], 0);
+       WRITE_ONCE(pe->val[7], 0);
+}
+
+static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
+{
+       u64 old;
+
+       old = READ_ONCE(*ptr);
+       WRITE_ONCE(*ptr, (old & ~mask) | bits);
+}
+
+static inline u64 pasid_get_bits(u64 *ptr)
+{
+       return READ_ONCE(*ptr);
+}
+
+/*
+ * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
+ * PASID entry.
+ */
+static inline void
+pasid_set_domain_id(struct pasid_entry *pe, u64 value)
+{
+       pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
+}
+
+/*
+ * Get domain ID value of a scalable mode PASID entry.
+ */
+static inline u16
+pasid_get_domain_id(struct pasid_entry *pe)
+{
+       return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
+}
+
+/*
+ * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_slptr(struct pasid_entry *pe, u64 value)
+{
+       pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
+}
+
+/*
+ * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
+ * entry.
+ */
+static inline void
+pasid_set_address_width(struct pasid_entry *pe, u64 value)
+{
+       pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
+}
+
+/*
+ * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_translation_type(struct pasid_entry *pe, u64 value)
+{
+       pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
+}
+
+/*
+ * Enable fault processing by clearing the FPD(Fault Processing
+ * Disable) field (Bit 1) of a scalable mode PASID entry.
+ */
+static inline void pasid_set_fault_enable(struct pasid_entry *pe)
+{
+       pasid_set_bits(&pe->val[0], 1 << 1, 0);
+}
+
+/*
+ * Enable second level A/D bits by setting the SLADE (Second Level
+ * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_set_ssade(struct pasid_entry *pe)
+{
+       pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
+}
+
+/*
+ * Disable second level A/D bits by clearing the SLADE (Second Level
+ * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_clear_ssade(struct pasid_entry *pe)
+{
+       pasid_set_bits(&pe->val[0], 1 << 9, 0);
+}
+
+/*
+ * Checks if second level A/D bits specifically the SLADE (Second Level
+ * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
+ * entry is set.
+ */
+static inline bool pasid_get_ssade(struct pasid_entry *pe)
+{
+       return pasid_get_bits(&pe->val[0]) & (1 << 9);
+}
+
+/*
+ * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
+ * scalable mode PASID entry.
+ */
+static inline void pasid_set_sre(struct pasid_entry *pe)
+{
+       pasid_set_bits(&pe->val[2], 1 << 0, 1);
+}
+
+/*
+ * Setup the WPE(Write Protect Enable) field (Bit 132) of a
+ * scalable mode PASID entry.
+ */
+static inline void pasid_set_wpe(struct pasid_entry *pe)
+{
+       pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
+}
+
+/*
+ * Setup the P(Present) field (Bit 0) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_set_present(struct pasid_entry *pe)
+{
+       pasid_set_bits(&pe->val[0], 1 << 0, 1);
+}
+
+/*
+ * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
+{
+       pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
+}
+
+/*
+ * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
+ * entry. It is required when XD bit of the first level page table
+ * entry is about to be set.
+ */
+static inline void pasid_set_nxe(struct pasid_entry *pe)
+{
+       pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
+}
+
+/*
+ * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
+ * PASID entry.
+ */
+static inline void
+pasid_set_pgsnp(struct pasid_entry *pe)
+{
+       pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
+}
+
+/*
+ * Setup the First Level Page table Pointer field (Bit 140~191)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_flptr(struct pasid_entry *pe, u64 value)
+{
+       pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
+}
+
+/*
+ * Setup the First Level Paging Mode field (Bit 130~131) of a
+ * scalable mode PASID entry.
+ */
+static inline void
+pasid_set_flpm(struct pasid_entry *pe, u64 value)
+{
+       pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
+}
+
+/*
+ * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
+ * of a scalable mode PASID entry.
+ */
+static inline void pasid_set_eafe(struct pasid_entry *pe)
+{
+       pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
+}
+
 extern unsigned int intel_pasid_max_id;
 int intel_pasid_alloc_table(struct device *dev);
 void intel_pasid_free_table(struct device *dev);
@@ -111,15 +311,12 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
                                     struct device *dev, u32 pasid,
                                     bool enabled);
 int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
-                                  struct dmar_domain *domain,
                                   struct device *dev, u32 pasid);
 int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
                             u32 pasid, struct dmar_domain *domain);
 void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
                                 struct device *dev, u32 pasid,
                                 bool fault_ignore);
-int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
-void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
 void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
                                          struct device *dev, u32 pasid);
 #endif /* __INTEL_PASID_H */
index ac12f76c1212ac5f8f3a835f9afc1a0f6737af14..40edd282903fbe7c804512819aa95c3a1ae9d43e 100644 (file)
@@ -316,21 +316,22 @@ out:
 }
 
 static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
-                            struct mm_struct *mm)
+                            struct iommu_domain *domain, ioasid_t pasid)
 {
        struct device_domain_info *info = dev_iommu_priv_get(dev);
+       struct mm_struct *mm = domain->mm;
        struct intel_svm_dev *sdev;
        struct intel_svm *svm;
        unsigned long sflags;
        int ret = 0;
 
-       svm = pasid_private_find(mm->pasid);
+       svm = pasid_private_find(pasid);
        if (!svm) {
                svm = kzalloc(sizeof(*svm), GFP_KERNEL);
                if (!svm)
                        return -ENOMEM;
 
-               svm->pasid = mm->pasid;
+               svm->pasid = pasid;
                svm->mm = mm;
                INIT_LIST_HEAD_RCU(&svm->devs);
 
@@ -368,7 +369,7 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
 
        /* Setup the pasid table: */
        sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
-       ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
+       ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
                                            FLPT_DEFAULT_DID, sflags);
        if (ret)
                goto free_sdev;
@@ -382,7 +383,7 @@ free_sdev:
 free_svm:
        if (list_empty(&svm->devs)) {
                mmu_notifier_unregister(&svm->notifier, mm);
-               pasid_private_remove(mm->pasid);
+               pasid_private_remove(pasid);
                kfree(svm);
        }
 
@@ -392,14 +393,9 @@ free_svm:
 void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
 {
        struct intel_svm_dev *sdev;
-       struct intel_iommu *iommu;
        struct intel_svm *svm;
        struct mm_struct *mm;
 
-       iommu = device_to_iommu(dev, NULL, NULL);
-       if (!iommu)
-               return;
-
        if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
                return;
        mm = svm->mm;
@@ -750,25 +746,16 @@ int intel_svm_page_response(struct device *dev,
                            struct iommu_fault_event *evt,
                            struct iommu_page_response *msg)
 {
+       struct device_domain_info *info = dev_iommu_priv_get(dev);
+       struct intel_iommu *iommu = info->iommu;
+       u8 bus = info->bus, devfn = info->devfn;
        struct iommu_fault_page_request *prm;
-       struct intel_iommu *iommu;
        bool private_present;
        bool pasid_present;
        bool last_page;
-       u8 bus, devfn;
        int ret = 0;
        u16 sid;
 
-       if (!dev || !dev_is_pci(dev))
-               return -ENODEV;
-
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if (!iommu)
-               return -ENODEV;
-
-       if (!msg || !evt)
-               return -EINVAL;
-
        prm = &evt->fault.prm;
        sid = PCI_DEVID(bus, devfn);
        pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
@@ -822,9 +809,8 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 {
        struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct intel_iommu *iommu = info->iommu;
-       struct mm_struct *mm = domain->mm;
 
-       return intel_svm_bind_mm(iommu, dev, mm);
+       return intel_svm_bind_mm(iommu, dev, domain, pasid);
 }
 
 static void intel_svm_domain_free(struct iommu_domain *domain)
index 72dcdd468cf30d6ec32eaf0795463549076309b0..f7828a7aad410d4406de3d85a97cd419bc29bd0e 100644 (file)
@@ -188,20 +188,28 @@ static dma_addr_t __arm_lpae_dma_addr(void *pages)
 }
 
 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
-                                   struct io_pgtable_cfg *cfg)
+                                   struct io_pgtable_cfg *cfg,
+                                   void *cookie)
 {
        struct device *dev = cfg->iommu_dev;
        int order = get_order(size);
-       struct page *p;
        dma_addr_t dma;
        void *pages;
 
        VM_BUG_ON((gfp & __GFP_HIGHMEM));
-       p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
-       if (!p)
+
+       if (cfg->alloc) {
+               pages = cfg->alloc(cookie, size, gfp);
+       } else {
+               struct page *p;
+
+               p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
+               pages = p ? page_address(p) : NULL;
+       }
+
+       if (!pages)
                return NULL;
 
-       pages = page_address(p);
        if (!cfg->coherent_walk) {
                dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma))
@@ -220,18 +228,28 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 out_unmap:
        dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
        dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+
 out_free:
-       __free_pages(p, order);
+       if (cfg->free)
+               cfg->free(cookie, pages, size);
+       else
+               free_pages((unsigned long)pages, order);
+
        return NULL;
 }
 
 static void __arm_lpae_free_pages(void *pages, size_t size,
-                                 struct io_pgtable_cfg *cfg)
+                                 struct io_pgtable_cfg *cfg,
+                                 void *cookie)
 {
        if (!cfg->coherent_walk)
                dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
                                 size, DMA_TO_DEVICE);
-       free_pages((unsigned long)pages, get_order(size));
+
+       if (cfg->free)
+               cfg->free(cookie, pages, size);
+       else
+               free_pages((unsigned long)pages, get_order(size));
 }
 
 static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
@@ -373,13 +391,13 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
        /* Grab a pointer to the next level */
        pte = READ_ONCE(*ptep);
        if (!pte) {
-               cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
+               cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
                if (!cptep)
                        return -ENOMEM;
 
                pte = arm_lpae_install_table(cptep, ptep, 0, data);
                if (pte)
-                       __arm_lpae_free_pages(cptep, tblsz, cfg);
+                       __arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
        } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
                __arm_lpae_sync_pte(ptep, 1, cfg);
        }
@@ -524,7 +542,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
                __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
        }
 
-       __arm_lpae_free_pages(start, table_size, &data->iop.cfg);
+       __arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
 }
 
 static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -552,7 +570,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
        if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
                return 0;
 
-       tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
+       tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
        if (!tablep)
                return 0; /* Bytes unmapped */
 
@@ -575,7 +593,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 
        pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
        if (pte != blk_pte) {
-               __arm_lpae_free_pages(tablep, tablesz, cfg);
+               __arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
                /*
                 * We may race against someone unmapping another part of this
                 * block, but anything else is invalid. We can't misinterpret
@@ -882,7 +900,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 
        /* Looking good; allocate a pgd */
        data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
-                                          GFP_KERNEL, cfg);
+                                          GFP_KERNEL, cfg, cookie);
        if (!data->pgd)
                goto out_free_data;
 
@@ -984,7 +1002,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 
        /* Allocate pgd pages */
        data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
-                                          GFP_KERNEL, cfg);
+                                          GFP_KERNEL, cfg, cookie);
        if (!data->pgd)
                goto out_free_data;
 
@@ -1059,7 +1077,7 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
                 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
 
        data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
-                                          cfg);
+                                          cfg, cookie);
        if (!data->pgd)
                goto out_free_data;
 
@@ -1080,26 +1098,31 @@ out_free_data:
 }
 
 struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
+       .caps   = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
        .alloc  = arm_64_lpae_alloc_pgtable_s1,
        .free   = arm_lpae_free_pgtable,
 };
 
 struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
+       .caps   = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
        .alloc  = arm_64_lpae_alloc_pgtable_s2,
        .free   = arm_lpae_free_pgtable,
 };
 
 struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
+       .caps   = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
        .alloc  = arm_32_lpae_alloc_pgtable_s1,
        .free   = arm_lpae_free_pgtable,
 };
 
 struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
+       .caps   = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
        .alloc  = arm_32_lpae_alloc_pgtable_s2,
        .free   = arm_lpae_free_pgtable,
 };
 
 struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
+       .caps   = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
        .alloc  = arm_mali_lpae_alloc_pgtable,
        .free   = arm_lpae_free_pgtable,
 };
index b843fcd365d286668273667401b331a27fc04d23..8841c1487f00481f92759e499ad85a70423b7428 100644 (file)
@@ -34,6 +34,26 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
 #endif
 };
 
+static int check_custom_allocator(enum io_pgtable_fmt fmt,
+                                 struct io_pgtable_cfg *cfg)
+{
+       /* No custom allocator, no need to check the format. */
+       if (!cfg->alloc && !cfg->free)
+               return 0;
+
+       /* When passing a custom allocator, both the alloc and free
+        * functions should be provided.
+        */
+       if (!cfg->alloc || !cfg->free)
+               return -EINVAL;
+
+       /* Make sure the format supports custom allocators. */
+       if (io_pgtable_init_table[fmt]->caps & IO_PGTABLE_CAP_CUSTOM_ALLOCATOR)
+               return 0;
+
+       return -EINVAL;
+}
+
 struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
                                            struct io_pgtable_cfg *cfg,
                                            void *cookie)
@@ -44,6 +64,9 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
        if (fmt >= IO_PGTABLE_NUM_FMTS)
                return NULL;
 
+       if (check_custom_allocator(fmt, cfg))
+               return NULL;
+
        fns = io_pgtable_init_table[fmt];
        if (!fns)
                return NULL;
index b78671a8a9143fe0b961c69d0ad81fa3224e5a83..c3fc9201d0be97e59395750cda0fc29940c0b844 100644 (file)
 static DEFINE_MUTEX(iommu_sva_lock);
 
 /* Allocate a PASID for the mm within range (inclusive) */
-static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
+static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
 {
+       struct iommu_mm_data *iommu_mm;
        ioasid_t pasid;
-       int ret = 0;
+
+       lockdep_assert_held(&iommu_sva_lock);
 
        if (!arch_pgtable_dma_compat(mm))
-               return -EBUSY;
+               return ERR_PTR(-EBUSY);
 
-       mutex_lock(&iommu_sva_lock);
+       iommu_mm = mm->iommu_mm;
        /* Is a PASID already associated with this mm? */
-       if (mm_valid_pasid(mm)) {
-               if (mm->pasid >= dev->iommu->max_pasids)
-                       ret = -EOVERFLOW;
-               goto out;
+       if (iommu_mm) {
+               if (iommu_mm->pasid >= dev->iommu->max_pasids)
+                       return ERR_PTR(-EOVERFLOW);
+               return iommu_mm;
        }
 
+       iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
+       if (!iommu_mm)
+               return ERR_PTR(-ENOMEM);
+
        pasid = iommu_alloc_global_pasid(dev);
        if (pasid == IOMMU_PASID_INVALID) {
-               ret = -ENOSPC;
-               goto out;
+               kfree(iommu_mm);
+               return ERR_PTR(-ENOSPC);
        }
-       mm->pasid = pasid;
-       ret = 0;
-out:
-       mutex_unlock(&iommu_sva_lock);
-       return ret;
+       iommu_mm->pasid = pasid;
+       INIT_LIST_HEAD(&iommu_mm->sva_domains);
+       /*
+        * Make sure the write to mm->iommu_mm is not reordered in front of
+        * initialization to iommu_mm fields. If it does, readers may see a
+        * valid iommu_mm with uninitialized values.
+        */
+       smp_store_release(&mm->iommu_mm, iommu_mm);
+       return iommu_mm;
 }
 
 /**
@@ -58,57 +68,60 @@ out:
  */
 struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
 {
+       struct iommu_mm_data *iommu_mm;
        struct iommu_domain *domain;
        struct iommu_sva *handle;
        int ret;
 
+       mutex_lock(&iommu_sva_lock);
+
        /* Allocate mm->pasid if necessary. */
-       ret = iommu_sva_alloc_pasid(mm, dev);
-       if (ret)
-               return ERR_PTR(ret);
+       iommu_mm = iommu_alloc_mm_data(mm, dev);
+       if (IS_ERR(iommu_mm)) {
+               ret = PTR_ERR(iommu_mm);
+               goto out_unlock;
+       }
 
        handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-       if (!handle)
-               return ERR_PTR(-ENOMEM);
-
-       mutex_lock(&iommu_sva_lock);
-       /* Search for an existing domain. */
-       domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid,
-                                               IOMMU_DOMAIN_SVA);
-       if (IS_ERR(domain)) {
-               ret = PTR_ERR(domain);
+       if (!handle) {
+               ret = -ENOMEM;
                goto out_unlock;
        }
 
-       if (domain) {
-               domain->users++;
-               goto out;
+       /* Search for an existing domain. */
+       list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
+               ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
+               if (!ret) {
+                       domain->users++;
+                       goto out;
+               }
        }
 
        /* Allocate a new domain and set it on device pasid. */
        domain = iommu_sva_domain_alloc(dev, mm);
        if (!domain) {
                ret = -ENOMEM;
-               goto out_unlock;
+               goto out_free_handle;
        }
 
-       ret = iommu_attach_device_pasid(domain, dev, mm->pasid);
+       ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
        if (ret)
                goto out_free_domain;
        domain->users = 1;
+       list_add(&domain->next, &mm->iommu_mm->sva_domains);
+
 out:
        mutex_unlock(&iommu_sva_lock);
        handle->dev = dev;
        handle->domain = domain;
-
        return handle;
 
 out_free_domain:
        iommu_domain_free(domain);
+out_free_handle:
+       kfree(handle);
 out_unlock:
        mutex_unlock(&iommu_sva_lock);
-       kfree(handle);
-
        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
@@ -124,12 +137,13 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
 void iommu_sva_unbind_device(struct iommu_sva *handle)
 {
        struct iommu_domain *domain = handle->domain;
-       ioasid_t pasid = domain->mm->pasid;
+       struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
        struct device *dev = handle->dev;
 
        mutex_lock(&iommu_sva_lock);
+       iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
        if (--domain->users == 0) {
-               iommu_detach_device_pasid(domain, dev, pasid);
+               list_del(&domain->next);
                iommu_domain_free(domain);
        }
        mutex_unlock(&iommu_sva_lock);
@@ -141,7 +155,7 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
 {
        struct iommu_domain *domain = handle->domain;
 
-       return domain->mm->pasid;
+       return mm_get_enqcmd_pasid(domain->mm);
 }
 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
 
@@ -205,8 +219,11 @@ out_put_mm:
 
 void mm_pasid_drop(struct mm_struct *mm)
 {
-       if (likely(!mm_valid_pasid(mm)))
+       struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+
+       if (!iommu_mm)
                return;
 
-       iommu_free_global_pasid(mm->pasid);
+       iommu_free_global_pasid(iommu_mm->pasid);
+       kfree(iommu_mm);
 }
index 33e2a9b5d339e4f82a63e4c8fcbc2e8103af93b5..d14413916f93a01626e850aa72ee0c919c1f72bd 100644 (file)
@@ -148,7 +148,7 @@ struct iommu_group_attribute iommu_group_attr_##_name =             \
 static LIST_HEAD(iommu_device_list);
 static DEFINE_SPINLOCK(iommu_device_lock);
 
-static struct bus_type * const iommu_buses[] = {
+static const struct bus_type * const iommu_buses[] = {
        &platform_bus_type,
 #ifdef CONFIG_PCI
        &pci_bus_type,
@@ -257,13 +257,6 @@ int iommu_device_register(struct iommu_device *iommu,
        /* We need to be able to take module references appropriately */
        if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
                return -EINVAL;
-       /*
-        * Temporarily enforce global restriction to a single driver. This was
-        * already the de-facto behaviour, since any possible combination of
-        * existing drivers would compete for at least the PCI or platform bus.
-        */
-       if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
-               return -EBUSY;
 
        iommu->ops = ops;
        if (hwdev)
@@ -273,10 +266,8 @@ int iommu_device_register(struct iommu_device *iommu,
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);
 
-       for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
-               iommu_buses[i]->iommu_ops = ops;
+       for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++)
                err = bus_iommu_probe(iommu_buses[i]);
-       }
        if (err)
                iommu_device_unregister(iommu);
        return err;
@@ -329,7 +320,6 @@ int iommu_device_register_bus(struct iommu_device *iommu,
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);
 
-       bus->iommu_ops = ops;
        err = bus_iommu_probe(bus);
        if (err) {
                iommu_device_unregister_bus(iommu, bus, nb);
@@ -344,6 +334,8 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
 {
        struct dev_iommu *param = dev->iommu;
 
+       lockdep_assert_held(&iommu_probe_device_lock);
+
        if (param)
                return param;
 
@@ -368,6 +360,15 @@ static void dev_iommu_free(struct device *dev)
        kfree(param);
 }
 
+/*
+ * Internal equivalent of device_iommu_mapped() for when we care that a device
+ * actually has API ops, and don't want false positives from VFIO-only groups.
+ */
+static bool dev_has_iommu(struct device *dev)
+{
+       return dev->iommu && dev->iommu->iommu_dev;
+}
+
 static u32 dev_iommu_get_max_pasids(struct device *dev)
 {
        u32 max_pasids = 0, bits = 0;
@@ -386,6 +387,15 @@ static u32 dev_iommu_get_max_pasids(struct device *dev)
        return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
 }
 
+void dev_iommu_priv_set(struct device *dev, void *priv)
+{
+       /* FSL_PAMU does something weird */
+       if (!IS_ENABLED(CONFIG_FSL_PAMU))
+               lockdep_assert_held(&iommu_probe_device_lock);
+       dev->iommu->priv = priv;
+}
+EXPORT_SYMBOL_GPL(dev_iommu_priv_set);
+
 /*
  * Init the dev->iommu and dev->iommu_group in the struct device and get the
  * driver probed
@@ -489,11 +499,26 @@ DEFINE_MUTEX(iommu_probe_device_lock);
 
 static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
 {
-       const struct iommu_ops *ops = dev->bus->iommu_ops;
+       const struct iommu_ops *ops;
+       struct iommu_fwspec *fwspec;
        struct iommu_group *group;
        struct group_device *gdev;
        int ret;
 
+       /*
+        * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU
+        * instances with non-NULL fwnodes, and client devices should have been
+        * identified with a fwspec by this point. Otherwise, we can currently
+        * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
+        * be present, and that any of their registered instances has suitable
+        * ops for probing, and thus cheekily co-opt the same mechanism.
+        */
+       fwspec = dev_iommu_fwspec_get(dev);
+       if (fwspec && fwspec->ops)
+               ops = fwspec->ops;
+       else
+               ops = iommu_ops_from_fwnode(NULL);
+
        if (!ops)
                return -ENODEV;
        /*
@@ -618,7 +643,7 @@ static void __iommu_group_remove_device(struct device *dev)
 
                list_del(&device->list);
                __iommu_group_free_device(group, device);
-               if (dev->iommu && dev->iommu->iommu_dev)
+               if (dev_has_iommu(dev))
                        iommu_deinit_device(dev);
                else
                        dev->iommu_group = NULL;
@@ -817,7 +842,7 @@ int iommu_get_group_resv_regions(struct iommu_group *group,
                 * Non-API groups still expose reserved_regions in sysfs,
                 * so filter out calls that get here that way.
                 */
-               if (!device->dev->iommu)
+               if (!dev_has_iommu(device->dev))
                        break;
 
                INIT_LIST_HEAD(&dev_resv_regions);
@@ -1223,6 +1248,12 @@ void iommu_group_remove_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
 
+static struct device *iommu_group_first_dev(struct iommu_group *group)
+{
+       lockdep_assert_held(&group->mutex);
+       return list_first_entry(&group->devices, struct group_device, list)->dev;
+}
+
 /**
  * iommu_group_for_each_dev - iterate over each device in the group
  * @group: the group
@@ -1750,23 +1781,6 @@ __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
        return __iommu_group_domain_alloc(group, req_type);
 }
 
-/*
- * Returns the iommu_ops for the devices in an iommu group.
- *
- * It is assumed that all devices in an iommu group are managed by a single
- * IOMMU unit. Therefore, this returns the dev_iommu_ops of the first device
- * in the group.
- */
-static const struct iommu_ops *group_iommu_ops(struct iommu_group *group)
-{
-       struct group_device *device =
-               list_first_entry(&group->devices, struct group_device, list);
-
-       lockdep_assert_held(&group->mutex);
-
-       return dev_iommu_ops(device->dev);
-}
-
 /*
  * req_type of 0 means "auto" which means to select a domain based on
  * iommu_def_domain_type or what the driver actually supports.
@@ -1774,7 +1788,7 @@ static const struct iommu_ops *group_iommu_ops(struct iommu_group *group)
 static struct iommu_domain *
 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
 {
-       const struct iommu_ops *ops = group_iommu_ops(group);
+       const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group));
        struct iommu_domain *dom;
 
        lockdep_assert_held(&group->mutex);
@@ -1785,7 +1799,7 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
         * domain. Do not use in new drivers.
         */
        if (ops->default_domain) {
-               if (req_type)
+               if (req_type != ops->default_domain->type)
                        return ERR_PTR(-EINVAL);
                return ops->default_domain;
        }
@@ -1854,13 +1868,21 @@ static int iommu_bus_notifier(struct notifier_block *nb,
 static int iommu_get_def_domain_type(struct iommu_group *group,
                                     struct device *dev, int cur_type)
 {
-       const struct iommu_ops *ops = group_iommu_ops(group);
+       const struct iommu_ops *ops = dev_iommu_ops(dev);
        int type;
 
-       if (!ops->def_domain_type)
-               return cur_type;
-
-       type = ops->def_domain_type(dev);
+       if (ops->default_domain) {
+               /*
+                * Drivers that declare a global static default_domain will
+                * always choose that.
+                */
+               type = ops->default_domain->type;
+       } else {
+               if (ops->def_domain_type)
+                       type = ops->def_domain_type(dev);
+               else
+                       return cur_type;
+       }
        if (!type || cur_type == type)
                return cur_type;
        if (!cur_type)
@@ -2003,9 +2025,28 @@ int bus_iommu_probe(const struct bus_type *bus)
        return 0;
 }
 
+/**
+ * iommu_present() - make platform-specific assumptions about an IOMMU
+ * @bus: bus to check
+ *
+ * Do not use this function. You want device_iommu_mapped() instead.
+ *
+ * Return: true if some IOMMU is present and aware of devices on the given bus;
+ * in general it may not be the only IOMMU, and it may not have anything to do
+ * with whatever device you are ultimately interested in.
+ */
 bool iommu_present(const struct bus_type *bus)
 {
-       return bus->iommu_ops != NULL;
+       bool ret = false;
+
+       for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
+               if (iommu_buses[i] == bus) {
+                       spin_lock(&iommu_device_lock);
+                       ret = !list_empty(&iommu_device_list);
+                       spin_unlock(&iommu_device_lock);
+               }
+       }
+       return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_present);
 
@@ -2021,7 +2062,7 @@ bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
 {
        const struct iommu_ops *ops;
 
-       if (!dev->iommu || !dev->iommu->iommu_dev)
+       if (!dev_has_iommu(dev))
                return false;
 
        ops = dev_iommu_ops(dev);
@@ -2107,6 +2148,7 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
                return ERR_PTR(-ENOMEM);
 
        domain->type = type;
+       domain->owner = ops;
        /*
         * If not already set, assume all sizes by default; the driver
         * may override this later
@@ -2132,21 +2174,37 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
 static struct iommu_domain *
 __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
 {
-       struct device *dev =
-               list_first_entry(&group->devices, struct group_device, list)
-                       ->dev;
+       struct device *dev = iommu_group_first_dev(group);
 
-       return __iommu_domain_alloc(group_iommu_ops(group), dev, type);
+       return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type);
+}
+
+static int __iommu_domain_alloc_dev(struct device *dev, void *data)
+{
+       const struct iommu_ops **ops = data;
+
+       if (!dev_has_iommu(dev))
+               return 0;
+
+       if (WARN_ONCE(*ops && *ops != dev_iommu_ops(dev),
+                     "Multiple IOMMU drivers present for bus %s, which the public IOMMU API can't fully support yet. You will still need to disable one or more for this to work, sorry!\n",
+                     dev_bus_name(dev)))
+               return -EBUSY;
+
+       *ops = dev_iommu_ops(dev);
+       return 0;
 }
 
 struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
 {
+       const struct iommu_ops *ops = NULL;
+       int err = bus_for_each_dev(bus, NULL, &ops, __iommu_domain_alloc_dev);
        struct iommu_domain *domain;
 
-       if (bus == NULL || bus->iommu_ops == NULL)
+       if (err || !ops)
                return NULL;
-       domain = __iommu_domain_alloc(bus->iommu_ops, NULL,
-                                   IOMMU_DOMAIN_UNMANAGED);
+
+       domain = __iommu_domain_alloc(ops, NULL, IOMMU_DOMAIN_UNMANAGED);
        if (IS_ERR(domain))
                return NULL;
        return domain;
@@ -2284,10 +2342,16 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
 static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group)
 {
+       struct device *dev;
+
        if (group->domain && group->domain != group->default_domain &&
            group->domain != group->blocking_domain)
                return -EBUSY;
 
+       dev = iommu_group_first_dev(group);
+       if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+               return -EINVAL;
+
        return __iommu_group_set_domain(group, domain);
 }
 
@@ -3004,8 +3068,8 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
  */
 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
 {
-       if (dev->iommu && dev->iommu->iommu_dev) {
-               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+       if (dev_has_iommu(dev)) {
+               const struct iommu_ops *ops = dev_iommu_ops(dev);
 
                if (ops->dev_enable_feat)
                        return ops->dev_enable_feat(dev, feat);
@@ -3020,8 +3084,8 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
  */
 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
 {
-       if (dev->iommu && dev->iommu->iommu_dev) {
-               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+       if (dev_has_iommu(dev)) {
+               const struct iommu_ops *ops = dev_iommu_ops(dev);
 
                if (ops->dev_disable_feat)
                        return ops->dev_disable_feat(dev, feat);
@@ -3481,6 +3545,9 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
        if (!group)
                return -ENODEV;
 
+       if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+               return -EINVAL;
+
        mutex_lock(&group->mutex);
        curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
        if (curr) {
@@ -3569,6 +3636,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
        domain->type = IOMMU_DOMAIN_SVA;
        mmgrab(mm);
        domain->mm = mm;
+       domain->owner = ops;
        domain->iopf_handler = iommu_sva_handle_iopf;
        domain->fault_data = mm;
 
index cbb5df0a6c32f835b50535a84bde3f44bfb4d6db..3f3f1fa1a0a946a43eb48ee324ab4979683bb566 100644 (file)
@@ -135,6 +135,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
                        hwpt->domain = NULL;
                        goto out_abort;
                }
+               hwpt->domain->owner = ops;
        } else {
                hwpt->domain = iommu_domain_alloc(idev->dev->bus);
                if (!hwpt->domain) {
@@ -233,6 +234,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
                hwpt->domain = NULL;
                goto out_abort;
        }
+       hwpt->domain->owner = ops;
 
        if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
                rc = -EINVAL;
@@ -371,3 +373,44 @@ int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
        iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
        return rc;
 }
+
+int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
+{
+       struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
+       struct iommu_user_data_array data_array = {
+               .type = cmd->data_type,
+               .uptr = u64_to_user_ptr(cmd->data_uptr),
+               .entry_len = cmd->entry_len,
+               .entry_num = cmd->entry_num,
+       };
+       struct iommufd_hw_pagetable *hwpt;
+       u32 done_num = 0;
+       int rc;
+
+       if (cmd->__reserved) {
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+
+       if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       hwpt = iommufd_get_hwpt_nested(ucmd, cmd->hwpt_id);
+       if (IS_ERR(hwpt)) {
+               rc = PTR_ERR(hwpt);
+               goto out;
+       }
+
+       rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
+                                                     &data_array);
+       done_num = data_array.entry_num;
+
+       iommufd_put_object(ucmd->ictx, &hwpt->obj);
+out:
+       cmd->entry_num = done_num;
+       if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
+               return -EFAULT;
+       return rc;
+}
index abae041e256f7ed1a0a6fcc68a48087098effc6b..991f864d1f9bc175b9acc9b9c445d32ed837f878 100644 (file)
@@ -328,6 +328,15 @@ iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id)
                                               IOMMUFD_OBJ_HWPT_PAGING),
                            struct iommufd_hwpt_paging, common.obj);
 }
+
+static inline struct iommufd_hw_pagetable *
+iommufd_get_hwpt_nested(struct iommufd_ucmd *ucmd, u32 id)
+{
+       return container_of(iommufd_get_object(ucmd->ictx, id,
+                                              IOMMUFD_OBJ_HWPT_NESTED),
+                           struct iommufd_hw_pagetable, obj);
+}
+
 int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd);
 int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd);
 
@@ -345,6 +354,7 @@ void iommufd_hwpt_paging_abort(struct iommufd_object *obj);
 void iommufd_hwpt_nested_destroy(struct iommufd_object *obj);
 void iommufd_hwpt_nested_abort(struct iommufd_object *obj);
 int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
+int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd);
 
 static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
                                            struct iommufd_hw_pagetable *hwpt)
index 7910fbe1962d78b9c8b65726fad12e75c0fd4a22..482d4059f5db6aed38ee8aa60f25b791f1e7556d 100644 (file)
@@ -21,6 +21,7 @@ enum {
        IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
        IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
        IOMMU_TEST_OP_DIRTY,
+       IOMMU_TEST_OP_MD_CHECK_IOTLB,
 };
 
 enum {
@@ -121,6 +122,10 @@ struct iommu_test_cmd {
                        __aligned_u64 uptr;
                        __aligned_u64 out_nr_dirty;
                } dirty;
+               struct {
+                       __u32 id;
+                       __u32 iotlb;
+               } check_iotlb;
        };
        __u32 last;
 };
@@ -148,4 +153,22 @@ struct iommu_hwpt_selftest {
        __u32 iotlb;
 };
 
+/* Should not be equal to any defined value in enum iommu_hwpt_invalidate_data_type */
+#define IOMMU_HWPT_INVALIDATE_DATA_SELFTEST 0xdeadbeef
+#define IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID 0xdadbeef
+
+/**
+ * struct iommu_hwpt_invalidate_selftest - Invalidation data for Mock driver
+ *                                         (IOMMU_HWPT_INVALIDATE_DATA_SELFTEST)
+ * @flags: Invalidate flags
+ * @iotlb_id: Invalidate iotlb entry index
+ *
+ * If IOMMU_TEST_INVALIDATE_ALL is set in @flags, @iotlb_id will be ignored
+ */
+struct iommu_hwpt_invalidate_selftest {
+#define IOMMU_TEST_INVALIDATE_FLAG_ALL (1 << 0)
+       __u32 flags;
+       __u32 iotlb_id;
+};
+
 #endif
index c9091e46d208abeea14aea1c649a016c39a077ba..39b32932c61ee4e924e24ab18fc05ec4149829c9 100644 (file)
@@ -322,6 +322,7 @@ union ucmd_buffer {
        struct iommu_hw_info info;
        struct iommu_hwpt_alloc hwpt;
        struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap;
+       struct iommu_hwpt_invalidate cache;
        struct iommu_hwpt_set_dirty_tracking set_dirty_tracking;
        struct iommu_ioas_alloc alloc;
        struct iommu_ioas_allow_iovas allow_iovas;
@@ -360,6 +361,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
                 __reserved),
        IOCTL_OP(IOMMU_HWPT_GET_DIRTY_BITMAP, iommufd_hwpt_get_dirty_bitmap,
                 struct iommu_hwpt_get_dirty_bitmap, data),
+       IOCTL_OP(IOMMU_HWPT_INVALIDATE, iommufd_hwpt_invalidate,
+                struct iommu_hwpt_invalidate, __reserved),
        IOCTL_OP(IOMMU_HWPT_SET_DIRTY_TRACKING, iommufd_hwpt_set_dirty_tracking,
                 struct iommu_hwpt_set_dirty_tracking, __reserved),
        IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl,
index 022ef8f55088a6b1e7d452ad4260510cca5bb303..d9e9920c7eba413eaf25b7840eefdf36a3999a9e 100644 (file)
@@ -25,6 +25,19 @@ static struct iommu_domain_ops domain_nested_ops;
 
 size_t iommufd_test_memory_limit = 65536;
 
+struct mock_bus_type {
+       struct bus_type bus;
+       struct notifier_block nb;
+};
+
+static struct mock_bus_type iommufd_mock_bus_type = {
+       .bus = {
+               .name = "iommufd_mock",
+       },
+};
+
+static atomic_t mock_dev_num;
+
 enum {
        MOCK_DIRTY_TRACK = 1,
        MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
@@ -437,6 +450,8 @@ static struct iommu_device mock_iommu_device = {
 
 static struct iommu_device *mock_probe_device(struct device *dev)
 {
+       if (dev->bus != &iommufd_mock_bus_type.bus)
+               return ERR_PTR(-ENODEV);
        return &mock_iommu_device;
 }
 
@@ -473,9 +488,59 @@ static void mock_domain_free_nested(struct iommu_domain *domain)
        kfree(mock_nested);
 }
 
+static int
+mock_domain_cache_invalidate_user(struct iommu_domain *domain,
+                                 struct iommu_user_data_array *array)
+{
+       struct mock_iommu_domain_nested *mock_nested =
+               container_of(domain, struct mock_iommu_domain_nested, domain);
+       struct iommu_hwpt_invalidate_selftest inv;
+       u32 processed = 0;
+       int i = 0, j;
+       int rc = 0;
+
+       if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       for ( ; i < array->entry_num; i++) {
+               rc = iommu_copy_struct_from_user_array(&inv, array,
+                                                      IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                                      i, iotlb_id);
+               if (rc)
+                       break;
+
+               if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
+                       rc = -EOPNOTSUPP;
+                       break;
+               }
+
+               if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
+                       rc = -EINVAL;
+                       break;
+               }
+
+               if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
+                       /* Invalidate all mock iotlb entries and ignore iotlb_id */
+                       for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
+                               mock_nested->iotlb[j] = 0;
+               } else {
+                       mock_nested->iotlb[inv.iotlb_id] = 0;
+               }
+
+               processed++;
+       }
+
+out:
+       array->entry_num = processed;
+       return rc;
+}
+
 static struct iommu_domain_ops domain_nested_ops = {
        .free = mock_domain_free_nested,
        .attach_dev = mock_domain_nop_attach,
+       .cache_invalidate_user = mock_domain_cache_invalidate_user,
 };
 
 static inline struct iommufd_hw_pagetable *
@@ -526,19 +591,6 @@ get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
        return hwpt;
 }
 
-struct mock_bus_type {
-       struct bus_type bus;
-       struct notifier_block nb;
-};
-
-static struct mock_bus_type iommufd_mock_bus_type = {
-       .bus = {
-               .name = "iommufd_mock",
-       },
-};
-
-static atomic_t mock_dev_num;
-
 static void mock_dev_release(struct device *dev)
 {
        struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
@@ -793,6 +845,28 @@ static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
        return 0;
 }
 
+static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
+                                      u32 mockpt_id, unsigned int iotlb_id,
+                                      u32 iotlb)
+{
+       struct mock_iommu_domain_nested *mock_nested;
+       struct iommufd_hw_pagetable *hwpt;
+       int rc = 0;
+
+       hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
+       if (IS_ERR(hwpt))
+               return PTR_ERR(hwpt);
+
+       mock_nested = container_of(hwpt->domain,
+                                  struct mock_iommu_domain_nested, domain);
+
+       if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
+           mock_nested->iotlb[iotlb_id] != iotlb)
+               rc = -EINVAL;
+       iommufd_put_object(ucmd->ictx, &hwpt->obj);
+       return rc;
+}
+
 struct selftest_access {
        struct iommufd_access *access;
        struct file *file;
@@ -1274,6 +1348,10 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
                return iommufd_test_md_check_refs(
                        ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
                        cmd->check_refs.length, cmd->check_refs.refs);
+       case IOMMU_TEST_OP_MD_CHECK_IOTLB:
+               return iommufd_test_md_check_iotlb(ucmd, cmd->id,
+                                                  cmd->check_iotlb.id,
+                                                  cmd->check_iotlb.iotlb);
        case IOMMU_TEST_OP_CREATE_ACCESS:
                return iommufd_test_create_access(ucmd, cmd->id,
                                                  cmd->create_access.flags);
index 75279500a4a824f4246bd3fec4fdfb22b1c2cccc..7abe9e85a570632a74080ba275b44e14882d6cec 100644 (file)
@@ -863,16 +863,11 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-       struct mtk_iommu_data *data;
+       struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
        struct device_link *link;
        struct device *larbdev;
        unsigned int larbid, larbidx, i;
 
-       if (!fwspec || fwspec->ops != &mtk_iommu_ops)
-               return ERR_PTR(-ENODEV); /* Not a iommu client device */
-
-       data = dev_iommu_priv_get(dev);
-
        if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
                return &data->iommu;
 
index 67e044c1a7d93bc7e8c398445fd867008d349154..25b41222abaec109b4c87a8feba380401ad61a69 100644 (file)
@@ -481,9 +481,6 @@ static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
                idx++;
        }
 
-       if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
-               return ERR_PTR(-ENODEV); /* Not a iommu client device */
-
        data = dev_iommu_priv_get(dev);
 
        /* Link the consumer device with the smi-larb device(supplier) */
index 35ba090f3b5e24e615e7a604f18fb0d532706a6a..719652b608407ad905721d676ff7a0c245e76ee0 100644 (file)
@@ -17,8 +17,6 @@
 #include <linux/slab.h>
 #include <linux/fsl/mc.h>
 
-#define NO_IOMMU       1
-
 static int of_iommu_xlate(struct device *dev,
                          struct of_phandle_args *iommu_spec)
 {
@@ -29,7 +27,7 @@ static int of_iommu_xlate(struct device *dev,
        ops = iommu_ops_from_fwnode(fwnode);
        if ((ops && !ops->of_xlate) ||
            !of_device_is_available(iommu_spec->np))
-               return NO_IOMMU;
+               return -ENODEV;
 
        ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
        if (ret)
@@ -61,7 +59,7 @@ static int of_iommu_configure_dev_id(struct device_node *master_np,
                         "iommu-map-mask", &iommu_spec.np,
                         iommu_spec.args);
        if (err)
-               return err == -ENODEV ? NO_IOMMU : err;
+               return err;
 
        err = of_iommu_xlate(dev, &iommu_spec);
        of_node_put(iommu_spec.np);
@@ -72,7 +70,7 @@ static int of_iommu_configure_dev(struct device_node *master_np,
                                  struct device *dev)
 {
        struct of_phandle_args iommu_spec;
-       int err = NO_IOMMU, idx = 0;
+       int err = -ENODEV, idx = 0;
 
        while (!of_parse_phandle_with_args(master_np, "iommus",
                                           "#iommu-cells",
@@ -107,16 +105,21 @@ static int of_iommu_configure_device(struct device_node *master_np,
                      of_iommu_configure_dev(master_np, dev);
 }
 
-const struct iommu_ops *of_iommu_configure(struct device *dev,
-                                          struct device_node *master_np,
-                                          const u32 *id)
+/*
+ * Returns:
+ *  0 on success, an iommu was configured
+ *  -ENODEV if the device does not have any IOMMU
+ *  -EPROBEDEFER if probing should be tried again
+ *  -errno fatal errors
+ */
+int of_iommu_configure(struct device *dev, struct device_node *master_np,
+                      const u32 *id)
 {
-       const struct iommu_ops *ops = NULL;
        struct iommu_fwspec *fwspec;
-       int err = NO_IOMMU;
+       int err;
 
        if (!master_np)
-               return NULL;
+               return -ENODEV;
 
        /* Serialise to make dev->iommu stable under our potential fwspec */
        mutex_lock(&iommu_probe_device_lock);
@@ -124,7 +127,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
        if (fwspec) {
                if (fwspec->ops) {
                        mutex_unlock(&iommu_probe_device_lock);
-                       return fwspec->ops;
+                       return 0;
                }
                /* In the deferred case, start again from scratch */
                iommu_fwspec_free(dev);
@@ -147,36 +150,21 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
        } else {
                err = of_iommu_configure_device(master_np, dev, id);
        }
-
-       /*
-        * Two success conditions can be represented by non-negative err here:
-        * >0 : there is no IOMMU, or one was unavailable for non-fatal reasons
-        *  0 : we found an IOMMU, and dev->fwspec is initialised appropriately
-        * <0 : any actual error
-        */
-       if (!err) {
-               /* The fwspec pointer changed, read it again */
-               fwspec = dev_iommu_fwspec_get(dev);
-               ops    = fwspec->ops;
-       }
        mutex_unlock(&iommu_probe_device_lock);
 
-       /*
-        * If we have reason to believe the IOMMU driver missed the initial
-        * probe for dev, replay it to get things in order.
-        */
-       if (!err && dev->bus)
-               err = iommu_probe_device(dev);
-
-       /* Ignore all other errors apart from EPROBE_DEFER */
-       if (err == -EPROBE_DEFER) {
-               ops = ERR_PTR(err);
-       } else if (err < 0) {
-               dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
-               ops = NULL;
-       }
+       if (err == -ENODEV || err == -EPROBE_DEFER)
+               return err;
+       if (err)
+               goto err_log;
 
-       return ops;
+       err = iommu_probe_device(dev);
+       if (err)
+               goto err_log;
+       return 0;
+
+err_log:
+       dev_dbg(dev, "Adding to IOMMU failed: %pe\n", ERR_PTR(err));
+       return err;
 }
 
 static enum iommu_resv_type __maybe_unused
@@ -260,7 +248,14 @@ void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
                                phys_addr_t iova;
                                size_t length;
 
+                               if (of_dma_is_coherent(dev->of_node))
+                                       prot |= IOMMU_CACHE;
+
                                maps = of_translate_dma_region(np, maps, &iova, &length);
+                               if (length == 0) {
+                                       dev_warn(dev, "Cannot reserve IOVA region of 0 size\n");
+                                       continue;
+                               }
                                type = iommu_resv_region_get_type(dev, &phys, iova, length);
 
                                region = iommu_alloc_resv_region(iova, length, prot, type,
index c66b070841dd41e0c322f12515c7d8f919e5bd16..c9528065a59afac738a6f06ba89ef11c90082a72 100644 (file)
@@ -1719,7 +1719,6 @@ static void omap_iommu_release_device(struct device *dev)
        if (!dev->of_node || !arch_data)
                return;
 
-       dev_iommu_priv_set(dev, NULL);
        kfree(arch_data);
 
 }
index 2eb9fb46703b3ae3d836b6c757a511f37d5998e5..537359f109979b703707ff52d398c61866adb73c 100644 (file)
@@ -385,13 +385,7 @@ static phys_addr_t sprd_iommu_iova_to_phys(struct iommu_domain *domain,
 
 static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
 {
-       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-       struct sprd_iommu_device *sdev;
-
-       if (!fwspec || fwspec->ops != &sprd_iommu_ops)
-               return ERR_PTR(-ENODEV);
-
-       sdev = dev_iommu_priv_get(dev);
+       struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
 
        return &sdev->iommu;
 }
index 379ebe03efb6d45b42afd8a63b4fcb830bb37903..34db37fd9675cd98403633547b34663c2241821d 100644 (file)
@@ -843,7 +843,7 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
                        .flags          = cpu_to_le32(flags),
                };
 
-               ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+               ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
                if (ret) {
                        viommu_del_mappings(vdomain, iova, end);
                        return ret;
@@ -912,6 +912,33 @@ static void viommu_iotlb_sync(struct iommu_domain *domain,
        viommu_sync_req(vdomain->viommu);
 }
 
+static int viommu_iotlb_sync_map(struct iommu_domain *domain,
+                                unsigned long iova, size_t size)
+{
+       struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+       /*
+        * May be called before the viommu is initialized including
+        * while creating direct mapping
+        */
+       if (!vdomain->nr_endpoints)
+               return 0;
+       return viommu_sync_req(vdomain->viommu);
+}
+
+static void viommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+       struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+       /*
+        * May be called before the viommu is initialized including
+        * while creating direct mapping
+        */
+       if (!vdomain->nr_endpoints)
+               return;
+       viommu_sync_req(vdomain->viommu);
+}
+
 static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
 {
        struct iommu_resv_region *entry, *new_entry, *msi = NULL;
@@ -969,9 +996,6 @@ static struct iommu_device *viommu_probe_device(struct device *dev)
        struct viommu_dev *viommu = NULL;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 
-       if (!fwspec || fwspec->ops != &viommu_ops)
-               return ERR_PTR(-ENODEV);
-
        viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
        if (!viommu)
                return ERR_PTR(-ENODEV);
@@ -1037,6 +1061,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
                return true;
+       case IOMMU_CAP_DEFERRED_FLUSH:
+               return true;
        default:
                return false;
        }
@@ -1057,7 +1083,9 @@ static struct iommu_ops viommu_ops = {
                .map_pages              = viommu_map_pages,
                .unmap_pages            = viommu_unmap_pages,
                .iova_to_phys           = viommu_iova_to_phys,
+               .flush_iotlb_all        = viommu_flush_iotlb_all,
                .iotlb_sync             = viommu_iotlb_sync,
+               .iotlb_sync_map         = viommu_iotlb_sync_map,
                .free                   = viommu_domain_free,
        }
 };
index da308be6c4877e2dbbae38948b2716d54fe44850..ba2e9e52d72bf02831cd2ed1160023d4784c807e 100644 (file)
@@ -158,9 +158,7 @@ static int ipoctal_get_icount(struct tty_struct *tty,
 static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr)
 {
        struct tty_port *port = &channel->tty_port;
-       unsigned char value;
-       unsigned char flag;
-       u8 isr;
+       u8 isr, value, flag;
 
        do {
                value = ioread8(&channel->regs->r.rhr);
@@ -202,8 +200,8 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr)
 
 static void ipoctal_irq_tx(struct ipoctal_channel *channel)
 {
-       unsigned char value;
        unsigned int *pointer_write = &channel->pointer_write;
+       u8 value;
 
        if (channel->nb_bytes == 0)
                return;
@@ -436,11 +434,11 @@ err_put_driver:
        return res;
 }
 
-static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
-                                           const u8 *buf, int count)
+static inline size_t ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
+                                              const u8 *buf, size_t count)
 {
        unsigned long flags;
-       int i;
+       size_t i;
        unsigned int *pointer_read = &channel->pointer_read;
 
        /* Copy the bytes from the user buffer to the internal one */
@@ -462,7 +460,7 @@ static ssize_t ipoctal_write_tty(struct tty_struct *tty, const u8 *buf,
                                 size_t count)
 {
        struct ipoctal_channel *channel = tty->driver_data;
-       unsigned int char_copied;
+       size_t char_copied;
 
        char_copied = ipoctal_copy_write_buffer(channel, buf, count);
 
index cc1ecfd4992866c780cc57e68583eb9e4f415713..b1471ba016a515b11abf87a5987905174f8aaf6d 100644 (file)
@@ -207,7 +207,7 @@ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
        if (!bus)
                return NULL;
 
-       bus_nr = ida_simple_get(&ipack_ida, 0, 0, GFP_KERNEL);
+       bus_nr = ida_alloc(&ipack_ida, GFP_KERNEL);
        if (bus_nr < 0) {
                kfree(bus);
                return NULL;
@@ -237,7 +237,7 @@ int ipack_bus_unregister(struct ipack_bus_device *bus)
 {
        bus_for_each_dev(&ipack_bus_type, NULL, bus,
                ipack_unregister_bus_member);
-       ida_simple_remove(&ipack_ida, bus->bus_nr);
+       ida_free(&ipack_ida, bus->bus_nr);
        kfree(bus);
        return 0;
 }
index 2f37895154455e9ff806a294f2336fc83bdd43a5..6e80d7bd3c4dc51f1467fd94640596b1a04c8b01 100644 (file)
@@ -1231,9 +1231,9 @@ static void capinc_tty_hangup(struct tty_struct *tty)
        tty_port_hangup(&mp->port);
 }
 
-static void capinc_tty_send_xchar(struct tty_struct *tty, char ch)
+static void capinc_tty_send_xchar(struct tty_struct *tty, u8 ch)
 {
-       pr_debug("capinc_tty_send_xchar(%d)\n", ch);
+       pr_debug("capinc_tty_send_xchar(%u)\n", ch);
 }
 
 static const struct tty_operations capinc_ops = {
index 6292fddcc55cd4c7ef694cfdf3694b5fac61aea9..d721b254e1e45072a627cbe45d3fed97ed81a5f7 100644 (file)
@@ -95,14 +95,18 @@ config LEDS_ARIEL
          Say Y to if your machine is a Dell Wyse 3020 thin client.
 
 config LEDS_AW200XX
-       tristate "LED support for Awinic AW20036/AW20054/AW20072"
+       tristate "LED support for Awinic AW20036/AW20054/AW20072/AW20108"
        depends on LEDS_CLASS
        depends on I2C
        help
-         This option enables support for the AW20036/AW20054/AW20072 LED driver.
-         It is a 3x12/6x9/6x12 matrix LED driver programmed via
-         an I2C interface, up to 36/54/72 LEDs or 12/18/24 RGBs,
-         3 pattern controllers for auto breathing or group dimming control.
+         This option enables support for the Awinic AW200XX LED controllers.
+         It is a matrix LED driver programmed via an I2C interface. Devices have
+         a set of individually controlled LEDs and support 3 pattern controllers
+         for auto breathing or group dimming control. Supported devices:
+           - AW20036 (3x12) 36 LEDs
+           - AW20054 (6x9)  54 LEDs
+           - AW20072 (6x12) 72 LEDs
+           - AW20108 (9x12) 108 LEDs
 
          To compile this driver as a module, choose M here: the module
          will be called leds-aw200xx.
@@ -110,6 +114,7 @@ config LEDS_AW200XX
 config LEDS_AW2013
        tristate "LED support for Awinic AW2013"
        depends on LEDS_CLASS && I2C && OF
+       select REGMAP_I2C
        help
          This option enables support for the AW2013 3-channel
          LED driver.
@@ -298,6 +303,15 @@ config LEDS_COBALT_RAQ
        help
          This option enables support for the Cobalt Raq series LEDs.
 
+config LEDS_SUN50I_A100
+       tristate "LED support for Allwinner A100 RGB LED controller"
+       depends on LEDS_CLASS_MULTICOLOR
+       depends on ARCH_SUNXI || COMPILE_TEST
+       help
+         This option enables support for the RGB LED controller found
+         in some Allwinner sunxi SoCs, including A100, R329, and D1.
+         It uses a one-wire interface to control up to 1024 LEDs.
+
 config LEDS_SUNFIRE
        tristate "LED support for SunFire servers."
        depends on LEDS_CLASS
@@ -638,6 +652,17 @@ config LEDS_ADP5520
          To compile this driver as a module, choose M here: the module will
          be called leds-adp5520.
 
+config LEDS_MAX5970
+       tristate "LED Support for Maxim 5970"
+       depends on LEDS_CLASS
+       depends on MFD_MAX5970
+       help
+         This option enables support for the Maxim MAX5970 & MAX5978 smart
+         switch indication LEDs via the I2C bus.
+
+         To compile this driver as a module, choose M here: the module will
+         be called leds-max5970.
+
 config LEDS_MC13783
        tristate "LED Support for MC13XXX PMIC"
        depends on LEDS_CLASS
index d7348e8bc019acf21a713507410d050fcb5f9251..ce07dc295ff000082c7ca187d6594ce8175c0f6d 100644 (file)
@@ -56,6 +56,7 @@ obj-$(CONFIG_LEDS_LP8501)             += leds-lp8501.o
 obj-$(CONFIG_LEDS_LP8788)              += leds-lp8788.o
 obj-$(CONFIG_LEDS_LP8860)              += leds-lp8860.o
 obj-$(CONFIG_LEDS_LT3593)              += leds-lt3593.o
+obj-$(CONFIG_LEDS_MAX5970)             += leds-max5970.o
 obj-$(CONFIG_LEDS_MAX77650)            += leds-max77650.o
 obj-$(CONFIG_LEDS_MAX8997)             += leds-max8997.o
 obj-$(CONFIG_LEDS_MC13783)             += leds-mc13783.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_LEDS_POWERNV)            += leds-powernv.o
 obj-$(CONFIG_LEDS_PWM)                 += leds-pwm.o
 obj-$(CONFIG_LEDS_REGULATOR)           += leds-regulator.o
 obj-$(CONFIG_LEDS_SC27XX_BLTC)         += leds-sc27xx-bltc.o
+obj-$(CONFIG_LEDS_SUN50I_A100)         += leds-sun50i-a100.o
 obj-$(CONFIG_LEDS_SUNFIRE)             += leds-sunfire.o
 obj-$(CONFIG_LEDS_SYSCON)              += leds-syscon.o
 obj-$(CONFIG_LEDS_TCA6507)             += leds-tca6507.o
index 6a5e1f41f9a4527e7373de97cf63ee5079d946c1..bd59a14a4a90c2394268418e9347c50e1aff4154 100644 (file)
@@ -269,19 +269,6 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
 }
 EXPORT_SYMBOL_GPL(led_trigger_set_default);
 
-void led_trigger_rename_static(const char *name, struct led_trigger *trig)
-{
-       /* new name must be on a temporary string to prevent races */
-       BUG_ON(name == trig->name);
-
-       down_write(&triggers_list_lock);
-       /* this assumes that trig->name was originaly allocated to
-        * non constant storage */
-       strcpy((char *)trig->name, name);
-       up_write(&triggers_list_lock);
-}
-EXPORT_SYMBOL_GPL(led_trigger_rename_static);
-
 /* LED Trigger Interface */
 
 int led_trigger_register(struct led_trigger *trig)
index 14ca236ce29e59fb65c4fedb026ec3df5405a5cf..f584a7f98fc5b8811fc449e8709449ec56e16cb7 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Awinic AW20036/AW20054/AW20072 LED driver
+ * Awinic AW20036/AW20054/AW20072/AW20108 LED driver
  *
  * Copyright (c) 2023, SberDevices. All Rights Reserved.
  *
@@ -10,6 +10,7 @@
 #include <linux/bitfield.h>
 #include <linux/bits.h>
 #include <linux/container_of.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/leds.h>
 #include <linux/mod_devicetable.h>
 #define AW200XX_LED2REG(x, columns) \
        ((x) + (((x) / (columns)) * (AW200XX_DSIZE_COLUMNS_MAX - (columns))))
 
+/* DIM current configuration register on page 1 */
+#define AW200XX_REG_DIM_PAGE1(x, columns) \
+       AW200XX_REG(AW200XX_PAGE1, AW200XX_LED2REG(x, columns))
+
 /*
  * DIM current configuration register (page 4).
  * The even address for current DIM configuration.
@@ -82,6 +87,8 @@
 #define AW200XX_REG_DIM(x, columns) \
        AW200XX_REG(AW200XX_PAGE4, AW200XX_LED2REG(x, columns) * 2)
 #define AW200XX_REG_DIM2FADE(x) ((x) + 1)
+#define AW200XX_REG_FADE2DIM(fade) \
+       DIV_ROUND_UP((fade) * AW200XX_DIM_MAX, AW200XX_FADE_MAX)
 
 /*
  * Duty ratio of display scan (see p.15 of datasheet for formula):
@@ -112,6 +119,7 @@ struct aw200xx {
        struct mutex mutex;
        u32 num_leds;
        u32 display_rows;
+       struct gpio_desc *hwen;
        struct aw200xx_led leds[] __counted_by(num_leds);
 };
 
@@ -153,7 +161,8 @@ static ssize_t dim_store(struct device *dev, struct device_attribute *devattr,
 
        if (dim >= 0) {
                ret = regmap_write(chip->regmap,
-                                  AW200XX_REG_DIM(led->num, columns), dim);
+                                  AW200XX_REG_DIM_PAGE1(led->num, columns),
+                                  dim);
                if (ret)
                        goto out_unlock;
        }
@@ -188,9 +197,7 @@ static int aw200xx_brightness_set(struct led_classdev *cdev,
 
        dim = led->dim;
        if (dim < 0)
-               dim = max_t(int,
-                           brightness / (AW200XX_FADE_MAX / AW200XX_DIM_MAX),
-                           1);
+               dim = AW200XX_REG_FADE2DIM(brightness);
 
        ret = regmap_write(chip->regmap, reg, dim);
        if (ret)
@@ -314,6 +321,9 @@ static int aw200xx_chip_reset(const struct aw200xx *const chip)
        if (ret)
                return ret;
 
+       /* According to the datasheet software reset takes at least 1ms */
+       fsleep(1000);
+
        regcache_mark_dirty(chip->regmap);
        return regmap_write(chip->regmap, AW200XX_REG_FCD, AW200XX_FCD_CLEAR);
 }
@@ -353,6 +363,50 @@ static int aw200xx_chip_check(const struct aw200xx *const chip)
        return 0;
 }
 
+static void aw200xx_enable(const struct aw200xx *const chip)
+{
+       gpiod_set_value_cansleep(chip->hwen, 1);
+
+       /*
+        * After HWEN pin set high the chip begins to load the OTP information,
+        * which takes 200us to complete. About 200us wait time is needed for
+        * internal oscillator startup and display SRAM initialization. After
+        * display SRAM initialization, the registers in page1 to page5 can be
+        * configured via i2c interface.
+        */
+       fsleep(400);
+}
+
+static void aw200xx_disable(const struct aw200xx *const chip)
+{
+       return gpiod_set_value_cansleep(chip->hwen, 0);
+}
+
+static int aw200xx_probe_get_display_rows(struct device *dev,
+                                         struct aw200xx *chip)
+{
+       struct fwnode_handle *child;
+       u32 max_source = 0;
+
+       device_for_each_child_node(dev, child) {
+               u32 source;
+               int ret;
+
+               ret = fwnode_property_read_u32(child, "reg", &source);
+               if (ret || source >= chip->cdef->channels)
+                       continue;
+
+               max_source = max(max_source, source);
+       }
+
+       if (max_source == 0)
+               return -EINVAL;
+
+       chip->display_rows = max_source / chip->cdef->display_size_columns + 1;
+
+       return 0;
+}
+
 static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip)
 {
        struct fwnode_handle *child;
@@ -360,18 +414,10 @@ static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip)
        int ret;
        int i;
 
-       ret = device_property_read_u32(dev, "awinic,display-rows",
-                                      &chip->display_rows);
+       ret = aw200xx_probe_get_display_rows(dev, chip);
        if (ret)
                return dev_err_probe(dev, ret,
-                                    "Failed to read 'display-rows' property\n");
-
-       if (!chip->display_rows ||
-           chip->display_rows > chip->cdef->display_size_rows_max) {
-               return dev_err_probe(dev, -EINVAL,
-                                    "Invalid leds display size %u\n",
-                                    chip->display_rows);
-       }
+                                    "No valid led definitions found\n");
 
        current_max = aw200xx_imax_from_global(chip, AW200XX_IMAX_MAX_uA);
        current_min = aw200xx_imax_from_global(chip, AW200XX_IMAX_MIN_uA);
@@ -416,6 +462,7 @@ static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip)
                led->num = source;
                led->chip = chip;
                led->cdev.brightness_set_blocking = aw200xx_brightness_set;
+               led->cdev.max_brightness = AW200XX_FADE_MAX;
                led->cdev.groups = dim_groups;
                init_data.fwnode = child;
 
@@ -480,6 +527,7 @@ static const struct regmap_config aw200xx_regmap_config = {
        .rd_table = &aw200xx_readable_table,
        .wr_table = &aw200xx_writeable_table,
        .cache_type = REGCACHE_MAPLE,
+       .disable_locking = true,
 };
 
 static int aw200xx_probe(struct i2c_client *client)
@@ -512,6 +560,14 @@ static int aw200xx_probe(struct i2c_client *client)
        if (IS_ERR(chip->regmap))
                return PTR_ERR(chip->regmap);
 
+       chip->hwen = devm_gpiod_get_optional(&client->dev, "enable",
+                                            GPIOD_OUT_HIGH);
+       if (IS_ERR(chip->hwen))
+               return dev_err_probe(&client->dev, PTR_ERR(chip->hwen),
+                                    "Cannot get enable GPIO");
+
+       aw200xx_enable(chip);
+
        ret = aw200xx_chip_check(chip);
        if (ret)
                return ret;
@@ -532,6 +588,9 @@ static int aw200xx_probe(struct i2c_client *client)
        ret = aw200xx_chip_init(chip);
 
 out_unlock:
+       if (ret)
+               aw200xx_disable(chip);
+
        mutex_unlock(&chip->mutex);
        return ret;
 }
@@ -541,6 +600,7 @@ static void aw200xx_remove(struct i2c_client *client)
        struct aw200xx *chip = i2c_get_clientdata(client);
 
        aw200xx_chip_reset(chip);
+       aw200xx_disable(chip);
        mutex_destroy(&chip->mutex);
 }
 
@@ -562,10 +622,17 @@ static const struct aw200xx_chipdef aw20072_cdef = {
        .display_size_columns = 12,
 };
 
+static const struct aw200xx_chipdef aw20108_cdef = {
+       .channels = 108,
+       .display_size_rows_max = 9,
+       .display_size_columns = 12,
+};
+
 static const struct i2c_device_id aw200xx_id[] = {
        { "aw20036" },
        { "aw20054" },
        { "aw20072" },
+       { "aw20108" },
        {}
 };
 MODULE_DEVICE_TABLE(i2c, aw200xx_id);
@@ -574,6 +641,7 @@ static const struct of_device_id aw200xx_match_table[] = {
        { .compatible = "awinic,aw20036", .data = &aw20036_cdef, },
        { .compatible = "awinic,aw20054", .data = &aw20054_cdef, },
        { .compatible = "awinic,aw20072", .data = &aw20072_cdef, },
+       { .compatible = "awinic,aw20108", .data = &aw20108_cdef, },
        {}
 };
 MODULE_DEVICE_TABLE(of, aw200xx_match_table);
index 710c319ad312bf6da20d3d263c207269e4d00521..83fcd7b6afff767583bb39670abe5f2e058504f6 100644 (file)
@@ -172,6 +172,8 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev)
                led.gpiod = devm_fwnode_gpiod_get(dev, child, NULL, GPIOD_ASIS,
                                                  NULL);
                if (IS_ERR(led.gpiod)) {
+                       dev_err_probe(dev, PTR_ERR(led.gpiod), "Failed to get GPIO '%pfw'\n",
+                                     child);
                        fwnode_handle_put(child);
                        return ERR_CAST(led.gpiod);
                }
diff --git a/drivers/leds/leds-max5970.c b/drivers/leds/leds-max5970.c
new file mode 100644 (file)
index 0000000..56a5843
--- /dev/null
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device driver for leds in MAX5970 and MAX5978 IC
+ *
+ * Copyright (c) 2022 9elements GmbH
+ *
+ * Author: Patrick Rudolph <patrick.rudolph@9elements.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+#include <linux/mfd/max5970.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define ldev_to_maxled(c)       container_of(c, struct max5970_led, cdev)
+
+struct max5970_led {
+       struct device *dev;
+       struct regmap *regmap;
+       struct led_classdev cdev;
+       unsigned int index;
+};
+
+static int max5970_led_set_brightness(struct led_classdev *cdev,
+                                     enum led_brightness brightness)
+{
+       struct max5970_led *ddata = ldev_to_maxled(cdev);
+       int ret, val;
+
+       /* Set/clear corresponding bit for given led index */
+       val = !brightness ? BIT(ddata->index) : 0;
+
+       ret = regmap_update_bits(ddata->regmap, MAX5970_REG_LED_FLASH, BIT(ddata->index), val);
+       if (ret < 0)
+               dev_err(cdev->dev, "failed to set brightness %d", ret);
+
+       return ret;
+}
+
+static int max5970_led_probe(struct platform_device *pdev)
+{
+       struct fwnode_handle *led_node, *child;
+       struct device *dev = &pdev->dev;
+       struct regmap *regmap;
+       struct max5970_led *ddata;
+       int ret = -ENODEV;
+
+       regmap = dev_get_regmap(dev->parent, NULL);
+       if (!regmap)
+               return -ENODEV;
+
+       led_node = device_get_named_child_node(dev->parent, "leds");
+       if (!led_node)
+               return -ENODEV;
+
+       fwnode_for_each_available_child_node(led_node, child) {
+               u32 reg;
+
+               if (fwnode_property_read_u32(child, "reg", &reg))
+                       continue;
+
+               if (reg >= MAX5970_NUM_LEDS) {
+                       dev_err_probe(dev, -EINVAL, "invalid LED (%u >= %d)\n", reg, MAX5970_NUM_LEDS);
+                       continue;
+               }
+
+               ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+               if (!ddata) {
+                       fwnode_handle_put(child);
+                       return -ENOMEM;
+               }
+
+               ddata->index = reg;
+               ddata->regmap = regmap;
+               ddata->dev = dev;
+
+               if (fwnode_property_read_string(child, "label", &ddata->cdev.name))
+                       ddata->cdev.name = fwnode_get_name(child);
+
+               ddata->cdev.max_brightness = 1;
+               ddata->cdev.brightness_set_blocking = max5970_led_set_brightness;
+               ddata->cdev.default_trigger = "none";
+
+               ret = devm_led_classdev_register(dev, &ddata->cdev);
+               if (ret < 0) {
+                       fwnode_handle_put(child);
+                       return dev_err_probe(dev, ret, "Failed to initialize LED %u\n", reg);
+               }
+       }
+
+       return ret;
+}
+
+static struct platform_driver max5970_led_driver = {
+       .driver = {
+               .name = "max5970-led",
+       },
+       .probe = max5970_led_probe,
+};
+module_platform_driver(max5970_led_driver);
+
+MODULE_AUTHOR("Patrick Rudolph <patrick.rudolph@9elements.com>");
+MODULE_AUTHOR("Naresh Solanki <Naresh.Solanki@9elements.com>");
+MODULE_DESCRIPTION("MAX5970_hot-swap controller LED driver");
+MODULE_LICENSE("GPL");
index 2b3bf1353b7077cbb999c82269cb5f395341ccb0..4e3936a39d0ed3262c3fa8fc000b1f8de38279d8 100644 (file)
@@ -54,7 +54,7 @@ static int led_pwm_set(struct led_classdev *led_cdev,
 
        led_dat->pwmstate.duty_cycle = duty;
        led_dat->pwmstate.enabled = true;
-       return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
+       return pwm_apply_might_sleep(led_dat->pwm, &led_dat->pwmstate);
 }
 
 __attribute__((nonnull))
diff --git a/drivers/leds/leds-sun50i-a100.c b/drivers/leds/leds-sun50i-a100.c
new file mode 100644 (file)
index 0000000..62d21c3
--- /dev/null
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021-2023 Samuel Holland <samuel@sholland.org>
+ *
+ * Partly based on drivers/leds/leds-turris-omnia.c, which is:
+ *     Copyright (c) 2020 by Marek Behún <kabel@kernel.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define LEDC_CTRL_REG                  0x0000
+#define LEDC_CTRL_REG_DATA_LENGTH              GENMASK(28, 16)
+#define LEDC_CTRL_REG_RGB_MODE                 GENMASK(8, 6)
+#define LEDC_CTRL_REG_LEDC_EN                  BIT(0)
+#define LEDC_T01_TIMING_CTRL_REG       0x0004
+#define LEDC_T01_TIMING_CTRL_REG_T1H           GENMASK(26, 21)
+#define LEDC_T01_TIMING_CTRL_REG_T1L           GENMASK(20, 16)
+#define LEDC_T01_TIMING_CTRL_REG_T0H           GENMASK(10, 6)
+#define LEDC_T01_TIMING_CTRL_REG_T0L           GENMASK(5, 0)
+#define LEDC_RESET_TIMING_CTRL_REG     0x000c
+#define LEDC_RESET_TIMING_CTRL_REG_TR          GENMASK(28, 16)
+#define LEDC_RESET_TIMING_CTRL_REG_LED_NUM     GENMASK(9, 0)
+#define LEDC_DATA_REG                  0x0014
+#define LEDC_DMA_CTRL_REG              0x0018
+#define LEDC_DMA_CTRL_REG_DMA_EN               BIT(5)
+#define LEDC_DMA_CTRL_REG_FIFO_TRIG_LEVEL      GENMASK(4, 0)
+#define LEDC_INT_CTRL_REG              0x001c
+#define LEDC_INT_CTRL_REG_GLOBAL_INT_EN                BIT(5)
+#define LEDC_INT_CTRL_REG_FIFO_CPUREQ_INT_EN   BIT(1)
+#define LEDC_INT_CTRL_REG_TRANS_FINISH_INT_EN  BIT(0)
+#define LEDC_INT_STS_REG               0x0020
+#define LEDC_INT_STS_REG_FIFO_WLW              GENMASK(15, 10)
+#define LEDC_INT_STS_REG_FIFO_CPUREQ_INT       BIT(1)
+#define LEDC_INT_STS_REG_TRANS_FINISH_INT      BIT(0)
+
+#define LEDC_FIFO_DEPTH                        32U
+#define LEDC_MAX_LEDS                  1024
+#define LEDC_CHANNELS_PER_LED          3 /* RGB */
+
+#define LEDS_TO_BYTES(n)               ((n) * sizeof(u32))
+
/* State for one RGB LED on the serial data line, exposed as a multicolor LED. */
struct sun50i_a100_ledc_led {
	struct led_classdev_mc mc_cdev;				/* multicolor class device */
	struct mc_subled subled_info[LEDC_CHANNELS_PER_LED];	/* R/G/B components */
	u32 addr;	/* position on the data line (DT "reg" property) */
};

#define to_ledc_led(mc) container_of(mc, struct sun50i_a100_ledc_led, mc_cdev)

/* Bit and reset timings, all in nanoseconds ("allwinner,t*-ns" properties). */
struct sun50i_a100_ledc_timing {
	u32 t0h_ns;	/* "0" bit high time */
	u32 t0l_ns;	/* "0" bit low time */
	u32 t1h_ns;	/* "1" bit high time */
	u32 t1l_ns;	/* "1" bit low time */
	u32 treset_ns;	/* reset/latch time between frames */
};

/* Per-controller driver state. */
struct sun50i_a100_ledc {
	struct device *dev;
	void __iomem *base;		/* MMIO register block */
	struct clk *bus_clk;
	struct clk *mod_clk;		/* rate determines bit timing units */
	struct reset_control *reset;

	u32 *buffer;			/* one 32-bit word per LED */
	struct dma_chan *dma_chan;	/* NULL when running in PIO mode */
	dma_addr_t dma_handle;
	unsigned int pio_length;	/* words remaining in the current PIO transfer */
	unsigned int pio_offset;	/* index of the next word to push to the FIFO */

	spinlock_t lock;		/* protects next_length and xfer_active */
	unsigned int next_length;	/* length of the queued follow-up transfer, 0 if none */
	bool xfer_active;

	u32 format;			/* index into sun50i_a100_ledc_formats */
	struct sun50i_a100_ledc_timing timing;

	u32 max_addr;			/* highest LED address found in DT */
	u32 num_leds;
	struct sun50i_a100_ledc_led leds[] __counted_by(num_leds);
};
+
+static int sun50i_a100_ledc_dma_xfer(struct sun50i_a100_ledc *priv, unsigned int length)
+{
+       struct dma_async_tx_descriptor *desc;
+       dma_cookie_t cookie;
+
+       desc = dmaengine_prep_slave_single(priv->dma_chan, priv->dma_handle,
+                                          LEDS_TO_BYTES(length), DMA_MEM_TO_DEV, 0);
+       if (!desc)
+               return -ENOMEM;
+
+       cookie = dmaengine_submit(desc);
+       if (dma_submit_error(cookie))
+               return -EIO;
+
+       dma_async_issue_pending(priv->dma_chan);
+
+       return 0;
+}
+
+static void sun50i_a100_ledc_pio_xfer(struct sun50i_a100_ledc *priv, unsigned int fifo_used)
+{
+       unsigned int burst, length, offset;
+       u32 control;
+
+       length = priv->pio_length;
+       offset = priv->pio_offset;
+       burst  = min(length, LEDC_FIFO_DEPTH - fifo_used);
+
+       iowrite32_rep(priv->base + LEDC_DATA_REG, priv->buffer + offset, burst);
+
+       if (burst < length) {
+               priv->pio_length = length - burst;
+               priv->pio_offset = offset + burst;
+
+               if (!offset) {
+                       control = readl(priv->base + LEDC_INT_CTRL_REG);
+                       control |= LEDC_INT_CTRL_REG_FIFO_CPUREQ_INT_EN;
+                       writel(control, priv->base + LEDC_INT_CTRL_REG);
+               }
+       } else {
+               /* Disable the request IRQ once all data is written. */
+               control = readl(priv->base + LEDC_INT_CTRL_REG);
+               control &= ~LEDC_INT_CTRL_REG_FIFO_CPUREQ_INT_EN;
+               writel(control, priv->base + LEDC_INT_CTRL_REG);
+       }
+}
+
+static void sun50i_a100_ledc_start_xfer(struct sun50i_a100_ledc *priv, unsigned int length)
+{
+       bool use_dma = false;
+       u32 control;
+
+       if (priv->dma_chan && length > LEDC_FIFO_DEPTH) {
+               int ret;
+
+               ret = sun50i_a100_ledc_dma_xfer(priv, length);
+               if (ret)
+                       dev_warn(priv->dev, "Failed to set up DMA (%d), using PIO\n", ret);
+               else
+                       use_dma = true;
+       }
+
+       /* The DMA trigger level must be at least the burst length. */
+       control = FIELD_PREP(LEDC_DMA_CTRL_REG_DMA_EN, use_dma) |
+                 FIELD_PREP_CONST(LEDC_DMA_CTRL_REG_FIFO_TRIG_LEVEL, LEDC_FIFO_DEPTH / 2);
+       writel(control, priv->base + LEDC_DMA_CTRL_REG);
+
+       control = readl(priv->base + LEDC_CTRL_REG);
+       control &= ~LEDC_CTRL_REG_DATA_LENGTH;
+       control |= FIELD_PREP(LEDC_CTRL_REG_DATA_LENGTH, length) | LEDC_CTRL_REG_LEDC_EN;
+       writel(control, priv->base + LEDC_CTRL_REG);
+
+       if (!use_dma) {
+               /* The FIFO is empty when starting a new transfer. */
+               unsigned int fifo_used = 0;
+
+               priv->pio_length = length;
+               priv->pio_offset = 0;
+
+               sun50i_a100_ledc_pio_xfer(priv, fifo_used);
+       }
+}
+
/*
 * Interrupt handler.  On transfer completion, either start the queued
 * follow-up transfer or mark the engine idle; on a FIFO request, feed the
 * next chunk of a PIO transfer.  The status bits are write-1-to-clear and
 * are acknowledged last, after the condition has been handled.
 */
static irqreturn_t sun50i_a100_ledc_irq(int irq, void *data)
{
	struct sun50i_a100_ledc *priv = data;
	u32 status;

	status = readl(priv->base + LEDC_INT_STS_REG);

	if (status & LEDC_INT_STS_REG_TRANS_FINISH_INT) {
		unsigned int next_length;

		spin_lock(&priv->lock);

		/* If another transfer is queued, dequeue and start it. */
		next_length = priv->next_length;
		if (next_length)
			priv->next_length = 0;
		else
			priv->xfer_active = false;

		spin_unlock(&priv->lock);

		/* Start outside the lock; start_xfer touches hardware, not the queue. */
		if (next_length)
			sun50i_a100_ledc_start_xfer(priv, next_length);
	} else if (status & LEDC_INT_STS_REG_FIFO_CPUREQ_INT) {
		/* Continue the current transfer. */
		sun50i_a100_ledc_pio_xfer(priv, FIELD_GET(LEDC_INT_STS_REG_FIFO_WLW, status));
	}

	/* Clear the W1C status bits. */
	writel(status, priv->base + LEDC_INT_STS_REG);

	return IRQ_HANDLED;
}
+
/*
 * LED class brightness callback.  Updates this LED's word in the pixel
 * buffer, then either starts a transfer covering all LEDs up to this
 * address, or (if one is already running) queues/extends a follow-up
 * transfer that the IRQ handler will start on completion.  The buffer is
 * written before the transfer is (potentially) started.
 */
static void sun50i_a100_ledc_brightness_set(struct led_classdev *cdev,
					    enum led_brightness brightness)
{
	struct sun50i_a100_ledc *priv = dev_get_drvdata(cdev->dev->parent);
	struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
	struct sun50i_a100_ledc_led *led = to_ledc_led(mc_cdev);
	unsigned int next_length;
	unsigned long flags;
	bool xfer_active;

	led_mc_calc_color_components(mc_cdev, brightness);

	/* Pack the components as subled[0] << 16 | subled[1] << 8 | subled[2]. */
	priv->buffer[led->addr] = led->subled_info[0].brightness << 16 |
				  led->subled_info[1].brightness <<  8 |
				  led->subled_info[2].brightness;

	spin_lock_irqsave(&priv->lock, flags);

	/* Start, enqueue, or extend an enqueued transfer, as appropriate. */
	next_length = max(priv->next_length, led->addr + 1);
	xfer_active = priv->xfer_active;
	if (xfer_active)
		priv->next_length = next_length;
	else
		priv->xfer_active = true;

	spin_unlock_irqrestore(&priv->lock, flags);

	if (!xfer_active)
		sun50i_a100_ledc_start_xfer(priv, next_length);
}
+
+static const char *const sun50i_a100_ledc_formats[] = {
+       "rgb", "rbg", "grb", "gbr", "brg", "bgr",
+};
+
+static int sun50i_a100_ledc_parse_format(struct device *dev,
+                                        struct sun50i_a100_ledc *priv)
+{
+       const char *format = "grb";
+       u32 i;
+
+       device_property_read_string(dev, "allwinner,pixel-format", &format);
+
+       for (i = 0; i < ARRAY_SIZE(sun50i_a100_ledc_formats); i++) {
+               if (!strcmp(format, sun50i_a100_ledc_formats[i])) {
+                       priv->format = i;
+                       return 0;
+               }
+       }
+
+       return dev_err_probe(dev, -EINVAL, "Bad pixel format '%s'\n", format);
+}
+
+static void sun50i_a100_ledc_set_format(struct sun50i_a100_ledc *priv)
+{
+       u32 control;
+
+       control = readl(priv->base + LEDC_CTRL_REG);
+       control &= ~LEDC_CTRL_REG_RGB_MODE;
+       control |= FIELD_PREP(LEDC_CTRL_REG_RGB_MODE, priv->format);
+       writel(control, priv->base + LEDC_CTRL_REG);
+}
+
+static const struct sun50i_a100_ledc_timing sun50i_a100_ledc_default_timing = {
+       .t0h_ns = 336,
+       .t0l_ns = 840,
+       .t1h_ns = 882,
+       .t1l_ns = 294,
+       .treset_ns = 300000,
+};
+
+static int sun50i_a100_ledc_parse_timing(struct device *dev,
+                                        struct sun50i_a100_ledc *priv)
+{
+       struct sun50i_a100_ledc_timing *timing = &priv->timing;
+
+       *timing = sun50i_a100_ledc_default_timing;
+
+       device_property_read_u32(dev, "allwinner,t0h-ns", &timing->t0h_ns);
+       device_property_read_u32(dev, "allwinner,t0l-ns", &timing->t0l_ns);
+       device_property_read_u32(dev, "allwinner,t1h-ns", &timing->t1h_ns);
+       device_property_read_u32(dev, "allwinner,t1l-ns", &timing->t1l_ns);
+       device_property_read_u32(dev, "allwinner,treset-ns", &timing->treset_ns);
+
+       return 0;
+}
+
+static void sun50i_a100_ledc_set_timing(struct sun50i_a100_ledc *priv)
+{
+       const struct sun50i_a100_ledc_timing *timing = &priv->timing;
+       unsigned long mod_freq = clk_get_rate(priv->mod_clk);
+       u32 cycle_ns;
+       u32 control;
+
+       if (!mod_freq)
+               return;
+
+       cycle_ns = NSEC_PER_SEC / mod_freq;
+       control = FIELD_PREP(LEDC_T01_TIMING_CTRL_REG_T1H, timing->t1h_ns / cycle_ns) |
+                 FIELD_PREP(LEDC_T01_TIMING_CTRL_REG_T1L, timing->t1l_ns / cycle_ns) |
+                 FIELD_PREP(LEDC_T01_TIMING_CTRL_REG_T0H, timing->t0h_ns / cycle_ns) |
+                 FIELD_PREP(LEDC_T01_TIMING_CTRL_REG_T0L, timing->t0l_ns / cycle_ns);
+       writel(control, priv->base + LEDC_T01_TIMING_CTRL_REG);
+
+       control = FIELD_PREP(LEDC_RESET_TIMING_CTRL_REG_TR, timing->treset_ns / cycle_ns) |
+                 FIELD_PREP(LEDC_RESET_TIMING_CTRL_REG_LED_NUM, priv->max_addr);
+       writel(control, priv->base + LEDC_RESET_TIMING_CTRL_REG);
+}
+
+static int sun50i_a100_ledc_resume(struct device *dev)
+{
+       struct sun50i_a100_ledc *priv = dev_get_drvdata(dev);
+       int ret;
+
+       ret = reset_control_deassert(priv->reset);
+       if (ret)
+               return ret;
+
+       ret = clk_prepare_enable(priv->bus_clk);
+       if (ret)
+               goto err_assert_reset;
+
+       ret = clk_prepare_enable(priv->mod_clk);
+       if (ret)
+               goto err_disable_bus_clk;
+
+       sun50i_a100_ledc_set_format(priv);
+       sun50i_a100_ledc_set_timing(priv);
+
+       writel(LEDC_INT_CTRL_REG_GLOBAL_INT_EN | LEDC_INT_CTRL_REG_TRANS_FINISH_INT_EN,
+              priv->base + LEDC_INT_CTRL_REG);
+
+       return 0;
+
+err_disable_bus_clk:
+       clk_disable_unprepare(priv->bus_clk);
+err_assert_reset:
+       reset_control_assert(priv->reset);
+
+       return ret;
+}
+
+static int sun50i_a100_ledc_suspend(struct device *dev)
+{
+       struct sun50i_a100_ledc *priv = dev_get_drvdata(dev);
+
+       /* Wait for all transfers to complete. */
+       for (;;) {
+               unsigned long flags;
+               bool xfer_active;
+
+               spin_lock_irqsave(&priv->lock, flags);
+               xfer_active = priv->xfer_active;
+               spin_unlock_irqrestore(&priv->lock, flags);
+               if (!xfer_active)
+                       break;
+
+               msleep(1);
+       }
+
+       clk_disable_unprepare(priv->mod_clk);
+       clk_disable_unprepare(priv->bus_clk);
+       reset_control_assert(priv->reset);
+
+       return 0;
+}
+
/* devm action: release the optional "tx" DMA channel acquired in probe. */
static void sun50i_a100_ledc_dma_cleanup(void *data)
{
	struct sun50i_a100_ledc *priv = data;

	dma_release_channel(priv->dma_chan);
}
+
+static int sun50i_a100_ledc_probe(struct platform_device *pdev)
+{
+       struct dma_slave_config dma_cfg = {};
+       struct led_init_data init_data = {};
+       struct sun50i_a100_ledc_led *led;
+       struct device *dev = &pdev->dev;
+       struct sun50i_a100_ledc *priv;
+       struct fwnode_handle *child;
+       struct resource *mem;
+       u32 max_addr = 0;
+       u32 num_leds = 0;
+       int irq, ret;
+
+       /*
+        * The maximum LED address must be known in sun50i_a100_ledc_resume() before
+        * class device registration, so parse and validate the subnodes up front.
+        */
+       device_for_each_child_node(dev, child) {
+               u32 addr, color;
+
+               ret = fwnode_property_read_u32(child, "reg", &addr);
+               if (ret || addr >= LEDC_MAX_LEDS) {
+                       fwnode_handle_put(child);
+                       return dev_err_probe(dev, -EINVAL, "'reg' must be between 0 and %d\n",
+                                            LEDC_MAX_LEDS - 1);
+               }
+
+               ret = fwnode_property_read_u32(child, "color", &color);
+               if (ret || color != LED_COLOR_ID_RGB) {
+                       fwnode_handle_put(child);
+                       return dev_err_probe(dev, -EINVAL, "'color' must be LED_COLOR_ID_RGB\n");
+               }
+
+               max_addr = max(max_addr, addr);
+               num_leds++;
+       }
+
+       if (!num_leds)
+               return -ENODEV;
+
+       priv = devm_kzalloc(dev, struct_size(priv, leds, num_leds), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->dev = dev;
+       priv->max_addr = max_addr;
+       priv->num_leds = num_leds;
+       spin_lock_init(&priv->lock);
+       dev_set_drvdata(dev, priv);
+
+       ret = sun50i_a100_ledc_parse_format(dev, priv);
+       if (ret)
+               return ret;
+
+       ret = sun50i_a100_ledc_parse_timing(dev, priv);
+       if (ret)
+               return ret;
+
+       priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
+
+       priv->bus_clk = devm_clk_get(dev, "bus");
+       if (IS_ERR(priv->bus_clk))
+               return PTR_ERR(priv->bus_clk);
+
+       priv->mod_clk = devm_clk_get(dev, "mod");
+       if (IS_ERR(priv->mod_clk))
+               return PTR_ERR(priv->mod_clk);
+
+       priv->reset = devm_reset_control_get_exclusive(dev, NULL);
+       if (IS_ERR(priv->reset))
+               return PTR_ERR(priv->reset);
+
+       priv->dma_chan = dma_request_chan(dev, "tx");
+       if (IS_ERR(priv->dma_chan)) {
+               if (PTR_ERR(priv->dma_chan) != -ENODEV)
+                       return PTR_ERR(priv->dma_chan);
+
+               priv->dma_chan = NULL;
+
+               priv->buffer = devm_kzalloc(dev, LEDS_TO_BYTES(LEDC_MAX_LEDS), GFP_KERNEL);
+               if (!priv->buffer)
+                       return -ENOMEM;
+       } else {
+               ret = devm_add_action_or_reset(dev, sun50i_a100_ledc_dma_cleanup, priv);
+               if (ret)
+                       return ret;
+
+               dma_cfg.dst_addr        = mem->start + LEDC_DATA_REG;
+               dma_cfg.dst_addr_width  = DMA_SLAVE_BUSWIDTH_4_BYTES;
+               dma_cfg.dst_maxburst    = LEDC_FIFO_DEPTH / 2;
+
+               ret = dmaengine_slave_config(priv->dma_chan, &dma_cfg);
+               if (ret)
+                       return ret;
+
+               priv->buffer = dmam_alloc_attrs(dmaengine_get_dma_device(priv->dma_chan),
+                                               LEDS_TO_BYTES(LEDC_MAX_LEDS), &priv->dma_handle,
+                                               GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
+               if (!priv->buffer)
+                       return -ENOMEM;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       ret = devm_request_irq(dev, irq, sun50i_a100_ledc_irq, 0, dev_name(dev), priv);
+       if (ret)
+               return ret;
+
+       ret = sun50i_a100_ledc_resume(dev);
+       if (ret)
+               return ret;
+
+       led = priv->leds;
+       device_for_each_child_node(dev, child) {
+               struct led_classdev *cdev;
+
+               /* The node was already validated above. */
+               fwnode_property_read_u32(child, "reg", &led->addr);
+
+               led->subled_info[0].color_index = LED_COLOR_ID_RED;
+               led->subled_info[0].channel = 0;
+               led->subled_info[1].color_index = LED_COLOR_ID_GREEN;
+               led->subled_info[1].channel = 1;
+               led->subled_info[2].color_index = LED_COLOR_ID_BLUE;
+               led->subled_info[2].channel = 2;
+
+               led->mc_cdev.num_colors = ARRAY_SIZE(led->subled_info);
+               led->mc_cdev.subled_info = led->subled_info;
+
+               cdev = &led->mc_cdev.led_cdev;
+               cdev->max_brightness = U8_MAX;
+               cdev->brightness_set = sun50i_a100_ledc_brightness_set;
+
+               init_data.fwnode = child;
+
+               ret = led_classdev_multicolor_register_ext(dev, &led->mc_cdev, &init_data);
+               if (ret) {
+                       dev_err_probe(dev, ret, "Failed to register multicolor LED %u", led->addr);
+                       goto err_put_child;
+               }
+
+               led++;
+       }
+
+       dev_info(dev, "Registered %u LEDs\n", num_leds);
+
+       return 0;
+
+err_put_child:
+       fwnode_handle_put(child);
+       while (led-- > priv->leds)
+               led_classdev_multicolor_unregister(&led->mc_cdev);
+       sun50i_a100_ledc_suspend(&pdev->dev);
+
+       return ret;
+}
+
+static void sun50i_a100_ledc_remove(struct platform_device *pdev)
+{
+       struct sun50i_a100_ledc *priv = platform_get_drvdata(pdev);
+
+       for (u32 i = 0; i < priv->num_leds; i++)
+               led_classdev_multicolor_unregister(&priv->leds[i].mc_cdev);
+       sun50i_a100_ledc_suspend(&pdev->dev);
+}
+
/* Devicetree match table. */
static const struct of_device_id sun50i_a100_ledc_of_match[] = {
	{ .compatible = "allwinner,sun50i-a100-ledc" },
	{}
};
MODULE_DEVICE_TABLE(of, sun50i_a100_ledc_of_match);

/* System sleep hooks; the same routines are reused by probe/remove. */
static DEFINE_SIMPLE_DEV_PM_OPS(sun50i_a100_ledc_pm,
				sun50i_a100_ledc_suspend,
				sun50i_a100_ledc_resume);

/* .shutdown reuses .remove so transfers drain and clocks gate at power-off. */
static struct platform_driver sun50i_a100_ledc_driver = {
	.probe		= sun50i_a100_ledc_probe,
	.remove_new	= sun50i_a100_ledc_remove,
	.shutdown	= sun50i_a100_ledc_remove,
	.driver		= {
		.name		= "sun50i-a100-ledc",
		.of_match_table = sun50i_a100_ledc_of_match,
		.pm		= pm_ptr(&sun50i_a100_ledc_pm),
	},
};
module_platform_driver(sun50i_a100_ledc_driver);

MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
MODULE_DESCRIPTION("Allwinner A100 LED controller driver");
MODULE_LICENSE("GPL");
index 360a376fa738c4d305f48f99e1202b273934c264..d633ad519d0cdc52ad2ba4790cbcfde7055a2484 100644 (file)
@@ -81,7 +81,8 @@ static int syscon_led_probe(struct platform_device *pdev)
 
        sled->map = map;
 
-       if (of_property_read_u32(np, "offset", &sled->offset))
+       if (of_property_read_u32(np, "reg", &sled->offset) &&
+           of_property_read_u32(np, "offset", &sled->offset))
                return -EINVAL;
        if (of_property_read_u32(np, "mask", &sled->mask))
                return -EINVAL;
index e190746140959e21abbf3dd96d1dbf4bd3a4c552..4f22f42249467d7ce5a321a43affd490eb9598a9 100644 (file)
@@ -638,19 +638,13 @@ static int tca6507_probe_gpios(struct device *dev,
        tca->gpio.direction_output = tca6507_gpio_direction_output;
        tca->gpio.set = tca6507_gpio_set_value;
        tca->gpio.parent = dev;
-       err = gpiochip_add_data(&tca->gpio, tca);
+       err = devm_gpiochip_add_data(dev, &tca->gpio, tca);
        if (err) {
                tca->gpio.ngpio = 0;
                return err;
        }
        return 0;
 }
-
-static void tca6507_remove_gpio(struct tca6507_chip *tca)
-{
-       if (tca->gpio.ngpio)
-               gpiochip_remove(&tca->gpio);
-}
 #else /* CONFIG_GPIOLIB */
 static int tca6507_probe_gpios(struct device *dev,
                               struct tca6507_chip *tca,
@@ -658,9 +652,6 @@ static int tca6507_probe_gpios(struct device *dev,
 {
        return 0;
 }
-static void tca6507_remove_gpio(struct tca6507_chip *tca)
-{
-}
 #endif /* CONFIG_GPIOLIB */
 
 static struct tca6507_platform_data *
@@ -762,38 +753,25 @@ static int tca6507_probe(struct i2c_client *client)
                        l->led_cdev.brightness_set = tca6507_brightness_set;
                        l->led_cdev.blink_set = tca6507_blink_set;
                        l->bank = -1;
-                       err = led_classdev_register(dev, &l->led_cdev);
+                       err = devm_led_classdev_register(dev, &l->led_cdev);
                        if (err < 0)
-                               goto exit;
+                               return err;
                }
        }
        err = tca6507_probe_gpios(dev, tca, pdata);
        if (err)
-               goto exit;
+               return err;
        /* set all registers to known state - zero */
        tca->reg_set = 0x7f;
        schedule_work(&tca->work);
 
        return 0;
-exit:
-       while (i--) {
-               if (tca->leds[i].led_cdev.name)
-                       led_classdev_unregister(&tca->leds[i].led_cdev);
-       }
-       return err;
 }
 
 static void tca6507_remove(struct i2c_client *client)
 {
-       int i;
        struct tca6507_chip *tca = i2c_get_clientdata(client);
-       struct tca6507_led *tca_leds = tca->leds;
 
-       for (i = 0; i < NUM_LEDS; i++) {
-               if (tca_leds[i].led_cdev.name)
-                       led_classdev_unregister(&tca_leds[i].led_cdev);
-       }
-       tca6507_remove_gpio(tca);
        cancel_work_sync(&tca->work);
 }
 
index a6a21f5646734cc6a4c3360d3ed78da5e1075b1e..e66bd21b9852ec5d1df1ac14babf21eacb93792c 100644 (file)
@@ -4,7 +4,7 @@ if LEDS_CLASS_MULTICOLOR
 
 config LEDS_GROUP_MULTICOLOR
        tristate "LEDs group multi-color support"
-       depends on OF || COMPILE_TEST
+       depends on OF
        help
          This option enables support for monochrome LEDs that are grouped
          into multicolor LEDs which is useful in the case where LEDs of
index 46cd062b8b24c8de4d25c3223c122c6fdcd4b30c..e1a81e0109e8a57e357d121dd1cf74b18220082c 100644 (file)
@@ -51,8 +51,8 @@ static int led_pwm_mc_set(struct led_classdev *cdev,
 
                priv->leds[i].state.duty_cycle = duty;
                priv->leds[i].state.enabled = duty > 0;
-               ret = pwm_apply_state(priv->leds[i].pwm,
-                                     &priv->leds[i].state);
+               ret = pwm_apply_might_sleep(priv->leds[i].pwm,
+                                           &priv->leds[i].state);
                if (ret)
                        break;
        }
index 68d82a682bf67f93b50c8df96fc463fcf92437f0..156b73d1f4a29d459ccd1029c68f8797cfaa69bd 100644 (file)
@@ -552,9 +552,9 @@ static int lpg_parse_dtest(struct lpg *lpg)
                ret = count;
                goto err_malformed;
        } else if (count != lpg->data->num_channels * 2) {
-               dev_err(lpg->dev, "qcom,dtest needs to be %d items\n",
-                       lpg->data->num_channels * 2);
-               return -EINVAL;
+               return dev_err_probe(lpg->dev, -EINVAL,
+                                    "qcom,dtest needs to be %d items\n",
+                                    lpg->data->num_channels * 2);
        }
 
        for (i = 0; i < lpg->data->num_channels; i++) {
@@ -574,8 +574,7 @@ static int lpg_parse_dtest(struct lpg *lpg)
        return 0;
 
 err_malformed:
-       dev_err(lpg->dev, "malformed qcom,dtest\n");
-       return ret;
+       return dev_err_probe(lpg->dev, ret, "malformed qcom,dtest\n");
 }
 
 static void lpg_apply_dtest(struct lpg_channel *chan)
@@ -977,9 +976,14 @@ static int lpg_pattern_mc_clear(struct led_classdev *cdev)
        return lpg_pattern_clear(led);
 }
 
+static inline struct lpg *lpg_pwm_from_chip(struct pwm_chip *chip)
+{
+       return container_of(chip, struct lpg, pwm);
+}
+
 static int lpg_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-       struct lpg *lpg = container_of(chip, struct lpg, pwm);
+       struct lpg *lpg = lpg_pwm_from_chip(chip);
        struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
 
        return chan->in_use ? -EBUSY : 0;
@@ -995,7 +999,7 @@ static int lpg_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
 static int lpg_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                         const struct pwm_state *state)
 {
-       struct lpg *lpg = container_of(chip, struct lpg, pwm);
+       struct lpg *lpg = lpg_pwm_from_chip(chip);
        struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
        int ret = 0;
 
@@ -1026,7 +1030,7 @@ out_unlock:
 static int lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
                             struct pwm_state *state)
 {
-       struct lpg *lpg = container_of(chip, struct lpg, pwm);
+       struct lpg *lpg = lpg_pwm_from_chip(chip);
        struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
        unsigned int resolution;
        unsigned int pre_div;
@@ -1095,9 +1099,9 @@ static int lpg_add_pwm(struct lpg *lpg)
        lpg->pwm.npwm = lpg->num_channels;
        lpg->pwm.ops = &lpg_pwm_ops;
 
-       ret = pwmchip_add(&lpg->pwm);
+       ret = devm_pwmchip_add(lpg->dev, &lpg->pwm);
        if (ret)
-               dev_err(lpg->dev, "failed to add PWM chip: ret %d\n", ret);
+               dev_err_probe(lpg->dev, ret, "failed to add PWM chip\n");
 
        return ret;
 }
@@ -1111,19 +1115,16 @@ static int lpg_parse_channel(struct lpg *lpg, struct device_node *np,
        int ret;
 
        ret = of_property_read_u32(np, "reg", &reg);
-       if (ret || !reg || reg > lpg->num_channels) {
-               dev_err(lpg->dev, "invalid \"reg\" of %pOFn\n", np);
-               return -EINVAL;
-       }
+       if (ret || !reg || reg > lpg->num_channels)
+               return dev_err_probe(lpg->dev, -EINVAL, "invalid \"reg\" of %pOFn\n", np);
 
        chan = &lpg->channels[reg - 1];
        chan->in_use = true;
 
        ret = of_property_read_u32(np, "color", &color);
-       if (ret < 0 && ret != -EINVAL) {
-               dev_err(lpg->dev, "failed to parse \"color\" of %pOF\n", np);
-               return ret;
-       }
+       if (ret < 0 && ret != -EINVAL)
+               return dev_err_probe(lpg->dev, ret,
+                                    "failed to parse \"color\" of %pOF\n", np);
 
        chan->color = color;
 
@@ -1146,10 +1147,9 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
        int i;
 
        ret = of_property_read_u32(np, "color", &color);
-       if (ret < 0 && ret != -EINVAL) {
-               dev_err(lpg->dev, "failed to parse \"color\" of %pOF\n", np);
-               return ret;
-       }
+       if (ret < 0 && ret != -EINVAL)
+               return dev_err_probe(lpg->dev, ret,
+                             "failed to parse \"color\" of %pOF\n", np);
 
        if (color == LED_COLOR_ID_RGB)
                num_channels = of_get_available_child_count(np);
@@ -1226,7 +1226,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
        else
                ret = devm_led_classdev_register_ext(lpg->dev, &led->cdev, &init_data);
        if (ret)
-               dev_err(lpg->dev, "unable to register %s\n", cdev->name);
+               dev_err_probe(lpg->dev, ret, "unable to register %s\n", cdev->name);
 
        return ret;
 }
@@ -1272,10 +1272,9 @@ static int lpg_init_triled(struct lpg *lpg)
 
        if (lpg->triled_has_src_sel) {
                ret = of_property_read_u32(np, "qcom,power-source", &lpg->triled_src);
-               if (ret || lpg->triled_src == 2 || lpg->triled_src > 3) {
-                       dev_err(lpg->dev, "invalid power source\n");
-                       return -EINVAL;
-               }
+               if (ret || lpg->triled_src == 2 || lpg->triled_src > 3)
+                       return dev_err_probe(lpg->dev, -EINVAL,
+                                            "invalid power source\n");
        }
 
        /* Disable automatic trickle charge LED */
@@ -1324,8 +1323,6 @@ static int lpg_probe(struct platform_device *pdev)
        if (!lpg->data)
                return -EINVAL;
 
-       platform_set_drvdata(pdev, lpg);
-
        lpg->dev = &pdev->dev;
        mutex_init(&lpg->lock);
 
@@ -1363,13 +1360,6 @@ static int lpg_probe(struct platform_device *pdev)
        return lpg_add_pwm(lpg);
 }
 
-static void lpg_remove(struct platform_device *pdev)
-{
-       struct lpg *lpg = platform_get_drvdata(pdev);
-
-       pwmchip_remove(&lpg->pwm);
-}
-
 static const struct lpg_data pm8916_pwm_data = {
        .num_channels = 1,
        .channels = (const struct lpg_channel_data[]) {
@@ -1529,7 +1519,6 @@ MODULE_DEVICE_TABLE(of, lpg_of_table);
 
 static struct platform_driver lpg_driver = {
        .probe = lpg_probe,
-       .remove_new = lpg_remove,
        .driver = {
                .name = "qcom-spmi-lpg",
                .of_match_table = lpg_of_table,
index 9b7fe5dd52088f1456bd3657614f589ab3c4ceb9..7f6a2352b0acdd911ef18402575b80b163a2084a 100644 (file)
@@ -41,33 +41,30 @@ static irqreturn_t gpio_trig_irq(int irq, void *_led)
        return IRQ_HANDLED;
 }
 
-static ssize_t gpio_trig_brightness_show(struct device *dev,
+static ssize_t desired_brightness_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
        struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
 
-       return sprintf(buf, "%u\n", gpio_data->desired_brightness);
+       return sysfs_emit(buf, "%u\n", gpio_data->desired_brightness);
 }
 
-static ssize_t gpio_trig_brightness_store(struct device *dev,
+static ssize_t desired_brightness_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t n)
 {
        struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
-       unsigned desired_brightness;
+       u8 desired_brightness;
        int ret;
 
-       ret = sscanf(buf, "%u", &desired_brightness);
-       if (ret < 1 || desired_brightness > 255) {
-               dev_err(dev, "invalid value\n");
-               return -EINVAL;
-       }
+       ret = kstrtou8(buf, 10, &desired_brightness);
+       if (ret)
+               return ret;
 
        gpio_data->desired_brightness = desired_brightness;
 
        return n;
 }
-static DEVICE_ATTR(desired_brightness, 0644, gpio_trig_brightness_show,
-               gpio_trig_brightness_store);
+static DEVICE_ATTR_RW(desired_brightness);
 
 static struct attribute *gpio_trig_attrs[] = {
        &dev_attr_desired_brightness.attr,
@@ -89,10 +86,7 @@ static int gpio_trig_activate(struct led_classdev *led)
         * The generic property "trigger-sources" is followed,
         * and we hope that this is a GPIO.
         */
-       gpio_data->gpiod = fwnode_gpiod_get_index(dev->fwnode,
-                                                 "trigger-sources",
-                                                 0, GPIOD_IN,
-                                                 "led-trigger");
+       gpio_data->gpiod = gpiod_get_optional(dev, "trigger-sources", GPIOD_IN);
        if (IS_ERR(gpio_data->gpiod)) {
                ret = PTR_ERR(gpio_data->gpiod);
                kfree(gpio_data);
@@ -104,6 +98,8 @@ static int gpio_trig_activate(struct led_classdev *led)
                return -EINVAL;
        }
 
+       gpiod_set_consumer_name(gpio_data->gpiod, "led-trigger");
+
        gpio_data->led = led;
        led_set_trigger_data(led, gpio_data);
 
index d76214fa9ad8645441e9c53814719ba0109f30ac..8e5475819590e056142dae6ac1dd8c0dbe186d84 100644 (file)
  * tx -  LED blinks on transmitted data
  * rx -  LED blinks on receive data
  *
+ * Note: If the user selects a mode that is not supported by hw, default
+ * behavior is to fall back to software control of the LED. However not every
+ * hw supports software control. LED callbacks brightness_set() and
+ * brightness_set_blocking() are NULL in this case. hw_control_is_supported()
+ * should use available means supported by hw to inform the user that selected
+ * mode isn't supported by hw. This could be switching off the LED or any
+ * hw blink mode. If software control fallback isn't possible, we return
+ * -EOPNOTSUPP to the user, but still store the selected mode. This is needed
+ * in case an intermediate unsupported mode is necessary to switch from one
+ * supported mode to another.
  */
 
 struct led_netdev_data {
@@ -99,6 +109,18 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
                    trigger_data->link_speed == SPEED_1000)
                        blink_on = true;
 
+               if (test_bit(TRIGGER_NETDEV_LINK_2500, &trigger_data->mode) &&
+                   trigger_data->link_speed == SPEED_2500)
+                       blink_on = true;
+
+               if (test_bit(TRIGGER_NETDEV_LINK_5000, &trigger_data->mode) &&
+                   trigger_data->link_speed == SPEED_5000)
+                       blink_on = true;
+
+               if (test_bit(TRIGGER_NETDEV_LINK_10000, &trigger_data->mode) &&
+                   trigger_data->link_speed == SPEED_10000)
+                       blink_on = true;
+
                if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &trigger_data->mode) &&
                    trigger_data->duplex == DUPLEX_HALF)
                        blink_on = true;
@@ -289,6 +311,9 @@ static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
        case TRIGGER_NETDEV_LINK_10:
        case TRIGGER_NETDEV_LINK_100:
        case TRIGGER_NETDEV_LINK_1000:
+       case TRIGGER_NETDEV_LINK_2500:
+       case TRIGGER_NETDEV_LINK_5000:
+       case TRIGGER_NETDEV_LINK_10000:
        case TRIGGER_NETDEV_HALF_DUPLEX:
        case TRIGGER_NETDEV_FULL_DUPLEX:
        case TRIGGER_NETDEV_TX:
@@ -306,6 +331,7 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
                                     size_t size, enum led_trigger_netdev_modes attr)
 {
        struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
+       struct led_classdev *led_cdev = trigger_data->led_cdev;
        unsigned long state, mode = trigger_data->mode;
        int ret;
        int bit;
@@ -319,6 +345,9 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
        case TRIGGER_NETDEV_LINK_10:
        case TRIGGER_NETDEV_LINK_100:
        case TRIGGER_NETDEV_LINK_1000:
+       case TRIGGER_NETDEV_LINK_2500:
+       case TRIGGER_NETDEV_LINK_5000:
+       case TRIGGER_NETDEV_LINK_10000:
        case TRIGGER_NETDEV_HALF_DUPLEX:
        case TRIGGER_NETDEV_FULL_DUPLEX:
        case TRIGGER_NETDEV_TX:
@@ -337,7 +366,10 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
        if (test_bit(TRIGGER_NETDEV_LINK, &mode) &&
            (test_bit(TRIGGER_NETDEV_LINK_10, &mode) ||
             test_bit(TRIGGER_NETDEV_LINK_100, &mode) ||
-            test_bit(TRIGGER_NETDEV_LINK_1000, &mode)))
+            test_bit(TRIGGER_NETDEV_LINK_1000, &mode) ||
+            test_bit(TRIGGER_NETDEV_LINK_2500, &mode) ||
+            test_bit(TRIGGER_NETDEV_LINK_5000, &mode) ||
+            test_bit(TRIGGER_NETDEV_LINK_10000, &mode)))
                return -EINVAL;
 
        cancel_delayed_work_sync(&trigger_data->work);
@@ -345,6 +377,10 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
        trigger_data->mode = mode;
        trigger_data->hw_control = can_hw_control(trigger_data);
 
+       if (!led_cdev->brightness_set && !led_cdev->brightness_set_blocking &&
+           !trigger_data->hw_control)
+               return -EOPNOTSUPP;
+
        set_baseline_state(trigger_data);
 
        return size;
@@ -367,6 +403,9 @@ DEFINE_NETDEV_TRIGGER(link, TRIGGER_NETDEV_LINK);
 DEFINE_NETDEV_TRIGGER(link_10, TRIGGER_NETDEV_LINK_10);
 DEFINE_NETDEV_TRIGGER(link_100, TRIGGER_NETDEV_LINK_100);
 DEFINE_NETDEV_TRIGGER(link_1000, TRIGGER_NETDEV_LINK_1000);
+DEFINE_NETDEV_TRIGGER(link_2500, TRIGGER_NETDEV_LINK_2500);
+DEFINE_NETDEV_TRIGGER(link_5000, TRIGGER_NETDEV_LINK_5000);
+DEFINE_NETDEV_TRIGGER(link_10000, TRIGGER_NETDEV_LINK_10000);
 DEFINE_NETDEV_TRIGGER(half_duplex, TRIGGER_NETDEV_HALF_DUPLEX);
 DEFINE_NETDEV_TRIGGER(full_duplex, TRIGGER_NETDEV_FULL_DUPLEX);
 DEFINE_NETDEV_TRIGGER(tx, TRIGGER_NETDEV_TX);
@@ -425,6 +464,9 @@ static struct attribute *netdev_trig_attrs[] = {
        &dev_attr_link_10.attr,
        &dev_attr_link_100.attr,
        &dev_attr_link_1000.attr,
+       &dev_attr_link_2500.attr,
+       &dev_attr_link_5000.attr,
+       &dev_attr_link_10000.attr,
        &dev_attr_full_duplex.attr,
        &dev_attr_half_duplex.attr,
        &dev_attr_rx.attr,
@@ -522,6 +564,9 @@ static void netdev_trig_work(struct work_struct *work)
                         test_bit(TRIGGER_NETDEV_LINK_10, &trigger_data->mode) ||
                         test_bit(TRIGGER_NETDEV_LINK_100, &trigger_data->mode) ||
                         test_bit(TRIGGER_NETDEV_LINK_1000, &trigger_data->mode) ||
+                        test_bit(TRIGGER_NETDEV_LINK_2500, &trigger_data->mode) ||
+                        test_bit(TRIGGER_NETDEV_LINK_5000, &trigger_data->mode) ||
+                        test_bit(TRIGGER_NETDEV_LINK_10000, &trigger_data->mode) ||
                         test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &trigger_data->mode) ||
                         test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &trigger_data->mode);
                interval = jiffies_to_msecs(
index 64abf2e91608a5ecdbb80c8fd706f389ee62159c..5a6b21bfeb9af488959a54d7b3f71aff6b05dd71 100644 (file)
@@ -64,10 +64,13 @@ static long led_panic_blink(int state)
 
 static int __init ledtrig_panic_init(void)
 {
+       led_trigger_register_simple("panic", &trigger);
+       if (!trigger)
+               return -ENOMEM;
+
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &led_trigger_panic_nb);
 
-       led_trigger_register_simple("panic", &trigger);
        panic_blink = led_panic_blink;
        return 0;
 }
index 8ae0d2d284aff758f7ac786d540e5ae322248161..8cf1485e816587d2bf5ece8741dbd5b0c79fa325 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/leds.h>
 #include <linux/module.h>
 struct ledtrig_tty_data {
        struct led_classdev *led_cdev;
        struct delayed_work dwork;
-       struct mutex mutex;
+       struct completion sysfs;
        const char *ttyname;
        struct tty_struct *tty;
        int rx, tx;
+       bool mode_rx;
+       bool mode_tx;
+       bool mode_cts;
+       bool mode_dsr;
+       bool mode_dcd;
+       bool mode_rng;
 };
 
-static void ledtrig_tty_restart(struct ledtrig_tty_data *trigger_data)
+/* Indicates which state the LED should now display */
+enum led_trigger_tty_state {
+       TTY_LED_BLINK,
+       TTY_LED_ENABLE,
+       TTY_LED_DISABLE,
+};
+
+enum led_trigger_tty_modes {
+       TRIGGER_TTY_RX = 0,
+       TRIGGER_TTY_TX,
+       TRIGGER_TTY_CTS,
+       TRIGGER_TTY_DSR,
+       TRIGGER_TTY_DCD,
+       TRIGGER_TTY_RNG,
+};
+
+static int ledtrig_tty_wait_for_completion(struct device *dev)
 {
-       schedule_delayed_work(&trigger_data->dwork, 0);
+       struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
+       int ret;
+
+       ret = wait_for_completion_timeout(&trigger_data->sysfs,
+                                         msecs_to_jiffies(LEDTRIG_TTY_INTERVAL * 20));
+       if (ret == 0)
+               return -ETIMEDOUT;
+
+       return ret;
 }
 
 static ssize_t ttyname_show(struct device *dev,
@@ -28,14 +59,16 @@ static ssize_t ttyname_show(struct device *dev,
 {
        struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
        ssize_t len = 0;
+       int completion;
 
-       mutex_lock(&trigger_data->mutex);
+       reinit_completion(&trigger_data->sysfs);
+       completion = ledtrig_tty_wait_for_completion(dev);
+       if (completion < 0)
+               return completion;
 
        if (trigger_data->ttyname)
                len = sprintf(buf, "%s\n", trigger_data->ttyname);
 
-       mutex_unlock(&trigger_data->mutex);
-
        return len;
 }
 
@@ -46,7 +79,7 @@ static ssize_t ttyname_store(struct device *dev,
        struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
        char *ttyname;
        ssize_t ret = size;
-       bool running;
+       int completion;
 
        if (size > 0 && buf[size - 1] == '\n')
                size -= 1;
@@ -59,9 +92,10 @@ static ssize_t ttyname_store(struct device *dev,
                ttyname = NULL;
        }
 
-       mutex_lock(&trigger_data->mutex);
-
-       running = trigger_data->ttyname != NULL;
+       reinit_completion(&trigger_data->sysfs);
+       completion = ledtrig_tty_wait_for_completion(dev);
+       if (completion < 0)
+               return completion;
 
        kfree(trigger_data->ttyname);
        tty_kref_put(trigger_data->tty);
@@ -69,29 +103,107 @@ static ssize_t ttyname_store(struct device *dev,
 
        trigger_data->ttyname = ttyname;
 
-       mutex_unlock(&trigger_data->mutex);
-
-       if (ttyname && !running)
-               ledtrig_tty_restart(trigger_data);
-
        return ret;
 }
 static DEVICE_ATTR_RW(ttyname);
 
+static ssize_t ledtrig_tty_attr_show(struct device *dev, char *buf,
+                                    enum led_trigger_tty_modes attr)
+{
+       struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
+       bool state;
+
+       switch (attr) {
+       case TRIGGER_TTY_RX:
+               state = trigger_data->mode_rx;
+               break;
+       case TRIGGER_TTY_TX:
+               state = trigger_data->mode_tx;
+               break;
+       case TRIGGER_TTY_CTS:
+               state = trigger_data->mode_cts;
+               break;
+       case TRIGGER_TTY_DSR:
+               state = trigger_data->mode_dsr;
+               break;
+       case TRIGGER_TTY_DCD:
+               state = trigger_data->mode_dcd;
+               break;
+       case TRIGGER_TTY_RNG:
+               state = trigger_data->mode_rng;
+               break;
+       }
+
+       return sysfs_emit(buf, "%u\n", state);
+}
+
+static ssize_t ledtrig_tty_attr_store(struct device *dev, const char *buf,
+                                     size_t size, enum led_trigger_tty_modes attr)
+{
+       struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
+       bool state;
+       int ret;
+
+       ret = kstrtobool(buf, &state);
+       if (ret)
+               return ret;
+
+       switch (attr) {
+       case TRIGGER_TTY_RX:
+               trigger_data->mode_rx = state;
+               break;
+       case TRIGGER_TTY_TX:
+               trigger_data->mode_tx = state;
+               break;
+       case TRIGGER_TTY_CTS:
+               trigger_data->mode_cts = state;
+               break;
+       case TRIGGER_TTY_DSR:
+               trigger_data->mode_dsr = state;
+               break;
+       case TRIGGER_TTY_DCD:
+               trigger_data->mode_dcd = state;
+               break;
+       case TRIGGER_TTY_RNG:
+               trigger_data->mode_rng = state;
+               break;
+       }
+
+       return size;
+}
+
+#define DEFINE_TTY_TRIGGER(trigger_name, trigger) \
+       static ssize_t trigger_name##_show(struct device *dev, \
+               struct device_attribute *attr, char *buf) \
+       { \
+               return ledtrig_tty_attr_show(dev, buf, trigger); \
+       } \
+       static ssize_t trigger_name##_store(struct device *dev, \
+               struct device_attribute *attr, const char *buf, size_t size) \
+       { \
+               return ledtrig_tty_attr_store(dev, buf, size, trigger); \
+       } \
+       static DEVICE_ATTR_RW(trigger_name)
+
+DEFINE_TTY_TRIGGER(rx, TRIGGER_TTY_RX);
+DEFINE_TTY_TRIGGER(tx, TRIGGER_TTY_TX);
+DEFINE_TTY_TRIGGER(cts, TRIGGER_TTY_CTS);
+DEFINE_TTY_TRIGGER(dsr, TRIGGER_TTY_DSR);
+DEFINE_TTY_TRIGGER(dcd, TRIGGER_TTY_DCD);
+DEFINE_TTY_TRIGGER(rng, TRIGGER_TTY_RNG);
+
 static void ledtrig_tty_work(struct work_struct *work)
 {
        struct ledtrig_tty_data *trigger_data =
                container_of(work, struct ledtrig_tty_data, dwork.work);
-       struct serial_icounter_struct icount;
+       enum led_trigger_tty_state state = TTY_LED_DISABLE;
+       unsigned long interval = LEDTRIG_TTY_INTERVAL;
+       bool invert = false;
+       int status;
        int ret;
 
-       mutex_lock(&trigger_data->mutex);
-
-       if (!trigger_data->ttyname) {
-               /* exit without rescheduling */
-               mutex_unlock(&trigger_data->mutex);
-               return;
-       }
+       if (!trigger_data->ttyname)
+               goto out;
 
        /* try to get the tty corresponding to $ttyname */
        if (!trigger_data->tty) {
@@ -115,32 +227,83 @@ static void ledtrig_tty_work(struct work_struct *work)
                trigger_data->tty = tty;
        }
 
-       ret = tty_get_icount(trigger_data->tty, &icount);
-       if (ret) {
-               dev_info(trigger_data->tty->dev, "Failed to get icount, stopped polling\n");
-               mutex_unlock(&trigger_data->mutex);
-               return;
+       status = tty_get_tiocm(trigger_data->tty);
+       if (status > 0) {
+               if (trigger_data->mode_cts) {
+                       if (status & TIOCM_CTS)
+                               state = TTY_LED_ENABLE;
+               }
+
+               if (trigger_data->mode_dsr) {
+                       if (status & TIOCM_DSR)
+                               state = TTY_LED_ENABLE;
+               }
+
+               if (trigger_data->mode_dcd) {
+                       if (status & TIOCM_CAR)
+                               state = TTY_LED_ENABLE;
+               }
+
+               if (trigger_data->mode_rng) {
+                       if (status & TIOCM_RNG)
+                               state = TTY_LED_ENABLE;
+               }
        }
 
-       if (icount.rx != trigger_data->rx ||
-           icount.tx != trigger_data->tx) {
-               unsigned long interval = LEDTRIG_TTY_INTERVAL;
+       /*
+        * The evaluation of rx/tx must be done after the evaluation
+        * of TIOCM_*, because rx/tx has priority.
+        */
+       if (trigger_data->mode_rx || trigger_data->mode_tx) {
+               struct serial_icounter_struct icount;
 
-               led_blink_set_oneshot(trigger_data->led_cdev, &interval,
-                                     &interval, 0);
+               ret = tty_get_icount(trigger_data->tty, &icount);
+               if (ret)
+                       goto out;
 
-               trigger_data->rx = icount.rx;
-               trigger_data->tx = icount.tx;
+               if (trigger_data->mode_tx && (icount.tx != trigger_data->tx)) {
+                       trigger_data->tx = icount.tx;
+                       invert = state == TTY_LED_ENABLE;
+                       state = TTY_LED_BLINK;
+               }
+
+               if (trigger_data->mode_rx && (icount.rx != trigger_data->rx)) {
+                       trigger_data->rx = icount.rx;
+                       invert = state == TTY_LED_ENABLE;
+                       state = TTY_LED_BLINK;
+               }
        }
 
 out:
-       mutex_unlock(&trigger_data->mutex);
+       switch (state) {
+       case TTY_LED_BLINK:
+               led_blink_set_oneshot(trigger_data->led_cdev, &interval,
+                               &interval, invert);
+               break;
+       case TTY_LED_ENABLE:
+               led_set_brightness(trigger_data->led_cdev,
+                               trigger_data->led_cdev->blink_brightness);
+               break;
+       case TTY_LED_DISABLE:
+               fallthrough;
+       default:
+               led_set_brightness(trigger_data->led_cdev, LED_OFF);
+               break;
+       }
+
+       complete_all(&trigger_data->sysfs);
        schedule_delayed_work(&trigger_data->dwork,
                              msecs_to_jiffies(LEDTRIG_TTY_INTERVAL * 2));
 }
 
 static struct attribute *ledtrig_tty_attrs[] = {
        &dev_attr_ttyname.attr,
+       &dev_attr_rx.attr,
+       &dev_attr_tx.attr,
+       &dev_attr_cts.attr,
+       &dev_attr_dsr.attr,
+       &dev_attr_dcd.attr,
+       &dev_attr_rng.attr,
        NULL
 };
 ATTRIBUTE_GROUPS(ledtrig_tty);
@@ -153,11 +316,17 @@ static int ledtrig_tty_activate(struct led_classdev *led_cdev)
        if (!trigger_data)
                return -ENOMEM;
 
+       /* Enable default rx/tx mode */
+       trigger_data->mode_rx = true;
+       trigger_data->mode_tx = true;
+
        led_set_trigger_data(led_cdev, trigger_data);
 
        INIT_DELAYED_WORK(&trigger_data->dwork, ledtrig_tty_work);
        trigger_data->led_cdev = led_cdev;
-       mutex_init(&trigger_data->mutex);
+       init_completion(&trigger_data->sysfs);
+
+       schedule_delayed_work(&trigger_data->dwork, 0);
 
        return 0;
 }
@@ -168,6 +337,10 @@ static void ledtrig_tty_deactivate(struct led_classdev *led_cdev)
 
        cancel_delayed_work_sync(&trigger_data->dwork);
 
+       kfree(trigger_data->ttyname);
+       tty_kref_put(trigger_data->tty);
+       trigger_data->tty = NULL;
+
        kfree(trigger_data);
 }
 
index c6d4957c4da83e6fa1cf39efdaa49f3efad362bb..0ec21dcdbde723da1bbe01f743591bd152196dfe 100644 (file)
@@ -553,7 +553,8 @@ static irqreturn_t mhuv2_sender_interrupt(int irq, void *data)
        priv = chan->con_priv;
 
        if (!IS_PROTOCOL_DOORBELL(priv)) {
-               writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + priv->windows - 1].int_clr);
+               for (i = 0; i < priv->windows; i++)
+                       writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + i].int_clr);
 
                if (chan->cl) {
                        mbox_chan_txdone(chan, 0);
index a2b8839d4e7c5e5009581e528f648e9450b3c27b..e3e28a4f7d017cc12751c3eb331f0046c8df93ec 100644 (file)
@@ -1650,7 +1650,7 @@ fail:
        return ret;
 }
 
-static int flexrm_mbox_remove(struct platform_device *pdev)
+static void flexrm_mbox_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
@@ -1661,8 +1661,6 @@ static int flexrm_mbox_remove(struct platform_device *pdev)
 
        dma_pool_destroy(mbox->cmpl_pool);
        dma_pool_destroy(mbox->bd_pool);
-
-       return 0;
 }
 
 static const struct of_device_id flexrm_mbox_of_match[] = {
@@ -1677,7 +1675,7 @@ static struct platform_driver flexrm_mbox_driver = {
                .of_match_table = flexrm_mbox_of_match,
        },
        .probe          = flexrm_mbox_probe,
-       .remove         = flexrm_mbox_remove,
+       .remove_new     = flexrm_mbox_remove,
 };
 module_platform_driver(flexrm_mbox_driver);
 
index 778faeced81e1d6c409b74a551f8b755dbb48553..1768d3d5aaa011a1e2528bbc576aa3215d2bd2e1 100644 (file)
@@ -1605,7 +1605,7 @@ cleanup:
        return err;
 }
 
-static int pdc_remove(struct platform_device *pdev)
+static void pdc_remove(struct platform_device *pdev)
 {
        struct pdc_state *pdcs = platform_get_drvdata(pdev);
 
@@ -1617,12 +1617,11 @@ static int pdc_remove(struct platform_device *pdev)
 
        dma_pool_destroy(pdcs->rx_buf_pool);
        dma_pool_destroy(pdcs->ring_pool);
-       return 0;
 }
 
 static struct platform_driver pdc_mbox_driver = {
        .probe = pdc_probe,
-       .remove = pdc_remove,
+       .remove_new = pdc_remove,
        .driver = {
                   .name = "brcm-iproc-pdc-mbox",
                   .of_match_table = pdc_mbox_of_match,
index 0af739ab571cd8736b47c0cee1d7b7620002dfea..656171362fe9e4deb8507f6cb5e4b68cc5efa3d2 100644 (file)
@@ -903,13 +903,11 @@ disable_runtime_pm:
        return ret;
 }
 
-static int imx_mu_remove(struct platform_device *pdev)
+static void imx_mu_remove(struct platform_device *pdev)
 {
        struct imx_mu_priv *priv = platform_get_drvdata(pdev);
 
        pm_runtime_disable(priv->dev);
-
-       return 0;
 }
 
 static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
@@ -1070,7 +1068,7 @@ static const struct dev_pm_ops imx_mu_pm_ops = {
 
 static struct platform_driver imx_mu_driver = {
        .probe          = imx_mu_probe,
-       .remove         = imx_mu_remove,
+       .remove_new     = imx_mu_remove,
        .driver = {
                .name   = "imx_mu",
                .of_match_table = imx_mu_dt_ids,
index 22d6018ceec3cb08b4b727df9755ea4449b24007..3386b4e72551c78f292d3855f5fea8b210618deb 100644 (file)
@@ -418,7 +418,7 @@ static int mbox_test_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int mbox_test_remove(struct platform_device *pdev)
+static void mbox_test_remove(struct platform_device *pdev)
 {
        struct mbox_test_device *tdev = platform_get_drvdata(pdev);
 
@@ -428,8 +428,6 @@ static int mbox_test_remove(struct platform_device *pdev)
                mbox_free_channel(tdev->tx_channel);
        if (tdev->rx_channel)
                mbox_free_channel(tdev->rx_channel);
-
-       return 0;
 }
 
 static const struct of_device_id mbox_test_match[] = {
@@ -444,7 +442,7 @@ static struct platform_driver mbox_test_driver = {
                .of_match_table = mbox_test_match,
        },
        .probe  = mbox_test_probe,
-       .remove = mbox_test_remove,
+       .remove_new = mbox_test_remove,
 };
 module_platform_driver(mbox_test_driver);
 
index de862e9137d5fa0c706c699e7186e44efaabd2b5..ead2200f39ba0354ec94569a9bedf3964be14207 100644 (file)
@@ -367,7 +367,7 @@ static int cmdq_resume(struct device *dev)
        return 0;
 }
 
-static int cmdq_remove(struct platform_device *pdev)
+static void cmdq_remove(struct platform_device *pdev)
 {
        struct cmdq *cmdq = platform_get_drvdata(pdev);
 
@@ -378,7 +378,6 @@ static int cmdq_remove(struct platform_device *pdev)
                cmdq_runtime_suspend(&pdev->dev);
 
        clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
-       return 0;
 }
 
 static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
@@ -706,62 +705,70 @@ static const struct dev_pm_ops cmdq_pm_ops = {
                           cmdq_runtime_resume, NULL)
 };
 
-static const struct gce_plat gce_plat_v2 = {
-       .thread_nr = 16,
-       .shift = 0,
+static const struct gce_plat gce_plat_mt6779 = {
+       .thread_nr = 24,
+       .shift = 3,
        .control_by_sw = false,
        .gce_num = 1
 };
 
-static const struct gce_plat gce_plat_v3 = {
-       .thread_nr = 24,
+static const struct gce_plat gce_plat_mt8173 = {
+       .thread_nr = 16,
        .shift = 0,
        .control_by_sw = false,
        .gce_num = 1
 };
 
-static const struct gce_plat gce_plat_v4 = {
+static const struct gce_plat gce_plat_mt8183 = {
        .thread_nr = 24,
-       .shift = 3,
+       .shift = 0,
        .control_by_sw = false,
        .gce_num = 1
 };
 
-static const struct gce_plat gce_plat_v5 = {
+static const struct gce_plat gce_plat_mt8186 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = true,
+       .sw_ddr_en = true,
        .gce_num = 1
 };
 
-static const struct gce_plat gce_plat_v6 = {
-       .thread_nr = 24,
+static const struct gce_plat gce_plat_mt8188 = {
+       .thread_nr = 32,
        .shift = 3,
        .control_by_sw = true,
        .gce_num = 2
 };
 
-static const struct gce_plat gce_plat_v7 = {
+static const struct gce_plat gce_plat_mt8192 = {
        .thread_nr = 24,
        .shift = 3,
        .control_by_sw = true,
-       .sw_ddr_en = true,
        .gce_num = 1
 };
 
+static const struct gce_plat gce_plat_mt8195 = {
+       .thread_nr = 24,
+       .shift = 3,
+       .control_by_sw = true,
+       .gce_num = 2
+};
+
 static const struct of_device_id cmdq_of_ids[] = {
-       {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
-       {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
-       {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_v7},
-       {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
-       {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
-       {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
+       {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779},
+       {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173},
+       {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183},
+       {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186},
+       {.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
+       {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
+       {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
        {}
 };
 
 static struct platform_driver cmdq_drv = {
        .probe = cmdq_probe,
-       .remove = cmdq_remove,
+       .remove_new = cmdq_remove,
        .driver = {
                .name = "mtk_cmdq",
                .pm = &cmdq_pm_ops,
index 792bcaebbc9b8c29b8c957adbfa5a4256b1c5b54..c961706fe61d5f3e9e596c00a8df3730f69303c6 100644 (file)
@@ -865,19 +865,17 @@ unregister:
        return ret;
 }
 
-static int omap_mbox_remove(struct platform_device *pdev)
+static void omap_mbox_remove(struct platform_device *pdev)
 {
        struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
 
        pm_runtime_disable(mdev->dev);
        omap_mbox_unregister(mdev);
-
-       return 0;
 }
 
 static struct platform_driver omap_mbox_driver = {
        .probe  = omap_mbox_probe,
-       .remove = omap_mbox_remove,
+       .remove_new = omap_mbox_remove,
        .driver = {
                .name = "omap-mailbox",
                .pm = &omap_mbox_pm_ops,
index 002a135ee8688885c1768b65412f746090d8c396..7d91e7c016ba7303e1ffb2c941b0733dd352c2f0 100644 (file)
@@ -129,14 +129,12 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int qcom_apcs_ipc_remove(struct platform_device *pdev)
+static void qcom_apcs_ipc_remove(struct platform_device *pdev)
 {
        struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev);
        struct platform_device *clk = apcs->clk;
 
        platform_device_unregister(clk);
-
-       return 0;
 }
 
 /* .data is the offset of the ipc register within the global block */
@@ -145,19 +143,19 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
        { .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
        { .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
        { .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
-       { .compatible = "qcom,msm8976-apcs-kpss-global", .data = &msm8994_apcs_data },
        { .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
        { .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
-       { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
        { .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data },
+       { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
+       { .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
+       /* Do not add any more entries using existing driver data */
+       { .compatible = "qcom,msm8976-apcs-kpss-global", .data = &msm8994_apcs_data },
+       { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
        { .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
        { .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
-       { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
        { .compatible = "qcom,sm4250-apcs-hmss-global", .data = &msm8994_apcs_data },
        { .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
        { .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
-       { .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
-       /* Do not add any more entries using existing driver data */
        { .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
        { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
        { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
@@ -169,7 +167,7 @@ MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
 
 static struct platform_driver qcom_apcs_ipc_driver = {
        .probe = qcom_apcs_ipc_probe,
-       .remove = qcom_apcs_ipc_remove,
+       .remove_new = qcom_apcs_ipc_remove,
        .driver = {
                .name = "qcom_apcs_ipc",
                .of_match_table = qcom_apcs_ipc_of_match,
index f597a1bd5684782f4017bdd441ca738daced8aa8..d537cc9c4d4be0a9733919bf81772e6be811324b 100644 (file)
@@ -326,14 +326,12 @@ err_mbox:
        return ret;
 }
 
-static int qcom_ipcc_remove(struct platform_device *pdev)
+static void qcom_ipcc_remove(struct platform_device *pdev)
 {
        struct qcom_ipcc *ipcc = platform_get_drvdata(pdev);
 
        disable_irq_wake(ipcc->irq);
        irq_domain_remove(ipcc->irq_domain);
-
-       return 0;
 }
 
 static const struct of_device_id qcom_ipcc_of_match[] = {
@@ -348,7 +346,7 @@ static const struct dev_pm_ops qcom_ipcc_dev_pm_ops = {
 
 static struct platform_driver qcom_ipcc_driver = {
        .probe = qcom_ipcc_probe,
-       .remove = qcom_ipcc_remove,
+       .remove_new = qcom_ipcc_remove,
        .driver = {
                .name = "qcom-ipcc",
                .of_match_table = qcom_ipcc_of_match,
index 4ad3653f38666bb3db7ef16a86bf63c955e01c04..1442f275782bd5b66c28c1ceae2fac36b2354a13 100644 (file)
@@ -331,7 +331,7 @@ err_clk:
        return ret;
 }
 
-static int stm32_ipcc_remove(struct platform_device *pdev)
+static void stm32_ipcc_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
 
@@ -339,8 +339,6 @@ static int stm32_ipcc_remove(struct platform_device *pdev)
                dev_pm_clear_wake_irq(&pdev->dev);
 
        device_set_wakeup_capable(dev, false);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -381,7 +379,7 @@ static struct platform_driver stm32_ipcc_driver = {
                .of_match_table = stm32_ipcc_of_match,
        },
        .probe          = stm32_ipcc_probe,
-       .remove         = stm32_ipcc_remove,
+       .remove_new     = stm32_ipcc_remove,
 };
 
 module_platform_driver(stm32_ipcc_driver);
index 7f8d931042d384a46a6484d34390aefd56f28b8d..3dcc54dc83b2df326a851d589e57a3f5c9f99bdb 100644 (file)
@@ -287,15 +287,13 @@ err_disable_unprepare:
        return ret;
 }
 
-static int sun6i_msgbox_remove(struct platform_device *pdev)
+static void sun6i_msgbox_remove(struct platform_device *pdev)
 {
        struct sun6i_msgbox *mbox = platform_get_drvdata(pdev);
 
        mbox_controller_unregister(&mbox->controller);
        /* See the comment in sun6i_msgbox_probe about the reset line. */
        clk_disable_unprepare(mbox->clk);
-
-       return 0;
 }
 
 static const struct of_device_id sun6i_msgbox_of_match[] = {
@@ -310,7 +308,7 @@ static struct platform_driver sun6i_msgbox_driver = {
                .of_match_table = sun6i_msgbox_of_match,
        },
        .probe  = sun6i_msgbox_probe,
-       .remove = sun6i_msgbox_remove,
+       .remove_new = sun6i_msgbox_remove,
 };
 module_platform_driver(sun6i_msgbox_driver);
 
index fe29fc2ca5260d104b36543eb7f39ae00faf80ab..19ef56cbcfd39da11760b81b49500f57e5d4e50e 100644 (file)
@@ -868,13 +868,11 @@ static int tegra_hsp_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int tegra_hsp_remove(struct platform_device *pdev)
+static void tegra_hsp_remove(struct platform_device *pdev)
 {
        struct tegra_hsp *hsp = platform_get_drvdata(pdev);
 
        lockdep_unregister_key(&hsp->lock_key);
-
-       return 0;
 }
 
 static int __maybe_unused tegra_hsp_resume(struct device *dev)
@@ -953,7 +951,7 @@ static struct platform_driver tegra_hsp_driver = {
                .pm = &tegra_hsp_pm_ops,
        },
        .probe = tegra_hsp_probe,
-       .remove = tegra_hsp_remove,
+       .remove_new = tegra_hsp_remove,
 };
 
 static int __init tegra_hsp_init(void)
index 7fa533e80dd97edb18b191ba0aec582586e701ae..25c65afc030a3577c2a51bf820c6b86ffe920f85 100644 (file)
@@ -81,7 +81,6 @@ struct zynqmp_ipi_mchan {
  * @remote_id:            remote IPI agent ID
  * @mbox:                 mailbox Controller
  * @mchans:               array for channels, tx channel and rx channel.
- * @irq:                  IPI agent interrupt ID
  */
 struct zynqmp_ipi_mbox {
        struct zynqmp_ipi_pdata *pdata;
@@ -688,19 +687,17 @@ free_mbox_dev:
        return ret;
 }
 
-static int zynqmp_ipi_remove(struct platform_device *pdev)
+static void zynqmp_ipi_remove(struct platform_device *pdev)
 {
        struct zynqmp_ipi_pdata *pdata;
 
        pdata = platform_get_drvdata(pdev);
        zynqmp_ipi_free_mboxes(pdata);
-
-       return 0;
 }
 
 static struct platform_driver zynqmp_ipi_driver = {
        .probe = zynqmp_ipi_probe,
-       .remove = zynqmp_ipi_remove,
+       .remove_new = zynqmp_ipi_remove,
        .driver = {
                   .name = "zynqmp-ipi",
                   .of_match_table = of_match_ptr(zynqmp_ipi_of_match),
index ba4530459de8d065253a29e98688de875f7a1ca6..61994da7bad01fe38f7254a07c77ae4224b44b96 100644 (file)
@@ -263,6 +263,7 @@ static void mcb_free_bus(struct device *dev)
 
 /**
  * mcb_alloc_bus() - Allocate a new @mcb_bus
+ * @carrier: generic &struct device for the carrier device
  *
  * Allocate a new @mcb_bus.
  */
@@ -327,7 +328,7 @@ void mcb_release_bus(struct mcb_bus *bus)
 EXPORT_SYMBOL_NS_GPL(mcb_release_bus, MCB);
 
 /**
- * mcb_bus_put() - Increment refcnt
+ * mcb_bus_get() - Increment refcnt
  * @bus: The @mcb_bus
  *
  * Get a @mcb_bus' ref
@@ -455,7 +456,7 @@ EXPORT_SYMBOL_NS_GPL(mcb_request_mem, MCB);
 
 /**
  * mcb_release_mem() - Release memory requested by device
- * @dev: The @mcb_device that requested the memory
+ * @mem: The memory resource to be released
  *
  * Release memory that was prior requested via @mcb_request_mem().
  */
index 095b9b49aa8250a1f56c531883cce5f6e8a24727..e6757a30dccad1fa1a6ae060b33d41a6a120dda3 100644 (file)
@@ -22,6 +22,8 @@
 #include "dm-ima.h"
 
 #define DM_RESERVED_MAX_IOS            1024
+#define DM_MAX_TARGETS                 1048576
+#define DM_MAX_TARGET_PARAMS           1024
 
 struct dm_io;
 
index 855b482cbff1f072912e957e8c1cc1d3b4e1b319..f745f85082434dca8fb6d6bf9efe30db79b2a81e 100644 (file)
@@ -73,10 +73,8 @@ struct dm_crypt_io {
        struct bio *base_bio;
        u8 *integrity_metadata;
        bool integrity_metadata_from_pool:1;
-       bool in_tasklet:1;
 
        struct work_struct work;
-       struct tasklet_struct tasklet;
 
        struct convert_context ctx;
 
@@ -1762,7 +1760,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
        io->ctx.r.req = NULL;
        io->integrity_metadata = NULL;
        io->integrity_metadata_from_pool = false;
-       io->in_tasklet = false;
        atomic_set(&io->io_pending, 0);
 }
 
@@ -1771,13 +1768,6 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
        atomic_inc(&io->io_pending);
 }
 
-static void kcryptd_io_bio_endio(struct work_struct *work)
-{
-       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-
-       bio_endio(io->base_bio);
-}
-
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -1801,20 +1791,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 
        base_bio->bi_status = error;
 
-       /*
-        * If we are running this function from our tasklet,
-        * we can't call bio_endio() here, because it will call
-        * clone_endio() from dm.c, which in turn will
-        * free the current struct dm_crypt_io structure with
-        * our tasklet. In this case we need to delay bio_endio()
-        * execution to after the tasklet is done and dequeued.
-        */
-       if (io->in_tasklet) {
-               INIT_WORK(&io->work, kcryptd_io_bio_endio);
-               queue_work(cc->io_queue, &io->work);
-               return;
-       }
-
        bio_endio(base_bio);
 }
 
@@ -2246,11 +2222,6 @@ static void kcryptd_crypt(struct work_struct *work)
                kcryptd_crypt_write_convert(io);
 }
 
-static void kcryptd_crypt_tasklet(unsigned long work)
-{
-       kcryptd_crypt((struct work_struct *)work);
-}
-
 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
@@ -2262,15 +2233,10 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
                 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
                 * it is being executed with irqs disabled.
                 */
-               if (in_hardirq() || irqs_disabled()) {
-                       io->in_tasklet = true;
-                       tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
-                       tasklet_schedule(&io->tasklet);
+               if (!(in_hardirq() || irqs_disabled())) {
+                       kcryptd_crypt(&io->work);
                        return;
                }
-
-               kcryptd_crypt(&io->work);
-               return;
        }
 
        INIT_WORK(&io->work, kcryptd_crypt);
index e65058e0ed06ab73b9d20d26dfbf7aca55829572..3b1ad7127cb846a1b50059921241f2abe63eaf53 100644 (file)
@@ -1941,7 +1941,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
                           minimum_data_size - sizeof(param_kernel->version)))
                return -EFAULT;
 
-       if (param_kernel->data_size < minimum_data_size) {
+       if (unlikely(param_kernel->data_size < minimum_data_size) ||
+           unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) {
                DMERR("Invalid data size in the ioctl structure: %u",
                      param_kernel->data_size);
                return -EINVAL;
index bdc14ec9981414c60e1dee432b97d50e82dbbc88..1e5d988f44da6919da6de094c6744bf1bb2a89be 100644 (file)
@@ -66,6 +66,9 @@ struct dm_stats_last_position {
        unsigned int last_rw;
 };
 
+#define DM_STAT_MAX_ENTRIES            8388608
+#define DM_STAT_MAX_HISTOGRAM_ENTRIES  134217728
+
 /*
  * A typo on the command line could possibly make the kernel run out of memory
  * and crash. To prevent the crash we account all used memory. We fail if we
@@ -285,6 +288,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
        if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
                return -EOVERFLOW;
 
+       if (n_entries > DM_STAT_MAX_ENTRIES)
+               return -EOVERFLOW;
+
        shared_alloc_size = struct_size(s, stat_shared, n_entries);
        if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
                return -EOVERFLOW;
@@ -297,6 +303,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
        if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
                return -EOVERFLOW;
 
+       if ((n_histogram_entries + 1) * (size_t)n_entries > DM_STAT_MAX_HISTOGRAM_ENTRIES)
+               return -EOVERFLOW;
+
        if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
                                 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
                return -ENOMEM;
index 260b5b8f2b0d7e9352ed9ed9376a91504ee10c9d..41f1d731ae5ac275d90fbc334666438187da02b4 100644 (file)
@@ -129,7 +129,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 int dm_table_create(struct dm_table **result, blk_mode_t mode,
                    unsigned int num_targets, struct mapped_device *md)
 {
-       struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
+       struct dm_table *t;
+
+       if (num_targets > DM_MAX_TARGETS)
+               return -EOVERFLOW;
+
+       t = kzalloc(sizeof(*t), GFP_KERNEL);
 
        if (!t)
                return -ENOMEM;
@@ -144,7 +149,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode,
 
        if (!num_targets) {
                kfree(t);
-               return -ENOMEM;
+               return -EOVERFLOW;
        }
 
        if (alloc_targets(t, num_targets)) {
index 14e58ae705218f71923b99bdfc1d195e6a45e658..82662f5769c4af7f5456fc97e044c63574162060 100644 (file)
@@ -645,23 +645,6 @@ static void verity_work(struct work_struct *w)
        verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
 }
 
-static void verity_tasklet(unsigned long data)
-{
-       struct dm_verity_io *io = (struct dm_verity_io *)data;
-       int err;
-
-       io->in_tasklet = true;
-       err = verity_verify_io(io);
-       if (err == -EAGAIN || err == -ENOMEM) {
-               /* fallback to retrying with work-queue */
-               INIT_WORK(&io->work, verity_work);
-               queue_work(io->v->verify_wq, &io->work);
-               return;
-       }
-
-       verity_finish_io(io, errno_to_blk_status(err));
-}
-
 static void verity_end_io(struct bio *bio)
 {
        struct dm_verity_io *io = bio->bi_private;
@@ -674,13 +657,8 @@ static void verity_end_io(struct bio *bio)
                return;
        }
 
-       if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
-               tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
-               tasklet_schedule(&io->tasklet);
-       } else {
-               INIT_WORK(&io->work, verity_work);
-               queue_work(io->v->verify_wq, &io->work);
-       }
+       INIT_WORK(&io->work, verity_work);
+       queue_work(io->v->verify_wq, &io->work);
 }
 
 /*
index f9d522c870e61665d87271f66c690138db42108f..f3f6070084196825f21dcc17947f67974a90cbde 100644 (file)
@@ -83,7 +83,6 @@ struct dm_verity_io {
        struct bvec_iter iter;
 
        struct work_struct work;
-       struct tasklet_struct tasklet;
 
        /*
         * Three variably-size fields follow this struct:
index 074cb785eafc19172b9ebf4b6a6f2ae4591563d6..b463c28c39ad34ca23b3d2433811384901171d80 100644 (file)
@@ -299,7 +299,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
                long i;
 
                wc->memory_map = NULL;
-               pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
+               pages = vmalloc_array(p, sizeof(struct page *));
                if (!pages) {
                        r = -ENOMEM;
                        goto err2;
@@ -330,7 +330,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
                        r = -ENOMEM;
                        goto err3;
                }
-               kvfree(pages);
+               vfree(pages);
                wc->memory_vmapped = true;
        }
 
@@ -341,7 +341,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 
        return 0;
 err3:
-       kvfree(pages);
+       vfree(pages);
 err2:
        dax_read_unlock(id);
 err1:
@@ -962,7 +962,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
 
        if (wc->entries)
                return 0;
-       wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
+       wc->entries = vmalloc_array(wc->n_blocks, sizeof(struct wc_entry));
        if (!wc->entries)
                return -ENOMEM;
        for (b = 0; b < wc->n_blocks; b++) {
index 0a2bd72a6d76754ed4526b6a098b095476e1772b..2266358d807466f95d02b431d09ee39805dff5e8 100644 (file)
@@ -8132,6 +8132,19 @@ static void status_unused(struct seq_file *seq)
        seq_printf(seq, "\n");
 }
 
+static void status_personalities(struct seq_file *seq)
+{
+       struct md_personality *pers;
+
+       seq_puts(seq, "Personalities : ");
+       spin_lock(&pers_lock);
+       list_for_each_entry(pers, &pers_list, list)
+               seq_printf(seq, "[%s] ", pers->name);
+
+       spin_unlock(&pers_lock);
+       seq_puts(seq, "\n");
+}
+
 static int status_resync(struct seq_file *seq, struct mddev *mddev)
 {
        sector_t max_sectors, resync, res;
@@ -8273,20 +8286,10 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(&all_mddevs_lock)
 {
-       struct md_personality *pers;
-
-       seq_puts(seq, "Personalities : ");
-       spin_lock(&pers_lock);
-       list_for_each_entry(pers, &pers_list, list)
-               seq_printf(seq, "[%s] ", pers->name);
-
-       spin_unlock(&pers_lock);
-       seq_puts(seq, "\n");
        seq->poll_event = atomic_read(&md_event_count);
-
        spin_lock(&all_mddevs_lock);
 
-       return seq_list_start(&all_mddevs, *pos);
+       return seq_list_start_head(&all_mddevs, *pos);
 }
 
 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -8297,16 +8300,23 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static void md_seq_stop(struct seq_file *seq, void *v)
        __releases(&all_mddevs_lock)
 {
-       status_unused(seq);
        spin_unlock(&all_mddevs_lock);
 }
 
 static int md_seq_show(struct seq_file *seq, void *v)
 {
-       struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);
+       struct mddev *mddev;
        sector_t sectors;
        struct md_rdev *rdev;
 
+       if (v == &all_mddevs) {
+               status_personalities(seq);
+               if (list_empty(&all_mddevs))
+                       status_unused(seq);
+               return 0;
+       }
+
+       mddev = list_entry(v, struct mddev, all_mddevs);
        if (!mddev_get(mddev))
                return 0;
 
@@ -8382,6 +8392,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
        }
        spin_unlock(&mddev->lock);
        spin_lock(&all_mddevs_lock);
+
+       if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
+               status_unused(seq);
+
        if (atomic_dec_and_test(&mddev->active))
                __mddev_put(mddev);
 
index aaa434f0c17515f31519199e94468f92ff96b57d..286f8b16c7bde7fbc0bca0705d470748d9f5eeb5 100644 (file)
@@ -1968,12 +1968,12 @@ static void end_sync_write(struct bio *bio)
 }
 
 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
-                          int sectors, struct page *page, int rw)
+                          int sectors, struct page *page, blk_opf_t rw)
 {
        if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
                /* success */
                return 1;
-       if (rw == WRITE) {
+       if (rw == REQ_OP_WRITE) {
                set_bit(WriteErrorSeen, &rdev->flags);
                if (!test_and_set_bit(WantReplacement,
                                      &rdev->flags))
@@ -2090,7 +2090,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
                        rdev = conf->mirrors[d].rdev;
                        if (r1_sync_page_io(rdev, sect, s,
                                            pages[idx],
-                                           WRITE) == 0) {
+                                           REQ_OP_WRITE) == 0) {
                                r1_bio->bios[d]->bi_end_io = NULL;
                                rdev_dec_pending(rdev, mddev);
                        }
@@ -2105,7 +2105,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
                        rdev = conf->mirrors[d].rdev;
                        if (r1_sync_page_io(rdev, sect, s,
                                            pages[idx],
-                                           READ) != 0)
+                                           REQ_OP_READ) != 0)
                                atomic_add(s, &rdev->corrected_errors);
                }
                sectors -= s;
@@ -2262,7 +2262,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
        int sectors = r1_bio->sectors;
        int read_disk = r1_bio->read_disk;
        struct mddev *mddev = conf->mddev;
-       struct md_rdev *rdev = rcu_dereference(conf->mirrors[read_disk].rdev);
+       struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
 
        if (exceed_read_errors(mddev, rdev)) {
                r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
@@ -2321,7 +2321,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
                            !test_bit(Faulty, &rdev->flags)) {
                                atomic_inc(&rdev->nr_pending);
                                r1_sync_page_io(rdev, sect, s,
-                                               conf->tmppage, WRITE);
+                                               conf->tmppage, REQ_OP_WRITE);
                                rdev_dec_pending(rdev, mddev);
                        }
                }
@@ -2335,7 +2335,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
                            !test_bit(Faulty, &rdev->flags)) {
                                atomic_inc(&rdev->nr_pending);
                                if (r1_sync_page_io(rdev, sect, s,
-                                                   conf->tmppage, READ)) {
+                                               conf->tmppage, REQ_OP_READ)) {
                                        atomic_add(s, &rdev->corrected_errors);
                                        pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
                                                mdname(mddev), s,
index 41a832dd1426bae695dc90b385976b2bf4b7a304..b6bf8f232f4880ffcd1f7ff0bd00ddb0bbebccb9 100644 (file)
@@ -989,7 +989,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
        bool no_previous_buffers = !q_num_bufs;
        int ret = 0;
 
-       if (q->num_buffers == q->max_num_buffers) {
+       if (q_num_bufs == q->max_num_buffers) {
                dprintk(q, 1, "maximum number of buffers already allocated\n");
                return -ENOBUFS;
        }
index 54d572c3b515d67722c4dbe7490437bc83c30b96..c575198e83547ab99719eca7e81dc6e7c0e601d4 100644 (file)
@@ -671,8 +671,20 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
 }
 EXPORT_SYMBOL(vb2_querybuf);
 
-static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
+static void vb2_set_flags_and_caps(struct vb2_queue *q, u32 memory,
+                                  u32 *flags, u32 *caps, u32 *max_num_bufs)
 {
+       if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
+               /*
+                * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
+                * but in order to avoid bugs we zero out all bits.
+                */
+               *flags = 0;
+       } else {
+               /* Clear all unknown flags. */
+               *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+       }
+
        *caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
        if (q->io_modes & VB2_MMAP)
                *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
@@ -686,21 +698,9 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
                *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
        if (q->supports_requests)
                *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
-}
-
-static void validate_memory_flags(struct vb2_queue *q,
-                                 int memory,
-                                 u32 *flags)
-{
-       if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
-               /*
-                * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
-                * but in order to avoid bugs we zero out all bits.
-                */
-               *flags = 0;
-       } else {
-               /* Clear all unknown flags. */
-               *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+       if (max_num_bufs) {
+               *max_num_bufs = q->max_num_buffers;
+               *caps |= V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS;
        }
 }
 
@@ -709,8 +709,8 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
        int ret = vb2_verify_memory_type(q, req->memory, req->type);
        u32 flags = req->flags;
 
-       fill_buf_caps(q, &req->capabilities);
-       validate_memory_flags(q, req->memory, &flags);
+       vb2_set_flags_and_caps(q, req->memory, &flags,
+                              &req->capabilities, NULL);
        req->flags = flags;
        return ret ? ret : vb2_core_reqbufs(q, req->memory,
                                            req->flags, &req->count);
@@ -751,11 +751,9 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
        int ret = vb2_verify_memory_type(q, create->memory, f->type);
        unsigned i;
 
-       fill_buf_caps(q, &create->capabilities);
-       validate_memory_flags(q, create->memory, &create->flags);
        create->index = vb2_get_num_buffers(q);
-       create->max_num_buffers = q->max_num_buffers;
-       create->capabilities |= V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS;
+       vb2_set_flags_and_caps(q, create->memory, &create->flags,
+                              &create->capabilities, &create->max_num_buffers);
        if (create->count == 0)
                return ret != -EBUSY ? ret : 0;
 
@@ -1006,8 +1004,8 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv,
        int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
        u32 flags = p->flags;
 
-       fill_buf_caps(vdev->queue, &p->capabilities);
-       validate_memory_flags(vdev->queue, p->memory, &flags);
+       vb2_set_flags_and_caps(vdev->queue, p->memory, &flags,
+                              &p->capabilities, NULL);
        p->flags = flags;
        if (res)
                return res;
@@ -1026,12 +1024,11 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv,
                          struct v4l2_create_buffers *p)
 {
        struct video_device *vdev = video_devdata(file);
-       int res = vb2_verify_memory_type(vdev->queue, p->memory,
-                       p->format.type);
+       int res = vb2_verify_memory_type(vdev->queue, p->memory, p->format.type);
 
-       p->index = vdev->queue->num_buffers;
-       fill_buf_caps(vdev->queue, &p->capabilities);
-       validate_memory_flags(vdev->queue, p->memory, &p->flags);
+       p->index = vb2_get_num_buffers(vdev->queue);
+       vb2_set_flags_and_caps(vdev->queue, p->memory, &p->flags,
+                              &p->capabilities, &p->max_num_buffers);
        /*
         * If count == 0, then just check if memory and type are valid.
         * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
index bd38ce4442325314162a26621f919a4dce7a86a3..46676f2c89c72766844f26efbf8543e731122790 100644 (file)
@@ -289,7 +289,7 @@ static const struct i2c_algorithm netup_i2c_algorithm = {
 static const struct i2c_adapter netup_i2c_adapter = {
        .owner          = THIS_MODULE,
        .name           = NETUP_UNIDVB_NAME,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+       .class          = I2C_CLASS_HWMON,
        .algo           = &netup_i2c_algorithm,
 };
 
index f414ee1316f29ca3feecbd0253c585c2adce8cb3..fdbb817e63601c032b312ab83a6a810cfbf71c6c 100644 (file)
 #define SOLO_MP4E_EXT_ADDR(__solo) \
        (SOLO_EREF_EXT_ADDR(__solo) + SOLO_EREF_EXT_AREA(__solo))
 #define SOLO_MP4E_EXT_SIZE(__solo) \
-       max((__solo->nr_chans * 0x00080000),                            \
-           min(((__solo->sdram_size - SOLO_MP4E_EXT_ADDR(__solo)) -    \
-                __SOLO_JPEG_MIN_SIZE(__solo)), 0x00ff0000))
+       clamp(__solo->sdram_size - SOLO_MP4E_EXT_ADDR(__solo) - \
+             __SOLO_JPEG_MIN_SIZE(__solo),                     \
+             __solo->nr_chans * 0x00080000, 0x00ff0000)
 
 #define __SOLO_JPEG_MIN_SIZE(__solo)           (__solo->nr_chans * 0x00080000)
 #define SOLO_JPEG_EXT_ADDR(__solo) \
                (SOLO_MP4E_EXT_ADDR(__solo) + SOLO_MP4E_EXT_SIZE(__solo))
 #define SOLO_JPEG_EXT_SIZE(__solo) \
-       max(__SOLO_JPEG_MIN_SIZE(__solo),                               \
-           min((__solo->sdram_size - SOLO_JPEG_EXT_ADDR(__solo)), 0x00ff0000))
+       clamp(__solo->sdram_size - SOLO_JPEG_EXT_ADDR(__solo),  \
+             __SOLO_JPEG_MIN_SIZE(__solo), 0x00ff0000)
 
 #define SOLO_SDRAM_END(__solo) \
        (SOLO_JPEG_EXT_ADDR(__solo) + SOLO_JPEG_EXT_SIZE(__solo))
index bfe4caa79cc9800f7b01839fbbb768c73010a72e..0d90b5820bef7286694129ec0c4ed4f436d399b2 100644 (file)
@@ -272,7 +272,7 @@ static const struct wave5_match_data ti_wave521c_data = {
 };
 
 static const struct of_device_id wave5_dt_ids[] = {
-       { .compatible = "ti,k3-j721s2-wave521c", .data = &ti_wave521c_data },
+       { .compatible = "ti,j721s2-wave521c", .data = &ti_wave521c_data },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, wave5_dt_ids);
index c5f37c03af9c95793be0f708feaa4e6d66982371..fe368aebbc139a01d186cad4d25700a10f4c8215 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/hrtimer.h>
+#include <linux/completion.h>
 #include <media/rc-core.h>
 
 #define DRIVER_NAME    "pwm-ir-tx"
 
 struct pwm_ir {
        struct pwm_device *pwm;
-       unsigned int carrier;
-       unsigned int duty_cycle;
+       struct hrtimer timer;
+       struct completion tx_done;
+       struct pwm_state *state;
+       u32 carrier;
+       u32 duty_cycle;
+       const unsigned int *txbuf;
+       unsigned int txbuf_len;
+       unsigned int txbuf_index;
 };
 
 static const struct of_device_id pwm_ir_of_match[] = {
@@ -49,8 +57,8 @@ static int pwm_ir_set_carrier(struct rc_dev *dev, u32 carrier)
        return 0;
 }
 
-static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
-                    unsigned int count)
+static int pwm_ir_tx_sleep(struct rc_dev *dev, unsigned int *txbuf,
+                          unsigned int count)
 {
        struct pwm_ir *pwm_ir = dev->priv;
        struct pwm_device *pwm = pwm_ir->pwm;
@@ -68,7 +76,7 @@ static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
 
        for (i = 0; i < count; i++) {
                state.enabled = !(i % 2);
-               pwm_apply_state(pwm, &state);
+               pwm_apply_might_sleep(pwm, &state);
 
                edge = ktime_add_us(edge, txbuf[i]);
                delta = ktime_us_delta(edge, ktime_get());
@@ -77,11 +85,67 @@ static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
        }
 
        state.enabled = false;
-       pwm_apply_state(pwm, &state);
+       pwm_apply_might_sleep(pwm, &state);
 
        return count;
 }
 
+static int pwm_ir_tx_atomic(struct rc_dev *dev, unsigned int *txbuf,
+                           unsigned int count)
+{
+       struct pwm_ir *pwm_ir = dev->priv;
+       struct pwm_device *pwm = pwm_ir->pwm;
+       struct pwm_state state;
+
+       pwm_init_state(pwm, &state);
+
+       state.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, pwm_ir->carrier);
+       pwm_set_relative_duty_cycle(&state, pwm_ir->duty_cycle, 100);
+
+       pwm_ir->txbuf = txbuf;
+       pwm_ir->txbuf_len = count;
+       pwm_ir->txbuf_index = 0;
+       pwm_ir->state = &state;
+
+       hrtimer_start(&pwm_ir->timer, 0, HRTIMER_MODE_REL);
+
+       wait_for_completion(&pwm_ir->tx_done);
+
+       return count;
+}
+
+static enum hrtimer_restart pwm_ir_timer(struct hrtimer *timer)
+{
+       struct pwm_ir *pwm_ir = container_of(timer, struct pwm_ir, timer);
+       ktime_t now;
+
+       /*
+        * If we happen to hit an odd latency spike, loop through the
+        * pulses until we catch up.
+        */
+       do {
+               u64 ns;
+
+               pwm_ir->state->enabled = !(pwm_ir->txbuf_index % 2);
+               pwm_apply_atomic(pwm_ir->pwm, pwm_ir->state);
+
+               if (pwm_ir->txbuf_index >= pwm_ir->txbuf_len) {
+                       complete(&pwm_ir->tx_done);
+
+                       return HRTIMER_NORESTART;
+               }
+
+               ns = US_TO_NS(pwm_ir->txbuf[pwm_ir->txbuf_index]);
+               hrtimer_add_expires_ns(timer, ns);
+
+               pwm_ir->txbuf_index++;
+
+               now = timer->base->get_time();
+       } while (hrtimer_get_expires_tv64(timer) < now);
+
+       return HRTIMER_RESTART;
+}
+
 static int pwm_ir_probe(struct platform_device *pdev)
 {
        struct pwm_ir *pwm_ir;
@@ -103,10 +167,19 @@ static int pwm_ir_probe(struct platform_device *pdev)
        if (!rcdev)
                return -ENOMEM;
 
+       if (pwm_might_sleep(pwm_ir->pwm)) {
+               dev_info(&pdev->dev, "TX will not be accurate as PWM device might sleep\n");
+               rcdev->tx_ir = pwm_ir_tx_sleep;
+       } else {
+               init_completion(&pwm_ir->tx_done);
+               hrtimer_init(&pwm_ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+               pwm_ir->timer.function = pwm_ir_timer;
+               rcdev->tx_ir = pwm_ir_tx_atomic;
+       }
+
        rcdev->priv = pwm_ir;
        rcdev->driver_name = DRIVER_NAME;
        rcdev->device_name = DEVICE_NAME;
-       rcdev->tx_ir = pwm_ir_tx;
        rcdev->s_tx_duty_cycle = pwm_ir_set_duty_cycle;
        rcdev->s_tx_carrier = pwm_ir_set_carrier;
 
index 0ff014a9d3cd420081fb16b27c870d505a583146..1b3183951bfe5942f0a096e289e8e87b93b3894f 100644 (file)
@@ -114,9 +114,12 @@ static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
 static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
 {
 #if IS_ENABLED(CONFIG_IOMMU_API)
-       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct of_phandle_args args;
        unsigned int i, index = 0;
+       u32 sid;
+
+       if (!tegra_dev_iommu_get_stream_id(dev, &sid))
+               return 0;
 
        while (!of_parse_phandle_with_args(dev->of_node, "interconnects", "#interconnect-cells",
                                           index, &args)) {
@@ -124,11 +127,10 @@ static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
                        for (i = 0; i < mc->soc->num_clients; i++) {
                                const struct tegra_mc_client *client = &mc->soc->clients[i];
 
-                               if (client->id == args.args[0]) {
-                                       u32 sid = fwspec->ids[0] & MC_SID_STREAMID_OVERRIDE_MASK;
-
-                                       tegra186_mc_client_sid_override(mc, client, sid);
-                               }
+                               if (client->id == args.args[0])
+                                       tegra186_mc_client_sid_override(
+                                               mc, client,
+                                               sid & MC_SID_STREAMID_OVERRIDE_MASK);
                        }
                }
 
index 925c19ee513b03d64fb80b7a093dce48b20e6623..e7a6e45b9fac2d5d48ec5d127218ea942b8d2589 100644 (file)
@@ -1483,6 +1483,7 @@ config MFD_SYSCON
 
 config MFD_TI_AM335X_TSCADC
        tristate "TI ADC / Touch Screen chip support"
+       depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
        select MFD_CORE
        select REGMAP
        select REGMAP_MMIO
index eeeb62415f538daae637fb98b35a53fb11547f6d..8f3ebe651eeaaf7080d06079d159dc8f3eb444ac 100644 (file)
@@ -30,7 +30,7 @@ static void ab8500_power_off(void)
 {
        sigset_t old;
        sigset_t all;
-       static const char * const pss[] = {"ab8500_ac", "pm2301", "ab8500_usb"};
+       static const char * const pss[] = {"ab8500_ac", "ab8500_usb"};
        int i;
        bool charger_present = false;
        union power_supply_propval val;
@@ -140,14 +140,12 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int ab8500_sysctrl_remove(struct platform_device *pdev)
+static void ab8500_sysctrl_remove(struct platform_device *pdev)
 {
        sysctrl_dev = NULL;
 
        if (pm_power_off == ab8500_power_off)
                pm_power_off = NULL;
-
-       return 0;
 }
 
 static const struct of_device_id ab8500_sysctrl_match[] = {
@@ -161,7 +159,7 @@ static struct platform_driver ab8500_sysctrl_driver = {
                .of_match_table = ab8500_sysctrl_match,
        },
        .probe = ab8500_sysctrl_probe,
-       .remove = ab8500_sysctrl_remove,
+       .remove_new = ab8500_sysctrl_remove,
 };
 
 static int __init ab8500_sysctrl_init(void)
index 79d393b602bf363d24ca8927656cc7fade5b1d8f..603b1cd5278507171d2c9d50e32e2a0a5f8cb267 100644 (file)
@@ -288,13 +288,12 @@ failed:
        return retval;
 }
 
-static int ec_device_remove(struct platform_device *pdev)
+static void ec_device_remove(struct platform_device *pdev)
 {
        struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev);
 
        mfd_remove_devices(ec->dev);
        device_unregister(&ec->class_dev);
-       return 0;
 }
 
 static const struct platform_device_id cros_ec_id[] = {
@@ -309,7 +308,7 @@ static struct platform_driver cros_ec_dev_driver = {
        },
        .id_table = cros_ec_id,
        .probe = ec_device_probe,
-       .remove = ec_device_remove,
+       .remove_new = ec_device_remove,
 };
 
 static int __init cros_ec_dev_init(void)
index 7392b3d2e6b965fd5613bd1973ca1a46985a4c1f..1d85bbf8cdd5d135cd8f94e0412669e40afda79e 100644 (file)
@@ -6,24 +6,23 @@
  *                         Cirrus Logic International Semiconductor Ltd.
  */
 
+#include <linux/device.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/mfd/cs42l43-regs.h>
 #include <linux/module.h>
-#include <linux/device.h>
 #include <linux/soundwire/sdw.h>
 #include <linux/soundwire/sdw_registers.h>
 #include <linux/soundwire/sdw_type.h>
 
 #include "cs42l43.h"
 
-enum cs42l43_sdw_ports {
-       CS42L43_DMIC_DEC_ASP_PORT = 1,
-       CS42L43_SPK_TX_PORT,
-       CS42L43_SPDIF_HP_PORT,
-       CS42L43_SPK_RX_PORT,
-       CS42L43_ASP_PORT,
-};
+#define CS42L43_SDW_PORT(port, chans) { \
+       .num = port, \
+       .max_ch = chans, \
+       .type = SDW_DPN_FULL, \
+       .max_word = 24, \
+}
 
 static const struct regmap_config cs42l43_sdw_regmap = {
        .reg_bits               = 32,
@@ -42,65 +41,48 @@ static const struct regmap_config cs42l43_sdw_regmap = {
        .num_reg_defaults       = ARRAY_SIZE(cs42l43_reg_default),
 };
 
+static const struct sdw_dpn_prop cs42l43_src_port_props[] = {
+       CS42L43_SDW_PORT(1, 4),
+       CS42L43_SDW_PORT(2, 2),
+       CS42L43_SDW_PORT(3, 2),
+       CS42L43_SDW_PORT(4, 2),
+};
+
+static const struct sdw_dpn_prop cs42l43_sink_port_props[] = {
+       CS42L43_SDW_PORT(5, 2),
+       CS42L43_SDW_PORT(6, 2),
+       CS42L43_SDW_PORT(7, 2),
+};
+
 static int cs42l43_read_prop(struct sdw_slave *sdw)
 {
        struct sdw_slave_prop *prop = &sdw->prop;
        struct device *dev = &sdw->dev;
-       struct sdw_dpn_prop *dpn;
-       unsigned long addr;
-       int nval;
        int i;
-       u32 bit;
 
        prop->use_domain_irq = true;
        prop->paging_support = true;
        prop->wake_capable = true;
-       prop->source_ports = BIT(CS42L43_DMIC_DEC_ASP_PORT) | BIT(CS42L43_SPK_TX_PORT);
-       prop->sink_ports = BIT(CS42L43_SPDIF_HP_PORT) |
-                          BIT(CS42L43_SPK_RX_PORT) | BIT(CS42L43_ASP_PORT);
        prop->quirks = SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY;
        prop->scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY |
                              SDW_SCP_INT1_IMPL_DEF;
 
-       nval = hweight32(prop->source_ports);
-       prop->src_dpn_prop = devm_kcalloc(dev, nval, sizeof(*prop->src_dpn_prop),
-                                         GFP_KERNEL);
+       for (i = 0; i < ARRAY_SIZE(cs42l43_src_port_props); i++)
+               prop->source_ports |= BIT(cs42l43_src_port_props[i].num);
+
+       prop->src_dpn_prop = devm_kmemdup(dev, cs42l43_src_port_props,
+                                         sizeof(cs42l43_src_port_props), GFP_KERNEL);
        if (!prop->src_dpn_prop)
                return -ENOMEM;
 
-       i = 0;
-       dpn = prop->src_dpn_prop;
-       addr = prop->source_ports;
-       for_each_set_bit(bit, &addr, 32) {
-               dpn[i].num = bit;
-               dpn[i].max_ch = 2;
-               dpn[i].type = SDW_DPN_FULL;
-               dpn[i].max_word = 24;
-               i++;
-       }
-       /*
-        * All ports are 2 channels max, except the first one,
-        * CS42L43_DMIC_DEC_ASP_PORT.
-        */
-       dpn[CS42L43_DMIC_DEC_ASP_PORT].max_ch = 4;
+       for (i = 0; i < ARRAY_SIZE(cs42l43_sink_port_props); i++)
+               prop->sink_ports |= BIT(cs42l43_sink_port_props[i].num);
 
-       nval = hweight32(prop->sink_ports);
-       prop->sink_dpn_prop = devm_kcalloc(dev, nval, sizeof(*prop->sink_dpn_prop),
-                                          GFP_KERNEL);
+       prop->sink_dpn_prop = devm_kmemdup(dev, cs42l43_sink_port_props,
+                                          sizeof(cs42l43_sink_port_props), GFP_KERNEL);
        if (!prop->sink_dpn_prop)
                return -ENOMEM;
 
-       i = 0;
-       dpn = prop->sink_dpn_prop;
-       addr = prop->sink_ports;
-       for_each_set_bit(bit, &addr, 32) {
-               dpn[i].num = bit;
-               dpn[i].max_ch = 2;
-               dpn[i].type = SDW_DPN_FULL;
-               dpn[i].max_word = 24;
-               i++;
-       }
-
        return 0;
 }
 
index 45da007d3e702edd9ab63ce22c83eb76ca290c1b..73a22107900c8c59cdf0a83ee8fb1826d427201d 100644 (file)
@@ -588,16 +588,8 @@ static struct regmap_config da9062_regmap_config = {
        .volatile_table = &da9062_aa_volatile_table,
 };
 
-static const struct of_device_id da9062_dt_ids[] = {
-       { .compatible = "dlg,da9061", .data = (void *)COMPAT_TYPE_DA9061, },
-       { .compatible = "dlg,da9062", .data = (void *)COMPAT_TYPE_DA9062, },
-       { }
-};
-MODULE_DEVICE_TABLE(of, da9062_dt_ids);
-
 static int da9062_i2c_probe(struct i2c_client *i2c)
 {
-       const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
        struct da9062 *chip;
        unsigned int irq_base = 0;
        const struct mfd_cell *cell;
@@ -611,10 +603,7 @@ static int da9062_i2c_probe(struct i2c_client *i2c)
        if (!chip)
                return -ENOMEM;
 
-       if (i2c->dev.of_node)
-               chip->chip_type = (uintptr_t)of_device_get_match_data(&i2c->dev);
-       else
-               chip->chip_type = id->driver_data;
+       chip->chip_type = (uintptr_t)i2c_get_match_data(i2c);
 
        i2c_set_clientdata(i2c, chip);
        chip->dev = &i2c->dev;
@@ -714,10 +703,17 @@ static void da9062_i2c_remove(struct i2c_client *i2c)
        regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
 }
 
+static const struct of_device_id da9062_dt_ids[] = {
+       { .compatible = "dlg,da9061", .data = (void *)COMPAT_TYPE_DA9061 },
+       { .compatible = "dlg,da9062", .data = (void *)COMPAT_TYPE_DA9062 },
+       { }
+};
+MODULE_DEVICE_TABLE(of, da9062_dt_ids);
+
 static const struct i2c_device_id da9062_i2c_id[] = {
        { "da9061", COMPAT_TYPE_DA9061 },
        { "da9062", COMPAT_TYPE_DA9062 },
-       { },
+       { }
 };
 MODULE_DEVICE_TABLE(i2c, da9062_i2c_id);
 
index 1506d8d352b19be4f1e13192c5964fb8e81708c6..e58990c85ed8780ea2eaecb13c71cef6dc6c9684 100644 (file)
@@ -137,7 +137,7 @@ static int exynos_lpass_probe(struct platform_device *pdev)
        return devm_of_platform_populate(dev);
 }
 
-static int exynos_lpass_remove(struct platform_device *pdev)
+static void exynos_lpass_remove(struct platform_device *pdev)
 {
        struct exynos_lpass *lpass = platform_get_drvdata(pdev);
 
@@ -146,8 +146,6 @@ static int exynos_lpass_remove(struct platform_device *pdev)
        if (!pm_runtime_status_suspended(&pdev->dev))
                exynos_lpass_disable(lpass);
        regmap_exit(lpass->top);
-
-       return 0;
 }
 
 static int __maybe_unused exynos_lpass_suspend(struct device *dev)
@@ -187,7 +185,7 @@ static struct platform_driver exynos_lpass_driver = {
                .of_match_table = exynos_lpass_of_match,
        },
        .probe  = exynos_lpass_probe,
-       .remove = exynos_lpass_remove,
+       .remove_new = exynos_lpass_remove,
 };
 module_platform_driver(exynos_lpass_driver);
 
index 089c2ce615b6d30eed8c28f2a7feeb9aa45b21cc..74f38bf3778f8937bb1ffca3587fa7cac1e9d372 100644 (file)
@@ -194,11 +194,9 @@ err_irq:
        return ret;
 }
 
-static int mx25_tsadc_remove(struct platform_device *pdev)
+static void mx25_tsadc_remove(struct platform_device *pdev)
 {
        mx25_tsadc_unset_irq(pdev);
-
-       return 0;
 }
 
 static const struct of_device_id mx25_tsadc_ids[] = {
@@ -213,7 +211,7 @@ static struct platform_driver mx25_tsadc_driver = {
                .of_match_table = mx25_tsadc_ids,
        },
        .probe = mx25_tsadc_probe,
-       .remove = mx25_tsadc_remove,
+       .remove_new = mx25_tsadc_remove,
 };
 module_platform_driver(mx25_tsadc_driver);
 
index 8feae8d8fd9d45daa795c56fb82ec9207b5fde0f..042109304db41959666b021af75fde0b764c2d9a 100644 (file)
@@ -144,13 +144,12 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int hi655x_pmic_remove(struct platform_device *pdev)
+static void hi655x_pmic_remove(struct platform_device *pdev)
 {
        struct hi655x_pmic *pmic = platform_get_drvdata(pdev);
 
        regmap_del_irq_chip(gpiod_to_irq(pmic->gpio), pmic->irq_data);
        mfd_remove_devices(&pdev->dev);
-       return 0;
 }
 
 static const struct of_device_id hi655x_pmic_match[] = {
@@ -165,7 +164,7 @@ static struct platform_driver hi655x_pmic_driver = {
                .of_match_table = hi655x_pmic_match,
        },
        .probe  = hi655x_pmic_probe,
-       .remove = hi655x_pmic_remove,
+       .remove_new = hi655x_pmic_remove,
 };
 module_platform_driver(hi655x_pmic_driver);
 
index 212818aef93e20859227209b8143af55b1dec412..2a83f8678f1d921e085f4748e5b4ed01e100e30e 100644 (file)
@@ -8,15 +8,20 @@
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
  */
 
-#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/gfp_types.h>
 #include <linux/ioport.h>
-#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
+#include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
+
 #include <linux/pxa2xx_ssp.h>
 
+#include <asm/errno.h>
+
 #include "intel-lpss.h"
 
 static const struct property_entry spt_spi_properties[] = {
@@ -169,23 +174,20 @@ MODULE_DEVICE_TABLE(acpi, intel_lpss_acpi_ids);
 
 static int intel_lpss_acpi_probe(struct platform_device *pdev)
 {
+       const struct intel_lpss_platform_info *data;
        struct intel_lpss_platform_info *info;
-       const struct acpi_device_id *id;
        int ret;
 
-       id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev);
-       if (!id)
+       data = device_get_match_data(&pdev->dev);
+       if (!data)
                return -ENODEV;
 
-       info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
-                           GFP_KERNEL);
+       info = devm_kmemdup(&pdev->dev, data, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
+       /* No need to check mem and irq here as intel_lpss_probe() does it for us */
        info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!info->mem)
-               return -ENODEV;
-
        info->irq = platform_get_irq(pdev, 0);
 
        ret = intel_lpss_probe(&pdev->dev, info);
@@ -198,23 +200,19 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int intel_lpss_acpi_remove(struct platform_device *pdev)
+static void intel_lpss_acpi_remove(struct platform_device *pdev)
 {
        intel_lpss_remove(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-
-       return 0;
 }
 
-static INTEL_LPSS_PM_OPS(intel_lpss_acpi_pm_ops);
-
 static struct platform_driver intel_lpss_acpi_driver = {
        .probe = intel_lpss_acpi_probe,
-       .remove = intel_lpss_acpi_remove,
+       .remove_new = intel_lpss_acpi_remove,
        .driver = {
                .name = "intel-lpss",
                .acpi_match_table = intel_lpss_acpi_ids,
-               .pm = &intel_lpss_acpi_pm_ops,
+               .pm = pm_ptr(&intel_lpss_pm_ops),
        },
 };
 
@@ -224,3 +222,4 @@ MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
 MODULE_DESCRIPTION("Intel LPSS ACPI driver");
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(INTEL_LPSS);
index ae5759200622c463214cad7553396a2ca9888bf1..4621d3950b8f9fab4c3ccc2ad650f14cf07c2749 100644 (file)
@@ -8,14 +8,19 @@
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
  */
 
-#include <linux/ioport.h>
-#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/gfp_types.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
+
 #include <linux/pxa2xx_ssp.h>
 
+#include <asm/errno.h>
+
 #include "intel-lpss.h"
 
 /* Some DSDTs have an unused GEXP ACPI device conflicting with I2C4 resources */
@@ -30,6 +35,7 @@ static const struct pci_device_id ignore_resource_conflicts_ids[] = {
 static int intel_lpss_pci_probe(struct pci_dev *pdev,
                                const struct pci_device_id *id)
 {
+       const struct intel_lpss_platform_info *data = (void *)id->driver_data;
        struct intel_lpss_platform_info *info;
        int ret;
 
@@ -37,13 +43,17 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
        if (ret)
                return ret;
 
-       info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
-                           GFP_KERNEL);
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
+       if (ret < 0)
+               return ret;
+
+       info = devm_kmemdup(&pdev->dev, data, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
-       info->mem = &pdev->resource[0];
-       info->irq = pdev->irq;
+       /* No need to check mem and irq here as intel_lpss_probe() does it for us */
+       info->mem = pci_resource_n(pdev, 0);
+       info->irq = pci_irq_vector(pdev, 0);
 
        if (pci_match_id(ignore_resource_conflicts_ids, pdev))
                info->ignore_resource_conflicts = true;
@@ -72,8 +82,6 @@ static void intel_lpss_pci_remove(struct pci_dev *pdev)
        intel_lpss_remove(&pdev->dev);
 }
 
-static INTEL_LPSS_PM_OPS(intel_lpss_pci_pm_ops);
-
 static const struct property_entry spt_spi_properties[] = {
        PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_SPT_SSP),
        { }
@@ -584,7 +592,7 @@ static struct pci_driver intel_lpss_pci_driver = {
        .probe = intel_lpss_pci_probe,
        .remove = intel_lpss_pci_remove,
        .driver = {
-               .pm = &intel_lpss_pci_pm_ops,
+               .pm = pm_ptr(&intel_lpss_pm_ops),
        },
 };
 
@@ -594,3 +602,4 @@ MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
 MODULE_DESCRIPTION("Intel LPSS PCI driver");
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(INTEL_LPSS);
index 9591b354072ad140dca799a622ac94f9e49a3c17..eff423f7dd28475052fd7efdad8014a87b806983 100644 (file)
  *          Jarkko Nikula <jarkko.nikula@linux.intel.com>
  */
 
-#include <linux/clk.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
 #include <linux/clkdev.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp_types.h>
 #include <linux/idr.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pm.h>
 #include <linux/pm_qos.h>
 #include <linux/pm_runtime.h>
-#include <linux/property.h>
-#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/types.h>
+
 #include <linux/io-64-nonatomic-lo-hi.h>
 
 #include <linux/dma/idma64.h>
 
 #include "intel-lpss.h"
 
+struct dentry;
+
 #define LPSS_DEV_OFFSET                0x000
 #define LPSS_DEV_SIZE          0x200
 #define LPSS_PRIV_OFFSET       0x200
@@ -301,8 +309,8 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
 
        snprintf(name, sizeof(name), "%s-div", devname);
        tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
+                                             0, lpss->priv, 1, 15, 16, 15,
                                              CLK_FRAC_DIVIDER_POWER_OF_TWO_PS,
-                                             lpss->priv, 1, 15, 16, 15, 0,
                                              NULL);
        if (IS_ERR(tmp))
                return PTR_ERR(tmp);
@@ -378,9 +386,12 @@ int intel_lpss_probe(struct device *dev,
        struct intel_lpss *lpss;
        int ret;
 
-       if (!info || !info->mem || info->irq <= 0)
+       if (!info || !info->mem)
                return -EINVAL;
 
+       if (info->irq < 0)
+               return info->irq;
+
        lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
        if (!lpss)
                return -ENOMEM;
@@ -405,7 +416,7 @@ int intel_lpss_probe(struct device *dev,
 
        intel_lpss_init_dev(lpss);
 
-       lpss->devid = ida_simple_get(&intel_lpss_devid_ida, 0, 0, GFP_KERNEL);
+       lpss->devid = ida_alloc(&intel_lpss_devid_ida, GFP_KERNEL);
        if (lpss->devid < 0)
                return lpss->devid;
 
@@ -442,11 +453,11 @@ err_remove_ltr:
        intel_lpss_unregister_clock(lpss);
 
 err_clk_register:
-       ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+       ida_free(&intel_lpss_devid_ida, lpss->devid);
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_lpss_probe);
+EXPORT_SYMBOL_NS_GPL(intel_lpss_probe, INTEL_LPSS);
 
 void intel_lpss_remove(struct device *dev)
 {
@@ -456,11 +467,10 @@ void intel_lpss_remove(struct device *dev)
        intel_lpss_debugfs_remove(lpss);
        intel_lpss_ltr_hide(lpss);
        intel_lpss_unregister_clock(lpss);
-       ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+       ida_free(&intel_lpss_devid_ida, lpss->devid);
 }
-EXPORT_SYMBOL_GPL(intel_lpss_remove);
+EXPORT_SYMBOL_NS_GPL(intel_lpss_remove, INTEL_LPSS);
 
-#ifdef CONFIG_PM
 static int resume_lpss_device(struct device *dev, void *data)
 {
        if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
@@ -469,7 +479,7 @@ static int resume_lpss_device(struct device *dev, void *data)
        return 0;
 }
 
-int intel_lpss_prepare(struct device *dev)
+static int intel_lpss_prepare(struct device *dev)
 {
        /*
         * Resume both child devices before entering system sleep. This
@@ -478,9 +488,8 @@ int intel_lpss_prepare(struct device *dev)
        device_for_each_child_reverse(dev, NULL, resume_lpss_device);
        return 0;
 }
-EXPORT_SYMBOL_GPL(intel_lpss_prepare);
 
-int intel_lpss_suspend(struct device *dev)
+static int intel_lpss_suspend(struct device *dev)
 {
        struct intel_lpss *lpss = dev_get_drvdata(dev);
        unsigned int i;
@@ -499,9 +508,8 @@ int intel_lpss_suspend(struct device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(intel_lpss_suspend);
 
-int intel_lpss_resume(struct device *dev)
+static int intel_lpss_resume(struct device *dev)
 {
        struct intel_lpss *lpss = dev_get_drvdata(dev);
        unsigned int i;
@@ -514,8 +522,12 @@ int intel_lpss_resume(struct device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(intel_lpss_resume);
-#endif
+
+EXPORT_NS_GPL_DEV_PM_OPS(intel_lpss_pm_ops, INTEL_LPSS) = {
+       .prepare = pm_sleep_ptr(&intel_lpss_prepare),
+       LATE_SYSTEM_SLEEP_PM_OPS(intel_lpss_suspend, intel_lpss_resume)
+       RUNTIME_PM_OPS(intel_lpss_suspend, intel_lpss_resume, NULL)
+};
 
 static int __init intel_lpss_init(void)
 {
index 062ce95b68b9ade80331f5b0cbb8b977e6ba79fa..c1d72b117ed5e6e27faae391de1f604cf5f4b139 100644 (file)
@@ -30,32 +30,6 @@ int intel_lpss_probe(struct device *dev,
                     const struct intel_lpss_platform_info *info);
 void intel_lpss_remove(struct device *dev);
 
-#ifdef CONFIG_PM
-int intel_lpss_prepare(struct device *dev);
-int intel_lpss_suspend(struct device *dev);
-int intel_lpss_resume(struct device *dev);
-
-#ifdef CONFIG_PM_SLEEP
-#define INTEL_LPSS_SLEEP_PM_OPS                        \
-       .prepare = intel_lpss_prepare,          \
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_lpss_suspend, intel_lpss_resume)
-#else
-#define INTEL_LPSS_SLEEP_PM_OPS
-#endif
-
-#define INTEL_LPSS_RUNTIME_PM_OPS              \
-       .runtime_suspend = intel_lpss_suspend,  \
-       .runtime_resume = intel_lpss_resume,
-
-#else /* !CONFIG_PM */
-#define INTEL_LPSS_SLEEP_PM_OPS
-#define INTEL_LPSS_RUNTIME_PM_OPS
-#endif /* CONFIG_PM */
-
-#define INTEL_LPSS_PM_OPS(name)                        \
-const struct dev_pm_ops name = {               \
-       INTEL_LPSS_SLEEP_PM_OPS                 \
-       INTEL_LPSS_RUNTIME_PM_OPS               \
-}
+extern const struct dev_pm_ops intel_lpss_pm_ops;
 
 #endif /* __MFD_INTEL_LPSS_H */
index 33c6cfe9fe42d23d8943447aa6915b43aa357f7a..67af36a3891361cd29b573acf23a8453aeffa8af 100644 (file)
@@ -535,7 +535,7 @@ static int kempld_probe(struct platform_device *pdev)
        return kempld_detect_device(pld);
 }
 
-static int kempld_remove(struct platform_device *pdev)
+static void kempld_remove(struct platform_device *pdev)
 {
        struct kempld_device_data *pld = platform_get_drvdata(pdev);
        const struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
@@ -544,8 +544,6 @@ static int kempld_remove(struct platform_device *pdev)
 
        mfd_remove_devices(&pdev->dev);
        pdata->release_hardware_mutex(pld);
-
-       return 0;
 }
 
 #ifdef CONFIG_ACPI
@@ -563,7 +561,7 @@ static struct platform_driver kempld_driver = {
                .acpi_match_table = ACPI_PTR(kempld_acpi_table),
        },
        .probe          = kempld_probe,
-       .remove         = kempld_remove,
+       .remove_new     = kempld_remove,
 };
 
 static const struct dmi_system_id kempld_dmi_table[] __initconst = {
index 1c9831b78cf9cf1157a2ac5ef11f8e9a8ca22d8e..3883e472b739dea5cca8307b8cf1a5354d6267c0 100644 (file)
@@ -232,7 +232,7 @@ static int mcp_sa11x0_probe(struct platform_device *dev)
        return ret;
 }
 
-static int mcp_sa11x0_remove(struct platform_device *dev)
+static void mcp_sa11x0_remove(struct platform_device *dev)
 {
        struct mcp *mcp = platform_get_drvdata(dev);
        struct mcp_sa11x0 *m = priv(mcp);
@@ -251,8 +251,6 @@ static int mcp_sa11x0_remove(struct platform_device *dev)
        mcp_host_free(mcp);
        release_mem_region(mem1->start, resource_size(mem1));
        release_mem_region(mem0->start, resource_size(mem0));
-
-       return 0;
 }
 
 static int mcp_sa11x0_suspend(struct device *dev)
@@ -288,7 +286,7 @@ static const struct dev_pm_ops mcp_sa11x0_pm_ops = {
 
 static struct platform_driver mcp_sa11x0_driver = {
        .probe          = mcp_sa11x0_probe,
-       .remove         = mcp_sa11x0_remove,
+       .remove_new     = mcp_sa11x0_remove,
        .driver         = {
                .name   = DRIVER_NAME,
                .pm     = pm_sleep_ptr(&mcp_sa11x0_pm_ops),
index ec1b356562b9ad17ac96971be17318803047dcb7..73893890b50a0febe2707db22c4bff309c461f0d 100644 (file)
@@ -230,13 +230,11 @@ err_clk:
        return ret;
 }
 
-static int mxs_lradc_remove(struct platform_device *pdev)
+static void mxs_lradc_remove(struct platform_device *pdev)
 {
        struct mxs_lradc *lradc = platform_get_drvdata(pdev);
 
        clk_disable_unprepare(lradc->clk);
-
-       return 0;
 }
 
 static struct platform_driver mxs_lradc_driver = {
@@ -245,7 +243,7 @@ static struct platform_driver mxs_lradc_driver = {
                .of_match_table = mxs_lradc_dt_ids,
        },
        .probe = mxs_lradc_probe,
-       .remove = mxs_lradc_remove,
+       .remove_new = mxs_lradc_remove,
 };
 module_platform_driver(mxs_lradc_driver);
 
index 78f1bb55dbc0fa06fbac524b3e9912e6aa73e126..ebc62033db169df831015ba0f22f0ec4aef001e1 100644 (file)
@@ -816,13 +816,12 @@ static int usbhs_omap_remove_child(struct device *dev, void *data)
  *
  * Reverses the effect of usbhs_omap_probe().
  */
-static int usbhs_omap_remove(struct platform_device *pdev)
+static void usbhs_omap_remove(struct platform_device *pdev)
 {
        pm_runtime_disable(&pdev->dev);
 
        /* remove children */
        device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
-       return 0;
 }
 
 static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
@@ -845,7 +844,7 @@ static struct platform_driver usbhs_omap_driver = {
                .of_match_table = usbhs_omap_dt_ids,
        },
        .probe          = usbhs_omap_probe,
-       .remove         = usbhs_omap_remove,
+       .remove_new     = usbhs_omap_remove,
 };
 
 MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
index 906353735c7820965fbfef36c3cb4c45c6f37ea5..b6303ddb013b0aa7372d612d5a02d5f37650beab 100644 (file)
@@ -270,7 +270,7 @@ static int usbtll_omap_probe(struct platform_device *pdev)
  *
  * Reverses the effect of usbtll_omap_probe().
  */
-static int usbtll_omap_remove(struct platform_device *pdev)
+static void usbtll_omap_remove(struct platform_device *pdev)
 {
        struct usbtll_omap *tll = platform_get_drvdata(pdev);
        int i;
@@ -287,7 +287,6 @@ static int usbtll_omap_remove(struct platform_device *pdev)
        }
 
        pm_runtime_disable(&pdev->dev);
-       return 0;
 }
 
 static const struct of_device_id usbtll_omap_dt_ids[] = {
@@ -303,7 +302,7 @@ static struct platform_driver usbtll_omap_driver = {
                .of_match_table = usbtll_omap_dt_ids,
        },
        .probe          = usbtll_omap_probe,
-       .remove         = usbtll_omap_remove,
+       .remove_new     = usbtll_omap_remove,
 };
 
 int omap_tll_init(struct usbhs_omap_platform_data *pdata)
index 191b1bc6141c2fa4f2526a6b847d836f0b664664..ab55906f91f98a8eb3cc54e5e9bdaf3e52910535 100644 (file)
@@ -218,7 +218,7 @@ static int pcf50633_adc_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int pcf50633_adc_remove(struct platform_device *pdev)
+static void pcf50633_adc_remove(struct platform_device *pdev)
 {
        struct pcf50633_adc *adc = platform_get_drvdata(pdev);
        int i, head;
@@ -236,8 +236,6 @@ static int pcf50633_adc_remove(struct platform_device *pdev)
                kfree(adc->queue[i]);
 
        mutex_unlock(&adc->queue_mutex);
-
-       return 0;
 }
 
 static struct platform_driver pcf50633_adc_driver = {
@@ -245,7 +243,7 @@ static struct platform_driver pcf50633_adc_driver = {
                .name = "pcf50633-adc",
        },
        .probe = pcf50633_adc_probe,
-       .remove = pcf50633_adc_remove,
+       .remove_new = pcf50633_adc_remove,
 };
 
 module_platform_driver(pcf50633_adc_driver);
index 07c531bd1236e5ca2bbd401c5c4cdd4b1656288c..8b6285f687da55dc9cab66a9bb17bcd4c380bb37 100644 (file)
@@ -585,19 +585,17 @@ static int pm8xxx_remove_child(struct device *dev, void *unused)
        return 0;
 }
 
-static int pm8xxx_remove(struct platform_device *pdev)
+static void pm8xxx_remove(struct platform_device *pdev)
 {
        struct pm_irq_chip *chip = platform_get_drvdata(pdev);
 
        device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
        irq_domain_remove(chip->irqdomain);
-
-       return 0;
 }
 
 static struct platform_driver pm8xxx_driver = {
        .probe          = pm8xxx_probe,
-       .remove         = pm8xxx_remove,
+       .remove_new     = pm8xxx_remove,
        .driver         = {
                .name   = "pm8xxx-core",
                .of_match_table = pm8xxx_id_table,
index 4549fa9f7d4bcab2167b6f53a21c251ba7004507..eab5bf6cff105a05f35163fbb67d1b7a4d3abeb7 100644 (file)
@@ -53,6 +53,7 @@ static const struct of_device_id pmic_spmi_id_table[] = {
        { .compatible = "qcom,pm8901", .data = N_USIDS(2) },
        { .compatible = "qcom,pm8909", .data = N_USIDS(2) },
        { .compatible = "qcom,pm8916", .data = N_USIDS(2) },
+       { .compatible = "qcom,pm8937", .data = N_USIDS(2) },
        { .compatible = "qcom,pm8941", .data = N_USIDS(2) },
        { .compatible = "qcom,pm8950", .data = N_USIDS(2) },
        { .compatible = "qcom,pm8994", .data = N_USIDS(2) },
index f62422740de2c87471e212da1e7e241416f04710..6ff84b2600c5438d5d7cf9387d2280e1a4a62d64 100644 (file)
@@ -471,17 +471,17 @@ static void rave_sp_receive_frame(struct rave_sp *sp,
                rave_sp_receive_reply(sp, data, length);
 }
 
-static int rave_sp_receive_buf(struct serdev_device *serdev,
-                              const unsigned char *buf, size_t size)
+static ssize_t rave_sp_receive_buf(struct serdev_device *serdev,
+                                  const u8 *buf, size_t size)
 {
        struct device *dev = &serdev->dev;
        struct rave_sp *sp = dev_get_drvdata(dev);
        struct rave_sp_deframer *deframer = &sp->deframer;
-       const unsigned char *src = buf;
-       const unsigned char *end = buf + size;
+       const u8 *src = buf;
+       const u8 *end = buf + size;
 
        while (src < end) {
-               const unsigned char byte = *src++;
+               const u8 byte = *src++;
 
                switch (deframer->state) {
                case RAVE_SP_EXPECT_SOF:
index c47164a3ec1da3c90ca2b2e2b6f3ade9a9997420..b1ffc3b9e2be70a4ccfb6928700d1c9d83309813 100644 (file)
@@ -53,76 +53,68 @@ static const struct resource rk817_charger_resources[] = {
 };
 
 static const struct mfd_cell rk805s[] = {
-       { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
-       { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
-       { .name = "rk805-pinctrl", .id = PLATFORM_DEVID_NONE, },
+       { .name = "rk808-clkout", },
+       { .name = "rk808-regulator", },
+       { .name = "rk805-pinctrl", },
        {
                .name = "rk808-rtc",
                .num_resources = ARRAY_SIZE(rtc_resources),
                .resources = &rtc_resources[0],
-               .id = PLATFORM_DEVID_NONE,
        },
        {       .name = "rk805-pwrkey",
                .num_resources = ARRAY_SIZE(rk805_key_resources),
                .resources = &rk805_key_resources[0],
-               .id = PLATFORM_DEVID_NONE,
        },
 };
 
 static const struct mfd_cell rk806s[] = {
-       { .name = "rk805-pinctrl", .id = PLATFORM_DEVID_AUTO, },
-       { .name = "rk808-regulator", .id = PLATFORM_DEVID_AUTO, },
+       { .name = "rk805-pinctrl", },
+       { .name = "rk808-regulator", },
        {
                .name = "rk805-pwrkey",
                .resources = rk806_pwrkey_resources,
                .num_resources = ARRAY_SIZE(rk806_pwrkey_resources),
-               .id = PLATFORM_DEVID_AUTO,
        },
 };
 
 static const struct mfd_cell rk808s[] = {
-       { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
-       { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
+       { .name = "rk808-clkout", },
+       { .name = "rk808-regulator", },
        {
                .name = "rk808-rtc",
                .num_resources = ARRAY_SIZE(rtc_resources),
                .resources = rtc_resources,
-               .id = PLATFORM_DEVID_NONE,
        },
 };
 
 static const struct mfd_cell rk817s[] = {
-       { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
-       { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
+       { .name = "rk808-clkout", },
+       { .name = "rk808-regulator", },
        {
                .name = "rk805-pwrkey",
                .num_resources = ARRAY_SIZE(rk817_pwrkey_resources),
                .resources = &rk817_pwrkey_resources[0],
-               .id = PLATFORM_DEVID_NONE,
        },
        {
                .name = "rk808-rtc",
                .num_resources = ARRAY_SIZE(rk817_rtc_resources),
                .resources = &rk817_rtc_resources[0],
-               .id = PLATFORM_DEVID_NONE,
        },
-       { .name = "rk817-codec", .id = PLATFORM_DEVID_NONE, },
+       { .name = "rk817-codec", },
        {
                .name = "rk817-charger",
                .num_resources = ARRAY_SIZE(rk817_charger_resources),
                .resources = &rk817_charger_resources[0],
-               .id = PLATFORM_DEVID_NONE,
        },
 };
 
 static const struct mfd_cell rk818s[] = {
-       { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
-       { .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
+       { .name = "rk808-clkout", },
+       { .name = "rk808-regulator", },
        {
                .name = "rk808-rtc",
                .num_resources = ARRAY_SIZE(rtc_resources),
                .resources = rtc_resources,
-               .id = PLATFORM_DEVID_NONE,
        },
 };
 
@@ -684,7 +676,7 @@ int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap
                                             pre_init_reg[i].addr);
        }
 
-       ret = devm_mfd_add_devices(dev, 0, cells, nr_cells, NULL, 0,
+       ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cells, nr_cells, NULL, 0,
                              regmap_irq_get_domain(rk808->irq_data));
        if (ret)
                return dev_err_probe(dev, ret, "failed to add MFD devices\n");
index 28027982cf6939a277c15ca3bbca0c8077cf9ed3..b3592982a83b55f68948cfc54e499f2f9bd6e1c0 100644 (file)
@@ -1667,7 +1667,7 @@ static void sm501_pci_remove(struct pci_dev *dev)
        pci_disable_device(dev);
 }
 
-static int sm501_plat_remove(struct platform_device *dev)
+static void sm501_plat_remove(struct platform_device *dev)
 {
        struct sm501_devdata *sm = platform_get_drvdata(dev);
 
@@ -1675,8 +1675,6 @@ static int sm501_plat_remove(struct platform_device *dev)
        iounmap(sm->regs);
 
        release_mem_region(sm->io_res->start, 0x100);
-
-       return 0;
 }
 
 static const struct pci_device_id sm501_pci_tbl[] = {
@@ -1707,7 +1705,7 @@ static struct platform_driver sm501_plat_driver = {
                .of_match_table = of_sm501_match_tbl,
        },
        .probe          = sm501_plat_probe,
-       .remove         = sm501_plat_remove,
+       .remove_new     = sm501_plat_remove,
        .suspend        = pm_sleep_ptr(sm501_plat_suspend),
        .resume         = pm_sleep_ptr(sm501_plat_resume),
 };
index a656a1c186a896b200a7d8c5518162d315439fd6..9fd13d88950c67a0539b9caa2420751f7e7899db 100644 (file)
@@ -306,7 +306,7 @@ static int stm32_timers_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int stm32_timers_remove(struct platform_device *pdev)
+static void stm32_timers_remove(struct platform_device *pdev)
 {
        struct stm32_timers *ddata = platform_get_drvdata(pdev);
 
@@ -316,8 +316,6 @@ static int stm32_timers_remove(struct platform_device *pdev)
         */
        of_platform_depopulate(&pdev->dev);
        stm32_timers_dma_remove(&pdev->dev, ddata);
-
-       return 0;
 }
 
 static const struct of_device_id stm32_timers_of_match[] = {
@@ -328,7 +326,7 @@ MODULE_DEVICE_TABLE(of, stm32_timers_of_match);
 
 static struct platform_driver stm32_timers_driver = {
        .probe = stm32_timers_probe,
-       .remove = stm32_timers_remove,
+       .remove_new = stm32_timers_remove,
        .driver = {
                .name = "stm32-timers",
                .of_match_table = stm32_timers_of_match,
index 57b29c3251312bff0d34b6ca3dad1ef19f56c72a..c9550368d9ea5e1aee88170f50b1bf6c04b09d37 100644 (file)
@@ -105,6 +105,10 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
        }
 
        syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start);
+       if (!syscon_config.name) {
+               ret = -ENOMEM;
+               goto err_regmap;
+       }
        syscon_config.reg_stride = reg_io_width;
        syscon_config.val_bits = reg_io_width * 8;
        syscon_config.max_register = resource_size(&res) - reg_io_width;
index b88eb70c17b353c89907766682ed072c5bff526e..4bbd542d753e4d89272924c5e097c0a2a6d885cf 100644 (file)
@@ -298,7 +298,7 @@ err_disable_clk:
        return err;
 }
 
-static int ti_tscadc_remove(struct platform_device *pdev)
+static void ti_tscadc_remove(struct platform_device *pdev)
 {
        struct ti_tscadc_dev *tscadc = platform_get_drvdata(pdev);
 
@@ -308,8 +308,6 @@ static int ti_tscadc_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 
        mfd_remove_devices(tscadc->dev);
-
-       return 0;
 }
 
 static int __maybe_unused ti_tscadc_can_wakeup(struct device *dev, void *data)
@@ -381,7 +379,7 @@ static struct platform_driver ti_tscadc_driver = {
                .of_match_table = ti_tscadc_dt_ids,
        },
        .probe  = ti_tscadc_probe,
-       .remove = ti_tscadc_remove,
+       .remove_new = ti_tscadc_remove,
 
 };
 
index 152179ee11ca8914604b99f048b4dbc7cc7747f9..fdce81b33f603804ca2a0100a898dc66351fbe01 100644 (file)
@@ -36,6 +36,7 @@ static const struct regmap_config tps65086_regmap_config = {
        .val_bits = 8,
        .cache_type = REGCACHE_MAPLE,
        .volatile_table = &tps65086_volatile_table,
+       .max_register = TPS65086_OC_STATUS,
 };
 
 static const struct regmap_irq tps65086_irqs[] = {
index 8f4210075913c7d746e7ca10ec59eab5b8ef2675..f206a9c50e9d84ac71e86a6cc3294008093a7809 100644 (file)
@@ -140,15 +140,13 @@ static int tps65911_comparator_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int tps65911_comparator_remove(struct platform_device *pdev)
+static void tps65911_comparator_remove(struct platform_device *pdev)
 {
        struct tps65910 *tps65910;
 
        tps65910 = dev_get_drvdata(pdev->dev.parent);
        device_remove_file(&pdev->dev, &dev_attr_comp2_threshold);
        device_remove_file(&pdev->dev, &dev_attr_comp1_threshold);
-
-       return 0;
 }
 
 static struct platform_driver tps65911_comparator_driver = {
@@ -156,7 +154,7 @@ static struct platform_driver tps65911_comparator_driver = {
                .name = "tps65911-comparator",
        },
        .probe = tps65911_comparator_probe,
-       .remove = tps65911_comparator_remove,
+       .remove_new = tps65911_comparator_remove,
 };
 
 static int __init tps65911_comparator_init(void)
index 0fb9c5cf213a477872d325ed7c242cc492cff718..783ee59901e86b988a9eed3d2bd78ce3494671e5 100644 (file)
@@ -433,6 +433,9 @@ int tps6594_device_init(struct tps6594 *tps, bool enable_crc)
        tps6594_irq_chip.name = devm_kasprintf(dev, GFP_KERNEL, "%s-%ld-0x%02x",
                                               dev->driver->name, tps->chip_id, tps->reg);
 
+       if (!tps6594_irq_chip.name)
+               return -ENOMEM;
+
        ret = devm_regmap_add_irq_chip(dev, tps->regmap, tps->irq, IRQF_SHARED | IRQF_ONESHOT,
                                       0, &tps6594_irq_chip, &tps->irq_data);
        if (ret)
index 88002f8941e5e5342db15f22181a1ecb8e51a148..d436ddf661dab8ad3f73f4e19061cb0e15631aec 100644 (file)
@@ -258,12 +258,10 @@ static int twl4030_audio_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int twl4030_audio_remove(struct platform_device *pdev)
+static void twl4030_audio_remove(struct platform_device *pdev)
 {
        mfd_remove_devices(&pdev->dev);
        twl4030_audio_dev = NULL;
-
-       return 0;
 }
 
 static const struct of_device_id twl4030_audio_of_match[] = {
@@ -278,7 +276,7 @@ static struct platform_driver twl4030_audio_driver = {
                .of_match_table = twl4030_audio_of_match,
        },
        .probe          = twl4030_audio_probe,
-       .remove         = twl4030_audio_remove,
+       .remove_new     = twl4030_audio_remove,
 };
 
 module_platform_driver(twl4030_audio_driver);
index f9fce8408c2cf401a2d40adc3048515335abf610..3c03681c124c044ae942aee5ccb223ce70ded8bc 100644 (file)
 #include <linux/kthread.h>
 #include <linux/mfd/twl.h>
 #include <linux/platform_device.h>
-#include <linux/property.h>
 #include <linux/suspend.h>
 #include <linux/of.h>
 #include <linux/irqdomain.h>
+#include <linux/of_device.h>
 
 #include "twl-core.h"
 
@@ -368,10 +368,10 @@ int twl6030_init_irq(struct device *dev, int irq_num)
        int                     nr_irqs;
        int                     status;
        u8                      mask[3];
-       const int               *irq_tbl;
+       const struct of_device_id *of_id;
 
-       irq_tbl = device_get_match_data(dev);
-       if (!irq_tbl) {
+       of_id = of_match_device(twl6030_of_match, dev);
+       if (!of_id || !of_id->data) {
                dev_err(dev, "Unknown TWL device model\n");
                return -EINVAL;
        }
@@ -409,7 +409,7 @@ int twl6030_init_irq(struct device *dev, int irq_num)
 
        twl6030_irq->pm_nb.notifier_call = twl6030_irq_pm_notifier;
        atomic_set(&twl6030_irq->wakeirqs, 0);
-       twl6030_irq->irq_mapping_tbl = irq_tbl;
+       twl6030_irq->irq_mapping_tbl = of_id->data;
 
        twl6030_irq->irq_domain =
                irq_domain_add_linear(node, nr_irqs,
index f37c4b8380ae6c847b9c62f38ca56614852f03e3..4fb291f0bf7c89863868c1fc066de3c443ebf377 100644 (file)
@@ -562,6 +562,18 @@ config TPS6594_PFSM
          This driver can also be built as a module.  If so, the module
          will be called tps6594-pfsm.
 
+config NSM
+       tristate "Nitro (Enclaves) Security Module support"
+       depends on VIRTIO
+       select HW_RANDOM
+       help
+         This driver provides support for the Nitro Security Module
+         in AWS EC2 Nitro based Enclaves. The driver exposes a /dev/nsm
+         device user space can use to communicate with the hypervisor.
+
+         To compile this driver as a module, choose M here.
+         The module will be called nsm.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
index f2a4d1ff65d46a2a014b6e40ed737d26a68a25d0..ea6ea5bbbc9c62c4620ba59a65b3e7d776019f39 100644 (file)
@@ -67,3 +67,4 @@ obj-$(CONFIG_TMR_MANAGER)      += xilinx_tmr_manager.o
 obj-$(CONFIG_TMR_INJECT)       += xilinx_tmr_inject.o
 obj-$(CONFIG_TPS6594_ESM)      += tps6594-esm.o
 obj-$(CONFIG_TPS6594_PFSM)     += tps6594-pfsm.o
+obj-$(CONFIG_NSM)              += nsm.o
index 2bce835ca43efcf8a55a7577f7847ecc34ddc9e2..59bab76ff0a9d6835ec2f34bb44b21b755b1ce22 100644 (file)
@@ -64,9 +64,9 @@ static void bcm_vk_tty_wq_handler(struct work_struct *work)
        struct bcm_vk_tty *vktty;
        int card_status;
        int count;
-       unsigned char c;
        int i;
        int wr;
+       u8 c;
 
        card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS);
        if (BCM_VK_INTF_IS_DOWN(card_status))
@@ -192,7 +192,7 @@ static ssize_t bcm_vk_tty_write(struct tty_struct *tty, const u8 *buffer,
        int index;
        struct bcm_vk *vk;
        struct bcm_vk_tty *vktty;
-       int i;
+       size_t i;
 
        index = tty->index;
        vk = dev_get_drvdata(tty->dev);
index 895128475d832c47e7bd113dafded7d2516185ce..1e1bca6b0b22ca7feb988d31c4e32f4445c7d8f8 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_MISC_ALCOR_PCI)   += alcor_pci.o
 obj-$(CONFIG_MISC_RTSX_PCI)    += rtsx_pci.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o rts5264.o
 obj-$(CONFIG_MISC_RTSX_USB)    += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rts5264.c b/drivers/misc/cardreader/rts5264.c
new file mode 100644 (file)
index 0000000..8be4ed7
--- /dev/null
@@ -0,0 +1,886 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ *   Ricky Wu <ricky_wu@realtek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5264.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5264_get_ic_version(struct rtsx_pcr *pcr)
+{
+       u8 val;
+
+       rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+       return val & 0x0F;
+}
+
+static void rts5264_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+       u8 driving_3v3[4][3] = {
+               {0x88, 0x88, 0x88},
+               {0x77, 0x77, 0x77},
+               {0x99, 0x99, 0x99},
+               {0x66, 0x66, 0x66},
+       };
+       u8 driving_1v8[4][3] = {
+               {0x99, 0x99, 0x99},
+               {0x77, 0x77, 0x77},
+               {0xBB, 0xBB, 0xBB},
+               {0x65, 0x65, 0x65},
+       };
+       u8 (*driving)[3], drive_sel;
+
+       if (voltage == OUTPUT_3V3) {
+               driving = driving_3v3;
+               drive_sel = pcr->sd30_drive_sel_3v3;
+       } else {
+               driving = driving_1v8;
+               drive_sel = pcr->sd30_drive_sel_1v8;
+       }
+
+       rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
+                       0xFF, driving[drive_sel][0]);
+       rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+                       0xFF, driving[drive_sel][1]);
+       rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
+                       0xFF, driving[drive_sel][2]);
+}
+
+static void rts5264_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
+{
+       /* Set relink_time to 0 */
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+                               RELINK_TIME_MASK, 0);
+
+       if (pm_state == HOST_ENTER_S3)
+               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+                                       D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+       if (!runtime) {
+               rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG1,
+                               CD_RESUME_EN_MASK, 0);
+               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+               rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+                               FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+       } else {
+               rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+                               FORCE_PM_CONTROL | FORCE_PM_VALUE, 0);
+               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x01);
+               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+                                       D3_DELINK_MODE_EN, 0);
+               rtsx_pci_write_register(pcr, RTS5264_FW_CTL,
+                               RTS5264_INFORM_RTD3_COLD, RTS5264_INFORM_RTD3_COLD);
+               rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+                               RTS5264_FORCE_PRSNT_LOW, RTS5264_FORCE_PRSNT_LOW);
+       }
+
+       rtsx_pci_write_register(pcr, RTS5264_REG_FPDCTL,
+               SSC_POWER_DOWN, SSC_POWER_DOWN);
+}
+
+static int rts5264_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+               LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rts5264_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+               LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5264_turn_on_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, GPIO_CTL,
+               0x02, 0x02);
+}
+
+static int rts5264_turn_off_led(struct rtsx_pcr *pcr)
+{
+       return rtsx_pci_write_register(pcr, GPIO_CTL,
+               0x02, 0x00);
+}
+
+/* SD Pull Control Enable:
+ *     SD_DAT[3:0] ==> pull up
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull up
+ *     SD_CMD      ==> pull up
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5264_sd_pull_ctl_enable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+       0,
+};
+
+/* SD Pull Control Disable:
+ *     SD_DAT[3:0] ==> pull down
+ *     SD_CD       ==> pull up
+ *     SD_WP       ==> pull down
+ *     SD_CMD      ==> pull down
+ *     SD_CLK      ==> pull down
+ */
+static const u32 rts5264_sd_pull_ctl_disable_tbl[] = {
+       RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+       RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+       0,
+};
+
+static int rts5264_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+               | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+       rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+       rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+                       CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+       rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+       return 0;
+}
+
+static int rts5264_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+
+       if (option->ocp_en)
+               rtsx_pci_enable_ocp(pcr);
+
+       rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
+               CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD);
+
+       rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG1,
+                       RTS5264_LDO1_TUNE_MASK, RTS5264_LDO1_33);
+       rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+                       RTS5264_LDO1_POWERON, RTS5264_LDO1_POWERON);
+       rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+                       RTS5264_LDO3318_POWERON, RTS5264_LDO3318_POWERON);
+
+       msleep(20);
+
+       rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+       /* Initialize SD_CFG1 register */
+       rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+                       SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
+       rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+                       0xFF, SD20_RX_POS_EDGE);
+       rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+       rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+                       SD_STOP | SD_CLR_ERR);
+
+       /* Reset SD_CFG3 register */
+       rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+       rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+                       SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+                       SD30_CLK_STOP_CFG0, 0);
+
+       if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+           pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+               rts5264_sd_set_sample_push_timing_sd30(pcr);
+
+       return 0;
+}
+
+static int rts5264_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       rtsx_pci_write_register(pcr, RTS5264_CARD_PWR_CTL,
+                       RTS5264_PUPDC, RTS5264_PUPDC);
+
+       switch (voltage) {
+       case OUTPUT_3V3:
+               rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+                               RTS5264_TUNE_REF_LDO3318, RTS5264_TUNE_REF_LDO3318);
+               rtsx_pci_write_register(pcr, RTS5264_DV3318_CFG,
+                               RTS5264_DV3318_TUNE_MASK, RTS5264_DV3318_33);
+               rtsx_pci_write_register(pcr, SD_PAD_CTL,
+                               SD_IO_USING_1V8, 0);
+               break;
+       case OUTPUT_1V8:
+               rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+                               RTS5264_TUNE_REF_LDO3318, RTS5264_TUNE_REF_LDO3318_DFT);
+               rtsx_pci_write_register(pcr, RTS5264_DV3318_CFG,
+                               RTS5264_DV3318_TUNE_MASK, RTS5264_DV3318_18);
+               rtsx_pci_write_register(pcr, SD_PAD_CTL,
+                               SD_IO_USING_1V8, SD_IO_USING_1V8);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* set pad drive */
+       rts5264_fill_driving(pcr, voltage);
+
+       return 0;
+}
+
+static void rts5264_stop_cmd(struct rtsx_pcr *pcr)
+{
+       rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+       rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+       rtsx_pci_write_register(pcr, DMACTL, DMA_RST, DMA_RST);
+       rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5264_card_before_power_off(struct rtsx_pcr *pcr)
+{
+       rts5264_stop_cmd(pcr);
+       rts5264_switch_output_voltage(pcr, OUTPUT_3V3);
+}
+
+static int rts5264_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+       int err = 0;
+
+       rts5264_card_before_power_off(pcr);
+       err = rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+                               RTS5264_LDO_POWERON_MASK, 0);
+
+       rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
+               CFG_SD_POW_AUTO_PD, 0);
+       if (pcr->option.ocp_en)
+               rtsx_pci_disable_ocp(pcr);
+
+       return err;
+}
+
+static void rts5264_enable_ocp(struct rtsx_pcr *pcr)
+{
+       u8 mask = 0;
+       u8 val = 0;
+
+       rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+                       RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN,
+                       RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN);
+       rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+                       RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN,
+                       RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN);
+       rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+                       RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN,
+                       RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN);
+       rtsx_pci_write_register(pcr, RTS5264_OVP_DET,
+                       RTS5264_POW_VDET, RTS5264_POW_VDET);
+
+       mask = SD_OCP_INT_EN | SD_DETECT_EN;
+       mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+       val = mask;
+       rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+
+       mask = SD_VDD3_OCP_INT_EN | SD_VDD3_DETECT_EN;
+       val = mask;
+       rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, mask, val);
+
+       mask = RTS5264_OVP_INT_EN | RTS5264_OVP_DETECT_EN;
+       val = mask;
+       rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, mask, val);
+}
+
+static void rts5264_disable_ocp(struct rtsx_pcr *pcr)
+{
+       u8 mask = 0;
+
+       mask = SD_OCP_INT_EN | SD_DETECT_EN;
+       mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+       rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+
+       mask = SD_VDD3_OCP_INT_EN | SD_VDD3_DETECT_EN;
+       rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, mask, 0);
+
+       mask = RTS5264_OVP_INT_EN | RTS5264_OVP_DETECT_EN;
+       rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, mask, 0);
+
+       rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+                       RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, 0);
+       rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+                       RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, 0);
+       rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+                       RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, 0);
+       rtsx_pci_write_register(pcr, RTS5264_OVP_DET, RTS5264_POW_VDET, 0);
+}
+
+static void rts5264_init_ocp(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+
+       if (option->ocp_en) {
+               u8 mask, val;
+
+               rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+                       RTS5264_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
+               rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+                       RTS5264_LDO1_OCP_LMT_THD_MASK,
+                       RTS5264_LDO1_LMT_THD_2000);
+
+               rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+                       RTS5264_LDO2_OCP_THD_MASK, RTS5264_LDO2_OCP_THD_950);
+               rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+                       RTS5264_LDO2_OCP_LMT_THD_MASK,
+                       RTS5264_LDO2_LMT_THD_2000);
+
+               rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+                       RTS5264_LDO3_OCP_THD_MASK, RTS5264_LDO3_OCP_THD_710);
+               rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+                       RTS5264_LDO3_OCP_LMT_THD_MASK,
+                       RTS5264_LDO3_LMT_THD_1500);
+
+               rtsx_pci_write_register(pcr, RTS5264_OVP_DET,
+                       RTS5264_TUNE_VROV_MASK, RTS5264_TUNE_VROV_1V6);
+
+               mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+               val = pcr->hw_param.ocp_glitch;
+               rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+       } else {
+               rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+                       RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, 0);
+               rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+                       RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, 0);
+               rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+                       RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, 0);
+               rtsx_pci_write_register(pcr, RTS5264_OVP_DET,
+                       RTS5264_POW_VDET, 0);
+       }
+}
+
+static int rts5264_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val)
+{
+       return rtsx_pci_read_register(pcr, RTS5264_OCP_VDD3_STS, val);
+}
+
+static int rts5264_get_ovpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+       return rtsx_pci_read_register(pcr, RTS5264_OVP_STS, val);
+}
+
+static void rts5264_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+       u8 mask = 0;
+       u8 val = 0;
+
+       mask = SD_OCP_INT_CLR | SD_OC_CLR;
+       mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+       val = mask;
+       rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+       rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL,
+               SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR,
+               SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR);
+       rtsx_pci_write_register(pcr, RTS5264_OVP_CTL,
+               RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR,
+               RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR);
+
+       udelay(1000);
+
+       rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+       rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL,
+               SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR, 0);
+       rtsx_pci_write_register(pcr, RTS5264_OVP_CTL,
+               RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR, 0);
+}
+
+static void rts5264_process_ocp(struct rtsx_pcr *pcr)
+{
+       if (!pcr->option.ocp_en)
+               return;
+
+       rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+       rts5264_get_ocpstat2(pcr, &pcr->ocp_stat2);
+       rts5264_get_ovpstat(pcr, &pcr->ovp_stat);
+
+       if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER | SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+                       (pcr->ocp_stat2 & (SD_VDD3_OC_NOW | SD_VDD3_OC_EVER)) ||
+                       (pcr->ovp_stat & (RTS5264_OVP_NOW | RTS5264_OVP_EVER))) {
+               rts5264_clear_ocpstat(pcr);
+               rts5264_card_power_off(pcr, RTSX_SD_CARD);
+               rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+               pcr->ocp_stat = 0;
+               pcr->ocp_stat2 = 0;
+               pcr->ovp_stat = 0;
+       }
+}
+
+static void rts5264_init_from_hw(struct rtsx_pcr *pcr)
+{
+       struct pci_dev *pdev = pcr->pci;
+       u32 lval1, lval2, i;
+       u16 setting_reg1, setting_reg2;
+       u8 valid, efuse_valid, tmp;
+
+       rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+               REG_EFUSE_POR | REG_EFUSE_POWER_MASK,
+               REG_EFUSE_POR | REG_EFUSE_POWERON);
+       udelay(1);
+       rtsx_pci_write_register(pcr, RTS5264_EFUSE_ADDR,
+               RTS5264_EFUSE_ADDR_MASK, 0x00);
+       rtsx_pci_write_register(pcr, RTS5264_EFUSE_CTL,
+               RTS5264_EFUSE_ENABLE | RTS5264_EFUSE_MODE_MASK,
+               RTS5264_EFUSE_ENABLE);
+
+       /* Wait transfer end */
+       for (i = 0; i < MAX_RW_REG_CNT; i++) {
+               rtsx_pci_read_register(pcr, RTS5264_EFUSE_CTL, &tmp);
+               if ((tmp & 0x80) == 0)
+                       break;
+       }
+       rtsx_pci_read_register(pcr, RTS5264_EFUSE_READ_DATA, &tmp);
+       efuse_valid = ((tmp & 0x0C) >> 2);
+       pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
+
+       pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2);
+       /* 0x816 */
+       valid = (u8)((lval2 >> 16) & 0x03);
+
+       rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+               REG_EFUSE_POR, 0);
+       pcr_dbg(pcr, "Disable efuse por!\n");
+
+       if (efuse_valid == 2 || efuse_valid == 3) {
+               if (valid == 3) {
+                       /* Bypass efuse */
+                       setting_reg1 = PCR_SETTING_REG1;
+                       setting_reg2 = PCR_SETTING_REG2;
+               } else {
+                       /* Use efuse data */
+                       setting_reg1 = PCR_SETTING_REG4;
+                       setting_reg2 = PCR_SETTING_REG5;
+               }
+       } else if (efuse_valid == 0) {
+               // default
+               setting_reg1 = PCR_SETTING_REG1;
+               setting_reg2 = PCR_SETTING_REG2;
+       } else {
+               return;
+       }
+
+       pci_read_config_dword(pdev, setting_reg2, &lval2);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2);
+
+       if (!rts5264_vendor_setting_valid(lval2)) {
+               pcr_dbg(pcr, "skip fetch vendor setting\n");
+               return;
+       }
+
+       pcr->rtd3_en = rts5264_reg_to_rtd3(lval2);
+
+       if (rts5264_reg_check_reverse_socket(lval2))
+               pcr->flags |= PCR_REVERSE_SOCKET;
+
+       pci_read_config_dword(pdev, setting_reg1, &lval1);
+       pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1);
+
+       pcr->aspm_en = rts5264_reg_to_aspm(lval1);
+       pcr->sd30_drive_sel_1v8 = rts5264_reg_to_sd30_drive_sel_1v8(lval1);
+       pcr->sd30_drive_sel_3v3 = rts5264_reg_to_sd30_drive_sel_3v3(lval1);
+
+       if (setting_reg1 == PCR_SETTING_REG1) {
+               /* store setting */
+               rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF));
+               rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF));
+
+               pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1);
+               lval2 = lval2 & 0x00FFFFFF;
+               pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2);
+       }
+}
+
+/*
+ * rts5264_init_from_cfg - apply driver-option power-management policy.
+ *
+ * OOBS polling is disabled when any ASPM/PM L1 substate flag is set for
+ * this device and enabled otherwise; the ASPM force-control register is
+ * then cleared, and the active-state LTR latency is programmed when LTR
+ * is both supported and currently enabled in @pcr->option.
+ */
+static void rts5264_init_from_cfg(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+
+       /* L1 substates and OOBS polling appear mutually exclusive here */
+       if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+                               | PM_L1_1_EN | PM_L1_2_EN))
+               rtsx_pci_disable_oobs_polling(pcr);
+       else
+               rtsx_pci_enable_oobs_polling(pcr);
+
+       /* clear all ASPM force-control bits; set_aspm() drives them later */
+       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+
+       if (option->ltr_en) {
+               if (option->ltr_enabled)
+                       rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+       }
+}
+
+/*
+ * rts5264_extra_init_hw - RTS5264-specific one-time controller setup.
+ *
+ * Programs the autoload, clock-mux, LDO slew, command/data OE timing,
+ * PRSNT#, LED, driving-strength, CLKREQ# and PM-force registers.  The
+ * per-register meanings below are inferred from the macro names and the
+ * write values; confirm details against the RTS5264 datasheet.  Always
+ * returns 0 (register write failures are ignored, matching the other
+ * rtsx PCR init paths).
+ */
+static int rts5264_extra_init_hw(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+
+       rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG1,
+                       CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+       rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN);
+
+       /* read back vendor settings before the efuse is powered down below */
+       rts5264_init_from_cfg(pcr);
+       rts5264_init_from_hw(pcr);
+
+       /* power off efuse */
+       rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+                       REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF);
+       rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG2,
+                       RTS5264_CHIP_RST_N_SEL, 0);
+       /* fastest LDO12 slew rate (0.0 ms) */
+       rtsx_pci_write_register(pcr, RTS5264_REG_LDO12_CFG,
+                       RTS5264_LDO12_SR_MASK, RTS5264_LDO12_SR_0_0_MS);
+       rtsx_pci_write_register(pcr, CDGW, 0xFF, 0x01);
+       rtsx_pci_write_register(pcr, RTS5264_CKMUX_MBIAS_PWR,
+                       RTS5264_POW_CKMUX, RTS5264_POW_CKMUX);
+       /* enable early OE with the 1-cycle setting (cycle field cleared) */
+       rtsx_pci_write_register(pcr, RTS5264_CMD_OE_START_EARLY,
+                       RTS5264_CMD_OE_EARLY_EN | RTS5264_CMD_OE_EARLY_CYCLE_MASK,
+                       RTS5264_CMD_OE_EARLY_EN);
+       rtsx_pci_write_register(pcr, RTS5264_DAT_OE_START_EARLY,
+                       RTS5264_DAT_OE_EARLY_EN | RTS5264_DAT_OE_EARLY_CYCLE_MASK,
+                       RTS5264_DAT_OE_EARLY_EN);
+       rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D);
+
+       rtsx_pci_write_register(pcr, RTS5264_PWR_CUT,
+                       RTS5264_CFG_MEM_PD, RTS5264_CFG_MEM_PD);
+       rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
+                       AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+       rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
+       rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+                       RTS5264_AUX_CLK_16M_EN, 0);
+
+       /* Release PRSNT# */
+       rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+                       RTS5264_FORCE_PRSNT_LOW, 0);
+       rtsx_pci_write_register(pcr, PCLK_CTL,
+                       PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+       /* LED shine disabled, set initial shine cycle period */
+       rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
+
+       /* Configure driving */
+       rts5264_fill_driving(pcr, OUTPUT_3V3);
+
+       if (pcr->flags & PCR_REVERSE_SOCKET)
+               rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
+       else
+               rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+
+       /*
+        * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
+        * to drive low, and we forcibly request clock.
+        */
+       if (option->force_clkreq_0)
+               rtsx_pci_write_register(pcr, PETXCFG,
+                                FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+       else
+               rtsx_pci_write_register(pcr, PETXCFG,
+                                FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+       rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFF);
+       rtsx_pci_write_register(pcr, RBCTL, U_AUTO_DMA_EN_MASK, 0);
+       rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+                       RTS5264_F_HIGH_RC_MASK, RTS5264_F_HIGH_RC_400K);
+
+       /*
+        * NOTE(review): both branches program reg_pm_ctrl3 identically
+        * (0x01 -> 0); only the PME force-control value differs between
+        * the rtd3 and non-rtd3 cases - confirm this is intended.
+        */
+       if (pcr->rtd3_en) {
+               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+               rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+                               FORCE_PM_CONTROL | FORCE_PM_VALUE, 0);
+       } else {
+               rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+               rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+                               FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+       }
+       rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, D3_DELINK_MODE_EN, 0x00);
+
+       /* Clear Enter RTD3_cold Information */
+       rtsx_pci_write_register(pcr, RTS5264_FW_CTL,
+               RTS5264_INFORM_RTD3_COLD, 0);
+
+       return 0;
+}
+
<comment>
+/*
+ * rts5264_enable_aspm - force-enable ASPM (only ever called with true).
+ *
+ * Writes the chip's ASPM force-control register, then sets the ASPMC
+ * field of the PCIe Link Control register to pcr->aspm_en.  Only bit
+ * 0x02 of aspm_en (presumably the L1 enable - confirm against
+ * FORCE_ASPM_VAL_MASK) is mirrored into the force value.  No-op when
+ * the cached state already matches @enable.
+ */
</comment>
+/*
+ * rts5264_enable_aspm - force-enable ASPM (only ever called with true).
+ *
+ * Writes the chip's ASPM force-control register, then sets the ASPMC
+ * field of the PCIe Link Control register to pcr->aspm_en.  Only bit
+ * 0x02 of aspm_en (presumably the L1 enable - confirm against
+ * FORCE_ASPM_VAL_MASK) is mirrored into the force value.  No-op when
+ * the cached state already matches @enable.
+ */
+static void rts5264_enable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+       u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+       u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+
+       if (pcr->aspm_enabled == enable)
+               return;
+
+       val |= (pcr->aspm_en & 0x02);
+       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+       pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+                                          PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en);
+       pcr->aspm_enabled = enable;
+}
+
+/*
+ * rts5264_disable_aspm - force-disable ASPM (only ever called with false).
+ *
+ * Clears the ASPMC field in the PCIe Link Control register first, then
+ * programs the chip's force-control register, releases the SD async
+ * FIFO from reset and waits 10us for the change to settle.  No-op when
+ * the cached state already matches @enable.
+ */
+static void rts5264_disable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+       u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+       u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+
+       if (pcr->aspm_enabled == enable)
+               return;
+
+       pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+                                          PCI_EXP_LNKCTL_ASPMC, 0);
+       rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+       rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+       udelay(10);
+       pcr->aspm_enabled = enable;
+}
+
+/* pcr_ops .set_aspm entry: dispatch to the enable/disable helpers */
+static void rts5264_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+       if (enable)
+               rts5264_enable_aspm(pcr, true);
+       else
+               rts5264_disable_aspm(pcr, false);
+}
+
+/*
+ * rts5264_set_l1off_cfg_sub_d0 - pick the L1-off sub-state power-gate value.
+ *
+ * @active selects the snooze value (ASPM L1.1 enabled) versus the L1-off
+ * value (ASPM L1.2 enabled); when neither flag is set val stays 0.  If
+ * LTR_L1SS_PWR_GATE_CHECK_CARD_EN is set, the MBIAS2 bit is additionally
+ * cleared while a card is present and set while the slot is empty, based
+ * on the SD_EXIST bit of the interrupt status register.
+ */
+static void rts5264_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+       struct rtsx_cr_option *option = &(pcr->option);
+
+       u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+       int card_exist = (interrupt & SD_EXIST);
+       int aspm_L1_1, aspm_L1_2;
+       u8 val = 0;
+
+       aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+       aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+       if (active) {
+               /* Run, latency: 60us */
+               if (aspm_L1_1)
+                       val = option->ltr_l1off_snooze_sspwrgate;
+       } else {
+               /* L1off, latency: 300us */
+               if (aspm_L1_2)
+                       val = option->ltr_l1off_sspwrgate;
+       }
+
+       if (aspm_L1_1 || aspm_L1_2) {
+               if (rtsx_check_dev_flag(pcr,
+                                       LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+                       if (card_exist)
+                               val &= ~L1OFF_MBIAS2_EN_5250;
+                       else
+                               val |= L1OFF_MBIAS2_EN_5250;
+               }
+       }
+       rtsx_set_l1off_sub(pcr, val);
+}
+
+/* Callback table wiring the RTS5264-specific handlers into the rtsx core */
+static const struct pcr_ops rts5264_pcr_ops = {
+       .turn_on_led = rts5264_turn_on_led,
+       .turn_off_led = rts5264_turn_off_led,
+       .extra_init_hw = rts5264_extra_init_hw,
+       .enable_auto_blink = rts5264_enable_auto_blink,
+       .disable_auto_blink = rts5264_disable_auto_blink,
+       .card_power_on = rts5264_card_power_on,
+       .card_power_off = rts5264_card_power_off,
+       .switch_output_voltage = rts5264_switch_output_voltage,
+       .force_power_down = rts5264_force_power_down,
+       .stop_cmd = rts5264_stop_cmd,
+       .set_aspm = rts5264_set_aspm,
+       .set_l1off_cfg_sub_d0 = rts5264_set_l1off_cfg_sub_d0,
+       .enable_ocp = rts5264_enable_ocp,
+       .disable_ocp = rts5264_disable_ocp,
+       .init_ocp = rts5264_init_ocp,
+       .process_ocp = rts5264_process_ocp,
+       .clear_ocpstat = rts5264_clear_ocpstat,
+};
+
+/*
+ * Double the SSC depth for a doubled clock.  The RTS5264_SSC_DEPTH_*
+ * codes are inverse-ordered (a lower code means a larger depth), so
+ * doubling is a decrement; codes <= 1 (8M / disabled) are returned
+ * unchanged as they cannot go deeper.
+ */
+static inline u8 double_ssc_depth(u8 depth)
+{
+       return ((depth > 1) ? (depth - 1) : depth);
+}
+
+/*
+ * rts5264_pci_switch_clock - program the internal SSC clock for a new
+ * card clock.
+ * @card_clock: requested card clock in Hz (overridden in initial mode)
+ * @ssc_depth: RTSX_SSC_DEPTH_* request, remapped to the RTS5264 encoding
+ * @initial_mode: use the fixed identification clock (30 MHz / 128)
+ * @double_clk: run the internal clock at twice the card clock
+ * @vpclk: pulse the VP clock phase-reset bits while switching
+ *
+ * Returns 0 on success (including the no-op case where the clock is
+ * already at the requested rate), -EINVAL for an out-of-range clock, or
+ * the error from a failed register write / command transfer.
+ */
+int rts5264_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+               u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+       int err, clk;
+       u16 n;
+       u8 clk_divider, mcu_cnt, div;
+       /* map generic RTSX_SSC_DEPTH_* requests to RTS5264 codes */
+       static const u8 depth[] = {
+               [RTSX_SSC_DEPTH_4M] = RTS5264_SSC_DEPTH_4M,
+               [RTSX_SSC_DEPTH_2M] = RTS5264_SSC_DEPTH_2M,
+               [RTSX_SSC_DEPTH_1M] = RTS5264_SSC_DEPTH_1M,
+               [RTSX_SSC_DEPTH_500K] = RTS5264_SSC_DEPTH_512K,
+       };
+
+       if (initial_mode) {
+               /* identification stage: ~234 kHz (30 MHz / 128) */
+               clk_divider = SD_CLK_DIVIDE_128;
+               card_clock = 30000000;
+       } else {
+               clk_divider = SD_CLK_DIVIDE_0;
+       }
+       err = rtsx_pci_write_register(pcr, SD_CFG1,
+                       SD_CLK_DIVIDE_MASK, clk_divider);
+       if (err < 0)
+               return err;
+
+       /* work in MHz from here on */
+       card_clock /= 1000000;
+       pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+       clk = card_clock;
+       if (!initial_mode && double_clk)
+               clk = card_clock * 2;
+       pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+               clk, pcr->cur_clock);
+
+       if (clk == pcr->cur_clock)
+               return 0;
+
+       /* derive the divider value N; default mapping is N = clk - 4 */
+       if (pcr->ops->conv_clk_and_div_n)
+               n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+       else
+               n = clk - 4;
+       if ((clk <= 4) || (n > 396))
+               return -EINVAL;
+
+       /* MCU count scales inversely with clock, clamped to 15 */
+       mcu_cnt = 125/clk + 3;
+       if (mcu_cnt > 15)
+               mcu_cnt = 15;
+
+       /* raise N into its valid range by doubling, compensating with div */
+       div = CLK_DIV_1;
+       while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
+               if (pcr->ops->conv_clk_and_div_n) {
+                       int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+                                       DIV_N_TO_CLK) * 2;
+                       n = pcr->ops->conv_clk_and_div_n(dbl_clk,
+                                       CLK_TO_DIV_N);
+               } else {
+                       n = (n + 4) * 2 - 4;
+               }
+               div++;
+       }
+
+       n = (n / 2) - 1;
+       pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+       ssc_depth = depth[ssc_depth];
+       if (double_clk)
+               ssc_depth = double_ssc_depth(ssc_depth);
+
+       /*
+        * Deepen the SSC depth code by the number of clock doublings
+        * (lower code = larger depth), saturating at the 8M maximum.
+        */
+       if (ssc_depth) {
+               if (div == CLK_DIV_2) {
+                       if (ssc_depth > 1)
+                               ssc_depth -= 1;
+                       else
+                               ssc_depth = RTS5264_SSC_DEPTH_8M;
+               } else if (div == CLK_DIV_4) {
+                       if (ssc_depth > 2)
+                               ssc_depth -= 2;
+                       else
+                               ssc_depth = RTS5264_SSC_DEPTH_8M;
+               } else if (div == CLK_DIV_8) {
+                       if (ssc_depth > 3)
+                               ssc_depth -= 3;
+                       else
+                               ssc_depth = RTS5264_SSC_DEPTH_8M;
+               }
+       } else {
+               ssc_depth = 0;
+       }
+       pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+       rtsx_pci_init_cmd(pcr);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+                               CHANGE_CLK, CHANGE_CLK);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+                       0xFF, (div << 4) | mcu_cnt);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+                       SSC_DEPTH_MASK, ssc_depth);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+
+       /* IC version A keeps SSC in reset and clears a different KVCO bit */
+       if (is_version(pcr, 0x5264, IC_VER_A)) {
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS5264_CARD_CLK_SRC2,
+                       RTS5264_REG_BIG_KVCO_A, 0);
+       } else {
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS5264_SYS_DUMMY_1,
+                       RTS5264_REG_BIG_KVCO, 0);
+       }
+
+       /* pulse the VP clock phase reset (clear then set PHASE_NOT_RESET) */
+       if (vpclk) {
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+                               PHASE_NOT_RESET, 0);
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+                               PHASE_NOT_RESET, 0);
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+                               PHASE_NOT_RESET, PHASE_NOT_RESET);
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+                               PHASE_NOT_RESET, PHASE_NOT_RESET);
+       }
+
+       err = rtsx_pci_send_cmd(pcr, 2000);
+       if (err < 0)
+               return err;
+
+       /* Wait SSC clock stable */
+       udelay(SSC_CLOCK_STABLE_WAIT);
+       err = rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
+       if (err < 0)
+               return err;
+
+       pcr->cur_clock = clk;
+       return 0;
+}
+
+/*
+ * rts5264_init_params - populate rtsx_pcr defaults for the RTS5264.
+ *
+ * Sets capabilities (SDR50/SDR104, no MMC, SD Express when the firmware
+ * status register reports no express-link failure), wires in the
+ * RTS5264 pcr_ops and pull-control tables, and seeds the ASPM, clock
+ * phase, LTR latency and over-current/over-voltage protection defaults.
+ */
+void rts5264_init_params(struct rtsx_pcr *pcr)
+{
+       struct rtsx_cr_option *option = &pcr->option;
+       struct rtsx_hw_param *hw_param = &pcr->hw_param;
+       u8 val;
+
+       pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+       pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
+       /* advertise SD Express only if the express link did not fail */
+       rtsx_pci_read_register(pcr, RTS5264_FW_STATUS, &val);
+       if (!(val & RTS5264_EXPRESS_LINK_FAIL_MASK))
+               pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
+       pcr->num_slots = 1;
+       pcr->ops = &rts5264_pcr_ops;
+
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = 0x00;
+       pcr->sd30_drive_sel_3v3 = 0x00;
+       pcr->aspm_en = ASPM_L1_EN;
+       pcr->aspm_mode = ASPM_MODE_REG;
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(24, 24, 11);
+       pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+       pcr->ic_version = rts5264_get_ic_version(pcr);
+       pcr->sd_pull_ctl_enable_tbl = rts5264_sd_pull_ctl_enable_tbl;
+       pcr->sd_pull_ctl_disable_tbl = rts5264_sd_pull_ctl_disable_tbl;
+
+       pcr->reg_pm_ctrl3 = RTS5264_AUTOLOAD_CFG3;
+
+       option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+                               | LTR_L1SS_PWR_GATE_EN);
+       option->ltr_en = true;
+
+       /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+       option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+       option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+       option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+       option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+       option->ltr_l1off_sspwrgate = 0x7F;
+       option->ltr_l1off_snooze_sspwrgate = 0x78;
+
+       /* over-current protection: 800us glitch filter, 1150mA threshold */
+       option->ocp_en = 1;
+       hw_param->interrupt_en |= (SD_OC_INT_EN | SD_OVP_INT_EN);
+       hw_param->ocp_glitch =  SD_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U;
+       option->sd_800mA_ocp_thd =  RTS5264_LDO1_OCP_THD_1150;
+}
diff --git a/drivers/misc/cardreader/rts5264.h b/drivers/misc/cardreader/rts5264.h
new file mode 100644 (file)
index 0000000..e3cbbf2
--- /dev/null
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ *   Ricky Wu <ricky_wu@realtek.com>
+ */
+#ifndef RTS5264_H
+#define RTS5264_H
+
+/* RTS5264-specific register definitions and vendor-setting decode helpers */
+#define rts5264_vendor_setting_valid(reg)      ((reg) & 0x010000)
+#define rts5264_reg_to_aspm(reg) \
+       (((~(reg) >> 28) & 0x02) | (((reg) >> 28) & 0x01))
+#define rts5264_reg_check_reverse_socket(reg)  ((reg) & 0x04)
+#define rts5264_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) & 0x03)
+#define rts5264_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) & 0x03)
+#define rts5264_reg_to_rtd3(reg)               ((reg) & 0x08)
+
+#define RTS5264_AUTOLOAD_CFG0          0xFF7B
+#define RTS5264_AUTOLOAD_CFG1          0xFF7C
+#define RTS5264_AUTOLOAD_CFG3          0xFF7E
+#define RTS5264_AUTOLOAD_CFG4          0xFF7F
+#define RTS5264_FORCE_PRSNT_LOW                (1 << 6)
+#define RTS5264_AUX_CLK_16M_EN         (1 << 5)
+#define RTS5264_F_HIGH_RC_MASK         (1 << 4)
+#define RTS5264_F_HIGH_RC_1_6M         (1 << 4)
+#define RTS5264_F_HIGH_RC_400K         (0 << 4)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5264_SSC_DEPTH_MASK         0x07
+#define RTS5264_SSC_DEPTH_DISALBE      0x00
+#define RTS5264_SSC_DEPTH_8M           0x01
+#define RTS5264_SSC_DEPTH_4M           0x02
+#define RTS5264_SSC_DEPTH_2M           0x03
+#define RTS5264_SSC_DEPTH_1M           0x04
+#define RTS5264_SSC_DEPTH_512K         0x05
+#define RTS5264_SSC_DEPTH_256K         0x06
+#define RTS5264_SSC_DEPTH_128K         0x07
+
+#define RTS5264_CARD_CLK_SRC2          0xFC2F
+#define RTS5264_REG_BIG_KVCO_A         0x20
+
+/* eFuse control registers */
+#define RTS5264_EFUSE_CTL              0xFC30
+#define RTS5264_EFUSE_ENABLE           0x80
+/* EFUSE_MODE: 0=READ 1=PROGRAM */
+#define RTS5264_EFUSE_MODE_MASK                0x40
+#define RTS5264_EFUSE_PROGRAM          0x40
+
+#define RTS5264_EFUSE_ADDR             0xFC31
+#define        RTS5264_EFUSE_ADDR_MASK         0x3F
+
+#define RTS5264_EFUSE_WRITE_DATA       0xFC32
+#define RTS5264_EFUSE_READ_DATA                0xFC34
+
+#define RTS5264_SYS_DUMMY_1            0xFC35
+#define RTS5264_REG_BIG_KVCO           0x04
+
+/* DMACTL 0xFE2C */
+#define RTS5264_DMA_PACK_SIZE_MASK     0x70
+
+#define RTS5264_FW_CFG1                        0xFF55
+#define RTS5264_SYS_CLK_SEL_MCU_CLK    (0x01<<7)
+#define RTS5264_CRC_CLK_SEL_MCU_CLK    (0x01<<6)
+#define RTS5264_FAKE_MCU_CLOCK_GATING  (0x01<<5)
+#define RTS5264_MCU_BUS_SEL_MASK       (0x01<<4)
+
+/* FW status register */
+#define RTS5264_FW_STATUS              0xFF56
+#define RTS5264_EXPRESS_LINK_FAIL_MASK (0x01<<7)
+
+/* FW control register */
+#define RTS5264_FW_CTL                 0xFF5F
+#define RTS5264_INFORM_RTD3_COLD       (0x01<<5)
+
+#define RTS5264_REG_FPDCTL             0xFF60
+
+#define RTS5264_REG_LDO12_CFG          0xFF6E
+#define RTS5264_LDO12_SR_MASK          (0x03<<6)
+#define RTS5264_LDO12_SR_1_0_MS                (0x03<<6)
+#define RTS5264_LDO12_SR_0_5_MS                (0x02<<6)
+#define RTS5264_LDO12_SR_0_2_5_MS      (0x01<<6)
+#define RTS5264_LDO12_SR_0_0_MS                (0x00<<6)
+#define RTS5264_LDO12_VO_TUNE_MASK     (0x07<<1)
+#define RTS5264_LDO12_115              (0x03<<1)
+#define RTS5264_LDO12_120              (0x04<<1)
+#define RTS5264_LDO12_125              (0x05<<1)
+#define RTS5264_LDO12_130              (0x06<<1)
+#define RTS5264_LDO12_135              (0x07<<1)
+
+/* LDO control register */
+#define RTS5264_CARD_PWR_CTL           0xFD50
+#define RTS5264_SD_CLK_ISO             (0x01<<7)
+#define RTS5264_PAD_SD_DAT_FW_CTRL     (0x01<<6)
+#define RTS5264_PUPDC                  (0x01<<5)
+#define RTS5264_SD_CMD_ISO             (0x01<<4)
+
+#define RTS5264_OCP_VDD3_CTL           0xFD89
+#define SD_VDD3_DETECT_EN              0x08
+#define SD_VDD3_OCP_INT_EN             0x04
+#define SD_VDD3_OCP_INT_CLR            0x02
+#define SD_VDD3_OC_CLR                 0x01
+
+#define RTS5264_OCP_VDD3_STS           0xFD8A
+#define SD_VDD3_OCP_DETECT             0x08
+#define SD_VDD3_OC_NOW                 0x04
+#define SD_VDD3_OC_EVER                        0x02
+
+#define RTS5264_OVP_CTL                        0xFD8D
+#define RTS5264_OVP_TIME_MASK          0xF0
+#define RTS5264_OVP_TIME_DFT           0x50
+#define RTS5264_OVP_DETECT_EN          0x08
+#define RTS5264_OVP_INT_EN             0x04
+#define RTS5264_OVP_INT_CLR            0x02
+#define RTS5264_OVP_CLR                        0x01
+
+#define RTS5264_OVP_STS                        0xFD8E
+#define RTS5264_OVP_GLTCH_TIME_MASK    0xF0
+#define RTS5264_OVP_GLTCH_TIME_DFT     0x50
+#define RTS5264_VOVER_DET              0x08
+#define RTS5264_OVP_NOW                        0x04
+#define RTS5264_OVP_EVER               0x02
+
+#define RTS5264_CMD_OE_START_EARLY             0xFDCB
+#define RTS5264_CMD_OE_EARLY_LEAVE             0x08
+#define RTS5264_CMD_OE_EARLY_CYCLE_MASK                0x06
+#define RTS5264_CMD_OE_EARLY_4CYCLE            0x06
+#define RTS5264_CMD_OE_EARLY_3CYCLE            0x04
+#define RTS5264_CMD_OE_EARLY_2CYCLE            0x02
+#define RTS5264_CMD_OE_EARLY_1CYCLE            0x00
+#define RTS5264_CMD_OE_EARLY_EN                        0x01
+
+#define RTS5264_DAT_OE_START_EARLY             0xFDCC
+#define RTS5264_DAT_OE_EARLY_LEAVE             0x08
+#define RTS5264_DAT_OE_EARLY_CYCLE_MASK                0x06
+#define RTS5264_DAT_OE_EARLY_4CYCLE            0x06
+#define RTS5264_DAT_OE_EARLY_3CYCLE            0x04
+#define RTS5264_DAT_OE_EARLY_2CYCLE            0x02
+#define RTS5264_DAT_OE_EARLY_1CYCLE            0x00
+#define RTS5264_DAT_OE_EARLY_EN                        0x01
+
+#define RTS5264_LDO1233318_POW_CTL     0xFF70
+#define RTS5264_TUNE_REF_LDO3318       (0x03<<6)
+#define RTS5264_TUNE_REF_LDO3318_DFT   (0x02<<6)
+#define RTS5264_LDO3318_POWERON                (0x01<<3)
+#define RTS5264_LDO3_POWERON           (0x01<<2)
+#define RTS5264_LDO2_POWERON           (0x01<<1)
+#define RTS5264_LDO1_POWERON           (0x01<<0)
+#define RTS5264_LDO_POWERON_MASK       (0x0F<<0)
+
+#define RTS5264_DV3318_CFG             0xFF71
+#define RTS5264_DV3318_TUNE_MASK       (0x07<<4)
+#define RTS5264_DV3318_18              (0x02<<4)
+#define RTS5264_DV3318_19              (0x04<<4)
+#define RTS5264_DV3318_33              (0x07<<4)
+
+#define RTS5264_LDO1_CFG0              0xFF72
+#define RTS5264_LDO1_OCP_THD_MASK      (0x07 << 5)
+#define RTS5264_LDO1_OCP_EN            (0x01 << 4)
+#define RTS5264_LDO1_OCP_LMT_THD_MASK  (0x03 << 2)
+#define RTS5264_LDO1_OCP_LMT_EN                (0x01 << 1)
+
+#define RTS5264_LDO1_OCP_THD_850       (0x00<<5)
+#define RTS5264_LDO1_OCP_THD_950       (0x01<<5)
+#define RTS5264_LDO1_OCP_THD_1050      (0x02<<5)
+#define RTS5264_LDO1_OCP_THD_1100      (0x03<<5)
+#define RTS5264_LDO1_OCP_THD_1150      (0x04<<5)
+#define RTS5264_LDO1_OCP_THD_1200      (0x05<<5)
+#define RTS5264_LDO1_OCP_THD_1300      (0x06<<5)
+#define RTS5264_LDO1_OCP_THD_1350      (0x07<<5)
+
+#define RTS5264_LDO1_LMT_THD_1700      (0x00<<2)
+#define RTS5264_LDO1_LMT_THD_1800      (0x01<<2)
+#define RTS5264_LDO1_LMT_THD_1900      (0x02<<2)
+#define RTS5264_LDO1_LMT_THD_2000      (0x03<<2)
+
+#define RTS5264_LDO1_CFG1              0xFF73
+#define RTS5264_LDO1_TUNE_MASK         (0x07<<1)
+#define RTS5264_LDO1_18                        (0x05<<1)
+#define RTS5264_LDO1_33                        (0x07<<1)
+#define RTS5264_LDO1_PWD_MASK          (0x01<<0)
+
+#define RTS5264_LDO2_CFG0              0xFF74
+#define RTS5264_LDO2_OCP_THD_MASK      (0x07<<5)
+#define RTS5264_LDO2_OCP_EN            (0x01<<4)
+#define RTS5264_LDO2_OCP_LMT_THD_MASK  (0x03<<2)
+#define RTS5264_LDO2_OCP_LMT_EN                (0x01<<1)
+
+#define RTS5264_LDO2_OCP_THD_750       (0x00<<5)
+#define RTS5264_LDO2_OCP_THD_850       (0x01<<5)
+#define RTS5264_LDO2_OCP_THD_900       (0x02<<5)
+#define RTS5264_LDO2_OCP_THD_950       (0x03<<5)
+#define RTS5264_LDO2_OCP_THD_1050      (0x04<<5)
+#define RTS5264_LDO2_OCP_THD_1100      (0x05<<5)
+#define RTS5264_LDO2_OCP_THD_1150      (0x06<<5)
+#define RTS5264_LDO2_OCP_THD_1200      (0x07<<5)
+
+#define RTS5264_LDO2_LMT_THD_1700      (0x00<<2)
+#define RTS5264_LDO2_LMT_THD_1800      (0x01<<2)
+#define RTS5264_LDO2_LMT_THD_1900      (0x02<<2)
+#define RTS5264_LDO2_LMT_THD_2000      (0x03<<2)
+
+#define RTS5264_LDO2_CFG1              0xFF75
+#define RTS5264_LDO2_TUNE_MASK         (0x07<<1)
+#define RTS5264_LDO2_18                        (0x02<<1)
+#define RTS5264_LDO2_185               (0x03<<1)
+#define RTS5264_LDO2_19                        (0x04<<1)
+#define RTS5264_LDO2_195               (0x05<<1)
+#define RTS5264_LDO2_33                        (0x07<<1)
+#define RTS5264_LDO2_PWD_MASK          (0x01<<0)
+
+#define RTS5264_LDO3_CFG0              0xFF76
+#define RTS5264_LDO3_OCP_THD_MASK      (0x07<<5)
+#define RTS5264_LDO3_OCP_EN            (0x01<<4)
+#define RTS5264_LDO3_OCP_LMT_THD_MASK  (0x03<<2)
+#define RTS5264_LDO3_OCP_LMT_EN                (0x01<<1)
+
+#define RTS5264_LDO3_OCP_THD_610       (0x00<<5)
+#define RTS5264_LDO3_OCP_THD_630       (0x01<<5)
+#define RTS5264_LDO3_OCP_THD_670       (0x02<<5)
+#define RTS5264_LDO3_OCP_THD_710       (0x03<<5)
+#define RTS5264_LDO3_OCP_THD_750       (0x04<<5)
+#define RTS5264_LDO3_OCP_THD_770       (0x05<<5)
+#define RTS5264_LDO3_OCP_THD_810       (0x06<<5)
+#define RTS5264_LDO3_OCP_THD_850       (0x07<<5)
+
+#define RTS5264_LDO3_LMT_THD_1200      (0x00<<2)
+#define RTS5264_LDO3_LMT_THD_1300      (0x01<<2)
+#define RTS5264_LDO3_LMT_THD_1400      (0x02<<2)
+#define RTS5264_LDO3_LMT_THD_1500      (0x03<<2)
+
+#define RTS5264_LDO3_CFG1              0xFF77
+#define RTS5264_LDO3_TUNE_MASK         (0x07<<1)
+#define RTS5264_LDO3_12                        (0x02<<1)
+#define RTS5264_LDO3_125               (0x03<<1)
+#define RTS5264_LDO3_13                        (0x04<<1)
+#define RTS5264_LDO3_135               (0x05<<1)
+#define RTS5264_LDO3_33                        (0x07<<1)
+#define RTS5264_LDO3_PWD_MASK          (0x01<<0)
+
+#define RTS5264_REG_PME_FORCE_CTL      0xFF78
+#define FORCE_PM_CONTROL               0x20
+#define FORCE_PM_VALUE                 0x10
+#define REG_EFUSE_BYPASS               0x08
+#define REG_EFUSE_POR                  0x04
+#define REG_EFUSE_POWER_MASK           0x03
+#define REG_EFUSE_POWERON              0x03
+#define REG_EFUSE_POWEROFF             0x00
+
+#define RTS5264_PWR_CUT                        0xFF81
+#define RTS5264_CFG_MEM_PD             0xF0
+
+#define RTS5264_OVP_DET                        0xFF8A
+#define RTS5264_POW_VDET               0x04
+#define RTS5264_TUNE_VROV_MASK         0x03
+#define RTS5264_TUNE_VROV_2V           0x03
+#define RTS5264_TUNE_VROV_1V8          0x02
+#define RTS5264_TUNE_VROV_1V6          0x01
+#define RTS5264_TUNE_VROV_1V4          0x00
+
+#define RTS5264_CKMUX_MBIAS_PWR                0xFF8B
+#define RTS5264_NON_XTAL_SEL           0x80
+#define RTS5264_POW_CKMUX              0x40
+#define RTS5264_LVD_MASK               0x04
+#define RTS5264_POW_PSW_MASK           0x03
+#define RTS5264_POW_PSW_DFT            0x03
+
+/* Single LUN, support SD/SD EXPRESS */
+#define DEFAULT_SINGLE         0
+#define SD_LUN                 1
+#define SD_EXPRESS_LUN         2
+
+int rts5264_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+               u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+
+#endif /* RTS5264_H */
index a30751ad373307701a24ba7808b19f4f5d775fad..1a64364700eb0f3d3b93197f04c154c03fd3601a 100644 (file)
@@ -26,6 +26,7 @@
 #include "rtsx_pcr.h"
 #include "rts5261.h"
 #include "rts5228.h"
+#include "rts5264.h"
 
 static bool msi_en = true;
 module_param(msi_en, bool, S_IRUGO | S_IWUSR);
@@ -54,6 +55,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
        { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
        { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
        { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+       { PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 },
        { 0, }
 };
 
@@ -714,6 +716,9 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
        if (PCI_PID(pcr) == PID_5228)
                return rts5228_pci_switch_clock(pcr, card_clock,
                                ssc_depth, initial_mode, double_clk, vpclk);
+       if (PCI_PID(pcr) == PID_5264)
+               return rts5264_pci_switch_clock(pcr, card_clock,
+                               ssc_depth, initial_mode, double_clk, vpclk);
 
        if (initial_mode) {
                /* We use 250k(around) here, in initial stage */
@@ -987,7 +992,8 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
 
        int_reg &= (pcr->bier | 0x7FFFFF);
 
-       if (int_reg & SD_OC_INT)
+       if ((int_reg & SD_OC_INT) ||
+                       ((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == PID_5264)))
                rtsx_pci_process_ocp_interrupt(pcr);
 
        if (int_reg & SD_INT) {
@@ -1159,7 +1165,9 @@ void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
 {
        u16 val;
 
-       if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+       if ((PCI_PID(pcr) != PID_525A) &&
+               (PCI_PID(pcr) != PID_5260) &&
+               (PCI_PID(pcr) != PID_5264)) {
                rtsx_pci_read_phy_register(pcr, 0x01, &val);
                val |= 1<<9;
                rtsx_pci_write_phy_register(pcr, 0x01, val);
@@ -1175,7 +1183,9 @@ void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
 {
        u16 val;
 
-       if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+       if ((PCI_PID(pcr) != PID_525A) &&
+               (PCI_PID(pcr) != PID_5260) &&
+               (PCI_PID(pcr) != PID_5264)) {
                rtsx_pci_read_phy_register(pcr, 0x01, &val);
                val &= ~(1<<9);
                rtsx_pci_write_phy_register(pcr, 0x01, val);
@@ -1226,7 +1236,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
        rtsx_pci_enable_bus_int(pcr);
 
        /* Power on SSC */
-       if (PCI_PID(pcr) == PID_5261) {
+       if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
                /* Gating real mcu clock */
                err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
                        RTS5261_MCU_CLOCK_GATING, 0);
@@ -1270,6 +1280,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
        else if (PCI_PID(pcr) == PID_5228)
                rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
                        RTS5228_SSC_DEPTH_2M);
+       else if (is_version(pcr, 0x5264, IC_VER_A))
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+       else if (PCI_PID(pcr) == PID_5264)
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
+                       RTS5264_SSC_DEPTH_2M);
        else
                rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
 
@@ -1305,6 +1320,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
        case PID_5260:
        case PID_5261:
        case PID_5228:
+       case PID_5264:
                rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
                break;
        default:
@@ -1404,6 +1420,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
        case 0x5228:
                rts5228_init_params(pcr);
                break;
+
+       case 0x5264:
+               rts5264_init_params(pcr);
+               break;
        }
 
        pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1544,7 +1564,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        pcr->pci = pcidev;
        dev_set_drvdata(&pcidev->dev, handle);
 
-       if (CHK_PCI_PID(pcr, 0x525A))
+       if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264)))
                bar = 1;
        len = pci_resource_len(pcidev, bar);
        base = pci_resource_start(pcidev, bar);
index 37d1f316ae17cfe0b8ccadecf61a76d722cc24c5..9215d66de00cc06c1985bb96823467c4d627087c 100644 (file)
@@ -74,6 +74,7 @@ void rtl8411b_init_params(struct rtsx_pcr *pcr);
 void rts5260_init_params(struct rtsx_pcr *pcr);
 void rts5261_init_params(struct rtsx_pcr *pcr);
 void rts5228_init_params(struct rtsx_pcr *pcr);
+void rts5264_init_params(struct rtsx_pcr *pcr);
 
 static inline u8 map_sd_drive(int idx)
 {
index 257c25da5199600d55371429a0458a4a97239e5a..efd0ca8cc925d177332ccc5623e48bb437f40d06 100644 (file)
@@ -333,7 +333,7 @@ static int dw_xdata_pcie_probe(struct pci_dev *pdev,
 
        dw->pdev = pdev;
 
-       id = ida_simple_get(&xdata_ida, 0, 0, GFP_KERNEL);
+       id = ida_alloc(&xdata_ida, GFP_KERNEL);
        if (id < 0) {
                dev_err(dev, "xData: unable to get id\n");
                return id;
@@ -377,7 +377,7 @@ err_kfree_name:
        kfree(dw->misc_dev.name);
 
 err_ida_remove:
-       ida_simple_remove(&xdata_ida, id);
+       ida_free(&xdata_ida, id);
 
        return err;
 }
@@ -396,7 +396,7 @@ static void dw_xdata_pcie_remove(struct pci_dev *pdev)
        dw_xdata_stop(dw);
        misc_deregister(&dw->misc_dev);
        kfree(dw->misc_dev.name);
-       ida_simple_remove(&xdata_ida, id);
+       ida_free(&xdata_ida, id);
 }
 
 static const struct pci_device_id dw_xdata_pcie_id_table[] = {
index f61a80597a22d01fd7741f0491f088545b97cb29..572333ead5fb8b002b87957dfe1dc9ea26330efb 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/nvmem-provider.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
@@ -242,7 +243,7 @@ static const struct i2c_device_id at24_ids[] = {
 };
 MODULE_DEVICE_TABLE(i2c, at24_ids);
 
-static const struct of_device_id at24_of_match[] = {
+static const struct of_device_id __maybe_unused at24_of_match[] = {
        { .compatible = "atmel,24c00",          .data = &at24_data_24c00 },
        { .compatible = "atmel,24c01",          .data = &at24_data_24c01 },
        { .compatible = "atmel,24cs01",         .data = &at24_data_24cs01 },
@@ -439,12 +440,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
        if (off + count > at24->byte_len)
                return -EINVAL;
 
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(dev);
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret)
                return ret;
-       }
-
        /*
         * Read data from chip, protecting against concurrent updates
         * from this host, but not from other I2C masters.
@@ -486,12 +484,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
        if (off + count > at24->byte_len)
                return -EINVAL;
 
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(dev);
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret)
                return ret;
-       }
-
        /*
         * Write data to chip, protecting against concurrent updates
         * from this host, but not from other I2C masters.
@@ -563,6 +558,31 @@ static unsigned int at24_get_offset_adj(u8 flags, unsigned int byte_len)
        }
 }
 
+static void at24_probe_temp_sensor(struct i2c_client *client)
+{
+       struct at24_data *at24 = i2c_get_clientdata(client);
+       struct i2c_board_info info = { .type = "jc42" };
+       int ret;
+       u8 val;
+
+       /*
+        * Byte 2 has value 11 for DDR3, earlier versions don't
+        * support the thermal sensor present flag
+        */
+       ret = at24_read(at24, 2, &val, 1);
+       if (ret || val != 11)
+               return;
+
+       /* Byte 32, bit 7 is set if temp sensor is present */
+       ret = at24_read(at24, 32, &val, 1);
+       if (ret || !(val & BIT(7)))
+               return;
+
+       info.addr = 0x18 | (client->addr & 7);
+
+       i2c_new_client_device(client->adapter, &info);
+}
+
 static int at24_probe(struct i2c_client *client)
 {
        struct regmap_config regmap_config = { };
@@ -762,6 +782,10 @@ static int at24_probe(struct i2c_client *client)
                }
        }
 
+       /* If this a SPD EEPROM, probe for DDR3 thermal sensor */
+       if (cdata == &at24_data_spd)
+               at24_probe_temp_sensor(client);
+
        pm_runtime_idle(dev);
 
        if (writable)
@@ -812,7 +836,7 @@ static struct i2c_driver at24_driver = {
        .driver = {
                .name = "at24",
                .pm = &at24_pm_ops,
-               .of_match_table = at24_of_match,
+               .of_match_table = of_match_ptr(at24_of_match),
                .acpi_match_table = ACPI_PTR(at24_acpi_ids),
        },
        .probe = at24_probe,
index a1acd77130f26de8217efbcdb45b16abd58aaabf..21feebc3044c34b60f29571cbd65159a4106a735 100644 (file)
@@ -31,6 +31,7 @@
  * over performance.
  */
 
+#define EE1004_MAX_BUSSES              8
 #define EE1004_ADDR_SET_PAGE           0x36
 #define EE1004_NUM_PAGES               2
 #define EE1004_PAGE_SIZE               256
  * from page selection to end of read.
  */
 static DEFINE_MUTEX(ee1004_bus_lock);
-static struct i2c_client *ee1004_set_page[EE1004_NUM_PAGES];
-static unsigned int ee1004_dev_count;
-static int ee1004_current_page;
+
+static struct ee1004_bus_data {
+       struct i2c_adapter *adap;
+       struct i2c_client *set_page[EE1004_NUM_PAGES];
+       unsigned int dev_count;
+       int current_page;
+} ee1004_bus_data[EE1004_MAX_BUSSES];
 
 static const struct i2c_device_id ee1004_ids[] = {
        { "ee1004", 0 },
@@ -54,11 +59,29 @@ MODULE_DEVICE_TABLE(i2c, ee1004_ids);
 
 /*-------------------------------------------------------------------------*/
 
-static int ee1004_get_current_page(void)
+static struct ee1004_bus_data *ee1004_get_bus_data(struct i2c_adapter *adap)
+{
+       int i;
+
+       for (i = 0; i < EE1004_MAX_BUSSES; i++)
+               if (ee1004_bus_data[i].adap == adap)
+                       return ee1004_bus_data + i;
+
+       /* If not existent yet, create new entry */
+       for (i = 0; i < EE1004_MAX_BUSSES; i++)
+               if (!ee1004_bus_data[i].adap) {
+                       ee1004_bus_data[i].adap = adap;
+                       return ee1004_bus_data + i;
+               }
+
+       return NULL;
+}
+
+static int ee1004_get_current_page(struct ee1004_bus_data *bd)
 {
        int err;
 
-       err = i2c_smbus_read_byte(ee1004_set_page[0]);
+       err = i2c_smbus_read_byte(bd->set_page[0]);
        if (err == -ENXIO) {
                /* Nack means page 1 is selected */
                return 1;
@@ -72,28 +95,29 @@ static int ee1004_get_current_page(void)
        return 0;
 }
 
-static int ee1004_set_current_page(struct device *dev, int page)
+static int ee1004_set_current_page(struct i2c_client *client, int page)
 {
+       struct ee1004_bus_data *bd = i2c_get_clientdata(client);
        int ret;
 
-       if (page == ee1004_current_page)
+       if (page == bd->current_page)
                return 0;
 
        /* Data is ignored */
-       ret = i2c_smbus_write_byte(ee1004_set_page[page], 0x00);
+       ret = i2c_smbus_write_byte(bd->set_page[page], 0x00);
        /*
         * Don't give up just yet. Some memory modules will select the page
         * but not ack the command. Check which page is selected now.
         */
-       if (ret == -ENXIO && ee1004_get_current_page() == page)
+       if (ret == -ENXIO && ee1004_get_current_page(bd) == page)
                ret = 0;
        if (ret < 0) {
-               dev_err(dev, "Failed to select page %d (%d)\n", page, ret);
+               dev_err(&client->dev, "Failed to select page %d (%d)\n", page, ret);
                return ret;
        }
 
-       dev_dbg(dev, "Selected page %d\n", page);
-       ee1004_current_page = page;
+       dev_dbg(&client->dev, "Selected page %d\n", page);
+       bd->current_page = page;
 
        return 0;
 }
@@ -106,7 +130,7 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
        page = offset >> EE1004_PAGE_SHIFT;
        offset &= (1 << EE1004_PAGE_SHIFT) - 1;
 
-       status = ee1004_set_current_page(&client->dev, page);
+       status = ee1004_set_current_page(client, page);
        if (status)
                return status;
 
@@ -158,17 +182,34 @@ static struct bin_attribute *ee1004_attrs[] = {
 
 BIN_ATTRIBUTE_GROUPS(ee1004);
 
-static void ee1004_cleanup(int idx)
+static void ee1004_probe_temp_sensor(struct i2c_client *client)
 {
-       if (--ee1004_dev_count == 0)
-               while (--idx >= 0) {
-                       i2c_unregister_device(ee1004_set_page[idx]);
-                       ee1004_set_page[idx] = NULL;
-               }
+       struct i2c_board_info info = { .type = "jc42" };
+       u8 byte14;
+       int ret;
+
+       /* byte 14, bit 7 is set if temp sensor is present */
+       ret = ee1004_eeprom_read(client, &byte14, 14, 1);
+       if (ret != 1 || !(byte14 & BIT(7)))
+               return;
+
+       info.addr = 0x18 | (client->addr & 7);
+
+       i2c_new_client_device(client->adapter, &info);
+}
+
+static void ee1004_cleanup(int idx, struct ee1004_bus_data *bd)
+{
+       if (--bd->dev_count == 0) {
+               while (--idx >= 0)
+                       i2c_unregister_device(bd->set_page[idx]);
+               memset(bd, 0, sizeof(struct ee1004_bus_data));
+       }
 }
 
 static int ee1004_probe(struct i2c_client *client)
 {
+       struct ee1004_bus_data *bd;
        int err, cnr = 0;
 
        /* Make sure we can operate on this adapter */
@@ -178,9 +219,19 @@ static int ee1004_probe(struct i2c_client *client)
                                     I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA))
                return -EPFNOSUPPORT;
 
-       /* Use 2 dummy devices for page select command */
        mutex_lock(&ee1004_bus_lock);
-       if (++ee1004_dev_count == 1) {
+
+       bd = ee1004_get_bus_data(client->adapter);
+       if (!bd) {
+               mutex_unlock(&ee1004_bus_lock);
+               return dev_err_probe(&client->dev, -ENOSPC,
+                                    "Only %d busses supported", EE1004_MAX_BUSSES);
+       }
+
+       i2c_set_clientdata(client, bd);
+
+       if (++bd->dev_count == 1) {
+               /* Use 2 dummy devices for page select command */
                for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) {
                        struct i2c_client *cl;
 
@@ -189,21 +240,19 @@ static int ee1004_probe(struct i2c_client *client)
                                err = PTR_ERR(cl);
                                goto err_clients;
                        }
-                       ee1004_set_page[cnr] = cl;
+                       bd->set_page[cnr] = cl;
                }
 
                /* Remember current page to avoid unneeded page select */
-               err = ee1004_get_current_page();
+               err = ee1004_get_current_page(bd);
                if (err < 0)
                        goto err_clients;
                dev_dbg(&client->dev, "Currently selected page: %d\n", err);
-               ee1004_current_page = err;
-       } else if (client->adapter != ee1004_set_page[0]->adapter) {
-               dev_err(&client->dev,
-                       "Driver only supports devices on a single I2C bus\n");
-               err = -EOPNOTSUPP;
-               goto err_clients;
+               bd->current_page = err;
        }
+
+       ee1004_probe_temp_sensor(client);
+
        mutex_unlock(&ee1004_bus_lock);
 
        dev_info(&client->dev,
@@ -213,7 +262,7 @@ static int ee1004_probe(struct i2c_client *client)
        return 0;
 
  err_clients:
-       ee1004_cleanup(cnr);
+       ee1004_cleanup(cnr, bd);
        mutex_unlock(&ee1004_bus_lock);
 
        return err;
@@ -221,9 +270,11 @@ static int ee1004_probe(struct i2c_client *client)
 
 static void ee1004_remove(struct i2c_client *client)
 {
+       struct ee1004_bus_data *bd = i2c_get_clientdata(client);
+
        /* Remove page select clients if this is the last device */
        mutex_lock(&ee1004_bus_lock);
-       ee1004_cleanup(EE1004_NUM_PAGES);
+       ee1004_cleanup(EE1004_NUM_PAGES, bd);
        mutex_unlock(&ee1004_bus_lock);
 }
 
index 1c6c62a7f7f5535f4c1025ee0d957006a4c5deb4..03319a1fa97fda2bf967dd425af9aef83fc1602d 100644 (file)
@@ -2191,7 +2191,7 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
        int i;
 
        spin_lock_irqsave(&cctx->lock, flags);
-       for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
+       for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
                if (cctx->session[i].sid == sess->sid) {
                        cctx->session[i].valid = false;
                        cctx->sesscount--;
index 3882e97e96a70f580a599f940d6a400a42469c8c..c6eb27d46cb06de4ade9c0cdbbdd270ffe8ab474 100644 (file)
@@ -150,6 +150,7 @@ static int lis3lv02d_i2c_probe(struct i2c_client *client)
        lis3_dev.init     = lis3_i2c_init;
        lis3_dev.read     = lis3_i2c_read;
        lis3_dev.write    = lis3_i2c_write;
+       lis3_dev.reg_ctrl = lis3_reg_ctrl;
        lis3_dev.irq      = client->irq;
        lis3_dev.ac       = lis3lv02d_axis_map;
        lis3_dev.pm_dev   = &client->dev;
index 37db142de413eff98cd942e21d92aca1e93912c9..67d9391f18550e6c6e4c14007dfef17b92d26daf 100644 (file)
@@ -3,6 +3,7 @@
 config INTEL_MEI
        tristate "Intel Management Engine Interface"
        depends on X86 && PCI
+       default GENERIC_CPU || MCORE2 || MATOM || X86_GENERIC
        help
          The Intel Management Engine (Intel ME) provides Manageability,
          Security and Media services for system containing Intel chipsets.
@@ -11,10 +12,11 @@ config INTEL_MEI
          For more information see
          <https://software.intel.com/en-us/manageability/>
 
+if INTEL_MEI
+
 config INTEL_MEI_ME
        tristate "ME Enabled Intel Chipsets"
-       select INTEL_MEI
-       depends on X86 && PCI
+       default y
        help
          MEI support for ME Enabled Intel chipsets.
 
@@ -38,8 +40,6 @@ config INTEL_MEI_ME
 
 config INTEL_MEI_TXE
        tristate "Intel Trusted Execution Environment with ME Interface"
-       select INTEL_MEI
-       depends on X86 && PCI
        help
          MEI Support for Trusted Execution Environment device on Intel SoCs
 
@@ -48,9 +48,7 @@ config INTEL_MEI_TXE
 
 config INTEL_MEI_GSC
        tristate "Intel MEI GSC embedded device"
-       depends on INTEL_MEI
        depends on INTEL_MEI_ME
-       depends on X86 && PCI
        depends on DRM_I915
        help
          Intel auxiliary driver for GSC devices embedded in Intel graphics devices.
@@ -60,6 +58,31 @@ config INTEL_MEI_GSC
          tasks such as graphics card firmware update and security
          tasks.
 
+config INTEL_MEI_VSC_HW
+       tristate "Intel visual sensing controller device transport driver"
+       depends on ACPI && SPI
+       depends on GPIOLIB || COMPILE_TEST
+       help
+         Intel SPI transport driver between host and Intel visual sensing
+         controller (IVSC) device.
+
+         This driver can also be built as a module. If so, the module
+         will be called mei-vsc-hw.
+
+config INTEL_MEI_VSC
+       tristate "Intel visual sensing controller device with ME interface"
+       depends on INTEL_MEI_VSC_HW
+       help
+         Intel MEI over SPI driver for Intel visual sensing controller
+         (IVSC) device embedded in IA platform. It supports camera sharing
+         between IVSC for context sensing and IPU for typical media usage.
+         Select this config should enable transport layer for IVSC device.
+
+         This driver can also be built as a module. If so, the module
+         will be called mei-vsc.
+
 source "drivers/misc/mei/hdcp/Kconfig"
 source "drivers/misc/mei/pxp/Kconfig"
 source "drivers/misc/mei/gsc_proxy/Kconfig"
+
+endif
index 14aee253ae482bbbabdb119f77c780b96585a3af..6f9fdbf1a49590b27b864d028cffc8580380018e 100644 (file)
@@ -31,3 +31,10 @@ CFLAGS_mei-trace.o = -I$(src)
 obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
 obj-$(CONFIG_INTEL_MEI_PXP) += pxp/
 obj-$(CONFIG_INTEL_MEI_GSC_PROXY) += gsc_proxy/
+
+obj-$(CONFIG_INTEL_MEI_VSC_HW) += mei-vsc-hw.o
+mei-vsc-hw-y := vsc-tp.o
+mei-vsc-hw-y += vsc-fw-loader.o
+
+obj-$(CONFIG_INTEL_MEI_VSC) += mei-vsc.o
+mei-vsc-y := platform-vsc.o
index 5f68d9f3d691b56ece3711065dbfc3f6c0cc4bf6..ac78b9d1eccd11a0514cc5095a2fa3e779bb48e3 100644 (file)
@@ -3,7 +3,7 @@
 #
 config INTEL_MEI_GSC_PROXY
        tristate "Intel GSC Proxy services of ME Interface"
-       select INTEL_MEI_ME
+       depends on INTEL_MEI_ME
        depends on DRM_I915
        help
          MEI Support for GSC Proxy Services on Intel platforms.
index 54e1c952690962725f0ae54e718924554f42f262..9be312ec798de96fb362e0dce25ec3ad587d8834 100644 (file)
@@ -3,7 +3,7 @@
 #
 config INTEL_MEI_HDCP
        tristate "Intel HDCP2.2 services of ME Interface"
-       select INTEL_MEI_ME
+       depends on INTEL_MEI_ME
        depends on DRM_I915
        help
          MEI Support for HDCP2.2 Services on Intel platforms.
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
new file mode 100644 (file)
index 0000000..8d303c6
--- /dev/null
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Interface Linux driver
+ */
+
+#include <linux/align.h>
+#include <linux/cache.h>
+#include <linux/cleanup.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/mei.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timekeeping.h>
+#include <linux/types.h>
+
+#include <asm-generic/bug.h>
+#include <asm-generic/unaligned.h>
+
+#include "mei_dev.h"
+#include "vsc-tp.h"
+
+#define MEI_VSC_DRV_NAME               "intel_vsc"
+
+#define MEI_VSC_MAX_MSG_SIZE           512
+
+#define MEI_VSC_POLL_DELAY_US          (50 * USEC_PER_MSEC)
+#define MEI_VSC_POLL_TIMEOUT_US                (200 * USEC_PER_MSEC)
+
+#define mei_dev_to_vsc_hw(dev)         ((struct mei_vsc_hw *)((dev)->hw))
+
+struct mei_vsc_host_timestamp {
+       u64 realtime;
+       u64 boottime;
+};
+
+struct mei_vsc_hw {
+       struct vsc_tp *tp;
+
+       bool fw_ready;
+       bool host_ready;
+
+       atomic_t write_lock_cnt;
+
+       u32 rx_len;
+       u32 rx_hdr;
+
+       /* buffer for tx */
+       char tx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned;
+       /* buffer for rx */
+       char rx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned;
+};
+
+static int mei_vsc_read_helper(struct mei_vsc_hw *hw, u8 *buf,
+                              u32 max_len)
+{
+       struct mei_vsc_host_timestamp ts = {
+               .realtime = ktime_to_ns(ktime_get_real()),
+               .boottime = ktime_to_ns(ktime_get_boottime()),
+       };
+
+       return vsc_tp_xfer(hw->tp, VSC_TP_CMD_READ, &ts, sizeof(ts),
+                          buf, max_len);
+}
+
+static int mei_vsc_write_helper(struct mei_vsc_hw *hw, u8 *buf, u32 len)
+{
+       u8 status;
+
+       return vsc_tp_xfer(hw->tp, VSC_TP_CMD_WRITE, buf, len, &status,
+                          sizeof(status));
+}
+
+static int mei_vsc_fw_status(struct mei_device *mei_dev,
+                            struct mei_fw_status *fw_status)
+{
+       if (!fw_status)
+               return -EINVAL;
+
+       fw_status->count = 0;
+
+       return 0;
+}
+
+static inline enum mei_pg_state mei_vsc_pg_state(struct mei_device *mei_dev)
+{
+       return MEI_PG_OFF;
+}
+
+static void mei_vsc_intr_enable(struct mei_device *mei_dev)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+       vsc_tp_intr_enable(hw->tp);
+}
+
+static void mei_vsc_intr_disable(struct mei_device *mei_dev)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+       vsc_tp_intr_disable(hw->tp);
+}
+
+/* mei framework requires this ops */
+static void mei_vsc_intr_clear(struct mei_device *mei_dev)
+{
+}
+
+/* wait for pending irq handler */
+static void mei_vsc_synchronize_irq(struct mei_device *mei_dev)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+       vsc_tp_intr_synchronize(hw->tp);
+}
+
+static int mei_vsc_hw_config(struct mei_device *mei_dev)
+{
+       return 0;
+}
+
+static bool mei_vsc_host_is_ready(struct mei_device *mei_dev)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+       return hw->host_ready;
+}
+
+static bool mei_vsc_hw_is_ready(struct mei_device *mei_dev)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+       return hw->fw_ready;
+}
+
+static int mei_vsc_hw_start(struct mei_device *mei_dev)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+       int ret, rlen;
+       u8 buf;
+
+       hw->host_ready = true;
+
+       vsc_tp_intr_enable(hw->tp);
+
+       ret = read_poll_timeout(mei_vsc_read_helper, rlen,
+                               rlen >= 0, MEI_VSC_POLL_DELAY_US,
+                               MEI_VSC_POLL_TIMEOUT_US, true,
+                               hw, &buf, sizeof(buf));
+       if (ret) {
+               dev_err(mei_dev->dev, "wait fw ready failed: %d\n", ret);
+               return ret;
+       }
+
+       hw->fw_ready = true;
+
+       return 0;
+}
+
+static bool mei_vsc_hbuf_is_ready(struct mei_device *mei_dev)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+       return atomic_read(&hw->write_lock_cnt) == 0;
+}
+
+static int mei_vsc_hbuf_empty_slots(struct mei_device *mei_dev)
+{
+       return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+static u32 mei_vsc_hbuf_depth(const struct mei_device *mei_dev)
+{
+       return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+static int mei_vsc_write(struct mei_device *mei_dev,
+                        const void *hdr, size_t hdr_len,
+                        const void *data, size_t data_len)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+       char *buf = hw->tx_buf;
+       int ret;
+
+       if (WARN_ON(!hdr || !IS_ALIGNED(hdr_len, 4)))
+               return -EINVAL;
+
+       if (!data || data_len > MEI_VSC_MAX_MSG_SIZE)
+               return -EINVAL;
+
+       atomic_inc(&hw->write_lock_cnt);
+
+       memcpy(buf, hdr, hdr_len);
+       memcpy(buf + hdr_len, data, data_len);
+
+       ret = mei_vsc_write_helper(hw, buf, hdr_len + data_len);
+
+       atomic_dec_if_positive(&hw->write_lock_cnt);
+
+       return ret < 0 ? ret : 0;
+}
+
+static inline u32 mei_vsc_read(const struct mei_device *mei_dev)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+       int ret;
+
+       ret = mei_vsc_read_helper(hw, hw->rx_buf, sizeof(hw->rx_buf));
+       if (ret < 0 || ret < sizeof(u32))
+               return 0;
+       hw->rx_len = ret;
+
+       hw->rx_hdr = get_unaligned_le32(hw->rx_buf);
+
+       return hw->rx_hdr;
+}
+
+static int mei_vsc_count_full_read_slots(struct mei_device *mei_dev)
+{
+       return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+static int mei_vsc_read_slots(struct mei_device *mei_dev, unsigned char *buf,
+                             unsigned long len)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+       struct mei_msg_hdr *hdr;
+
+       hdr = (struct mei_msg_hdr *)&hw->rx_hdr;
+       if (len != hdr->length || hdr->length + sizeof(*hdr) != hw->rx_len)
+               return -EINVAL;
+
+       memcpy(buf, hw->rx_buf + sizeof(*hdr), len);
+
+       return 0;
+}
+
+static bool mei_vsc_pg_in_transition(struct mei_device *mei_dev)
+{
+       return mei_dev->pg_event >= MEI_PG_EVENT_WAIT &&
+              mei_dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
+}
+
+static bool mei_vsc_pg_is_enabled(struct mei_device *mei_dev)
+{
+       return false;
+}
+
+static int mei_vsc_hw_reset(struct mei_device *mei_dev, bool intr_enable)
+{
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+       vsc_tp_reset(hw->tp);
+
+       vsc_tp_intr_disable(hw->tp);
+
+       return vsc_tp_init(hw->tp, mei_dev->dev);
+}
+
+static const struct mei_hw_ops mei_vsc_hw_ops = {
+       .fw_status = mei_vsc_fw_status,
+       .pg_state = mei_vsc_pg_state,
+
+       .host_is_ready = mei_vsc_host_is_ready,
+       .hw_is_ready = mei_vsc_hw_is_ready,
+       .hw_reset = mei_vsc_hw_reset,
+       .hw_config = mei_vsc_hw_config,
+       .hw_start = mei_vsc_hw_start,
+
+       .pg_in_transition = mei_vsc_pg_in_transition,
+       .pg_is_enabled = mei_vsc_pg_is_enabled,
+
+       .intr_clear = mei_vsc_intr_clear,
+       .intr_enable = mei_vsc_intr_enable,
+       .intr_disable = mei_vsc_intr_disable,
+       .synchronize_irq = mei_vsc_synchronize_irq,
+
+       .hbuf_free_slots = mei_vsc_hbuf_empty_slots,
+       .hbuf_is_ready = mei_vsc_hbuf_is_ready,
+       .hbuf_depth = mei_vsc_hbuf_depth,
+       .write = mei_vsc_write,
+
+       .rdbuf_full_slots = mei_vsc_count_full_read_slots,
+       .read_hdr = mei_vsc_read,
+       .read = mei_vsc_read_slots,
+};
+
+static void mei_vsc_event_cb(void *context)
+{
+       struct mei_device *mei_dev = context;
+       struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+       struct list_head cmpl_list;
+       s32 slots;
+       int ret;
+
+       if (mei_dev->dev_state == MEI_DEV_RESETTING ||
+           mei_dev->dev_state == MEI_DEV_INITIALIZING)
+               return;
+
+       INIT_LIST_HEAD(&cmpl_list);
+
+       guard(mutex)(&mei_dev->device_lock);
+
+       while (vsc_tp_need_read(hw->tp)) {
+               /* check slots available for reading */
+               slots = mei_count_full_read_slots(mei_dev);
+
+               ret = mei_irq_read_handler(mei_dev, &cmpl_list, &slots);
+               if (ret) {
+                       if (ret != -ENODATA) {
+                               if (mei_dev->dev_state != MEI_DEV_RESETTING &&
+                                   mei_dev->dev_state != MEI_DEV_POWER_DOWN)
+                                       schedule_work(&mei_dev->reset_work);
+                       }
+
+                       return;
+               }
+       }
+
+       mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
+       ret = mei_irq_write_handler(mei_dev, &cmpl_list);
+       if (ret)
+               dev_err(mei_dev->dev, "dispatch write request failed: %d\n", ret);
+
+       mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
+       mei_irq_compl_handler(mei_dev, &cmpl_list);
+}
+
+static int mei_vsc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mei_device *mei_dev;
+       struct mei_vsc_hw *hw;
+       struct vsc_tp *tp;
+       int ret;
+
+       tp = *(struct vsc_tp **)dev_get_platdata(dev);
+       if (!tp)
+               return dev_err_probe(dev, -ENODEV, "no platform data\n");
+
+       mei_dev = devm_kzalloc(dev, size_add(sizeof(*mei_dev), sizeof(*hw)),
+                              GFP_KERNEL);
+       if (!mei_dev)
+               return -ENOMEM;
+
+       mei_device_init(mei_dev, dev, false, &mei_vsc_hw_ops);
+       mei_dev->fw_f_fw_ver_supported = 0;
+       mei_dev->kind = "ivsc";
+
+       hw = mei_dev_to_vsc_hw(mei_dev);
+       atomic_set(&hw->write_lock_cnt, 0);
+       hw->tp = tp;
+
+       platform_set_drvdata(pdev, mei_dev);
+
+       vsc_tp_register_event_cb(tp, mei_vsc_event_cb, mei_dev);
+
+       ret = mei_start(mei_dev);
+       if (ret) {
+               dev_err_probe(dev, ret, "init hw failed\n");
+               goto err_cancel;
+       }
+
+       ret = mei_register(mei_dev, dev);
+       if (ret)
+               goto err_stop;
+
+       pm_runtime_enable(mei_dev->dev);
+
+       return 0;
+
+err_stop:
+       mei_stop(mei_dev);
+
+err_cancel:
+       mei_cancel_work(mei_dev);
+
+       mei_disable_interrupts(mei_dev);
+
+       return ret;
+}
+
+static int mei_vsc_remove(struct platform_device *pdev)
+{
+       struct mei_device *mei_dev = platform_get_drvdata(pdev);
+
+       pm_runtime_disable(mei_dev->dev);
+
+       mei_stop(mei_dev);
+
+       mei_disable_interrupts(mei_dev);
+
+       mei_deregister(mei_dev);
+
+       return 0;
+}
+
+static int mei_vsc_suspend(struct device *dev)
+{
+       struct mei_device *mei_dev = dev_get_drvdata(dev);
+
+       mei_stop(mei_dev);
+
+       return 0;
+}
+
+static int mei_vsc_resume(struct device *dev)
+{
+       struct mei_device *mei_dev = dev_get_drvdata(dev);
+       int ret;
+
+       ret = mei_restart(mei_dev);
+       if (ret)
+               return ret;
+
+       /* start timer if stopped in suspend */
+       schedule_delayed_work(&mei_dev->timer_work, HZ);
+
+       return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
+
+static const struct platform_device_id mei_vsc_id_table[] = {
+       { MEI_VSC_DRV_NAME },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, mei_vsc_id_table);
+
+static struct platform_driver mei_vsc_drv = {
+       .probe = mei_vsc_probe,
+       .remove = mei_vsc_remove,
+       .id_table = mei_vsc_id_table,
+       .driver = {
+               .name = MEI_VSC_DRV_NAME,
+               .pm = &mei_vsc_pm_ops,
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+       },
+};
+module_platform_driver(mei_vsc_drv);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_DESCRIPTION("Intel Visual Sensing Controller Interface");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(VSC_TP);
index 4029b96afc040803de6fc509487723690474c1fd..e9219b61cd92fb76fcabe89d2c6d0cf1f234390d 100644 (file)
@@ -1,10 +1,9 @@
-
 # SPDX-License-Identifier: GPL-2.0
 # Copyright (c) 2020, Intel Corporation. All rights reserved.
 #
 config INTEL_MEI_PXP
        tristate "Intel PXP services of ME Interface"
-       select INTEL_MEI_ME
+       depends on INTEL_MEI_ME
        depends on DRM_I915
        help
          MEI Support for PXP Services on Intel platforms.
diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c
new file mode 100644 (file)
index 0000000..ffa4ccd
--- /dev/null
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Transport Layer Linux driver
+ */
+
+#include <linux/acpi.h>
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/firmware.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#include "vsc-tp.h"
+
+#define VSC_MAGIC_NUM                  0x49505343 /* IPSC */
+#define VSC_MAGIC_FW                   0x49574653 /* IWFS */
+#define VSC_MAGIC_FILE                 0x46564353 /* FVCS */
+
+#define VSC_ADDR_BASE                  0xE0030000
+#define VSC_EFUSE_ADDR                 (VSC_ADDR_BASE + 0x038)
+#define VSC_STRAP_ADDR                 (VSC_ADDR_BASE + 0x100)
+
+#define VSC_MAINSTEPPING_VERSION_MASK  GENMASK(7, 4)
+#define VSC_MAINSTEPPING_VERSION_A     0
+
+#define VSC_SUBSTEPPING_VERSION_MASK   GENMASK(3, 0)
+#define VSC_SUBSTEPPING_VERSION_0      0
+#define VSC_SUBSTEPPING_VERSION_1      2
+
+#define VSC_BOOT_IMG_OPTION_MASK       GENMASK(15, 0)
+
+#define VSC_SKU_CFG_LOCATION           0x5001A000
+#define VSC_SKU_MAX_SIZE               4100u
+
+#define VSC_ACE_IMG_CNT                        2
+#define VSC_CSI_IMG_CNT                        4
+#define VSC_IMG_CNT_MAX                        6
+
+#define VSC_ROM_PKG_SIZE               256u
+#define VSC_FW_PKG_SIZE                        512u
+
+#define VSC_IMAGE_DIR                  "intel/vsc/"
+
+#define VSC_CSI_IMAGE_NAME             VSC_IMAGE_DIR "ivsc_fw.bin"
+#define VSC_ACE_IMAGE_NAME_FMT         VSC_IMAGE_DIR "ivsc_pkg_%s_0.bin"
+#define VSC_CFG_IMAGE_NAME_FMT         VSC_IMAGE_DIR "ivsc_skucfg_%s_0_1.bin"
+
+#define VSC_IMAGE_PATH_MAX_LEN         64
+
+#define VSC_SENSOR_NAME_MAX_LEN                16
+
+/* command id */
+enum {
+       VSC_CMD_QUERY = 0,
+       VSC_CMD_DL_SET = 1,
+       VSC_CMD_DL_START = 2,
+       VSC_CMD_DL_CONT = 3,
+       VSC_CMD_DUMP_MEM = 4,
+       VSC_CMD_GET_CONT = 8,
+       VSC_CMD_CAM_BOOT = 10,
+};
+
+/* command ack token */
+enum {
+       VSC_TOKEN_BOOTLOADER_REQ = 1,
+       VSC_TOKEN_DUMP_RESP = 4,
+       VSC_TOKEN_ERROR = 7,
+};
+
+/* image type */
+enum {
+       VSC_IMG_BOOTLOADER_TYPE = 1,
+       VSC_IMG_CSI_EM7D_TYPE,
+       VSC_IMG_CSI_SEM_TYPE,
+       VSC_IMG_CSI_RUNTIME_TYPE,
+       VSC_IMG_ACE_VISION_TYPE,
+       VSC_IMG_ACE_CFG_TYPE,
+       VSC_IMG_SKU_CFG_TYPE,
+};
+
+/* image fragments */
+enum {
+       VSC_IMG_BOOTLOADER_FRAG,
+       VSC_IMG_CSI_SEM_FRAG,
+       VSC_IMG_CSI_RUNTIME_FRAG,
+       VSC_IMG_ACE_VISION_FRAG,
+       VSC_IMG_ACE_CFG_FRAG,
+       VSC_IMG_CSI_EM7D_FRAG,
+       VSC_IMG_SKU_CFG_FRAG,
+       VSC_IMG_FRAG_MAX
+};
+
+struct vsc_rom_cmd {
+       __le32 magic;
+       __u8 cmd_id;
+       union {
+               /* download start */
+               struct {
+                       __u8 img_type;
+                       __le16 option;
+                       __le32 img_len;
+                       __le32 img_loc;
+                       __le32 crc;
+                       DECLARE_FLEX_ARRAY(__u8, res);
+               } __packed dl_start;
+               /* download set */
+               struct {
+                       __u8 option;
+                       __le16 img_cnt;
+                       DECLARE_FLEX_ARRAY(__le32, payload);
+               } __packed dl_set;
+               /* download continue */
+               struct {
+                       __u8 end_flag;
+                       __le16 len;
+                       /* 8 is the offset of payload */
+                       __u8 payload[VSC_ROM_PKG_SIZE - 8];
+               } __packed dl_cont;
+               /* dump memory */
+               struct {
+                       __u8 res;
+                       __le16 len;
+                       __le32 addr;
+                       DECLARE_FLEX_ARRAY(__u8, payload);
+               } __packed dump_mem;
+               /* 5 is the offset of padding */
+               __u8 padding[VSC_ROM_PKG_SIZE - 5];
+       } data;
+};
+
+struct vsc_rom_cmd_ack {
+       __le32 magic;
+       __u8 token;
+       __u8 type;
+       __u8 res[2];
+       __u8 payload[];
+};
+
+struct vsc_fw_cmd {
+       __le32 magic;
+       __u8 cmd_id;
+       union {
+               struct {
+                       __le16 option;
+                       __u8 img_type;
+                       __le32 img_len;
+                       __le32 img_loc;
+                       __le32 crc;
+                       DECLARE_FLEX_ARRAY(__u8, res);
+               } __packed dl_start;
+               struct {
+                       __le16 option;
+                       __u8 img_cnt;
+                       DECLARE_FLEX_ARRAY(__le32, payload);
+               } __packed dl_set;
+               struct {
+                       __le32 addr;
+                       __u8 len;
+                       DECLARE_FLEX_ARRAY(__u8, payload);
+               } __packed dump_mem;
+               struct {
+                       __u8 resv[3];
+                       __le32 crc;
+                       DECLARE_FLEX_ARRAY(__u8, payload);
+               } __packed boot;
+               /* 5 is the offset of padding */
+               __u8 padding[VSC_FW_PKG_SIZE - 5];
+       } data;
+};
+
+struct vsc_img {
+       __le32 magic;
+       __le32 option;
+       __le32 image_count;
+       __le32 image_location[VSC_IMG_CNT_MAX];
+};
+
+struct vsc_fw_sign {
+       __le32 magic;
+       __le32 image_size;
+       __u8 image[];
+};
+
+struct vsc_image_code_data {
+       /* fragment index */
+       u8 frag_index;
+       /* image type */
+       u8 image_type;
+};
+
+struct vsc_img_frag {
+       u8 type;
+       u32 location;
+       const u8 *data;
+       u32 size;
+};
+
+/**
+ * struct vsc_fw_loader - represent vsc firmware loader
+ * @dev: device used to request firmware
+ * @tp: transport layer used with the firmware loader
+ * @csi: CSI image
+ * @ace: ACE image
+ * @cfg: config image
+ * @tx_buf: tx buffer
+ * @rx_buf: rx buffer
+ * @option: command option
+ * @count: total image count
+ * @sensor_name: camera sensor name
+ * @frags: image fragments
+ */
+struct vsc_fw_loader {
+       struct device *dev;
+       struct vsc_tp *tp;
+
+       const struct firmware *csi;
+       const struct firmware *ace;
+       const struct firmware *cfg;
+
+       void *tx_buf;
+       void *rx_buf;
+
+       u16 option;
+       u16 count;
+
+       char sensor_name[VSC_SENSOR_NAME_MAX_LEN];
+
+       struct vsc_img_frag frags[VSC_IMG_FRAG_MAX];
+};
+
+static inline u32 vsc_sum_crc(void *data, size_t size)
+{
+       u32 crc = 0;
+       size_t i;
+
+       for (i = 0; i < size; i++)
+               crc += *((u8 *)data + i);
+
+       return crc;
+}
+
+/* get sensor name to construct image name */
+static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader,
+                              struct device *dev)
+{
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
+       union acpi_object obj = {
+               .type = ACPI_TYPE_INTEGER,
+               .integer.value = 1,
+       };
+       struct acpi_object_list arg_list = {
+               .count = 1,
+               .pointer = &obj,
+       };
+       union acpi_object *ret_obj;
+       acpi_handle handle;
+       acpi_status status;
+       int ret = 0;
+
+       handle = ACPI_HANDLE(dev);
+       if (!handle)
+               return -EINVAL;
+
+       status = acpi_evaluate_object(handle, "SID", &arg_list, &buffer);
+       if (ACPI_FAILURE(status)) {
+               dev_err(dev, "can't evaluate SID method: %d\n", status);
+               return -ENODEV;
+       }
+
+       ret_obj = buffer.pointer;
+       if (!ret_obj) {
+               dev_err(dev, "can't locate ACPI buffer\n");
+               return -ENODEV;
+       }
+
+       if (ret_obj->type != ACPI_TYPE_STRING) {
+               dev_err(dev, "found non-string entry\n");
+               ret = -ENODEV;
+               goto out_free_buff;
+       }
+
+       /* string length excludes trailing NUL */
+       if (ret_obj->string.length >= sizeof(fw_loader->sensor_name)) {
+               dev_err(dev, "sensor name buffer too small\n");
+               ret = -EINVAL;
+               goto out_free_buff;
+       }
+
+       memcpy(fw_loader->sensor_name, ret_obj->string.pointer,
+              ret_obj->string.length);
+
+       string_lower(fw_loader->sensor_name, fw_loader->sensor_name);
+
+out_free_buff:
+       ACPI_FREE(buffer.pointer);
+
+       return ret;
+}
+
+static int vsc_identify_silicon(struct vsc_fw_loader *fw_loader)
+{
+       struct vsc_rom_cmd_ack *ack = fw_loader->rx_buf;
+       struct vsc_rom_cmd *cmd = fw_loader->tx_buf;
+       u8 version, sub_version;
+       int ret;
+
+       /* identify stepping information */
+       cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+       cmd->cmd_id = VSC_CMD_DUMP_MEM;
+       cmd->data.dump_mem.addr = cpu_to_le32(VSC_EFUSE_ADDR);
+       cmd->data.dump_mem.len = cpu_to_le16(sizeof(__le32));
+       ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+       if (ret)
+               return ret;
+       if (ack->token == VSC_TOKEN_ERROR)
+               return -EINVAL;
+
+       cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+       cmd->cmd_id = VSC_CMD_GET_CONT;
+       ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+       if (ret)
+               return ret;
+       if (ack->token != VSC_TOKEN_DUMP_RESP)
+               return -EINVAL;
+
+       version = FIELD_GET(VSC_MAINSTEPPING_VERSION_MASK, ack->payload[0]);
+       sub_version = FIELD_GET(VSC_SUBSTEPPING_VERSION_MASK, ack->payload[0]);
+
+       if (version != VSC_MAINSTEPPING_VERSION_A)
+               return -EINVAL;
+
+       if (sub_version != VSC_SUBSTEPPING_VERSION_0 &&
+           sub_version != VSC_SUBSTEPPING_VERSION_1)
+               return -EINVAL;
+
+       dev_info(fw_loader->dev, "silicon stepping version is %u:%u\n",
+                version, sub_version);
+
+       /* identify strap information */
+       cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+       cmd->cmd_id = VSC_CMD_DUMP_MEM;
+       cmd->data.dump_mem.addr = cpu_to_le32(VSC_STRAP_ADDR);
+       cmd->data.dump_mem.len = cpu_to_le16(sizeof(__le32));
+       ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+       if (ret)
+               return ret;
+       if (ack->token == VSC_TOKEN_ERROR)
+               return -EINVAL;
+
+       cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+       cmd->cmd_id = VSC_CMD_GET_CONT;
+       ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+       if (ret)
+               return ret;
+       if (ack->token != VSC_TOKEN_DUMP_RESP)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int vsc_identify_csi_image(struct vsc_fw_loader *fw_loader)
+{
+       const struct firmware *image;
+       struct vsc_fw_sign *sign;
+       struct vsc_img *img;
+       unsigned int i;
+       int ret;
+
+       ret = request_firmware(&image, VSC_CSI_IMAGE_NAME, fw_loader->dev);
+       if (ret)
+               return ret;
+
+       img = (struct vsc_img *)image->data;
+       if (!img) {
+               ret = -ENOENT;
+               goto err_release_image;
+       }
+
+       if (le32_to_cpu(img->magic) != VSC_MAGIC_FILE) {
+               ret = -EINVAL;
+               goto err_release_image;
+       }
+
+       if (le32_to_cpu(img->image_count) != VSC_CSI_IMG_CNT) {
+               ret = -EINVAL;
+               goto err_release_image;
+       }
+       fw_loader->count += le32_to_cpu(img->image_count) - 1;
+
+       fw_loader->option =
+               FIELD_GET(VSC_BOOT_IMG_OPTION_MASK, le32_to_cpu(img->option));
+
+       sign = (struct vsc_fw_sign *)
+               (img->image_location + le32_to_cpu(img->image_count));
+
+       for (i = 0; i < VSC_CSI_IMG_CNT; i++) {
+               /* mapping from CSI image index to image code data */
+               static const struct vsc_image_code_data csi_image_map[] = {
+                       { VSC_IMG_BOOTLOADER_FRAG, VSC_IMG_BOOTLOADER_TYPE },
+                       { VSC_IMG_CSI_SEM_FRAG, VSC_IMG_CSI_SEM_TYPE },
+                       { VSC_IMG_CSI_RUNTIME_FRAG, VSC_IMG_CSI_RUNTIME_TYPE },
+                       { VSC_IMG_CSI_EM7D_FRAG, VSC_IMG_CSI_EM7D_TYPE },
+               };
+               struct vsc_img_frag *frag;
+
+               if ((u8 *)sign + sizeof(*sign) > image->data + image->size) {
+                       ret = -EINVAL;
+                       goto err_release_image;
+               }
+
+               if (le32_to_cpu(sign->magic) != VSC_MAGIC_FW) {
+                       ret = -EINVAL;
+                       goto err_release_image;
+               }
+
+               if (!le32_to_cpu(img->image_location[i])) {
+                       ret = -EINVAL;
+                       goto err_release_image;
+               }
+
+               frag = &fw_loader->frags[csi_image_map[i].frag_index];
+
+               frag->data = sign->image;
+               frag->size = le32_to_cpu(sign->image_size);
+               frag->location = le32_to_cpu(img->image_location[i]);
+               frag->type = csi_image_map[i].image_type;
+
+               sign = (struct vsc_fw_sign *)
+                       (sign->image + le32_to_cpu(sign->image_size));
+       }
+
+       fw_loader->csi = image;
+
+       return 0;
+
+err_release_image:
+       release_firmware(image);
+
+       return ret;
+}
+
+static int vsc_identify_ace_image(struct vsc_fw_loader *fw_loader)
+{
+       char path[VSC_IMAGE_PATH_MAX_LEN];
+       const struct firmware *image;
+       struct vsc_fw_sign *sign;
+       struct vsc_img *img;
+       unsigned int i;
+       int ret;
+
+       snprintf(path, sizeof(path), VSC_ACE_IMAGE_NAME_FMT,
+                fw_loader->sensor_name);
+
+       ret = request_firmware(&image, path, fw_loader->dev);
+       if (ret)
+               return ret;
+
+       img = (struct vsc_img *)image->data;
+       if (!img) {
+               ret = -ENOENT;
+               goto err_release_image;
+       }
+
+       if (le32_to_cpu(img->magic) != VSC_MAGIC_FILE) {
+               ret = -EINVAL;
+               goto err_release_image;
+       }
+
+       if (le32_to_cpu(img->image_count) != VSC_ACE_IMG_CNT) {
+               ret = -EINVAL;
+               goto err_release_image;
+       }
+       fw_loader->count += le32_to_cpu(img->image_count);
+
+       sign = (struct vsc_fw_sign *)
+               (img->image_location + le32_to_cpu(img->image_count));
+
+       for (i = 0; i < VSC_ACE_IMG_CNT; i++) {
+               /* mapping from ACE image index to image code data */
+               static const struct vsc_image_code_data ace_image_map[] = {
+                       { VSC_IMG_ACE_VISION_FRAG, VSC_IMG_ACE_VISION_TYPE },
+                       { VSC_IMG_ACE_CFG_FRAG, VSC_IMG_ACE_CFG_TYPE },
+               };
+               struct vsc_img_frag *frag, *last_frag;
+               u8 frag_index;
+
+               if ((u8 *)sign + sizeof(*sign) > image->data + image->size) {
+                       ret = -EINVAL;
+                       goto err_release_image;
+               }
+
+               if (le32_to_cpu(sign->magic) != VSC_MAGIC_FW) {
+                       ret = -EINVAL;
+                       goto err_release_image;
+               }
+
+               frag_index = ace_image_map[i].frag_index;
+               frag = &fw_loader->frags[frag_index];
+
+               frag->data = sign->image;
+               frag->size = le32_to_cpu(sign->image_size);
+               frag->location = le32_to_cpu(img->image_location[i]);
+               frag->type = ace_image_map[i].image_type;
+
+               if (!frag->location) {
+                       last_frag = &fw_loader->frags[frag_index - 1];
+                       frag->location =
+                               ALIGN(last_frag->location + last_frag->size, SZ_4K);
+               }
+
+               sign = (struct vsc_fw_sign *)
+                       (sign->image + le32_to_cpu(sign->image_size));
+       }
+
+       fw_loader->ace = image;
+
+       return 0;
+
+err_release_image:
+       release_firmware(image);
+
+       return ret;
+}
+
+static int vsc_identify_cfg_image(struct vsc_fw_loader *fw_loader)
+{
+       struct vsc_img_frag *frag = &fw_loader->frags[VSC_IMG_SKU_CFG_FRAG];
+       char path[VSC_IMAGE_PATH_MAX_LEN];
+       const struct firmware *image;
+       u32 size;
+       int ret;
+
+       snprintf(path, sizeof(path), VSC_CFG_IMAGE_NAME_FMT,
+                fw_loader->sensor_name);
+
+       ret = request_firmware(&image, path, fw_loader->dev);
+       if (ret)
+               return ret;
+
+       /* identify image size */
+       if (image->size <= sizeof(u32) || image->size > VSC_SKU_MAX_SIZE) {
+               ret = -EINVAL;
+               goto err_release_image;
+       }
+
+       size = le32_to_cpu(*((__le32 *)image->data)) + sizeof(u32);
+       if (image->size != size) {
+               ret = -EINVAL;
+               goto err_release_image;
+       }
+
+       frag->data = image->data;
+       frag->size = image->size;
+       frag->type = VSC_IMG_SKU_CFG_TYPE;
+       frag->location = VSC_SKU_CFG_LOCATION;
+
+       fw_loader->cfg = image;
+
+       return 0;
+
+err_release_image:
+       release_firmware(image);
+
+       return ret;
+}
+
+static int vsc_download_bootloader(struct vsc_fw_loader *fw_loader)
+{
+       struct vsc_img_frag *frag = &fw_loader->frags[VSC_IMG_BOOTLOADER_FRAG];
+       struct vsc_rom_cmd_ack *ack = fw_loader->rx_buf;
+       struct vsc_rom_cmd *cmd = fw_loader->tx_buf;
+       u32 len, c_len;
+       size_t remain;
+       const u8 *p;
+       int ret;
+
+       cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+       cmd->cmd_id = VSC_CMD_QUERY;
+       ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+       if (ret)
+               return ret;
+       if (ack->token != VSC_TOKEN_DUMP_RESP &&
+           ack->token != VSC_TOKEN_BOOTLOADER_REQ)
+               return -EINVAL;
+
+       cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+       cmd->cmd_id = VSC_CMD_DL_START;
+       cmd->data.dl_start.option = cpu_to_le16(fw_loader->option);
+       cmd->data.dl_start.img_type = frag->type;
+       cmd->data.dl_start.img_len = cpu_to_le32(frag->size);
+       cmd->data.dl_start.img_loc = cpu_to_le32(frag->location);
+
+       c_len = offsetof(struct vsc_rom_cmd, data.dl_start.crc);
+       cmd->data.dl_start.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+       ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_ROM_PKG_SIZE);
+       if (ret)
+               return ret;
+
+       p = frag->data;
+       remain = frag->size;
+
+       /* download image data */
+       while (remain > 0) {
+               len = min(remain, sizeof(cmd->data.dl_cont.payload));
+
+               cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+               cmd->cmd_id = VSC_CMD_DL_CONT;
+               cmd->data.dl_cont.len = cpu_to_le16(len);
+               cmd->data.dl_cont.end_flag = remain == len;
+               memcpy(cmd->data.dl_cont.payload, p, len);
+
+               ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_ROM_PKG_SIZE);
+               if (ret)
+                       return ret;
+
+               p += len;
+               remain -= len;
+       }
+
+       return 0;
+}
+
+static int vsc_download_firmware(struct vsc_fw_loader *fw_loader)
+{
+       struct vsc_fw_cmd *cmd = fw_loader->tx_buf;
+       unsigned int i, index = 0;
+       u32 c_len;
+       int ret;
+
+       cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+       cmd->cmd_id = VSC_CMD_DL_SET;
+       cmd->data.dl_set.img_cnt = cpu_to_le16(fw_loader->count);
+       put_unaligned_le16(fw_loader->option, &cmd->data.dl_set.option);
+
+       for (i = VSC_IMG_CSI_SEM_FRAG; i <= VSC_IMG_CSI_EM7D_FRAG; i++) {
+               struct vsc_img_frag *frag = &fw_loader->frags[i];
+
+               cmd->data.dl_set.payload[index++] = cpu_to_le32(frag->location);
+               cmd->data.dl_set.payload[index++] = cpu_to_le32(frag->size);
+       }
+
+       c_len = offsetof(struct vsc_fw_cmd, data.dl_set.payload[index]);
+       cmd->data.dl_set.payload[index] = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+       ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE);
+       if (ret)
+               return ret;
+
+       for (i = VSC_IMG_CSI_SEM_FRAG; i < VSC_IMG_FRAG_MAX; i++) {
+               struct vsc_img_frag *frag = &fw_loader->frags[i];
+               const u8 *p;
+               u32 remain;
+
+               cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+               cmd->cmd_id = VSC_CMD_DL_START;
+               cmd->data.dl_start.img_type = frag->type;
+               cmd->data.dl_start.img_len = cpu_to_le32(frag->size);
+               cmd->data.dl_start.img_loc = cpu_to_le32(frag->location);
+               put_unaligned_le16(fw_loader->option, &cmd->data.dl_start.option);
+
+               c_len = offsetof(struct vsc_fw_cmd, data.dl_start.crc);
+               cmd->data.dl_start.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+               ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE);
+               if (ret)
+                       return ret;
+
+               p = frag->data;
+               remain = frag->size;
+
+               /* download image data */
+               while (remain > 0) {
+                       u32 len = min(remain, VSC_FW_PKG_SIZE);
+
+                       memcpy(fw_loader->tx_buf, p, len);
+                       memset(fw_loader->tx_buf + len, 0, VSC_FW_PKG_SIZE - len);
+
+                       ret = vsc_tp_rom_xfer(fw_loader->tp, fw_loader->tx_buf,
+                                             NULL, VSC_FW_PKG_SIZE);
+                       if (ret)
+                               break;
+
+                       p += len;
+                       remain -= len;
+               }
+       }
+
+       cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+       cmd->cmd_id = VSC_CMD_CAM_BOOT;
+
+       c_len = offsetof(struct vsc_fw_cmd, data.dl_start.crc);
+       cmd->data.boot.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+       return vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE);
+}
+
+/**
+ * vsc_tp_init - init vsc_tp
+ * @tp: vsc_tp device handle
+ * @dev: device node for mei vsc device
+ * Return: 0 in case of success, negative value in case of error
+ */
+int vsc_tp_init(struct vsc_tp *tp, struct device *dev)
+{
+       struct vsc_fw_loader *fw_loader __free(kfree) = NULL;
+       void *tx_buf __free(kfree) = NULL;
+       void *rx_buf __free(kfree) = NULL;
+       int ret;
+
+       fw_loader = kzalloc(sizeof(*fw_loader), GFP_KERNEL);
+       if (!fw_loader)
+               return -ENOMEM;
+
+       tx_buf = kzalloc(VSC_FW_PKG_SIZE, GFP_KERNEL);
+       if (!tx_buf)
+               return -ENOMEM;
+
+       rx_buf = kzalloc(VSC_FW_PKG_SIZE, GFP_KERNEL);
+       if (!rx_buf)
+               return -ENOMEM;
+
+       fw_loader->tx_buf = tx_buf;
+       fw_loader->rx_buf = rx_buf;
+
+       fw_loader->tp = tp;
+       fw_loader->dev = dev;
+
+       ret = vsc_get_sensor_name(fw_loader, dev);
+       if (ret)
+               return ret;
+
+       ret = vsc_identify_silicon(fw_loader);
+       if (ret)
+               return ret;
+
+       ret = vsc_identify_csi_image(fw_loader);
+       if (ret)
+               return ret;
+
+       ret = vsc_identify_ace_image(fw_loader);
+       if (ret)
+               goto err_release_csi;
+
+       ret = vsc_identify_cfg_image(fw_loader);
+       if (ret)
+               goto err_release_ace;
+
+       ret = vsc_download_bootloader(fw_loader);
+       if (!ret)
+               ret = vsc_download_firmware(fw_loader);
+
+       release_firmware(fw_loader->cfg);
+
+err_release_ace:
+       release_firmware(fw_loader->ace);
+
+err_release_csi:
+       release_firmware(fw_loader->csi);
+
+       return ret;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_init, VSC_TP);
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
new file mode 100644 (file)
index 0000000..6f4a4be
--- /dev/null
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Transport Layer Linux driver
+ */
+
+#include <linux/acpi.h>
+#include <linux/cleanup.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "vsc-tp.h"
+
+#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS    20
+#define VSC_TP_ROM_BOOTUP_DELAY_MS             10
+#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US                (500 * USEC_PER_MSEC)
+#define VSC_TP_ROM_XFER_POLL_DELAY_US          (20 * USEC_PER_MSEC)
+#define VSC_TP_WAIT_FW_ASSERTED_TIMEOUT                (2 * HZ)
+#define VSC_TP_MAX_XFER_COUNT                  5
+
+#define VSC_TP_PACKET_SYNC                     0x31
+#define VSC_TP_CRC_SIZE                                sizeof(u32)
+#define VSC_TP_MAX_MSG_SIZE                    2048
+/* SPI xfer timeout size */
+#define VSC_TP_XFER_TIMEOUT_BYTES              700
+#define VSC_TP_PACKET_PADDING_SIZE             1
+#define VSC_TP_PACKET_SIZE(pkt) \
+       (sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
+#define VSC_TP_MAX_PACKET_SIZE \
+       (sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
+#define VSC_TP_MAX_XFER_SIZE \
+       (VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
+#define VSC_TP_NEXT_XFER_LEN(len, offset) \
+       ((len) + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - (offset) + VSC_TP_PACKET_PADDING_SIZE)
+
+struct vsc_tp_packet {
+       __u8 sync;
+       __u8 cmd;
+       __le16 len;
+       __le32 seq;
+       __u8 buf[] __counted_by(len);
+};
+
+struct vsc_tp {
+       /* do the actual data transfer */
+       struct spi_device *spi;
+
+       /* bind with mei framework */
+       struct platform_device *pdev;
+
+       struct gpio_desc *wakeuphost;
+       struct gpio_desc *resetfw;
+       struct gpio_desc *wakeupfw;
+
+       /* command sequence number */
+       u32 seq;
+
+       /* command buffer */
+       void *tx_buf;
+       void *rx_buf;
+
+       atomic_t assert_cnt;
+       wait_queue_head_t xfer_wait;
+
+       vsc_tp_event_cb_t event_notify;
+       void *event_notify_context;
+
+       /* used to protect command download */
+       struct mutex mutex;
+};
+
+/* GPIO resources */
+static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
+static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
+static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
+static const struct acpi_gpio_params wakeupfw = { 3, 0, false };
+
+static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
+       { "wakeuphost-gpios", &wakeuphost_gpio, 1 },
+       { "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
+       { "resetfw-gpios", &resetfw_gpio, 1 },
+       { "wakeupfw-gpios", &wakeupfw, 1 },
+       {}
+};
+
+/* wakeup firmware and wait for response */
+static int vsc_tp_wakeup_request(struct vsc_tp *tp)
+{
+       int ret;
+
+       gpiod_set_value_cansleep(tp->wakeupfw, 0);
+
+       ret = wait_event_timeout(tp->xfer_wait,
+                                atomic_read(&tp->assert_cnt) &&
+                                gpiod_get_value_cansleep(tp->wakeuphost),
+                                VSC_TP_WAIT_FW_ASSERTED_TIMEOUT);
+       if (!ret)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static void vsc_tp_wakeup_release(struct vsc_tp *tp)
+{
+       atomic_dec_if_positive(&tp->assert_cnt);
+
+       gpiod_set_value_cansleep(tp->wakeupfw, 1);
+}
+
+static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
+{
+       struct spi_message msg = { 0 };
+       struct spi_transfer xfer = {
+               .tx_buf = obuf,
+               .rx_buf = ibuf,
+               .len = len,
+       };
+
+       spi_message_init_with_transfers(&msg, &xfer, 1);
+
+       return spi_sync_locked(tp->spi, &msg);
+}
+
+static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
+                             void *ibuf, u16 ilen)
+{
+       int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
+       int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
+       u8 *src, *crc_src, *rx_buf = tp->rx_buf;
+       int count_down = VSC_TP_MAX_XFER_COUNT;
+       u32 recv_crc = 0, crc = ~0;
+       struct vsc_tp_packet ack;
+       u8 *dst = (u8 *)&ack;
+       bool synced = false;
+
+       do {
+               ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
+               if (ret)
+                       return ret;
+               memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);
+
+               if (synced) {
+                       src = rx_buf;
+                       src_len = next_xfer_len;
+               } else {
+                       src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
+                       if (!src)
+                               continue;
+                       synced = true;
+                       src_len = next_xfer_len - (src - rx_buf);
+               }
+
+               /* traverse received data */
+               while (src_len > 0) {
+                       cpy_len = min(src_len, dst_len);
+                       memcpy(dst, src, cpy_len);
+                       crc_src = src;
+                       src += cpy_len;
+                       src_len -= cpy_len;
+                       dst += cpy_len;
+                       dst_len -= cpy_len;
+
+                       if (offset < sizeof(ack)) {
+                               offset += cpy_len;
+                               crc = crc32(crc, crc_src, cpy_len);
+
+                               if (!src_len)
+                                       continue;
+
+                               if (le16_to_cpu(ack.len)) {
+                                       dst = ibuf;
+                                       dst_len = min(ilen, le16_to_cpu(ack.len));
+                               } else {
+                                       dst = (u8 *)&recv_crc;
+                                       dst_len = sizeof(recv_crc);
+                               }
+                       } else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
+                               offset += cpy_len;
+                               crc = crc32(crc, crc_src, cpy_len);
+
+                               if (src_len) {
+                                       int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;
+
+                                       cpy_len = min(src_len, remain);
+                                       offset += cpy_len;
+                                       crc = crc32(crc, src, cpy_len);
+                                       src += cpy_len;
+                                       src_len -= cpy_len;
+                                       if (src_len) {
+                                               dst = (u8 *)&recv_crc;
+                                               dst_len = sizeof(recv_crc);
+                                               continue;
+                                       }
+                               }
+                               next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
+                       } else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
+                               offset += cpy_len;
+
+                               if (src_len) {
+                                       /* terminate the traverse */
+                                       next_xfer_len = 0;
+                                       break;
+                               }
+                               next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
+                       }
+               }
+       } while (next_xfer_len > 0 && --count_down);
+
+       if (next_xfer_len > 0)
+               return -EAGAIN;
+
+       if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
+               dev_err(&tp->spi->dev, "recv crc or seq error\n");
+               return -EINVAL;
+       }
+
+       if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
+           ack.cmd == VSC_TP_CMD_BUSY) {
+               dev_err(&tp->spi->dev, "recv cmd ack error\n");
+               return -EAGAIN;
+       }
+
+       return min(le16_to_cpu(ack.len), ilen);
+}
+
+/**
+ * vsc_tp_xfer - transfer data to firmware
+ * @tp: vsc_tp device handle
+ * @cmd: the command to be sent to the device
+ * @obuf: the tx buffer to be sent to the device
+ * @olen: the length of tx buffer
+ * @ibuf: the rx buffer to receive from the device
+ * @ilen: the length of rx buffer
+ *
+ * Serializes against other transfers via tp->mutex, frames @obuf into a
+ * vsc_tp_packet (sync/cmd/len/seq header + payload + trailing CRC32),
+ * wakes the firmware and runs the SPI exchange.
+ *
+ * Return: the length of received data in case of success,
+ *     otherwise negative value
+ */
+int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
+               void *ibuf, size_t ilen)
+{
+       struct vsc_tp_packet *pkt = tp->tx_buf;
+       u32 crc;
+       int ret;
+
+       if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
+               return -EINVAL;
+
+       guard(mutex)(&tp->mutex);
+
+       pkt->sync = VSC_TP_PACKET_SYNC;
+       pkt->cmd = cmd;
+       pkt->len = cpu_to_le16(olen);
+       pkt->seq = cpu_to_le32(++tp->seq);
+       memcpy(pkt->buf, obuf, olen);
+
+       /*
+        * The CRC covers the packet header plus the payload. The original
+        * code used sizeof(pkt) -- the size of the *pointer* -- which only
+        * happens to equal the 8-byte packed header on 64-bit builds and
+        * would compute a wrong CRC on 32-bit. offsetof() of the payload
+        * member gives the header size regardless of architecture.
+        */
+       crc = ~crc32(~0, (u8 *)pkt, offsetof(struct vsc_tp_packet, buf) + olen);
+       memcpy(pkt->buf + olen, &crc, sizeof(crc));
+
+       ret = vsc_tp_wakeup_request(tp);
+       if (unlikely(ret))
+               dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
+       else
+               ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);
+
+       vsc_tp_wakeup_release(tp);
+
+       return ret;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, VSC_TP);
+
+/**
+ * vsc_tp_rom_xfer - transfer data to rom code
+ * @tp: vsc_tp device handle
+ * @obuf: the data buffer to be sent to the device
+ * @ibuf: the buffer to receive data from the device
+ * @len: the length of tx buffer and rx buffer
+ *
+ * Both buffers must be a whole number of 32-bit words; the ROM protocol
+ * exchanges big-endian words.
+ *
+ * Return: 0 in case of success, negative value in case of error
+ */
+int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
+{
+       size_t words = len / sizeof(__be32);
+       int ret;
+
+       if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
+               return -EINVAL;
+
+       guard(mutex)(&tp->mutex);
+
+       /* rom xfer is big endian */
+       cpu_to_be32_array(tp->tx_buf, obuf, words);
+
+       /* wait for the ROM to pull the wakeuphost line low */
+       ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
+                               !ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
+                               VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
+                               tp->wakeuphost);
+       if (ret) {
+               dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
+               return ret;
+       }
+
+       ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len);
+       if (ret)
+               return ret;
+
+       /*
+        * Receive direction: convert big-endian wire data to CPU order.
+        * The original called cpu_to_be32_array() here; a 32-bit swap is
+        * involutive so the bytes are identical, but be32_to_cpu_array()
+        * is the correct API (and carries the right sparse annotations).
+        */
+       if (ibuf)
+               be32_to_cpu_array(ibuf, tp->rx_buf, words);
+
+       return ret;
+}
+
+/**
+ * vsc_tp_reset - reset vsc transport layer
+ * @tp: vsc_tp device handle
+ *
+ * Hard-resets the firmware by toggling the reset GPIO and waiting for the
+ * ROM to boot. The SPI IRQ is masked for the whole sequence, presumably so
+ * that edges generated by the reset itself are not serviced -- the latched
+ * interrupt count is cleared before re-enabling.
+ */
+void vsc_tp_reset(struct vsc_tp *tp)
+{
+       disable_irq(tp->spi->irq);
+
+       /* toggle reset pin */
+       gpiod_set_value_cansleep(tp->resetfw, 0);
+       msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
+       gpiod_set_value_cansleep(tp->resetfw, 1);
+
+       /* wait for ROM */
+       msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);
+
+       /*
+        * Set default host wakeup pin to non-active
+        * to avoid unexpected host irq interrupt.
+        */
+       gpiod_set_value_cansleep(tp->wakeupfw, 1);
+
+       /* drop interrupt assertions latched before/during the reset */
+       atomic_set(&tp->assert_cnt, 0);
+
+       enable_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, VSC_TP);
+
+/**
+ * vsc_tp_need_read - check whether the device has data pending for the host
+ * @tp: vsc_tp device handle
+ * Return: true if there is pending data, otherwise false
+ */
+bool vsc_tp_need_read(struct vsc_tp *tp)
+{
+       /*
+        * Data is pending only when at least one interrupt assertion has
+        * been latched by vsc_tp_isr() and both wakeup lines are high.
+        * Short-circuit order matches the original checks.
+        */
+       return atomic_read(&tp->assert_cnt) &&
+              gpiod_get_value_cansleep(tp->wakeuphost) &&
+              gpiod_get_value_cansleep(tp->wakeupfw);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, VSC_TP);
+
+/**
+ * vsc_tp_register_event_cb - register a callback function to receive event
+ * @tp: vsc_tp device handle
+ * @event_cb: callback function
+ * @context: execution context of event callback
+ * Return: 0 in case of success, negative value in case of error
+ *
+ * The callback runs from the threaded IRQ handler (vsc_tp_thread_isr()).
+ * NOTE(review): no locking, and a second registration silently replaces
+ * the first -- confirm single-consumer usage.
+ */
+int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
+                           void *context)
+{
+       tp->event_notify = event_cb;
+       tp->event_notify_context = context;
+
+       return 0;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
+
+/**
+ * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
+ * @tp: vsc_tp device handle
+ *
+ * Waits until any in-flight handler for the SPI IRQ line has completed.
+ */
+void vsc_tp_intr_synchronize(struct vsc_tp *tp)
+{
+       synchronize_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, VSC_TP);
+
+/**
+ * vsc_tp_intr_enable - enable vsc_tp interrupt
+ * @tp: vsc_tp device handle
+ *
+ * Must pair with a preceding vsc_tp_intr_disable() (enable/disable nest).
+ */
+void vsc_tp_intr_enable(struct vsc_tp *tp)
+{
+       enable_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, VSC_TP);
+
+/**
+ * vsc_tp_intr_disable - disable vsc_tp interrupt
+ * @tp: vsc_tp device handle
+ *
+ * Masks the SPI IRQ; with IRQ_DISABLE_UNLAZY set in probe this takes
+ * effect at the interrupt controller immediately.
+ */
+void vsc_tp_intr_disable(struct vsc_tp *tp)
+{
+       disable_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
+
+/* Hard IRQ (falling edge): latch the assertion and kick any sleeping xfer. */
+static irqreturn_t vsc_tp_isr(int irq, void *data)
+{
+       struct vsc_tp *tp = data;
+
+       /* consumed by vsc_tp_need_read(); cleared in vsc_tp_reset() */
+       atomic_inc(&tp->assert_cnt);
+
+       wake_up(&tp->xfer_wait);
+
+       return IRQ_WAKE_THREAD;
+}
+
+/* Threaded IRQ: forward the event to the registered consumer, if any. */
+static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+{
+       struct vsc_tp *tp = data;
+
+       /* event_notify stays NULL until vsc_tp_register_event_cb() is called */
+       if (tp->event_notify)
+               tp->event_notify(tp->event_notify_context);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * acpi_dev_for_each_child() callback: capture the first child ACPI device
+ * seen and return nonzero so the iteration stops there.
+ */
+static int vsc_tp_match_any(struct acpi_device *adev, void *data)
+{
+       struct acpi_device **__adev = data;
+
+       *__adev = adev;
+
+       return 1;
+}
+
+/*
+ * Probe: allocate the transfer buffers, claim the three ACPI GPIOs,
+ * install the IRQ handlers and register the single "intel_vsc" child
+ * platform device that consumes this transport.
+ */
+static int vsc_tp_probe(struct spi_device *spi)
+{
+       struct platform_device_info pinfo = { 0 };
+       struct device *dev = &spi->dev;
+       struct platform_device *pdev;
+       struct acpi_device *adev;
+       struct vsc_tp *tp;
+       int ret;
+
+       tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
+       if (!tp)
+               return -ENOMEM;
+
+       /* full-size SPI tx/rx buffers reused by every transfer */
+       tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
+       if (!tp->tx_buf)
+               return -ENOMEM;
+
+       tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
+       if (!tp->rx_buf)
+               return -ENOMEM;
+
+       /* map ACPI GPIO resources to the names requested below */
+       ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
+       if (ret)
+               return ret;
+
+       tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
+       if (IS_ERR(tp->wakeuphost))
+               return PTR_ERR(tp->wakeuphost);
+
+       tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
+       if (IS_ERR(tp->resetfw))
+               return PTR_ERR(tp->resetfw);
+
+       tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
+       if (IS_ERR(tp->wakeupfw))
+               return PTR_ERR(tp->wakeupfw);
+
+       atomic_set(&tp->assert_cnt, 0);
+       init_waitqueue_head(&tp->xfer_wait);
+       tp->spi = spi;
+
+       /* disable_irq() must mask at the irqchip immediately (no lazy disable) */
+       irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+       ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr,
+                                       vsc_tp_thread_isr,
+                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                       dev_name(dev), tp);
+       if (ret)
+               return ret;
+
+       mutex_init(&tp->mutex);
+
+       /* only one child acpi device */
+       ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
+                                     vsc_tp_match_any, &adev);
+       if (!ret) {
+               ret = -ENODEV;
+               goto err_destroy_lock;
+       }
+       pinfo.fwnode = acpi_fwnode_handle(adev);
+
+       pinfo.name = "intel_vsc";
+       /* platform data is a copy of the tp pointer itself */
+       pinfo.data = &tp;
+       pinfo.size_data = sizeof(tp);
+       pinfo.id = PLATFORM_DEVID_NONE;
+
+       pdev = platform_device_register_full(&pinfo);
+       if (IS_ERR(pdev)) {
+               ret = PTR_ERR(pdev);
+               goto err_destroy_lock;
+       }
+
+       tp->pdev = pdev;
+       spi_set_drvdata(spi, tp);
+
+       return 0;
+
+err_destroy_lock:
+       mutex_destroy(&tp->mutex);
+
+       return ret;
+}
+
+/* Remove: tear down the child consumer first, then the transfer lock. */
+static void vsc_tp_remove(struct spi_device *spi)
+{
+       struct vsc_tp *tp = spi_get_drvdata(spi);
+
+       platform_device_unregister(tp->pdev);
+
+       mutex_destroy(&tp->mutex);
+}
+
+/* ACPI HIDs of supported IVSC devices; sentinel-terminated */
+static const struct acpi_device_id vsc_tp_acpi_ids[] = {
+       { "INTC1009" }, /* Raptor Lake */
+       { "INTC1058" }, /* Tiger Lake */
+       { "INTC1094" }, /* Alder Lake */
+       {}
+};
+MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);
+
+/* SPI driver glue: probe/remove plus the ACPI match table above. */
+static struct spi_driver vsc_tp_driver = {
+       .probe = vsc_tp_probe,
+       .remove = vsc_tp_remove,
+       .driver = {
+               .name = "vsc-tp",
+               .acpi_match_table = vsc_tp_acpi_ids,
+       },
+};
+module_spi_driver(vsc_tp_driver);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h
new file mode 100644 (file)
index 0000000..f9513dd
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Transport Layer Linux driver
+ */
+
+#ifndef _VSC_TP_H_
+#define _VSC_TP_H_
+
+#include <linux/types.h>
+
+#define VSC_TP_CMD_WRITE       0x01
+#define VSC_TP_CMD_READ                0x02
+
+#define VSC_TP_CMD_ACK         0x10
+#define VSC_TP_CMD_NACK                0x11
+#define VSC_TP_CMD_BUSY                0x12
+
+struct vsc_tp;
+
+/**
+ * typedef vsc_tp_event_cb_t - event callback function signature
+ * @context: the execution context of who registered this callback
+ *
+ * The callback function is called in interrupt context and the data
+ * payload is only valid during the call. If the user needs access
+ * the data payload later, it must copy the payload.
+ */
+typedef void (*vsc_tp_event_cb_t)(void *context);
+
+int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf,
+                   size_t len);
+
+int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
+               void *ibuf, size_t ilen);
+
+int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
+                            void *context);
+
+void vsc_tp_intr_enable(struct vsc_tp *tp);
+void vsc_tp_intr_disable(struct vsc_tp *tp);
+void vsc_tp_intr_synchronize(struct vsc_tp *tp);
+
+void vsc_tp_reset(struct vsc_tp *tp);
+
+bool vsc_tp_need_read(struct vsc_tp *tp);
+
+int vsc_tp_init(struct vsc_tp *tp, struct device *dev);
+
+#endif
diff --git a/drivers/misc/nsm.c b/drivers/misc/nsm.c
new file mode 100644 (file)
index 0000000..0eaa3b4
--- /dev/null
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Amazon Nitro Secure Module driver.
+ *
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * The Nitro Secure Module implements commands via CBOR over virtio.
+ * This driver exposes raw message ioctls on /dev/nsm that user
+ * space can use to issue these commands.
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/hw_random.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio.h>
+#include <linux/wait.h>
+#include <uapi/linux/nsm.h>
+
+/* Timeout for NSM virtqueue response in milliseconds. */
+#define NSM_DEFAULT_TIMEOUT_MSECS (120000) /* 2 minutes */
+
+/* Request payload; len is the number of valid bytes in data[] */
+struct nsm_data_req {
+       u32 len;
+       u8  data[NSM_REQUEST_MAX_SIZE];
+};
+
+/* Response payload; len is the number of valid bytes in data[] */
+struct nsm_data_resp {
+       u32 len;
+       u8  data[NSM_RESPONSE_MAX_SIZE];
+};
+
+/* Full NSM request/response message */
+struct nsm_msg {
+       struct nsm_data_req req;
+       struct nsm_data_resp resp;
+};
+
+/*
+ * Per-device state. @msg is the single request/response exchange buffer;
+ * @lock serializes every user of it (the ioctl path and the hwrng path).
+ * NOTE(review): @misc_init is never referenced in this file -- confirm
+ * whether it is dead weight.
+ */
+struct nsm {
+       struct virtio_device *vdev;
+       struct virtqueue     *vq;
+       struct mutex          lock;
+       struct completion     cmd_done;
+       struct miscdevice     misc;
+       struct hwrng          hwrng;
+       struct work_struct    misc_init;
+       struct nsm_msg        msg;
+};
+
+/* NSM device ID: match any revision of VIRTIO_ID_NITRO_SEC_MOD */
+static const struct virtio_device_id id_table[] = {
+       { VIRTIO_ID_NITRO_SEC_MOD, VIRTIO_DEV_ANY_ID },
+       { 0 },
+};
+
+/* Map the miscdevice stored in file->private_data back to its struct nsm. */
+static struct nsm *file_to_nsm(struct file *file)
+{
+       return container_of(file->private_data, struct nsm, misc);
+}
+
+/* Map the embedded hwrng back to its struct nsm. */
+static struct nsm *hwrng_to_nsm(struct hwrng *rng)
+{
+       return container_of(rng, struct nsm, hwrng);
+}
+
+/* CBOR initial byte: major type in the top 3 bits, argument in the low 5 */
+#define CBOR_TYPE_MASK  0xE0
+#define CBOR_TYPE_MAP 0xA0
+#define CBOR_TYPE_TEXT 0x60
+/*
+ * NOTE(review): 0x40 is CBOR major type 2 (byte string); major type 4
+ * ("array") would be 0x80. "Array" here means byte array -- confirm
+ * against RFC 8949 before reusing these names elsewhere.
+ */
+#define CBOR_TYPE_ARRAY 0x40
+#define CBOR_HEADER_SIZE_SHORT 1
+
+/* Low-5-bit argument: <=23 inline length, 24..27 = 1/2/4/8-byte length */
+#define CBOR_SHORT_SIZE_MAX_VALUE 23
+#define CBOR_LONG_SIZE_U8  24
+#define CBOR_LONG_SIZE_U16 25
+#define CBOR_LONG_SIZE_U32 26
+#define CBOR_LONG_SIZE_U64 27
+
+/* True when @cbor_object is non-empty and starts a CBOR byte-array item. */
+static bool cbor_object_is_array(const u8 *cbor_object, size_t cbor_object_size)
+{
+       if (!cbor_object || !cbor_object_size)
+               return false;
+
+       return (cbor_object[0] & CBOR_TYPE_MASK) == CBOR_TYPE_ARRAY;
+}
+
+/*
+ * Parse a CBOR byte-array item at the start of @cbor_object.
+ *
+ * On success points *cbor_array at the first payload byte and returns the
+ * payload length (<= INT_MAX); returns -EFAULT on malformed input.
+ *
+ * Fix vs. original: array_len was only assigned for initial-byte argument
+ * values 23..27, so short encodings 0..22 (and reserved values 28..31)
+ * read an uninitialized variable. Short encodings are now handled by the
+ * default branch and reserved encodings are rejected up front. The
+ * duplicated bounds check on array_offset was dropped.
+ */
+static int cbor_object_get_array(u8 *cbor_object, size_t cbor_object_size, u8 **cbor_array)
+{
+       u8 cbor_short_size;
+       void *array_len_p;
+       u64 array_len;
+       u64 array_offset;
+
+       if (!cbor_object_is_array(cbor_object, cbor_object_size))
+               return -EFAULT;
+
+       cbor_short_size = (cbor_object[0] & 0x1F);
+
+       /* Reserved (28..30) and indefinite-length (31) encodings: reject */
+       if (cbor_short_size > CBOR_LONG_SIZE_U64)
+               return -EFAULT;
+
+       /* Decoding byte array length */
+       array_offset = CBOR_HEADER_SIZE_SHORT;
+       if (cbor_short_size >= CBOR_LONG_SIZE_U8)
+               array_offset += BIT(cbor_short_size - CBOR_LONG_SIZE_U8);
+
+       if (cbor_object_size < array_offset)
+               return -EFAULT;
+
+       array_len_p = &cbor_object[1];
+
+       switch (cbor_short_size) {
+       case CBOR_LONG_SIZE_U8:
+               array_len = *(u8 *)array_len_p;
+               break;
+       case CBOR_LONG_SIZE_U16:
+               array_len = be16_to_cpup((__be16 *)array_len_p);
+               break;
+       case CBOR_LONG_SIZE_U32:
+               array_len = be32_to_cpup((__be32 *)array_len_p);
+               break;
+       case CBOR_LONG_SIZE_U64:
+               array_len = be64_to_cpup((__be64 *)array_len_p);
+               break;
+       default: /* short encoding: the length is the argument itself (0..23) */
+               array_len = cbor_short_size;
+               break;
+       }
+
+       if (cbor_object_size - array_offset < array_len)
+               return -EFAULT;
+
+       if (array_len > INT_MAX)
+               return -EFAULT;
+
+       *cbor_array = cbor_object + array_offset;
+       return array_len;
+}
+
+/*
+ * Copy the request of a raw message to kernel space.
+ *
+ * @raw comes straight from the ioctl argument, so request.len and
+ * request.addr are untrusted; len is checked against the fixed-size
+ * kernel buffer before the user copy.
+ */
+static int fill_req_raw(struct nsm *nsm, struct nsm_data_req *req,
+                       struct nsm_raw *raw)
+{
+       /* Verify the user input size. */
+       if (raw->request.len > sizeof(req->data))
+               return -EMSGSIZE;
+
+       /* Copy the request payload */
+       if (copy_from_user(req->data, u64_to_user_ptr(raw->request.addr),
+                          raw->request.len))
+               return -EFAULT;
+
+       req->len = raw->request.len;
+
+       return 0;
+}
+
+/*
+ * Copy the response of a raw message back to user space.
+ *
+ * raw->response.len is updated to the number of bytes actually copied,
+ * so an undersized user buffer results in silent truncation, not error.
+ */
+static int parse_resp_raw(struct nsm *nsm, struct nsm_data_resp *resp,
+                         struct nsm_raw *raw)
+{
+       /* Truncate any message that does not fit. */
+       raw->response.len = min_t(u64, raw->response.len, resp->len);
+
+       /* Copy the response content to user space */
+       if (copy_to_user(u64_to_user_ptr(raw->response.addr),
+                        resp->data, raw->response.len))
+               return -EFAULT;
+
+       return 0;
+}
+
+/* Virtqueue interrupt handler: signals the waiter in nsm_sendrecv_msg_locked() */
+static void nsm_vq_callback(struct virtqueue *vq)
+{
+       struct nsm *nsm = vq->vdev->priv;
+
+       complete(&nsm->cmd_done);
+}
+
+/*
+ * Forward a message to the NSM device and wait for the response from it.
+ *
+ * Caller must hold nsm->lock: nsm->msg is the only exchange buffer and
+ * both the ioctl path and the hwrng path funnel through here. On any
+ * failure the virtqueue is drained so stale buffers cannot be returned
+ * on the next exchange.
+ */
+static int nsm_sendrecv_msg_locked(struct nsm *nsm)
+{
+       struct device *dev = &nsm->vdev->dev;
+       struct scatterlist sg_in, sg_out;
+       struct nsm_msg *msg = &nsm->msg;
+       struct virtqueue *vq = nsm->vq;
+       unsigned int len;
+       void *queue_buf;
+       bool kicked;
+       int rc;
+
+       /* Initialize scatter-gather lists with request and response buffers. */
+       sg_init_one(&sg_out, msg->req.data, msg->req.len);
+       sg_init_one(&sg_in, msg->resp.data, sizeof(msg->resp.data));
+
+       /* (re)arm the completion fired by nsm_vq_callback() */
+       init_completion(&nsm->cmd_done);
+       /* Add the request buffer (read by the device). */
+       rc = virtqueue_add_outbuf(vq, &sg_out, 1, msg->req.data, GFP_KERNEL);
+       if (rc)
+               return rc;
+
+       /* Add the response buffer (written by the device). */
+       rc = virtqueue_add_inbuf(vq, &sg_in, 1, msg->resp.data, GFP_KERNEL);
+       if (rc)
+               goto cleanup;
+
+       kicked = virtqueue_kick(vq);
+       if (!kicked) {
+               /* Cannot kick the virtqueue. */
+               rc = -EIO;
+               goto cleanup;
+       }
+
+       /* If the kick succeeded, wait for the device's response. */
+       if (!wait_for_completion_io_timeout(&nsm->cmd_done,
+               msecs_to_jiffies(NSM_DEFAULT_TIMEOUT_MSECS))) {
+               rc = -ETIMEDOUT;
+               goto cleanup;
+       }
+
+       /* both buffers must come back, request first, in order */
+       queue_buf = virtqueue_get_buf(vq, &len);
+       if (!queue_buf || (queue_buf != msg->req.data)) {
+               dev_err(dev, "wrong request buffer.");
+               rc = -ENODATA;
+               goto cleanup;
+       }
+
+       queue_buf = virtqueue_get_buf(vq, &len);
+       if (!queue_buf || (queue_buf != msg->resp.data)) {
+               dev_err(dev, "wrong response buffer.");
+               rc = -ENODATA;
+               goto cleanup;
+       }
+
+       /* len as reported by the device = valid bytes in resp.data */
+       msg->resp.len = len;
+
+       rc = 0;
+
+cleanup:
+       if (rc) {
+               /* Clean the virtqueue. */
+               while (virtqueue_get_buf(vq, &len) != NULL)
+                       ;
+       }
+
+       return rc;
+}
+
+/* Build the fixed CBOR "GetRandom" request (10 bytes) in @req. */
+static int fill_req_get_random(struct nsm *nsm, struct nsm_data_req *req)
+{
+       /*
+        * 69                          # text(9)
+        *     47657452616E646F6D      # "GetRandom"
+        */
+       const u8 request[] = { CBOR_TYPE_TEXT + strlen("GetRandom"),
+                              'G', 'e', 't', 'R', 'a', 'n', 'd', 'o', 'm' };
+
+       /* fits req->data provided NSM_REQUEST_MAX_SIZE >= 10 -- confirm */
+       memcpy(req->data, request, sizeof(request));
+       req->len = sizeof(request);
+
+       return 0;
+}
+
+/*
+ * Extract the random bytes from a GetRandom response.
+ *
+ * Copies at most @max bytes into @out and returns the number copied,
+ * or a negative error when the response does not match the expected
+ * CBOR layout below.
+ */
+static int parse_resp_get_random(struct nsm *nsm, struct nsm_data_resp *resp,
+                                void *out, size_t max)
+{
+       /*
+        * A1                          # map(1)
+        *     69                      # text(9) - Name of field
+        *         47657452616E646F6D  # "GetRandom"
+        * A1                          # map(1) - The field itself
+        *     66                      # text(6)
+        *         72616E646F6D        # "random"
+        *      # The rest of the response is random data
+        */
+       const u8 response[] = { CBOR_TYPE_MAP + 1,
+                               CBOR_TYPE_TEXT + strlen("GetRandom"),
+                               'G', 'e', 't', 'R', 'a', 'n', 'd', 'o', 'm',
+                               CBOR_TYPE_MAP + 1,
+                               CBOR_TYPE_TEXT + strlen("random"),
+                               'r', 'a', 'n', 'd', 'o', 'm' };
+       struct device *dev = &nsm->vdev->dev;
+       u8 *rand_data = NULL;
+       u8 *resp_ptr = resp->data;
+       u64 resp_len = resp->len;
+       int rc;
+
+       /* prefix must match exactly; "+ 1" guarantees at least one byte follows */
+       if ((resp->len < sizeof(response) + 1) ||
+           (memcmp(resp_ptr, response, sizeof(response)) != 0)) {
+               dev_err(dev, "Invalid response for GetRandom");
+               return -EFAULT;
+       }
+
+       resp_ptr += sizeof(response);
+       resp_len -= sizeof(response);
+
+       /* remainder is a CBOR byte string holding the entropy */
+       rc = cbor_object_get_array(resp_ptr, resp_len, &rand_data);
+       if (rc < 0) {
+               dev_err(dev, "GetRandom: Invalid CBOR encoding\n");
+               return rc;
+       }
+
+       /* never copy more than the caller's buffer can hold */
+       rc = min_t(size_t, rc, max);
+       memcpy(out, rand_data, rc);
+
+       return rc;
+}
+
+/*
+ * HwRNG implementation
+ */
+/*
+ * Fetch entropy from the NSM via the shared message buffer.
+ * Returns the number of bytes written to @data, 0 when @wait is false
+ * (the device round-trip always blocks), or a negative error.
+ */
+static int nsm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+       struct nsm *nsm = hwrng_to_nsm(rng);
+       struct device *dev = &nsm->vdev->dev;
+       int rc = 0;
+
+       /* NSM always needs to wait for a response */
+       if (!wait)
+               return 0;
+
+       /* serialize use of nsm->msg against the ioctl path */
+       mutex_lock(&nsm->lock);
+
+       rc = fill_req_get_random(nsm, &nsm->msg.req);
+       if (rc != 0)
+               goto out;
+
+       rc = nsm_sendrecv_msg_locked(nsm);
+       if (rc != 0)
+               goto out;
+
+       rc = parse_resp_get_random(nsm, &nsm->msg.resp, data, max);
+       if (rc < 0)
+               goto out;
+
+       dev_dbg(dev, "RNG: returning rand bytes = %d", rc);
+out:
+       mutex_unlock(&nsm->lock);
+       return rc;
+}
+
+/*
+ * /dev/nsm ioctl: forward one raw request/response pair through the device.
+ *
+ * Fix vs. original: a copy_from_user() failure jumped to the "out" label,
+ * which calls mutex_unlock() on nsm->lock *before* the lock has been
+ * taken -- unlocking an unheld mutex is undefined behavior. Fail directly
+ * instead; "out" is reached only with the lock held.
+ */
+static long nsm_dev_ioctl(struct file *file, unsigned int cmd,
+       unsigned long arg)
+{
+       void __user *argp = u64_to_user_ptr((u64)arg);
+       struct nsm *nsm = file_to_nsm(file);
+       struct nsm_raw raw;
+       int r = 0;
+
+       if (cmd != NSM_IOCTL_RAW)
+               return -EINVAL;
+
+       if (_IOC_SIZE(cmd) != sizeof(raw))
+               return -EINVAL;
+
+       /* Copy user argument struct to kernel argument struct */
+       if (copy_from_user(&raw, argp, _IOC_SIZE(cmd)))
+               return -EFAULT;
+
+       mutex_lock(&nsm->lock);
+
+       /* Convert kernel argument struct to device request */
+       r = fill_req_raw(nsm, &nsm->msg.req, &raw);
+       if (r)
+               goto out;
+
+       /* Send message to NSM and read reply */
+       r = nsm_sendrecv_msg_locked(nsm);
+       if (r)
+               goto out;
+
+       /* Parse device response into kernel argument struct */
+       r = parse_resp_raw(nsm, &nsm->msg.resp, &raw);
+       if (r)
+               goto out;
+
+       /* Copy kernel argument struct back to user argument struct */
+       r = -EFAULT;
+       if (copy_to_user(argp, &raw, sizeof(raw)))
+               goto out;
+
+       r = 0;
+
+out:
+       mutex_unlock(&nsm->lock);
+       return r;
+}
+
+/* Find the device's single virtqueue and stash it in nsm->vq. */
+static int nsm_device_init_vq(struct virtio_device *vdev)
+{
+       struct virtqueue *vq = virtio_find_single_vq(vdev,
+               nsm_vq_callback, "nsm.vq.0");
+       struct nsm *nsm = vdev->priv;
+
+       if (IS_ERR(vq))
+               return PTR_ERR(vq);
+
+       nsm->vq = vq;
+
+       return 0;
+}
+
+/*
+ * NOTE(review): no .owner = THIS_MODULE here; verify the module cannot be
+ * unloaded while /dev/nsm is held open.
+ */
+static const struct file_operations nsm_dev_fops = {
+       .unlocked_ioctl = nsm_dev_ioctl,
+       .compat_ioctl = compat_ptr_ioctl,
+};
+
+/*
+ * Handler for probing the NSM device: allocate state, set up the single
+ * virtqueue, then expose the device both as a hwrng provider and as the
+ * /dev/nsm raw-message node. Error paths unwind in reverse order;
+ * nsm itself is devm-managed.
+ */
+static int nsm_device_probe(struct virtio_device *vdev)
+{
+       struct device *dev = &vdev->dev;
+       struct nsm *nsm;
+       int rc;
+
+       nsm = devm_kzalloc(&vdev->dev, sizeof(*nsm), GFP_KERNEL);
+       if (!nsm)
+               return -ENOMEM;
+
+       vdev->priv = nsm;
+       nsm->vdev = vdev;
+
+       rc = nsm_device_init_vq(vdev);
+       if (rc) {
+               dev_err(dev, "queue failed to initialize: %d.\n", rc);
+               goto err_init_vq;
+       }
+
+       mutex_init(&nsm->lock);
+
+       /* Register as hwrng provider */
+       nsm->hwrng = (struct hwrng) {
+               .read = nsm_rng_read,
+               .name = "nsm-hwrng",
+               /* entropy credit per 1024 bits of input -- confirm value */
+               .quality = 1000,
+       };
+
+       rc = hwrng_register(&nsm->hwrng);
+       if (rc) {
+               dev_err(dev, "RNG initialization error: %d.\n", rc);
+               goto err_hwrng;
+       }
+
+       /* Register /dev/nsm device node */
+       nsm->misc = (struct miscdevice) {
+               .minor  = MISC_DYNAMIC_MINOR,
+               .name   = "nsm",
+               .fops   = &nsm_dev_fops,
+               .mode   = 0666,
+       };
+
+       rc = misc_register(&nsm->misc);
+       if (rc) {
+               dev_err(dev, "misc device registration error: %d.\n", rc);
+               goto err_misc;
+       }
+
+       return 0;
+
+err_misc:
+       hwrng_unregister(&nsm->hwrng);
+err_hwrng:
+       vdev->config->del_vqs(vdev);
+err_init_vq:
+       return rc;
+}
+
+/*
+ * Handler for removing the NSM device.
+ *
+ * NOTE(review): the virtqueues are deleted before misc_deregister(), so
+ * an ioctl arriving through an already-open fd in that window would reach
+ * a dead vq; consider removing the user-facing node first -- confirm.
+ */
+static void nsm_device_remove(struct virtio_device *vdev)
+{
+       struct nsm *nsm = vdev->priv;
+
+       hwrng_unregister(&nsm->hwrng);
+
+       vdev->config->del_vqs(vdev);
+       misc_deregister(&nsm->misc);
+}
+
+/* NSM device configuration structure; no virtio feature bits negotiated */
+static struct virtio_driver virtio_nsm_driver = {
+       .feature_table             = 0,
+       .feature_table_size        = 0,
+       .feature_table_legacy      = 0,
+       .feature_table_size_legacy = 0,
+       .driver.name               = KBUILD_MODNAME,
+       .driver.owner              = THIS_MODULE,
+       .id_table                  = id_table,
+       .probe                     = nsm_device_probe,
+       .remove                    = nsm_device_remove,
+};
+
+module_virtio_driver(virtio_nsm_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio NSM driver");
+MODULE_LICENSE("GPL");
index 8aea2d070a40c23e0a0ed9495d8039f9fa6804ac..d279a4f195e2a343a8332d25c25e85a89b6ac88f 100644 (file)
@@ -140,7 +140,6 @@ static int __init open_dice_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        *drvdata = (struct open_dice_drvdata){
-               .lock = __MUTEX_INITIALIZER(drvdata->lock),
                .rmem = rmem,
                .misc = (struct miscdevice){
                        .parent = dev,
@@ -150,6 +149,7 @@ static int __init open_dice_probe(struct platform_device *pdev)
                        .mode   = 0600,
                },
        };
+       mutex_init(&drvdata->lock);
 
        /* Index overflow check not needed, misc_register() will fail. */
        snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++);
index af519088732d9a16b724c05cec78e6c82a990f3a..c38a6083f0a73635763cbaf47b41ecadee9abff2 100644 (file)
 #define DRV_MODULE_NAME                                "pci-endpoint-test"
 
 #define IRQ_TYPE_UNDEFINED                     -1
-#define IRQ_TYPE_LEGACY                                0
+#define IRQ_TYPE_INTX                          0
 #define IRQ_TYPE_MSI                           1
 #define IRQ_TYPE_MSIX                          2
 
 #define PCI_ENDPOINT_TEST_MAGIC                        0x0
 
 #define PCI_ENDPOINT_TEST_COMMAND              0x4
-#define COMMAND_RAISE_LEGACY_IRQ               BIT(0)
+#define COMMAND_RAISE_INTX_IRQ                 BIT(0)
 #define COMMAND_RAISE_MSI_IRQ                  BIT(1)
 #define COMMAND_RAISE_MSIX_IRQ                 BIT(2)
 #define COMMAND_READ                           BIT(3)
@@ -183,8 +183,8 @@ static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
        bool res = true;
 
        switch (type) {
-       case IRQ_TYPE_LEGACY:
-               irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
+       case IRQ_TYPE_INTX:
+               irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
                if (irq < 0)
                        dev_err(dev, "Failed to get Legacy interrupt\n");
                break;
@@ -244,7 +244,7 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
 
 fail:
        switch (irq_type) {
-       case IRQ_TYPE_LEGACY:
+       case IRQ_TYPE_INTX:
                dev_err(dev, "Failed to request IRQ %d for Legacy\n",
                        pci_irq_vector(pdev, i));
                break;
@@ -263,6 +263,15 @@ fail:
        return false;
 }
 
+static const u32 bar_test_pattern[] = {
+       0xA0A0A0A0,
+       0xA1A1A1A1,
+       0xA2A2A2A2,
+       0xA3A3A3A3,
+       0xA4A4A4A4,
+       0xA5A5A5A5,
+};
+
 static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
                                  enum pci_barno barno)
 {
@@ -280,26 +289,27 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
                size = 0x4;
 
        for (j = 0; j < size; j += 4)
-               pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
+               pci_endpoint_test_bar_writel(test, barno, j,
+                                            bar_test_pattern[barno]);
 
        for (j = 0; j < size; j += 4) {
                val = pci_endpoint_test_bar_readl(test, barno, j);
-               if (val != 0xA0A0A0A0)
+               if (val != bar_test_pattern[barno])
                        return false;
        }
 
        return true;
 }
 
-static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
+static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
 {
        u32 val;
 
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
-                                IRQ_TYPE_LEGACY);
+                                IRQ_TYPE_INTX);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
-                                COMMAND_RAISE_LEGACY_IRQ);
+                                COMMAND_RAISE_INTX_IRQ);
        val = wait_for_completion_timeout(&test->irq_raised,
                                          msecs_to_jiffies(1000));
        if (!val)
@@ -385,7 +395,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
        if (use_dma)
                flags |= FLAG_USE_DMA;
 
-       if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+       if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }
@@ -521,7 +531,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
        if (use_dma)
                flags |= FLAG_USE_DMA;
 
-       if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+       if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }
@@ -621,7 +631,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
        if (use_dma)
                flags |= FLAG_USE_DMA;
 
-       if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+       if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }
@@ -691,7 +701,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
 
-       if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
+       if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                return false;
        }
@@ -737,8 +747,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
                        goto ret;
                ret = pci_endpoint_test_bar(test, bar);
                break;
-       case PCITEST_LEGACY_IRQ:
-               ret = pci_endpoint_test_legacy_irq(test);
+       case PCITEST_INTX_IRQ:
+               ret = pci_endpoint_test_intx_irq(test);
                break;
        case PCITEST_MSI:
        case PCITEST_MSIX:
@@ -801,7 +811,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
        test->irq_type = IRQ_TYPE_UNDEFINED;
 
        if (no_msi)
-               irq_type = IRQ_TYPE_LEGACY;
+               irq_type = IRQ_TYPE_INTX;
 
        data = (struct pci_endpoint_test_data *)ent->driver_data;
        if (data) {
@@ -860,7 +870,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
 
        pci_set_drvdata(pdev, test);
 
-       id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
+       id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
        if (id < 0) {
                err = id;
                dev_err(dev, "Unable to get id\n");
@@ -907,7 +917,7 @@ err_kfree_test_name:
        kfree(test->name);
 
 err_ida_remove:
-       ida_simple_remove(&pci_endpoint_test_ida, id);
+       ida_free(&pci_endpoint_test_ida, id);
 
 err_iounmap:
        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
@@ -943,7 +953,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
        misc_deregister(&test->miscdev);
        kfree(misc_device->name);
        kfree(test->name);
-       ida_simple_remove(&pci_endpoint_test_ida, id);
+       ida_free(&pci_endpoint_test_ida, id);
        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (test->bar[bar])
                        pci_iounmap(pdev, test->bar[bar]);
index 9715798acce3de2142c433e8b57cbdd804d5b10d..f3f2113a54a7ed4c0f22721c946d9a719a03581a 100644 (file)
@@ -7,16 +7,15 @@
  *  Copyright (C) 2021 Oracle.
  */
 
+#include <linux/device.h>
+#include <linux/err.h>
 #include <linux/io.h>
-#include <linux/kernel.h>
+#include <linux/ioport.h>
 #include <linux/kexec.h>
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/types.h>
-#include <linux/slab.h>
-
-#include <uapi/misc/pvpanic.h>
 
 #include "pvpanic.h"
 
index 689af4c28c2a9cd8822cbaac5e44c9525f56c5a0..9ad20e82785bcd6ade57d493e31152ecff334178 100644 (file)
@@ -5,17 +5,13 @@
  *  Copyright (C) 2021 Oracle.
  */
 
-#include <linux/kernel.h>
+#include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/types.h>
-#include <linux/slab.h>
-
-#include <uapi/misc/pvpanic.h>
 
 #include "pvpanic.h"
 
-#define PCI_VENDOR_ID_REDHAT             0x1b36
 #define PCI_DEVICE_ID_REDHAT_PVPANIC     0x0011
 
 MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
index 305b367e0ce34618c1e465f85e56c1cc1cb61746..df3457ce1cb1461c2737399a4c29ece2c8586e27 100644 (file)
@@ -8,16 +8,20 @@
  */
 
 #include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
 #include <linux/io.h>
-#include <linux/kernel.h>
 #include <linux/kexec.h>
+#include <linux/kstrtox.h>
+#include <linux/limits.h>
+#include <linux/list.h>
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/platform_device.h>
 #include <linux/panic_notifier.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
 #include <linux/types.h>
-#include <linux/cdev.h>
-#include <linux/list.h>
 
 #include <uapi/misc/pvpanic.h>
 
index 46ffb10438adf618d0cf6758f2a2237bcaaf1ac3..a42fa760eed58a24ca6eb326b6e2b59e437110db 100644 (file)
@@ -8,6 +8,11 @@
 #ifndef PVPANIC_H_
 #define PVPANIC_H_
 
+#include <linux/compiler_types.h>
+
+struct attribute_group;
+struct device;
+
 int devm_pvpanic_probe(struct device *dev, void __iomem *base);
 extern const struct attribute_group *pvpanic_dev_groups[];
 
index de7fee7ead1bccbfc3ce1faa052b06b521281321..681b3500125ab38573f2a6a196896d3c1f195cda 100644 (file)
@@ -8,12 +8,6 @@
 #include <linux/slab.h>
 #include "vmci_handle_array.h"
 
-static size_t handle_arr_calc_size(u32 capacity)
-{
-       return VMCI_HANDLE_ARRAY_HEADER_SIZE +
-           capacity * sizeof(struct vmci_handle);
-}
-
 struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
 {
        struct vmci_handle_arr *array;
@@ -25,7 +19,7 @@ struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
                capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY,
                               max_capacity);
 
-       array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+       array = kmalloc(struct_size(array, entries, capacity), GFP_ATOMIC);
        if (!array)
                return NULL;
 
@@ -51,8 +45,8 @@ int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
                struct vmci_handle_arr *new_array;
                u32 capacity_bump = min(array->max_capacity - array->capacity,
                                        array->capacity);
-               size_t new_size = handle_arr_calc_size(array->capacity +
-                                                      capacity_bump);
+               size_t new_size = struct_size(array, entries,
+                                             size_add(array->capacity, capacity_bump));
 
                if (array->size >= array->max_capacity)
                        return VMCI_ERROR_NO_MEM;
index b0e6b195601403df3afc368548a6f1e352604805..27a38b97e8a850462fb0bfa31737515f5778741b 100644 (file)
@@ -20,14 +20,8 @@ struct vmci_handle_arr {
        struct vmci_handle entries[] __counted_by(capacity);
 };
 
-#define VMCI_HANDLE_ARRAY_HEADER_SIZE                          \
-       offsetof(struct vmci_handle_arr, entries)
 /* Select a default capacity that results in a 64 byte sized array */
 #define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY                     6
-/* Make sure that the max array size can be expressed by a u32 */
-#define VMCI_HANDLE_ARRAY_MAX_CAPACITY                         \
-       ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) /        \
-       sizeof(struct vmci_handle))
 
 struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity);
 void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
index ef38dcd3a8872357422e330c7ee3f225db8d85f7..575ebbce378e04a3dd879e336497ed7060e8e792 100644 (file)
@@ -178,11 +178,9 @@ static inline void sdio_uart_release_func(struct sdio_uart_port *port)
                sdio_release_host(port->func);
 }
 
-static inline unsigned int sdio_in(struct sdio_uart_port *port, int offset)
+static inline u8 sdio_in(struct sdio_uart_port *port, int offset)
 {
-       unsigned char c;
-       c = sdio_readb(port->func, port->regs_offset + offset, NULL);
-       return c;
+       return sdio_readb(port->func, port->regs_offset + offset, NULL);
 }
 
 static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
@@ -192,8 +190,8 @@ static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
 
 static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
 {
-       unsigned char status;
        unsigned int ret;
+       u8 status;
 
        /* FIXME: What stops this losing the delta bits and breaking
           sdio_uart_check_modem_status ? */
@@ -354,15 +352,13 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port)
        sdio_out(port, UART_IER, port->ier);
 }
 
-static void sdio_uart_receive_chars(struct sdio_uart_port *port,
-                                   unsigned int *status)
+static void sdio_uart_receive_chars(struct sdio_uart_port *port, u8 *status)
 {
-       unsigned int ch, flag;
        int max_count = 256;
 
        do {
-               ch = sdio_in(port, UART_RX);
-               flag = TTY_NORMAL;
+               u8 ch = sdio_in(port, UART_RX);
+               u8 flag = TTY_NORMAL;
                port->icount.rx++;
 
                if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
@@ -449,8 +445,8 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
 
 static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
 {
-       int status;
        struct tty_struct *tty;
+       u8 status;
 
        status = sdio_in(port, UART_MSR);
 
@@ -499,7 +495,7 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
 static void sdio_uart_irq(struct sdio_func *func)
 {
        struct sdio_uart_port *port = sdio_get_drvdata(func);
-       unsigned int iir, lsr;
+       u8 iir, lsr;
 
        /*
         * In a few places sdio_uart_irq() is called directly instead of
@@ -795,7 +791,7 @@ static unsigned int sdio_uart_chars_in_buffer(struct tty_struct *tty)
        return kfifo_len(&port->xmit_fifo);
 }
 
-static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
+static void sdio_uart_send_xchar(struct tty_struct *tty, u8 ch)
 {
        struct sdio_uart_port *port = tty->driver_data;
 
index 2a2d949a9344ea78b540337d471996f2ec77d53e..39f45c2b6de8a885e12af08f302d8bc2dce15d61 100644 (file)
@@ -75,11 +75,15 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_irq);
 int mmc_gpio_get_ro(struct mmc_host *host)
 {
        struct mmc_gpio *ctx = host->slot.handler_priv;
+       int cansleep;
 
        if (!ctx || !ctx->ro_gpio)
                return -ENOSYS;
 
-       return gpiod_get_value_cansleep(ctx->ro_gpio);
+       cansleep = gpiod_cansleep(ctx->ro_gpio);
+       return cansleep ?
+               gpiod_get_value_cansleep(ctx->ro_gpio) :
+               gpiod_get_value(ctx->ro_gpio);
 }
 EXPORT_SYMBOL(mmc_gpio_get_ro);
 
index 87d78432a1e031d35fab15100c52a35dbacf7a24..7dfe7c4e00770412e642aabb2d70cbd38f698b52 100644 (file)
@@ -7,6 +7,7 @@
  *   Wei WANG <wei_wang@realsil.com.cn>
  */
 
+#include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
@@ -947,7 +948,7 @@ static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode)
        /* send at least 74 clocks */
        rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
 
-       if (PCI_PID(pcr) == PID_5261) {
+       if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
                /*
                 * If test mode is set switch to SD Express mandatorily,
                 * this is only for factory testing.
@@ -1364,6 +1365,14 @@ static int sdmmc_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
        struct realtek_pci_sdmmc *host = mmc_priv(mmc);
        struct rtsx_pcr *pcr = host->pcr;
 
+       if (PCI_PID(pcr) == PID_5264) {
+               pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL2,
+                               PCI_EXP_LNKCTL2_TLS, PCI_EXP_LNKCTL2_TLS_2_5GT);
+               pci_write_config_byte(pcr->pci, 0x80e, 0x02);
+               pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL2,
+                               PCI_EXP_LNKCTL2_TLS, PCI_EXP_LNKCTL2_TLS_5_0GT);
+       }
+
        /* Set relink_time for changing to PCIe card */
        relink_time = 0x8FFF;
 
@@ -1379,6 +1388,12 @@ static int sdmmc_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
        if (pcr->ops->disable_auto_blink)
                pcr->ops->disable_auto_blink(pcr);
 
+       if (PCI_PID(pcr) == PID_5264) {
+               rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG2,
+                       RTS5264_CHIP_RST_N_SEL, RTS5264_CHIP_RST_N_SEL);
+               rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
+       }
+
        /* For PCIe/NVMe mode can't enter delink issue */
        pcr->hw_param.interrupt_en &= ~(SD_INT_EN);
        rtsx_pci_writel(pcr, RTSX_BIER, pcr->hw_param.interrupt_en);
index 7bfee28116af12ebdf08efb7b0e51e68cb602956..d4a02184784a3458b55b601d1fba1216e1ef3149 100644 (file)
@@ -693,6 +693,35 @@ static int sdhci_pci_o2_init_sd_express(struct mmc_host *mmc, struct mmc_ios *io
        return 0;
 }
 
+static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode,  unsigned short vdd)
+{
+       struct sdhci_pci_chip *chip;
+       struct sdhci_pci_slot *slot = sdhci_priv(host);
+       u32 scratch_32 = 0;
+       u8 scratch_8 = 0;
+
+       chip = slot->chip;
+
+       if (mode == MMC_POWER_OFF) {
+               /* UnLock WP */
+               pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+               scratch_8 &= 0x7f;
+               pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+
+               /* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */
+               pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
+               scratch_32 &= ~(O2_SD_SEL_DLL);
+               pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
+
+               /* Lock WP */
+               pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+               scratch_8 |= 0x80;
+               pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+       }
+
+       sdhci_set_power(host, mode, vdd);
+}
+
 static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
 {
        struct sdhci_pci_chip *chip;
@@ -1051,6 +1080,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
        .set_bus_width = sdhci_set_bus_width,
        .reset = sdhci_reset,
        .set_uhs_signaling = sdhci_set_uhs_signaling,
+       .set_power = sdhci_pci_o2_set_power,
 };
 
 const struct sdhci_pci_fixes sdhci_o2 = {
index bb0759ca12f1c9df8052f770b375ab9b80c423e4..e451b28840d58b2b0e6b5fdd4d50fe809dd29de4 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/debugfs.h>
 #include <linux/nvmem-provider.h>
 #include <linux/root_dev.h>
+#include <linux/error-injection.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -1412,6 +1413,7 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
        return ret;
 }
 EXPORT_SYMBOL_GPL(mtd_erase);
+ALLOW_ERROR_INJECTION(mtd_erase, ERRNO);
 
 /*
  * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
@@ -1511,6 +1513,7 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
        return ret;
 }
 EXPORT_SYMBOL_GPL(mtd_read);
+ALLOW_ERROR_INJECTION(mtd_read, ERRNO);
 
 int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
              const u_char *buf)
@@ -1527,6 +1530,7 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
        return ret;
 }
 EXPORT_SYMBOL_GPL(mtd_write);
+ALLOW_ERROR_INJECTION(mtd_write, ERRNO);
 
 /*
  * In blackbox flight recorder like scenarios we want to make successful writes
@@ -2347,6 +2351,7 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
        return 0;
 }
 EXPORT_SYMBOL_GPL(mtd_block_markbad);
+ALLOW_ERROR_INJECTION(mtd_block_markbad, ERRNO);
 
 /*
  * default_mtd_writev - the default writev method
index 2ed77b7b3fcb56e631ee8283d0ea6d209453b93d..7499a540121e8b2b41e2941f1448e0691028b1e0 100644 (file)
@@ -104,4 +104,13 @@ config MTD_UBI_BLOCK
 
           If in doubt, say "N".
 
+config MTD_UBI_FAULT_INJECTION
+       bool "Fault injection capability of UBI device"
+       default n
+       depends on FAULT_INJECTION_DEBUG_FS
+       help
+          This option enables fault-injection support for UBI devices for
+          testing purposes.
+
+          If in doubt, say "N".
 endif # MTD_UBI
index 309a42aeaa4cdbbab017252175b18748c91d0cf4..654bd7372cd8c09c69bf7c205a728f263dcc3dfc 100644 (file)
@@ -434,7 +434,7 @@ out_remove_minor:
        list_del(&dev->list);
        idr_remove(&ubiblock_minor_idr, gd->first_minor);
 out_cleanup_disk:
-       put_disk(dev->gd);
+       put_disk(gd);
 out_free_tags:
        blk_mq_free_tag_set(&dev->tag_set);
 out_free_dev:
index 27168f511d6d4602d857cad9f25b3b97417d2ba3..d57f52bd2ff3cd7a20b293b3993874a4826cffb2 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
+#include <linux/fault-inject.h>
+
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(fault_eccerr_attr);
+static DECLARE_FAULT_ATTR(fault_bitflips_attr);
+static DECLARE_FAULT_ATTR(fault_read_failure_attr);
+static DECLARE_FAULT_ATTR(fault_write_failure_attr);
+static DECLARE_FAULT_ATTR(fault_erase_failure_attr);
+static DECLARE_FAULT_ATTR(fault_power_cut_attr);
+static DECLARE_FAULT_ATTR(fault_io_ff_attr);
+static DECLARE_FAULT_ATTR(fault_io_ff_bitflips_attr);
+static DECLARE_FAULT_ATTR(fault_bad_hdr_attr);
+static DECLARE_FAULT_ATTR(fault_bad_hdr_ebadmsg_attr);
+
+#define FAIL_ACTION(name, fault_attr)                  \
+bool should_fail_##name(void)                          \
+{                                                      \
+       return should_fail(&fault_attr, 1);             \
+}
 
+FAIL_ACTION(eccerr,            fault_eccerr_attr)
+FAIL_ACTION(bitflips,          fault_bitflips_attr)
+FAIL_ACTION(read_failure,      fault_read_failure_attr)
+FAIL_ACTION(write_failure,     fault_write_failure_attr)
+FAIL_ACTION(erase_failure,     fault_erase_failure_attr)
+FAIL_ACTION(power_cut,         fault_power_cut_attr)
+FAIL_ACTION(io_ff,             fault_io_ff_attr)
+FAIL_ACTION(io_ff_bitflips,    fault_io_ff_bitflips_attr)
+FAIL_ACTION(bad_hdr,           fault_bad_hdr_attr)
+FAIL_ACTION(bad_hdr_ebadmsg,   fault_bad_hdr_ebadmsg_attr)
+#endif
 
 /**
  * ubi_dump_flash - dump a region of flash.
@@ -212,6 +242,52 @@ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
  */
 static struct dentry *dfs_rootdir;
 
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+static void dfs_create_fault_entry(struct dentry *parent)
+{
+       struct dentry *dir;
+
+       dir = debugfs_create_dir("fault_inject", parent);
+       if (IS_ERR_OR_NULL(dir)) {
+               int err = dir ? PTR_ERR(dir) : -ENODEV;
+
+               pr_warn("UBI error: cannot create \"fault_inject\" debugfs directory, error %d\n",
+                        err);
+               return;
+       }
+
+       fault_create_debugfs_attr("emulate_eccerr", dir,
+                                 &fault_eccerr_attr);
+
+       fault_create_debugfs_attr("emulate_read_failure", dir,
+                                 &fault_read_failure_attr);
+
+       fault_create_debugfs_attr("emulate_bitflips", dir,
+                                 &fault_bitflips_attr);
+
+       fault_create_debugfs_attr("emulate_write_failure", dir,
+                                 &fault_write_failure_attr);
+
+       fault_create_debugfs_attr("emulate_erase_failure", dir,
+                                 &fault_erase_failure_attr);
+
+       fault_create_debugfs_attr("emulate_power_cut", dir,
+                                 &fault_power_cut_attr);
+
+       fault_create_debugfs_attr("emulate_io_ff", dir,
+                                 &fault_io_ff_attr);
+
+       fault_create_debugfs_attr("emulate_io_ff_bitflips", dir,
+                                 &fault_io_ff_bitflips_attr);
+
+       fault_create_debugfs_attr("emulate_bad_hdr", dir,
+                                 &fault_bad_hdr_attr);
+
+       fault_create_debugfs_attr("emulate_bad_hdr_ebadmsg", dir,
+                                 &fault_bad_hdr_ebadmsg_attr);
+}
+#endif
+
 /**
  * ubi_debugfs_init - create UBI debugfs directory.
  *
@@ -232,6 +308,10 @@ int ubi_debugfs_init(void)
                return err;
        }
 
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+       dfs_create_fault_entry(dfs_rootdir);
+#endif
+
        return 0;
 }
 
@@ -252,7 +332,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
        struct dentry *dent = file->f_path.dentry;
        struct ubi_device *ubi;
        struct ubi_debug_info *d;
-       char buf[8];
+       char buf[16];
        int val;
 
        ubi = ubi_get_device(ubi_num);
@@ -272,7 +352,12 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
                val = d->emulate_bitflips;
        else if (dent == d->dfs_emulate_io_failures)
                val = d->emulate_io_failures;
-       else if (dent == d->dfs_emulate_power_cut) {
+       else if (dent == d->dfs_emulate_failures) {
+               snprintf(buf, sizeof(buf), "0x%04x\n", d->emulate_failures);
+               count = simple_read_from_buffer(user_buf, count, ppos,
+                                               buf, strlen(buf));
+               goto out;
+       } else if (dent == d->dfs_emulate_power_cut) {
                snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
                count = simple_read_from_buffer(user_buf, count, ppos,
                                                buf, strlen(buf));
@@ -287,8 +372,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
                count = simple_read_from_buffer(user_buf, count, ppos,
                                                buf, strlen(buf));
                goto out;
-       }
-       else {
+       } else {
                count = -EINVAL;
                goto out;
        }
@@ -316,7 +400,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
        struct ubi_device *ubi;
        struct ubi_debug_info *d;
        size_t buf_size;
-       char buf[8] = {0};
+       char buf[16] = {0};
        int val;
 
        ubi = ubi_get_device(ubi_num);
@@ -330,7 +414,11 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
                goto out;
        }
 
-       if (dent == d->dfs_power_cut_min) {
+       if (dent == d->dfs_emulate_failures) {
+               if (kstrtouint(buf, 0, &d->emulate_failures) != 0)
+                       count = -EINVAL;
+               goto out;
+       } else if (dent == d->dfs_power_cut_min) {
                if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
                        count = -EINVAL;
                goto out;
@@ -559,6 +647,12 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
        debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
                            (void *)ubi_num, &eraseblk_count_fops);
 
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+       d->dfs_emulate_failures = debugfs_create_file("emulate_failures",
+                                                      mode, d->dfs_dir,
+                                                      (void *)ubi_num,
+                                                      &dfs_fops);
+#endif
        return 0;
 }
 
@@ -600,7 +694,5 @@ int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
        if (ubi->dbg.power_cut_counter)
                return 0;
 
-       ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
-       ubi_ro_mode(ubi);
        return 1;
 }
index 23676f32b6812f09cb7895974b5f37c70dff9f9c..b2fd9754880843750e5b0612868915b10038fce2 100644 (file)
@@ -53,56 +53,315 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi);
 void ubi_debugfs_exit_dev(struct ubi_device *ubi);
 
 /**
- * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * The following function is a legacy implementation of UBI fault-injection
+ * hook. When using more powerful fault injection capabilities, the legacy
+ * fault injection interface should be retained.
+ */
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
+
+static inline int ubi_dbg_bitflip(const struct ubi_device *ubi)
+{
+       if (ubi->dbg.emulate_bitflips)
+               return !get_random_u32_below(200);
+       return 0;
+}
+
+static inline int ubi_dbg_write_failure(const struct ubi_device *ubi)
+{
+       if (ubi->dbg.emulate_io_failures)
+               return !get_random_u32_below(500);
+       return 0;
+}
+
+static inline int ubi_dbg_erase_failure(const struct ubi_device *ubi)
+{
+       if (ubi->dbg.emulate_io_failures)
+               return !get_random_u32_below(400);
+       return 0;
+}
+
+/**
+ * MASK_XXX: Mask for emulate_failures in ubi_debug_info.The mask is used to
+ * precisely control the type and process of fault injection.
+ */
+/* Emulate a power cut when writing EC/VID header */
+#define MASK_POWER_CUT_EC                      (1 << 0)
+#define MASK_POWER_CUT_VID                     (1 << 1)
+/* Emulate a power cut when writing data*/
+#define MASK_POWER_CUT_DATA                    (1 << 2)
+/* Emulate bit-flips */
+#define MASK_BITFLIPS                          (1 << 3)
+/* Emulate ecc error */
+#define MASK_ECCERR                            (1 << 4)
+/* Emulates -EIO during data read */
+#define MASK_READ_FAILURE                      (1 << 5)
+#define MASK_READ_FAILURE_EC                   (1 << 6)
+#define MASK_READ_FAILURE_VID                  (1 << 7)
+/* Emulates -EIO during data write */
+#define MASK_WRITE_FAILURE                     (1 << 8)
+/* Emulates -EIO during erase a PEB*/
+#define MASK_ERASE_FAILURE                     (1 << 9)
+/* Return UBI_IO_FF when reading EC/VID header */
+#define MASK_IO_FF_EC                          (1 << 10)
+#define MASK_IO_FF_VID                         (1 << 11)
+/* Return UBI_IO_FF_BITFLIPS when reading EC/VID header */
+#define MASK_IO_FF_BITFLIPS_EC                 (1 << 12)
+#define MASK_IO_FF_BITFLIPS_VID                        (1 << 13)
+/* Return UBI_IO_BAD_HDR when reading EC/VID header */
+#define MASK_BAD_HDR_EC                                (1 << 14)
+#define MASK_BAD_HDR_VID                       (1 << 15)
+/* Return UBI_IO_BAD_HDR_EBADMSG when reading EC/VID header */
+#define MASK_BAD_HDR_EBADMSG_EC                        (1 << 16)
+#define MASK_BAD_HDR_EBADMSG_VID               (1 << 17)
+
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+
+extern bool should_fail_eccerr(void);
+extern bool should_fail_bitflips(void);
+extern bool should_fail_read_failure(void);
+extern bool should_fail_write_failure(void);
+extern bool should_fail_erase_failure(void);
+extern bool should_fail_power_cut(void);
+extern bool should_fail_io_ff(void);
+extern bool should_fail_io_ff_bitflips(void);
+extern bool should_fail_bad_hdr(void);
+extern bool should_fail_bad_hdr_ebadmsg(void);
+
+static inline bool ubi_dbg_fail_bitflip(const struct ubi_device *ubi)
+{
+       if (ubi->dbg.emulate_failures & MASK_BITFLIPS)
+               return should_fail_bitflips();
+       return false;
+}
+
+static inline bool ubi_dbg_fail_write(const struct ubi_device *ubi)
+{
+       if (ubi->dbg.emulate_failures & MASK_WRITE_FAILURE)
+               return should_fail_write_failure();
+       return false;
+}
+
+static inline bool ubi_dbg_fail_erase(const struct ubi_device *ubi)
+{
+       if (ubi->dbg.emulate_failures & MASK_ERASE_FAILURE)
+               return should_fail_erase_failure();
+       return false;
+}
+
+static inline bool ubi_dbg_fail_power_cut(const struct ubi_device *ubi,
+                                         unsigned int caller)
+{
+       if (ubi->dbg.emulate_failures & caller)
+               return should_fail_power_cut();
+       return false;
+}
+
+static inline bool ubi_dbg_fail_read(const struct ubi_device *ubi,
+                                    unsigned int caller)
+{
+       if (ubi->dbg.emulate_failures & caller)
+               return should_fail_read_failure();
+       return false;
+}
+
+static inline bool ubi_dbg_fail_eccerr(const struct ubi_device *ubi)
+{
+       if (ubi->dbg.emulate_failures & MASK_ECCERR)
+               return should_fail_eccerr();
+       return false;
+}
+
+static inline bool ubi_dbg_fail_ff(const struct ubi_device *ubi,
+                                  unsigned int caller)
+{
+       if (ubi->dbg.emulate_failures & caller)
+               return should_fail_io_ff();
+       return false;
+}
+
+static inline bool ubi_dbg_fail_ff_bitflips(const struct ubi_device *ubi,
+                                           unsigned int caller)
+{
+       if (ubi->dbg.emulate_failures & caller)
+               return should_fail_io_ff_bitflips();
+       return false;
+}
+
+static inline bool ubi_dbg_fail_bad_hdr(const struct ubi_device *ubi,
+                                        unsigned int caller)
+{
+       if (ubi->dbg.emulate_failures & caller)
+               return should_fail_bad_hdr();
+       return false;
+}
+
+static inline bool ubi_dbg_fail_bad_hdr_ebadmsg(const struct ubi_device *ubi,
+                                                unsigned int caller)
+{
+       if (ubi->dbg.emulate_failures & caller)
+               return should_fail_bad_hdr_ebadmsg();
+       return false;
+}
+#else /* CONFIG_MTD_UBI_FAULT_INJECTION */
+
+#define ubi_dbg_fail_bitflip(u)             false
+#define ubi_dbg_fail_write(u)               false
+#define ubi_dbg_fail_erase(u)               false
+#define ubi_dbg_fail_power_cut(u, c)        false
+#define ubi_dbg_fail_read(u, c)             false
+#define ubi_dbg_fail_eccerr(u)              false
+#define ubi_dbg_fail_ff(u, c)               false
+#define ubi_dbg_fail_ff_bitflips(u, v)      false
+#define ubi_dbg_fail_bad_hdr(u, c)          false
+#define ubi_dbg_fail_bad_hdr_ebadmsg(u, c)  false
+
+#endif
+
+/**
+ * ubi_dbg_is_power_cut - if it is time to emulate power cut.
  * @ubi: UBI device description object
  *
- * Returns non-zero if the UBI background thread is disabled for testing
- * purposes.
+ * Returns true if power cut should be emulated, otherwise returns false.
  */
-static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_power_cut(struct ubi_device *ubi,
+                                       unsigned int caller)
 {
-       return ubi->dbg.disable_bgt;
+       if (ubi_dbg_power_cut(ubi, caller))
+               return true;
+       return ubi_dbg_fail_power_cut(ubi, caller);
 }
 
 /**
  * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
  * @ubi: UBI device description object
  *
- * Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
+ * Returns true if a bit-flip should be emulated, otherwise returns false.
  */
-static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_bitflip(const struct ubi_device *ubi)
 {
-       if (ubi->dbg.emulate_bitflips)
-               return !get_random_u32_below(200);
-       return 0;
+       if (ubi_dbg_bitflip(ubi))
+               return true;
+       return ubi_dbg_fail_bitflip(ubi);
 }
 
 /**
  * ubi_dbg_is_write_failure - if it is time to emulate a write failure.
  * @ubi: UBI device description object
  *
- * Returns non-zero if a write failure should be emulated, otherwise returns
- * zero.
+ * Returns true if a write failure should be emulated, otherwise returns
+ * false.
  */
-static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_write_failure(const struct ubi_device *ubi)
 {
-       if (ubi->dbg.emulate_io_failures)
-               return !get_random_u32_below(500);
-       return 0;
+       if (ubi_dbg_write_failure(ubi))
+               return true;
+       return ubi_dbg_fail_write(ubi);
 }
 
 /**
  * ubi_dbg_is_erase_failure - if its time to emulate an erase failure.
  * @ubi: UBI device description object
  *
- * Returns non-zero if an erase failure should be emulated, otherwise returns
- * zero.
+ * Returns true if an erase failure should be emulated, otherwise returns
+ * false.
  */
-static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
 {
-       if (ubi->dbg.emulate_io_failures)
-               return !get_random_u32_below(400);
-       return 0;
+       if (ubi_dbg_erase_failure(ubi))
+               return true;
+       return ubi_dbg_fail_erase(ubi);
+}
+
+/**
+ * ubi_dbg_is_eccerr - if it is time to emulate ECC error.
+ * @ubi: UBI device description object
+ *
+ * Returns true if a ECC error should be emulated, otherwise returns false.
+ */
+static inline bool ubi_dbg_is_eccerr(const struct ubi_device *ubi)
+{
+       return ubi_dbg_fail_eccerr(ubi);
+}
+
+/**
+ * ubi_dbg_is_read_failure - if it is time to emulate a read failure.
+ * @ubi: UBI device description object
+ *
+ * Returns true if a read failure should be emulated, otherwise returns
+ * false.
+ */
+static inline bool ubi_dbg_is_read_failure(const struct ubi_device *ubi,
+                                          unsigned int caller)
+{
+       return ubi_dbg_fail_read(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_ff - if it is time to emulate that read region is only 0xFF.
+ * @ubi: UBI device description object
+ *
+ * Returns true if read region should be emulated 0xFF, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_ff(const struct ubi_device *ubi,
+                                unsigned int caller)
+{
+       return ubi_dbg_fail_ff(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_ff_bitflips - if it is time to emulate that read region is only 0xFF
+ * with error reported by the MTD driver
+ *
+ * @ubi: UBI device description object
+ *
+ * Returns true if read region should be emulated 0xFF and error
+ * reported by the MTD driver, otherwise returns false.
+ */
+static inline bool ubi_dbg_is_ff_bitflips(const struct ubi_device *ubi,
+                                         unsigned int caller)
+{
+       return ubi_dbg_fail_ff_bitflips(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bad_hdr - if it is time to emulate a bad header
+ * @ubi: UBI device description object
+ *
+ * Returns true if a bad header error should be emulated, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_bad_hdr(const struct ubi_device *ubi,
+                                     unsigned int caller)
+{
+       return ubi_dbg_fail_bad_hdr(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bad_hdr_ebadmsg - if it is time to emulate a bad header with
+ * ECC error.
+ *
+ * @ubi: UBI device description object
+ *
+ * Returns true if a bad header with ECC error should be emulated, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_bad_hdr_ebadmsg(const struct ubi_device *ubi,
+                                             unsigned int caller)
+{
+       return ubi_dbg_fail_bad_hdr_ebadmsg(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+{
+       return ubi->dbg.disable_bgt;
 }
 
 static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
@@ -125,5 +384,4 @@ static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
        ubi->dbg.chk_fastmap = 1;
 }
 
-int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
 #endif /* !__UBI_DEBUG_H__ */
index 01b644861253347f7a0bed1a7cc05f0f867f7177..a4999bce435f5618986dc9b96f973cf4e64676c0 100644 (file)
@@ -195,7 +195,19 @@ retry:
 
                if (ubi_dbg_is_bitflip(ubi)) {
                        dbg_gen("bit-flip (emulated)");
-                       err = UBI_IO_BITFLIPS;
+                       return UBI_IO_BITFLIPS;
+               }
+
+               if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE)) {
+                       ubi_warn(ubi, "cannot read %d bytes from PEB %d:%d (emulated)",
+                                len, pnum, offset);
+                       return -EIO;
+               }
+
+               if (ubi_dbg_is_eccerr(ubi)) {
+                       ubi_warn(ubi, "ECC error (emulated) while reading %d bytes from PEB %d:%d, read %zd bytes",
+                                len, pnum, offset, read);
+                       return -EBADMSG;
                }
        }
 
@@ -782,7 +794,36 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
         * If there was %-EBADMSG, but the header CRC is still OK, report about
         * a bit-flip to force scrubbing on this PEB.
         */
-       return read_err ? UBI_IO_BITFLIPS : 0;
+       if (read_err)
+               return UBI_IO_BITFLIPS;
+
+       if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE_EC)) {
+               ubi_warn(ubi, "cannot read EC header from PEB %d (emulated)",
+                        pnum);
+               return -EIO;
+       }
+
+       if (ubi_dbg_is_ff(ubi, MASK_IO_FF_EC)) {
+               ubi_warn(ubi, "bit-all-ff (emulated)");
+               return UBI_IO_FF;
+       }
+
+       if (ubi_dbg_is_ff_bitflips(ubi, MASK_IO_FF_BITFLIPS_EC)) {
+               ubi_warn(ubi, "bit-all-ff with error reported by MTD driver (emulated)");
+               return UBI_IO_FF_BITFLIPS;
+       }
+
+       if (ubi_dbg_is_bad_hdr(ubi, MASK_BAD_HDR_EC)) {
+               ubi_warn(ubi, "bad_hdr (emulated)");
+               return UBI_IO_BAD_HDR;
+       }
+
+       if (ubi_dbg_is_bad_hdr_ebadmsg(ubi, MASK_BAD_HDR_EBADMSG_EC)) {
+               ubi_warn(ubi, "bad_hdr with ECC error (emulated)");
+               return UBI_IO_BAD_HDR_EBADMSG;
+       }
+
+       return 0;
 }
 
 /**
@@ -821,8 +862,11 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
        if (err)
                return err;
 
-       if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
+       if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_EC)) {
+               ubi_warn(ubi, "emulating a power cut when writing EC header");
+               ubi_ro_mode(ubi);
                return -EROFS;
+       }
 
        err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
        return err;
@@ -1029,7 +1073,36 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
                return -EINVAL;
        }
 
-       return read_err ? UBI_IO_BITFLIPS : 0;
+       if (read_err)
+               return UBI_IO_BITFLIPS;
+
+       if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE_VID)) {
+               ubi_warn(ubi, "cannot read VID header from PEB %d (emulated)",
+                        pnum);
+               return -EIO;
+       }
+
+       if (ubi_dbg_is_ff(ubi, MASK_IO_FF_VID)) {
+               ubi_warn(ubi, "bit-all-ff (emulated)");
+               return UBI_IO_FF;
+       }
+
+       if (ubi_dbg_is_ff_bitflips(ubi, MASK_IO_FF_BITFLIPS_VID)) {
+               ubi_warn(ubi, "bit-all-ff with error reported by MTD driver (emulated)");
+               return UBI_IO_FF_BITFLIPS;
+       }
+
+       if (ubi_dbg_is_bad_hdr(ubi, MASK_BAD_HDR_VID)) {
+               ubi_warn(ubi, "bad_hdr (emulated)");
+               return UBI_IO_BAD_HDR;
+       }
+
+       if (ubi_dbg_is_bad_hdr_ebadmsg(ubi, MASK_BAD_HDR_EBADMSG_VID)) {
+               ubi_warn(ubi, "bad_hdr with ECC error (emulated)");
+               return UBI_IO_BAD_HDR_EBADMSG;
+       }
+
+       return 0;
 }
 
 /**
@@ -1071,8 +1144,11 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
        if (err)
                return err;
 
-       if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
+       if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_VID)) {
+               ubi_warn(ubi, "emulating a power cut when writing VID header");
+               ubi_ro_mode(ubi);
                return -EROFS;
+       }
 
        err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
                           ubi->vid_hdr_alsize);
index a5ec566df0d74d255e200f7c67f493a859d52dc1..0b42bb45dd840bf28689a72056ec2760ea7d1ef4 100644 (file)
@@ -145,17 +145,6 @@ enum {
        UBI_BAD_FASTMAP,
 };
 
-/*
- * Flags for emulate_power_cut in ubi_debug_info
- *
- * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
- * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
- */
-enum {
-       POWER_CUT_EC_WRITE = 0x01,
-       POWER_CUT_VID_WRITE = 0x02,
-};
-
 /**
  * struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the
  *                        flash.
@@ -404,6 +393,7 @@ struct ubi_volume_desc {
  * @power_cut_counter: count down for writes left until emulated power cut
  * @power_cut_min: minimum number of writes before emulating a power cut
  * @power_cut_max: maximum number of writes until emulating a power cut
+ * @emulate_failures: emulate failures for testing purposes
  * @dfs_dir_name: name of debugfs directory containing files of this UBI device
  * @dfs_dir: direntry object of the UBI device debugfs directory
  * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
@@ -415,6 +405,7 @@ struct ubi_volume_desc {
  * @dfs_emulate_power_cut: debugfs knob to emulate power cuts
  * @dfs_power_cut_min: debugfs knob for minimum writes before power cut
  * @dfs_power_cut_max: debugfs knob for maximum writes until power cut
+ * @dfs_emulate_failures: debugfs entry to control the fault injection type
  */
 struct ubi_debug_info {
        unsigned int chk_gen:1;
@@ -427,6 +418,7 @@ struct ubi_debug_info {
        unsigned int power_cut_counter;
        unsigned int power_cut_min;
        unsigned int power_cut_max;
+       unsigned int emulate_failures;
        char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
        struct dentry *dfs_dir;
        struct dentry *dfs_chk_gen;
@@ -438,6 +430,7 @@ struct ubi_debug_info {
        struct dentry *dfs_emulate_power_cut;
        struct dentry *dfs_power_cut_min;
        struct dentry *dfs_power_cut_max;
+       struct dentry *dfs_emulate_failures;
 };
 
 /**
@@ -1130,6 +1123,19 @@ static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb)
        return vidb->hdr;
 }
 
+/**
+ * ubi_ro_mode - switch to read-only mode.
+ * @ubi: UBI device description object
+ */
+static inline void ubi_ro_mode(struct ubi_device *ubi)
+{
+       if (!ubi->ro_mode) {
+               ubi->ro_mode = 1;
+               ubi_warn(ubi, "switch to read-only mode");
+               dump_stack();
+       }
+}
+
 /*
  * This function is equivalent to 'ubi_io_read()', but @offset is relative to
  * the beginning of the logical eraseblock, not to the beginning of the
@@ -1151,20 +1157,13 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
                                    int pnum, int offset, int len)
 {
        ubi_assert(offset >= 0);
-       return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
-}
 
-/**
- * ubi_ro_mode - switch to read-only mode.
- * @ubi: UBI device description object
- */
-static inline void ubi_ro_mode(struct ubi_device *ubi)
-{
-       if (!ubi->ro_mode) {
-               ubi->ro_mode = 1;
-               ubi_warn(ubi, "switch to read-only mode");
-               dump_stack();
+       if (ubi_dbg_power_cut(ubi, MASK_POWER_CUT_DATA)) {
+               ubi_warn(ubi, "XXXXX emulating a power cut when writing data XXXXX");
+               ubi_ro_mode(ubi);
+               return -EROFS;
        }
+       return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
 }
 
 /**
index fd1d121a584ba211991f2ec10aef39c5bba6a339..30a952c34365f4957a9fdc2acef7009673200898 100644 (file)
@@ -44,15 +44,20 @@ static int mux_mmio_probe(struct platform_device *pdev)
        int ret;
        int i;
 
-       if (of_device_is_compatible(np, "mmio-mux"))
+       if (of_device_is_compatible(np, "mmio-mux")) {
                regmap = syscon_node_to_regmap(np->parent);
-       else
-               regmap = dev_get_regmap(dev->parent, NULL) ?: ERR_PTR(-ENODEV);
-       if (IS_ERR(regmap)) {
-               ret = PTR_ERR(regmap);
-               dev_err(dev, "failed to get regmap: %d\n", ret);
-               return ret;
+       } else {
+               regmap = device_node_to_regmap(np);
+               /* Fallback to checking the parent node on "real" errors. */
+               if (IS_ERR(regmap) && regmap != ERR_PTR(-EPROBE_DEFER)) {
+                       regmap = dev_get_regmap(dev->parent, NULL);
+                       if (!regmap)
+                               regmap = ERR_PTR(-ENODEV);
+               }
        }
+       if (IS_ERR(regmap))
+               return dev_err_probe(dev, PTR_ERR(regmap),
+                                    "failed to get regmap\n");
 
        ret = of_property_count_u32_elems(np, "mux-reg-masks");
        if (ret == 0 || ret % 2)
index 53415e83821ce3a21f5b02d77a360cfad92221ac..68e79b1272f6b95fa803ac0d571a164654af4ac7 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/net.h>
 #include <linux/igmp.h>
 #include <linux/workqueue.h>
-#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
 #include <net/udp.h>
@@ -80,11 +80,11 @@ static struct mld2_grec mldv2_zero_grec;
 
 static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
 {
-       BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) >
+       BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct tc_skb_cb) >
                     sizeof_field(struct sk_buff, cb));
 
        return (struct amt_skb_cb *)((void *)skb->cb +
-               sizeof(struct qdisc_skb_cb));
+               sizeof(struct tc_skb_cb));
 }
 
 static void __amt_source_gc_work(void)
index f44ba2600415f639b0d80958414201cf16ed9a38..e2ec69aa46e53154217d75e09d5fc07c212ee73f 100644 (file)
@@ -30,9 +30,9 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/clk.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 
@@ -259,22 +259,13 @@ static int c_can_plat_probe(struct platform_device *pdev)
        void __iomem *addr;
        struct net_device *dev;
        struct c_can_priv *priv;
-       const struct of_device_id *match;
        struct resource *mem;
        int irq;
        struct clk *clk;
        const struct c_can_driver_data *drvdata;
        struct device_node *np = pdev->dev.of_node;
 
-       match = of_match_device(c_can_of_table, &pdev->dev);
-       if (match) {
-               drvdata = match->data;
-       } else if (pdev->id_entry->driver_data) {
-               drvdata = (struct c_can_driver_data *)
-                       platform_get_device_id(pdev)->driver_data;
-       } else {
-               return -ENODEV;
-       }
+       drvdata = device_get_match_data(&pdev->dev);
 
        /* get the appropriate clk */
        clk = devm_clk_get(&pdev->dev, NULL);
index d15f85a40c1e5be5b703fbe9d27c104564f54f58..8ea7f2795551bb4867869a11858cfd50f3f73f67 100644 (file)
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/can/platform/flexcan.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 
@@ -2034,7 +2034,6 @@ MODULE_DEVICE_TABLE(platform, flexcan_id_table);
 
 static int flexcan_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *of_id;
        const struct flexcan_devtype_data *devtype_data;
        struct net_device *dev;
        struct flexcan_priv *priv;
@@ -2090,14 +2089,7 @@ static int flexcan_probe(struct platform_device *pdev)
        if (IS_ERR(regs))
                return PTR_ERR(regs);
 
-       of_id = of_match_device(flexcan_of_match, &pdev->dev);
-       if (of_id)
-               devtype_data = of_id->data;
-       else if (platform_get_device_id(pdev)->driver_data)
-               devtype_data = (struct flexcan_devtype_data *)
-                       platform_get_device_id(pdev)->driver_data;
-       else
-               return -ENODEV;
+       devtype_data = device_get_match_data(&pdev->dev);
 
        if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
            !((devtype_data->quirks &
index 4837df6efa92685fbb632986dbb51bab67590de5..5b3d69c3b6b66fe1f4cb10a42db9120ebecdea6f 100644 (file)
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/netdevice.h>
 #include <linux/can/dev.h>
+#include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
@@ -290,7 +292,7 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
        int irq, mscan_clksrc = 0;
        int err = -ENOMEM;
 
-       data = of_device_get_match_data(&ofdev->dev);
+       data = device_get_match_data(&ofdev->dev);
        if (!data)
                return -EINVAL;
 
@@ -351,13 +353,11 @@ exit_unmap_mem:
 
 static void mpc5xxx_can_remove(struct platform_device *ofdev)
 {
-       const struct of_device_id *match;
        const struct mpc5xxx_can_data *data;
        struct net_device *dev = platform_get_drvdata(ofdev);
        struct mscan_priv *priv = netdev_priv(dev);
 
-       match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
-       data = match ? match->data : NULL;
+       data = device_get_match_data(&ofdev->dev);
 
        unregister_mscandev(dev);
        if (data && data->put_clock)
index 24ad9f593a7736fde6e2e0299fd80df5233ddf9c..1efa39e134f4ccefd6d3ab7717171fda86611d22 100644 (file)
@@ -1143,7 +1143,7 @@ static void __exit peak_usb_exit(void)
        int err;
 
        /* last chance do send any synchronous commands here */
-       err = driver_for_each_device(&peak_usb_driver.drvwrap.driver, NULL,
+       err = driver_for_each_device(&peak_usb_driver.driver, NULL,
                                     NULL, peak_usb_do_device_exit);
        if (err)
                pr_err("%s: failed to stop all can devices (err %d)\n",
index abe58f103043360d268d48c4fa4cbd880e1b44b2..3722eaa84234ec90d8decaad66015b8fcc40dfe3 100644 (file)
@@ -20,8 +20,8 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
@@ -1726,8 +1726,7 @@ static int xcan_probe(struct platform_device *pdev)
        struct net_device *ndev;
        struct xcan_priv *priv;
        struct phy *transceiver;
-       const struct of_device_id *of_id;
-       const struct xcan_devtype_data *devtype = &xcan_axi_data;
+       const struct xcan_devtype_data *devtype;
        void __iomem *addr;
        int ret;
        int rx_max, tx_max;
@@ -1741,9 +1740,7 @@ static int xcan_probe(struct platform_device *pdev)
                goto err;
        }
 
-       of_id = of_match_device(xcan_of_match, &pdev->dev);
-       if (of_id && of_id->data)
-               devtype = of_id->data;
+       devtype = device_get_match_data(&pdev->dev);
 
        hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
                             "tx-mailbox-count" : "tx-fifo-depth";
index 391c4dbdff4283d0b077608a59e4c95758eb24cf..3c1f657593a8f364e5db9500d06257f34373af8a 100644 (file)
@@ -2838,8 +2838,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
        /* MT753x MAC works in 1G full duplex mode for all up-clocked
         * variants.
         */
-       if (interface == PHY_INTERFACE_MODE_INTERNAL ||
-           interface == PHY_INTERFACE_MODE_TRGMII ||
+       if (interface == PHY_INTERFACE_MODE_TRGMII ||
            (phy_interface_mode_is_8023z(interface))) {
                speed = SPEED_1000;
                duplex = DUPLEX_FULL;
index 383b3c4d6f599c57358d8970c9c26941231e9898..614cabb5c1b039d8d6df6789589455fe00f09e70 100644 (file)
@@ -3659,7 +3659,7 @@ static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
        int err;
 
        if (!chip->info->ops->phy_read_c45)
-               return -EOPNOTSUPP;
+               return 0xffff;
 
        mv88e6xxx_reg_lock(chip);
        err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
index c51f40960961f2b10a2f4191e4b8f5a50af9fc86..7a864329cb7267a9431a181a183c94b6f791f91e 100644 (file)
@@ -2051,12 +2051,11 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
        priv->info = of_device_get_match_data(priv->dev);
 
        priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
-                                                  GPIOD_ASIS);
+                                                  GPIOD_OUT_HIGH);
        if (IS_ERR(priv->reset_gpio))
                return PTR_ERR(priv->reset_gpio);
 
        if (priv->reset_gpio) {
-               gpiod_set_value_cansleep(priv->reset_gpio, 1);
                /* The active low duration must be greater than 10 ms
                 * and checkpatch.pl wants 20 ms.
                 */
index dd50502e21229652fed0fcf73e2b73975bdf010c..ae70eac3be28f84ec3ad00910faf3b4dcbe6bddc 100644 (file)
@@ -1135,6 +1135,8 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
 
        vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
                                       vsc->chipid);
+       if (!vsc->gc.label)
+               return -ENOMEM;
        vsc->gc.ngpio = 4;
        vsc->gc.owner = THIS_MODULE;
        vsc->gc.parent = vsc->dev;
index 0e0aa40168588fff69ff76fc9fa0b3b442319f58..c5636245f1cad225bbd1a638a0bea855db566235 100644 (file)
@@ -100,4 +100,5 @@ static void __exit ns8390_module_exit(void)
 module_init(ns8390_module_init);
 module_exit(ns8390_module_exit);
 #endif /* MODULE */
+MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
 MODULE_LICENSE("GPL");
index 6834742057b3eb041065793057b86d31aba9d2c1..6d429b11e9c6aa5ce0a1ea3a6c3925a3672dd2bc 100644 (file)
@@ -102,4 +102,5 @@ static void __exit NS8390p_cleanup_module(void)
 
 module_init(NS8390p_init_module);
 module_exit(NS8390p_cleanup_module);
+MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
 MODULE_LICENSE("GPL");
index a09f383dd249f1e1782d20de475bdac76a90c54d..828edca8d30c59dec13c8a764fe0f2ed39ceb8ea 100644 (file)
@@ -610,4 +610,5 @@ static int init_pcmcia(void)
        return 1;
 }
 
+MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver");
 MODULE_LICENSE("GPL");
index 24f49a8ff903ff3ae8496074c19df0235b7965cf..fd9dcdc356e681b4eba0698d7f61bbd0196fcb8f 100644 (file)
@@ -270,4 +270,5 @@ static void __exit hydra_cleanup_module(void)
 module_init(hydra_init_module);
 module_exit(hydra_cleanup_module);
 
+MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver");
 MODULE_LICENSE("GPL");
index 265976e3b64ab227c55924bd8e0581b24b506d42..6cc0e190aa79c129ca65becb4699a5c55e034340 100644 (file)
@@ -296,4 +296,5 @@ static void __exit stnic_cleanup(void)
 
 module_init(stnic_probe);
 module_exit(stnic_cleanup);
+MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver");
 MODULE_LICENSE("GPL");
index d70390e9d03d9bfe421554ef9e50ce78123bddfe..c24dd4fe7a10666a25b53fc5246280c9891b10ac 100644 (file)
@@ -443,4 +443,5 @@ static void __exit zorro8390_cleanup_module(void)
 module_init(zorro8390_init_module);
 module_exit(zorro8390_cleanup_module);
 
+MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver");
 MODULE_LICENSE("GPL");
index 5beadabc213618314ad42da120da259415eaa7b3..ea773cfa0af67bd06d86037bc8208b4111b3bbc5 100644 (file)
@@ -63,6 +63,15 @@ static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
        return nq_work;
 }
 
+static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
+{
+       if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
+           pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
+               return false;
+
+       return refcount_inc_not_zero(&pdsc->adminq_refcnt);
+}
+
 void pdsc_process_adminq(struct pdsc_qcq *qcq)
 {
        union pds_core_adminq_comp *comp;
@@ -75,9 +84,9 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
        int aq_work = 0;
        int credits;
 
-       /* Don't process AdminQ when shutting down */
-       if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
-               dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+       /* Don't process AdminQ when it's not up */
+       if (!pdsc_adminq_inc_if_up(pdsc)) {
+               dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
                        __func__);
                return;
        }
@@ -124,6 +133,7 @@ credits:
                pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
                                      credits,
                                      PDS_CORE_INTR_CRED_REARM);
+       refcount_dec(&pdsc->adminq_refcnt);
 }
 
 void pdsc_work_thread(struct work_struct *work)
@@ -135,18 +145,20 @@ void pdsc_work_thread(struct work_struct *work)
 
 irqreturn_t pdsc_adminq_isr(int irq, void *data)
 {
-       struct pdsc_qcq *qcq = data;
-       struct pdsc *pdsc = qcq->pdsc;
+       struct pdsc *pdsc = data;
+       struct pdsc_qcq *qcq;
 
-       /* Don't process AdminQ when shutting down */
-       if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
-               dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+       /* Don't process AdminQ when it's not up */
+       if (!pdsc_adminq_inc_if_up(pdsc)) {
+               dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
                        __func__);
                return IRQ_HANDLED;
        }
 
+       qcq = &pdsc->adminqcq;
        queue_work(pdsc->wq, &qcq->work);
        pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
+       refcount_dec(&pdsc->adminq_refcnt);
 
        return IRQ_HANDLED;
 }
@@ -179,10 +191,16 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
 
        /* Check that the FW is running */
        if (!pdsc_is_fw_running(pdsc)) {
-               u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
-
-               dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
-                        __func__, fw_status);
+               if (pdsc->info_regs) {
+                       u8 fw_status =
+                               ioread8(&pdsc->info_regs->fw_status);
+
+                       dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
+                                __func__, fw_status);
+               } else {
+                       dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
+                                __func__);
+               }
                ret = -ENXIO;
 
                goto err_out_unlock;
@@ -230,6 +248,12 @@ int pdsc_adminq_post(struct pdsc *pdsc,
        int err = 0;
        int index;
 
+       if (!pdsc_adminq_inc_if_up(pdsc)) {
+               dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
+                       __func__, cmd->opcode);
+               return -ENXIO;
+       }
+
        wc.qcq = &pdsc->adminqcq;
        index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
        if (index < 0) {
@@ -248,10 +272,16 @@ int pdsc_adminq_post(struct pdsc *pdsc,
                        break;
 
                if (!pdsc_is_fw_running(pdsc)) {
-                       u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
-
-                       dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
-                               __func__, fw_status);
+                       if (pdsc->info_regs) {
+                               u8 fw_status =
+                                       ioread8(&pdsc->info_regs->fw_status);
+
+                               dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
+                                       __func__, fw_status);
+                       } else {
+                               dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
+                                       __func__);
+                       }
                        err = -ENXIO;
                        break;
                }
@@ -285,6 +315,8 @@ err_out:
                        queue_work(pdsc->wq, &pdsc->health_work);
        }
 
+       refcount_dec(&pdsc->adminq_refcnt);
+
        return err;
 }
 EXPORT_SYMBOL_GPL(pdsc_adminq_post);
index 0d2091e9eb283a375617828c00552cceb82768ca..7658a72867675aad5287c15989155386d3ab9de7 100644 (file)
@@ -125,7 +125,7 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
 
        snprintf(name, sizeof(name), "%s-%d-%s",
                 PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
-       index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq);
+       index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
        if (index < 0)
                return index;
        qcq->intx = index;
@@ -404,10 +404,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
        int numdescs;
        int err;
 
-       if (init)
-               err = pdsc_dev_init(pdsc);
-       else
-               err = pdsc_dev_reinit(pdsc);
+       err = pdsc_dev_init(pdsc);
        if (err)
                return err;
 
@@ -450,6 +447,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
                pdsc_debugfs_add_viftype(pdsc);
        }
 
+       refcount_set(&pdsc->adminq_refcnt, 1);
        clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
        return 0;
 
@@ -464,6 +462,8 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
 
        if (!pdsc->pdev->is_virtfn)
                pdsc_devcmd_reset(pdsc);
+       if (pdsc->adminqcq.work.func)
+               cancel_work_sync(&pdsc->adminqcq.work);
        pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
        pdsc_qcq_free(pdsc, &pdsc->adminqcq);
 
@@ -476,10 +476,9 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
                for (i = 0; i < pdsc->nintrs; i++)
                        pdsc_intr_free(pdsc, i);
 
-               if (removing) {
-                       kfree(pdsc->intr_info);
-                       pdsc->intr_info = NULL;
-               }
+               kfree(pdsc->intr_info);
+               pdsc->intr_info = NULL;
+               pdsc->nintrs = 0;
        }
 
        if (pdsc->kern_dbpage) {
@@ -487,6 +486,7 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
                pdsc->kern_dbpage = NULL;
        }
 
+       pci_free_irq_vectors(pdsc->pdev);
        set_bit(PDSC_S_FW_DEAD, &pdsc->state);
 }
 
@@ -512,6 +512,24 @@ void pdsc_stop(struct pdsc *pdsc)
                                           PDS_CORE_INTR_MASK_SET);
 }
 
+static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
+{
+       /* The driver initializes the adminq_refcnt to 1 when the adminq is
+        * allocated and ready for use. Other users/requesters will increment
+        * the refcnt while in use. If the refcnt is down to 1 then the adminq
+        * is not in use and the refcnt can be cleared and adminq freed. Before
+        * calling this function the driver will set PDSC_S_FW_DEAD, which
+        * causes subsequent attempts to use the adminq and increment the
+        * refcnt to fail. This guarantees that this function will eventually
+        * exit.
+        */
+       while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
+               dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
+                                   __func__);
+               cpu_relax();
+       }
+}
+
 void pdsc_fw_down(struct pdsc *pdsc)
 {
        union pds_core_notifyq_comp reset_event = {
@@ -527,6 +545,8 @@ void pdsc_fw_down(struct pdsc *pdsc)
        if (pdsc->pdev->is_virtfn)
                return;
 
+       pdsc_adminq_wait_and_dec_once_unused(pdsc);
+
        /* Notify clients of fw_down */
        if (pdsc->fw_reporter)
                devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
@@ -577,7 +597,13 @@ err_out:
 
 static void pdsc_check_pci_health(struct pdsc *pdsc)
 {
-       u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+       u8 fw_status;
+
+       /* some sort of teardown already in progress */
+       if (!pdsc->info_regs)
+               return;
+
+       fw_status = ioread8(&pdsc->info_regs->fw_status);
 
        /* is PCI broken? */
        if (fw_status != PDS_RC_BAD_PCI)
index e35d3e7006bfc1891a0343643910b915f31ba56a..110c4b826b22d588b33ca5cd2f0f1d38c76cf4b5 100644 (file)
@@ -184,6 +184,7 @@ struct pdsc {
        struct mutex devcmd_lock;       /* lock for dev_cmd operations */
        struct mutex config_lock;       /* lock for configuration operations */
        spinlock_t adminq_lock;         /* lock for adminq operations */
+       refcount_t adminq_refcnt;
        struct pds_core_dev_info_regs __iomem *info_regs;
        struct pds_core_dev_cmd_regs __iomem *cmd_regs;
        struct pds_core_intr __iomem *intr_ctrl;
@@ -280,7 +281,6 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
                       union pds_core_dev_comp *comp, int max_seconds);
 int pdsc_devcmd_init(struct pdsc *pdsc);
 int pdsc_devcmd_reset(struct pdsc *pdsc);
-int pdsc_dev_reinit(struct pdsc *pdsc);
 int pdsc_dev_init(struct pdsc *pdsc);
 
 void pdsc_reset_prepare(struct pci_dev *pdev);
index 8ec392299b7dcff9b74a0b08f45a5ccd25986cf1..4e8579ca1c8c71bd89659f041f3613113af16141 100644 (file)
@@ -64,6 +64,10 @@ DEFINE_SHOW_ATTRIBUTE(identity);
 
 void pdsc_debugfs_add_ident(struct pdsc *pdsc)
 {
+       /* This file will already exist in the reset flow */
+       if (debugfs_lookup("identity", pdsc->dentry))
+               return;
+
        debugfs_create_file("identity", 0400, pdsc->dentry,
                            pdsc, &identity_fops);
 }
index 31940b857e0e501d2d4d220a0ed6a0cfd03098c7..e65a1632df505d55de687ba781166299d865eaae 100644 (file)
@@ -57,6 +57,9 @@ int pdsc_err_to_errno(enum pds_core_status_code code)
 
 bool pdsc_is_fw_running(struct pdsc *pdsc)
 {
+       if (!pdsc->info_regs)
+               return false;
+
        pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status);
        pdsc->last_fw_time = jiffies;
        pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat);
@@ -182,13 +185,17 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
 {
        int err;
 
+       if (!pdsc->cmd_regs)
+               return -ENXIO;
+
        memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
        pdsc_devcmd_dbell(pdsc);
        err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds);
-       memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
 
        if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq)
                queue_work(pdsc->wq, &pdsc->health_work);
+       else
+               memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
 
        return err;
 }
@@ -309,13 +316,6 @@ static int pdsc_identify(struct pdsc *pdsc)
        return 0;
 }
 
-int pdsc_dev_reinit(struct pdsc *pdsc)
-{
-       pdsc_init_devinfo(pdsc);
-
-       return pdsc_identify(pdsc);
-}
-
 int pdsc_dev_init(struct pdsc *pdsc)
 {
        unsigned int nintrs;
index e9948ea5bbcdbaae713390cca46280e55b548956..54864f27c87a9e526524a023e444e318cc2bc0f7 100644 (file)
@@ -111,7 +111,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
 
        mutex_lock(&pdsc->devcmd_lock);
        err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2);
-       memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
+       if (!err)
+               memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
        mutex_unlock(&pdsc->devcmd_lock);
        if (err && err != -EIO)
                return err;
index 90a811f3878ae974679bc5caba97e18aae04bdfb..fa626719e68d1b206fc9bbe1d038daf51984f19b 100644 (file)
@@ -107,6 +107,9 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
 
        dev_info(pdsc->dev, "Installing firmware\n");
 
+       if (!pdsc->cmd_regs)
+               return -ENXIO;
+
        dl = priv_to_devlink(pdsc);
        devlink_flash_update_status_notify(dl, "Preparing to flash",
                                           NULL, 0, 0);
index 3080898d7b95b0122701cacb8a15796ed2cc2dcb..cdbf053b5376c51c91deadb0d9ce6ab752b90745 100644 (file)
@@ -37,6 +37,11 @@ static void pdsc_unmap_bars(struct pdsc *pdsc)
        struct pdsc_dev_bar *bars = pdsc->bars;
        unsigned int i;
 
+       pdsc->info_regs = NULL;
+       pdsc->cmd_regs = NULL;
+       pdsc->intr_status = NULL;
+       pdsc->intr_ctrl = NULL;
+
        for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
                if (bars[i].vaddr)
                        pci_iounmap(pdsc->pdev, bars[i].vaddr);
@@ -293,7 +298,7 @@ err_out_stop:
 err_out_teardown:
        pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
 err_out_unmap_bars:
-       del_timer_sync(&pdsc->wdtimer);
+       timer_shutdown_sync(&pdsc->wdtimer);
        if (pdsc->wq)
                destroy_workqueue(pdsc->wq);
        mutex_destroy(&pdsc->config_lock);
@@ -420,7 +425,7 @@ static void pdsc_remove(struct pci_dev *pdev)
                 */
                pdsc_sriov_configure(pdev, 0);
 
-               del_timer_sync(&pdsc->wdtimer);
+               timer_shutdown_sync(&pdsc->wdtimer);
                if (pdsc->wq)
                        destroy_workqueue(pdsc->wq);
 
@@ -433,7 +438,6 @@ static void pdsc_remove(struct pci_dev *pdev)
                mutex_destroy(&pdsc->config_lock);
                mutex_destroy(&pdsc->devcmd_lock);
 
-               pci_free_irq_vectors(pdev);
                pdsc_unmap_bars(pdsc);
                pci_release_regions(pdev);
        }
@@ -445,13 +449,26 @@ static void pdsc_remove(struct pci_dev *pdev)
        devlink_free(dl);
 }
 
+static void pdsc_stop_health_thread(struct pdsc *pdsc)
+{
+       timer_shutdown_sync(&pdsc->wdtimer);
+       if (pdsc->health_work.func)
+               cancel_work_sync(&pdsc->health_work);
+}
+
+static void pdsc_restart_health_thread(struct pdsc *pdsc)
+{
+       timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
+       mod_timer(&pdsc->wdtimer, jiffies + 1);
+}
+
 void pdsc_reset_prepare(struct pci_dev *pdev)
 {
        struct pdsc *pdsc = pci_get_drvdata(pdev);
 
+       pdsc_stop_health_thread(pdsc);
        pdsc_fw_down(pdsc);
 
-       pci_free_irq_vectors(pdev);
        pdsc_unmap_bars(pdsc);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
@@ -486,6 +503,7 @@ void pdsc_reset_done(struct pci_dev *pdev)
        }
 
        pdsc_fw_up(pdsc);
+       pdsc_restart_health_thread(pdsc);
 }
 
 static const struct pci_error_handlers pdsc_err_handler = {
index abd4832e4ed21f3c2a22aed047a0331675162907..5acb3e16b5677b7826e488942ff6efb2c3cdf400 100644 (file)
@@ -993,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
        return 0;
 
 err_exit_hwts_rx:
-       aq_ring_free(&aq_ptp->hwts_rx);
+       aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
 err_exit_ptp_rx:
        aq_ring_free(&aq_ptp->ptp_rx);
 err_exit_ptp_tx:
@@ -1011,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
 
        aq_ring_free(&aq_ptp->ptp_tx);
        aq_ring_free(&aq_ptp->ptp_rx);
-       aq_ring_free(&aq_ptp->hwts_rx);
+       aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
 
        aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
 }
index cda8597b4e1469d2895f895f982f84cb97ef4506..f7433abd659159203f99fbb6cc9ed394bdedacfc 100644 (file)
@@ -919,6 +919,19 @@ void aq_ring_free(struct aq_ring_s *self)
        }
 }
 
+void aq_ring_hwts_rx_free(struct aq_ring_s *self)
+{
+       if (!self)
+               return;
+
+       if (self->dx_ring) {
+               dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+                                 self->size * self->dx_size + AQ_CFG_RXDS_DEF,
+                                 self->dx_ring, self->dx_ring_pa);
+               self->dx_ring = NULL;
+       }
+}
+
 unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
 {
        unsigned int count;
index 52847310740a21097dfc35a395e96dfe5de46321..d627ace850ff54201b760a079416e4d690e73184 100644 (file)
@@ -210,6 +210,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self);
 int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
                          struct aq_nic_s *aq_nic, unsigned int idx,
                          unsigned int size, unsigned int dx_size);
+void aq_ring_hwts_rx_free(struct aq_ring_s *self);
 void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
 
 unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
index 3e7c8671cd116485252414e3dc3235d80054495d..72df1bb101728872fb8ac4b4905657ce0a67096b 100644 (file)
@@ -793,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
 };
 module_platform_driver(bcm4908_enet_driver);
 
+MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
index 9b83d536169940ea7f2b861c44fe256faed305aa..50b8e97a811d205fb9ed57c37a74dad701607552 100644 (file)
@@ -260,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
 EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
 
 MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
 MODULE_LICENSE("GPL");
index 6e4f36aaf5db6a6f869141e3c87bac4891cc7e50..36f9bad28e6a90da7456a8ec3e40a38038351c84 100644 (file)
@@ -362,4 +362,5 @@ module_init(bgmac_init)
 module_exit(bgmac_exit)
 
 MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
 MODULE_LICENSE("GPL");
index 0b21fd5bd4575e7a76d52e704051d3e6d30c0f72..77425c7a32dbf882672c9e664b2b006d9bc1e5f9 100644 (file)
@@ -298,4 +298,5 @@ static struct platform_driver bgmac_enet_driver = {
 };
 
 module_platform_driver(bgmac_enet_driver);
+MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
 MODULE_LICENSE("GPL");
index 448a1b90de5ebcf6de79a6b749f5bf279875d0e3..6ffdc42294074f86b08e92accb705e7e58409967 100644 (file)
@@ -1626,4 +1626,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
 EXPORT_SYMBOL_GPL(bgmac_enet_resume);
 
 MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit driver");
 MODULE_LICENSE("GPL");
index 0aacd3c6ed5c0bbf2e02f1ddddc5dc292ae84a07..39845d556bafc949cdf4e599007db26c11b414ac 100644 (file)
@@ -3817,7 +3817,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
 {
        bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
        int i, j, rc, ulp_base_vec, ulp_msix;
-       int tcs = netdev_get_num_tc(bp->dev);
+       int tcs = bp->num_tc;
 
        if (!tcs)
                tcs = 1;
@@ -5935,8 +5935,12 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
 
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
 {
-       if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
-               return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+       if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+               if (!rx_rings)
+                       return 0;
+               return bnxt_calc_nr_ring_pages(rx_rings - 1,
+                                              BNXT_RSS_TABLE_ENTRIES_P5);
+       }
        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
                return 2;
        return 1;
@@ -6926,7 +6930,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
                        if (cp < (rx + tx)) {
                                rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
                                if (rc)
-                                       return rc;
+                                       goto get_rings_exit;
                                if (bp->flags & BNXT_FLAG_AGG_RINGS)
                                        rx <<= 1;
                                hw_resc->resv_rx_rings = rx;
@@ -6938,8 +6942,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
                hw_resc->resv_cp_rings = cp;
                hw_resc->resv_stat_ctxs = stats;
        }
+get_rings_exit:
        hwrm_req_drop(bp, req);
-       return 0;
+       return rc;
 }
 
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@@ -7000,10 +7005,11 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 
                req->num_rx_rings = cpu_to_le16(rx_rings);
                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+                       u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
                        req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
                        req->num_msix = cpu_to_le16(cp_rings);
-                       req->num_rsscos_ctxs =
-                               cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+                       req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
                } else {
                        req->num_cmpl_rings = cpu_to_le16(cp_rings);
                        req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -7050,8 +7056,10 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
        req->num_tx_rings = cpu_to_le16(tx_rings);
        req->num_rx_rings = cpu_to_le16(rx_rings);
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+               u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
                req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
-               req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+               req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
        } else {
                req->num_cmpl_rings = cpu_to_le16(cp_rings);
                req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -9938,7 +9946,7 @@ static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
 
 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
 {
-       int tcs = netdev_get_num_tc(bp->dev);
+       int tcs = bp->num_tc;
 
        if (!tcs)
                tcs = 1;
@@ -9947,7 +9955,7 @@ int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
 
 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
 {
-       int tcs = netdev_get_num_tc(bp->dev);
+       int tcs = bp->num_tc;
 
        return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
               bp->tx_nr_rings_xdp;
@@ -9977,7 +9985,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
        struct net_device *dev = bp->dev;
        int tcs, i;
 
-       tcs = netdev_get_num_tc(dev);
+       tcs = bp->num_tc;
        if (tcs) {
                int i, off, count;
 
@@ -10009,8 +10017,10 @@ static void bnxt_setup_inta(struct bnxt *bp)
 {
        const int len = sizeof(bp->irq_tbl[0].name);
 
-       if (netdev_get_num_tc(bp->dev))
+       if (bp->num_tc) {
                netdev_reset_tc(bp->dev);
+               bp->num_tc = 0;
+       }
 
        snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
                 0);
@@ -10236,8 +10246,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
 
 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
 {
-       int tcs = netdev_get_num_tc(bp->dev);
        bool irq_cleared = false;
+       int tcs = bp->num_tc;
        int rc;
 
        if (!bnxt_need_reserve_rings(bp))
@@ -10263,6 +10273,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
                    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
                netdev_err(bp->dev, "tx ring reservation failure\n");
                netdev_reset_tc(bp->dev);
+               bp->num_tc = 0;
                if (bp->tx_nr_rings_xdp)
                        bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
                else
@@ -11564,10 +11575,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
                netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
                goto half_open_err;
        }
+       bnxt_init_napi(bp);
        set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
        rc = bnxt_init_nic(bp, true);
        if (rc) {
                clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+               bnxt_del_napi(bp);
                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
                goto half_open_err;
        }
@@ -11586,6 +11599,7 @@ half_open_err:
 void bnxt_half_close_nic(struct bnxt *bp)
 {
        bnxt_hwrm_resource_free(bp, false, true);
+       bnxt_del_napi(bp);
        bnxt_free_skbs(bp);
        bnxt_free_mem(bp, true);
        clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
@@ -13232,6 +13246,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
 
        bp->fw_cap = 0;
        rc = bnxt_hwrm_ver_get(bp);
+       /* FW may be unresponsive after FLR. FLR must complete within 100 msec
+        * so wait before continuing with recovery.
+        */
+       if (rc)
+               msleep(100);
        bnxt_try_map_fw_health_reg(bp);
        if (rc) {
                rc = bnxt_try_recover_fw(bp);
@@ -13784,7 +13803,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
                return -EINVAL;
        }
 
-       if (netdev_get_num_tc(dev) == tc)
+       if (bp->num_tc == tc)
                return 0;
 
        if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -13802,9 +13821,11 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
        if (tc) {
                bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
                netdev_set_num_tc(dev, tc);
+               bp->num_tc = tc;
        } else {
                bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
                netdev_reset_tc(dev);
+               bp->num_tc = 0;
        }
        bp->tx_nr_rings += bp->tx_nr_rings_xdp;
        tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
index b8ef1717cb65fb128b6a60d8148829f3c2b6af36..47338b48ca203d2ebea4c72b433690a0afde3c58 100644 (file)
@@ -2225,6 +2225,7 @@ struct bnxt {
        u8                      tc_to_qidx[BNXT_MAX_QUEUE];
        u8                      q_ids[BNXT_MAX_QUEUE];
        u8                      max_q;
+       u8                      num_tc;
 
        unsigned int            current_interval;
 #define BNXT_TIMER_INTERVAL    HZ
index 63e0670383852af5b6ab4a7c28c9d25e0ea64a1b..0dbb880a7aa0e721f98e42d17f845596a702abc4 100644 (file)
@@ -228,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
                }
        }
        if (bp->ieee_ets) {
-               int tc = netdev_get_num_tc(bp->dev);
+               int tc = bp->num_tc;
 
                if (!tc)
                        tc = 1;
index 27b983c0a8a9cdfb3f928fdc026ce7480307ff31..dc4ca706b0e299d9df7da1b2edba240becd68878 100644 (file)
@@ -884,7 +884,7 @@ static void bnxt_get_channels(struct net_device *dev,
        if (max_tx_sch_inputs)
                max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
 
-       tcs = netdev_get_num_tc(dev);
+       tcs = bp->num_tc;
        tx_grps = max(tcs, 1);
        if (bp->tx_nr_rings_xdp)
                tx_grps++;
@@ -944,7 +944,7 @@ static int bnxt_set_channels(struct net_device *dev,
        if (channel->combined_count)
                sh = true;
 
-       tcs = netdev_get_num_tc(dev);
+       tcs = bp->num_tc;
 
        req_tx_rings = sh ? channel->combined_count : channel->tx_count;
        req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@@ -1574,7 +1574,8 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
        struct bnxt *bp = netdev_priv(dev);
 
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
-               return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+               return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+                      BNXT_RSS_TABLE_ENTRIES_P5;
        return HW_HASH_INDEX_SIZE;
 }
 
index adad188e38b8256ef5a1e051310abae2d5bd9b34..cc07660330f533b5e39efcb0f5dff6f865821315 100644 (file)
@@ -684,7 +684,7 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
                timestamp.hwtstamp = ns_to_ktime(ns);
                skb_tstamp_tx(ptp->tx_skb, &timestamp);
        } else {
-               netdev_WARN_ONCE(bp->dev,
+               netdev_warn_once(bp->dev,
                                 "TS query for TX timer failed rc = %x\n", rc);
        }
 
index c2b25fc623ecc08410e8fc45cb391d5a555cc1d9..4079538bc310eaaeee0ee568f6fcc3fbf2b4e758 100644 (file)
@@ -407,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
        if (prog)
                tx_xdp = bp->rx_nr_rings;
 
-       tc = netdev_get_num_tc(dev);
+       tc = bp->num_tc;
        if (!tc)
                tc = 1;
        rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
index 068ed52b66c94dfd75d038c2578740d3c8664732..b3c81a2e9d4643bb818f38069c9bfefe5ea290f0 100644 (file)
@@ -1490,7 +1490,7 @@ int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
        mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
        mbox_cmd.recv_len = 0;
        mbox_cmd.recv_status = 0;
-       mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback;
+       mbox_cmd.fn = cn23xx_get_vf_stats_callback;
        ctx.stats = stats;
        atomic_set(&ctx.status, 0);
        mbox_cmd.fn_arg = (void *)&ctx;
index dd5d80fee24f0f8175fd08488e19d5a752f7c9ea..d2fcb3da484e3de50af5d90732c4ebef5c3d508d 100644 (file)
@@ -429,7 +429,7 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
        mbox_cmd.q_no = 0;
        mbox_cmd.recv_len = 0;
        mbox_cmd.recv_status = 0;
-       mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
+       mbox_cmd.fn = octeon_pfvf_hs_callback;
        mbox_cmd.fn_arg = &status;
 
        octeon_mbox_write(oct, &mbox_cmd);
index 9cc6303c82ffb7f2680d3a01e4ca4c7fb227574c..f38d31bfab1bbcecafacaa4adf2e1ee67bd332b6 100644 (file)
@@ -27,6 +27,7 @@
 #include "octeon_network.h"
 
 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
 MODULE_LICENSE("GPL");
 
 /* OOM task polling interval */
index d92bd7e164775b9523430a04fa5dddfa5bbbc929..9ac85d22c615714c8be71f2a1605e9518619f416 100644 (file)
@@ -57,7 +57,10 @@ union octeon_mbox_message {
        } s;
 };
 
-typedef void (*octeon_mbox_callback_t)(void *, void *, void *);
+struct octeon_mbox_cmd;
+
+typedef void (*octeon_mbox_callback_t)(struct octeon_device *,
+                                      struct octeon_mbox_cmd *, void *);
 
 struct octeon_mbox_cmd {
        union octeon_mbox_message msg;
index 1c2a540db13d8a6806c0de8f3e31998133f8b429..1f495cfd7959b045c8186f6e84a2cbea43eeb1f2 100644 (file)
@@ -868,5 +868,6 @@ static struct platform_driver ep93xx_eth_driver = {
 
 module_platform_driver(ep93xx_eth_driver);
 
+MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:ep93xx-eth");
index df40c720e7b23d517433743efb883edb8f8d4cf4..64eadd3207983a671332d47cc4fdc966561d8425 100644 (file)
@@ -719,17 +719,25 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
 
 static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
                                struct xdp_buff *xdp,
-                               struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+                               struct netdev_queue *tx_nq, struct tsnep_tx *tx,
+                               bool zc)
 {
        struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
        bool xmit;
+       u32 type;
 
        if (unlikely(!xdpf))
                return false;
 
+       /* no page pool for zero copy */
+       if (zc)
+               type = TSNEP_TX_TYPE_XDP_NDO;
+       else
+               type = TSNEP_TX_TYPE_XDP_TX;
+
        __netif_tx_lock(tx_nq, smp_processor_id());
 
-       xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
+       xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);
 
        /* Avoid transmit queue timeout since we share it with the slow path */
        if (xmit)
@@ -1273,7 +1281,7 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
        case XDP_PASS:
                return false;
        case XDP_TX:
-               if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+               if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
                        goto out_failure;
                *status |= TSNEP_XDP_TX;
                return true;
@@ -1323,7 +1331,7 @@ static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
        case XDP_PASS:
                return false;
        case XDP_TX:
-               if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+               if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
                        goto out_failure;
                *status |= TSNEP_XDP_TX;
                return true;
@@ -1485,7 +1493,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 
                        xdp_prepare_buff(&xdp, page_address(entry->page),
                                         XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
-                                        length, false);
+                                        length - ETH_FCS_LEN, false);
 
                        consume = tsnep_xdp_run_prog(rx, prog, &xdp,
                                                     &xdp_status, tx_nq, tx);
@@ -1568,7 +1576,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
                prefetch(entry->xdp->data);
                length = __le32_to_cpu(entry->desc_wb->properties) &
                         TSNEP_DESC_LENGTH_MASK;
-               xsk_buff_set_size(entry->xdp, length);
+               xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
                xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
 
                /* RX metadata with timestamps is in front of actual data,
@@ -1762,6 +1770,19 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
                        allocated--;
                }
        }
+
+       /* set need wakeup flag immediately if ring is not filled completely,
+        * first polling would be too late as need wakeup signalisation would
+        * be delayed for an indefinite time
+        */
+       if (xsk_uses_need_wakeup(rx->xsk_pool)) {
+               int desc_available = tsnep_rx_desc_available(rx);
+
+               if (desc_available)
+                       xsk_set_rx_need_wakeup(rx->xsk_pool);
+               else
+                       xsk_clear_rx_need_wakeup(rx->xsk_pool);
+       }
 }
 
 static bool tsnep_pending(struct tsnep_queue *queue)
index 07c2b701b5fa9793d30e27892536eca28ba0504a..9ebe751c1df0758c75ee493ddaa63876807f49be 100644 (file)
@@ -661,4 +661,5 @@ static struct platform_driver nps_enet_driver = {
 module_platform_driver(nps_enet_driver);
 
 MODULE_AUTHOR("EZchip Semiconductor");
+MODULE_DESCRIPTION("EZchip NPS Ethernet driver");
 MODULE_LICENSE("GPL v2");
index cffbf27c4656b27b0694f2a1ac88ad596c6196b4..bfdbdab443ae0ddcf93dca777f134e659b5d4040 100644 (file)
@@ -3216,4 +3216,5 @@ void enetc_pci_remove(struct pci_dev *pdev)
 }
 EXPORT_SYMBOL_GPL(enetc_pci_remove);
 
+MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
 MODULE_LICENSE("Dual BSD/GPL");
index d42594f322750f5ff5d4cf039b1115e5d4424f31..432523b2c789216b21440e4e6576c06ae30b674c 100644 (file)
@@ -2036,6 +2036,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 
                /* if any of the above changed restart the FEC */
                if (status_change) {
+                       netif_stop_queue(ndev);
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_restart(ndev);
@@ -2045,6 +2046,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                }
        } else {
                if (fep->link) {
+                       netif_stop_queue(ndev);
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_stop(ndev);
@@ -4769,4 +4771,5 @@ static struct platform_driver fec_driver = {
 
 module_platform_driver(fec_driver);
 
+MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
 MODULE_LICENSE("GPL");
index 70dd982a5edce68a63a8f3b94d59c9947dd0045a..026f7270a54de8bf398516b4e563f35d4825a3d1 100644 (file)
@@ -531,4 +531,5 @@ static struct platform_driver fsl_pq_mdio_driver = {
 
 module_platform_driver(fsl_pq_mdio_driver);
 
+MODULE_DESCRIPTION("Freescale PQ MDIO helpers");
 MODULE_LICENSE("GPL");
index 7a8dc5386ffff9bd99d94eced337cf276551a88f..76615d47e055aebc9fcea0d365b28b4389337c07 100644 (file)
@@ -356,7 +356,7 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
 
 static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
                                        struct gve_rx_slot_page_info *page_info,
-                                       u16 packet_buffer_size, u16 len,
+                                       unsigned int truesize, u16 len,
                                        struct gve_rx_ctx *ctx)
 {
        u32 offset = page_info->page_offset + page_info->pad;
@@ -389,10 +389,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
        if (skb != ctx->skb_head) {
                ctx->skb_head->len += len;
                ctx->skb_head->data_len += len;
-               ctx->skb_head->truesize += packet_buffer_size;
+               ctx->skb_head->truesize += truesize;
        }
        skb_add_rx_frag(skb, num_frags, page_info->page,
-                       offset, len, packet_buffer_size);
+                       offset, len, truesize);
 
        return ctx->skb_head;
 }
@@ -486,7 +486,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
 
                memcpy(alloc_page_info.page_address, src, page_info->pad + len);
                skb = gve_rx_add_frags(napi, &alloc_page_info,
-                                      rx->packet_buffer_size,
+                                      PAGE_SIZE,
                                       len, ctx);
 
                u64_stats_update_begin(&rx->statss);
index a187582d22994c607915f1fe26f5374031444976..ba9c19e6994c9defdf06eada37091e09d10881fa 100644 (file)
@@ -360,23 +360,43 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
  * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
  * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
  * bits to count nanoseconds leaving the rest for fractional nonseconds.
+ *
+ * Any given INCVALUE also has an associated maximum adjustment value. This
+ * maximum adjustment value is the largest increase (or decrease) which can be
+ * safely applied without overflowing the INCVALUE. Since INCVALUE has
+ * a maximum range of 24 bits, its largest value is 0xFFFFFF.
+ *
+ * To understand where the maximum value comes from, consider the following
+ * equation:
+ *
+ *   new_incval = base_incval + (base_incval * adjustment) / 1billion
+ *
+ * To avoid overflow that means:
+ *   max_incval = base_incval + (base_incval * max_adj) / billion
+ *
+ * Re-arranging:
+ *   max_adj = floor(((max_incval - base_incval) * 1billion) / 1billion)
  */
 #define INCVALUE_96MHZ         125
 #define INCVALUE_SHIFT_96MHZ   17
 #define INCPERIOD_SHIFT_96MHZ  2
 #define INCPERIOD_96MHZ                (12 >> INCPERIOD_SHIFT_96MHZ)
+#define MAX_PPB_96MHZ          23999900 /* 23,999,900 ppb */
 
 #define INCVALUE_25MHZ         40
 #define INCVALUE_SHIFT_25MHZ   18
 #define INCPERIOD_25MHZ                1
+#define MAX_PPB_25MHZ          599999900 /* 599,999,900 ppb */
 
 #define INCVALUE_24MHZ         125
 #define INCVALUE_SHIFT_24MHZ   14
 #define INCPERIOD_24MHZ                3
+#define MAX_PPB_24MHZ          999999999 /* 999,999,999 ppb */
 
 #define INCVALUE_38400KHZ      26
 #define INCVALUE_SHIFT_38400KHZ        19
 #define INCPERIOD_38400KHZ     1
+#define MAX_PPB_38400KHZ       230769100 /* 230,769,100 ppb */
 
 /* Another drawback of scaling the incvalue by a large factor is the
  * 64-bit SYSTIM register overflows more quickly.  This is dealt with
index 02d871bc112a739cec1baffba5b63abaf14f4a7d..bbcfd529399b0fa938037858b1e2f7912f8e5a58 100644 (file)
@@ -280,8 +280,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
 
        switch (hw->mac.type) {
        case e1000_pch2lan:
+               adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
+               break;
        case e1000_pch_lpt:
+               if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+                       adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
+               else
+                       adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
+               break;
        case e1000_pch_spt:
+               adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+               break;
        case e1000_pch_cnp:
        case e1000_pch_tgp:
        case e1000_pch_adp:
@@ -289,15 +298,14 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
        case e1000_pch_lnp:
        case e1000_pch_ptp:
        case e1000_pch_nvp:
-               if ((hw->mac.type < e1000_pch_lpt) ||
-                   (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
-                       adapter->ptp_clock_info.max_adj = 24000000 - 1;
-                       break;
-               }
-               fallthrough;
+               if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+                       adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+               else
+                       adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+               break;
        case e1000_82574:
        case e1000_82583:
-               adapter->ptp_clock_info.max_adj = 600000000 - 1;
+               adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
                break;
        default:
                break;
index 18a1c3b6d72c5e4f7320871f43c79cca23f90f91..c8f35d4de271add10d9b65ef0591578621eed7b3 100644 (file)
@@ -5,6 +5,7 @@
 #define _I40E_ADMINQ_CMD_H_
 
 #include <linux/bits.h>
+#include <linux/types.h>
 
 /* This header file defines the i40e Admin Queue commands and is shared between
  * i40e Firmware and Software.
index 6b60dc9b77361a2537466c18c8d78eafbc35a01a..d76497566e40e739fd7eba7773fbb84c02ddf93b 100644 (file)
@@ -43,7 +43,7 @@
 #define I40E_LLDP_TLV_SUBTYPE_SHIFT    0
 #define I40E_LLDP_TLV_SUBTYPE_MASK     (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
 #define I40E_LLDP_TLV_OUI_SHIFT                8
-#define I40E_LLDP_TLV_OUI_MASK         (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+#define I40E_LLDP_TLV_OUI_MASK         (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT)
 
 /* Defines for IEEE ETS TLV */
 #define I40E_IEEE_ETS_MAXTC_SHIFT      0
index ece3a6b9a5c61e59d103713f5d6bb869e98b2574..ab20202a3da3ca73a92e70185481c35067fdb8d8 100644 (file)
@@ -4,6 +4,7 @@
 #ifndef _I40E_DIAG_H_
 #define _I40E_DIAG_H_
 
+#include <linux/types.h>
 #include "i40e_adminq_cmd.h"
 
 /* forward-declare the HW struct for the compiler */
index ae8f9f135725b4de88e9d95858025ce6ef41c650..6e7fd473abfd001eb45e8b5bda8978fff9eec26b 100644 (file)
@@ -3588,40 +3588,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        struct i40e_hmc_obj_rxq rx_ctx;
        int err = 0;
        bool ok;
-       int ret;
 
        bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
 
        /* clear the context structure first */
        memset(&rx_ctx, 0, sizeof(rx_ctx));
 
-       if (ring->vsi->type == I40E_VSI_MAIN)
-               xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+       ring->rx_buf_len = vsi->rx_buf_len;
+
+       /* XDP RX-queue info only needed for RX rings exposed to XDP */
+       if (ring->vsi->type != I40E_VSI_MAIN)
+               goto skip;
+
+       if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+               err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                        ring->queue_index,
+                                        ring->q_vector->napi.napi_id,
+                                        ring->rx_buf_len);
+               if (err)
+                       return err;
+       }
 
        ring->xsk_pool = i40e_xsk_pool(ring);
        if (ring->xsk_pool) {
-               ring->rx_buf_len =
-                 xsk_pool_get_rx_frame_size(ring->xsk_pool);
-               ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+               xdp_rxq_info_unreg(&ring->xdp_rxq);
+               ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+               err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                        ring->queue_index,
+                                        ring->q_vector->napi.napi_id,
+                                        ring->rx_buf_len);
+               if (err)
+                       return err;
+               err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                 MEM_TYPE_XSK_BUFF_POOL,
                                                 NULL);
-               if (ret)
-                       return ret;
+               if (err)
+                       return err;
                dev_info(&vsi->back->pdev->dev,
                         "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                         ring->queue_index);
 
        } else {
-               ring->rx_buf_len = vsi->rx_buf_len;
-               if (ring->vsi->type == I40E_VSI_MAIN) {
-                       ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
-                                                        MEM_TYPE_PAGE_SHARED,
-                                                        NULL);
-                       if (ret)
-                               return ret;
-               }
+               err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+                                                MEM_TYPE_PAGE_SHARED,
+                                                NULL);
+               if (err)
+                       return err;
        }
 
+skip:
        xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
 
        rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
index 971ba33220381b799e4900f6f2231b58bcb877e4..0d7177083708f29d3b4deba11d00abdcb017f886 100644 (file)
@@ -1548,7 +1548,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
-       int err;
 
        u64_stats_init(&rx_ring->syncp);
 
@@ -1569,14 +1568,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
        rx_ring->next_to_process = 0;
        rx_ring->next_to_use = 0;
 
-       /* XDP RX-queue info only needed for RX rings exposed to XDP */
-       if (rx_ring->vsi->type == I40E_VSI_MAIN) {
-               err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
-                                      rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
-               if (err < 0)
-                       return err;
-       }
-
        rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
 
        rx_ring->rx_bi =
@@ -2087,7 +2078,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
 static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
                                  struct xdp_buff *xdp)
 {
-       u32 next = rx_ring->next_to_clean;
+       u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+       u32 next = rx_ring->next_to_clean, i = 0;
        struct i40e_rx_buffer *rx_buffer;
 
        xdp->flags = 0;
@@ -2100,10 +2092,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
                if (!rx_buffer->page)
                        continue;
 
-               if (xdp_res == I40E_XDP_CONSUMED)
-                       rx_buffer->pagecnt_bias++;
-               else
+               if (xdp_res != I40E_XDP_CONSUMED)
                        i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
+               else if (i++ <= nr_frags)
+                       rx_buffer->pagecnt_bias++;
 
                /* EOP buffer will be put in i40e_clean_rx_irq() */
                if (next == rx_ring->next_to_process)
@@ -2117,20 +2109,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
  * i40e_construct_skb - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
  * @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
  *
  * This function allocates an skb.  It then populates it with the page
  * data from the current receive descriptor, taking care to set up the
  * skb correctly.
  */
 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
-                                         struct xdp_buff *xdp,
-                                         u32 nr_frags)
+                                         struct xdp_buff *xdp)
 {
        unsigned int size = xdp->data_end - xdp->data;
        struct i40e_rx_buffer *rx_buffer;
+       struct skb_shared_info *sinfo;
        unsigned int headlen;
        struct sk_buff *skb;
+       u32 nr_frags = 0;
 
        /* prefetch first cache line of first page */
        net_prefetch(xdp->data);
@@ -2168,6 +2160,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
        memcpy(__skb_put(skb, headlen), xdp->data,
               ALIGN(headlen, sizeof(long)));
 
+       if (unlikely(xdp_buff_has_frags(xdp))) {
+               sinfo = xdp_get_shared_info_from_buff(xdp);
+               nr_frags = sinfo->nr_frags;
+       }
        rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
        /* update all of the pointers */
        size -= headlen;
@@ -2187,9 +2183,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
        }
 
        if (unlikely(xdp_buff_has_frags(xdp))) {
-               struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
+               struct skb_shared_info *skinfo = skb_shinfo(skb);
 
-               sinfo = xdp_get_shared_info_from_buff(xdp);
                memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
                       sizeof(skb_frag_t) * nr_frags);
 
@@ -2212,17 +2207,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
  * i40e_build_skb - Build skb around an existing buffer
  * @rx_ring: Rx descriptor ring to transact packets on
  * @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
  *
  * This function builds an skb around an existing Rx buffer, taking care
  * to set up the skb correctly and avoid any memcpy overhead.
  */
 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
-                                     struct xdp_buff *xdp,
-                                     u32 nr_frags)
+                                     struct xdp_buff *xdp)
 {
        unsigned int metasize = xdp->data - xdp->data_meta;
+       struct skb_shared_info *sinfo;
        struct sk_buff *skb;
+       u32 nr_frags;
 
        /* Prefetch first cache line of first page. If xdp->data_meta
         * is unused, this points exactly as xdp->data, otherwise we
@@ -2231,6 +2226,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
         */
        net_prefetch(xdp->data_meta);
 
+       if (unlikely(xdp_buff_has_frags(xdp))) {
+               sinfo = xdp_get_shared_info_from_buff(xdp);
+               nr_frags = sinfo->nr_frags;
+       }
+
        /* build an skb around the page buffer */
        skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
        if (unlikely(!skb))
@@ -2243,9 +2243,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
                skb_metadata_set(skb, metasize);
 
        if (unlikely(xdp_buff_has_frags(xdp))) {
-               struct skb_shared_info *sinfo;
-
-               sinfo = xdp_get_shared_info_from_buff(xdp);
                xdp_update_skb_shared_info(skb, nr_frags,
                                           sinfo->xdp_frags_size,
                                           nr_frags * xdp->frame_sz,
@@ -2589,9 +2586,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
                        total_rx_bytes += size;
                } else {
                        if (ring_uses_build_skb(rx_ring))
-                               skb = i40e_build_skb(rx_ring, xdp, nfrags);
+                               skb = i40e_build_skb(rx_ring, xdp);
                        else
-                               skb = i40e_construct_skb(rx_ring, xdp, nfrags);
+                               skb = i40e_construct_skb(rx_ring, xdp);
 
                        /* drop if we failed to retrieve a buffer */
                        if (!skb) {
index af7d5fa6cdc15552935b03e5beaaaaac856b7d3f..11500003af0d47dbfb203ea51914c2f452b42368 100644 (file)
@@ -414,7 +414,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
        }
 
        __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
-                                  virt_to_page(xdp->data_hard_start), 0, size);
+                                  virt_to_page(xdp->data_hard_start),
+                                  XDP_PACKET_HEADROOM, size);
        sinfo->xdp_frags_size += size;
        xsk_buff_add_frag(xdp);
 
@@ -498,7 +499,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
                i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
                                          &rx_bytes, xdp_res, &failure);
-               first->flags = 0;
                next_to_clean = next_to_process;
                if (failure)
                        break;
index 533b923cae2d078dfecdc902d4605d08b0d7391e..7ac847718882e29b38071ca6b8adb47ca063f1d7 100644 (file)
@@ -547,19 +547,27 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
        ring->rx_buf_len = ring->vsi->rx_buf_len;
 
        if (ring->vsi->type == ICE_VSI_PF) {
-               if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
-                       /* coverity[check_return] */
-                       __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
-                                          ring->q_index,
-                                          ring->q_vector->napi.napi_id,
-                                          ring->vsi->rx_buf_len);
+               if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+                       err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                                ring->q_index,
+                                                ring->q_vector->napi.napi_id,
+                                                ring->rx_buf_len);
+                       if (err)
+                               return err;
+               }
 
                ring->xsk_pool = ice_xsk_pool(ring);
                if (ring->xsk_pool) {
-                       xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+                       xdp_rxq_info_unreg(&ring->xdp_rxq);
 
                        ring->rx_buf_len =
                                xsk_pool_get_rx_frame_size(ring->xsk_pool);
+                       err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                                ring->q_index,
+                                                ring->q_vector->napi.napi_id,
+                                                ring->rx_buf_len);
+                       if (err)
+                               return err;
                        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                         MEM_TYPE_XSK_BUFF_POOL,
                                                         NULL);
@@ -571,13 +579,14 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                        dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                                 ring->q_index);
                } else {
-                       if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
-                               /* coverity[check_return] */
-                               __xdp_rxq_info_reg(&ring->xdp_rxq,
-                                                  ring->netdev,
-                                                  ring->q_index,
-                                                  ring->q_vector->napi.napi_id,
-                                                  ring->vsi->rx_buf_len);
+                       if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+                               err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                                        ring->q_index,
+                                                        ring->q_vector->napi.napi_id,
+                                                        ring->rx_buf_len);
+                               if (err)
+                                       return err;
+                       }
 
                        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                         MEM_TYPE_PAGE_SHARED,
index 82bc54fec7f36400a9be1f6603da2770ab5bb2e5..a2562f04267f23695af92be0bca5c1174702bef3 100644 (file)
@@ -24,7 +24,7 @@
 #define rd64(a, reg)           readq((a)->hw_addr + (reg))
 
 #define ice_flush(a)           rd32((a), GLGEN_STAT)
-#define ICE_M(m, s)            ((m) << (s))
+#define ICE_M(m, s)            ((m ## U) << (s))
 
 struct ice_dma_mem {
        void *va;
index 74d13cc5a3a7f1f62e6657e058548b243e2d438b..97d41d6ebf1fb69419e2cf13dae17db08fd27910 100644 (file)
@@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
        if (ice_is_xdp_ena_vsi(rx_ring->vsi))
                WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
 
-       if (rx_ring->vsi->type == ICE_VSI_PF &&
-           !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
-               if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
-                                    rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
-                       goto err;
        return 0;
 
 err:
@@ -603,9 +598,7 @@ out_failure:
                ret = ICE_XDP_CONSUMED;
        }
 exit:
-       rx_buf->act = ret;
-       if (unlikely(xdp_buff_has_frags(xdp)))
-               ice_set_rx_bufs_act(xdp, rx_ring, ret);
+       ice_set_rx_bufs_act(xdp, rx_ring, ret);
 }
 
 /**
@@ -893,14 +886,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
        }
 
        if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
-               if (unlikely(xdp_buff_has_frags(xdp)))
-                       ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+               ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
                return -ENOMEM;
        }
 
        __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
                                   rx_buf->page_offset, size);
        sinfo->xdp_frags_size += size;
+       /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
+        * can pop off frags but driver has to handle it on its own
+        */
+       rx_ring->nr_frags = sinfo->nr_frags;
 
        if (page_is_pfmemalloc(rx_buf->page))
                xdp_buff_set_frag_pfmemalloc(xdp);
@@ -1251,6 +1247,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 
                xdp->data = NULL;
                rx_ring->first_desc = ntc;
+               rx_ring->nr_frags = 0;
                continue;
 construct_skb:
                if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1266,10 +1263,12 @@ construct_skb:
                                                    ICE_XDP_CONSUMED);
                        xdp->data = NULL;
                        rx_ring->first_desc = ntc;
+                       rx_ring->nr_frags = 0;
                        break;
                }
                xdp->data = NULL;
                rx_ring->first_desc = ntc;
+               rx_ring->nr_frags = 0;
 
                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
                if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
index b3379ff736747887a7404c5d020b865bc10a5024..af955b0e5dc5caeb3ce6ca3f9671772763270f52 100644 (file)
@@ -358,6 +358,7 @@ struct ice_rx_ring {
        struct ice_tx_ring *xdp_ring;
        struct ice_rx_ring *next;       /* pointer to next ring in q_vector */
        struct xsk_buff_pool *xsk_pool;
+       u32 nr_frags;
        dma_addr_t dma;                 /* physical address of ring */
        u16 rx_buf_len;
        u8 dcb_tc;                      /* Traffic class of ring */
index 762047508619603028cac48e5e091d4a584084c2..afcead4baef4b1552bdd152ee5414c8127b0b992 100644 (file)
  * act: action to store onto Rx buffers related to XDP buffer parts
  *
  * Set action that should be taken before putting Rx buffer from first frag
- * to one before last. Last one is handled by caller of this function as it
- * is the EOP frag that is currently being processed. This function is
- * supposed to be called only when XDP buffer contains frags.
+ * to the last.
  */
 static inline void
 ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
                    const unsigned int act)
 {
-       const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
-       u32 first = rx_ring->first_desc;
-       u32 nr_frags = sinfo->nr_frags;
+       u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+       u32 nr_frags = rx_ring->nr_frags + 1;
+       u32 idx = rx_ring->first_desc;
        u32 cnt = rx_ring->count;
        struct ice_rx_buf *buf;
 
        for (int i = 0; i < nr_frags; i++) {
-               buf = &rx_ring->rx_buf[first];
+               buf = &rx_ring->rx_buf[idx];
                buf->act = act;
 
-               if (++first == cnt)
-                       first = 0;
+               if (++idx == cnt)
+                       idx = 0;
+       }
+
+       /* adjust pagecnt_bias on frags freed by XDP prog */
+       if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
+               u32 delta = rx_ring->nr_frags - sinfo_frags;
+
+               while (delta) {
+                       if (idx == 0)
+                               idx = cnt - 1;
+                       else
+                               idx--;
+                       buf = &rx_ring->rx_buf[idx];
+                       buf->pagecnt_bias--;
+                       delta--;
+               }
        }
 }
 
index 41ab6d7bbd9ef923fb766555ba48c5533e989f93..a508e917ce5ffab9e092a62337fbe70b27efbc5e 100644 (file)
@@ -1072,7 +1072,7 @@ struct ice_aq_get_set_rss_lut_params {
 #define ICE_OROM_VER_BUILD_SHIFT       8
 #define ICE_OROM_VER_BUILD_MASK                (0xffff << ICE_OROM_VER_BUILD_SHIFT)
 #define ICE_OROM_VER_SHIFT             24
-#define ICE_OROM_VER_MASK              (0xff << ICE_OROM_VER_SHIFT)
+#define ICE_OROM_VER_MASK              (0xffU << ICE_OROM_VER_SHIFT)
 #define ICE_SR_PFA_PTR                 0x40
 #define ICE_SR_1ST_NVM_BANK_PTR                0x42
 #define ICE_SR_NVM_BANK_SIZE           0x43
index 5d1ae8e4058a4ae43bb0fb2be98070cf1f2e9559..8b81a16770459373026f2436099c0280e29f9022 100644 (file)
@@ -825,7 +825,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
        }
 
        __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
-                                  virt_to_page(xdp->data_hard_start), 0, size);
+                                  virt_to_page(xdp->data_hard_start),
+                                  XDP_PACKET_HEADROOM, size);
        sinfo->xdp_frags_size += size;
        xsk_buff_add_frag(xdp);
 
@@ -895,7 +896,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 
                if (!first) {
                        first = xdp;
-                       xdp_buff_clear_frags_flag(first);
                } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
                        break;
                }
index 5fea2fd957eb3563ac839b0cc966cde9de40a2ba..58179bd733ff05bf5d31cc0b6e4855d075fcae8d 100644 (file)
@@ -783,6 +783,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
        /* setup watchdog timeout value to be 5 second */
        netdev->watchdog_timeo = 5 * HZ;
 
+       netdev->dev_port = idx;
+
        /* configure default MTU size */
        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = vport->max_mtu;
index 8dc837889723c8a8976fd537e79e7d6acd49c4a8..4a3c4454d25abad18582ea7b93c74b616ef5cf75 100644 (file)
@@ -978,7 +978,7 @@ struct virtchnl2_ptype {
        u8 proto_id_count;
        __le16 pad;
        __le16 proto_id[];
-};
+} __packed __aligned(2);
 VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
 
 /**
index 6208923e29a2b861363317b983b577b383bbeeb1..c1adc94a5a657a6ac432a52016436479020673f3 100644 (file)
@@ -716,7 +716,8 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
        if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
                error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
                hw_dbg(hw, "Failed to read, error %x\n", error);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
        if (!ret)
index 5182fe737c3727629fd4a24d516f2459126ddf2b..ff54fbe41bccc89ce954eadb770ceb5786b719ac 100644 (file)
@@ -318,4 +318,5 @@ static struct platform_driver liteeth_driver = {
 module_platform_driver(liteeth_driver);
 
 MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver");
 MODULE_LICENSE("GPL");
index 820b1fabe297a209dd2620092115a01361c755fd..23adf53c2aa1c08086bff5758a99673e023c7de4 100644 (file)
@@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
        mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
 }
 
+/* Cleanup pool before actual initialization in the OS */
+static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
+{
+       unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
+       u32 val;
+       int i;
+
+       /* Drain the BM from all possible residues left by firmware */
+       for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
+               mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
+
+       put_cpu();
+
+       /* Stop the BM pool */
+       val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
+       val |= MVPP2_BM_STOP_MASK;
+       mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
+}
+
 static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
 {
        enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
        int i, err, poolnum = MVPP2_BM_POOLS_NUM;
        struct mvpp2_port *port;
 
+       if (priv->percpu_pools)
+               poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+       /* Clean up the pool state in case it contains stale state */
+       for (i = 0; i < poolnum; i++)
+               mvpp2_bm_pool_cleanup(priv, i);
+
        if (priv->percpu_pools) {
                for (i = 0; i < priv->port_count; i++) {
                        port = priv->port_list[i];
@@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
                        }
                }
 
-               poolnum = mvpp2_get_nrxqs(priv) * 2;
                for (i = 0; i < poolnum; i++) {
                        /* the pool in use */
                        int pn = i / (poolnum / 2);
index 9690ac01f02c8db9b9ed2e524b05aa124b077c7a..b92264d0a77e71075495f5cc0e02c330e3c279bd 100644 (file)
@@ -413,4 +413,5 @@ const char *otx2_mbox_id2name(u16 id)
 EXPORT_SYMBOL(otx2_mbox_id2name);
 
 MODULE_AUTHOR("Marvell.");
+MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
 MODULE_LICENSE("GPL v2");
index 4728ba34b0e34cc3f5741bfc62b2ba49e00c2b8f..76218f1cb45958f3963ed340997051846093ec94 100644 (file)
@@ -506,6 +506,7 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
        rpm_t *rpm = rpmd;
        u8 num_lmacs;
        u32 fifo_len;
+       u16 max_lmac;
 
        lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS);
        /* LMACs are divided into two groups and each group
@@ -513,7 +514,11 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
         * Group0 lmac_id range {0..3}
         * Group1 lmac_id range {4..7}
         */
-       fifo_len = rpm->mac_ops->fifo_len / 2;
+       max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF;
+       if (max_lmac > 4)
+               fifo_len = rpm->mac_ops->fifo_len / 2;
+       else
+               fifo_len = rpm->mac_ops->fifo_len;
 
        if (lmac_id < 4) {
                num_lmacs = hweight8(lmac_info & 0xF);
index 167145bdcb75d3f852134fcaa44fc2f307c42478..8cfd74ad991cc8c5a9052a7076c358d372feb804 100644 (file)
@@ -1850,8 +1850,8 @@ void npc_mcam_rsrcs_deinit(struct rvu *rvu)
 {
        struct npc_mcam *mcam = &rvu->hw->mcam;
 
-       kfree(mcam->bmap);
-       kfree(mcam->bmap_reverse);
+       bitmap_free(mcam->bmap);
+       bitmap_free(mcam->bmap_reverse);
        kfree(mcam->entry2pfvf_map);
        kfree(mcam->cntr2pfvf_map);
        kfree(mcam->entry2cntr_map);
@@ -1904,21 +1904,20 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
        mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
 
        /* Allocate bitmaps for managing MCAM entries */
-       mcam->bmap = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries),
-                                  sizeof(long), GFP_KERNEL);
+       mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
        if (!mcam->bmap)
                return -ENOMEM;
 
-       mcam->bmap_reverse = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries),
-                                          sizeof(long), GFP_KERNEL);
+       mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
        if (!mcam->bmap_reverse)
                goto free_bmap;
 
        mcam->bmap_fcnt = mcam->bmap_entries;
 
        /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
-       mcam->entry2pfvf_map = kmalloc_array(mcam->bmap_entries,
-                                            sizeof(u16), GFP_KERNEL);
+       mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+                                      GFP_KERNEL);
+
        if (!mcam->entry2pfvf_map)
                goto free_bmap_reverse;
 
@@ -1941,21 +1940,21 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
        if (err)
                goto free_entry_map;
 
-       mcam->cntr2pfvf_map = kmalloc_array(mcam->counters.max,
-                                           sizeof(u16), GFP_KERNEL);
+       mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16),
+                                     GFP_KERNEL);
        if (!mcam->cntr2pfvf_map)
                goto free_cntr_bmap;
 
        /* Alloc memory for MCAM entry to counter mapping and for tracking
         * counter's reference count.
         */
-       mcam->entry2cntr_map = kmalloc_array(mcam->bmap_entries,
-                                            sizeof(u16), GFP_KERNEL);
+       mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+                                      GFP_KERNEL);
        if (!mcam->entry2cntr_map)
                goto free_cntr_map;
 
-       mcam->cntr_refcnt = kmalloc_array(mcam->counters.max,
-                                         sizeof(u16), GFP_KERNEL);
+       mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16),
+                                   GFP_KERNEL);
        if (!mcam->cntr_refcnt)
                goto free_entry_cntr_map;
 
@@ -1988,9 +1987,9 @@ free_cntr_bmap:
 free_entry_map:
        kfree(mcam->entry2pfvf_map);
 free_bmap_reverse:
-       kfree(mcam->bmap_reverse);
+       bitmap_free(mcam->bmap_reverse);
 free_bmap:
-       kfree(mcam->bmap);
+       bitmap_free(mcam->bmap);
 
        return -ENOMEM;
 }
index 7ca6941ea0b9b4d684ba45482b88db066a728f98..02d0b707aea5bd6b9dea286180914b5aaba4a51d 100644 (file)
@@ -951,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
        if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
                err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
                                 sizeof(*sq->timestamps));
-               if (err)
+               if (err) {
+                       kfree(sq->sg);
+                       sq->sg = NULL;
                        return err;
+               }
        }
 
        sq->head = 0;
@@ -968,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
        sq->stats.bytes = 0;
        sq->stats.pkts = 0;
 
-       return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+       err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+       if (err) {
+               kfree(sq->sg);
+               sq->sg = NULL;
+               return err;
+       }
+
+       return 0;
 
 }
 
index 2928898c7f8df89c45092c209f9a3dd25b43ee21..7f786de6101483a775c8aa4f12789631040fdd95 100644 (file)
@@ -314,7 +314,6 @@ static int otx2_set_channels(struct net_device *dev,
        pfvf->hw.tx_queues = channel->tx_count;
        if (pfvf->xdp_prog)
                pfvf->hw.xdp_queues = channel->rx_count;
-       pfvf->hw.non_qos_queues =  pfvf->hw.tx_queues + pfvf->hw.xdp_queues;
 
        if (if_up)
                err = dev->netdev_ops->ndo_open(dev);
index a57455aebff6fc58e24c4a4da2d60d78e59f439f..e5fe67e7386551e321949dc3b42074067eb4b3a9 100644 (file)
@@ -1744,6 +1744,7 @@ int otx2_open(struct net_device *netdev)
        /* RQ and SQs are mapped to different CQs,
         * so find out max CQ IRQs (i.e CINTs) needed.
         */
+       pf->hw.non_qos_queues =  pf->hw.tx_queues + pf->hw.xdp_queues;
        pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
                               pf->hw.tc_tx_queues);
 
@@ -2643,8 +2644,6 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
                xdp_features_clear_redirect_target(dev);
        }
 
-       pf->hw.non_qos_queues += pf->hw.xdp_queues;
-
        if (if_up)
                otx2_open(pf->netdev);
 
index 4d519ea833b2c7c4fa439ee56fdd07962221030c..f828d32737af02f6a1492e015a1a3d77a732e732 100644 (file)
@@ -1403,7 +1403,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
                                     struct otx2_cq_queue *cq,
                                     bool *need_xdp_flush)
 {
-       unsigned char *hard_start, *data;
+       unsigned char *hard_start;
        int qidx = cq->cq_idx;
        struct xdp_buff xdp;
        struct page *page;
@@ -1417,9 +1417,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 
        xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
 
-       data = (unsigned char *)phys_to_virt(pa);
-       hard_start = page_address(page);
-       xdp_prepare_buff(&xdp, hard_start, data - hard_start,
+       hard_start = (unsigned char *)phys_to_virt(pa);
+       xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
                         cqe->sg.seg_size, false);
 
        act = bpf_prog_run_xdp(prog, &xdp);
index a6e91573f8dae8368f7667f5f5caa5636d881a60..de123350bd46b6e55ee5ea83737f79a4bceb6867 100644 (file)
@@ -4761,7 +4761,10 @@ static int mtk_probe(struct platform_device *pdev)
        }
 
        if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
-               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
+               if (!err)
+                       err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
                if (err) {
                        dev_err(&pdev->dev, "Wrong DMA config\n");
                        return -EINVAL;
index a7b1f9686c09a9a0d6370ee85e3cc33d8b4cd302..4957412ff1f65a8d0621410127d7d58d1cdd175f 100644 (file)
@@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
 {
        const char *namep = mlx5_command_str(opcode);
        struct mlx5_cmd_stats *stats;
+       unsigned long flags;
 
        if (!err || !(strcmp(namep, "unknown command opcode")))
                return;
@@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
        stats = xa_load(&dev->cmd.stats, opcode);
        if (!stats)
                return;
-       spin_lock_irq(&stats->lock);
+       spin_lock_irqsave(&stats->lock, flags);
        stats->failed++;
        if (err < 0)
                stats->last_failed_errno = -err;
@@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
                stats->last_failed_mbox_status = status;
                stats->last_failed_syndrome = syndrome;
        }
-       spin_unlock_irq(&stats->lock);
+       spin_unlock_irqrestore(&stats->lock, flags);
 }
 
 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
index 0bfe1ca8a364233a1d6fb92846d5b54d07d2bcdc..55c6ace0acd557b075c3bae6ff0818ca84fc3ae8 100644 (file)
@@ -1124,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 
 int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
 int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
                       bool enable_mc_lb);
index e1283531e0b810f78d3b18d20cd6e3ba56c9b84f..671adbad0a40f643bbd1f82e56233f7ae11872ce 100644 (file)
@@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
        in = kvzalloc(inlen, GFP_KERNEL);
        if  (!in || !ft->g) {
                kfree(ft->g);
+               ft->g = NULL;
                kvfree(in);
                return -ENOMEM;
        }
index 284253b79266b937f4d654361c7b891278e9fda9..5d213a9886f11c4bed6a2b8c5e5bd708ce08bef3 100644 (file)
@@ -1064,8 +1064,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
        bool allow_swp;
 
-       allow_swp =
-               mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
+       allow_swp = mlx5_geneve_tx_allowed(mdev) ||
+                   (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
        mlx5e_build_sq_param_common(mdev, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        MLX5_SET(sqc, sqc, allow_swp, allow_swp);
index c206cc0a84832e6ebf104cc92990c43a81e94d60..078f56a3cbb2b389499c0b609908972af691a41c 100644 (file)
@@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
        mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
 out:
        napi_consume_skb(skb, budget);
-       md_buff[*md_buff_sz++] = metadata_id;
+       md_buff[(*md_buff_sz)++] = metadata_id;
        if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
            !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
                queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
index 161c5190c236a0d8d048bd6a253a36cdeb12b9bc..05612d9c6080c776e9bdded54d9848f8829748fa 100644 (file)
@@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
        /* iv len */
        aes_gcm->icv_len = x->aead->alg_icv_len;
 
+       attrs->dir = x->xso.dir;
+
        /* esn */
        if (x->props.flags & XFRM_STATE_ESN) {
                attrs->replay_esn.trigger = true;
                attrs->replay_esn.esn = sa_entry->esn_state.esn;
                attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
                attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+               if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+                       goto skip_replay_window;
+
                switch (x->replay_esn->replay_window) {
                case 32:
                        attrs->replay_esn.replay_window =
@@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
                }
        }
 
-       attrs->dir = x->xso.dir;
+skip_replay_window:
        /* spi */
        attrs->spi = be32_to_cpu(x->id.spi);
 
@@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
                        return -EINVAL;
                }
 
-               if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+               if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
+                   x->replay_esn->replay_window != 32 &&
                    x->replay_esn->replay_window != 64 &&
                    x->replay_esn->replay_window != 128 &&
                    x->replay_esn->replay_window != 256) {
index bb7f86c993e5579735d0310aa70b5c53b3a3ae9e..e66f486faafe1a6b0cfc75f0f11b2e957b040842 100644 (file)
@@ -254,11 +254,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
 
        ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
                        sizeof(*ft->g), GFP_KERNEL);
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if  (!in || !ft->g) {
-               kfree(ft->g);
-               kvfree(in);
+       if (!ft->g)
                return -ENOMEM;
+
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               err = -ENOMEM;
+               goto err_free_g;
        }
 
        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
@@ -278,7 +280,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
                break;
        default:
                err = -EINVAL;
-               goto out;
+               goto err_free_in;
        }
 
        switch (type) {
@@ -300,7 +302,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
                break;
        default:
                err = -EINVAL;
-               goto out;
+               goto err_free_in;
        }
 
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -309,7 +311,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
-               goto err;
+               goto err_clean_group;
        ft->num_groups++;
 
        memset(in, 0, inlen);
@@ -318,18 +320,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
-               goto err;
+               goto err_clean_group;
        ft->num_groups++;
 
        kvfree(in);
        return 0;
 
-err:
+err_clean_group:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
-out:
+err_free_in:
        kvfree(in);
-
+err_free_g:
+       kfree(ft->g);
+       ft->g = NULL;
        return err;
 }
 
index 67f546683e85a3fa0bed05baab33790c66eb9168..6ed3a32b7e226d497234e4fa7b244bf9629b5710 100644 (file)
@@ -95,7 +95,7 @@ static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PO
 {
        int tc, i;
 
-       for (i = 0; i < MLX5_MAX_PORTS; i++)
+       for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
                for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
                        mlx5e_destroy_tis(mdev, tisn[i][tc]);
 }
@@ -110,7 +110,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT
        int tc, i;
        int err;
 
-       for (i = 0; i < MLX5_MAX_PORTS; i++) {
+       for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
                for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
                        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
                        void *tisc;
@@ -140,7 +140,7 @@ err_close_tises:
        return err;
 }
 
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
 {
        struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
        int err;
@@ -169,11 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
                goto err_destroy_mkey;
        }
 
-       err = mlx5e_create_tises(mdev, res->tisn);
-       if (err) {
-               mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
-               goto err_destroy_bfreg;
+       if (create_tises) {
+               err = mlx5e_create_tises(mdev, res->tisn);
+               if (err) {
+                       mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
+                       goto err_destroy_bfreg;
+               }
+               res->tisn_valid = true;
        }
+
        INIT_LIST_HEAD(&res->td.tirs_list);
        mutex_init(&res->td.list_lock);
 
@@ -203,7 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
 
        mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
        mdev->mlx5e_res.dek_priv = NULL;
-       mlx5e_destroy_tises(mdev, res->tisn);
+       if (res->tisn_valid)
+               mlx5e_destroy_tises(mdev, res->tisn);
        mlx5_free_bfreg(mdev, &res->bfreg);
        mlx5_core_destroy_mkey(mdev, res->mkey);
        mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
index b5f1c4ca38bac97d860ed2bb5f37eac332e6e24c..c8e8f512803efb7aea48e90259e852a55882403c 100644 (file)
@@ -5992,7 +5992,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
        if (netif_device_present(netdev))
                return 0;
 
-       err = mlx5e_create_mdev_resources(mdev);
+       err = mlx5e_create_mdev_resources(mdev, true);
        if (err)
                return err;
 
index 30932c9c9a8f08bca2c8025f0a5685f79695e54d..9fb2c057bd78723420478d93001e74e7599d646e 100644 (file)
@@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
 
        err = mlx5e_rss_params_indir_init(&indir, mdev,
                                          mlx5e_rqt_size(mdev, hp->num_channels),
-                                         mlx5e_rqt_size(mdev, priv->max_nch));
+                                         mlx5e_rqt_size(mdev, hp->num_channels));
        if (err)
                return err;
 
@@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
        list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
                if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
                        continue;
+
+               list_del(&peer_flow->peer_flows);
                if (refcount_dec_and_test(&peer_flow->refcnt)) {
                        mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
-                       list_del(&peer_flow->peer_flows);
                        kfree(peer_flow);
                }
        }
index a7ed87e9d8426befdbda753b52732400e003f1b8..22dd30cf8033f93134d08ed77fe32dc73b6bbaf2 100644 (file)
@@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
                i++;
        }
 
+       rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
        rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
        ether_addr_copy(dmac_v, entry->key.addr);
@@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
        if (!rule_spec)
                return ERR_PTR(-ENOMEM);
 
+       rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
        rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
                dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
                dest.vport.vhca_id = port->esw_owner_vhca_id;
        }
+       rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
        handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
 
        kvfree(rule_spec);
index 1616a6144f7b42d4c7415bc02a05dbf63c61c420..9b8599c200e2c0990009162b90b1e368e784cdef 100644 (file)
@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
                 fte->flow_context.flow_tag);
        MLX5_SET(flow_context, in_flow_context, flow_source,
                 fte->flow_context.flow_source);
+       MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
+                !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
 
        MLX5_SET(flow_context, in_flow_context, extended_destination,
                 extended_dest);
index 58845121954c19db3bdc454046c161654ef37308..d77be1b4dd9c557b70ba74e3ebb37ac7994a4486 100644 (file)
@@ -783,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
                }
 
                /* This should only be called once per mdev */
-               err = mlx5e_create_mdev_resources(mdev);
+               err = mlx5e_create_mdev_resources(mdev, false);
                if (err)
                        goto destroy_ht;
        }
index 40c7be12404168094e60d0ca5dbedbde77ea1402..58bd749b5e4de07a19320e223a0103b8ae7ded25 100644 (file)
@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
        mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
-       MLX5_SET(cqc,   cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+       MLX5_SET(cqc,   cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        MLX5_SET(cqc,   cqc, c_eqn_or_apu_element, eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
index 6f9790e97fed20821f48392732a90d12e2450a01..2ebb61ef3ea9f6a906601b41c723ba9f7834afda 100644 (file)
@@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                switch (action_type) {
                case DR_ACTION_TYP_DROP:
                        attr.final_icm_addr = nic_dmn->drop_icm_addr;
+                       attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
                        break;
                case DR_ACTION_TYP_FT:
                        dest_action = action;
@@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                                                        action->sampler->tx_icm_addr;
                        break;
                case DR_ACTION_TYP_VPORT:
-                       attr.hit_gvmi = action->vport->caps->vhca_gvmi;
-                       dest_action = action;
-                       attr.final_icm_addr = rx_rule ?
-                               action->vport->caps->icm_address_rx :
-                               action->vport->caps->icm_address_tx;
+                       if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
+                               /* can't go to uplink on RX rule - dropping instead */
+                               attr.final_icm_addr = nic_dmn->drop_icm_addr;
+                               attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+                       } else {
+                               attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+                               dest_action = action;
+                               attr.final_icm_addr = rx_rule ?
+                                                     action->vport->caps->icm_address_rx :
+                                                     action->vport->caps->icm_address_tx;
+                       }
                        break;
                case DR_ACTION_TYP_POP_VLAN:
                        if (!rx_rule && !(dmn->ste_ctx->actions_caps &
index 21753f32786850bd010bded5a13db6eb83fa3ade..1005bb6935b65c0d6bb2b68f71744c2857085eed 100644 (file)
@@ -440,6 +440,27 @@ out:
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
 
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
+{
+       int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+       u32 *out;
+       int err;
+
+       out = kvzalloc(outlen, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       err = mlx5_query_nic_vport_context(mdev, 0, out);
+       if (err)
+               goto out;
+
+       *sd_group = MLX5_GET(query_nic_vport_context_out, out,
+                            nic_vport_context.sd_group);
+out:
+       kvfree(out);
+       return err;
+}
+
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
 {
        u32 *out;
index 4c98950380d536ed30d146b3a6eeac9b098a1fae..d231f4d2888beefe838fadf6933e05ade826fdeb 100644 (file)
@@ -301,6 +301,7 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
                             unsigned long *p_index)
 {
        unsigned int num_rows, entry_size;
+       unsigned long index;
 
        /* We only allow allocations of entire rows */
        if (num_erps % erp_core->num_erp_banks != 0)
@@ -309,10 +310,11 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core,
        entry_size = erp_core->erpt_entries_size[region_type];
        num_rows = num_erps / erp_core->num_erp_banks;
 
-       *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
-       if (*p_index == 0)
+       index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size);
+       if (!index)
                return -ENOBUFS;
-       *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
+
+       *p_index = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET;
 
        return 0;
 }
index d50786b0a6ce47924c55a9fbc53200f50bd96335..50ea1eff02b2f713ee847a6ea8dffb2ca248ae57 100644 (file)
@@ -681,13 +681,13 @@ static void
 mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_region *region)
 {
+       struct mlxsw_sp_acl_tcam *tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
 
        ops->region_fini(mlxsw_sp, region->priv);
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
-       mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
-                                       region->id);
+       mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
        kfree(region);
 }
 
@@ -1564,6 +1564,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
        tcam->max_groups = max_groups;
        tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                  ACL_MAX_GROUP_SIZE);
+       tcam->max_group_size = min_t(unsigned int, tcam->max_group_size,
+                                    MLXSW_REG_PAGT_ACL_MAX_NUM);
 
        err = ops->init(mlxsw_sp, tcam->priv, tcam);
        if (err)
index 2c255ed9b8a9483da266c27cf8e9ddd8d85d2546..7164f9e6370fb76d91aad3c09f4a93f061daf1fc 100644 (file)
@@ -11472,6 +11472,13 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
        if (err)
                goto err_register_netevent_notifier;
 
+       mlxsw_sp->router->netdevice_nb.notifier_call =
+               mlxsw_sp_router_netdevice_event;
+       err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+                                             &mlxsw_sp->router->netdevice_nb);
+       if (err)
+               goto err_register_netdev_notifier;
+
        mlxsw_sp->router->nexthop_nb.notifier_call =
                mlxsw_sp_nexthop_obj_event;
        err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
@@ -11487,22 +11494,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
        if (err)
                goto err_register_fib_notifier;
 
-       mlxsw_sp->router->netdevice_nb.notifier_call =
-               mlxsw_sp_router_netdevice_event;
-       err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
-                                             &mlxsw_sp->router->netdevice_nb);
-       if (err)
-               goto err_register_netdev_notifier;
-
        return 0;
 
-err_register_netdev_notifier:
-       unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
-                               &mlxsw_sp->router->fib_nb);
 err_register_fib_notifier:
        unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
                                    &mlxsw_sp->router->nexthop_nb);
 err_register_nexthop_notifier:
+       unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+                                         &router->netdevice_nb);
+err_register_netdev_notifier:
        unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
 err_register_netevent_notifier:
        unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
@@ -11550,11 +11550,11 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
 {
        struct mlxsw_sp_router *router = mlxsw_sp->router;
 
-       unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
-                                         &router->netdevice_nb);
        unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
        unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
                                    &router->nexthop_nb);
+       unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
+                                         &router->netdevice_nb);
        unregister_netevent_notifier(&router->netevent_nb);
        unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
        unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
index 92108d354051c31c44c64b207fb11411d0b4295b..2e83bbb9477e0693f236e83be30277d3e92df235 100644 (file)
@@ -168,9 +168,10 @@ static void lan966x_port_link_up(struct lan966x_port *port)
        lan966x_taprio_speed_set(port, config->speed);
 
        /* Also the GIGA_MODE_ENA(1) needs to be set regardless of the
-        * port speed for QSGMII ports.
+        * port speed for QSGMII or SGMII ports.
         */
-       if (phy_interface_num_ports(config->portmode) == 4)
+       if (phy_interface_num_ports(config->portmode) == 4 ||
+           config->portmode == PHY_INTERFACE_MODE_SGMII)
                mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
 
        lan_wr(config->duplex | mode,
index 61d8bfd12d5fd50d99c1927dac230bdb049ebcae..55408f16fbbc4d4f50e57a0bb43de030dbd7d3f9 100644 (file)
@@ -414,6 +414,7 @@ static const u64 fix_mac[] = {
        END_SIGN
 };
 
+MODULE_DESCRIPTION("Neterion 10GbE driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
index 2967bab72505617abcf59f0b16f5a1d5bb9d127c..15180538b80a1535a8646b407bcc1b06b632b43c 100644 (file)
@@ -1424,10 +1424,30 @@ static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_
                mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
                return;
 
+       /* Both struct tcphdr and struct udphdr start with
+        *      __be16 source;
+        *      __be16 dest;
+        * so we can use the same code for both.
+        */
        case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
        case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
-               mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
-               mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
+               if (mangle_action->mangle.offset == offsetof(struct tcphdr, source)) {
+                       mangle_action->mangle.val =
+                               (__force u32)cpu_to_be32(mangle_action->mangle.val << 16);
+                       /* The mask of mangle action is inverse mask,
+                        * so clear the dest tp port with 0xFFFF to
+                        * instead of rotate-left operation.
+                        */
+                       mangle_action->mangle.mask =
+                               (__force u32)cpu_to_be32(mangle_action->mangle.mask << 16 | 0xFFFF);
+               }
+               if (mangle_action->mangle.offset == offsetof(struct tcphdr, dest)) {
+                       mangle_action->mangle.offset = 0;
+                       mangle_action->mangle.val =
+                               (__force u32)cpu_to_be32(mangle_action->mangle.val);
+                       mangle_action->mangle.mask =
+                               (__force u32)cpu_to_be32(mangle_action->mangle.mask);
+               }
                return;
 
        default:
@@ -1864,10 +1884,30 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
 {
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        struct nfp_fl_ct_flow_entry *ct_entry;
+       struct flow_action_entry *ct_goto;
        struct nfp_fl_ct_zone_entry *zt;
+       struct flow_action_entry *act;
        bool wildcarded = false;
        struct flow_match_ct ct;
-       struct flow_action_entry *ct_goto;
+       int i;
+
+       flow_action_for_each(i, act, &rule->action) {
+               switch (act->id) {
+               case FLOW_ACTION_REDIRECT:
+               case FLOW_ACTION_REDIRECT_INGRESS:
+               case FLOW_ACTION_MIRRED:
+               case FLOW_ACTION_MIRRED_INGRESS:
+                       if (act->dev->rtnl_link_ops &&
+                           !strcmp(act->dev->rtnl_link_ops->kind, "openvswitch")) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "unsupported offload: out port is openvswitch internal port");
+                               return -EOPNOTSUPP;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
 
        flow_rule_match_ct(rule, &ct);
        if (!ct.mask->ct_zone) {
index e522845c7c211619a252bb995dec65160d7a1ae5..0d7d138d6e0d7e4f468f66683707cd22d750b64a 100644 (file)
@@ -1084,7 +1084,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
        u16 nfp_mac_idx = 0;
 
        entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
-       if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
+       if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) {
                if (entry->bridge_count ||
                    !nfp_flower_is_supported_bridge(netdev)) {
                        nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
index 3b3210d823e8038704391e085b9d7951031dd309..f28e769e6fdadab091d447f3de4cb8df1d2b4d3e 100644 (file)
@@ -2776,6 +2776,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
        case NFP_NFD_VER_NFD3:
                netdev->netdev_ops = &nfp_nfd3_netdev_ops;
                netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+               netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
                break;
        case NFP_NFD_VER_NFDK:
                netdev->netdev_ops = &nfp_nfdk_netdev_ops;
index 33b4c28563162eeab3938da414b32cfd480c13d7..3f10c5365c80ebb2fe079b779fee644a46ed33da 100644 (file)
@@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
        const u32 barcfg_msix_general =
                NFP_PCIE_BAR_PCIE2CPP_MapType(
                        NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
-               NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
+               NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+                       NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
        const u32 barcfg_msix_xpb =
                NFP_PCIE_BAR_PCIE2CPP_MapType(
                        NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
-               NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
+               NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+                       NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) |
                NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
                        NFP_CPP_TARGET_ISLAND_XPB);
        const u32 barcfg_explicit[4] = {
index 9adec91f35e947c97efc6d34bf7f4aa0aef92e5d..223321897b9613e1d112c888f6adaf5c46a625b1 100644 (file)
@@ -58,9 +58,8 @@ struct qcauart {
        unsigned char *tx_buffer;
 };
 
-static int
-qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
-               size_t count)
+static ssize_t
+qca_tty_receive(struct serdev_device *serdev, const u8 *data, size_t count)
 {
        struct qcauart *qca = serdev_device_get_drvdata(serdev);
        struct net_device *netdev = qca->net_dev;
index 39d24e07f30670fc1af40ee988143ffb90990e32..5b69b9268c757fca7aa42545e6ac3d87143d2971 100644 (file)
@@ -396,7 +396,7 @@ nla_put_failure:
 
 struct rtnl_link_ops rmnet_link_ops __read_mostly = {
        .kind           = "rmnet",
-       .maxtype        = __IFLA_RMNET_MAX,
+       .maxtype        = IFLA_RMNET_MAX,
        .priv_size      = sizeof(struct rmnet_priv),
        .setup          = rmnet_vnd_setup,
        .validate       = rmnet_rtnl_validate,
index 8649b3e90edb288d806998d0c97a82c858b0c947..0e3731f50fc2873dc3c4c06c16ffe1f4a8707e83 100644 (file)
@@ -1949,7 +1949,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct ravb_tstamp_skb *ts_skb;
        struct ravb_tx_desc *desc;
        unsigned long flags;
-       u32 dma_addr;
+       dma_addr_t dma_addr;
        void *buffer;
        u32 entry;
        u32 len;
index 721c1f8e892fc56ed1e9144619aa32ac676226b1..5ba606a596e779bc17081c55e5bf5c52555ada0b 100644 (file)
 #undef FRAME_FILTER_DEBUG
 /* #define FRAME_FILTER_DEBUG */
 
+struct stmmac_q_tx_stats {
+       u64_stats_t tx_bytes;
+       u64_stats_t tx_set_ic_bit;
+       u64_stats_t tx_tso_frames;
+       u64_stats_t tx_tso_nfrags;
+};
+
+struct stmmac_napi_tx_stats {
+       u64_stats_t tx_packets;
+       u64_stats_t tx_pkt_n;
+       u64_stats_t poll;
+       u64_stats_t tx_clean;
+       u64_stats_t tx_set_ic_bit;
+};
+
 struct stmmac_txq_stats {
-       u64 tx_bytes;
-       u64 tx_packets;
-       u64 tx_pkt_n;
-       u64 tx_normal_irq_n;
-       u64 napi_poll;
-       u64 tx_clean;
-       u64 tx_set_ic_bit;
-       u64 tx_tso_frames;
-       u64 tx_tso_nfrags;
-       struct u64_stats_sync syncp;
+       /* Updates protected by tx queue lock. */
+       struct u64_stats_sync q_syncp;
+       struct stmmac_q_tx_stats q;
+
+       /* Updates protected by NAPI poll logic. */
+       struct u64_stats_sync napi_syncp;
+       struct stmmac_napi_tx_stats napi;
 } ____cacheline_aligned_in_smp;
 
+struct stmmac_napi_rx_stats {
+       u64_stats_t rx_bytes;
+       u64_stats_t rx_packets;
+       u64_stats_t rx_pkt_n;
+       u64_stats_t poll;
+};
+
 struct stmmac_rxq_stats {
-       u64 rx_bytes;
-       u64 rx_packets;
-       u64 rx_pkt_n;
-       u64 rx_normal_irq_n;
-       u64 napi_poll;
-       struct u64_stats_sync syncp;
+       /* Updates protected by NAPI poll logic. */
+       struct u64_stats_sync napi_syncp;
+       struct stmmac_napi_rx_stats napi;
 } ____cacheline_aligned_in_smp;
 
+/* Updates on each CPU protected by not allowing nested irqs. */
+struct stmmac_pcpu_stats {
+       struct u64_stats_sync syncp;
+       u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
+       u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+};
+
 /* Extra statistic and debug information exposed by ethtool */
 struct stmmac_extra_stats {
        /* Transmit errors */
@@ -205,6 +228,7 @@ struct stmmac_extra_stats {
        /* per queue statistics */
        struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
        struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+       struct stmmac_pcpu_stats __percpu *pcpu_stats;
        unsigned long rx_dropped;
        unsigned long rx_errors;
        unsigned long tx_dropped;
@@ -216,6 +240,7 @@ struct stmmac_safety_stats {
        unsigned long mac_errors[32];
        unsigned long mtl_errors[32];
        unsigned long dma_errors[32];
+       unsigned long dma_dpp_errors[32];
 };
 
 /* Number of fields in Safety Stats */
index 8f730ada71f91d70b5c1d2707601b927f20aeb79..6b65420e11b5c518251565ca94bfb4a849068436 100644 (file)
@@ -353,6 +353,10 @@ static int imx_dwmac_probe(struct platform_device *pdev)
        if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
                plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
 
+       /* Leave TX Q0 for TSO by default; enable TBS on all remaining TX queues */
+       for (int i = 1; i < plat_dat->tx_queues_to_use; i++)
+               plat_dat->tx_queues_cfg[i].tbs_en = 1;
+
        plat_dat->host_dma_width = dwmac->ops->addr_width;
        plat_dat->init = imx_dwmac_init;
        plat_dat->exit = imx_dwmac_exit;
index 137741b94122e5e99320eea5cad9909e6394dc7d..b21d99faa2d04c985427af61724dd073e3a2fe79 100644 (file)
@@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
                                     struct stmmac_extra_stats *x, u32 chan,
                                     u32 dir)
 {
-       struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
-       struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+       struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
        int ret = 0;
        u32 v;
 
@@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
 
        if (v & EMAC_TX_INT) {
                ret |= handle_tx;
-               u64_stats_update_begin(&txq_stats->syncp);
-               txq_stats->tx_normal_irq_n++;
-               u64_stats_update_end(&txq_stats->syncp);
+               u64_stats_update_begin(&stats->syncp);
+               u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+               u64_stats_update_end(&stats->syncp);
        }
 
        if (v & EMAC_TX_DMA_STOP_INT)
@@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
 
        if (v & EMAC_RX_INT) {
                ret |= handle_rx;
-               u64_stats_update_begin(&rxq_stats->syncp);
-               rxq_stats->rx_normal_irq_n++;
-               u64_stats_update_end(&rxq_stats->syncp);
+               u64_stats_update_begin(&stats->syncp);
+               u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+               u64_stats_update_end(&stats->syncp);
        }
 
        if (v & EMAC_RX_BUF_UA_INT)
index 9470d3fd2dede2bb436c05f6a92d87824c2db733..0d185e54eb7e24cfd4ef8de38e976aabd3ee9084 100644 (file)
@@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
        const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
        u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
        u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
-       struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
-       struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+       struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
        int ret = 0;
 
        if (dir == DMA_DIR_RX)
@@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
        }
        /* TX/RX NORMAL interrupts */
        if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
-               u64_stats_update_begin(&rxq_stats->syncp);
-               rxq_stats->rx_normal_irq_n++;
-               u64_stats_update_end(&rxq_stats->syncp);
+               u64_stats_update_begin(&stats->syncp);
+               u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+               u64_stats_update_end(&stats->syncp);
                ret |= handle_rx;
        }
        if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
-               u64_stats_update_begin(&txq_stats->syncp);
-               txq_stats->tx_normal_irq_n++;
-               u64_stats_update_end(&txq_stats->syncp);
+               u64_stats_update_begin(&stats->syncp);
+               u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+               u64_stats_update_end(&stats->syncp);
                ret |= handle_tx;
        }
 
index 7907d62d343759d661e00452198ef8e6cfef3601..85e18f9a22f92091bb98f1892d7bb1f5f08bcf2a 100644 (file)
@@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status)
 int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
                        struct stmmac_extra_stats *x, u32 chan, u32 dir)
 {
-       struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
-       struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+       struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
        int ret = 0;
        /* read the status register (CSR5) */
        u32 intr_status = readl(ioaddr + DMA_STATUS);
@@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
                        u32 value = readl(ioaddr + DMA_INTR_ENA);
                        /* to schedule NAPI on real RIE event. */
                        if (likely(value & DMA_INTR_ENA_RIE)) {
-                               u64_stats_update_begin(&rxq_stats->syncp);
-                               rxq_stats->rx_normal_irq_n++;
-                               u64_stats_update_end(&rxq_stats->syncp);
+                               u64_stats_update_begin(&stats->syncp);
+                               u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+                               u64_stats_update_end(&stats->syncp);
                                ret |= handle_rx;
                        }
                }
                if (likely(intr_status & DMA_STATUS_TI)) {
-                       u64_stats_update_begin(&txq_stats->syncp);
-                       txq_stats->tx_normal_irq_n++;
-                       u64_stats_update_end(&txq_stats->syncp);
+                       u64_stats_update_begin(&stats->syncp);
+                       u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+                       u64_stats_update_end(&stats->syncp);
                        ret |= handle_tx;
                }
                if (unlikely(intr_status & DMA_STATUS_ERI))
index 207ff1799f2c712fe1e10033fa4d2b32dbd197c6..6a2c7d22df1eb81dd216e00e7525bc8a9092c048 100644 (file)
 #define XGMAC_RXCEIE                   BIT(4)
 #define XGMAC_TXCEIE                   BIT(0)
 #define XGMAC_MTL_ECC_INT_STATUS       0x000010cc
+#define XGMAC_MTL_DPP_CONTROL          0x000010e0
+#define XGMAC_DPP_DISABLE              BIT(0)
 #define XGMAC_MTL_TXQ_OPMODE(x)                (0x00001100 + (0x80 * (x)))
 #define XGMAC_TQS                      GENMASK(25, 16)
 #define XGMAC_TQS_SHIFT                        16
 #define XGMAC_DCEIE                    BIT(1)
 #define XGMAC_TCEIE                    BIT(0)
 #define XGMAC_DMA_ECC_INT_STATUS       0x0000306c
+#define XGMAC_DMA_DPP_INT_STATUS       0x00003074
 #define XGMAC_DMA_CH_CONTROL(x)                (0x00003100 + (0x80 * (x)))
 #define XGMAC_SPH                      BIT(24)
 #define XGMAC_PBLx8                    BIT(16)
index eb48211d9b0eb7013b436b0336c75f512fbf638a..323c57f03c93c8d83c9736cbc8e0a62e8a8344f6 100644 (file)
@@ -830,6 +830,43 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
        { false, "UNKNOWN", "Unknown Error" }, /* 31 */
 };
 
+static const char * const dpp_rx_err = "Read Rx Descriptor Parity checker Error";
+static const char * const dpp_tx_err = "Read Tx Descriptor Parity checker Error";
+static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
+       { true, "TDPES0", dpp_tx_err },
+       { true, "TDPES1", dpp_tx_err },
+       { true, "TDPES2", dpp_tx_err },
+       { true, "TDPES3", dpp_tx_err },
+       { true, "TDPES4", dpp_tx_err },
+       { true, "TDPES5", dpp_tx_err },
+       { true, "TDPES6", dpp_tx_err },
+       { true, "TDPES7", dpp_tx_err },
+       { true, "TDPES8", dpp_tx_err },
+       { true, "TDPES9", dpp_tx_err },
+       { true, "TDPES10", dpp_tx_err },
+       { true, "TDPES11", dpp_tx_err },
+       { true, "TDPES12", dpp_tx_err },
+       { true, "TDPES13", dpp_tx_err },
+       { true, "TDPES14", dpp_tx_err },
+       { true, "TDPES15", dpp_tx_err },
+       { true, "RDPES0", dpp_rx_err },
+       { true, "RDPES1", dpp_rx_err },
+       { true, "RDPES2", dpp_rx_err },
+       { true, "RDPES3", dpp_rx_err },
+       { true, "RDPES4", dpp_rx_err },
+       { true, "RDPES5", dpp_rx_err },
+       { true, "RDPES6", dpp_rx_err },
+       { true, "RDPES7", dpp_rx_err },
+       { true, "RDPES8", dpp_rx_err },
+       { true, "RDPES9", dpp_rx_err },
+       { true, "RDPES10", dpp_rx_err },
+       { true, "RDPES11", dpp_rx_err },
+       { true, "RDPES12", dpp_rx_err },
+       { true, "RDPES13", dpp_rx_err },
+       { true, "RDPES14", dpp_rx_err },
+       { true, "RDPES15", dpp_rx_err },
+};
+
 static void dwxgmac3_handle_dma_err(struct net_device *ndev,
                                    void __iomem *ioaddr, bool correctable,
                                    struct stmmac_safety_stats *stats)
@@ -841,6 +878,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
 
        dwxgmac3_log_error(ndev, value, correctable, "DMA",
                           dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+
+       value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+       writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+
+       dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
+                          dwxgmac3_dma_dpp_errors,
+                          STAT_OFF(dma_dpp_errors), stats);
 }
 
 static int
@@ -881,6 +925,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
        value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
        writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
 
+       /* 5. Enable Data Path Parity Protection */
+       value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
+       /* already enabled by default, explicitly enable it again */
+       value &= ~XGMAC_DPP_DISABLE;
+       writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
+
        return 0;
 }
 
@@ -914,7 +964,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
                ret |= !corr;
        }
 
-       err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+       /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
+        * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
+        * Parity Errors here
+        */
+       err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
        corr = dma & XGMAC_DECIS;
        if (err) {
                dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
@@ -930,6 +984,7 @@ static const struct dwxgmac3_error {
        { dwxgmac3_mac_errors },
        { dwxgmac3_mtl_errors },
        { dwxgmac3_dma_errors },
+       { dwxgmac3_dma_dpp_errors },
 };
 
 static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
index 3cde695fec91bd7592e23e725517f0cccee08a42..dd2ab6185c40e813ee4401857875d3e8478303e7 100644 (file)
@@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
                                  struct stmmac_extra_stats *x, u32 chan,
                                  u32 dir)
 {
-       struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
-       struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+       struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
        u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
        u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
        int ret = 0;
@@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
        /* TX/RX NORMAL interrupts */
        if (likely(intr_status & XGMAC_NIS)) {
                if (likely(intr_status & XGMAC_RI)) {
-                       u64_stats_update_begin(&rxq_stats->syncp);
-                       rxq_stats->rx_normal_irq_n++;
-                       u64_stats_update_end(&rxq_stats->syncp);
+                       u64_stats_update_begin(&stats->syncp);
+                       u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+                       u64_stats_update_end(&stats->syncp);
                        ret |= handle_rx;
                }
                if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
-                       u64_stats_update_begin(&txq_stats->syncp);
-                       txq_stats->tx_normal_irq_n++;
-                       u64_stats_update_end(&txq_stats->syncp);
+                       u64_stats_update_begin(&stats->syncp);
+                       u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+                       u64_stats_update_end(&stats->syncp);
                        ret |= handle_tx;
                }
        }
index 9f89acf310502225f6a0951ffdcd190b935655e8..f155e4841c62bc707b13c70462837b7c810f8142 100644 (file)
@@ -267,6 +267,7 @@ struct stmmac_priv {
        u32 msg_enable;
        int wolopts;
        int wol_irq;
+       bool wol_irq_disabled;
        int clk_csr;
        struct timer_list eee_ctrl_timer;
        int lpi_irq;
index dd05437b51f918772cee3f696086ce3312d62675..ec44becf0e2d289c4f6aeab983c54e93d70faf75 100644 (file)
@@ -321,8 +321,9 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       if (priv->hw->pcs & STMMAC_PCS_RGMII ||
-           priv->hw->pcs & STMMAC_PCS_SGMII) {
+       if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
+           (priv->hw->pcs & STMMAC_PCS_RGMII ||
+            priv->hw->pcs & STMMAC_PCS_SGMII)) {
                struct rgmii_adv adv;
                u32 supported, advertising, lp_advertising;
 
@@ -407,8 +408,9 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
-       if (priv->hw->pcs & STMMAC_PCS_RGMII ||
-           priv->hw->pcs & STMMAC_PCS_SGMII) {
+       if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
+           (priv->hw->pcs & STMMAC_PCS_RGMII ||
+            priv->hw->pcs & STMMAC_PCS_SGMII)) {
                /* Only support ANE */
                if (cmd->base.autoneg != AUTONEG_ENABLE)
                        return -EINVAL;
@@ -547,44 +549,79 @@ stmmac_set_pauseparam(struct net_device *netdev,
        }
 }
 
+static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+       u64 total;
+       int cpu;
+
+       total = 0;
+       for_each_possible_cpu(cpu) {
+               struct stmmac_pcpu_stats *pcpu;
+               unsigned int start;
+               u64 irq_n;
+
+               pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+               do {
+                       start = u64_stats_fetch_begin(&pcpu->syncp);
+                       irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
+               } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+               total += irq_n;
+       }
+       return total;
+}
+
+static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+       u64 total;
+       int cpu;
+
+       total = 0;
+       for_each_possible_cpu(cpu) {
+               struct stmmac_pcpu_stats *pcpu;
+               unsigned int start;
+               u64 irq_n;
+
+               pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+               do {
+                       start = u64_stats_fetch_begin(&pcpu->syncp);
+                       irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
+               } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+               total += irq_n;
+       }
+       return total;
+}
+
 static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
 {
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        u32 rx_cnt = priv->plat->rx_queues_to_use;
        unsigned int start;
-       int q, stat;
-       char *p;
+       int q;
 
        for (q = 0; q < tx_cnt; q++) {
                struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
-               struct stmmac_txq_stats snapshot;
+               u64 pkt_n;
 
                do {
-                       start = u64_stats_fetch_begin(&txq_stats->syncp);
-                       snapshot = *txq_stats;
-               } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+                       start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+                       pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n);
+               } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
 
-               p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
-               for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
-                       *data++ = (*(u64 *)p);
-                       p += sizeof(u64);
-               }
+               *data++ = pkt_n;
+               *data++ = stmmac_get_tx_normal_irq_n(priv, q);
        }
 
        for (q = 0; q < rx_cnt; q++) {
                struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
-               struct stmmac_rxq_stats snapshot;
+               u64 pkt_n;
 
                do {
-                       start = u64_stats_fetch_begin(&rxq_stats->syncp);
-                       snapshot = *rxq_stats;
-               } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+                       start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+                       pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
+               } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
 
-               p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
-               for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
-                       *data++ = (*(u64 *)p);
-                       p += sizeof(u64);
-               }
+               *data++ = pkt_n;
+               *data++ = stmmac_get_rx_normal_irq_n(priv, q);
        }
 }
 
@@ -643,39 +680,49 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
        pos = j;
        for (i = 0; i < rx_queues_count; i++) {
                struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
-               struct stmmac_rxq_stats snapshot;
+               struct stmmac_napi_rx_stats snapshot;
+               u64 n_irq;
 
                j = pos;
                do {
-                       start = u64_stats_fetch_begin(&rxq_stats->syncp);
-                       snapshot = *rxq_stats;
-               } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
-
-               data[j++] += snapshot.rx_pkt_n;
-               data[j++] += snapshot.rx_normal_irq_n;
-               normal_irq_n += snapshot.rx_normal_irq_n;
-               napi_poll += snapshot.napi_poll;
+                       start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+                       snapshot = rxq_stats->napi;
+               } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
+
+               data[j++] += u64_stats_read(&snapshot.rx_pkt_n);
+               n_irq = stmmac_get_rx_normal_irq_n(priv, i);
+               data[j++] += n_irq;
+               normal_irq_n += n_irq;
+               napi_poll += u64_stats_read(&snapshot.poll);
        }
 
        pos = j;
        for (i = 0; i < tx_queues_count; i++) {
                struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
-               struct stmmac_txq_stats snapshot;
+               struct stmmac_napi_tx_stats napi_snapshot;
+               struct stmmac_q_tx_stats q_snapshot;
+               u64 n_irq;
 
                j = pos;
                do {
-                       start = u64_stats_fetch_begin(&txq_stats->syncp);
-                       snapshot = *txq_stats;
-               } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
-
-               data[j++] += snapshot.tx_pkt_n;
-               data[j++] += snapshot.tx_normal_irq_n;
-               normal_irq_n += snapshot.tx_normal_irq_n;
-               data[j++] += snapshot.tx_clean;
-               data[j++] += snapshot.tx_set_ic_bit;
-               data[j++] += snapshot.tx_tso_frames;
-               data[j++] += snapshot.tx_tso_nfrags;
-               napi_poll += snapshot.napi_poll;
+                       start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+                       q_snapshot = txq_stats->q;
+               } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+               do {
+                       start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+                       napi_snapshot = txq_stats->napi;
+               } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
+
+               data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n);
+               n_irq = stmmac_get_tx_normal_irq_n(priv, i);
+               data[j++] += n_irq;
+               normal_irq_n += n_irq;
+               data[j++] += u64_stats_read(&napi_snapshot.tx_clean);
+               data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) +
+                       u64_stats_read(&napi_snapshot.tx_set_ic_bit);
+               data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames);
+               data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags);
+               napi_poll += u64_stats_read(&napi_snapshot.poll);
        }
        normal_irq_n += priv->xstats.rx_early_irq;
        data[j++] = normal_irq_n;
@@ -830,10 +877,16 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        if (wol->wolopts) {
                pr_info("stmmac: wakeup enable\n");
                device_set_wakeup_enable(priv->device, 1);
-               enable_irq_wake(priv->wol_irq);
+               /* Avoid unbalanced enable_irq_wake calls */
+               if (priv->wol_irq_disabled)
+                       enable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = false;
        } else {
                device_set_wakeup_enable(priv->device, 0);
-               disable_irq_wake(priv->wol_irq);
+               /* Avoid unbalanced disable_irq_wake calls */
+               if (!priv->wol_irq_disabled)
+                       disable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = true;
        }
 
        mutex_lock(&priv->lock);
index 47de466e432c0545aba22d9dc2801d2fd733c7b5..75d02970450321be0e8f8e3bc2b0e0330f17e4af 100644 (file)
@@ -2482,7 +2482,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
        struct xdp_desc xdp_desc;
        bool work_done = true;
        u32 tx_set_ic_bit = 0;
-       unsigned long flags;
 
        /* Avoids TX time-out as we are sharing with slow path */
        txq_trans_cond_update(nq);
@@ -2566,9 +2565,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
                entry = tx_q->cur_tx;
        }
-       flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
-       txq_stats->tx_set_ic_bit += tx_set_ic_bit;
-       u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+       u64_stats_update_begin(&txq_stats->napi_syncp);
+       u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
+       u64_stats_update_end(&txq_stats->napi_syncp);
 
        if (tx_desc) {
                stmmac_flush_tx_descriptors(priv, queue);
@@ -2616,7 +2615,6 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
        unsigned int bytes_compl = 0, pkts_compl = 0;
        unsigned int entry, xmits = 0, count = 0;
        u32 tx_packets = 0, tx_errors = 0;
-       unsigned long flags;
 
        __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
 
@@ -2782,11 +2780,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
        if (tx_q->dirty_tx != tx_q->cur_tx)
                *pending_packets = true;
 
-       flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
-       txq_stats->tx_packets += tx_packets;
-       txq_stats->tx_pkt_n += tx_packets;
-       txq_stats->tx_clean++;
-       u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+       u64_stats_update_begin(&txq_stats->napi_syncp);
+       u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
+       u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
+       u64_stats_inc(&txq_stats->napi.tx_clean);
+       u64_stats_update_end(&txq_stats->napi_syncp);
 
        priv->xstats.tx_errors += tx_errors;
 
@@ -3628,6 +3626,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
        /* Request the Wake IRQ in case of another line
         * is used for WoL
         */
+       priv->wol_irq_disabled = true;
        if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
                int_name = priv->int_name_wol;
                sprintf(int_name, "%s:%s", dev->name, "wol");
@@ -3931,6 +3930,9 @@ static int __stmmac_open(struct net_device *dev,
        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
 
        buf_sz = dma_conf->dma_buf_sz;
+       for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
+               if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
+                       dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
        memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
 
        stmmac_reset_queues_param(priv);
@@ -4209,7 +4211,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        struct stmmac_tx_queue *tx_q;
        bool has_vlan, set_ic;
        u8 proto_hdr_len, hdr;
-       unsigned long flags;
        u32 pay_len, mss;
        dma_addr_t des;
        int i;
@@ -4374,13 +4375,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
        }
 
-       flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
-       txq_stats->tx_bytes += skb->len;
-       txq_stats->tx_tso_frames++;
-       txq_stats->tx_tso_nfrags += nfrags;
+       u64_stats_update_begin(&txq_stats->q_syncp);
+       u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
+       u64_stats_inc(&txq_stats->q.tx_tso_frames);
+       u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
        if (set_ic)
-               txq_stats->tx_set_ic_bit++;
-       u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+               u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+       u64_stats_update_end(&txq_stats->q_syncp);
 
        if (priv->sarc_type)
                stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4434,6 +4435,28 @@ dma_map_err:
        return NETDEV_TX_OK;
 }
 
+/**
+ * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
+ * @skb: socket buffer to check
+ *
+ * Check if a packet has an ethertype that will trigger the IP header checks
+ * and IP/TCP checksum engine of the stmmac core.
+ *
+ * Return: true if the ethertype can trigger the checksum engine, false
+ * otherwise
+ */
+static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
+{
+       int depth = 0;
+       __be16 proto;
+
+       proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
+                                   &depth);
+
+       return (depth <= ETH_HLEN) &&
+               (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
+}
+
 /**
  *  stmmac_xmit - Tx entry point of the driver
  *  @skb : the socket buffer
@@ -4457,7 +4480,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        struct stmmac_tx_queue *tx_q;
        bool has_vlan, set_ic;
        int entry, first_tx;
-       unsigned long flags;
        dma_addr_t des;
 
        tx_q = &priv->dma_conf.tx_queue[queue];
@@ -4498,9 +4520,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        /* DWMAC IPs can be synthesized to support tx coe only for a few tx
         * queues. In that case, checksum offloading for those queues that don't
         * support tx coe needs to fallback to software checksum calculation.
+        *
+        * Packets that won't trigger the COE e.g. most DSA-tagged packets will
+        * also have to be checksummed in software.
         */
        if (csum_insertion &&
-           priv->plat->tx_queues_cfg[queue].coe_unsupported) {
+           (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
+            !stmmac_has_ip_ethertype(skb))) {
                if (unlikely(skb_checksum_help(skb)))
                        goto dma_map_err;
                csum_insertion = !csum_insertion;
@@ -4623,11 +4649,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
        }
 
-       flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
-       txq_stats->tx_bytes += skb->len;
+       u64_stats_update_begin(&txq_stats->q_syncp);
+       u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
        if (set_ic)
-               txq_stats->tx_set_ic_bit++;
-       u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+               u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+       u64_stats_update_end(&txq_stats->q_syncp);
 
        if (priv->sarc_type)
                stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4891,12 +4917,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
                set_ic = false;
 
        if (set_ic) {
-               unsigned long flags;
                tx_q->tx_count_frames = 0;
                stmmac_set_tx_ic(priv, tx_desc);
-               flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
-               txq_stats->tx_set_ic_bit++;
-               u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+               u64_stats_update_begin(&txq_stats->q_syncp);
+               u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+               u64_stats_update_end(&txq_stats->q_syncp);
        }
 
        stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -5046,7 +5071,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
        unsigned int len = xdp->data_end - xdp->data;
        enum pkt_hash_types hash_type;
        int coe = priv->hw->rx_csum;
-       unsigned long flags;
        struct sk_buff *skb;
        u32 hash;
 
@@ -5065,7 +5089,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
                stmmac_rx_vlan(priv->dev, skb);
        skb->protocol = eth_type_trans(skb, priv->dev);
 
-       if (unlikely(!coe))
+       if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
                skb_checksum_none_assert(skb);
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5076,10 +5100,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
        skb_record_rx_queue(skb, queue);
        napi_gro_receive(&ch->rxtx_napi, skb);
 
-       flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
-       rxq_stats->rx_pkt_n++;
-       rxq_stats->rx_bytes += len;
-       u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+       u64_stats_update_begin(&rxq_stats->napi_syncp);
+       u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
+       u64_stats_add(&rxq_stats->napi.rx_bytes, len);
+       u64_stats_update_end(&rxq_stats->napi_syncp);
 }
 
 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5161,7 +5185,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
        unsigned int desc_size;
        struct bpf_prog *prog;
        bool failure = false;
-       unsigned long flags;
        int xdp_status = 0;
        int status = 0;
 
@@ -5316,9 +5339,9 @@ read_again:
 
        stmmac_finalize_xdp_rx(priv, xdp_status);
 
-       flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
-       rxq_stats->rx_pkt_n += count;
-       u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+       u64_stats_update_begin(&rxq_stats->napi_syncp);
+       u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+       u64_stats_update_end(&rxq_stats->napi_syncp);
 
        priv->xstats.rx_dropped += rx_dropped;
        priv->xstats.rx_errors += rx_errors;
@@ -5356,7 +5379,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
        unsigned int desc_size;
        struct sk_buff *skb = NULL;
        struct stmmac_xdp_buff ctx;
-       unsigned long flags;
        int xdp_status = 0;
        int buf_sz;
 
@@ -5588,7 +5610,7 @@ drain_data:
 
                skb->protocol = eth_type_trans(skb, priv->dev);
 
-               if (unlikely(!coe))
+               if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
                        skb_checksum_none_assert(skb);
                else
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5616,11 +5638,11 @@ drain_data:
 
        stmmac_rx_refill(priv, queue);
 
-       flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
-       rxq_stats->rx_packets += rx_packets;
-       rxq_stats->rx_bytes += rx_bytes;
-       rxq_stats->rx_pkt_n += count;
-       u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+       u64_stats_update_begin(&rxq_stats->napi_syncp);
+       u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
+       u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
+       u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+       u64_stats_update_end(&rxq_stats->napi_syncp);
 
        priv->xstats.rx_dropped += rx_dropped;
        priv->xstats.rx_errors += rx_errors;
@@ -5635,13 +5657,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
        struct stmmac_priv *priv = ch->priv_data;
        struct stmmac_rxq_stats *rxq_stats;
        u32 chan = ch->index;
-       unsigned long flags;
        int work_done;
 
        rxq_stats = &priv->xstats.rxq_stats[chan];
-       flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
-       rxq_stats->napi_poll++;
-       u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+       u64_stats_update_begin(&rxq_stats->napi_syncp);
+       u64_stats_inc(&rxq_stats->napi.poll);
+       u64_stats_update_end(&rxq_stats->napi_syncp);
 
        work_done = stmmac_rx(priv, budget, chan);
        if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5663,13 +5684,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
        struct stmmac_txq_stats *txq_stats;
        bool pending_packets = false;
        u32 chan = ch->index;
-       unsigned long flags;
        int work_done;
 
        txq_stats = &priv->xstats.txq_stats[chan];
-       flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
-       txq_stats->napi_poll++;
-       u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+       u64_stats_update_begin(&txq_stats->napi_syncp);
+       u64_stats_inc(&txq_stats->napi.poll);
+       u64_stats_update_end(&txq_stats->napi_syncp);
 
        work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
        work_done = min(work_done, budget);
@@ -5699,17 +5719,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
        struct stmmac_rxq_stats *rxq_stats;
        struct stmmac_txq_stats *txq_stats;
        u32 chan = ch->index;
-       unsigned long flags;
 
        rxq_stats = &priv->xstats.rxq_stats[chan];
-       flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
-       rxq_stats->napi_poll++;
-       u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+       u64_stats_update_begin(&rxq_stats->napi_syncp);
+       u64_stats_inc(&rxq_stats->napi.poll);
+       u64_stats_update_end(&rxq_stats->napi_syncp);
 
        txq_stats = &priv->xstats.txq_stats[chan];
-       flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
-       txq_stats->napi_poll++;
-       u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+       u64_stats_update_begin(&txq_stats->napi_syncp);
+       u64_stats_inc(&txq_stats->napi.poll);
+       u64_stats_update_end(&txq_stats->napi_syncp);
 
        tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
        tx_done = min(tx_done, budget);
@@ -7035,10 +7054,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
                u64 tx_bytes;
 
                do {
-                       start = u64_stats_fetch_begin(&txq_stats->syncp);
-                       tx_packets = txq_stats->tx_packets;
-                       tx_bytes   = txq_stats->tx_bytes;
-               } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+                       start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+                       tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
+               } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+               do {
+                       start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+                       tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
+               } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
 
                stats->tx_packets += tx_packets;
                stats->tx_bytes += tx_bytes;
@@ -7050,10 +7072,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
                u64 rx_bytes;
 
                do {
-                       start = u64_stats_fetch_begin(&rxq_stats->syncp);
-                       rx_packets = rxq_stats->rx_packets;
-                       rx_bytes   = rxq_stats->rx_bytes;
-               } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+                       start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+                       rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
+                       rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
+               } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
 
                stats->rx_packets += rx_packets;
                stats->rx_bytes += rx_bytes;
@@ -7447,9 +7469,16 @@ int stmmac_dvr_probe(struct device *device,
        priv->dev = ndev;
 
        for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
-               u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
-       for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
-               u64_stats_init(&priv->xstats.txq_stats[i].syncp);
+               u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
+       for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+               u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
+               u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
+       }
+
+       priv->xstats.pcpu_stats =
+               devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
+       if (!priv->xstats.pcpu_stats)
+               return -ENOMEM;
 
        stmmac_set_ethtool_ops(ndev);
        priv->pause = pause;
@@ -7515,6 +7544,9 @@ int stmmac_dvr_probe(struct device *device,
                dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
                        ERR_PTR(ret));
 
+       /* Wait a bit for the reset to take effect */
+       udelay(10);
+
        /* Init MAC and get the capabilities */
        ret = stmmac_hw_init(priv);
        if (ret)
index faa0561e988ecb1e8d866f2c9e9b27b109e474d0..9d2f4ac783e43502586b27283a4db73351ca0583 100644 (file)
@@ -56,7 +56,7 @@
 #define AM65_CPSW_MAX_PORTS    8
 
 #define AM65_CPSW_MIN_PACKET_SIZE      VLAN_ETH_ZLEN
-#define AM65_CPSW_MAX_PACKET_SIZE      (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+#define AM65_CPSW_MAX_PACKET_SIZE      2024
 
 #define AM65_CPSW_REG_CTL              0x004
 #define AM65_CPSW_REG_STAT_PORT_EN     0x014
@@ -2244,7 +2244,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
        eth_hw_addr_set(port->ndev, port->slave.mac_addr);
 
        port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
-       port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
+       port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
+                             (VLAN_ETH_HLEN + ETH_FCS_LEN);
        port->ndev->hw_features = NETIF_F_SG |
                                  NETIF_F_RXCSUM |
                                  NETIF_F_HW_CSUM |
index ea85c6dd5484617a038e565312e8ad0ccdce6c75..c0a5abd8d9a8e6e0d113c36a9557a1de1c360993 100644 (file)
@@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                }
        }
 
+       phy->mac_managed_pm = true;
+
        slave->phy = phy;
 
        phy_attached_info(slave->phy);
index 498c50c6d1a701b86596b9148dbdc4523176cee7..087dcb67505a2da5995963d5d67d36dadb580a47 100644 (file)
@@ -773,6 +773,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                        slave->slave_num);
                return;
        }
+
+       phy->mac_managed_pm = true;
+
        slave->phy = phy;
 
        phy_attached_info(slave->phy);
index 23cd610bd3766c2b2785e94f1faa9da54990ec00..85cdbdd44fec70d1b20e348c38368f047107eec6 100644 (file)
@@ -26,7 +26,7 @@ config NGBE
        tristate "Wangxun(R) GbE PCI Express adapters support"
        depends on PCI
        select LIBWX
-       select PHYLIB
+       select PHYLINK
        help
          This driver supports Wangxun(R) GbE PCI Express family of
          adapters.
index 23355cc408fd7b0bd6ad982ba6d6a069cf91d302..8706223a6e5aa9ceff3fa0b5076400bcaab06cd3 100644 (file)
@@ -2769,4 +2769,5 @@ void wx_set_ring(struct wx *wx, u32 new_tx_count,
 }
 EXPORT_SYMBOL(wx_set_ring);
 
+MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers.");
 MODULE_LICENSE("GPL");
index 704e949484d0c1684247302fb29f45b7ffa3b3e1..b9b5554ea8620ed7249fbdc8870779c6ad658f1b 100644 (file)
@@ -221,21 +221,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
 
        mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
        hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
-       if (!(hw->hw_info.req_buf))
-               return -ENOMEM;
+       if (!(hw->hw_info.req_buf)) {
+               result = -ENOMEM;
+               goto free_ep_info;
+       }
 
        hw->hw_info.req_buf_size = mem_size;
 
        mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
        hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
-       if (!(hw->hw_info.res_buf))
-               return -ENOMEM;
+       if (!(hw->hw_info.res_buf)) {
+               result = -ENOMEM;
+               goto free_req_buf;
+       }
 
        hw->hw_info.res_buf_size = mem_size;
 
        result = fjes_hw_alloc_shared_status_region(hw);
        if (result)
-               return result;
+               goto free_res_buf;
 
        hw->hw_info.buffer_share_bit = 0;
        hw->hw_info.buffer_unshare_reserve_bit = 0;
@@ -246,11 +250,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
 
                        result = fjes_hw_alloc_epbuf(&buf_pair->tx);
                        if (result)
-                               return result;
+                               goto free_epbuf;
 
                        result = fjes_hw_alloc_epbuf(&buf_pair->rx);
                        if (result)
-                               return result;
+                               goto free_epbuf;
 
                        spin_lock_irqsave(&hw->rx_status_lock, flags);
                        fjes_hw_setup_epbuf(&buf_pair->tx, mac,
@@ -273,6 +277,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
        fjes_hw_init_command_registers(hw, &param);
 
        return 0;
+
+free_epbuf:
+       for (epidx = 0; epidx < hw->max_epid ; epidx++) {
+               if (epidx == hw->my_epid)
+                       continue;
+               fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
+               fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
+       }
+       fjes_hw_free_shared_status_region(hw);
+free_res_buf:
+       kfree(hw->hw_info.res_buf);
+       hw->hw_info.res_buf = NULL;
+free_req_buf:
+       kfree(hw->hw_info.req_buf);
+       hw->hw_info.req_buf = NULL;
+free_ep_info:
+       kfree(hw->ep_shm_info);
+       hw->ep_shm_info = NULL;
+       return result;
 }
 
 static void fjes_hw_cleanup(struct fjes_hw *hw)
index 1dafa44155d0eb31dfaea9cacdc3954ebee75f4b..a6fcbda64ecc60e5beccf20f2043ab00870cbd5d 100644 (file)
@@ -708,7 +708,10 @@ void netvsc_device_remove(struct hv_device *device)
        /* Disable NAPI and disassociate its context from the device. */
        for (i = 0; i < net_device->num_chn; i++) {
                /* See also vmbus_reset_channel_cb(). */
-               napi_disable(&net_device->chan_table[i].napi);
+               /* only disable enabled NAPI channel */
+               if (i < ndev->real_num_rx_queues)
+                       napi_disable(&net_device->chan_table[i].napi);
+
                netif_napi_del(&net_device->chan_table[i].napi);
        }
 
index 4406427d4617d58d300be5c46a368df5223d2219..11831a1c97623985401317e690b66f6985abb750 100644 (file)
 #define LINKCHANGE_INT (2 * HZ)
 #define VF_TAKEOVER_INT (HZ / 10)
 
+/* Macros to define the context of vf registration */
+#define VF_REG_IN_PROBE                1
+#define VF_REG_IN_NOTIFIER     2
+
 static unsigned int ring_size __ro_after_init = 128;
 module_param(ring_size, uint, 0444);
-MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
 unsigned int netvsc_ring_bytes __ro_after_init;
 
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
@@ -2185,7 +2189,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
 }
 
 static int netvsc_vf_join(struct net_device *vf_netdev,
-                         struct net_device *ndev)
+                         struct net_device *ndev, int context)
 {
        struct net_device_context *ndev_ctx = netdev_priv(ndev);
        int ret;
@@ -2208,7 +2212,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
                goto upper_link_failed;
        }
 
-       schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+       /* If this registration is called from probe context vf_takeover
+        * is taken care of later in probe itself.
+        */
+       if (context == VF_REG_IN_NOTIFIER)
+               schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
 
        call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
 
@@ -2346,7 +2354,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
        return NOTIFY_DONE;
 }
 
-static int netvsc_register_vf(struct net_device *vf_netdev)
+static int netvsc_register_vf(struct net_device *vf_netdev, int context)
 {
        struct net_device_context *net_device_ctx;
        struct netvsc_device *netvsc_dev;
@@ -2386,7 +2394,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 
        netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
 
-       if (netvsc_vf_join(vf_netdev, ndev) != 0)
+       if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
                return NOTIFY_DONE;
 
        dev_hold(vf_netdev);
@@ -2484,10 +2492,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
        return NOTIFY_OK;
 }
 
+static int check_dev_is_matching_vf(struct net_device *event_ndev)
+{
+       /* Skip NetVSC interfaces */
+       if (event_ndev->netdev_ops == &device_ops)
+               return -ENODEV;
+
+       /* Avoid non-Ethernet type devices */
+       if (event_ndev->type != ARPHRD_ETHER)
+               return -ENODEV;
+
+       /* Avoid Vlan dev with same MAC registering as VF */
+       if (is_vlan_dev(event_ndev))
+               return -ENODEV;
+
+       /* Avoid Bonding master dev with same MAC registering as VF */
+       if (netif_is_bond_master(event_ndev))
+               return -ENODEV;
+
+       return 0;
+}
+
 static int netvsc_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
 {
-       struct net_device *net = NULL;
+       struct net_device *net = NULL, *vf_netdev;
        struct net_device_context *net_device_ctx;
        struct netvsc_device_info *device_info = NULL;
        struct netvsc_device *nvdev;
@@ -2599,6 +2628,30 @@ static int netvsc_probe(struct hv_device *dev,
        }
 
        list_add(&net_device_ctx->list, &netvsc_dev_list);
+
+       /* When the hv_netvsc driver is unloaded and reloaded, the
+        * NET_DEVICE_REGISTER for the vf device is replayed before probe
+        * is complete. This is because register_netdevice_notifier() gets
+        * registered before vmbus_driver_register() so that callback func
+        * is set before probe and we don't miss events like NETDEV_POST_INIT
+        * So, in this section we try to register the matching vf device that
+        * is present as a netdevice, knowing that its register call is not
+        * processed in the netvsc_netdev_notifier(as probing is progress and
+        * get_netvsc_byslot fails).
+        */
+       for_each_netdev(dev_net(net), vf_netdev) {
+               ret = check_dev_is_matching_vf(vf_netdev);
+               if (ret != 0)
+                       continue;
+
+               if (net != get_netvsc_byslot(vf_netdev))
+                       continue;
+
+               netvsc_prepare_bonding(vf_netdev);
+               netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
+               __netvsc_vf_setup(net, vf_netdev);
+               break;
+       }
        rtnl_unlock();
 
        netvsc_devinfo_put(device_info);
@@ -2754,28 +2807,17 @@ static int netvsc_netdev_event(struct notifier_block *this,
                               unsigned long event, void *ptr)
 {
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+       int ret = 0;
 
-       /* Skip our own events */
-       if (event_dev->netdev_ops == &device_ops)
-               return NOTIFY_DONE;
-
-       /* Avoid non-Ethernet type devices */
-       if (event_dev->type != ARPHRD_ETHER)
-               return NOTIFY_DONE;
-
-       /* Avoid Vlan dev with same MAC registering as VF */
-       if (is_vlan_dev(event_dev))
-               return NOTIFY_DONE;
-
-       /* Avoid Bonding master dev with same MAC registering as VF */
-       if (netif_is_bond_master(event_dev))
+       ret = check_dev_is_matching_vf(event_dev);
+       if (ret != 0)
                return NOTIFY_DONE;
 
        switch (event) {
        case NETDEV_POST_INIT:
                return netvsc_prepare_bonding(event_dev);
        case NETDEV_REGISTER:
-               return netvsc_register_vf(event_dev);
+               return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
        case NETDEV_UNREGISTER:
                return netvsc_unregister_vf(event_dev);
        case NETDEV_UP:
@@ -2807,7 +2849,7 @@ static int __init netvsc_drv_init(void)
                pr_info("Increased ring_size to %u (min allowed)\n",
                        ring_size);
        }
-       netvsc_ring_bytes = ring_size * PAGE_SIZE;
+       netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
 
        register_netdevice_notifier(&netvsc_netdev_notifier);
 
index e34816638569e4e11d7a554a7f0fdc1fe6cb07b9..7f5426285c61b1e35afd74d4c044f80c77f34e7f 100644 (file)
@@ -607,11 +607,26 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                return ERR_PTR(-EINVAL);
        }
 
-       ret = skb_ensure_writable_head_tail(skb, dev);
-       if (unlikely(ret < 0)) {
-               macsec_txsa_put(tx_sa);
-               kfree_skb(skb);
-               return ERR_PTR(ret);
+       if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
+                    skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
+               struct sk_buff *nskb = skb_copy_expand(skb,
+                                                      MACSEC_NEEDED_HEADROOM,
+                                                      MACSEC_NEEDED_TAILROOM,
+                                                      GFP_ATOMIC);
+               if (likely(nskb)) {
+                       consume_skb(skb);
+                       skb = nskb;
+               } else {
+                       macsec_txsa_put(tx_sa);
+                       kfree_skb(skb);
+                       return ERR_PTR(-ENOMEM);
+               }
+       } else {
+               skb = skb_unshare(skb, GFP_ATOMIC);
+               if (!skb) {
+                       macsec_txsa_put(tx_sa);
+                       return ERR_PTR(-ENOMEM);
+               }
        }
 
        unprotected_len = skb->len;
index b4d3b9cde8bd685202f135cf9c845d1be76ef428..92a7a36b93ac0cc1b02a551b974fb390254ac484 100644 (file)
@@ -835,14 +835,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
                                      trap_report_dw.work);
        nsim_dev = nsim_trap_data->nsim_dev;
 
-       /* For each running port and enabled packet trap, generate a UDP
-        * packet with a random 5-tuple and report it.
-        */
        if (!devl_trylock(priv_to_devlink(nsim_dev))) {
-               schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0);
+               schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
                return;
        }
 
+       /* For each running port and enabled packet trap, generate a UDP
+        * packet with a random 5-tuple and report it.
+        */
        list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
                if (!netif_running(nsim_dev_port->ns->netdev))
                        continue;
index aecaf5f44374f0a7eb92eb3db46f5a0ec24c5ee9..77e8250282a512ee5cf2f05d9bed4b604e5c210b 100644 (file)
@@ -369,6 +369,12 @@ static int nsim_init_netdevsim_vf(struct netdevsim *ns)
        return err;
 }
 
+static void nsim_exit_netdevsim(struct netdevsim *ns)
+{
+       nsim_udp_tunnels_info_destroy(ns->netdev);
+       mock_phc_destroy(ns->phc);
+}
+
 struct netdevsim *
 nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
 {
@@ -417,8 +423,7 @@ void nsim_destroy(struct netdevsim *ns)
        }
        rtnl_unlock();
        if (nsim_dev_port_is_pf(ns->nsim_dev_port))
-               nsim_udp_tunnels_info_destroy(dev);
-       mock_phc_destroy(ns->phc);
+               nsim_exit_netdevsim(ns);
        free_netdev(dev);
 }
 
index 8a20d9889f105bc609f56a2632132e0ef2c08504..0f3a1538a8b8ee045953a3c5ff308dc824ea7c0a 100644 (file)
@@ -489,7 +489,7 @@ static int tx_r50_fill_result(struct phy_device *phydev, u16 tx_r50_cal_val,
        u16 reg, val;
 
        if (phydev->drv->phy_id == MTK_GPHY_ID_MT7988)
-               bias = -2;
+               bias = -1;
 
        val = clamp_val(bias + tx_r50_cal_val, 0, 63);
 
@@ -705,6 +705,11 @@ restore:
 static void mt798x_phy_common_finetune(struct phy_device *phydev)
 {
        phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+       /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
+       __phy_write(phydev, 0x11, 0xc71);
+       __phy_write(phydev, 0x12, 0xc);
+       __phy_write(phydev, 0x10, 0x8fae);
+
        /* EnabRandUpdTrig = 1 */
        __phy_write(phydev, 0x11, 0x2f00);
        __phy_write(phydev, 0x12, 0xe);
@@ -715,15 +720,56 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
        __phy_write(phydev, 0x12, 0x0);
        __phy_write(phydev, 0x10, 0x83aa);
 
-       /* TrFreeze = 0 */
+       /* FfeUpdGainForce = 1(Enable), FfeUpdGainForceVal = 4 */
+       __phy_write(phydev, 0x11, 0x240);
+       __phy_write(phydev, 0x12, 0x0);
+       __phy_write(phydev, 0x10, 0x9680);
+
+       /* TrFreeze = 0 (mt7988 default) */
        __phy_write(phydev, 0x11, 0x0);
        __phy_write(phydev, 0x12, 0x0);
        __phy_write(phydev, 0x10, 0x9686);
 
+       /* SSTrKp100 = 5 */
+       /* SSTrKf100 = 6 */
+       /* SSTrKp1000Mas = 5 */
+       /* SSTrKf1000Mas = 6 */
        /* SSTrKp1000Slv = 5 */
+       /* SSTrKf1000Slv = 6 */
        __phy_write(phydev, 0x11, 0xbaef);
        __phy_write(phydev, 0x12, 0x2e);
        __phy_write(phydev, 0x10, 0x968c);
+       phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+}
+
+static void mt7981_phy_finetune(struct phy_device *phydev)
+{
+       u16 val[8] = { 0x01ce, 0x01c1,
+                      0x020f, 0x0202,
+                      0x03d0, 0x03c0,
+                      0x0013, 0x0005 };
+       int i, k;
+
+       /* 100M eye finetune:
+        * Keep middle level of TX MLT3 shapper as default.
+        * Only change TX MLT3 overshoot level here.
+        */
+       for (k = 0, i = 1; i < 12; i++) {
+               if (i % 3 == 0)
+                       continue;
+               phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]);
+       }
+
+       phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+       /* ResetSyncOffset = 6 */
+       __phy_write(phydev, 0x11, 0x600);
+       __phy_write(phydev, 0x12, 0x0);
+       __phy_write(phydev, 0x10, 0x8fc0);
+
+       /* VgaDecRate = 1 */
+       __phy_write(phydev, 0x11, 0x4c2a);
+       __phy_write(phydev, 0x12, 0x3e);
+       __phy_write(phydev, 0x10, 0x8fa4);
 
        /* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2,
         * MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2
@@ -738,7 +784,7 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
        __phy_write(phydev, 0x10, 0x8ec0);
        phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
 
-       /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9*/
+       /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */
        phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234,
                       MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK,
                       BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0x9));
@@ -771,48 +817,6 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
        phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LDO_OUTPUT_V, 0x2222);
 }
 
-static void mt7981_phy_finetune(struct phy_device *phydev)
-{
-       u16 val[8] = { 0x01ce, 0x01c1,
-                      0x020f, 0x0202,
-                      0x03d0, 0x03c0,
-                      0x0013, 0x0005 };
-       int i, k;
-
-       /* 100M eye finetune:
-        * Keep middle level of TX MLT3 shapper as default.
-        * Only change TX MLT3 overshoot level here.
-        */
-       for (k = 0, i = 1; i < 12; i++) {
-               if (i % 3 == 0)
-                       continue;
-               phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]);
-       }
-
-       phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
-       /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
-       __phy_write(phydev, 0x11, 0xc71);
-       __phy_write(phydev, 0x12, 0xc);
-       __phy_write(phydev, 0x10, 0x8fae);
-
-       /* ResetSyncOffset = 6 */
-       __phy_write(phydev, 0x11, 0x600);
-       __phy_write(phydev, 0x12, 0x0);
-       __phy_write(phydev, 0x10, 0x8fc0);
-
-       /* VgaDecRate = 1 */
-       __phy_write(phydev, 0x11, 0x4c2a);
-       __phy_write(phydev, 0x12, 0x3e);
-       __phy_write(phydev, 0x10, 0x8fa4);
-
-       /* FfeUpdGainForce = 4 */
-       __phy_write(phydev, 0x11, 0x240);
-       __phy_write(phydev, 0x12, 0x0);
-       __phy_write(phydev, 0x10, 0x9680);
-
-       phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
-}
-
 static void mt7988_phy_finetune(struct phy_device *phydev)
 {
        u16 val[12] = { 0x0187, 0x01cd, 0x01c8, 0x0182,
@@ -827,17 +831,7 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
        /* TCT finetune */
        phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_TX_FILTER, 0x5);
 
-       /* Disable TX power saving */
-       phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
-                      MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8);
-
        phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
-
-       /* SlvDSPreadyTime = 24, MasDSPreadyTime = 12 */
-       __phy_write(phydev, 0x11, 0x671);
-       __phy_write(phydev, 0x12, 0xc);
-       __phy_write(phydev, 0x10, 0x8fae);
-
        /* ResetSyncOffset = 5 */
        __phy_write(phydev, 0x11, 0x500);
        __phy_write(phydev, 0x12, 0x0);
@@ -845,13 +839,27 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
 
        /* VgaDecRate is 1 at default on mt7988 */
 
-       phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+       /* MrvlTrFix100Kp = 6, MrvlTrFix100Kf = 7,
+        * MrvlTrFix1000Kp = 6, MrvlTrFix1000Kf = 7
+        */
+       __phy_write(phydev, 0x11, 0xb90a);
+       __phy_write(phydev, 0x12, 0x6f);
+       __phy_write(phydev, 0x10, 0x8f82);
+
+       /* RemAckCntLimitCtrl = 1 */
+       __phy_write(phydev, 0x11, 0xfbba);
+       __phy_write(phydev, 0x12, 0xc3);
+       __phy_write(phydev, 0x10, 0x87f8);
 
-       phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_2A30);
-       /* TxClkOffset = 2 */
-       __phy_modify(phydev, MTK_PHY_ANARG_RG, MTK_PHY_TCLKOFFSET_MASK,
-                    FIELD_PREP(MTK_PHY_TCLKOFFSET_MASK, 0x2));
        phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+
+       /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */
+       phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234,
+                      MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK,
+                      BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0xa));
+
+       /* rg_tr_lpf_cnt_val = 1023 */
+       phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_LPF_CNT_VAL, 0x3ff);
 }
 
 static void mt798x_phy_eee(struct phy_device *phydev)
@@ -884,11 +892,11 @@ static void mt798x_phy_eee(struct phy_device *phydev)
                       MTK_PHY_LPI_SLV_SEND_TX_EN,
                       FIELD_PREP(MTK_PHY_LPI_SLV_SEND_TX_TIMER_MASK, 0x120));
 
-       phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239,
-                      MTK_PHY_LPI_SEND_LOC_TIMER_MASK |
-                      MTK_PHY_LPI_TXPCS_LOC_RCV,
-                      FIELD_PREP(MTK_PHY_LPI_SEND_LOC_TIMER_MASK, 0x117));
+       /* Keep MTK_PHY_LPI_SEND_LOC_TIMER as 375 */
+       phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239,
+                          MTK_PHY_LPI_TXPCS_LOC_RCV);
 
+       /* This also fixes some IoT issues, such as CH340 */
        phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG2C7,
                       MTK_PHY_MAX_GAIN_MASK | MTK_PHY_MIN_GAIN_MASK,
                       FIELD_PREP(MTK_PHY_MAX_GAIN_MASK, 0x8) |
@@ -922,7 +930,7 @@ static void mt798x_phy_eee(struct phy_device *phydev)
        __phy_write(phydev, 0x12, 0x0);
        __phy_write(phydev, 0x10, 0x9690);
 
-       /* REG_EEE_st2TrKf1000 = 3 */
+       /* REG_EEE_st2TrKf1000 = 2 */
        __phy_write(phydev, 0x11, 0x114f);
        __phy_write(phydev, 0x12, 0x2);
        __phy_write(phydev, 0x10, 0x969a);
@@ -947,7 +955,7 @@ static void mt798x_phy_eee(struct phy_device *phydev)
        __phy_write(phydev, 0x12, 0x0);
        __phy_write(phydev, 0x10, 0x96b8);
 
-       /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 1 */
+       /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 0 */
        __phy_write(phydev, 0x11, 0x1463);
        __phy_write(phydev, 0x12, 0x0);
        __phy_write(phydev, 0x10, 0x96ca);
@@ -1459,6 +1467,13 @@ static int mt7988_phy_probe(struct phy_device *phydev)
        if (err)
                return err;
 
+       /* Disable TX power saving at probing to:
+        * 1. Meet common mode compliance test criteria
+        * 2. Make sure that TX-VCM calibration works fine
+        */
+       phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
+                      MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8);
+
        return mt798x_phy_calibration(phydev);
 }
 
index d2aa3d0695e3367410f0b61bbe44e7b849e36d2c..dad720138baafc57f3b7efb9afd36d82ec5a1b83 100644 (file)
  */
 #define LAN8814_1PPM_FORMAT                    17179
 
+#define PTP_RX_VERSION                         0x0248
+#define PTP_TX_VERSION                         0x0288
+#define PTP_MAX_VERSION(x)                     (((x) & GENMASK(7, 0)) << 8)
+#define PTP_MIN_VERSION(x)                     ((x) & GENMASK(7, 0))
+
 #define PTP_RX_MOD                             0x024F
 #define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
 #define PTP_RX_TIMESTAMP_EN                    0x024D
@@ -3150,6 +3155,12 @@ static void lan8814_ptp_init(struct phy_device *phydev)
        lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
        lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
 
+       /* Disable checking for minorVersionPTP field */
+       lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
+                             PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
+       lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
+                             PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
+
        skb_queue_head_init(&ptp_priv->tx_queue);
        skb_queue_head_init(&ptp_priv->rx_queue);
        INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
@@ -3338,8 +3349,10 @@ static int lan8814_probe(struct phy_device *phydev)
 #define LAN8841_ADC_CHANNEL_MASK               198
 #define LAN8841_PTP_RX_PARSE_L2_ADDR_EN                370
 #define LAN8841_PTP_RX_PARSE_IP_ADDR_EN                371
+#define LAN8841_PTP_RX_VERSION                 374
 #define LAN8841_PTP_TX_PARSE_L2_ADDR_EN                434
 #define LAN8841_PTP_TX_PARSE_IP_ADDR_EN                435
+#define LAN8841_PTP_TX_VERSION                 438
 #define LAN8841_PTP_CMD_CTL                    256
 #define LAN8841_PTP_CMD_CTL_PTP_ENABLE         BIT(2)
 #define LAN8841_PTP_CMD_CTL_PTP_DISABLE                BIT(1)
@@ -3383,6 +3396,12 @@ static int lan8841_config_init(struct phy_device *phydev)
        phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
                      LAN8841_PTP_RX_PARSE_IP_ADDR_EN, 0);
 
+       /* Disable checking for minorVersionPTP field */
+       phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+                     LAN8841_PTP_RX_VERSION, 0xff00);
+       phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+                     LAN8841_PTP_TX_VERSION, 0xff00);
+
        /* 100BT Clause 40 improvenent errata */
        phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
                      LAN8841_ANALOG_CONTROL_1,
@@ -4839,6 +4858,7 @@ static struct phy_driver ksphy_driver[] = {
        .flags          = PHY_POLL_CABLE_TEST,
        .driver_data    = &ksz9131_type,
        .probe          = kszphy_probe,
+       .soft_reset     = genphy_soft_reset,
        .config_init    = ksz9131_config_init,
        .config_intr    = kszphy_config_intr,
        .config_aneg    = ksz9131_config_aneg,
index 6fa679b36290ed528d942b8f3450217ec4a0b1bf..db39dec7f2471c6205db6c0158a229417d255876 100644 (file)
@@ -151,10 +151,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
        unsigned int br_min, br_nom, br_max;
        __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
 
-       phylink_set(modes, Autoneg);
-       phylink_set(modes, Pause);
-       phylink_set(modes, Asym_Pause);
-
        /* Decode the bitrate information to MBd */
        br_min = br_nom = br_max = 0;
        if (id->base.br_nominal) {
@@ -339,6 +335,10 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
                }
        }
 
+       phylink_set(modes, Autoneg);
+       phylink_set(modes, Pause);
+       phylink_set(modes, Asym_Pause);
+
        if (bus->sfp_quirk && bus->sfp_quirk->modes)
                bus->sfp_quirk->modes(id, modes, interfaces);
 
index 840da924708b393b16a82ab4e07746538214c0f9..125793d8aefa77fd961a708f9f7c689d5644e5c0 100644 (file)
@@ -460,6 +460,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
        case PPPIOCSMRU:
                if (get_user(val, p))
                        break;
+               if (val > U16_MAX) {
+                       err = -EINVAL;
+                       break;
+               }
                if (val < PPP_MRU)
                        val = PPP_MRU;
                ap->mru = val;
index ba93bab948e09fcea7f7fed364ca244b219d79d5..18df7ca6619814681adcb879ebb790e5cbaea959 100644 (file)
@@ -752,4 +752,5 @@ EXPORT_SYMBOL(slhc_compress);
 EXPORT_SYMBOL(slhc_uncompress);
 EXPORT_SYMBOL(slhc_toss);
 
+MODULE_DESCRIPTION("Compression helpers for SLIP (serial line)");
 MODULE_LICENSE("Dual BSD/GPL");
index e4280e37fec97fed5e0a793fd4be45f53b7e8bd8..0aba3569ccc0d4a19baa9b5b1f05cb03d47b20b9 100644 (file)
@@ -1437,5 +1437,6 @@ out:
 }
 
 #endif
+MODULE_DESCRIPTION("SLIP (serial line) protocol module");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_LDISC(N_SLIP);
index afa5497f7c35c3ab5682e66440afc8a888d14414..4a4f8c8e79fa12dc84a8c83cefbf964dd40e1aa2 100644 (file)
@@ -1630,13 +1630,19 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
        switch (act) {
        case XDP_REDIRECT:
                err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
-               if (err)
+               if (err) {
+                       dev_core_stats_rx_dropped_inc(tun->dev);
                        return err;
+               }
+               dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
                break;
        case XDP_TX:
                err = tun_xdp_tx(tun->dev, xdp);
-               if (err < 0)
+               if (err < 0) {
+                       dev_core_stats_rx_dropped_inc(tun->dev);
                        return err;
+               }
+               dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
                break;
        case XDP_PASS:
                break;
index 83b8452220ec16a13ae977997b72a16fa82d6202..f088ea2ba6f390329ef84b18cd148cc951ccd70d 100644 (file)
@@ -363,7 +363,6 @@ static int disable_net;
 /* driver info */
 static const char driver_name[] = "hso";
 static const char tty_filename[] = "ttyHS";
-static const char *version = __FILE__ ": " MOD_AUTHOR;
 /* the usb driver itself (registered in hso_init) */
 static struct usb_driver hso_driver;
 /* serial structures */
@@ -3228,16 +3227,8 @@ static struct usb_driver hso_driver = {
 
 static int __init hso_init(void)
 {
-       int i;
        int result;
 
-       /* put it in the log */
-       pr_info("%s\n", version);
-
-       /* Initialise the serial table semaphore and table */
-       for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++)
-               serial_table[i] = NULL;
-
        /* allocate our driver using the proper amount of supported minors */
        tty_drv = tty_alloc_driver(HSO_SERIAL_TTY_MINORS, TTY_DRIVER_REAL_RAW |
                        TTY_DRIVER_DYNAMIC_DEV);
@@ -3285,8 +3276,6 @@ err_free_tty:
 
 static void __exit hso_exit(void)
 {
-       pr_info("unloaded\n");
-
        tty_unregister_driver(tty_drv);
        /* deregister the usb driver */
        usb_deregister(&hso_driver);
index 9bf2140fd0a1f4dc97a0553854c83cca184d574b..0d0672d2a65457748a77f1a39e5507773a33c2fb 100644 (file)
@@ -10069,7 +10069,7 @@ static struct usb_driver rtl8152_driver = {
        .disable_hub_initiated_lpm = 1,
 };
 
-static int rtl8152_cfgselector_probe(struct usb_device *udev)
+static int rtl8152_cfgselector_choose_configuration(struct usb_device *udev)
 {
        struct usb_host_config *c;
        int i, num_configs;
@@ -10096,19 +10096,13 @@ static int rtl8152_cfgselector_probe(struct usb_device *udev)
        if (i == num_configs)
                return -ENODEV;
 
-       if (usb_set_configuration(udev, c->desc.bConfigurationValue)) {
-               dev_err(&udev->dev, "Failed to set configuration %d\n",
-                       c->desc.bConfigurationValue);
-               return -ENODEV;
-       }
-
-       return 0;
+       return c->desc.bConfigurationValue;
 }
 
 static struct usb_device_driver rtl8152_cfgselector_driver = {
-       .name =         MODULENAME "-cfgselector",
-       .probe =        rtl8152_cfgselector_probe,
-       .id_table =     rtl8152_table,
+       .name = MODULENAME "-cfgselector",
+       .choose_configuration = rtl8152_cfgselector_choose_configuration,
+       .id_table = rtl8152_table,
        .generic_subclass = 1,
        .supports_autosuspend = 1,
 };
index 3cb8aa19388415f8f7208f45827514f8c6299f28..d7ce4a1011ea2585bca21ab3fb86978f7fee364d 100644 (file)
@@ -4295,10 +4295,11 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
 {
        vq_callback_t **callbacks;
        struct virtqueue **vqs;
-       int ret = -ENOMEM;
-       int i, total_vqs;
        const char **names;
+       int ret = -ENOMEM;
+       int total_vqs;
        bool *ctx;
+       u16 i;
 
        /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
         * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
@@ -4335,8 +4336,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
        for (i = 0; i < vi->max_queue_pairs; i++) {
                callbacks[rxq2vq(i)] = skb_recv_done;
                callbacks[txq2vq(i)] = skb_xmit_done;
-               sprintf(vi->rq[i].name, "input.%d", i);
-               sprintf(vi->sq[i].name, "output.%d", i);
+               sprintf(vi->rq[i].name, "input.%u", i);
+               sprintf(vi->sq[i].name, "output.%u", i);
                names[rxq2vq(i)] = vi->rq[i].name;
                names[txq2vq(i)] = vi->sq[i].name;
                if (ctx)
index 8a51cfcff99e5288e0a258a1172d2e06f688068b..cbb99fc5ea9fe7117053325428e66cdecb79f2bc 100644 (file)
@@ -28,6 +28,7 @@
 
 static struct spi_device *g_spi;
 
+MODULE_DESCRIPTION("Slic Maxim DS26522 driver");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Zhao Qiang<B45475@freescale.com>");
 
index 43e0db78d42beccfc2883050bb2665c191e675f8..a742cec44e3db823ae3fa85d6161e20d10dc64fb 100644 (file)
@@ -1803,5 +1803,6 @@ static struct usb_driver ar5523_driver = {
 
 module_usb_driver(ar5523_driver);
 
+MODULE_DESCRIPTION("Atheros AR5523 wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_FIRMWARE(AR5523_FIRMWARE_FILE);
index 7e3b6779f4e969369a9e6713b9235241efa9ac44..02e160d831bed13f3358034048ce5b03d36dc090 100644 (file)
@@ -368,10 +368,6 @@ struct ath11k_vif {
        struct ieee80211_chanctx_conf chanctx;
        struct ath11k_arp_ns_offload arp_ns_offload;
        struct ath11k_rekey_data rekey_data;
-
-#ifdef CONFIG_ATH11K_DEBUGFS
-       struct dentry *debugfs_twt;
-#endif /* CONFIG_ATH11K_DEBUGFS */
 };
 
 struct ath11k_vif_iter {
index a847bc0d50c0f0b955e93947e49b771d41756ea1..a48e737ef35d661f670373617bef8f0525358543 100644 (file)
@@ -1894,35 +1894,30 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = {
        .open = simple_open
 };
 
-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif)
 {
+       struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
        struct ath11k_base *ab = arvif->ar->ab;
+       struct dentry *debugfs_twt;
 
        if (arvif->vif->type != NL80211_IFTYPE_AP &&
            !(arvif->vif->type == NL80211_IFTYPE_STATION &&
              test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
                return;
 
-       arvif->debugfs_twt = debugfs_create_dir("twt",
-                                               arvif->vif->debugfs_dir);
-       debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+       debugfs_twt = debugfs_create_dir("twt",
+                                        arvif->vif->debugfs_dir);
+       debugfs_create_file("add_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_add_dialog);
 
-       debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+       debugfs_create_file("del_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_del_dialog);
 
-       debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+       debugfs_create_file("pause_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_pause_dialog);
 
-       debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+       debugfs_create_file("resume_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_resume_dialog);
 }
 
-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
-{
-       if (!arvif->debugfs_twt)
-               return;
-
-       debugfs_remove_recursive(arvif->debugfs_twt);
-       arvif->debugfs_twt = NULL;
-}
index 44d15845f39a6735f3ef15224ea12ace13079ef4..a39e458637b01366b430e138bbc53126196b512f 100644 (file)
@@ -307,8 +307,8 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
        return ar->debug.rx_filter;
 }
 
-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif);
 void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
                                     enum wmi_direct_buffer_module id,
                                     enum ath11k_dbg_dbr_event event,
@@ -387,14 +387,6 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
        return 0;
 }
 
-static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
-{
-}
-
-static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
-{
-}
-
 static inline void
 ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
                                enum wmi_direct_buffer_module id,
index db241589424d519607429b34ffd9946b32c525a9..b13525bbbb8087acbdc15247a0a428a74fd5f8b9 100644 (file)
@@ -6756,13 +6756,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
                goto err;
        }
 
-       /* In the case of hardware recovery, debugfs files are
-        * not deleted since ieee80211_ops.remove_interface() is
-        * not invoked. In such cases, try to delete the files.
-        * These will be re-created later.
-        */
-       ath11k_debugfs_remove_interface(arvif);
-
        memset(arvif, 0, sizeof(*arvif));
 
        arvif->ar = ar;
@@ -6939,8 +6932,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
 
        ath11k_dp_vdev_tx_attach(ar, arvif);
 
-       ath11k_debugfs_add_interface(arvif);
-
        if (vif->type != NL80211_IFTYPE_MONITOR &&
            test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
                ret = ath11k_mac_monitor_vdev_create(ar);
@@ -7056,8 +7047,6 @@ err_vdev_del:
        /* Recalc txpower for remaining vdev */
        ath11k_mac_txpower_recalc(ar);
 
-       ath11k_debugfs_remove_interface(arvif);
-
        /* TODO: recal traffic pause state based on the available vdevs */
 
        mutex_unlock(&ar->conf_mutex);
@@ -9153,6 +9142,7 @@ static const struct ieee80211_ops ath11k_ops = {
 #endif
 
 #ifdef CONFIG_ATH11K_DEBUGFS
+       .vif_add_debugfs                = ath11k_debugfs_op_vif_add,
        .sta_add_debugfs                = ath11k_debugfs_sta_op_add,
 #endif
 
index 41119fb177e306f30280d1a1d83ae5583976668d..4e6b4df8562f632e34089619f7a9b485b5e71595 100644 (file)
@@ -1685,6 +1685,7 @@ static struct platform_driver wcn36xx_driver = {
 
 module_platform_driver(wcn36xx_driver);
 
+MODULE_DESCRIPTION("Qualcomm Atheros WCN3660/3680 wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
 MODULE_FIRMWARE(WLAN_NV_FILE);
index d55f3271d6190234220afd12ac8f6eb7a1d78f64..4f0c1e1a8e605daa4bcf907006bbd8e9a07490fa 100644 (file)
@@ -20,6 +20,7 @@ static void __exit brcmf_bca_exit(void)
        brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_BCA, THIS_MODULE);
 }
 
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom AP chipsets");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_IMPORT_NS(BRCMFMAC);
 
index 133c5ea6429cd0e17baea181209c4d701e662d0c..28d6a30cc0106d6a38b51e35c1f518accfdbe987 100644 (file)
@@ -3779,8 +3779,10 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
                if (req->channels[i] == chan)
                        break;
        }
-       if (i == req->n_channels)
-               req->channels[req->n_channels++] = chan;
+       if (i == req->n_channels) {
+               req->n_channels++;
+               req->channels[i] = chan;
+       }
 
        for (i = 0; i < req->n_ssids; i++) {
                if (req->ssids[i].ssid_len == ssid_len &&
index f82fbbe3ecefb7af1019281b3f031f45b9ec30e6..90d06cda03a2f007e9f00c636a22a4a130670dff 100644 (file)
@@ -20,6 +20,7 @@ static void __exit brcmf_cyw_exit(void)
        brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_CYW, THIS_MODULE);
 }
 
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Cypress/Infineon chipsets");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_IMPORT_NS(BRCMFMAC);
 
index 80220685f5e4514782a1179d3195cbd34cd890b3..d7fb88bb6ae1a3ff9975966f727810abda27ae1a 100644 (file)
@@ -2707,7 +2707,6 @@ MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
 
 
 static struct pci_driver brcmf_pciedrvr = {
-       .node = {},
        .name = KBUILD_MODNAME,
        .id_table = brcmf_pcie_devid_table,
        .probe = brcmf_pcie_probe,
index 2178675ae1a44d0e04afdd4d26ad6aa0625da8cf..0ccf735316c24dc1bb06ae0999327a04ea76d212 100644 (file)
@@ -1581,7 +1581,7 @@ static int brcmf_usb_reset_device(struct device *dev, void *notused)
 
 void brcmf_usb_exit(void)
 {
-       struct device_driver *drv = &brcmf_usbdrvr.drvwrap.driver;
+       struct device_driver *drv = &brcmf_usbdrvr.driver;
        int ret;
 
        brcmf_dbg(USB, "Enter\n");
index 02918d434556b04d797a4141f3dcaede15a7b494..b66135e3cff476a95c5482e099975fd01849bedf 100644 (file)
@@ -20,6 +20,7 @@ static void __exit brcmf_wcc_exit(void)
        brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_WCC, THIS_MODULE);
 }
 
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom mobility chipsets");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_IMPORT_NS(BRCMFMAC);
 
index 798731ecbefde7f625d0cf00ef688f10281727be..b740c65a7dca25807ac648873a0df14796984d1b 100644 (file)
@@ -537,7 +537,7 @@ enum iwl_fw_dbg_config_cmd_type {
 }; /* LDBG_CFG_CMD_TYPE_API_E_VER_1 */
 
 /* this token disables debug asserts in the firmware */
-#define IWL_FW_DBG_CONFIG_TOKEN 0x00011301
+#define IWL_FW_DBG_CONFIG_TOKEN 0x00010001
 
 /**
  * struct iwl_fw_dbg_config_cmd - configure FW debug
index e27774e7ed74d82bbbb9821f24a2bc3a1578395b..80fda056e46a698458ee4ecf1230f7ef315e3a2a 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -19,7 +19,6 @@
  * @fwrt_ptr: pointer to the buffer coming from fwrt
  * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
  *     transport's data.
- * @trans_len: length of the valid data in trans_ptr
  * @fwrt_len: length of the valid data in fwrt_ptr
  */
 struct iwl_fw_dump_ptrs {
index 3b14f647674350e3fdef138eb880b07df6eb5770..72075720969c06b2378d84d48305e84df467f201 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  */
 #include <linux/firmware.h>
 #include "iwl-drv.h"
@@ -1096,7 +1096,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
                node_trig = (void *)node_tlv->data;
        }
 
-       memcpy(node_trig->data + offset, trig->data, trig_data_len);
+       memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
        node_tlv->length = cpu_to_le32(size);
 
        if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
index ffe2670720c9257c30cd86aa3f5edf7386bce602..abf8001bdac179b7e4b40897182f8b54532d6c97 100644 (file)
@@ -128,6 +128,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
        kfree(drv->fw.ucode_capa.cmd_versions);
        kfree(drv->fw.phy_integration_ver);
        kfree(drv->trans->dbg.pc_data);
+       drv->trans->dbg.pc_data = NULL;
 
        for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
                iwl_free_fw_img(drv, drv->fw.img + i);
index 402896988686990fdd7ea9410f8e01819a7a1bc4..2f6774ec37b2286f1fb72e120a18435e804fcb58 100644 (file)
@@ -668,7 +668,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
                        .has_eht = true,
                        .eht_cap_elem = {
                                .mac_cap_info[0] =
-                                       IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
                                        IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
                                        IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
                                        IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 |
@@ -793,7 +792,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
                        .has_eht = true,
                        .eht_cap_elem = {
                                .mac_cap_info[0] =
-                                       IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
                                        IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
                                        IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
                                        IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
@@ -1020,8 +1018,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
        if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
            iftype_data->eht_cap.has_eht) {
                iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
-                       ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
-                         IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+                       ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
                          IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2);
                iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &=
                        ~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
index 7f13dff04b265caf265f24662d7609f60289120d..3447d67a8b311be1ceaf0b232e71c9121e38bd71 100644 (file)
@@ -1600,7 +1600,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
         */
        if (vif->type == NL80211_IFTYPE_AP ||
            vif->type == NL80211_IFTYPE_ADHOC) {
-               iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+               if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+                       iwl_mvm_vif_dbgfs_add_link(mvm, vif);
                ret = 0;
                goto out;
        }
@@ -1640,7 +1641,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                        iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef);
        }
 
-       iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+       if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               iwl_mvm_vif_dbgfs_add_link(mvm, vif);
 
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
            vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
index 61170173f917a00707fc63956b8d7f252737c809..893b69fc841b896b234078240631d72c792a4c7e 100644 (file)
@@ -81,7 +81,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
                ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
        }
 
-       iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+       if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               iwl_mvm_vif_dbgfs_add_link(mvm, vif);
 
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
            vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
@@ -437,6 +438,9 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
                mvmvif->ap_ibss_active = false;
        }
 
+       iwl_mvm_link_changed(mvm, vif, link_conf,
+                            LINK_CONTEXT_MODIFY_ACTIVE, false);
+
        if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
                int ret = iwl_mvm_esr_mode_inactive(mvm, vif);
 
@@ -448,9 +452,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
        if (vif->type == NL80211_IFTYPE_MONITOR)
                iwl_mvm_mld_rm_snif_sta(mvm, vif);
 
-       iwl_mvm_link_changed(mvm, vif, link_conf,
-                            LINK_CONTEXT_MODIFY_ACTIVE, false);
-
        if (switching_chanctx)
                return;
        mvmvif->link[link_id]->phy_ctxt = NULL;
index b52cce38115d0a9e5e7655af324fdc3e266f6011..c4fe70e05b9b87771613d8569b216a1cf91ac550 100644 (file)
@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
                           "FW rev %s - Softmac protocol %x.%x\n",
                           fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
                snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
-                               "%s - %x.%x", fw_version,
+                               "%.19s - %x.%x", fw_version,
                                priv->fw_var >> 8, priv->fw_var & 0xff);
        }
 
index ce0179b8ab368fa7138a394afbc32678b05d20fc..0073b5e0f9c90ba473e71f1902724d8979bdc792 100644 (file)
@@ -700,6 +700,7 @@ static struct spi_driver p54spi_driver = {
 
 module_spi_driver(p54spi_driver);
 
+MODULE_DESCRIPTION("Prism54 SPI wireless driver");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
 MODULE_ALIAS("spi:cx3110x");
index d3ab9572e71157f4436c7c7f429617070ef4b674..515e6db410f28aefc63a5587dbb274866b053482 100644 (file)
@@ -687,7 +687,7 @@ static struct usb_driver mwifiex_usb_driver = {
        .suspend = mwifiex_usb_suspend,
        .resume = mwifiex_usb_resume,
        .soft_unbind = 1,
-       .drvwrap.driver = {
+       .driver = {
                .coredump = mwifiex_usb_coredump,
        },
 };
index 89d738deea62e9ed4d4f9044e1a193548833deff..e2146d30e55363ecdfec2ed26bb7753384f04317 100644 (file)
@@ -728,6 +728,7 @@ const struct ieee80211_ops mt7603_ops = {
        .set_sar_specs = mt7603_set_sar_specs,
 };
 
+MODULE_DESCRIPTION("MediaTek MT7603E and MT76x8 wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
 
 static int __init mt7603_init(void)
index dab16b5fc3861198f9eccd3ae48c776bb03a66a1..0971c164b57e926d2d22dd1ef4f0559d45420db2 100644 (file)
@@ -1375,4 +1375,5 @@ const struct ieee80211_ops mt7615_ops = {
 };
 EXPORT_SYMBOL_GPL(mt7615_ops);
 
+MODULE_DESCRIPTION("MediaTek MT7615E and MT7663E wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
index ac036a072439d5d0a2c7eb8aad38048b4098b25f..87a956ea3ad74f6fb62a873eba0f499e04a99c32 100644 (file)
@@ -270,4 +270,5 @@ static void __exit mt7615_exit(void)
 
 module_init(mt7615_init);
 module_exit(mt7615_exit);
+MODULE_DESCRIPTION("MediaTek MT7615E MMIO helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index 67cedd2555f973fc53cd0e37312771c3e0fa1116..9692890ba51b7b61c43991cb20a59d8394188c1e 100644 (file)
@@ -253,4 +253,5 @@ module_sdio_driver(mt7663s_driver);
 
 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7663S (SDIO) wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
index 04963b9f749838c41717884ecb818e9d0894667b..df737e1ff27b79a21c1b92ca899272eb961800f0 100644 (file)
@@ -281,4 +281,5 @@ module_usb_driver(mt7663u_driver);
 
 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7663U (USB) wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
index 0052d103e276a895e3eddb81edb4397e0149281b..820b395900275a700da43e7139c3a22effa17140 100644 (file)
@@ -349,4 +349,5 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device);
 
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT7663 SDIO/USB helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index 96494ba2fdf767ba89d25b505ca7ee86b0797c19..3a20ba0d2492840304f9f5b108a8f82643ce7396 100644 (file)
@@ -3160,4 +3160,5 @@ exit:
 EXPORT_SYMBOL_GPL(mt76_connac2_mcu_fill_message);
 
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT76x connac layer helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index c3a392a1a659e8a0581809a3a5e03d9847085496..bcd24c9072ec9e52f68c7dac0f124279d525eba0 100644 (file)
@@ -342,4 +342,5 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
        return 0;
 }
 
+MODULE_DESCRIPTION("MediaTek MT76x EEPROM helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index 9277ff38b7a228fd778e8c9909eed0182eb14306..293e66fa83d5d0669d4f26acaff150a41f35debc 100644 (file)
@@ -302,6 +302,7 @@ static const struct pci_device_id mt76x0e_device_table[] = {
 MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
 MODULE_FIRMWARE(MT7610E_FIRMWARE);
 MODULE_FIRMWARE(MT7650E_FIRMWARE);
+MODULE_DESCRIPTION("MediaTek MT76x0E (PCIe) wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
 
 static struct pci_driver mt76x0e_driver = {
index 0422c332354a131dab040c72a8961ec6f1b79515..dd042949cf82bc6c87f4aaee8b7c5d912faf2162 100644 (file)
@@ -336,6 +336,7 @@ err:
 MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
 MODULE_FIRMWARE(MT7610E_FIRMWARE);
 MODULE_FIRMWARE(MT7610U_FIRMWARE);
+MODULE_DESCRIPTION("MediaTek MT76x0U (USB) wireless driver");
 MODULE_LICENSE("GPL");
 
 static struct usb_driver mt76x0_driver = {
index 02da543dfc5cf381f6edac1753c0d627504e3e48..b2cc449142945f585a7d50ca0d68da21e35531e7 100644 (file)
@@ -293,4 +293,5 @@ void mt76x02u_init_mcu(struct mt76_dev *dev)
 EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);
 
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x02 MCU helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index 8a0e8124b894003ed80aad02ff6c59be9f3e457f..8020446be37bd99c15b9410b697060a304ef18af 100644 (file)
@@ -696,4 +696,5 @@ void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev)
 }
 EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list);
 
+MODULE_DESCRIPTION("MediaTek MT76x02 helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index 8c01855885ce3949a16e63fe4169db3345a62044..1fe5f5a02f937783c669205e286e917ec0872db1 100644 (file)
@@ -506,4 +506,5 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
 }
 EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
 
+MODULE_DESCRIPTION("MediaTek MT76x2 EEPROM helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index df85ebc6e1df07a7c2e48c30efa50cdeec9f993f..30959746e9242712e8196724157db8c93caa96f3 100644 (file)
@@ -165,6 +165,7 @@ mt76x2e_resume(struct pci_dev *pdev)
 MODULE_DEVICE_TABLE(pci, mt76x2e_device_table);
 MODULE_FIRMWARE(MT7662_FIRMWARE);
 MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_DESCRIPTION("MediaTek MT76x2E (PCIe) wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
 
 static struct pci_driver mt76pci_driver = {
index 55068f3252ef341f4fbdc6d7dd382296e47b17f7..ca78e14251c2f5cda524c046b9c80a96b4481167 100644 (file)
@@ -147,4 +147,5 @@ static struct usb_driver mt76x2u_driver = {
 module_usb_driver(mt76x2u_driver);
 
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x2U (USB) wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
index aff4f21e843d29ae24b1ef094b407e344597bf36..3039f53e224546a406a2fc4cc0b2f8e07884d456 100644 (file)
@@ -958,4 +958,5 @@ static void __exit mt7915_exit(void)
 
 module_init(mt7915_init);
 module_exit(mt7915_exit);
+MODULE_DESCRIPTION("MediaTek MT7915E MMIO helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index 0645417e05825f709e19e392e48544d36d2e3534..0d5adc5ddae38283eb618ba00284fb4b527c677c 100644 (file)
@@ -1418,5 +1418,6 @@ const struct ieee80211_ops mt7921_ops = {
 };
 EXPORT_SYMBOL_GPL(mt7921_ops);
 
+MODULE_DESCRIPTION("MediaTek MT7921 core driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
index 57903c6e4f11f0735fd80c4a3c54c4299ca48be0..dde26f3274783d9a7dc84da9be5e8fa214fba21e 100644 (file)
@@ -544,4 +544,5 @@ MODULE_FIRMWARE(MT7922_FIRMWARE_WM);
 MODULE_FIRMWARE(MT7922_ROM_PATCH);
 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7921E (PCIe) wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
index 7591e54d289733472740a5afe747833070363f2e..a9ce1e746b954bc7c7599f23ec6a6c23031dd384 100644 (file)
@@ -323,5 +323,6 @@ static struct sdio_driver mt7921s_driver = {
        .drv.pm         = pm_sleep_ptr(&mt7921s_pm_ops),
 };
 module_sdio_driver(mt7921s_driver);
+MODULE_DESCRIPTION("MediaTek MT7921S (SDIO) wireless driver");
 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
 MODULE_LICENSE("Dual BSD/GPL");
index e5258c74fc077ac69b310b5bab0e56c02ebfcef5..8b7c03c47598de7bf9037ed40cb370607a837af4 100644 (file)
@@ -336,5 +336,6 @@ static struct usb_driver mt7921u_driver = {
 };
 module_usb_driver(mt7921u_driver);
 
+MODULE_DESCRIPTION("MediaTek MT7921U (USB) wireless driver");
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
 MODULE_LICENSE("Dual BSD/GPL");
index 8f1075da4903908b5f149e12530d21f6735be07f..125a1be3cb64c6a1a14bb6aaac28ec0fbc11e889 100644 (file)
@@ -1450,4 +1450,5 @@ const struct ieee80211_ops mt7925_ops = {
 EXPORT_SYMBOL_GPL(mt7925_ops);
 
 MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT7925 core driver");
 MODULE_LICENSE("Dual BSD/GPL");
index 734f31ee40d3f740873dc0c53356a63fa11ff976..1fd99a856541589b1f3859795e8b23bcc0c06cdf 100644 (file)
@@ -583,4 +583,5 @@ MODULE_FIRMWARE(MT7925_FIRMWARE_WM);
 MODULE_FIRMWARE(MT7925_ROM_PATCH);
 MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>");
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7925E (PCIe) wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
index 9b885c5b3ed594ddc9c5b62f47dfd4522430f469..1e0f094fc9059dbb02585bdc168b6b763f198004 100644 (file)
@@ -329,4 +329,5 @@ static struct usb_driver mt7925u_driver = {
 module_usb_driver(mt7925u_driver);
 
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7925U (USB) wireless driver");
 MODULE_LICENSE("Dual BSD/GPL");
index 502be22dbe3677fb475371b7e4c564be074899d8..c42101aa9e45e958f605e37e763bb090c63aeea5 100644 (file)
@@ -862,5 +862,6 @@ int mt792x_load_firmware(struct mt792x_dev *dev)
 }
 EXPORT_SYMBOL_GPL(mt792x_load_firmware);
 
+MODULE_DESCRIPTION("MediaTek MT792x core driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
index 2dd283caed36bf056127d17a6cd3e93e9f6664d4..589a3efb9f8c30bbce14ec288e8ad66ecf0acf99 100644 (file)
@@ -314,5 +314,6 @@ void mt792xu_disconnect(struct usb_interface *usb_intf)
 }
 EXPORT_SYMBOL_GPL(mt792xu_disconnect);
 
+MODULE_DESCRIPTION("MediaTek MT792x USB helpers");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
index 3c729b563edc5dd6f964e0e72c46f276367c3d65..699be57309c2e4db6d8ff3f456d55fe5a75be291 100644 (file)
@@ -4477,7 +4477,8 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
 
        skb_put_data(skb, &req, sizeof(req));
        /* cck and ofdm */
-       skb_put_data(skb, &la.cck, sizeof(la.cck) + sizeof(la.ofdm));
+       skb_put_data(skb, &la.cck, sizeof(la.cck));
+       skb_put_data(skb, &la.ofdm, sizeof(la.ofdm));
        /* ht20 */
        skb_put_data(skb, &la.mcs[0], 8);
        /* ht40 */
index c50d89a445e9560672aeab8752de112220c9ab1c..9f2abfa273c9b060a793ae2594963f5c123fc5b0 100644 (file)
@@ -650,4 +650,5 @@ static void __exit mt7996_exit(void)
 
 module_init(mt7996_init);
 module_exit(mt7996_exit);
+MODULE_DESCRIPTION("MediaTek MT7996 MMIO helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index c52d550f0c32aac260e3163f14ec4989efbacc53..3e88798df0178c17cce2e94f588feb43711ad0a0 100644 (file)
@@ -672,4 +672,5 @@ EXPORT_SYMBOL_GPL(mt76s_init);
 
 MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT76x SDIO helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index 1584665fe3cb68d890bd7606e3eedcd05a470a2c..5a0bcb5071bd7d5ee8c22ef54ad3e05b3ea96cf4 100644 (file)
@@ -1128,4 +1128,5 @@ int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
 EXPORT_SYMBOL_GPL(mt76u_init);
 
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index fc76c66ff1a5a58f1c73e9ff50ceeea4926b8063..d6c01a2dd1988c5a9ef50bc96ceb0e30b453302d 100644 (file)
@@ -138,4 +138,5 @@ int __mt76_worker_fn(void *ptr)
 }
 EXPORT_SYMBOL_GPL(__mt76_worker_fn);
 
+MODULE_DESCRIPTION("MediaTek MT76x helpers");
 MODULE_LICENSE("Dual BSD/GPL");
index 91d71e0f7ef2332354a0b950eea3e8795387ed7b..81e8f25863f5bdc957fa5b4ee53a78c31f81eba6 100644 (file)
@@ -1018,5 +1018,6 @@ unregister_netdev:
        return ERR_PTR(ret);
 }
 
+MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER));
index 0d13e3e46e98e4b59852811324792793fc27f78e..d6d3946930905275ba021611307d6a16324a0bb8 100644 (file)
@@ -984,4 +984,5 @@ static struct sdio_driver wilc_sdio_driver = {
 module_driver(wilc_sdio_driver,
              sdio_register_driver,
              sdio_unregister_driver);
+MODULE_DESCRIPTION("Atmel WILC1000 SDIO wireless driver");
 MODULE_LICENSE("GPL");
index 77b4cdff73c370bf1bbd1e1ebec77eb0cac318b7..1d8b241ce43cae3329eb9fee84afc63a1027447e 100644 (file)
@@ -273,6 +273,7 @@ static struct spi_driver wilc_spi_driver = {
        .remove = wilc_bus_remove,
 };
 module_spi_driver(wilc_spi_driver);
+MODULE_DESCRIPTION("Atmel WILC1000 SPI wireless driver");
 MODULE_LICENSE("GPL");
 
 static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
index 301bd0043a4354032ceac6c45768c347b473163b..4e5b351f80f0922cabc3f1ce57fa44960c090e3c 100644 (file)
@@ -343,5 +343,6 @@ static void __exit wl1251_sdio_exit(void)
 module_init(wl1251_sdio_init);
 module_exit(wl1251_sdio_exit);
 
+MODULE_DESCRIPTION("TI WL1251 SDIO helpers");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
index 29292f06bd3dcb191bee70af1b295a6a62abf841..1936bb3af54ab6509edff7a6afdf23f9e9a9b728 100644 (file)
@@ -342,6 +342,7 @@ static struct spi_driver wl1251_spi_driver = {
 
 module_spi_driver(wl1251_spi_driver);
 
+MODULE_DESCRIPTION("TI WL1251 SPI helpers");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
 MODULE_ALIAS("spi:wl1251");
index de045fe4ca1eb982105a4a7a2d502f142efd5d02..b26d42b4e3cc0fbdc21e55b4b9682b0a81e1ec8e 100644 (file)
@@ -1955,6 +1955,7 @@ module_param_named(tcxo, tcxo_param, charp, 0);
 MODULE_PARM_DESC(tcxo,
                 "TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6");
 
+MODULE_DESCRIPTION("TI WL12xx wireless driver");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
index 20d9181b3410c40b555d7f06836469357782fc9f..2ccac1cdec0120c1709d09add0bcdf390f57e7e7 100644 (file)
@@ -2086,6 +2086,7 @@ module_param_named(num_rx_desc, num_rx_desc_param, int, 0400);
 MODULE_PARM_DESC(num_rx_desc_param,
                 "Number of Rx descriptors: u8 (default is 32)");
 
+MODULE_DESCRIPTION("TI WiLink 8 wireless driver");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_FIRMWARE(WL18XX_FW_NAME);
index fb9ed97774c7a29ab1a27689f7e78bf3740c0c3f..5736acb4d2063cbd1ca9f9f23187102d1d30c599 100644 (file)
@@ -6793,6 +6793,7 @@ MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
 module_param(no_recovery, int, 0600);
 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
 
+MODULE_DESCRIPTION("TI WLAN core driver");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
index f0686635db46e1246f3d06a8814f50d4c93c85ff..eb5482ed76ae48488ef5f55d1731c080c25b9919 100644 (file)
@@ -447,6 +447,7 @@ module_sdio_driver(wl1271_sdio_driver);
 module_param(dump, bool, 0600);
 MODULE_PARM_DESC(dump, "Enable sdio read/write dumps.");
 
+MODULE_DESCRIPTION("TI WLAN SDIO helpers");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
index 7d9a139db59e1552e3f4cd6feae526c4a5211e69..0aa2b2f3c5c914160d05198c3fc8c6ed07c6e999 100644 (file)
@@ -562,6 +562,7 @@ static struct spi_driver wl1271_spi_driver = {
 };
 
 module_spi_driver(wl1271_spi_driver);
+MODULE_DESCRIPTION("TI WLAN SPI helpers");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
index 88f760a7cbc35469e20be2d09f9b2cfb92b8362a..fab361a250d6054e03a40e451efda2eb3678fda6 100644 (file)
@@ -104,13 +104,12 @@ bool provides_xdp_headroom = true;
 module_param(provides_xdp_headroom, bool, 0644);
 
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
-                              u8 status);
+                              s8 status);
 
 static void make_tx_response(struct xenvif_queue *queue,
-                            struct xen_netif_tx_request *txp,
+                            const struct xen_netif_tx_request *txp,
                             unsigned int extra_count,
-                            s8       st);
-static void push_tx_responses(struct xenvif_queue *queue);
+                            s8 status);
 
 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
 
@@ -208,13 +207,9 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
                          unsigned int extra_count, RING_IDX end)
 {
        RING_IDX cons = queue->tx.req_cons;
-       unsigned long flags;
 
        do {
-               spin_lock_irqsave(&queue->response_lock, flags);
                make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
-               push_tx_responses(queue);
-               spin_unlock_irqrestore(&queue->response_lock, flags);
                if (cons == end)
                        break;
                RING_COPY_REQUEST(&queue->tx, cons++, txp);
@@ -463,12 +458,20 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
        }
 
        for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
-            shinfo->nr_frags++, gop++, nr_slots--) {
+            nr_slots--) {
+               if (unlikely(!txp->size)) {
+                       make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+                       ++txp;
+                       continue;
+               }
+
                index = pending_index(queue->pending_cons++);
                pending_idx = queue->pending_ring[index];
                xenvif_tx_create_map_op(queue, pending_idx, txp,
                                        txp == first ? extra_count : 0, gop);
                frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+               ++shinfo->nr_frags;
+               ++gop;
 
                if (txp == first)
                        txp = txfrags;
@@ -481,20 +484,33 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
                shinfo = skb_shinfo(nskb);
                frags = shinfo->frags;
 
-               for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-                    shinfo->nr_frags++, txp++, gop++) {
+               for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+                       if (unlikely(!txp->size)) {
+                               make_tx_response(queue, txp, 0,
+                                                XEN_NETIF_RSP_OKAY);
+                               continue;
+                       }
+
                        index = pending_index(queue->pending_cons++);
                        pending_idx = queue->pending_ring[index];
                        xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
                                                gop);
                        frag_set_pending_idx(&frags[shinfo->nr_frags],
                                             pending_idx);
+                       ++shinfo->nr_frags;
+                       ++gop;
+               }
+
+               if (shinfo->nr_frags) {
+                       skb_shinfo(skb)->frag_list = nskb;
+                       nskb = NULL;
                }
+       }
 
-               skb_shinfo(skb)->frag_list = nskb;
-       } else if (nskb) {
+       if (nskb) {
                /* A frag_list skb was allocated but it is no longer needed
-                * because enough slots were converted to copy ops above.
+                * because enough slots were converted to copy ops above or some
+                * were empty.
                 */
                kfree_skb(nskb);
        }
@@ -963,7 +979,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                         (ret == 0) ?
                                         XEN_NETIF_RSP_OKAY :
                                         XEN_NETIF_RSP_ERROR);
-                       push_tx_responses(queue);
                        continue;
                }
 
@@ -975,7 +990,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                        make_tx_response(queue, &txreq, extra_count,
                                         XEN_NETIF_RSP_OKAY);
-                       push_tx_responses(queue);
                        continue;
                }
 
@@ -1401,8 +1415,35 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
        return work_done;
 }
 
+static void _make_tx_response(struct xenvif_queue *queue,
+                            const struct xen_netif_tx_request *txp,
+                            unsigned int extra_count,
+                            s8 status)
+{
+       RING_IDX i = queue->tx.rsp_prod_pvt;
+       struct xen_netif_tx_response *resp;
+
+       resp = RING_GET_RESPONSE(&queue->tx, i);
+       resp->id     = txp->id;
+       resp->status = status;
+
+       while (extra_count-- != 0)
+               RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+
+       queue->tx.rsp_prod_pvt = ++i;
+}
+
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+       int notify;
+
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+       if (notify)
+               notify_remote_via_irq(queue->tx_irq);
+}
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
-                              u8 status)
+                              s8 status)
 {
        struct pending_tx_info *pending_tx_info;
        pending_ring_idx_t index;
@@ -1412,8 +1453,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
        spin_lock_irqsave(&queue->response_lock, flags);
 
-       make_tx_response(queue, &pending_tx_info->req,
-                        pending_tx_info->extra_count, status);
+       _make_tx_response(queue, &pending_tx_info->req,
+                         pending_tx_info->extra_count, status);
 
        /* Release the pending index before pusing the Tx response so
         * its available before a new Tx request is pushed by the
@@ -1427,32 +1468,19 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
        spin_unlock_irqrestore(&queue->response_lock, flags);
 }
 
-
 static void make_tx_response(struct xenvif_queue *queue,
-                            struct xen_netif_tx_request *txp,
+                            const struct xen_netif_tx_request *txp,
                             unsigned int extra_count,
-                            s8       st)
+                            s8 status)
 {
-       RING_IDX i = queue->tx.rsp_prod_pvt;
-       struct xen_netif_tx_response *resp;
-
-       resp = RING_GET_RESPONSE(&queue->tx, i);
-       resp->id     = txp->id;
-       resp->status = st;
-
-       while (extra_count-- != 0)
-               RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+       unsigned long flags;
 
-       queue->tx.rsp_prod_pvt = ++i;
-}
+       spin_lock_irqsave(&queue->response_lock, flags);
 
-static void push_tx_responses(struct xenvif_queue *queue)
-{
-       int notify;
+       _make_tx_response(queue, txp, extra_count, status);
+       push_tx_responses(queue);
 
-       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
-       if (notify)
-               notify_remote_via_irq(queue->tx_irq);
+       spin_unlock_irqrestore(&queue->response_lock, flags);
 }
 
 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
index a556acdb947bbe200a66e234a6d75de833ae18f4..2eb5978bd79e1b5114bb34bab409fbeaae00faf8 100644 (file)
@@ -203,8 +203,8 @@ static int pn532_uart_rx_is_frame(struct sk_buff *skb)
        return 0;
 }
 
-static int pn532_receive_buf(struct serdev_device *serdev,
-               const unsigned char *data, size_t count)
+static ssize_t pn532_receive_buf(struct serdev_device *serdev,
+                                const u8 *data, size_t count)
 {
        struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev);
        size_t i;
index 82ea35d748a5de5ff39bb3328e16c38cdbd78837..456d3947116c1c4029fa2477bdf1c12f508927aa 100644 (file)
@@ -51,9 +51,8 @@ static const struct s3fwrn5_phy_ops uart_phy_ops = {
        .write = s3fwrn82_uart_write,
 };
 
-static int s3fwrn82_uart_read(struct serdev_device *serdev,
-                             const unsigned char *data,
-                             size_t count)
+static ssize_t s3fwrn82_uart_read(struct serdev_device *serdev,
+                                 const u8 *data, size_t count)
 {
        struct s3fwrn82_uart_phy *phy = serdev_device_get_drvdata(serdev);
        size_t i;
index a92eb172f0e7eb9a0feed5a05a7b887066c02ec2..4ceced5cefcf1d40d9eef0817c623bd798a49d79 100644 (file)
@@ -29,12 +29,27 @@ static int init_vq(struct virtio_pmem *vpmem)
        return 0;
 };
 
+static int virtio_pmem_validate(struct virtio_device *vdev)
+{
+       struct virtio_shm_region shm_reg;
+
+       if (virtio_has_feature(vdev, VIRTIO_PMEM_F_SHMEM_REGION) &&
+               !virtio_get_shm_region(vdev, &shm_reg, (u8)VIRTIO_PMEM_SHMEM_REGION_ID)
+       ) {
+               dev_notice(&vdev->dev, "failed to get shared memory region %d\n",
+                               VIRTIO_PMEM_SHMEM_REGION_ID);
+               __virtio_clear_bit(vdev, VIRTIO_PMEM_F_SHMEM_REGION);
+       }
+       return 0;
+}
+
 static int virtio_pmem_probe(struct virtio_device *vdev)
 {
        struct nd_region_desc ndr_desc = {};
        struct nd_region *nd_region;
        struct virtio_pmem *vpmem;
        struct resource res;
+       struct virtio_shm_region shm_reg;
        int err = 0;
 
        if (!vdev->config->get) {
@@ -57,10 +72,16 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
                goto out_err;
        }
 
-       virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
-                       start, &vpmem->start);
-       virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
-                       size, &vpmem->size);
+       if (virtio_has_feature(vdev, VIRTIO_PMEM_F_SHMEM_REGION)) {
+               virtio_get_shm_region(vdev, &shm_reg, (u8)VIRTIO_PMEM_SHMEM_REGION_ID);
+               vpmem->start = shm_reg.addr;
+               vpmem->size = shm_reg.len;
+       } else {
+               virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
+                               start, &vpmem->start);
+               virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
+                               size, &vpmem->size);
+       }
 
        res.start = vpmem->start;
        res.end   = vpmem->start + vpmem->size - 1;
@@ -122,10 +143,17 @@ static void virtio_pmem_remove(struct virtio_device *vdev)
        virtio_reset_device(vdev);
 }
 
+static unsigned int features[] = {
+       VIRTIO_PMEM_F_SHMEM_REGION,
+};
+
 static struct virtio_driver virtio_pmem_driver = {
+       .feature_table          = features,
+       .feature_table_size     = ARRAY_SIZE(features),
        .driver.name            = KBUILD_MODNAME,
        .driver.owner           = THIS_MODULE,
        .id_table               = id_table,
+       .validate               = virtio_pmem_validate,
        .probe                  = virtio_pmem_probe,
        .remove                 = virtio_pmem_remove,
 };
index a23ab5c968b9457bee89f14cc1f158e377ffa084..a3455f1d67fae20268a0e9e02a4c5c34ff054afe 100644 (file)
@@ -471,4 +471,5 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
 }
 EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
 
+MODULE_DESCRIPTION("NVMe Authentication framework");
 MODULE_LICENSE("GPL v2");
index ee341b83eebaf553cbf91a045b048285d590157a..6f7e7a8fa5ae470c463586fb0c638b5dc6f7313e 100644 (file)
@@ -111,7 +111,7 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring,
  * should be preferred to 'generated' PSKs,
  * and SHA-384 should be preferred to SHA-256.
  */
-struct nvme_tls_psk_priority_list {
+static struct nvme_tls_psk_priority_list {
        bool generated;
        enum nvme_tcp_tls_cipher cipher;
 } nvme_tls_psk_prio[] = {
@@ -181,5 +181,6 @@ static void __exit nvme_keyring_exit(void)
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+MODULE_DESCRIPTION("NVMe Keyring implementation");
 module_init(nvme_keyring_init);
 module_exit(nvme_keyring_exit);
index 596bb11eeba5a9d0a4d1637f2c061775956bb84e..c727cd1f264bf6221d2043d1f65bb70a51f00c1d 100644 (file)
@@ -797,6 +797,7 @@ static int apple_nvme_init_request(struct blk_mq_tag_set *set,
 
 static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
 {
+       enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
        u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
        bool dead = false, freeze = false;
        unsigned long flags;
@@ -808,8 +809,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
        if (csts & NVME_CSTS_CFS)
                dead = true;
 
-       if (anv->ctrl.state == NVME_CTRL_LIVE ||
-           anv->ctrl.state == NVME_CTRL_RESETTING) {
+       if (state == NVME_CTRL_LIVE ||
+           state == NVME_CTRL_RESETTING) {
                freeze = true;
                nvme_start_freeze(&anv->ctrl);
        }
@@ -881,7 +882,7 @@ static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
        unsigned long flags;
        u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
 
-       if (anv->ctrl.state != NVME_CTRL_LIVE) {
+       if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
                /*
                 * From rdma.c:
                 * If we are resetting, connecting or deleting we should
@@ -985,10 +986,10 @@ static void apple_nvme_reset_work(struct work_struct *work)
        u32 boot_status, aqa;
        struct apple_nvme *anv =
                container_of(work, struct apple_nvme, ctrl.reset_work);
+       enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
 
-       if (anv->ctrl.state != NVME_CTRL_RESETTING) {
-               dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
-                        anv->ctrl.state);
+       if (state != NVME_CTRL_RESETTING) {
+               dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
                ret = -ENODEV;
                goto out;
        }
index 72c0525c75f503bb56c7c246c733f9eea57e44ab..a264b3ae078b8c4c28382c7d8f8757c7e3eec594 100644 (file)
@@ -48,11 +48,6 @@ struct nvme_dhchap_queue_context {
 
 static struct workqueue_struct *nvme_auth_wq;
 
-#define nvme_auth_flags_from_qid(qid) \
-       (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
-#define nvme_auth_queue_from_qid(ctrl, qid) \
-       (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
-
 static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
 {
        return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
@@ -63,10 +58,15 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
                            void *data, size_t data_len, bool auth_send)
 {
        struct nvme_command cmd = {};
-       blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
-       struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+       nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
+       struct request_queue *q = ctrl->fabrics_q;
        int ret;
 
+       if (qid != 0) {
+               flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
+               q = ctrl->connect_q;
+       }
+
        cmd.auth_common.opcode = nvme_fabrics_command;
        cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
        cmd.auth_common.spsp0 = 0x01;
@@ -80,8 +80,7 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
        }
 
        ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
-                                    qid == 0 ? NVME_QID_ANY : qid,
-                                    0, flags);
+                                    qid == 0 ? NVME_QID_ANY : qid, flags);
        if (ret > 0)
                dev_warn(ctrl->device,
                        "qid %d auth_send failed with status %d\n", qid, ret);
@@ -897,7 +896,7 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
         * If the ctrl is no connected, bail as reconnect will handle
         * authentication.
         */
-       if (ctrl->state != NVME_CTRL_LIVE)
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
                return;
 
        /* Authenticate admin queue first */
index 20f46c230885c10f2a82bc87f7091645e2d02db5..6f2ebb5fcdb05e1e65971643c9aff2c3f2271c19 100644 (file)
@@ -171,15 +171,15 @@ static const char * const nvme_statuses[] = {
        [NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
 };
 
-const unsigned char *nvme_get_error_status_str(u16 status)
+const char *nvme_get_error_status_str(u16 status)
 {
        status &= 0x7ff;
        if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
-               return nvme_statuses[status & 0x7ff];
+               return nvme_statuses[status];
        return "Unknown";
 }
 
-const unsigned char *nvme_get_opcode_str(u8 opcode)
+const char *nvme_get_opcode_str(u8 opcode)
 {
        if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
                return nvme_ops[opcode];
@@ -187,7 +187,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
 }
 EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
 
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+const char *nvme_get_admin_opcode_str(u8 opcode)
 {
        if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
                return nvme_admin_ops[opcode];
@@ -195,7 +195,7 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
 }
 EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);
 
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) {
+const char *nvme_get_fabrics_opcode_str(u8 opcode) {
        if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
                return nvme_fabrics_ops[opcode];
        return "Unknown";
index 0af61238708370d1fe11d59f091a42d8b7bce3ee..60537c9224bf9341de0c01449f47e05032020e43 100644 (file)
@@ -338,6 +338,30 @@ static void nvme_log_error(struct request *req)
                           nr->status & NVME_SC_DNR  ? "DNR "  : "");
 }
 
+static void nvme_log_err_passthru(struct request *req)
+{
+       struct nvme_ns *ns = req->q->queuedata;
+       struct nvme_request *nr = nvme_req(req);
+
+       pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
+               "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
+               ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
+               ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
+                    nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+               nr->cmd->common.opcode,
+               nvme_get_error_status_str(nr->status),
+               nr->status >> 8 & 7,    /* Status Code Type */
+               nr->status & 0xff,      /* Status Code */
+               nr->status & NVME_SC_MORE ? "MORE " : "",
+               nr->status & NVME_SC_DNR  ? "DNR "  : "",
+               nr->cmd->common.cdw10,
+               nr->cmd->common.cdw11,
+               nr->cmd->common.cdw12,
+               nr->cmd->common.cdw13,
+               nr->cmd->common.cdw14,
+               nr->cmd->common.cdw14);
+}
+
 enum nvme_disposition {
        COMPLETE,
        RETRY,
@@ -385,8 +409,12 @@ static inline void nvme_end_req(struct request *req)
 {
        blk_status_t status = nvme_error_status(nvme_req(req)->status);
 
-       if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
-               nvme_log_error(req);
+       if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+               if (blk_rq_is_passthrough(req))
+                       nvme_log_err_passthru(req);
+               else
+                       nvme_log_error(req);
+       }
        nvme_end_req_zoned(req);
        nvme_trace_bio_complete(req);
        if (req->cmd_flags & REQ_NVME_MPATH)
@@ -679,10 +707,21 @@ static inline void nvme_clear_nvme_request(struct request *req)
 /* initialize a passthrough request */
 void nvme_init_request(struct request *req, struct nvme_command *cmd)
 {
-       if (req->q->queuedata)
+       struct nvme_request *nr = nvme_req(req);
+       bool logging_enabled;
+
+       if (req->q->queuedata) {
+               struct nvme_ns *ns = req->q->disk->private_data;
+
+               logging_enabled = ns->head->passthru_err_log_enabled;
                req->timeout = NVME_IO_TIMEOUT;
-       else /* no queuedata implies admin queue */
+       } else { /* no queuedata implies admin queue */
+               logging_enabled = nr->ctrl->passthru_err_log_enabled;
                req->timeout = NVME_ADMIN_TIMEOUT;
+       }
+
+       if (!logging_enabled)
+               req->rq_flags |= RQF_QUIET;
 
        /* passthru commands should let the driver set the SGL flags */
        cmd->common.flags &= ~NVME_CMD_SGL_ALL;
@@ -691,8 +730,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
        if (req->mq_hctx->type == HCTX_TYPE_POLL)
                req->cmd_flags |= REQ_POLLED;
        nvme_clear_nvme_request(req);
-       req->rq_flags |= RQF_QUIET;
-       memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
+       memcpy(nr->cmd, cmd, sizeof(*cmd));
 }
 EXPORT_SYMBOL_GPL(nvme_init_request);
 
@@ -721,7 +759,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
 EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
 
 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
-               bool queue_live)
+               bool queue_live, enum nvme_ctrl_state state)
 {
        struct nvme_request *req = nvme_req(rq);
 
@@ -742,7 +780,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                 * command, which is require to set the queue live in the
                 * appropinquate states.
                 */
-               switch (nvme_ctrl_state(ctrl)) {
+               switch (state) {
                case NVME_CTRL_CONNECTING:
                        if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
                            (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
@@ -1051,20 +1089,27 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                union nvme_result *result, void *buffer, unsigned bufflen,
-               int qid, int at_head, blk_mq_req_flags_t flags)
+               int qid, nvme_submit_flags_t flags)
 {
        struct request *req;
        int ret;
+       blk_mq_req_flags_t blk_flags = 0;
 
+       if (flags & NVME_SUBMIT_NOWAIT)
+               blk_flags |= BLK_MQ_REQ_NOWAIT;
+       if (flags & NVME_SUBMIT_RESERVED)
+               blk_flags |= BLK_MQ_REQ_RESERVED;
        if (qid == NVME_QID_ANY)
-               req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+               req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
        else
-               req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+               req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
                                                qid - 1);
 
        if (IS_ERR(req))
                return PTR_ERR(req);
        nvme_init_request(req, cmd);
+       if (flags & NVME_SUBMIT_RETRY)
+               req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
 
        if (buffer && bufflen) {
                ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -1072,7 +1117,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                        goto out;
        }
 
-       ret = nvme_execute_rq(req, at_head);
+       ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
        if (result && ret >= 0)
                *result = nvme_req(req)->result;
  out:
@@ -1085,7 +1130,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buffer, unsigned bufflen)
 {
        return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
-                       NVME_QID_ANY, 0, 0);
+                       NVME_QID_ANY, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
@@ -1560,7 +1605,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
        c.features.dword11 = cpu_to_le32(dword11);
 
        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
-                       buffer, buflen, NVME_QID_ANY, 0, 0);
+                       buffer, buflen, NVME_QID_ANY, 0);
        if (ret >= 0 && result)
                *result = le32_to_cpu(res.u32);
        return ret;
@@ -1740,13 +1785,13 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
                struct nvme_ns_head *head)
 {
        struct request_queue *queue = disk->queue;
-       u32 size = queue_logical_block_size(queue);
+       u32 max_discard_sectors;
 
-       if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX))
-               ctrl->max_discard_sectors =
-                       nvme_lba_to_sect(head, ctrl->dmrsl);
-
-       if (ctrl->max_discard_sectors == 0) {
+       if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX)) {
+               max_discard_sectors = nvme_lba_to_sect(head, ctrl->dmrsl);
+       } else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
+               max_discard_sectors = UINT_MAX;
+       } else {
                blk_queue_max_discard_sectors(queue, 0);
                return;
        }
@@ -1754,14 +1799,22 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
        BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
                        NVME_DSM_MAX_RANGES);
 
-       queue->limits.discard_granularity = size;
-
-       /* If discard is already enabled, don't reset queue limits */
+       /*
+        * If discard is already enabled, don't reset queue limits.
+        *
+        * This works around the fact that the block layer can't cope well with
+        * updating the hardware limits when overridden through sysfs.  This is
+        * harmless because discard limits in NVMe are purely advisory.
+        */
        if (queue->limits.max_discard_sectors)
                return;
 
-       blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
-       blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
+       blk_queue_max_discard_sectors(queue, max_discard_sectors);
+       if (ctrl->dmrl)
+               blk_queue_max_discard_segments(queue, ctrl->dmrl);
+       else
+               blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
+       queue->limits.discard_granularity = queue_logical_block_size(queue);
 
        if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
                blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
@@ -2164,7 +2217,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
        cmd.common.cdw11 = cpu_to_le32(len);
 
        return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
-                       NVME_QID_ANY, 1, 0);
+                       NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
 }
 
 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
@@ -2930,14 +2983,6 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
        struct nvme_id_ctrl_nvm *id;
        int ret;
 
-       if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
-               ctrl->max_discard_sectors = UINT_MAX;
-               ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
-       } else {
-               ctrl->max_discard_sectors = 0;
-               ctrl->max_discard_segments = 0;
-       }
-
        /*
         * Even though NVMe spec explicitly states that MDTS is not applicable
         * to the write-zeroes, we are cautious and limit the size to the
@@ -2967,8 +3012,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
        if (ret)
                goto free_data;
 
-       if (id->dmrl)
-               ctrl->max_discard_segments = id->dmrl;
+       ctrl->dmrl = id->dmrl;
        ctrl->dmrsl = le32_to_cpu(id->dmrsl);
        if (id->wzsl)
                ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
@@ -3715,6 +3759,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
        nvme_mpath_add_disk(ns, info->anagrpid);
        nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
 
+       /*
+        * Set ns->disk->device->driver_data to ns so we can access
+        * ns->head->passthru_err_log_enabled in
+        * nvme_io_passthru_err_log_enabled_[store | show]().
+        */
+       dev_set_drvdata(disk_to_dev(ns->disk), ns);
+
        return;
 
  out_cleanup_ns_from_list:
@@ -4139,6 +4190,7 @@ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
 static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
 {
        struct nvme_fw_slot_info_log *log;
+       u8 next_fw_slot, cur_fw_slot;
 
        log = kmalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
@@ -4150,13 +4202,15 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
                goto out_free_log;
        }
 
-       if (log->afi & 0x70 || !(log->afi & 0x7)) {
+       cur_fw_slot = log->afi & 0x7;
+       next_fw_slot = (log->afi & 0x70) >> 4;
+       if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) {
                dev_info(ctrl->device,
                         "Firmware is activated after next Controller Level Reset\n");
                goto out_free_log;
        }
 
-       memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1],
+       memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1],
                sizeof(ctrl->subsys->firmware_rev));
 
 out_free_log:
@@ -4515,6 +4569,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
        int ret;
 
        WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
+       ctrl->passthru_err_log_enabled = false;
        clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
        spin_lock_init(&ctrl->lock);
        mutex_init(&ctrl->scan_lock);
@@ -4852,5 +4907,6 @@ static void __exit nvme_core_exit(void)
 
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host core framework");
 module_init(nvme_core_init);
 module_exit(nvme_core_exit);
index b5752a77ad989f04a14ef9416f6244cf5255441d..3499acbf6a822fc1a45d93894f4870a9a5f7857c 100644 (file)
@@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
        cmd.prop_get.offset = cpu_to_le32(off);
 
        ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
-                       NVME_QID_ANY, 0, 0);
+                       NVME_QID_ANY, 0);
 
        if (ret >= 0)
                *val = le64_to_cpu(res.u64);
@@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
        cmd.prop_get.offset = cpu_to_le32(off);
 
        ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
-                       NVME_QID_ANY, 0, 0);
+                       NVME_QID_ANY, 0);
 
        if (ret >= 0)
                *val = le64_to_cpu(res.u64);
@@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
        cmd.prop_set.value = cpu_to_le64(val);
 
        ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
-                       NVME_QID_ANY, 0, 0);
+                       NVME_QID_ANY, 0);
        if (unlikely(ret))
                dev_err(ctrl->device,
                        "Property Set error: %d, offset %#x\n",
@@ -450,8 +450,10 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
                return -ENOMEM;
 
        ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
-                       data, sizeof(*data), NVME_QID_ANY, 1,
-                       BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+                       data, sizeof(*data), NVME_QID_ANY,
+                       NVME_SUBMIT_AT_HEAD |
+                       NVME_SUBMIT_NOWAIT |
+                       NVME_SUBMIT_RESERVED);
        if (ret) {
                nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
                                       &cmd, data);
@@ -525,8 +527,10 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
                return -ENOMEM;
 
        ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
-                       data, sizeof(*data), qid, 1,
-                       BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+                       data, sizeof(*data), qid,
+                       NVME_SUBMIT_AT_HEAD |
+                       NVME_SUBMIT_RESERVED |
+                       NVME_SUBMIT_NOWAIT);
        if (ret) {
                nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
                                       &cmd, data);
@@ -1488,6 +1492,7 @@ static void __exit nvmf_exit(void)
 }
 
 MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NVMe host fabrics library");
 
 module_init(nvmf_init);
 module_exit(nvmf_exit);
index fbaee5a7be196c08483a41b6673f00e5032bec10..06cc54851b1be39615cdfa6eed1a935dec472f82 100644 (file)
@@ -185,9 +185,11 @@ static inline bool
 nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
                        struct nvmf_ctrl_options *opts)
 {
-       if (ctrl->state == NVME_CTRL_DELETING ||
-           ctrl->state == NVME_CTRL_DELETING_NOIO ||
-           ctrl->state == NVME_CTRL_DEAD ||
+       enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+       if (state == NVME_CTRL_DELETING ||
+           state == NVME_CTRL_DELETING_NOIO ||
+           state == NVME_CTRL_DEAD ||
            strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
            strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
            !uuid_equal(&opts->host->id, &ctrl->opts->host->id))
index 16847a316421f393cfbf410f083cd97c657f062e..68a5d971657bb5080f717f5ae1ec5645830aadd5 100644 (file)
@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
 static DEFINE_IDA(nvme_fc_local_port_cnt);
 static DEFINE_IDA(nvme_fc_ctrl_cnt);
 
-static struct workqueue_struct *nvme_fc_wq;
-
-static bool nvme_fc_waiting_to_unload;
-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
-
 /*
  * These items are short-term. They will eventually be moved into
  * a generic FC class. See comments in module init.
@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
        /* remove from transport list */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_del(&lport->port_list);
-       if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
-               complete(&nvme_fc_unload_proceed);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);
 
        ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
@@ -2574,6 +2567,7 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
 {
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
        struct nvme_fc_ctrl *ctrl = op->ctrl;
+       u16 qnum = op->queue->qnum;
        struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
        struct nvme_command *sqe = &cmdiu->sqe;
 
@@ -2582,10 +2576,11 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
         * will detect the aborted io and will fail the connection.
         */
        dev_info(ctrl->ctrl.device,
-               "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+               "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: "
                "x%08x/x%08x\n",
-               ctrl->cnum, op->queue->qnum, sqe->common.opcode,
-               sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+               ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype,
+               nvme_fabrics_opcode_str(qnum, sqe),
+               sqe->common.cdw10, sqe->common.cdw11);
        if (__nvme_fc_abort_op(ctrl, op))
                nvme_fc_error_recovery(ctrl, "io timeout abort failed");
 
@@ -3575,8 +3570,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        flush_delayed_work(&ctrl->connect_work);
 
        dev_info(ctrl->ctrl.device,
-               "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
-               ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
+               "NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n",
+               ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn);
 
        return &ctrl->ctrl;
 
@@ -3894,10 +3889,6 @@ static int __init nvme_fc_init_module(void)
 {
        int ret;
 
-       nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
-       if (!nvme_fc_wq)
-               return -ENOMEM;
-
        /*
         * NOTE:
         * It is expected that in the future the kernel will combine
@@ -3915,7 +3906,7 @@ static int __init nvme_fc_init_module(void)
        ret = class_register(&fc_class);
        if (ret) {
                pr_err("couldn't register class fc\n");
-               goto out_destroy_wq;
+               return ret;
        }
 
        /*
@@ -3939,8 +3930,6 @@ out_destroy_device:
        device_destroy(&fc_class, MKDEV(0, 0));
 out_destroy_class:
        class_unregister(&fc_class);
-out_destroy_wq:
-       destroy_workqueue(nvme_fc_wq);
 
        return ret;
 }
@@ -3960,48 +3949,27 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
        spin_unlock(&rport->lock);
 }
 
-static void
-nvme_fc_cleanup_for_unload(void)
+static void __exit nvme_fc_exit_module(void)
 {
        struct nvme_fc_lport *lport;
        struct nvme_fc_rport *rport;
-
-       list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
-               list_for_each_entry(rport, &lport->endp_list, endp_list) {
-                       nvme_fc_delete_controllers(rport);
-               }
-       }
-}
-
-static void __exit nvme_fc_exit_module(void)
-{
        unsigned long flags;
-       bool need_cleanup = false;
 
        spin_lock_irqsave(&nvme_fc_lock, flags);
-       nvme_fc_waiting_to_unload = true;
-       if (!list_empty(&nvme_fc_lport_list)) {
-               need_cleanup = true;
-               nvme_fc_cleanup_for_unload();
-       }
+       list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
+               list_for_each_entry(rport, &lport->endp_list, endp_list)
+                       nvme_fc_delete_controllers(rport);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);
-       if (need_cleanup) {
-               pr_info("%s: waiting for ctlr deletes\n", __func__);
-               wait_for_completion(&nvme_fc_unload_proceed);
-               pr_info("%s: ctrl deletes complete\n", __func__);
-       }
+       flush_workqueue(nvme_delete_wq);
 
        nvmf_unregister_transport(&nvme_fc_transport);
 
-       ida_destroy(&nvme_fc_local_port_cnt);
-       ida_destroy(&nvme_fc_ctrl_cnt);
-
        device_destroy(&fc_class, MKDEV(0, 0));
        class_unregister(&fc_class);
-       destroy_workqueue(nvme_fc_wq);
 }
 
 module_init(nvme_fc_init_module);
 module_exit(nvme_fc_exit_module);
 
+MODULE_DESCRIPTION("NVMe host FC transport driver");
 MODULE_LICENSE("GPL v2");
index 18f5c1be5d67e50ecef131bfe5b223e4e5eda5bd..3dfd5ae99ae05e892eb793cb3b21ba0b75dd6e98 100644 (file)
@@ -228,7 +228,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        length = (io.nblocks + 1) << ns->head->lba_shift;
 
        if ((io.control & NVME_RW_PRINFO_PRACT) &&
-           ns->head->ms == sizeof(struct t10_pi_tuple)) {
+           (ns->head->ms == ns->head->pi_size)) {
                /*
                 * Protection information is stripped/inserted by the
                 * controller.
index 2dd4137a08b284df64788972a067d4282fa92ac7..74de1e64aeead77c604ec31e30bac179a0de245b 100644 (file)
@@ -156,7 +156,7 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
                if (!ns->head->disk)
                        continue;
                kblockd_schedule_work(&ns->head->requeue_work);
-               if (ctrl->state == NVME_CTRL_LIVE)
+               if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
                        disk_uevent(ns->head->disk, KOBJ_CHANGE);
        }
        up_read(&ctrl->namespaces_rwsem);
@@ -223,13 +223,14 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
 
 static bool nvme_path_is_disabled(struct nvme_ns *ns)
 {
+       enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl);
+
        /*
         * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
         * still be able to complete assuming that the controller is connected.
         * Otherwise it will fail immediately and return to the requeue list.
         */
-       if (ns->ctrl->state != NVME_CTRL_LIVE &&
-           ns->ctrl->state != NVME_CTRL_DELETING)
+       if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING)
                return true;
        if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
            !test_bit(NVME_NS_READY, &ns->flags))
@@ -331,7 +332,7 @@ out:
 
 static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
 {
-       return ns->ctrl->state == NVME_CTRL_LIVE &&
+       return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
                ns->ana_state == NVME_ANA_OPTIMIZED;
 }
 
@@ -358,7 +359,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
        list_for_each_entry_rcu(ns, &head->list, siblings) {
                if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
                        continue;
-               switch (ns->ctrl->state) {
+               switch (nvme_ctrl_state(ns->ctrl)) {
                case NVME_CTRL_LIVE:
                case NVME_CTRL_RESETTING:
                case NVME_CTRL_CONNECTING:
@@ -667,7 +668,7 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
         * controller is ready.
         */
        if (nvme_state_is_live(ns->ana_state) &&
-           ns->ctrl->state == NVME_CTRL_LIVE)
+           nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
                nvme_mpath_set_live(ns);
 }
 
@@ -748,7 +749,7 @@ static void nvme_ana_work(struct work_struct *work)
 {
        struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
 
-       if (ctrl->state != NVME_CTRL_LIVE)
+       if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
                return;
 
        nvme_read_ana_log(ctrl);
index 4be7f6822966db94fdd9964ed92640eedafede91..7b87763e2f8a69f5edef68e2e657ed417e911cb8 100644 (file)
@@ -263,6 +263,7 @@ enum nvme_ctrl_flags {
 struct nvme_ctrl {
        bool comp_seen;
        bool identified;
+       bool passthru_err_log_enabled;
        enum nvme_ctrl_state state;
        spinlock_t lock;
        struct mutex scan_lock;
@@ -303,14 +304,13 @@ struct nvme_ctrl {
        u32 max_hw_sectors;
        u32 max_segments;
        u32 max_integrity_segments;
-       u32 max_discard_sectors;
-       u32 max_discard_segments;
        u32 max_zeroes_sectors;
 #ifdef CONFIG_BLK_DEV_ZONED
        u32 max_zone_append;
 #endif
        u16 crdt[3];
        u16 oncs;
+       u8 dmrl;
        u32 dmrsl;
        u16 oacs;
        u16 sqsize;
@@ -455,6 +455,7 @@ struct nvme_ns_head {
        struct list_head        entry;
        struct kref             ref;
        bool                    shared;
+       bool                    passthru_err_log_enabled;
        int                     instance;
        struct nvme_effects_log *effects;
        u64                     nuse;
@@ -523,7 +524,6 @@ struct nvme_ns {
        struct device           cdev_device;
 
        struct nvme_fault_inject fault_inject;
-
 };
 
 /* NVMe ns supports metadata actions by the controller (generate/strip) */
@@ -806,17 +806,18 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
                struct request *req);
 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
-               bool queue_live);
+               bool queue_live, enum nvme_ctrl_state state);
 
 static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                bool queue_live)
 {
-       if (likely(ctrl->state == NVME_CTRL_LIVE))
+       enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+       if (likely(state == NVME_CTRL_LIVE))
                return true;
-       if (ctrl->ops->flags & NVME_F_FABRICS &&
-           ctrl->state == NVME_CTRL_DELETING)
+       if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
                return queue_live;
-       return __nvme_check_ready(ctrl, rq, queue_live);
+       return __nvme_check_ready(ctrl, rq, queue_live, state);
 }
 
 /*
@@ -837,12 +838,27 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
                (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
 }
 
+/*
+ * Flags for __nvme_submit_sync_cmd()
+ */
+typedef __u32 __bitwise nvme_submit_flags_t;
+
+enum {
+       /* Insert request at the head of the queue */
+       NVME_SUBMIT_AT_HEAD  = (__force nvme_submit_flags_t)(1 << 0),
+       /* Set BLK_MQ_REQ_NOWAIT when allocating request */
+       NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
+       /* Set BLK_MQ_REQ_RESERVED when allocating request */
+       NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
+       /* Retry command when NVME_SC_DNR is not set in the result */
+       NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
+};
+
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                union nvme_result *result, void *buffer, unsigned bufflen,
-               int qid, int at_head,
-               blk_mq_req_flags_t flags);
+               int qid, nvme_submit_flags_t flags);
 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
                      unsigned int dword11, void *buffer, size_t buflen,
                      u32 *result);
@@ -932,6 +948,10 @@ extern struct device_attribute dev_attr_ana_grpid;
 extern struct device_attribute dev_attr_ana_state;
 extern struct device_attribute subsys_attr_iopolicy;
 
+static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
+{
+       return disk->fops == &nvme_ns_head_ops;
+}
 #else
 #define multipath false
 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
@@ -1009,6 +1029,10 @@ static inline void nvme_mpath_start_request(struct request *rq)
 static inline void nvme_mpath_end_request(struct request *rq)
 {
 }
+static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
+{
+       return false;
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 int nvme_revalidate_zones(struct nvme_ns *ns);
@@ -1037,7 +1061,10 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 {
-       return dev_to_disk(dev)->private_data;
+       struct gendisk *disk = dev_to_disk(dev);
+
+       WARN_ON(nvme_disk_is_ns_head(disk));
+       return disk->private_data;
 }
 
 #ifdef CONFIG_NVME_HWMON
@@ -1114,35 +1141,42 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
 }
 
 #ifdef CONFIG_NVME_VERBOSE_ERRORS
-const unsigned char *nvme_get_error_status_str(u16 status);
-const unsigned char *nvme_get_opcode_str(u8 opcode);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
+const char *nvme_get_error_status_str(u16 status);
+const char *nvme_get_opcode_str(u8 opcode);
+const char *nvme_get_admin_opcode_str(u8 opcode);
+const char *nvme_get_fabrics_opcode_str(u8 opcode);
 #else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_get_error_status_str(u16 status)
+static inline const char *nvme_get_error_status_str(u16 status)
 {
        return "I/O Error";
 }
-static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+static inline const char *nvme_get_opcode_str(u8 opcode)
 {
        return "I/O Cmd";
 }
-static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+static inline const char *nvme_get_admin_opcode_str(u8 opcode)
 {
        return "Admin Cmd";
 }
 
-static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
 {
        return "Fabrics Cmd";
 }
 #endif /* CONFIG_NVME_VERBOSE_ERRORS */
 
-static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+static inline const char *nvme_opcode_str(int qid, u8 opcode)
 {
-       if (opcode == nvme_fabrics_command)
-               return nvme_get_fabrics_opcode_str(fctype);
        return qid ? nvme_get_opcode_str(opcode) :
                nvme_get_admin_opcode_str(opcode);
 }
+
+static inline const char *nvme_fabrics_opcode_str(
+               int qid, const struct nvme_command *cmd)
+{
+       if (nvme_is_fabrics(cmd))
+               return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
+
+       return nvme_opcode_str(qid, cmd->common.opcode);
+}
 #endif /* _NVME_H */
index 61af7ff1a9d6ba96f56f67ab6cdb3c5b5bf9be3b..e6267a6aa3801e5d76e7d1dc4a509ba0e9fc0159 100644 (file)
@@ -1284,6 +1284,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
        struct request *abort_req;
        struct nvme_command cmd = { };
        u32 csts = readl(dev->bar + NVME_REG_CSTS);
+       u8 opcode;
 
        /* If PCI error recovery process is happening, we cannot reset or
         * the recovery mechanism will surely fail.
@@ -1310,8 +1311,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 
        if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
                dev_warn(dev->ctrl.device,
-                        "I/O %d QID %d timeout, completion polled\n",
-                        req->tag, nvmeq->qid);
+                        "I/O tag %d (%04x) QID %d timeout, completion polled\n",
+                        req->tag, nvme_cid(req), nvmeq->qid);
                return BLK_EH_DONE;
        }
 
@@ -1327,8 +1328,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
                fallthrough;
        case NVME_CTRL_DELETING:
                dev_warn_ratelimited(dev->ctrl.device,
-                        "I/O %d QID %d timeout, disable controller\n",
-                        req->tag, nvmeq->qid);
+                        "I/O tag %d (%04x) QID %d timeout, disable controller\n",
+                        req->tag, nvme_cid(req), nvmeq->qid);
                nvme_req(req)->flags |= NVME_REQ_CANCELLED;
                nvme_dev_disable(dev, true);
                return BLK_EH_DONE;
@@ -1343,10 +1344,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
         * command was already aborted once before and still hasn't been
         * returned to the driver, or if this is the admin queue.
         */
+       opcode = nvme_req(req)->cmd->common.opcode;
        if (!nvmeq->qid || iod->aborted) {
                dev_warn(dev->ctrl.device,
-                        "I/O %d QID %d timeout, reset controller\n",
-                        req->tag, nvmeq->qid);
+                        "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
+                        req->tag, nvme_cid(req), opcode,
+                        nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
                nvme_req(req)->flags |= NVME_REQ_CANCELLED;
                goto disable;
        }
@@ -1362,10 +1365,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
        cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
 
        dev_warn(nvmeq->dev->ctrl.device,
-               "I/O %d (%s) QID %d timeout, aborting\n",
-                req->tag,
-                nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
-                nvmeq->qid);
+                "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
+                req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
+                nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
+                blk_rq_bytes(req));
 
        abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
                                         BLK_MQ_REQ_NOWAIT);
@@ -2743,10 +2746,10 @@ static void nvme_reset_work(struct work_struct *work)
         * controller around but remove all namespaces.
         */
        if (dev->online_queues > 1) {
+               nvme_dbbuf_set(dev);
                nvme_unquiesce_io_queues(&dev->ctrl);
                nvme_wait_freeze(&dev->ctrl);
                nvme_pci_update_nr_queues(dev);
-               nvme_dbbuf_set(dev);
                nvme_unfreeze(&dev->ctrl);
        } else {
                dev_warn(dev->ctrl.device, "IO queues lost\n");
@@ -3408,6 +3411,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1c5c, 0x174a),   /* SK Hynix P31 SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1c5c, 0x1D59),   /* SK Hynix BC901 */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x15b7, 0x2001),   /*  Sandisk Skyhawk */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1d97, 0x2263),   /* SPCC */
@@ -3538,5 +3543,6 @@ static void __exit nvme_exit(void)
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host PCIe transport driver");
 module_init(nvme_init);
 module_exit(nvme_exit);
index 391b1465ebfd5e0067dfb698f8bd199a9fe3d148..fc3eed00f9ff1196189415ef1bccd0a6c1e02551 100644 (file)
@@ -98,7 +98,7 @@ static int nvme_send_pr_command(struct block_device *bdev,
                struct nvme_command *c, void *data, unsigned int data_len)
 {
        if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
-           bdev->bd_disk->fops == &nvme_ns_head_ops)
+           nvme_disk_is_ns_head(bdev->bd_disk))
                return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
 
        return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
index c89503da24d7a8300ae21c22e3a58df8d382a668..20fdd40b1879f5796ab768acade2096eefde9e93 100644 (file)
@@ -1410,6 +1410,8 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
        struct nvme_ns *ns = rq->q->queuedata;
        struct bio *bio = rq->bio;
        struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+       struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+       u32 xfer_len;
        int nr;
 
        req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
@@ -1422,8 +1424,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
        if (unlikely(nr))
                goto mr_put;
 
-       nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
-                               req->mr->sig_attrs, ns->head->pi_type);
+       nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type);
        nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
 
        ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
@@ -1441,7 +1442,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
                     IB_ACCESS_REMOTE_WRITE;
 
        sg->addr = cpu_to_le64(req->mr->iova);
-       put_unaligned_le24(req->mr->length, sg->length);
+       xfer_len = req->mr->length;
+       /* Check if PI is added by the HW */
+       if (!pi_count)
+               xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size;
+       put_unaligned_le24(xfer_len, sg->length);
        put_unaligned_le32(req->mr->rkey, sg->key);
        sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
 
@@ -1946,9 +1951,13 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_queue *queue = req->queue;
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+       struct nvme_command *cmd = req->req.cmd;
+       int qid = nvme_rdma_queue_idx(queue);
 
-       dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
-                rq->tag, nvme_rdma_queue_idx(queue));
+       dev_warn(ctrl->ctrl.device,
+                "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
+                rq->tag, nvme_cid(rq), cmd->common.opcode,
+                nvme_fabrics_opcode_str(qid, cmd), qid);
 
        if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
                /*
@@ -2291,8 +2300,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        if (ret)
                goto out_uninit_ctrl;
 
-       dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
-               nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+       dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n",
+               nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
 
        mutex_lock(&nvme_rdma_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
@@ -2395,4 +2404,5 @@ static void __exit nvme_rdma_cleanup_module(void)
 module_init(nvme_rdma_init_module);
 module_exit(nvme_rdma_cleanup_module);
 
+MODULE_DESCRIPTION("NVMe host RDMA transport driver");
 MODULE_LICENSE("GPL v2");
index ac24ad102380600cef13428eb0b3e31c0e32fecc..f2832f70e7e0a861070d066bf1ee71c19fbf5ae2 100644 (file)
@@ -35,16 +35,71 @@ static ssize_t nvme_sysfs_rescan(struct device *dev,
 }
 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
 
+static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf,
+                         ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+
+static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+       bool passthru_err_log_enabled;
+       int err;
+
+       err = kstrtobool(buf, &passthru_err_log_enabled);
+       if (err)
+               return -EINVAL;
+
+       ctrl->passthru_err_log_enabled = passthru_err_log_enabled;
+
+       return count;
+}
+
 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
 {
        struct gendisk *disk = dev_to_disk(dev);
 
-       if (disk->fops == &nvme_bdev_ops)
-               return nvme_get_ns_from_dev(dev)->head;
-       else
+       if (nvme_disk_is_ns_head(disk))
                return disk->private_data;
+       return nvme_get_ns_from_dev(dev)->head;
 }
 
+static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nvme_ns_head *head = dev_to_ns_head(dev);
+
+       return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+
+static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct nvme_ns_head *head = dev_to_ns_head(dev);
+       bool passthru_err_log_enabled;
+       int err;
+
+       err = kstrtobool(buf, &passthru_err_log_enabled);
+       if (err)
+               return -EINVAL;
+       head->passthru_err_log_enabled = passthru_err_log_enabled;
+
+       return count;
+}
+
+static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
+       __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
+       nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);
+
+static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
+       __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
+       nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);
+
 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
@@ -209,6 +264,7 @@ static struct attribute *nvme_ns_attrs[] = {
        &dev_attr_ana_grpid.attr,
        &dev_attr_ana_state.attr,
 #endif
+       &dev_attr_io_passthru_err_log_enabled.attr,
        NULL,
 };
 
@@ -233,7 +289,8 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
        }
 #ifdef CONFIG_NVME_MULTIPATH
        if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
-               if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
+               /* per-path attr */
+               if (nvme_disk_is_ns_head(dev_to_disk(dev)))
                        return 0;
                if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
                        return 0;
@@ -311,6 +368,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
                                     char *buf)
 {
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+       unsigned state = (unsigned)nvme_ctrl_state(ctrl);
        static const char *const state_name[] = {
                [NVME_CTRL_NEW]         = "new",
                [NVME_CTRL_LIVE]        = "live",
@@ -321,9 +379,8 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
                [NVME_CTRL_DEAD]        = "dead",
        };
 
-       if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
-           state_name[ctrl->state])
-               return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
+       if (state < ARRAY_SIZE(state_name) && state_name[state])
+               return sysfs_emit(buf, "%s\n", state_name[state]);
 
        return sysfs_emit(buf, "unknown state\n");
 }
@@ -655,6 +712,7 @@ static struct attribute *nvme_dev_attrs[] = {
 #ifdef CONFIG_NVME_TCP_TLS
        &dev_attr_tls_key.attr,
 #endif
+       &dev_attr_adm_passthru_err_log_enabled.attr,
        NULL
 };
 
index 08805f0278106483c10b2b9c787aa35c36e4dcbe..a6d596e05602117ff9c38fbcb86645bda4016c59 100644 (file)
@@ -1922,14 +1922,13 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
                                                      ctrl->opts->subsysnqn);
                if (!pskid) {
                        dev_err(ctrl->device, "no valid PSK found\n");
-                       ret = -ENOKEY;
-                       goto out_free_queue;
+                       return -ENOKEY;
                }
        }
 
        ret = nvme_tcp_alloc_queue(ctrl, 0, pskid);
        if (ret)
-               goto out_free_queue;
+               return ret;
 
        ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
        if (ret)
@@ -2429,13 +2428,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
        struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
-       u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
+       struct nvme_command *cmd = &pdu->cmd;
        int qid = nvme_tcp_queue_id(req->queue);
 
        dev_warn(ctrl->device,
-               "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
-               nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
-               opc, nvme_opcode_str(qid, opc, fctype));
+                "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
+                rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
+                nvme_fabrics_opcode_str(qid, cmd), qid);
 
        if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
                /*
@@ -2754,8 +2753,8 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
        if (ret)
                goto out_uninit_ctrl;
 
-       dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
-               nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+       dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
+               nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
 
        mutex_lock(&nvme_tcp_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
@@ -2827,4 +2826,5 @@ static void __exit nvme_tcp_cleanup_module(void)
 module_init(nvme_tcp_init_module);
 module_exit(nvme_tcp_cleanup_module);
 
+MODULE_DESCRIPTION("NVMe host TCP transport driver");
 MODULE_LICENSE("GPL v2");
index d26aa30f87026058fb23a1df97d10c1fe7fafbda..8658e9c08534df50c466314c6c70d18d79525324 100644 (file)
@@ -248,7 +248,7 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
                nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
                if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
                        continue;
-               nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+               nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
                                NVME_AER_NOTICE_NS_CHANGED,
                                NVME_LOG_CHANGED_NS);
        }
@@ -265,7 +265,7 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys,
                        continue;
                if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
                        continue;
-               nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+               nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
                                NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
        }
        mutex_unlock(&subsys->lock);
@@ -1705,4 +1705,5 @@ static void __exit nvmet_exit(void)
 module_init(nvmet_init);
 module_exit(nvmet_exit);
 
+MODULE_DESCRIPTION("NVMe target core framework");
 MODULE_LICENSE("GPL v2");
index 668d257fa98636dc1785e7b5f6bb6b35e8188ab9..68e82ccc0e4e38ffcb2018cce0080741a5984925 100644 (file)
@@ -21,7 +21,7 @@ static void __nvmet_disc_changed(struct nvmet_port *port,
        if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
                return;
 
-       nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+       nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
                              NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
 }
 
index bd59990b525016fb05945d4f65aa199aab61da43..fd229f310c931fbfd6c3132185f2b73c135cd633 100644 (file)
@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
        struct nvmet_fc_port_entry      *pe;
        struct kref                     ref;
        u32                             max_sg_cnt;
+
+       struct work_struct              put_work;
 };
 
 struct nvmet_fc_port_entry {
@@ -145,7 +147,6 @@ struct nvmet_fc_tgt_queue {
        struct list_head                avail_defer_list;
        struct workqueue_struct         *work_q;
        struct kref                     ref;
-       struct rcu_head                 rcu;
        /* array of fcp_iods */
        struct nvmet_fc_fcp_iod         fod[] __counted_by(sqsize);
 } __aligned(sizeof(unsigned long long));
@@ -166,10 +167,9 @@ struct nvmet_fc_tgt_assoc {
        struct nvmet_fc_hostport        *hostport;
        struct nvmet_fc_ls_iod          *rcv_disconn;
        struct list_head                a_list;
-       struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
+       struct nvmet_fc_tgt_queue       *queues[NVMET_NR_QUEUES + 1];
        struct kref                     ref;
        struct work_struct              del_work;
-       struct rcu_head                 rcu;
 };
 
 
@@ -249,6 +249,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+{
+       struct nvmet_fc_tgtport *tgtport =
+               container_of(work, struct nvmet_fc_tgtport, put_work);
+
+       nvmet_fc_tgtport_put(tgtport);
+}
 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
                                        struct nvmet_fc_fcp_iod *fod);
@@ -360,7 +367,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
 
        if (!lsop->req_queued) {
                spin_unlock_irqrestore(&tgtport->lock, flags);
-               return;
+               goto out_putwork;
        }
 
        list_del(&lsop->lsreq_list);
@@ -373,7 +380,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
                                  (lsreq->rqstlen + lsreq->rsplen),
                                  DMA_BIDIRECTIONAL);
 
-       nvmet_fc_tgtport_put(tgtport);
+out_putwork:
+       queue_work(nvmet_wq, &tgtport->put_work);
 }
 
 static int
@@ -489,8 +497,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
         * message is normal. Otherwise, send unless the hostport has
         * already been invalidated by the lldd.
         */
-       if (!tgtport->ops->ls_req || !assoc->hostport ||
-           assoc->hostport->invalid)
+       if (!tgtport->ops->ls_req || assoc->hostport->invalid)
                return;
 
        lsop = kzalloc((sizeof(*lsop) +
@@ -802,14 +809,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
        if (!queue)
                return NULL;
 
-       if (!nvmet_fc_tgt_a_get(assoc))
-               goto out_free_queue;
-
        queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
                                assoc->tgtport->fc_target_port.port_num,
                                assoc->a_id, qid);
        if (!queue->work_q)
-               goto out_a_put;
+               goto out_free_queue;
 
        queue->qid = qid;
        queue->sqsize = sqsize;
@@ -831,15 +835,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
                goto out_fail_iodlist;
 
        WARN_ON(assoc->queues[qid]);
-       rcu_assign_pointer(assoc->queues[qid], queue);
+       assoc->queues[qid] = queue;
 
        return queue;
 
 out_fail_iodlist:
        nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
        destroy_workqueue(queue->work_q);
-out_a_put:
-       nvmet_fc_tgt_a_put(assoc);
 out_free_queue:
        kfree(queue);
        return NULL;
@@ -852,15 +854,11 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
        struct nvmet_fc_tgt_queue *queue =
                container_of(ref, struct nvmet_fc_tgt_queue, ref);
 
-       rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
-
        nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
 
-       nvmet_fc_tgt_a_put(queue->assoc);
-
        destroy_workqueue(queue->work_q);
 
-       kfree_rcu(queue, rcu);
+       kfree(queue);
 }
 
 static void
@@ -969,7 +967,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
        rcu_read_lock();
        list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
                if (association_id == assoc->association_id) {
-                       queue = rcu_dereference(assoc->queues[qid]);
+                       queue = assoc->queues[qid];
                        if (queue &&
                            (!atomic_read(&queue->connected) ||
                             !nvmet_fc_tgt_q_get(queue)))
@@ -1031,7 +1029,7 @@ nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
        list_for_each_entry(host, &tgtport->host_list, host_list) {
                if (host->hosthandle == hosthandle && !host->invalid) {
                        if (nvmet_fc_hostport_get(host))
-                               return (host);
+                               return host;
                }
        }
 
@@ -1078,8 +1076,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
                /* new allocation not needed */
                kfree(newhost);
                newhost = match;
-               /* no new allocation - release reference */
-               nvmet_fc_tgtport_put(tgtport);
        } else {
                newhost->tgtport = tgtport;
                newhost->hosthandle = hosthandle;
@@ -1094,23 +1090,54 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 }
 
 static void
-nvmet_fc_delete_assoc(struct work_struct *work)
+nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+       nvmet_fc_delete_target_assoc(assoc);
+       nvmet_fc_tgt_a_put(assoc);
+}
+
+static void
+nvmet_fc_delete_assoc_work(struct work_struct *work)
 {
        struct nvmet_fc_tgt_assoc *assoc =
                container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+       struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
 
-       nvmet_fc_delete_target_assoc(assoc);
-       nvmet_fc_tgt_a_put(assoc);
+       nvmet_fc_delete_assoc(assoc);
+       nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+       nvmet_fc_tgtport_get(assoc->tgtport);
+       queue_work(nvmet_wq, &assoc->del_work);
+}
+
+static bool
+nvmet_fc_assoc_exits(struct nvmet_fc_tgtport *tgtport, u64 association_id)
+{
+       struct nvmet_fc_tgt_assoc *a;
+
+       list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
+               if (association_id == a->association_id)
+                       return true;
+       }
+
+       return false;
 }
 
 static struct nvmet_fc_tgt_assoc *
 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 {
-       struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
+       struct nvmet_fc_tgt_assoc *assoc;
        unsigned long flags;
+       bool done;
        u64 ran;
        int idx;
-       bool needrandom = true;
+
+       if (!tgtport->pe)
+               return NULL;
 
        assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
        if (!assoc)
@@ -1120,43 +1147,35 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
        if (idx < 0)
                goto out_free_assoc;
 
-       if (!nvmet_fc_tgtport_get(tgtport))
-               goto out_ida;
-
        assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
        if (IS_ERR(assoc->hostport))
-               goto out_put;
+               goto out_ida;
 
        assoc->tgtport = tgtport;
        assoc->a_id = idx;
        INIT_LIST_HEAD(&assoc->a_list);
        kref_init(&assoc->ref);
-       INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+       INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
        atomic_set(&assoc->terminating, 0);
 
-       while (needrandom) {
+       done = false;
+       do {
                get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
                ran = ran << BYTES_FOR_QID_SHIFT;
 
                spin_lock_irqsave(&tgtport->lock, flags);
-               needrandom = false;
-               list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
-                       if (ran == tmpassoc->association_id) {
-                               needrandom = true;
-                               break;
-                       }
-               }
-               if (!needrandom) {
+               rcu_read_lock();
+               if (!nvmet_fc_assoc_exits(tgtport, ran)) {
                        assoc->association_id = ran;
                        list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
+                       done = true;
                }
+               rcu_read_unlock();
                spin_unlock_irqrestore(&tgtport->lock, flags);
-       }
+       } while (!done);
 
        return assoc;
 
-out_put:
-       nvmet_fc_tgtport_put(tgtport);
 out_ida:
        ida_free(&tgtport->assoc_cnt, idx);
 out_free_assoc:
@@ -1172,13 +1191,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
        struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
        struct nvmet_fc_ls_iod  *oldls;
        unsigned long flags;
+       int i;
+
+       for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+               if (assoc->queues[i])
+                       nvmet_fc_delete_target_queue(assoc->queues[i]);
+       }
 
        /* Send Disconnect now that all i/o has completed */
        nvmet_fc_xmt_disconnect_assoc(assoc);
 
        nvmet_fc_free_hostport(assoc->hostport);
        spin_lock_irqsave(&tgtport->lock, flags);
-       list_del_rcu(&assoc->a_list);
        oldls = assoc->rcv_disconn;
        spin_unlock_irqrestore(&tgtport->lock, flags);
        /* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1188,8 +1212,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
        dev_info(tgtport->dev,
                "{%d:%d} Association freed\n",
                tgtport->fc_target_port.port_num, assoc->a_id);
-       kfree_rcu(assoc, rcu);
-       nvmet_fc_tgtport_put(tgtport);
+       kfree(assoc);
 }
 
 static void
@@ -1208,7 +1231,7 @@ static void
 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
 {
        struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
-       struct nvmet_fc_tgt_queue *queue;
+       unsigned long flags;
        int i, terminating;
 
        terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1217,29 +1240,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
        if (terminating)
                return;
 
+       spin_lock_irqsave(&tgtport->lock, flags);
+       list_del_rcu(&assoc->a_list);
+       spin_unlock_irqrestore(&tgtport->lock, flags);
 
-       for (i = NVMET_NR_QUEUES; i >= 0; i--) {
-               rcu_read_lock();
-               queue = rcu_dereference(assoc->queues[i]);
-               if (!queue) {
-                       rcu_read_unlock();
-                       continue;
-               }
+       synchronize_rcu();
 
-               if (!nvmet_fc_tgt_q_get(queue)) {
-                       rcu_read_unlock();
-                       continue;
-               }
-               rcu_read_unlock();
-               nvmet_fc_delete_target_queue(queue);
-               nvmet_fc_tgt_q_put(queue);
+       /* ensure all in-flight I/Os have been processed */
+       for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+               if (assoc->queues[i])
+                       flush_workqueue(assoc->queues[i]->work_q);
        }
 
        dev_info(tgtport->dev,
                "{%d:%d} Association deleted\n",
                tgtport->fc_target_port.port_num, assoc->a_id);
-
-       nvmet_fc_tgt_a_put(assoc);
 }
 
 static struct nvmet_fc_tgt_assoc *
@@ -1415,6 +1430,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
        kref_init(&newrec->ref);
        ida_init(&newrec->assoc_cnt);
        newrec->max_sg_cnt = template->max_sgl_segments;
+       INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
 
        ret = nvmet_fc_alloc_ls_iodlist(newrec);
        if (ret) {
@@ -1492,9 +1508,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
        list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
                if (!nvmet_fc_tgt_a_get(assoc))
                        continue;
-               if (!queue_work(nvmet_wq, &assoc->del_work))
-                       /* already deleting - release local reference */
-                       nvmet_fc_tgt_a_put(assoc);
+               nvmet_fc_schedule_delete_assoc(assoc);
+               nvmet_fc_tgt_a_put(assoc);
        }
        rcu_read_unlock();
 }
@@ -1540,16 +1555,14 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
        spin_lock_irqsave(&tgtport->lock, flags);
        list_for_each_entry_safe(assoc, next,
                                &tgtport->assoc_list, a_list) {
-               if (!assoc->hostport ||
-                   assoc->hostport->hosthandle != hosthandle)
+               if (assoc->hostport->hosthandle != hosthandle)
                        continue;
                if (!nvmet_fc_tgt_a_get(assoc))
                        continue;
                assoc->hostport->invalid = 1;
                noassoc = false;
-               if (!queue_work(nvmet_wq, &assoc->del_work))
-                       /* already deleting - release local reference */
-                       nvmet_fc_tgt_a_put(assoc);
+               nvmet_fc_schedule_delete_assoc(assoc);
+               nvmet_fc_tgt_a_put(assoc);
        }
        spin_unlock_irqrestore(&tgtport->lock, flags);
 
@@ -1581,7 +1594,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 
                rcu_read_lock();
                list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
-                       queue = rcu_dereference(assoc->queues[0]);
+                       queue = assoc->queues[0];
                        if (queue && queue->nvme_sq.ctrl == ctrl) {
                                if (nvmet_fc_tgt_a_get(assoc))
                                        found_ctrl = true;
@@ -1593,9 +1606,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
                nvmet_fc_tgtport_put(tgtport);
 
                if (found_ctrl) {
-                       if (!queue_work(nvmet_wq, &assoc->del_work))
-                               /* already deleting - release local reference */
-                               nvmet_fc_tgt_a_put(assoc);
+                       nvmet_fc_schedule_delete_assoc(assoc);
+                       nvmet_fc_tgt_a_put(assoc);
                        return;
                }
 
@@ -1625,6 +1637,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
        /* terminate any outstanding associations */
        __nvmet_fc_free_assocs(tgtport);
 
+       flush_workqueue(nvmet_wq);
+
        /*
         * should terminate LS's as well. However, LS's will be generated
         * at the tail end of association termination, so they likely don't
@@ -1870,9 +1884,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                                sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
                        FCNVME_LS_DISCONNECT_ASSOC);
 
-       /* release get taken in nvmet_fc_find_target_assoc */
-       nvmet_fc_tgt_a_put(assoc);
-
        /*
         * The rules for LS response says the response cannot
         * go back until ABTS's have been sent for all outstanding
@@ -1887,8 +1898,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
        assoc->rcv_disconn = iod;
        spin_unlock_irqrestore(&tgtport->lock, flags);
 
-       nvmet_fc_delete_target_assoc(assoc);
-
        if (oldls) {
                dev_info(tgtport->dev,
                        "{%d:%d} Multiple Disconnect Association LS's "
@@ -1904,6 +1913,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                nvmet_fc_xmt_ls_rsp(tgtport, oldls);
        }
 
+       nvmet_fc_schedule_delete_assoc(assoc);
+       nvmet_fc_tgt_a_put(assoc);
+
        return false;
 }
 
@@ -2540,8 +2552,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 
        fod->req.cmd = &fod->cmdiubuf.sqe;
        fod->req.cqe = &fod->rspiubuf.cqe;
-       if (tgtport->pe)
-               fod->req.port = tgtport->pe->port;
+       if (!tgtport->pe)
+               goto transport_error;
+       fod->req.port = tgtport->pe->port;
 
        /* clear any response payload */
        memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2902,6 +2915,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)
 
        nvmet_fc_portentry_unbind(pe);
 
+       /* terminate any outstanding associations */
+       __nvmet_fc_free_assocs(pe->tgtport);
+
        kfree(pe);
 }
 
@@ -2933,6 +2949,9 @@ static int __init nvmet_fc_init_module(void)
 
 static void __exit nvmet_fc_exit_module(void)
 {
+       /* ensure any shutdown operation, e.g. delete ctrls have finished */
+       flush_workqueue(nvmet_wq);
+
        /* sanity check - all lports should be removed */
        if (!list_empty(&nvmet_fc_target_list))
                pr_warn("%s: targetport list not empty\n", __func__);
@@ -2945,4 +2964,5 @@ static void __exit nvmet_fc_exit_module(void)
 module_init(nvmet_fc_init_module);
 module_exit(nvmet_fc_exit_module);
 
+MODULE_DESCRIPTION("NVMe target FC transport driver");
 MODULE_LICENSE("GPL v2");
index c65a73433c05f643654616175d5fc9229af753e7..1471af250ea62267a812bdb402b05ff9099cfcb6 100644 (file)
@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
        if (!rport->targetport) {
                tls_req->status = -ECONNREFUSED;
                spin_lock(&rport->lock);
-               list_add_tail(&rport->ls_list, &tls_req->ls_list);
+               list_add_tail(&tls_req->ls_list, &rport->ls_list);
                spin_unlock(&rport->lock);
                queue_work(nvmet_wq, &rport->ls_work);
                return ret;
@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
        if (remoteport) {
                rport = remoteport->private;
                spin_lock(&rport->lock);
-               list_add_tail(&rport->ls_list, &tls_req->ls_list);
+               list_add_tail(&tls_req->ls_list, &rport->ls_list);
                spin_unlock(&rport->lock);
                queue_work(nvmet_wq, &rport->ls_work);
        }
@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
        if (!tport->remoteport) {
                tls_req->status = -ECONNREFUSED;
                spin_lock(&tport->lock);
-               list_add_tail(&tport->ls_list, &tls_req->ls_list);
+               list_add_tail(&tls_req->ls_list, &tport->ls_list);
                spin_unlock(&tport->lock);
                queue_work(nvmet_wq, &tport->ls_work);
                return ret;
@@ -995,11 +995,6 @@ fcloop_nport_free(struct kref *ref)
 {
        struct fcloop_nport *nport =
                container_of(ref, struct fcloop_nport, ref);
-       unsigned long flags;
-
-       spin_lock_irqsave(&fcloop_lock, flags);
-       list_del(&nport->nport_list);
-       spin_unlock_irqrestore(&fcloop_lock, flags);
 
        kfree(nport);
 }
@@ -1357,6 +1352,8 @@ __unlink_remote_port(struct fcloop_nport *nport)
                nport->tport->remoteport = NULL;
        nport->rport = NULL;
 
+       list_del(&nport->nport_list);
+
        return rport;
 }
 
@@ -1653,4 +1650,5 @@ static void __exit fcloop_exit(void)
 module_init(fcloop_init);
 module_exit(fcloop_exit);
 
+MODULE_DESCRIPTION("NVMe target FC loop transport driver");
 MODULE_LICENSE("GPL v2");
index 9cb434c5807514813afe91eada69c0a925daf83a..e589915ddef85cf5f67fcba50deed724b37616d3 100644 (file)
@@ -400,7 +400,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
        }
 
        nvme_quiesce_admin_queue(&ctrl->ctrl);
-       if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+       if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
                nvme_disable_ctrl(&ctrl->ctrl, true);
 
        nvme_cancel_admin_tagset(&ctrl->ctrl);
@@ -434,8 +434,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
        nvme_loop_shutdown_ctrl(ctrl);
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
-               if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
-                   ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+               enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+               if (state != NVME_CTRL_DELETING &&
+                   state != NVME_CTRL_DELETING_NOIO)
                        /* state change failure for non-deleted ctrl? */
                        WARN_ON_ONCE(1);
                return;
@@ -688,5 +690,6 @@ static void __exit nvme_loop_cleanup_module(void)
 module_init(nvme_loop_init_module);
 module_exit(nvme_loop_cleanup_module);
 
+MODULE_DESCRIPTION("NVMe target loop transport driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
index 4597bca43a6d87269f557dfa3b35d47da8031ff1..3a0f2c170f4c16f6c1fa6f09c2cac948403550fb 100644 (file)
@@ -37,6 +37,8 @@
 #define NVMET_RDMA_MAX_MDTS                    8
 #define NVMET_RDMA_MAX_METADATA_MDTS           5
 
+#define NVMET_RDMA_BACKLOG 128
+
 struct nvmet_rdma_srq;
 
 struct nvmet_rdma_cmd {
@@ -1583,8 +1585,19 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
        }
 
        if (queue->host_qid == 0) {
-               /* Let inflight controller teardown complete */
-               flush_workqueue(nvmet_wq);
+               struct nvmet_rdma_queue *q;
+               int pending = 0;
+
+               /* Check for pending controller teardown */
+               mutex_lock(&nvmet_rdma_queue_mutex);
+               list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) {
+                       if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl &&
+                           q->state == NVMET_RDMA_Q_DISCONNECTING)
+                               pending++;
+               }
+               mutex_unlock(&nvmet_rdma_queue_mutex);
+               if (pending > NVMET_RDMA_BACKLOG)
+                       return NVME_SC_CONNECT_CTRL_BUSY;
        }
 
        ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1880,7 +1893,7 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
                goto out_destroy_id;
        }
 
-       ret = rdma_listen(cm_id, 128);
+       ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG);
        if (ret) {
                pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
                goto out_destroy_id;
@@ -2091,5 +2104,6 @@ static void __exit nvmet_rdma_exit(void)
 module_init(nvmet_rdma_init);
 module_exit(nvmet_rdma_exit);
 
+MODULE_DESCRIPTION("NVMe target RDMA transport driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
index 4cc27856aa8fefc53d2a77044ea3a3ef927c8ba5..c8655fc5aa5b8aac838cb4c2e4c0a76c8ebbc174 100644 (file)
@@ -24,6 +24,8 @@
 #include "nvmet.h"
 
 #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+#define NVMET_TCP_MAXH2CDATA           0x400000 /* 16M arbitrary limit */
+#define NVMET_TCP_BACKLOG 128
 
 static int param_store_val(const char *str, int *val, int min, int max)
 {
@@ -923,7 +925,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
        icresp->hdr.pdo = 0;
        icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
        icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
-       icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
+       icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
        icresp->cpda = 0;
        if (queue->hdr_digest)
                icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -978,13 +980,13 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
 {
        struct nvme_tcp_data_pdu *data = &queue->pdu.data;
        struct nvmet_tcp_cmd *cmd;
+       unsigned int exp_data_len;
 
        if (likely(queue->nr_cmds)) {
                if (unlikely(data->ttag >= queue->nr_cmds)) {
                        pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
                                queue->idx, data->ttag, queue->nr_cmds);
-                       nvmet_tcp_fatal_error(queue);
-                       return -EPROTO;
+                       goto err_proto;
                }
                cmd = &queue->cmds[data->ttag];
        } else {
@@ -995,19 +997,32 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
                pr_err("ttag %u unexpected data offset %u (expected %u)\n",
                        data->ttag, le32_to_cpu(data->data_offset),
                        cmd->rbytes_done);
-               /* FIXME: use path and transport errors */
-               nvmet_req_complete(&cmd->req,
-                       NVME_SC_INVALID_FIELD | NVME_SC_DNR);
-               return -EPROTO;
+               goto err_proto;
        }
 
+       exp_data_len = le32_to_cpu(data->hdr.plen) -
+                       nvmet_tcp_hdgst_len(queue) -
+                       nvmet_tcp_ddgst_len(queue) -
+                       sizeof(*data);
+
        cmd->pdu_len = le32_to_cpu(data->data_length);
+       if (unlikely(cmd->pdu_len != exp_data_len ||
+                    cmd->pdu_len == 0 ||
+                    cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
+               pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+               goto err_proto;
+       }
        cmd->pdu_recv = 0;
        nvmet_tcp_build_pdu_iovec(cmd);
        queue->cmd = cmd;
        queue->rcv_state = NVMET_TCP_RECV_DATA;
 
        return 0;
+
+err_proto:
+       /* FIXME: use proper transport errors */
+       nvmet_tcp_fatal_error(queue);
+       return -EPROTO;
 }
 
 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
@@ -1768,7 +1783,7 @@ static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
                 (int)sizeof(struct nvme_tcp_icreq_pdu));
        if (hdr->type == nvme_tcp_icreq &&
            hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
-           hdr->plen == (__le32)sizeof(struct nvme_tcp_icreq_pdu)) {
+           hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) {
                pr_debug("queue %d: icreq detected\n",
                         queue->idx);
                return len;
@@ -2053,7 +2068,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
                goto err_sock;
        }
 
-       ret = kernel_listen(port->sock, 128);
+       ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
        if (ret) {
                pr_err("failed to listen %d on port sock\n", ret);
                goto err_sock;
@@ -2119,8 +2134,19 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
                container_of(sq, struct nvmet_tcp_queue, nvme_sq);
 
        if (sq->qid == 0) {
-               /* Let inflight controller teardown complete */
-               flush_workqueue(nvmet_wq);
+               struct nvmet_tcp_queue *q;
+               int pending = 0;
+
+               /* Check for pending controller teardown */
+               mutex_lock(&nvmet_tcp_queue_mutex);
+               list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
+                       if (q->nvme_sq.ctrl == sq->ctrl &&
+                           q->state == NVMET_TCP_Q_DISCONNECTING)
+                               pending++;
+               }
+               mutex_unlock(&nvmet_tcp_queue_mutex);
+               if (pending > NVMET_TCP_BACKLOG)
+                       return NVME_SC_CONNECT_CTRL_BUSY;
        }
 
        queue->nr_cmds = sq->size * 2;
@@ -2190,10 +2216,12 @@ static void __exit nvmet_tcp_exit(void)
        flush_workqueue(nvmet_wq);
 
        destroy_workqueue(nvmet_tcp_wq);
+       ida_destroy(&nvmet_tcp_queue_ida);
 }
 
 module_init(nvmet_tcp_init);
 module_exit(nvmet_tcp_exit);
 
+MODULE_DESCRIPTION("NVMe target TCP transport driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
index bff454d46255b42162667b12a193dc8b7205469a..6ee1f3db81d04071e761b39640e573c9770aa32f 100644 (file)
@@ -211,7 +211,7 @@ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
        return ret;
 }
 
-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
+const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
 {
        const char *ret = trace_seq_buffer_ptr(p);
 
@@ -224,8 +224,8 @@ const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
         * If we can know the extra data of the connect command in this stage,
         * we can update this print statement later.
         */
-       if (ctrl)
-               trace_seq_printf(p, "%d", ctrl->cntlid);
+       if (ctrl_id)
+               trace_seq_printf(p, "%d", ctrl_id);
        else
                trace_seq_printf(p, "_");
        trace_seq_putc(p, 0);
index 6109b3806b12be7dae3d429c083d1fa49ba92c05..7f7ebf9558e505fe83b5ea1d98f52dd9cd3d2dca 100644 (file)
@@ -32,18 +32,24 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
         nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) :                  \
         nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
 
-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
-#define __print_ctrl_name(ctrl)                                \
-       nvmet_trace_ctrl_name(p, ctrl)
+const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id);
+#define __print_ctrl_id(ctrl_id)                       \
+       nvmet_trace_ctrl_id(p, ctrl_id)
 
 const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
 #define __print_disk_name(name)                                \
        nvmet_trace_disk_name(p, name)
 
 #ifndef TRACE_HEADER_MULTI_READ
-static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
+static inline u16 nvmet_req_to_ctrl_id(struct nvmet_req *req)
 {
-       return req->sq->ctrl;
+       /*
+        * The queue and controller pointers are not valid until an association
+        * has been established.
+        */
+       if (!req->sq || !req->sq->ctrl)
+               return 0;
+       return req->sq->ctrl->cntlid;
 }
 
 static inline void __assign_req_name(char *name, struct nvmet_req *req)
@@ -53,8 +59,7 @@ static inline void __assign_req_name(char *name, struct nvmet_req *req)
                return;
        }
 
-       strncpy(name, req->ns->device_path,
-               min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
+       strscpy_pad(name, req->ns->device_path, DISK_NAME_LEN);
 }
 #endif
 
@@ -63,7 +68,7 @@ TRACE_EVENT(nvmet_req_init,
        TP_ARGS(req, cmd),
        TP_STRUCT__entry(
                __field(struct nvme_command *, cmd)
-               __field(struct nvmet_ctrl *, ctrl)
+               __field(u16, ctrl_id)
                __array(char, disk, DISK_NAME_LEN)
                __field(int, qid)
                __field(u16, cid)
@@ -76,7 +81,7 @@ TRACE_EVENT(nvmet_req_init,
        ),
        TP_fast_assign(
                __entry->cmd = cmd;
-               __entry->ctrl = nvmet_req_to_ctrl(req);
+               __entry->ctrl_id = nvmet_req_to_ctrl_id(req);
                __assign_req_name(__entry->disk, req);
                __entry->qid = req->sq->qid;
                __entry->cid = cmd->common.command_id;
@@ -85,12 +90,12 @@ TRACE_EVENT(nvmet_req_init,
                __entry->flags = cmd->common.flags;
                __entry->nsid = le32_to_cpu(cmd->common.nsid);
                __entry->metadata = le64_to_cpu(cmd->common.metadata);
-               memcpy(__entry->cdw10, &cmd->common.cdw10,
+               memcpy(__entry->cdw10, &cmd->common.cdws,
                        sizeof(__entry->cdw10));
        ),
        TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
                  "meta=%#llx, cmd=(%s, %s)",
-               __print_ctrl_name(__entry->ctrl),
+               __print_ctrl_id(__entry->ctrl_id),
                __print_disk_name(__entry->disk),
                __entry->qid, __entry->cid, __entry->nsid,
                __entry->flags, __entry->metadata,
@@ -104,7 +109,7 @@ TRACE_EVENT(nvmet_req_complete,
        TP_PROTO(struct nvmet_req *req),
        TP_ARGS(req),
        TP_STRUCT__entry(
-               __field(struct nvmet_ctrl *, ctrl)
+               __field(u16, ctrl_id)
                __array(char, disk, DISK_NAME_LEN)
                __field(int, qid)
                __field(int, cid)
@@ -112,7 +117,7 @@ TRACE_EVENT(nvmet_req_complete,
                __field(u16, status)
        ),
        TP_fast_assign(
-               __entry->ctrl = nvmet_req_to_ctrl(req);
+               __entry->ctrl_id = nvmet_req_to_ctrl_id(req);
                __entry->qid = req->cq->qid;
                __entry->cid = req->cqe->command_id;
                __entry->result = le64_to_cpu(req->cqe->result.u64);
@@ -120,7 +125,7 @@ TRACE_EVENT(nvmet_req_complete,
                __assign_req_name(__entry->disk, req);
        ),
        TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
-               __print_ctrl_name(__entry->ctrl),
+               __print_ctrl_id(__entry->ctrl_id),
                __print_disk_name(__entry->disk),
                __entry->qid, __entry->cid, __entry->result, __entry->status)
 
index 5bc9c4874fe3bb1faa55ded3d24d5af7546f537a..283134498fbc3315c747c4e444ecb60d71e83cd5 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 menuconfig NVMEM
        bool "NVMEM Support"
+       imply NVMEM_LAYOUTS
        help
          Support for NVMEM(Non Volatile Memory) devices like EEPROM, EFUSES...
 
index 423baf089515c2b4acbd1adf65fbe3912094b58d..cdd01fbf1313b58c61f71066997e2f0fadaae823 100644 (file)
@@ -5,6 +5,8 @@
 
 obj-$(CONFIG_NVMEM)            += nvmem_core.o
 nvmem_core-y                   := core.o
+obj-$(CONFIG_NVMEM_LAYOUTS)    += nvmem_layouts.o
+nvmem_layouts-y                        := layouts.o
 obj-y                          += layouts/
 
 # Devices
index 608b352a7d91fd6b4122517aa747a0fd6f5a30d5..980123fb4dde05d0e5cd4e0cfe5645b24a8d55dc 100644 (file)
 #include <linux/of.h>
 #include <linux/slab.h>
 
-struct nvmem_device {
-       struct module           *owner;
-       struct device           dev;
-       int                     stride;
-       int                     word_size;
-       int                     id;
-       struct kref             refcnt;
-       size_t                  size;
-       bool                    read_only;
-       bool                    root_only;
-       int                     flags;
-       enum nvmem_type         type;
-       struct bin_attribute    eeprom;
-       struct device           *base_dev;
-       struct list_head        cells;
-       const struct nvmem_keepout *keepout;
-       unsigned int            nkeepout;
-       nvmem_reg_read_t        reg_read;
-       nvmem_reg_write_t       reg_write;
-       struct gpio_desc        *wp_gpio;
-       struct nvmem_layout     *layout;
-       void *priv;
-};
+#include "internals.h"
 
 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
 
@@ -77,9 +55,6 @@ static LIST_HEAD(nvmem_lookup_list);
 
 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
 
-static DEFINE_SPINLOCK(nvmem_layout_lock);
-static LIST_HEAD(nvmem_layouts);
-
 static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
                            void *val, size_t bytes)
 {
@@ -324,6 +299,43 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
        return nvmem_bin_attr_get_umode(nvmem);
 }
 
+static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
+                                           const char *id, int index);
+
+static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
+                                   struct bin_attribute *attr, char *buf,
+                                   loff_t pos, size_t count)
+{
+       struct nvmem_cell_entry *entry;
+       struct nvmem_cell *cell = NULL;
+       size_t cell_sz, read_len;
+       void *content;
+
+       entry = attr->private;
+       cell = nvmem_create_cell(entry, entry->name, 0);
+       if (IS_ERR(cell))
+               return PTR_ERR(cell);
+
+       if (!cell)
+               return -EINVAL;
+
+       content = nvmem_cell_read(cell, &cell_sz);
+       if (IS_ERR(content)) {
+               read_len = PTR_ERR(content);
+               goto destroy_cell;
+       }
+
+       read_len = min_t(unsigned int, cell_sz - pos, count);
+       memcpy(buf, content + pos, read_len);
+       kfree(content);
+
+destroy_cell:
+       kfree_const(cell->id);
+       kfree(cell);
+
+       return read_len;
+}
+
 /* default read/write permissions */
 static struct bin_attribute bin_attr_rw_nvmem = {
        .attr   = {
@@ -345,11 +357,21 @@ static const struct attribute_group nvmem_bin_group = {
        .is_bin_visible = nvmem_bin_attr_is_visible,
 };
 
+/* Cell attributes will be dynamically allocated */
+static struct attribute_group nvmem_cells_group = {
+       .name           = "cells",
+};
+
 static const struct attribute_group *nvmem_dev_groups[] = {
        &nvmem_bin_group,
        NULL,
 };
 
+static const struct attribute_group *nvmem_cells_groups[] = {
+       &nvmem_cells_group,
+       NULL,
+};
+
 static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
        .attr   = {
                .name   = "eeprom",
@@ -405,6 +427,68 @@ static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
                device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
 }
 
+static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
+{
+       struct bin_attribute **cells_attrs, *attrs;
+       struct nvmem_cell_entry *entry;
+       unsigned int ncells = 0, i = 0;
+       int ret = 0;
+
+       mutex_lock(&nvmem_mutex);
+
+       if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) {
+               nvmem_cells_group.bin_attrs = NULL;
+               goto unlock_mutex;
+       }
+
+       /* Allocate an array of attributes with a sentinel */
+       ncells = list_count_nodes(&nvmem->cells);
+       cells_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
+                                  sizeof(struct bin_attribute *), GFP_KERNEL);
+       if (!cells_attrs) {
+               ret = -ENOMEM;
+               goto unlock_mutex;
+       }
+
+       attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
+       if (!attrs) {
+               ret = -ENOMEM;
+               goto unlock_mutex;
+       }
+
+       /* Initialize each attribute to take the name and size of the cell */
+       list_for_each_entry(entry, &nvmem->cells, node) {
+               sysfs_bin_attr_init(&attrs[i]);
+               attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
+                                                   "%s@%x", entry->name,
+                                                   entry->offset);
+               attrs[i].attr.mode = 0444;
+               attrs[i].size = entry->bytes;
+               attrs[i].read = &nvmem_cell_attr_read;
+               attrs[i].private = entry;
+               if (!attrs[i].attr.name) {
+                       ret = -ENOMEM;
+                       goto unlock_mutex;
+               }
+
+               cells_attrs[i] = &attrs[i];
+               i++;
+       }
+
+       nvmem_cells_group.bin_attrs = cells_attrs;
+
+       ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
+       if (ret)
+               goto unlock_mutex;
+
+       nvmem->sysfs_cells_populated = true;
+
+unlock_mutex:
+       mutex_unlock(&nvmem_mutex);
+
+       return ret;
+}
+
 #else /* CONFIG_NVMEM_SYSFS */
 
 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
@@ -697,7 +781,6 @@ static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
 
 static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
 {
-       struct nvmem_layout *layout = nvmem->layout;
        struct device *dev = &nvmem->dev;
        struct device_node *child;
        const __be32 *addr;
@@ -727,8 +810,8 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod
 
                info.np = of_node_get(child);
 
-               if (layout && layout->fixup_cell_info)
-                       layout->fixup_cell_info(nvmem, layout, &info);
+               if (nvmem->fixup_dt_cell_info)
+                       nvmem->fixup_dt_cell_info(nvmem, &info);
 
                ret = nvmem_add_one_cell(nvmem, &info);
                kfree(info.name);
@@ -763,117 +846,35 @@ static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
        return err;
 }
 
-int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
-{
-       layout->owner = owner;
-
-       spin_lock(&nvmem_layout_lock);
-       list_add(&layout->node, &nvmem_layouts);
-       spin_unlock(&nvmem_layout_lock);
-
-       blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(__nvmem_layout_register);
-
-void nvmem_layout_unregister(struct nvmem_layout *layout)
-{
-       blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout);
-
-       spin_lock(&nvmem_layout_lock);
-       list_del(&layout->node);
-       spin_unlock(&nvmem_layout_lock);
-}
-EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
-
-static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
+int nvmem_layout_register(struct nvmem_layout *layout)
 {
-       struct device_node *layout_np;
-       struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);
-
-       layout_np = of_nvmem_layout_get_container(nvmem);
-       if (!layout_np)
-               return NULL;
-
-       /* Fixed layouts don't have a matching driver */
-       if (of_device_is_compatible(layout_np, "fixed-layout")) {
-               of_node_put(layout_np);
-               return NULL;
-       }
-
-       /*
-        * In case the nvmem device was built-in while the layout was built as a
-        * module, we shall manually request the layout driver loading otherwise
-        * we'll never have any match.
-        */
-       of_request_module(layout_np);
-
-       spin_lock(&nvmem_layout_lock);
-
-       list_for_each_entry(l, &nvmem_layouts, node) {
-               if (of_match_node(l->of_match_table, layout_np)) {
-                       if (try_module_get(l->owner))
-                               layout = l;
-
-                       break;
-               }
-       }
-
-       spin_unlock(&nvmem_layout_lock);
-       of_node_put(layout_np);
-
-       return layout;
-}
+       int ret;
 
-static void nvmem_layout_put(struct nvmem_layout *layout)
-{
-       if (layout)
-               module_put(layout->owner);
-}
+       if (!layout->add_cells)
+               return -EINVAL;
 
-static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
-{
-       struct nvmem_layout *layout = nvmem->layout;
-       int ret;
+       /* Populate the cells */
+       ret = layout->add_cells(layout);
+       if (ret)
+               return ret;
 
-       if (layout && layout->add_cells) {
-               ret = layout->add_cells(&nvmem->dev, nvmem, layout);
-               if (ret)
-                       return ret;
+#ifdef CONFIG_NVMEM_SYSFS
+       ret = nvmem_populate_sysfs_cells(layout->nvmem);
+       if (ret) {
+               nvmem_device_remove_all_cells(layout->nvmem);
+               return ret;
        }
+#endif
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nvmem_layout_register);
 
-#if IS_ENABLED(CONFIG_OF)
-/**
- * of_nvmem_layout_get_container() - Get OF node to layout container.
- *
- * @nvmem: nvmem device.
- *
- * Return: a node pointer with refcount incremented or NULL if no
- * container exists. Use of_node_put() on it when done.
- */
-struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
-{
-       return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
-}
-EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
-#endif
-
-const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
-                                       struct nvmem_layout *layout)
+void nvmem_layout_unregister(struct nvmem_layout *layout)
 {
-       struct device_node __maybe_unused *layout_np;
-       const struct of_device_id *match;
-
-       layout_np = of_nvmem_layout_get_container(nvmem);
-       match = of_match_node(layout->of_match_table, layout_np);
-
-       return match ? match->data : NULL;
+       /* Keep the API even with an empty stub in case we need it later */
 }
-EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);
+EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
 
 /**
  * nvmem_register() - Register a nvmem device for given nvmem_config.
@@ -925,6 +926,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 
        kref_init(&nvmem->refcnt);
        INIT_LIST_HEAD(&nvmem->cells);
+       nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;
 
        nvmem->owner = config->owner;
        if (!nvmem->owner && config->dev->driver)
@@ -980,19 +982,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
                        goto err_put_device;
        }
 
-       /*
-        * If the driver supplied a layout by config->layout, the module
-        * pointer will be NULL and nvmem_layout_put() will be a noop.
-        */
-       nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
-       if (IS_ERR(nvmem->layout)) {
-               rval = PTR_ERR(nvmem->layout);
-               nvmem->layout = NULL;
-
-               if (rval == -EPROBE_DEFER)
-                       goto err_teardown_compat;
-       }
-
        if (config->cells) {
                rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
                if (rval)
@@ -1013,24 +1002,34 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
        if (rval)
                goto err_remove_cells;
 
-       rval = nvmem_add_cells_from_layout(nvmem);
-       if (rval)
-               goto err_remove_cells;
-
        dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
 
        rval = device_add(&nvmem->dev);
        if (rval)
                goto err_remove_cells;
 
+       rval = nvmem_populate_layout(nvmem);
+       if (rval)
+               goto err_remove_dev;
+
+#ifdef CONFIG_NVMEM_SYSFS
+       rval = nvmem_populate_sysfs_cells(nvmem);
+       if (rval)
+               goto err_destroy_layout;
+#endif
+
        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
 
        return nvmem;
 
+#ifdef CONFIG_NVMEM_SYSFS
+err_destroy_layout:
+       nvmem_destroy_layout(nvmem);
+#endif
+err_remove_dev:
+       device_del(&nvmem->dev);
 err_remove_cells:
        nvmem_device_remove_all_cells(nvmem);
-       nvmem_layout_put(nvmem->layout);
-err_teardown_compat:
        if (config->compat)
                nvmem_sysfs_remove_compat(nvmem, config);
 err_put_device:
@@ -1052,7 +1051,7 @@ static void nvmem_device_release(struct kref *kref)
                device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
 
        nvmem_device_remove_all_cells(nvmem);
-       nvmem_layout_put(nvmem->layout);
+       nvmem_destroy_layout(nvmem);
        device_unregister(&nvmem->dev);
 }
 
@@ -1354,6 +1353,12 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
        return cell;
 }
 
+static void nvmem_layout_module_put(struct nvmem_device *nvmem)
+{
+       if (nvmem->layout && nvmem->layout->dev.driver)
+               module_put(nvmem->layout->dev.driver->owner);
+}
+
 #if IS_ENABLED(CONFIG_OF)
 static struct nvmem_cell_entry *
 nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
@@ -1372,6 +1377,18 @@ nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np
        return cell;
 }
 
+static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
+{
+       if (!nvmem->layout)
+               return 0;
+
+       if (!nvmem->layout->dev.driver ||
+           !try_module_get(nvmem->layout->dev.driver->owner))
+               return -EPROBE_DEFER;
+
+       return 0;
+}
+
 /**
  * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
  *
@@ -1434,16 +1451,29 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
                return ERR_CAST(nvmem);
        }
 
+       ret = nvmem_layout_module_get_optional(nvmem);
+       if (ret) {
+               of_node_put(cell_np);
+               __nvmem_device_put(nvmem);
+               return ERR_PTR(ret);
+       }
+
        cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
        of_node_put(cell_np);
        if (!cell_entry) {
                __nvmem_device_put(nvmem);
-               return ERR_PTR(-ENOENT);
+               nvmem_layout_module_put(nvmem);
+               if (nvmem->layout)
+                       return ERR_PTR(-EPROBE_DEFER);
+               else
+                       return ERR_PTR(-ENOENT);
        }
 
        cell = nvmem_create_cell(cell_entry, id, cell_index);
-       if (IS_ERR(cell))
+       if (IS_ERR(cell)) {
                __nvmem_device_put(nvmem);
+               nvmem_layout_module_put(nvmem);
+       }
 
        return cell;
 }
@@ -1557,6 +1587,7 @@ void nvmem_cell_put(struct nvmem_cell *cell)
 
        kfree(cell);
        __nvmem_device_put(nvmem);
+       nvmem_layout_module_put(nvmem);
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_put);
 
@@ -2132,13 +2163,37 @@ const char *nvmem_dev_name(struct nvmem_device *nvmem)
 }
 EXPORT_SYMBOL_GPL(nvmem_dev_name);
 
+/**
+ * nvmem_dev_size() - Get the size of a given nvmem device.
+ *
+ * @nvmem: nvmem device.
+ *
+ * Return: size of the nvmem device.
+ */
+size_t nvmem_dev_size(struct nvmem_device *nvmem)
+{
+       return nvmem->size;
+}
+EXPORT_SYMBOL_GPL(nvmem_dev_size);
+
 static int __init nvmem_init(void)
 {
-       return bus_register(&nvmem_bus_type);
+       int ret;
+
+       ret = bus_register(&nvmem_bus_type);
+       if (ret)
+               return ret;
+
+       ret = nvmem_layout_bus_register();
+       if (ret)
+               bus_unregister(&nvmem_bus_type);
+
+       return ret;
 }
 
 static void __exit nvmem_exit(void)
 {
+       nvmem_layout_bus_unregister();
        bus_unregister(&nvmem_bus_type);
 }
 
index f1e202efaa4970beb864d8f626624004c26fe014..79dd4fda03295ac3e2574480c595e6c5d370ba50 100644 (file)
@@ -583,17 +583,12 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
 
-static void imx_ocotp_fixup_cell_info(struct nvmem_device *nvmem,
-                                     struct nvmem_layout *layout,
-                                     struct nvmem_cell_info *cell)
+static void imx_ocotp_fixup_dt_cell_info(struct nvmem_device *nvmem,
+                                        struct nvmem_cell_info *cell)
 {
        cell->read_post_process = imx_ocotp_cell_pp;
 }
 
-static struct nvmem_layout imx_ocotp_layout = {
-       .fixup_cell_info = imx_ocotp_fixup_cell_info,
-};
-
 static int imx_ocotp_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -619,7 +614,7 @@ static int imx_ocotp_probe(struct platform_device *pdev)
        imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
        imx_ocotp_nvmem_config.dev = dev;
        imx_ocotp_nvmem_config.priv = priv;
-       imx_ocotp_nvmem_config.layout = &imx_ocotp_layout;
+       imx_ocotp_nvmem_config.fixup_dt_cell_info = &imx_ocotp_fixup_dt_cell_info;
 
        priv->config = &imx_ocotp_nvmem_config;
 
diff --git a/drivers/nvmem/internals.h b/drivers/nvmem/internals.h
new file mode 100644 (file)
index 0000000..18fed57
--- /dev/null
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_NVMEM_INTERNALS_H
+#define _LINUX_NVMEM_INTERNALS_H
+
+#include <linux/device.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+
+struct nvmem_device {
+       struct module           *owner;
+       struct device           dev;
+       struct list_head        node;
+       int                     stride;
+       int                     word_size;
+       int                     id;
+       struct kref             refcnt;
+       size_t                  size;
+       bool                    read_only;
+       bool                    root_only;
+       int                     flags;
+       enum nvmem_type         type;
+       struct bin_attribute    eeprom;
+       struct device           *base_dev;
+       struct list_head        cells;
+       void (*fixup_dt_cell_info)(struct nvmem_device *nvmem,
+                                  struct nvmem_cell_info *cell);
+       const struct nvmem_keepout *keepout;
+       unsigned int            nkeepout;
+       nvmem_reg_read_t        reg_read;
+       nvmem_reg_write_t       reg_write;
+       struct gpio_desc        *wp_gpio;
+       struct nvmem_layout     *layout;
+       void *priv;
+       bool                    sysfs_cells_populated;
+};
+
+#if IS_ENABLED(CONFIG_OF)
+int nvmem_layout_bus_register(void);
+void nvmem_layout_bus_unregister(void);
+int nvmem_populate_layout(struct nvmem_device *nvmem);
+void nvmem_destroy_layout(struct nvmem_device *nvmem);
+#else /* CONFIG_OF */
+static inline int nvmem_layout_bus_register(void)
+{
+       return 0;
+}
+
+static inline void nvmem_layout_bus_unregister(void) {}
+
+static inline int nvmem_populate_layout(struct nvmem_device *nvmem)
+{
+       return 0;
+}
+
+static inline void nvmem_destroy_layout(struct nvmem_device *nvmem) { }
+#endif /* CONFIG_OF */
+
+#endif  /* ifndef _LINUX_NVMEM_INTERNALS_H */
diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c
new file mode 100644 (file)
index 0000000..6a6aa58
--- /dev/null
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMEM layout bus handling
+ *
+ * Copyright (C) 2023 Bootlin
+ * Author: Miquel Raynal <miquel.raynal@bootlin.com
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+
+#include "internals.h"
+
+#define to_nvmem_layout_driver(drv) \
+       (container_of((drv), struct nvmem_layout_driver, driver))
+#define to_nvmem_layout_device(_dev) \
+       container_of((_dev), struct nvmem_layout, dev)
+
+static int nvmem_layout_bus_match(struct device *dev, struct device_driver *drv)
+{
+       return of_driver_match_device(dev, drv);
+}
+
+static int nvmem_layout_bus_probe(struct device *dev)
+{
+       struct nvmem_layout_driver *drv = to_nvmem_layout_driver(dev->driver);
+       struct nvmem_layout *layout = to_nvmem_layout_device(dev);
+
+       if (!drv->probe || !drv->remove)
+               return -EINVAL;
+
+       return drv->probe(layout);
+}
+
+static void nvmem_layout_bus_remove(struct device *dev)
+{
+       struct nvmem_layout_driver *drv = to_nvmem_layout_driver(dev->driver);
+       struct nvmem_layout *layout = to_nvmem_layout_device(dev);
+
+       return drv->remove(layout);
+}
+
+static struct bus_type nvmem_layout_bus_type = {
+       .name           = "nvmem-layout",
+       .match          = nvmem_layout_bus_match,
+       .probe          = nvmem_layout_bus_probe,
+       .remove         = nvmem_layout_bus_remove,
+};
+
+int nvmem_layout_driver_register(struct nvmem_layout_driver *drv)
+{
+       drv->driver.bus = &nvmem_layout_bus_type;
+
+       return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(nvmem_layout_driver_register);
+
+void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv)
+{
+       driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(nvmem_layout_driver_unregister);
+
+static void nvmem_layout_release_device(struct device *dev)
+{
+       struct nvmem_layout *layout = to_nvmem_layout_device(dev);
+
+       of_node_put(layout->dev.of_node);
+       kfree(layout);
+}
+
+static int nvmem_layout_create_device(struct nvmem_device *nvmem,
+                                     struct device_node *np)
+{
+       struct nvmem_layout *layout;
+       struct device *dev;
+       int ret;
+
+       layout = kzalloc(sizeof(*layout), GFP_KERNEL);
+       if (!layout)
+               return -ENOMEM;
+
+       /* Create a bidirectional link */
+       layout->nvmem = nvmem;
+       nvmem->layout = layout;
+
+       /* Device model registration */
+       dev = &layout->dev;
+       device_initialize(dev);
+       dev->parent = &nvmem->dev;
+       dev->bus = &nvmem_layout_bus_type;
+       dev->release = nvmem_layout_release_device;
+       dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       dev->dma_mask = &dev->coherent_dma_mask;
+       device_set_node(dev, of_fwnode_handle(of_node_get(np)));
+       of_device_make_bus_id(dev);
+       of_msi_configure(dev, dev->of_node);
+
+       ret = device_add(dev);
+       if (ret) {
+               put_device(dev);
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id of_nvmem_layout_skip_table[] = {
+       { .compatible = "fixed-layout", },
+       {}
+};
+
+static int nvmem_layout_bus_populate(struct nvmem_device *nvmem,
+                                    struct device_node *layout_dn)
+{
+       int ret;
+
+       /* Make sure it has a compatible property */
+       if (!of_get_property(layout_dn, "compatible", NULL)) {
+               pr_debug("%s() - skipping %pOF, no compatible prop\n",
+                        __func__, layout_dn);
+               return 0;
+       }
+
+       /* Fixed layouts are parsed manually somewhere else for now */
+       if (of_match_node(of_nvmem_layout_skip_table, layout_dn)) {
+               pr_debug("%s() - skipping %pOF node\n", __func__, layout_dn);
+               return 0;
+       }
+
+       if (of_node_check_flag(layout_dn, OF_POPULATED_BUS)) {
+               pr_debug("%s() - skipping %pOF, already populated\n",
+                        __func__, layout_dn);
+
+               return 0;
+       }
+
+       /* NVMEM layout buses expect only a single device representing the layout */
+       ret = nvmem_layout_create_device(nvmem, layout_dn);
+       if (ret)
+               return ret;
+
+       of_node_set_flag(layout_dn, OF_POPULATED_BUS);
+
+       return 0;
+}
+
+struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
+{
+       return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
+}
+EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
+
+/*
+ * Returns the number of devices populated, 0 if the operation was not relevant
+ * for this nvmem device, an error code otherwise.
+ */
+int nvmem_populate_layout(struct nvmem_device *nvmem)
+{
+       struct device_node *layout_dn;
+       int ret;
+
+       layout_dn = of_nvmem_layout_get_container(nvmem);
+       if (!layout_dn)
+               return 0;
+
+       /* Populate the layout device */
+       device_links_supplier_sync_state_pause();
+       ret = nvmem_layout_bus_populate(nvmem, layout_dn);
+       device_links_supplier_sync_state_resume();
+
+       of_node_put(layout_dn);
+       return ret;
+}
+
+void nvmem_destroy_layout(struct nvmem_device *nvmem)
+{
+       struct device *dev;
+
+       if (!nvmem->layout)
+               return;
+
+       dev = &nvmem->layout->dev;
+       of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
+       device_unregister(dev);
+}
+
+int nvmem_layout_bus_register(void)
+{
+       return bus_register(&nvmem_layout_bus_type);
+}
+
+void nvmem_layout_bus_unregister(void)
+{
+       bus_unregister(&nvmem_layout_bus_type);
+}
index 7ff1ee1c1f05285fd47d88f01f7390c4b0ffb620..9c6e672fc3509f0ddd2f77fc2c1c2cd6533145b8 100644 (file)
@@ -1,5 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
 
+config NVMEM_LAYOUTS
+       bool
+       depends on OF
+
+if NVMEM_LAYOUTS
+
 menu "Layout Types"
 
 config NVMEM_LAYOUT_SL28_VPD
@@ -21,3 +27,5 @@ config NVMEM_LAYOUT_ONIE_TLV
          If unsure, say N.
 
 endmenu
+
+endif
index 59fc87ccfcffeb430d7a9294d01db8fd3b454ec2..9d2ad5f2dc1012cc5930daa26b0889b61d7c1483 100644 (file)
@@ -182,9 +182,10 @@ static bool onie_tlv_crc_is_valid(struct device *dev, size_t table_len, u8 *tabl
        return true;
 }
 
-static int onie_tlv_parse_table(struct device *dev, struct nvmem_device *nvmem,
-                               struct nvmem_layout *layout)
+static int onie_tlv_parse_table(struct nvmem_layout *layout)
 {
+       struct nvmem_device *nvmem = layout->nvmem;
+       struct device *dev = &layout->dev;
        struct onie_tlv_hdr hdr;
        size_t table_len, data_len, hdr_len;
        u8 *table, *data;
@@ -226,16 +227,32 @@ static int onie_tlv_parse_table(struct device *dev, struct nvmem_device *nvmem,
        return 0;
 }
 
+static int onie_tlv_probe(struct nvmem_layout *layout)
+{
+       layout->add_cells = onie_tlv_parse_table;
+
+       return nvmem_layout_register(layout);
+}
+
+static void onie_tlv_remove(struct nvmem_layout *layout)
+{
+       nvmem_layout_unregister(layout);
+}
+
 static const struct of_device_id onie_tlv_of_match_table[] = {
        { .compatible = "onie,tlv-layout", },
        {},
 };
 MODULE_DEVICE_TABLE(of, onie_tlv_of_match_table);
 
-static struct nvmem_layout onie_tlv_layout = {
-       .name = "ONIE tlv layout",
-       .of_match_table = onie_tlv_of_match_table,
-       .add_cells = onie_tlv_parse_table,
+static struct nvmem_layout_driver onie_tlv_layout = {
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = "onie-tlv-layout",
+               .of_match_table = onie_tlv_of_match_table,
+       },
+       .probe = onie_tlv_probe,
+       .remove = onie_tlv_remove,
 };
 module_nvmem_layout_driver(onie_tlv_layout);
 
index 05671371f63166edb28231ef12747fe40d22913d..53fa50f17dcaf7aa53435dc21ee039c892ab3ca5 100644 (file)
@@ -80,9 +80,10 @@ static int sl28vpd_v1_check_crc(struct device *dev, struct nvmem_device *nvmem)
        return 0;
 }
 
-static int sl28vpd_add_cells(struct device *dev, struct nvmem_device *nvmem,
-                            struct nvmem_layout *layout)
+static int sl28vpd_add_cells(struct nvmem_layout *layout)
 {
+       struct nvmem_device *nvmem = layout->nvmem;
+       struct device *dev = &layout->dev;
        const struct nvmem_cell_info *pinfo;
        struct nvmem_cell_info info = {0};
        struct device_node *layout_np;
@@ -135,16 +136,32 @@ static int sl28vpd_add_cells(struct device *dev, struct nvmem_device *nvmem,
        return 0;
 }
 
+static int sl28vpd_probe(struct nvmem_layout *layout)
+{
+       layout->add_cells = sl28vpd_add_cells;
+
+       return nvmem_layout_register(layout);
+}
+
+static void sl28vpd_remove(struct nvmem_layout *layout)
+{
+       nvmem_layout_unregister(layout);
+}
+
 static const struct of_device_id sl28vpd_of_match_table[] = {
        { .compatible = "kontron,sl28-vpd" },
        {},
 };
 MODULE_DEVICE_TABLE(of, sl28vpd_of_match_table);
 
-static struct nvmem_layout sl28vpd_layout = {
-       .name = "sl28-vpd",
-       .of_match_table = sl28vpd_of_match_table,
-       .add_cells = sl28vpd_add_cells,
+static struct nvmem_layout_driver sl28vpd_layout = {
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = "kontron-sl28vpd-layout",
+               .of_match_table = sl28vpd_of_match_table,
+       },
+       .probe = sl28vpd_probe,
+       .remove = sl28vpd_remove,
 };
 module_nvmem_layout_driver(sl28vpd_layout);
 
index 87c94686cfd216ac19ac8cc02cdfc08bc192668e..84f05b40a4112ed7878e94504de48eec3737e3ad 100644 (file)
@@ -45,9 +45,8 @@ static int mtk_efuse_gpu_speedbin_pp(void *context, const char *id, int index,
        return 0;
 }
 
-static void mtk_efuse_fixup_cell_info(struct nvmem_device *nvmem,
-                                     struct nvmem_layout *layout,
-                                     struct nvmem_cell_info *cell)
+static void mtk_efuse_fixup_dt_cell_info(struct nvmem_device *nvmem,
+                                        struct nvmem_cell_info *cell)
 {
        size_t sz = strlen(cell->name);
 
@@ -61,10 +60,6 @@ static void mtk_efuse_fixup_cell_info(struct nvmem_device *nvmem,
                cell->read_post_process = mtk_efuse_gpu_speedbin_pp;
 }
 
-static struct nvmem_layout mtk_efuse_layout = {
-       .fixup_cell_info = mtk_efuse_fixup_cell_info,
-};
-
 static int mtk_efuse_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -91,7 +86,7 @@ static int mtk_efuse_probe(struct platform_device *pdev)
        econfig.priv = priv;
        econfig.dev = dev;
        if (pdata->uses_post_processing)
-               econfig.layout = &mtk_efuse_layout;
+               econfig.fixup_dt_cell_info = &mtk_efuse_fixup_dt_cell_info;
        nvmem = devm_nvmem_register(dev, &econfig);
 
        return PTR_ERR_OR_ZERO(nvmem);
index 8a553b1799a8523a23666b82251a50617643b1f8..82879b1c9eb93e0360f01c0c98205a33ce0538d6 100644 (file)
@@ -269,6 +269,19 @@ static const struct stm32_romem_cfg stm32mp13_bsec_cfg = {
        .ta = true,
 };
 
+/*
+ * STM32MP25 BSEC OTP: 3 regions of 32-bits data words
+ *   lower OTP (OTP0 to OTP127), bitwise (1-bit) programmable
+ *   mid OTP (OTP128 to OTP255), bulk (32-bit) programmable
+ *   upper OTP (OTP256 to OTP383), bulk (32-bit) programmable
+ *              but no access to HWKEY and ECIES key: limited at OTP367
+ */
+static const struct stm32_romem_cfg stm32mp25_bsec_cfg = {
+       .size = 368 * 4,
+       .lower = 127,
+       .ta = true,
+};
+
 static const struct of_device_id stm32_romem_of_match[] __maybe_unused = {
        { .compatible = "st,stm32f4-otp", }, {
                .compatible = "st,stm32mp15-bsec",
@@ -276,6 +289,9 @@ static const struct of_device_id stm32_romem_of_match[] __maybe_unused = {
        }, {
                .compatible = "st,stm32mp13-bsec",
                .data = (void *)&stm32mp13_bsec_cfg,
+       }, {
+               .compatible = "st,stm32mp25-bsec",
+               .data = (void *)&stm32mp25_bsec_cfg,
        },
        { /* sentinel */ },
 };
index c4ae94af4af78ea95aa80aa7555ea55ac8b30fb8..befbab156cda1fc6af00538d4805c247829a2952 100644 (file)
@@ -23,13 +23,10 @@ enum u_boot_env_format {
 
 struct u_boot_env {
        struct device *dev;
+       struct nvmem_device *nvmem;
        enum u_boot_env_format format;
 
        struct mtd_info *mtd;
-
-       /* Cells */
-       struct nvmem_cell_info *cells;
-       int ncells;
 };
 
 struct u_boot_env_image_single {
@@ -94,70 +91,71 @@ static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, i
 static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
                                size_t data_offset, size_t data_len)
 {
+       struct nvmem_device *nvmem = priv->nvmem;
        struct device *dev = priv->dev;
        char *data = buf + data_offset;
        char *var, *value, *eq;
-       int idx;
-
-       priv->ncells = 0;
-       for (var = data; var < data + data_len && *var; var += strlen(var) + 1)
-               priv->ncells++;
-
-       priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
-       if (!priv->cells)
-               return -ENOMEM;
 
-       for (var = data, idx = 0;
+       for (var = data;
             var < data + data_len && *var;
-            var = value + strlen(value) + 1, idx++) {
+            var = value + strlen(value) + 1) {
+               struct nvmem_cell_info info = {};
+
                eq = strchr(var, '=');
                if (!eq)
                        break;
                *eq = '\0';
                value = eq + 1;
 
-               priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
-               if (!priv->cells[idx].name)
+               info.name = devm_kstrdup(dev, var, GFP_KERNEL);
+               if (!info.name)
                        return -ENOMEM;
-               priv->cells[idx].offset = data_offset + value - data;
-               priv->cells[idx].bytes = strlen(value);
-               priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+               info.offset = data_offset + value - data;
+               info.bytes = strlen(value);
+               info.np = of_get_child_by_name(dev->of_node, info.name);
                if (!strcmp(var, "ethaddr")) {
-                       priv->cells[idx].raw_len = strlen(value);
-                       priv->cells[idx].bytes = ETH_ALEN;
-                       priv->cells[idx].read_post_process = u_boot_env_read_post_process_ethaddr;
+                       info.raw_len = strlen(value);
+                       info.bytes = ETH_ALEN;
+                       info.read_post_process = u_boot_env_read_post_process_ethaddr;
                }
-       }
 
-       if (WARN_ON(idx != priv->ncells))
-               priv->ncells = idx;
+               nvmem_add_one_cell(nvmem, &info);
+       }
 
        return 0;
 }
 
 static int u_boot_env_parse(struct u_boot_env *priv)
 {
+       struct nvmem_device *nvmem = priv->nvmem;
        struct device *dev = priv->dev;
        size_t crc32_data_offset;
        size_t crc32_data_len;
        size_t crc32_offset;
+       __le32 *crc32_addr;
        size_t data_offset;
        size_t data_len;
+       size_t dev_size;
        uint32_t crc32;
        uint32_t calc;
-       size_t bytes;
        uint8_t *buf;
+       int bytes;
        int err;
 
-       buf = kcalloc(1, priv->mtd->size, GFP_KERNEL);
+       dev_size = nvmem_dev_size(nvmem);
+
+       buf = kzalloc(dev_size, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto err_out;
        }
 
-       err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf);
-       if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) {
-               dev_err(dev, "Failed to read from mtd: %d\n", err);
+       bytes = nvmem_device_read(nvmem, 0, dev_size, buf);
+       if (bytes < 0) {
+               err = bytes;
+               goto err_kfree;
+       } else if (bytes != dev_size) {
+               err = -EIO;
                goto err_kfree;
        }
 
@@ -178,9 +176,10 @@ static int u_boot_env_parse(struct u_boot_env *priv)
                data_offset = offsetof(struct u_boot_env_image_broadcom, data);
                break;
        }
-       crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset));
-       crc32_data_len = priv->mtd->size - crc32_data_offset;
-       data_len = priv->mtd->size - data_offset;
+       crc32_addr = (__le32 *)(buf + crc32_offset);
+       crc32 = le32_to_cpu(*crc32_addr);
+       crc32_data_len = dev_size - crc32_data_offset;
+       data_len = dev_size - data_offset;
 
        calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
        if (calc != crc32) {
@@ -189,10 +188,8 @@ static int u_boot_env_parse(struct u_boot_env *priv)
                goto err_kfree;
        }
 
-       buf[priv->mtd->size - 1] = '\0';
+       buf[dev_size - 1] = '\0';
        err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
-       if (err)
-               dev_err(dev, "Failed to add cells: %d\n", err);
 
 err_kfree:
        kfree(buf);
@@ -209,7 +206,6 @@ static int u_boot_env_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct u_boot_env *priv;
-       int err;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -224,17 +220,15 @@ static int u_boot_env_probe(struct platform_device *pdev)
                return PTR_ERR(priv->mtd);
        }
 
-       err = u_boot_env_parse(priv);
-       if (err)
-               return err;
-
        config.dev = dev;
-       config.cells = priv->cells;
-       config.ncells = priv->ncells;
        config.priv = priv;
        config.size = priv->mtd->size;
 
-       return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+       priv->nvmem = devm_nvmem_register(dev, &config);
+       if (IS_ERR(priv->nvmem))
+               return PTR_ERR(priv->nvmem);
+
+       return u_boot_env_parse(priv);
 }
 
 static const struct of_device_id u_boot_env_of_match_table[] = {
index 8d93cb6ea9cde4c9f650f6d4bcf1240a1162e2de..b0ad8fc06e80e099ab6eba7ebe10039875bc85c4 100644 (file)
@@ -1464,6 +1464,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
                out_args->np = new;
                of_node_put(cur);
                cur = new;
+               new = NULL;
        }
 put:
        of_node_put(cur);
index 1ca42ad9dd159d4409b7edb73dd232e9b3dcc7fa..de89f99063758a84d30cf2a8346925feddcc84ca 100644 (file)
@@ -93,12 +93,12 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
 int of_dma_configure_id(struct device *dev, struct device_node *np,
                        bool force_dma, const u32 *id)
 {
-       const struct iommu_ops *iommu;
        const struct bus_dma_region *map = NULL;
        struct device_node *bus_np;
        u64 dma_start = 0;
        u64 mask, end, size = 0;
        bool coherent;
+       int iommu_ret;
        int ret;
 
        if (np == dev->of_node)
@@ -181,21 +181,29 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
        dev_dbg(dev, "device is%sdma coherent\n",
                coherent ? " " : " not ");
 
-       iommu = of_iommu_configure(dev, np, id);
-       if (PTR_ERR(iommu) == -EPROBE_DEFER) {
+       iommu_ret = of_iommu_configure(dev, np, id);
+       if (iommu_ret == -EPROBE_DEFER) {
                /* Don't touch range map if it wasn't set from a valid dma-ranges */
                if (!ret)
                        dev->dma_range_map = NULL;
                kfree(map);
                return -EPROBE_DEFER;
-       }
+       } else if (iommu_ret == -ENODEV) {
+               dev_dbg(dev, "device is not behind an iommu\n");
+       } else if (iommu_ret) {
+               dev_err(dev, "iommu configuration for device failed with %pe\n",
+                       ERR_PTR(iommu_ret));
 
-       dev_dbg(dev, "device is%sbehind an iommu\n",
-               iommu ? " " : " not ");
+               /*
+                * Historically this routine doesn't fail driver probing
+                * due to errors in of_iommu_configure()
+                */
+       } else
+               dev_dbg(dev, "device is behind an iommu\n");
 
-       arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
+       arch_setup_dma_ops(dev, dma_start, size, coherent);
 
-       if (!iommu)
+       if (iommu_ret)
                of_dma_set_restricted_buffer(dev, np);
 
        return 0;
@@ -304,3 +312,44 @@ int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *
        return 0;
 }
 EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
+
+/**
+ * of_device_make_bus_id - Use the device node data to assign a unique name
+ * @dev: pointer to device structure that is linked to a device tree node
+ *
+ * This routine will first try using the translated bus address to
+ * derive a unique name. If it cannot, then it will prepend names from
+ * parent nodes until a unique name can be derived.
+ */
+void of_device_make_bus_id(struct device *dev)
+{
+       struct device_node *node = dev->of_node;
+       const __be32 *reg;
+       u64 addr;
+       u32 mask;
+
+       /* Construct the name, using parent nodes if necessary to ensure uniqueness */
+       while (node->parent) {
+               /*
+                * If the address can be translated, then that is as much
+                * uniqueness as we need. Make it the first component and return
+                */
+               reg = of_get_property(node, "reg", NULL);
+               if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
+                       if (!of_property_read_u32(node, "mask", &mask))
+                               dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn",
+                                            addr, ffs(mask) - 1, node, dev_name(dev));
+
+                       else
+                               dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
+                                            addr, node, dev_name(dev));
+                       return;
+               }
+
+               /* format arguments only used if dev_name() resolves to NULL */
+               dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
+                            kbasename(node->full_name), dev_name(dev));
+               node = node->parent;
+       }
+}
+EXPORT_SYMBOL_GPL(of_device_make_bus_id);
index a9a292d6d59b263ebcc7005511c01b42dbdc06f1..2ae7e9d24a645576ec34031547c384407232b382 100644 (file)
@@ -964,7 +964,7 @@ out:
        return ret;
 }
 
-/*
+/**
  * of_overlay_fdt_apply() - Create and apply an overlay changeset
  * @overlay_fdt:       pointer to overlay FDT
  * @overlay_fdt_size:  number of bytes in @overlay_fdt
index 126d265aa7d860ea553fe85864a4edc046ea1673..b7708a06dc7844d2a4ee5eb67b1d3970c78b30df 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/sysfb.h>
 
 #include "of_private.h"
 
@@ -97,46 +98,6 @@ static const struct of_device_id of_skipped_node_table[] = {
  * mechanism for creating devices from device tree nodes.
  */
 
-/**
- * of_device_make_bus_id - Use the device node data to assign a unique name
- * @dev: pointer to device structure that is linked to a device tree node
- *
- * This routine will first try using the translated bus address to
- * derive a unique name. If it cannot, then it will prepend names from
- * parent nodes until a unique name can be derived.
- */
-static void of_device_make_bus_id(struct device *dev)
-{
-       struct device_node *node = dev->of_node;
-       const __be32 *reg;
-       u64 addr;
-       u32 mask;
-
-       /* Construct the name, using parent nodes if necessary to ensure uniqueness */
-       while (node->parent) {
-               /*
-                * If the address can be translated, then that is as much
-                * uniqueness as we need. Make it the first component and return
-                */
-               reg = of_get_property(node, "reg", NULL);
-               if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
-                       if (!of_property_read_u32(node, "mask", &mask))
-                               dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn",
-                                            addr, ffs(mask) - 1, node, dev_name(dev));
-
-                       else
-                               dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
-                                            addr, node, dev_name(dev));
-                       return;
-               }
-
-               /* format arguments only used if dev_name() resolves to NULL */
-               dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
-                            kbasename(node->full_name), dev_name(dev));
-               node = node->parent;
-       }
-}
-
 /**
  * of_device_alloc - Allocate and initialize an of_device
  * @np: device node to assign to device
@@ -621,8 +582,21 @@ static int __init of_platform_default_populate_init(void)
                }
 
                node = of_get_compatible_child(of_chosen, "simple-framebuffer");
-               of_platform_device_create(node, NULL, NULL);
-               of_node_put(node);
+               if (node) {
+                       /*
+                        * Since a "simple-framebuffer" device is already added
+                        * here, disable the Generic System Framebuffers (sysfb)
+                        * to prevent it from registering another device for the
+                        * system framebuffer later (e.g: using the screen_info
+                        * data that may had been filled as well).
+                        *
+                        * This can happen for example on DT systems that do EFI
+                        * booting and may provide a GOP handle to the EFI stub.
+                        */
+                       sysfb_disable();
+                       of_platform_device_create(node, NULL, NULL);
+                       of_node_put(node);
+               }
 
                /* Populate everything else. */
                of_platform_default_populate(NULL, NULL, NULL);
@@ -668,7 +642,7 @@ EXPORT_SYMBOL_GPL(of_platform_device_destroy);
  * @parent: device which children will be removed
  *
  * Complementary to of_platform_populate(), this function removes children
- * of the given device (and, recurrently, their children) that have been
+ * of the given device (and, recursively, their children) that have been
  * created from their respective device tree nodes (and only those,
  * leaving others - eg. manually created - unharmed).
  */
@@ -737,7 +711,7 @@ static int devm_of_platform_match(struct device *dev, void *res, void *data)
  * @dev: device that requested to depopulate from device tree data
  *
  * Complementary to devm_of_platform_populate(), this function removes children
- * of the given device (and, recurrently, their children) that have been
+ * of the given device (and, recursively, their children) that have been
  * created from their respective device tree nodes (and only those,
  * leaving others - eg. manually created - unharmed).
  */
index afdaefbd03f61563c2b2972ff0c64266d3acaa9e..641a40cf5cf34a7d0aa3bf94362a95ae58065fe1 100644 (file)
@@ -441,6 +441,7 @@ int of_property_read_string(const struct device_node *np, const char *propname,
                                const char **out_string)
 {
        const struct property *prop = of_find_property(np, propname, NULL);
+
        if (!prop)
                return -EINVAL;
        if (!prop->length)
@@ -1217,9 +1218,9 @@ static struct device_node *parse_##fname(struct device_node *np,       \
  *
  * @parse_prop: function name
  *     parse_prop() finds the node corresponding to a supplier phandle
- * @parse_prop.np: Pointer to device node holding supplier phandle property
- * @parse_prop.prop_name: Name of property holding a phandle value
- * @parse_prop.index: For properties holding a list of phandles, this is the
+ *  parse_prop.np: Pointer to device node holding supplier phandle property
+ *  parse_prop.prop_name: Name of property holding a phandle value
+ *  parse_prop.index: For properties holding a list of phandles, this is the
  *                   index into the list
  * @optional: Describes whether a supplier is mandatory or not
  * @node_not_dev: The consumer node containing the property is never converted
index d01f92f0f0db7f571cf22835bcdf3f35858d840b..554a996b2ef18e46074ed0d30228b766a47914b2 100644 (file)
                                phandle-map-pass-thru = <0x0 0xf0>;
                        };
 
+                       provider5: provider5 {
+                               #phandle-cells = <2>;
+                               phandle-map = <2 7 &provider4 2 3>;
+                               phandle-map-mask = <0xff 0xf>;
+                               phandle-map-pass-thru = <0x0 0xf0>;
+                       };
+
                        consumer-a {
                                phandle-list =  <&provider1 1>,
                                                <&provider2 2 0>,
@@ -66,7 +73,8 @@
                                                <&provider4 4 0x100>,
                                                <&provider4 0 0x61>,
                                                <&provider0>,
-                                               <&provider4 19 0x20>;
+                                               <&provider4 19 0x20>,
+                                               <&provider5 2 7>;
                                phandle-list-bad-phandle = <12345678 0 0>;
                                phandle-list-bad-args = <&provider2 1 0>,
                                                        <&provider4 0>;
index e9e90e96600e42a29b193e7fc467d00fb5defa6c..cfd60e35a8992d7d1bf7ee1ea42c10b6f43a7a2e 100644 (file)
@@ -456,6 +456,9 @@ static void __init of_unittest_parse_phandle_with_args(void)
 
                unittest(passed, "index %i - data error on node %pOF rc=%i\n",
                         i, args.np, rc);
+
+               if (rc == 0)
+                       of_node_put(args.np);
        }
 
        /* Check for missing list property */
@@ -545,8 +548,9 @@ static void __init of_unittest_parse_phandle_with_args(void)
 
 static void __init of_unittest_parse_phandle_with_args_map(void)
 {
-       struct device_node *np, *p0, *p1, *p2, *p3;
+       struct device_node *np, *p[6] = {};
        struct of_phandle_args args;
+       unsigned int prefs[6];
        int i, rc;
 
        np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-b");
@@ -555,34 +559,24 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
                return;
        }
 
-       p0 = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
-       if (!p0) {
-               pr_err("missing testcase data\n");
-               return;
-       }
-
-       p1 = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
-       if (!p1) {
-               pr_err("missing testcase data\n");
-               return;
-       }
-
-       p2 = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
-       if (!p2) {
-               pr_err("missing testcase data\n");
-               return;
-       }
-
-       p3 = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
-       if (!p3) {
-               pr_err("missing testcase data\n");
-               return;
+       p[0] = of_find_node_by_path("/testcase-data/phandle-tests/provider0");
+       p[1] = of_find_node_by_path("/testcase-data/phandle-tests/provider1");
+       p[2] = of_find_node_by_path("/testcase-data/phandle-tests/provider2");
+       p[3] = of_find_node_by_path("/testcase-data/phandle-tests/provider3");
+       p[4] = of_find_node_by_path("/testcase-data/phandle-tests/provider4");
+       p[5] = of_find_node_by_path("/testcase-data/phandle-tests/provider5");
+       for (i = 0; i < ARRAY_SIZE(p); ++i) {
+               if (!p[i]) {
+                       pr_err("missing testcase data\n");
+                       return;
+               }
+               prefs[i] = kref_read(&p[i]->kobj.kref);
        }
 
        rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
-       unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc);
+       unittest(rc == 8, "of_count_phandle_with_args() returned %i, expected 8\n", rc);
 
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < 9; i++) {
                bool passed = true;
 
                memset(&args, 0, sizeof(args));
@@ -593,13 +587,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
                switch (i) {
                case 0:
                        passed &= !rc;
-                       passed &= (args.np == p1);
+                       passed &= (args.np == p[1]);
                        passed &= (args.args_count == 1);
                        passed &= (args.args[0] == 1);
                        break;
                case 1:
                        passed &= !rc;
-                       passed &= (args.np == p3);
+                       passed &= (args.np == p[3]);
                        passed &= (args.args_count == 3);
                        passed &= (args.args[0] == 2);
                        passed &= (args.args[1] == 5);
@@ -610,28 +604,36 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
                        break;
                case 3:
                        passed &= !rc;
-                       passed &= (args.np == p0);
+                       passed &= (args.np == p[0]);
                        passed &= (args.args_count == 0);
                        break;
                case 4:
                        passed &= !rc;
-                       passed &= (args.np == p1);
+                       passed &= (args.np == p[1]);
                        passed &= (args.args_count == 1);
                        passed &= (args.args[0] == 3);
                        break;
                case 5:
                        passed &= !rc;
-                       passed &= (args.np == p0);
+                       passed &= (args.np == p[0]);
                        passed &= (args.args_count == 0);
                        break;
                case 6:
                        passed &= !rc;
-                       passed &= (args.np == p2);
+                       passed &= (args.np == p[2]);
                        passed &= (args.args_count == 2);
                        passed &= (args.args[0] == 15);
                        passed &= (args.args[1] == 0x20);
                        break;
                case 7:
+                       passed &= !rc;
+                       passed &= (args.np == p[3]);
+                       passed &= (args.args_count == 3);
+                       passed &= (args.args[0] == 2);
+                       passed &= (args.args[1] == 5);
+                       passed &= (args.args[2] == 3);
+                       break;
+               case 8:
                        passed &= (rc == -ENOENT);
                        break;
                default:
@@ -640,6 +642,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
 
                unittest(passed, "index %i - data error on node %s rc=%i\n",
                         i, args.np->full_name, rc);
+
+               if (rc == 0)
+                       of_node_put(args.np);
        }
 
        /* Check for missing list property */
@@ -686,6 +691,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
                   "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
 
        unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+
+       for (i = 0; i < ARRAY_SIZE(p); ++i) {
+               unittest(prefs[i] == kref_read(&p[i]->kobj.kref),
+                        "provider%d: expected:%d got:%d\n",
+                        i, prefs[i], kref_read(&p[i]->kobj.kref));
+               of_node_put(p[i]);
+       }
 }
 
 static void __init of_unittest_property_string(void)
index bb0d92461b08b3796df3f2b4078987291bb2f3cf..7a6a3e7f2825be5191e024e6c7af1cd688219b75 100644 (file)
@@ -213,7 +213,7 @@ static int __init power_init(void)
        if (running_on_qemu && soft_power_reg)
                register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT,
                                        qemu_power_off, (void *)soft_power_reg);
-       else
+       if (!running_on_qemu || soft_power_reg)
                power_task = kthread_run(kpowerswd, (void*)soft_power_reg,
                                        KTHREAD_NAME);
        if (IS_ERR(power_task)) {
index 9f5d784cd95d581434974c8f2734b1f3d4e9e1b9..3644997a8342555e85abc364721c10ced3a2aba9 100644 (file)
@@ -65,6 +65,10 @@ enum parport_pc_pci_cards {
        sunix_5069a,
        sunix_5079a,
        sunix_5099a,
+       brainboxes_uc257,
+       brainboxes_is300,
+       brainboxes_uc414,
+       brainboxes_px263,
 };
 
 /* each element directly indexed from enum list, above */
@@ -158,6 +162,10 @@ static struct parport_pc_pci cards[] = {
        /* sunix_5069a */               { 1, { { 1, 2 }, } },
        /* sunix_5079a */               { 1, { { 1, 2 }, } },
        /* sunix_5099a */               { 1, { { 1, 2 }, } },
+       /* brainboxes_uc257 */  { 1, { { 3, -1 }, } },
+       /* brainboxes_is300 */  { 1, { { 3, -1 }, } },
+       /* brainboxes_uc414 */  { 1, { { 3, -1 }, } },
+       /* brainboxes_px263 */  { 1, { { 3, -1 }, } },
 };
 
 static struct pci_device_id parport_serial_pci_tbl[] = {
@@ -277,6 +285,38 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
        { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX,
          0x0104, 0, 0, sunix_5099a },
 
+       /* Brainboxes UC-203 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x0bc1,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+       { PCI_VENDOR_ID_INTASHIELD, 0x0bc2,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+
+       /* Brainboxes UC-257 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x0861,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+       { PCI_VENDOR_ID_INTASHIELD, 0x0862,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+       { PCI_VENDOR_ID_INTASHIELD, 0x0863,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+
+       /* Brainboxes UC-414 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x0e61,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc414 },
+
+       /* Brainboxes UC-475 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x0981,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+       { PCI_VENDOR_ID_INTASHIELD, 0x0982,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 },
+
+       /* Brainboxes IS-300/IS-500 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x0da0,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_is300 },
+
+       /* Brainboxes PX-263/PX-295 */
+       { PCI_VENDOR_ID_INTASHIELD, 0x402c,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px263 },
+
        { 0, } /* terminate list */
 };
 MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
@@ -542,6 +582,30 @@ static struct pciserial_board pci_parport_serial_boards[] = {
                .base_baud      = 921600,
                .uart_offset    = 0x8,
        },
+       [brainboxes_uc257] = {
+               .flags          = FL_BASE2,
+               .num_ports      = 2,
+               .base_baud      = 115200,
+               .uart_offset    = 8,
+       },
+       [brainboxes_is300] = {
+               .flags          = FL_BASE2,
+               .num_ports      = 1,
+               .base_baud      = 115200,
+               .uart_offset    = 8,
+       },
+       [brainboxes_uc414] = {
+               .flags          = FL_BASE2,
+               .num_ports      = 4,
+               .base_baud      = 115200,
+               .uart_offset    = 8,
+       },
+       [brainboxes_px263] = {
+               .flags          = FL_BASE2,
+               .num_ports      = 4,
+               .base_baud      = 921600,
+               .uart_offset    = 8,
+       },
 };
 
 struct parport_serial_private {
index e21831d93305d215ae7454c1f1ea36bbcca364b9..49c74ded8a53ce70279246c9d85aa4698c084110 100644 (file)
@@ -611,7 +611,7 @@ static void free_pardevice(struct device *dev)
 {
        struct pardevice *par_dev = to_pardevice(dev);
 
-       kfree(par_dev->name);
+       kfree_const(par_dev->name);
        kfree(par_dev);
 }
 
@@ -682,8 +682,8 @@ parport_register_dev_model(struct parport *port, const char *name,
                           const struct pardev_cb *par_dev_cb, int id)
 {
        struct pardevice *par_dev;
+       const char *devname;
        int ret;
-       char *devname;
 
        if (port->physport->flags & PARPORT_FLAG_EXCL) {
                /* An exclusive device is registered. */
@@ -726,7 +726,7 @@ parport_register_dev_model(struct parport *port, const char *name,
        if (!par_dev->state)
                goto err_put_par_dev;
 
-       devname = kstrdup(name, GFP_KERNEL);
+       devname = kstrdup_const(name, GFP_KERNEL);
        if (!devname)
                goto err_free_par_dev;
 
@@ -804,7 +804,7 @@ parport_register_dev_model(struct parport *port, const char *name,
        return par_dev;
 
 err_free_devname:
-       kfree(devname);
+       kfree_const(devname);
 err_free_par_dev:
        kfree(par_dev->state);
 err_put_par_dev:
index 9c2137dae429aa26cd69bfaadb9706193946b2b8..826b5016a101022b990045fa7b68afe85be80c7a 100644 (file)
@@ -386,21 +386,8 @@ void pci_bus_add_devices(const struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pci_bus_add_devices);
 
-/** pci_walk_bus - walk devices on/under bus, calling callback.
- *  @top      bus whose devices should be walked
- *  @cb       callback to be called for each device found
- *  @userdata arbitrary pointer to be passed to callback.
- *
- *  Walk the given bus, including any bridged devices
- *  on buses under this bus.  Call the provided callback
- *  on each device found.
- *
- *  We check the return of @cb each time. If it returns anything
- *  other than 0, we break out.
- *
- */
-void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
-                 void *userdata)
+static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+                          void *userdata, bool locked)
 {
        struct pci_dev *dev;
        struct pci_bus *bus;
@@ -408,7 +395,8 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
        int retval;
 
        bus = top;
-       down_read(&pci_bus_sem);
+       if (!locked)
+               down_read(&pci_bus_sem);
        next = top->devices.next;
        for (;;) {
                if (next == &bus->devices) {
@@ -431,10 +419,37 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
                if (retval)
                        break;
        }
-       up_read(&pci_bus_sem);
+       if (!locked)
+               up_read(&pci_bus_sem);
+}
+
+/**
+ *  pci_walk_bus - walk devices on/under bus, calling callback.
+ *  @top: bus whose devices should be walked
+ *  @cb: callback to be called for each device found
+ *  @userdata: arbitrary pointer to be passed to callback
+ *
+ *  Walk the given bus, including any bridged devices
+ *  on buses under this bus.  Call the provided callback
+ *  on each device found.
+ *
+ *  We check the return of @cb each time. If it returns anything
+ *  other than 0, we break out.
+ */
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+       __pci_walk_bus(top, cb, userdata, false);
 }
 EXPORT_SYMBOL_GPL(pci_walk_bus);
 
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+       lockdep_assert_held(&pci_bus_sem);
+
+       __pci_walk_bus(top, cb, userdata, true);
+}
+EXPORT_SYMBOL_GPL(pci_walk_bus_locked);
+
 struct pci_bus *pci_bus_get(struct pci_bus *bus)
 {
        if (bus)
index 291d127113632040ae7991998129973a8ddb1a9c..1d5a70c9055ed600696958d5a438731fccf7a187 100644 (file)
@@ -47,6 +47,7 @@ config PCI_J721E
 
 config PCI_J721E_HOST
        bool "TI J721E PCIe controller (host mode)"
+       depends on ARCH_K3 || COMPILE_TEST
        depends on OF
        select PCIE_CADENCE_HOST
        select PCI_J721E
@@ -57,6 +58,7 @@ config PCI_J721E_HOST
 
 config PCI_J721E_EP
        bool "TI J721E PCIe controller (endpoint mode)"
+       depends on ARCH_K3 || COMPILE_TEST
        depends on OF
        depends on PCI_ENDPOINT
        select PCIE_CADENCE_EP
index 2c87e7728a653b4e6fab5fb14dbc8873b502defb..85718246016b733ee4c8e52ab6c6be2c6f13e93f 100644 (file)
@@ -42,18 +42,16 @@ enum link_status {
 };
 
 #define J721E_MODE_RC                  BIT(7)
-#define LANE_COUNT_MASK                        BIT(8)
 #define LANE_COUNT(n)                  ((n) << 8)
 
 #define GENERATION_SEL_MASK            GENMASK(1, 0)
 
-#define MAX_LANES                      2
-
 struct j721e_pcie {
        struct cdns_pcie        *cdns_pcie;
        struct clk              *refclk;
        u32                     mode;
        u32                     num_lanes;
+       u32                     max_lanes;
        void __iomem            *user_cfg_base;
        void __iomem            *intd_cfg_base;
        u32                     linkdown_irq_regfield;
@@ -71,6 +69,7 @@ struct j721e_pcie_data {
        unsigned int            quirk_disable_flr:1;
        u32                     linkdown_irq_regfield;
        unsigned int            byte_access_allowed:1;
+       unsigned int            max_lanes;
 };
 
 static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
@@ -206,11 +205,15 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
 {
        struct device *dev = pcie->cdns_pcie->dev;
        u32 lanes = pcie->num_lanes;
+       u32 mask = BIT(8);
        u32 val = 0;
        int ret;
 
+       if (pcie->max_lanes == 4)
+               mask = GENMASK(9, 8);
+
        val = LANE_COUNT(lanes - 1);
-       ret = regmap_update_bits(syscon, offset, LANE_COUNT_MASK, val);
+       ret = regmap_update_bits(syscon, offset, mask, val);
        if (ret)
                dev_err(dev, "failed to set link count\n");
 
@@ -290,11 +293,13 @@ static const struct j721e_pcie_data j721e_pcie_rc_data = {
        .quirk_retrain_flag = true,
        .byte_access_allowed = false,
        .linkdown_irq_regfield = LINK_DOWN,
+       .max_lanes = 2,
 };
 
 static const struct j721e_pcie_data j721e_pcie_ep_data = {
        .mode = PCI_MODE_EP,
        .linkdown_irq_regfield = LINK_DOWN,
+       .max_lanes = 2,
 };
 
 static const struct j721e_pcie_data j7200_pcie_rc_data = {
@@ -302,23 +307,41 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
        .quirk_detect_quiet_flag = true,
        .linkdown_irq_regfield = J7200_LINK_DOWN,
        .byte_access_allowed = true,
+       .max_lanes = 2,
 };
 
 static const struct j721e_pcie_data j7200_pcie_ep_data = {
        .mode = PCI_MODE_EP,
        .quirk_detect_quiet_flag = true,
        .quirk_disable_flr = true,
+       .max_lanes = 2,
 };
 
 static const struct j721e_pcie_data am64_pcie_rc_data = {
        .mode = PCI_MODE_RC,
        .linkdown_irq_regfield = J7200_LINK_DOWN,
        .byte_access_allowed = true,
+       .max_lanes = 1,
 };
 
 static const struct j721e_pcie_data am64_pcie_ep_data = {
        .mode = PCI_MODE_EP,
        .linkdown_irq_regfield = J7200_LINK_DOWN,
+       .max_lanes = 1,
+};
+
+static const struct j721e_pcie_data j784s4_pcie_rc_data = {
+       .mode = PCI_MODE_RC,
+       .quirk_retrain_flag = true,
+       .byte_access_allowed = false,
+       .linkdown_irq_regfield = LINK_DOWN,
+       .max_lanes = 4,
+};
+
+static const struct j721e_pcie_data j784s4_pcie_ep_data = {
+       .mode = PCI_MODE_EP,
+       .linkdown_irq_regfield = LINK_DOWN,
+       .max_lanes = 4,
 };
 
 static const struct of_device_id of_j721e_pcie_match[] = {
@@ -346,6 +369,14 @@ static const struct of_device_id of_j721e_pcie_match[] = {
                .compatible = "ti,am64-pcie-ep",
                .data = &am64_pcie_ep_data,
        },
+       {
+               .compatible = "ti,j784s4-pcie-host",
+               .data = &j784s4_pcie_rc_data,
+       },
+       {
+               .compatible = "ti,j784s4-pcie-ep",
+               .data = &j784s4_pcie_ep_data,
+       },
        {},
 };
 
@@ -432,9 +463,13 @@ static int j721e_pcie_probe(struct platform_device *pdev)
        pcie->user_cfg_base = base;
 
        ret = of_property_read_u32(node, "num-lanes", &num_lanes);
-       if (ret || num_lanes > MAX_LANES)
+       if (ret || num_lanes > data->max_lanes) {
+               dev_warn(dev, "num-lanes property not provided or invalid, setting num-lanes to 1\n");
                num_lanes = 1;
+       }
+
        pcie->num_lanes = num_lanes;
+       pcie->max_lanes = data->max_lanes;
 
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
                return -EINVAL;
index 3142feb8ac1951ee19055311d3fb19fb7bbeeed0..2d0a8d78bffb525f45e7939561f5427e82e6062e 100644 (file)
@@ -360,8 +360,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
        writel(0, ep->irq_cpu_addr + offset);
 }
 
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
-                                       u8 intx)
+static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+                                     u8 intx)
 {
        u16 cmd;
 
@@ -371,7 +371,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
 
        cdns_pcie_ep_assert_intx(ep, fn, intx, true);
        /*
-        * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
+        * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq()
         */
        mdelay(1);
        cdns_pcie_ep_assert_intx(ep, fn, intx, false);
@@ -532,25 +532,24 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
 }
 
 static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
-                                 enum pci_epc_irq_type type,
-                                 u16 interrupt_num)
+                                 unsigned int type, u16 interrupt_num)
 {
        struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
        struct cdns_pcie *pcie = &ep->pcie;
        struct device *dev = pcie->dev;
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
+       case PCI_IRQ_INTX:
                if (vfn > 0) {
-                       dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+                       dev_err(dev, "Cannot raise INTX interrupts for VF\n");
                        return -EINVAL;
                }
-               return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
+               return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);
 
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_MSI:
                return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
 
-       case PCI_EPC_IRQ_MSIX:
+       case PCI_IRQ_MSIX:
                return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
 
        default:
index 373cb50fcd159db425ee5833ea5c2c556e5c147e..03b96798f858cbad49485cb83016aeef95866d1a 100644 (file)
@@ -347,16 +347,16 @@ struct cdns_pcie_epf {
  * @max_regions: maximum number of regions supported by hardware
  * @ob_region_map: bitmask of mapped outbound regions
  * @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
  *                dedicated outbound regions is mapped.
  * @irq_cpu_addr: base address in the CPU space where a write access triggers
- *               the sending of a memory write (MSI) / normal message (legacy
+ *               the sending of a memory write (MSI) / normal message (INTX
  *               IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
  *               dedicated outbound region.
  * @irq_pci_fn: the latest PCI function that has updated the mapping of
- *             the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
+ *             the MSI/INTX IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted INTX IRQs.
  * @lock: spin lock to disable interrupts while modifying PCIe controller
  *        registers fields (RMW) accessible by both remote RC and EP to
  *        minimize time between read and write
@@ -374,7 +374,7 @@ struct cdns_pcie_ep {
        u64                     irq_pci_addr;
        u8                      irq_pci_fn;
        u8                      irq_pending;
-       /* protect writing to PCI_STATUS while raising legacy interrupts */
+       /* protect writing to PCI_STATUS while raising INTX interrupts */
        spinlock_t              lock;
        struct cdns_pcie_epf    *epf;
        unsigned int            quirk_detect_quiet_flag:1;
index 5ac021dbd46a4d5592c95d23b494650e9dfe3615..8afacc90c63b87a909ac26275e1b7fdf567924cc 100644 (file)
@@ -336,7 +336,7 @@ config PCI_EXYNOS
 config PCIE_FU740
        bool "SiFive FU740 PCIe controller"
        depends on PCI_MSI
-       depends on SOC_SIFIVE || COMPILE_TEST
+       depends on ARCH_SIFIVE || COMPILE_TEST
        select PCIE_DW_HOST
        help
          Say Y here if you want PCIe controller support for the SiFive
index b445ffe95e3f044576bf0b819b6584a44f556bea..0e406677060d3844dd219f859dd4d75f78fbf95b 100644 (file)
@@ -371,7 +371,7 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
-       .host_init = dra7xx_pcie_host_init,
+       .init = dra7xx_pcie_host_init,
 };
 
 static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -386,7 +386,7 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
        dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
 }
 
-static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_raise_intx_irq(struct dra7xx_pcie *dra7xx)
 {
        dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
        mdelay(1);
@@ -404,16 +404,16 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
 }
 
 static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                                enum pci_epc_irq_type type, u16 interrupt_num)
+                                unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               dra7xx_pcie_raise_legacy_irq(dra7xx);
+       case PCI_IRQ_INTX:
+               dra7xx_pcie_raise_intx_irq(dra7xx);
                break;
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_MSI:
                dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
                break;
        default:
@@ -436,7 +436,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
 }
 
 static const struct dw_pcie_ep_ops pcie_ep_ops = {
-       .ep_init = dra7xx_pcie_ep_init,
+       .init = dra7xx_pcie_ep_init,
        .raise_irq = dra7xx_pcie_raise_irq,
        .get_features = dra7xx_pcie_get_features,
 };
index c6bede3469320ed73de89eac502ca8f99e047455..a33fa98a252eff20dca7247c54019c5afd2d6930 100644 (file)
@@ -268,7 +268,7 @@ static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
-       .host_init = exynos_pcie_host_init,
+       .init = exynos_pcie_host_init,
 };
 
 static int exynos_add_pcie_port(struct exynos_pcie *ep,
@@ -375,7 +375,7 @@ fail_probe:
        return ret;
 }
 
-static int exynos_pcie_remove(struct platform_device *pdev)
+static void exynos_pcie_remove(struct platform_device *pdev)
 {
        struct exynos_pcie *ep = platform_get_drvdata(pdev);
 
@@ -385,8 +385,6 @@ static int exynos_pcie_remove(struct platform_device *pdev)
        phy_exit(ep->phy);
        exynos_pcie_deinit_clk_resources(ep);
        regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
-
-       return 0;
 }
 
 static int exynos_pcie_suspend_noirq(struct device *dev)
@@ -431,7 +429,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
 
 static struct platform_driver exynos_pcie_driver = {
        .probe          = exynos_pcie_probe,
-       .remove         = exynos_pcie_remove,
+       .remove_new     = exynos_pcie_remove,
        .driver = {
                .name   = "exynos-pcie",
                .of_match_table = exynos_pcie_of_match,
index 74703362aeec718f8b408790ebf0083bdd968d5c..dc2c036ab28cb64a2174a73b367cfe5016534123 100644 (file)
@@ -1039,8 +1039,8 @@ static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
-       .host_init = imx6_pcie_host_init,
-       .host_deinit = imx6_pcie_host_exit,
+       .init = imx6_pcie_host_init,
+       .deinit = imx6_pcie_host_exit,
 };
 
 static const struct dw_pcie_ops dw_pcie_ops = {
@@ -1058,17 +1058,16 @@ static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
 }
 
 static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                                 enum pci_epc_irq_type type,
-                                 u16 interrupt_num)
+                                 unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               return dw_pcie_ep_raise_legacy_irq(ep, func_no);
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_INTX:
+               return dw_pcie_ep_raise_intx_irq(ep, func_no);
+       case PCI_IRQ_MSI:
                return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
-       case PCI_EPC_IRQ_MSIX:
+       case PCI_IRQ_MSIX:
                return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
        default:
                dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -1093,7 +1092,7 @@ imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
 }
 
 static const struct dw_pcie_ep_ops pcie_ep_ops = {
-       .ep_init = imx6_pcie_ep_init,
+       .init = imx6_pcie_ep_init,
        .raise_irq = imx6_pcie_ep_raise_irq,
        .get_features = imx6_pcie_ep_get_features,
 };
index 0def919f89fafb51bc28109d26d23893506b804f..c0c62533a3f17688d4bf0ae34def85f77e19e2ba 100644 (file)
@@ -115,8 +115,7 @@ struct keystone_pcie {
        struct dw_pcie          *pci;
        /* PCI Device ID */
        u32                     device_id;
-       int                     legacy_host_irqs[PCI_NUM_INTX];
-       struct                  device_node *legacy_intc_np;
+       int                     intx_host_irqs[PCI_NUM_INTX];
 
        int                     msi_host_irq;
        int                     num_lanes;
@@ -124,7 +123,7 @@ struct keystone_pcie {
        struct phy              **phy;
        struct device_link      **link;
        struct                  device_node *msi_intc_np;
-       struct irq_domain       *legacy_irq_domain;
+       struct irq_domain       *intx_irq_domain;
        struct device_node      *np;
 
        /* Application register space */
@@ -252,8 +251,8 @@ static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
        return dw_pcie_allocate_domains(pp);
 }
 
-static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
-                                     int offset)
+static void ks_pcie_handle_intx_irq(struct keystone_pcie *ks_pcie,
+                                   int offset)
 {
        struct dw_pcie *pci = ks_pcie->pci;
        struct device *dev = pci->dev;
@@ -263,7 +262,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
 
        if (BIT(0) & pending) {
                dev_dbg(dev, ": irq: irq_offset %d", offset);
-               generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
+               generic_handle_domain_irq(ks_pcie->intx_irq_domain, offset);
        }
 
        /* EOI the INTx interrupt */
@@ -307,38 +306,37 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
        return IRQ_HANDLED;
 }
 
-static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+static void ks_pcie_ack_intx_irq(struct irq_data *d)
 {
 }
 
-static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+static void ks_pcie_mask_intx_irq(struct irq_data *d)
 {
 }
 
-static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+static void ks_pcie_unmask_intx_irq(struct irq_data *d)
 {
 }
 
-static struct irq_chip ks_pcie_legacy_irq_chip = {
-       .name = "Keystone-PCI-Legacy-IRQ",
-       .irq_ack = ks_pcie_ack_legacy_irq,
-       .irq_mask = ks_pcie_mask_legacy_irq,
-       .irq_unmask = ks_pcie_unmask_legacy_irq,
+static struct irq_chip ks_pcie_intx_irq_chip = {
+       .name = "Keystone-PCI-INTX-IRQ",
+       .irq_ack = ks_pcie_ack_intx_irq,
+       .irq_mask = ks_pcie_mask_intx_irq,
+       .irq_unmask = ks_pcie_unmask_intx_irq,
 };
 
-static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
-                                      unsigned int irq,
-                                      irq_hw_number_t hw_irq)
+static int ks_pcie_init_intx_irq_map(struct irq_domain *d,
+                                    unsigned int irq, irq_hw_number_t hw_irq)
 {
-       irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+       irq_set_chip_and_handler(irq, &ks_pcie_intx_irq_chip,
                                 handle_level_irq);
        irq_set_chip_data(irq, d->host_data);
 
        return 0;
 }
 
-static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
-       .map = ks_pcie_init_legacy_irq_map,
+static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
+       .map = ks_pcie_init_intx_irq_map,
        .xlate = irq_domain_xlate_onetwocell,
 };
 
@@ -605,22 +603,22 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
 }
 
 /**
- * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * ks_pcie_intx_irq_handler() - Handle INTX interrupt
  * @desc: Pointer to irq descriptor
  *
- * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * Traverse through pending INTX interrupts and invoke handler for each. Also
  * takes care of interrupt controller level mask/ack operation.
  */
-static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
+static void ks_pcie_intx_irq_handler(struct irq_desc *desc)
 {
        unsigned int irq = irq_desc_get_irq(desc);
        struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
        struct dw_pcie *pci = ks_pcie->pci;
        struct device *dev = pci->dev;
-       u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+       u32 irq_offset = irq - ks_pcie->intx_host_irqs[0];
        struct irq_chip *chip = irq_desc_get_chip(desc);
 
-       dev_dbg(dev, ": Handling legacy irq %d\n", irq);
+       dev_dbg(dev, ": Handling INTX irq %d\n", irq);
 
        /*
         * The chained irq handler installation would have replaced normal
@@ -628,7 +626,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
         * ack operation.
         */
        chained_irq_enter(chip, desc);
-       ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+       ks_pcie_handle_intx_irq(ks_pcie, irq_offset);
        chained_irq_exit(chip, desc);
 }
 
@@ -686,10 +684,10 @@ err:
        return ret;
 }
 
-static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
+static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
 {
        struct device *dev = ks_pcie->pci->dev;
-       struct irq_domain *legacy_irq_domain;
+       struct irq_domain *intx_irq_domain;
        struct device_node *np = ks_pcie->np;
        struct device_node *intc_np;
        int irq_count, irq, ret = 0, i;
@@ -697,7 +695,7 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
        intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
        if (!intc_np) {
                /*
-                * Since legacy interrupts are modeled as edge-interrupts in
+                * Since INTX interrupts are modeled as edge-interrupts in
                 * AM6, keep it disabled for now.
                 */
                if (ks_pcie->is_am6)
@@ -719,22 +717,21 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
                        ret = -EINVAL;
                        goto err;
                }
-               ks_pcie->legacy_host_irqs[i] = irq;
+               ks_pcie->intx_host_irqs[i] = irq;
 
                irq_set_chained_handler_and_data(irq,
-                                                ks_pcie_legacy_irq_handler,
+                                                ks_pcie_intx_irq_handler,
                                                 ks_pcie);
        }
 
-       legacy_irq_domain =
-               irq_domain_add_linear(intc_np, PCI_NUM_INTX,
-                                     &ks_pcie_legacy_irq_domain_ops, NULL);
-       if (!legacy_irq_domain) {
-               dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+       intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+                                       &ks_pcie_intx_irq_domain_ops, NULL);
+       if (!intx_irq_domain) {
+               dev_err(dev, "Failed to add irq domain for INTX irqs\n");
                ret = -EINVAL;
                goto err;
        }
-       ks_pcie->legacy_irq_domain = legacy_irq_domain;
+       ks_pcie->intx_irq_domain = intx_irq_domain;
 
        for (i = 0; i < PCI_NUM_INTX; i++)
                ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
@@ -808,7 +805,7 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
        if (!ks_pcie->is_am6)
                pp->bridge->child_ops = &ks_child_pcie_ops;
 
-       ret = ks_pcie_config_legacy_irq(ks_pcie);
+       ret = ks_pcie_config_intx_irq(ks_pcie);
        if (ret)
                return ret;
 
@@ -838,12 +835,12 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops ks_pcie_host_ops = {
-       .host_init = ks_pcie_host_init,
-       .msi_host_init = ks_pcie_msi_host_init,
+       .init = ks_pcie_host_init,
+       .msi_init = ks_pcie_msi_host_init,
 };
 
 static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
-       .host_init = ks_pcie_host_init,
+       .init = ks_pcie_host_init,
 };
 
 static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
@@ -881,7 +878,7 @@ static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
 }
 
-static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+static void ks_pcie_am654_raise_intx_irq(struct keystone_pcie *ks_pcie)
 {
        struct dw_pcie *pci = ks_pcie->pci;
        u8 int_pin;
@@ -900,20 +897,19 @@ static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
 }
 
 static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                                  enum pci_epc_irq_type type,
-                                  u16 interrupt_num)
+                                  unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               ks_pcie_am654_raise_legacy_irq(ks_pcie);
+       case PCI_IRQ_INTX:
+               ks_pcie_am654_raise_intx_irq(ks_pcie);
                break;
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_MSI:
                dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
                break;
-       case PCI_EPC_IRQ_MSIX:
+       case PCI_IRQ_MSIX:
                dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
                break;
        default:
@@ -944,7 +940,7 @@ ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
 }
 
 static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
-       .ep_init = ks_pcie_am654_ep_init,
+       .init = ks_pcie_am654_ep_init,
        .raise_irq = ks_pcie_am654_raise_irq,
        .get_features = &ks_pcie_am654_get_features,
 };
@@ -1218,7 +1214,16 @@ static int ks_pcie_probe(struct platform_device *pdev)
                goto err_link;
        }
 
+       /* Obtain references to the PHYs */
+       for (i = 0; i < num_lanes; i++)
+               phy_pm_runtime_get_sync(ks_pcie->phy[i]);
+
        ret = ks_pcie_enable_phy(ks_pcie);
+
+       /* Release references to the PHYs */
+       for (i = 0; i < num_lanes; i++)
+               phy_pm_runtime_put_sync(ks_pcie->phy[i]);
+
        if (ret) {
                dev_err(dev, "failed to enable phy\n");
                goto err_link;
@@ -1302,7 +1307,7 @@ err_link:
        return ret;
 }
 
-static int ks_pcie_remove(struct platform_device *pdev)
+static void ks_pcie_remove(struct platform_device *pdev)
 {
        struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
        struct device_link **link = ks_pcie->link;
@@ -1314,13 +1319,11 @@ static int ks_pcie_remove(struct platform_device *pdev)
        ks_pcie_disable_phy(ks_pcie);
        while (num_lanes--)
                device_link_del(link[num_lanes]);
-
-       return 0;
 }
 
 static struct platform_driver ks_pcie_driver = {
        .probe  = ks_pcie_probe,
-       .remove = ks_pcie_remove,
+       .remove_new = ks_pcie_remove,
        .driver = {
                .name   = "keystone-pcie",
                .of_match_table = ks_pcie_of_match,
index 3d3c50ef4b6ff5b21a017f4dcc3d2800c28f8354..2e398494e7c0ca625fee3040b096c73c01c85b57 100644 (file)
@@ -49,7 +49,7 @@ struct ls_pcie_ep {
        bool                            big_endian;
 };
 
-static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
+static u32 ls_pcie_pf_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
 {
        struct dw_pcie *pci = pcie->pci;
 
@@ -59,7 +59,7 @@ static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
                return ioread32(pci->dbi_base + offset);
 }
 
-static void ls_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
+static void ls_pcie_pf_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
 {
        struct dw_pcie *pci = pcie->pci;
 
@@ -76,8 +76,8 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
        u32 val, cfg;
        u8 offset;
 
-       val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR);
-       ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
+       val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_DR);
+       ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
 
        if (!val)
                return IRQ_NONE;
@@ -96,9 +96,9 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
                dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap);
                dw_pcie_dbi_ro_wr_dis(pci);
 
-               cfg = ls_lut_readl(pcie, PEX_PF0_CONFIG);
+               cfg = ls_pcie_pf_lut_readl(pcie, PEX_PF0_CONFIG);
                cfg |= PEX_PF0_CFG_READY;
-               ls_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
+               ls_pcie_pf_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
                dw_pcie_ep_linkup(&pci->ep);
 
                dev_dbg(pci->dev, "Link up\n");
@@ -130,10 +130,10 @@ static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie,
        }
 
        /* Enable interrupts */
-       val = ls_lut_readl(pcie, PEX_PF0_PME_MES_IER);
+       val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_IER);
        val |=  PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE |
                PEX_PF0_PME_MES_IER_LUDIE;
-       ls_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
+       ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
 
        return 0;
 }
@@ -166,16 +166,16 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
 }
 
 static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                               enum pci_epc_irq_type type, u16 interrupt_num)
+                               unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               return dw_pcie_ep_raise_legacy_irq(ep, func_no);
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_INTX:
+               return dw_pcie_ep_raise_intx_irq(ep, func_no);
+       case PCI_IRQ_MSI:
                return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
-       case PCI_EPC_IRQ_MSIX:
+       case PCI_IRQ_MSIX:
                return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
                                                          interrupt_num);
        default:
@@ -184,8 +184,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
        }
 }
 
-static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
-                                               u8 func_no)
+static unsigned int ls_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, u8 func_no)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
@@ -195,10 +194,10 @@ static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
 }
 
 static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
-       .ep_init = ls_pcie_ep_init,
+       .init = ls_pcie_ep_init,
        .raise_irq = ls_pcie_ep_raise_irq,
        .get_features = ls_pcie_ep_get_features,
-       .func_conf_select = ls_pcie_ep_func_conf_select,
+       .get_dbi_offset = ls_pcie_ep_get_dbi_offset,
 };
 
 static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
index 37956e09c65bd51c95937e25b982f6e00a0491d4..ee6f5256813374bdf656bef4f9b96e1b8760d1b5 100644 (file)
 #define PF_MCR_PTOMR           BIT(0)
 #define PF_MCR_EXL2S           BIT(1)
 
+/* LS1021A PEXn PM Write Control Register */
+#define SCFG_PEXPMWRCR(idx)    (0x5c + (idx) * 0x64)
+#define PMXMTTURNOFF           BIT(31)
+#define SCFG_PEXSFTRSTCR       0x190
+#define PEXSR(idx)             BIT(idx)
+
+/* LS1043A PEX PME control register */
+#define SCFG_PEXPMECR          0x144
+#define PEXPME(idx)            BIT(31 - (idx) * 4)
+
+/* LS1043A PEX LUT debug register */
+#define LS_PCIE_LDBG   0x7fc
+#define LDBG_SR                BIT(30)
+#define LDBG_WE                BIT(31)
+
 #define PCIE_IATU_NUM          6
 
 struct ls_pcie_drvdata {
-       const u32 pf_off;
+       const u32 pf_lut_off;
+       const struct dw_pcie_host_ops *ops;
+       int (*exit_from_l2)(struct dw_pcie_rp *pp);
+       bool scfg_support;
        bool pm_support;
 };
 
 struct ls_pcie {
        struct dw_pcie *pci;
        const struct ls_pcie_drvdata *drvdata;
-       void __iomem *pf_base;
+       void __iomem *pf_lut_base;
+       struct regmap *scfg;
+       int index;
        bool big_endian;
 };
 
-#define ls_pcie_pf_readl_addr(addr)    ls_pcie_pf_readl(pcie, addr)
+#define ls_pcie_pf_lut_readl_addr(addr)        ls_pcie_pf_lut_readl(pcie, addr)
 #define to_ls_pcie(x)  dev_get_drvdata((x)->dev)
 
 static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
@@ -90,20 +110,20 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
        iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
 }
 
-static u32 ls_pcie_pf_readl(struct ls_pcie *pcie, u32 off)
+static u32 ls_pcie_pf_lut_readl(struct ls_pcie *pcie, u32 off)
 {
        if (pcie->big_endian)
-               return ioread32be(pcie->pf_base + off);
+               return ioread32be(pcie->pf_lut_base + off);
 
-       return ioread32(pcie->pf_base + off);
+       return ioread32(pcie->pf_lut_base + off);
 }
 
-static void ls_pcie_pf_writel(struct ls_pcie *pcie, u32 off, u32 val)
+static void ls_pcie_pf_lut_writel(struct ls_pcie *pcie, u32 off, u32 val)
 {
        if (pcie->big_endian)
-               iowrite32be(val, pcie->pf_base + off);
+               iowrite32be(val, pcie->pf_lut_base + off);
        else
-               iowrite32(val, pcie->pf_base + off);
+               iowrite32(val, pcie->pf_lut_base + off);
 }
 
 static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
@@ -113,11 +133,11 @@ static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
        u32 val;
        int ret;
 
-       val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR);
+       val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
        val |= PF_MCR_PTOMR;
-       ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val);
+       ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);
 
-       ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR,
+       ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
                                 val, !(val & PF_MCR_PTOMR),
                                 PCIE_PME_TO_L2_TIMEOUT_US/10,
                                 PCIE_PME_TO_L2_TIMEOUT_US);
@@ -125,7 +145,7 @@ static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
                dev_err(pcie->pci->dev, "PME_Turn_off timeout\n");
 }
 
-static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+static int ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
 {
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct ls_pcie *pcie = to_ls_pcie(pci);
@@ -136,20 +156,22 @@ static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
         * Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link
         * to exit L2 state.
         */
-       val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR);
+       val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
        val |= PF_MCR_EXL2S;
-       ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val);
+       ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);
 
        /*
         * L2 exit timeout of 10ms is not defined in the specifications,
         * it was chosen based on empirical observations.
         */
-       ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR,
+       ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
                                 val, !(val & PF_MCR_EXL2S),
                                 1000,
                                 10000);
        if (ret)
                dev_err(pcie->pci->dev, "L2 exit timeout\n");
+
+       return ret;
 }
 
 static int ls_pcie_host_init(struct dw_pcie_rp *pp)
@@ -168,25 +190,130 @@ static int ls_pcie_host_init(struct dw_pcie_rp *pp)
        return 0;
 }
 
+static void scfg_pcie_send_turnoff_msg(struct regmap *scfg, u32 reg, u32 mask)
+{
+       /* Send PME_Turn_Off message */
+       regmap_write_bits(scfg, reg, mask, mask);
+
+       /*
+        * There is no specific register to check for PME_To_Ack from endpoint.
+        * So on the safe side, wait for PCIE_PME_TO_L2_TIMEOUT_US.
+        */
+       mdelay(PCIE_PME_TO_L2_TIMEOUT_US/1000);
+
+       /*
+        * Layerscape hardware reference manual recommends clearing the PMXMTTURNOFF bit
+        * to complete the PME_Turn_Off handshake.
+        */
+       regmap_write_bits(scfg, reg, mask, 0);
+}
+
+static void ls1021a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+       struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+       struct ls_pcie *pcie = to_ls_pcie(pci);
+
+       scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), PMXMTTURNOFF);
+}
+
+static int scfg_pcie_exit_from_l2(struct regmap *scfg, u32 reg, u32 mask)
+{
+       /* Reset the PEX wrapper to bring the link out of L2 */
+       regmap_write_bits(scfg, reg, mask, mask);
+       regmap_write_bits(scfg, reg, mask, 0);
+
+       return 0;
+}
+
+static int ls1021a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+       struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+       struct ls_pcie *pcie = to_ls_pcie(pci);
+
+       return scfg_pcie_exit_from_l2(pcie->scfg, SCFG_PEXSFTRSTCR, PEXSR(pcie->index));
+}
+
+static void ls1043a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+       struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+       struct ls_pcie *pcie = to_ls_pcie(pci);
+
+       scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMECR, PEXPME(pcie->index));
+}
+
+static int ls1043a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+       struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+       struct ls_pcie *pcie = to_ls_pcie(pci);
+       u32 val;
+
+       /*
+        * Reset the PEX wrapper to bring the link out of L2.
+        * LDBG_WE: allows the user to have write access to the PEXDBG[SR] for both setting and
+        *          clearing the soft reset on the PEX module.
+        * LDBG_SR: When SR is set to 1, the PEX module enters soft reset.
+        */
+       val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+       val |= LDBG_WE;
+       ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+       val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+       val |= LDBG_SR;
+       ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+       val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+       val &= ~LDBG_SR;
+       ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+       val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+       val &= ~LDBG_WE;
+       ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+       return 0;
+}
+
 static const struct dw_pcie_host_ops ls_pcie_host_ops = {
-       .host_init = ls_pcie_host_init,
+       .init = ls_pcie_host_init,
        .pme_turn_off = ls_pcie_send_turnoff_msg,
 };
 
+static const struct dw_pcie_host_ops ls1021a_pcie_host_ops = {
+       .init = ls_pcie_host_init,
+       .pme_turn_off = ls1021a_pcie_send_turnoff_msg,
+};
+
 static const struct ls_pcie_drvdata ls1021a_drvdata = {
-       .pm_support = false,
+       .pm_support = true,
+       .scfg_support = true,
+       .ops = &ls1021a_pcie_host_ops,
+       .exit_from_l2 = ls1021a_pcie_exit_from_l2,
+};
+
+static const struct dw_pcie_host_ops ls1043a_pcie_host_ops = {
+       .init = ls_pcie_host_init,
+       .pme_turn_off = ls1043a_pcie_send_turnoff_msg,
+};
+
+static const struct ls_pcie_drvdata ls1043a_drvdata = {
+       .pf_lut_off = 0x10000,
+       .pm_support = true,
+       .scfg_support = true,
+       .ops = &ls1043a_pcie_host_ops,
+       .exit_from_l2 = ls1043a_pcie_exit_from_l2,
 };
 
 static const struct ls_pcie_drvdata layerscape_drvdata = {
-       .pf_off = 0xc0000,
+       .pf_lut_off = 0xc0000,
        .pm_support = true,
+       .ops = &ls_pcie_host_ops,
+       .exit_from_l2 = ls_pcie_exit_from_l2,
 };
 
 static const struct of_device_id ls_pcie_of_match[] = {
        { .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata },
        { .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata },
        { .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata },
-       { .compatible = "fsl,ls1043a-pcie", .data = &ls1021a_drvdata },
+       { .compatible = "fsl,ls1043a-pcie", .data = &ls1043a_drvdata },
        { .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata },
        { .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata },
        { .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata },
@@ -201,6 +328,8 @@ static int ls_pcie_probe(struct platform_device *pdev)
        struct dw_pcie *pci;
        struct ls_pcie *pcie;
        struct resource *dbi_base;
+       u32 index[2];
+       int ret;
 
        pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
        if (!pcie)
@@ -213,9 +342,8 @@ static int ls_pcie_probe(struct platform_device *pdev)
        pcie->drvdata = of_device_get_match_data(dev);
 
        pci->dev = dev;
-       pci->pp.ops = &ls_pcie_host_ops;
-
        pcie->pci = pci;
+       pci->pp.ops = pcie->drvdata->ops;
 
        dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
@@ -224,7 +352,21 @@ static int ls_pcie_probe(struct platform_device *pdev)
 
        pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
 
-       pcie->pf_base = pci->dbi_base + pcie->drvdata->pf_off;
+       pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;
+
+       if (pcie->drvdata->scfg_support) {
+               pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg");
+               if (IS_ERR(pcie->scfg)) {
+                       dev_err(dev, "No syscfg phandle specified\n");
+                       return PTR_ERR(pcie->scfg);
+               }
+
+               ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2);
+               if (ret)
+                       return ret;
+
+               pcie->index = index[1];
+       }
 
        if (!ls_pcie_is_bridge(pcie))
                return -ENODEV;
@@ -247,11 +389,14 @@ static int ls_pcie_suspend_noirq(struct device *dev)
 static int ls_pcie_resume_noirq(struct device *dev)
 {
        struct ls_pcie *pcie = dev_get_drvdata(dev);
+       int ret;
 
        if (!pcie->drvdata->pm_support)
                return 0;
 
-       ls_pcie_exit_from_l2(&pcie->pci->pp);
+       ret = pcie->drvdata->exit_from_l2(&pcie->pci->pp);
+       if (ret)
+               return ret;
 
        return dw_pcie_resume_noirq(pcie->pci);
 }
index 407558f5d74acdb730105b25f3452422fe9e9a33..6477c83262c20632d91a22349a2082e8327d1b65 100644 (file)
@@ -389,7 +389,7 @@ static int meson_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops meson_pcie_host_ops = {
-       .host_init = meson_pcie_host_init,
+       .init = meson_pcie_host_init,
 };
 
 static const struct dw_pcie_ops dw_pcie_ops = {
index b8cb77c9c4bd2c86498ffa6c7089935bb3b5292f..6dfdda59f32836a21b10b4c6f88d3746d1e1462a 100644 (file)
@@ -311,7 +311,7 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops al_pcie_host_ops = {
-       .host_init = al_pcie_host_init,
+       .init = al_pcie_host_init,
 };
 
 static int al_pcie_probe(struct platform_device *pdev)
index 5c999e15c357f0a598d631d094afb3818dd708d0..b5c599ccaacf0c4d1e834582dc4c97deb43114c6 100644 (file)
@@ -225,7 +225,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
 }
 
 static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
-       .host_init = armada8k_pcie_host_init,
+       .init = armada8k_pcie_host_init,
 };
 
 static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
index 9b572a2b2c9a54cac43ef0afeb888133505d0d7d..9ed0a9ba761920b641808592210de6513a68ccfd 100644 (file)
@@ -333,7 +333,7 @@ static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
-       .host_init = artpec6_pcie_host_init,
+       .init = artpec6_pcie_host_init,
 };
 
 static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -352,15 +352,15 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
 }
 
 static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                                 enum pci_epc_irq_type type, u16 interrupt_num)
+                                 unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+       case PCI_IRQ_INTX:
+               dev_err(pci->dev, "EP cannot trigger INTx IRQs\n");
                return -EINVAL;
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_MSI:
                return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
        default:
                dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -370,7 +370,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
 }
 
 static const struct dw_pcie_ep_ops pcie_ep_ops = {
-       .ep_init = artpec6_pcie_ep_init,
+       .init = artpec6_pcie_ep_init,
        .raise_irq = artpec6_pcie_raise_irq,
 };
 
index 17e696797ff50f92116190059310a4e0ae8e170e..76d0ddea80075716a796c5d625cdd51f8cd8afd2 100644 (file)
@@ -559,8 +559,8 @@ static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
-       .host_init = bt1_pcie_host_init,
-       .host_deinit = bt1_pcie_host_deinit,
+       .init = bt1_pcie_host_init,
+       .deinit = bt1_pcie_host_deinit,
 };
 
 static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
index f6207989fc6ad2e6fcbc377c881fe2c92895f0a3..9a437cfce073c16996927af40fa0e3816b7c7b32 100644 (file)
@@ -6,6 +6,7 @@
  * Author: Kishon Vijay Abraham I <kishon@ti.com>
  */
 
+#include <linux/align.h>
 #include <linux/bitfield.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -43,46 +44,19 @@ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
        return NULL;
 }
 
-static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no)
-{
-       unsigned int func_offset = 0;
-
-       if (ep->ops->func_conf_select)
-               func_offset = ep->ops->func_conf_select(ep, func_no);
-
-       return func_offset;
-}
-
-static unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep, u8 func_no)
-{
-       unsigned int dbi2_offset = 0;
-
-       if (ep->ops->get_dbi2_offset)
-               dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no);
-       else if (ep->ops->func_conf_select)     /* for backward compatibility */
-               dbi2_offset = ep->ops->func_conf_select(ep, func_no);
-
-       return dbi2_offset;
-}
-
 static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
                                   enum pci_barno bar, int flags)
 {
-       unsigned int func_offset, dbi2_offset;
        struct dw_pcie_ep *ep = &pci->ep;
-       u32 reg, reg_dbi2;
-
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
-       dbi2_offset = dw_pcie_ep_get_dbi2_offset(ep, func_no);
+       u32 reg;
 
-       reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
-       reg_dbi2 = dbi2_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
+       reg = PCI_BASE_ADDRESS_0 + (4 * bar);
        dw_pcie_dbi_ro_wr_en(pci);
-       dw_pcie_writel_dbi2(pci, reg_dbi2, 0x0);
-       dw_pcie_writel_dbi(pci, reg, 0x0);
+       dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
+       dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
        if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
-               dw_pcie_writel_dbi2(pci, reg_dbi2 + 4, 0x0);
-               dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+               dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
+               dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
        }
        dw_pcie_dbi_ro_wr_dis(pci);
 }
@@ -99,19 +73,15 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
 EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
 
 static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
-               u8 cap_ptr, u8 cap)
+                                    u8 cap_ptr, u8 cap)
 {
-       struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-       unsigned int func_offset = 0;
        u8 cap_id, next_cap_ptr;
        u16 reg;
 
        if (!cap_ptr)
                return 0;
 
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
-
-       reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
+       reg = dw_pcie_ep_readw_dbi(ep, func_no, cap_ptr);
        cap_id = (reg & 0x00ff);
 
        if (cap_id > PCI_CAP_ID_MAX)
@@ -126,14 +96,10 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
 
 static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
 {
-       struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-       unsigned int func_offset = 0;
        u8 next_cap_ptr;
        u16 reg;
 
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
-
-       reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST);
+       reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST);
        next_cap_ptr = (reg & 0x00ff);
 
        return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
@@ -144,24 +110,21 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 {
        struct dw_pcie_ep *ep = epc_get_drvdata(epc);
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-       unsigned int func_offset = 0;
-
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
 
        dw_pcie_dbi_ro_wr_en(pci);
-       dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid);
-       dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid);
-       dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid);
-       dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code);
-       dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,
-                          hdr->subclass_code | hdr->baseclass_code << 8);
-       dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,
-                          hdr->cache_line_size);
-       dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,
-                          hdr->subsys_vendor_id);
-       dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id);
-       dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,
-                          hdr->interrupt_pin);
+       dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
+       dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
+       dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
+       dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
+       dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
+                             hdr->subclass_code | hdr->baseclass_code << 8);
+       dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
+                             hdr->cache_line_size);
+       dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
+                             hdr->subsys_vendor_id);
+       dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+       dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
+                             hdr->interrupt_pin);
        dw_pcie_dbi_ro_wr_dis(pci);
 
        return 0;
@@ -243,18 +206,13 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 {
        struct dw_pcie_ep *ep = epc_get_drvdata(epc);
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-       unsigned int func_offset, dbi2_offset;
        enum pci_barno bar = epf_bar->barno;
        size_t size = epf_bar->size;
        int flags = epf_bar->flags;
-       u32 reg, reg_dbi2;
        int ret, type;
+       u32 reg;
 
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
-       dbi2_offset = dw_pcie_ep_get_dbi2_offset(ep, func_no);
-
-       reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
-       reg_dbi2 = PCI_BASE_ADDRESS_0 + (4 * bar) + dbi2_offset;
+       reg = PCI_BASE_ADDRESS_0 + (4 * bar);
 
        if (!(flags & PCI_BASE_ADDRESS_SPACE))
                type = PCIE_ATU_TYPE_MEM;
@@ -270,12 +228,12 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 
        dw_pcie_dbi_ro_wr_en(pci);
 
-       dw_pcie_writel_dbi2(pci, reg_dbi2, lower_32_bits(size - 1));
-       dw_pcie_writel_dbi(pci, reg, flags);
+       dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
+       dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
 
        if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
-               dw_pcie_writel_dbi2(pci, reg_dbi2 + 4, upper_32_bits(size - 1));
-               dw_pcie_writel_dbi(pci, reg + 4, 0);
+               dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
+               dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
        }
 
        ep->epf_bar[bar] = epf_bar;
@@ -335,19 +293,15 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
 {
        struct dw_pcie_ep *ep = epc_get_drvdata(epc);
-       struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-       u32 val, reg;
-       unsigned int func_offset = 0;
        struct dw_pcie_ep_func *ep_func;
+       u32 val, reg;
 
        ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
        if (!ep_func || !ep_func->msi_cap)
                return -EINVAL;
 
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
-
-       reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
-       val = dw_pcie_readw_dbi(pci, reg);
+       reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+       val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
        if (!(val & PCI_MSI_FLAGS_ENABLE))
                return -EINVAL;
 
@@ -361,22 +315,19 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 {
        struct dw_pcie_ep *ep = epc_get_drvdata(epc);
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-       u32 val, reg;
-       unsigned int func_offset = 0;
        struct dw_pcie_ep_func *ep_func;
+       u32 val, reg;
 
        ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
        if (!ep_func || !ep_func->msi_cap)
                return -EINVAL;
 
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
-
-       reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
-       val = dw_pcie_readw_dbi(pci, reg);
+       reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+       val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
        val &= ~PCI_MSI_FLAGS_QMASK;
        val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts);
        dw_pcie_dbi_ro_wr_en(pci);
-       dw_pcie_writew_dbi(pci, reg, val);
+       dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
        dw_pcie_dbi_ro_wr_dis(pci);
 
        return 0;
@@ -385,19 +336,15 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
 {
        struct dw_pcie_ep *ep = epc_get_drvdata(epc);
-       struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-       u32 val, reg;
-       unsigned int func_offset = 0;
        struct dw_pcie_ep_func *ep_func;
+       u32 val, reg;
 
        ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
        if (!ep_func || !ep_func->msix_cap)
                return -EINVAL;
 
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
-
-       reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
-       val = dw_pcie_readw_dbi(pci, reg);
+       reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+       val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
        if (!(val & PCI_MSIX_FLAGS_ENABLE))
                return -EINVAL;
 
@@ -411,9 +358,8 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 {
        struct dw_pcie_ep *ep = epc_get_drvdata(epc);
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-       u32 val, reg;
-       unsigned int func_offset = 0;
        struct dw_pcie_ep_func *ep_func;
+       u32 val, reg;
 
        ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
        if (!ep_func || !ep_func->msix_cap)
@@ -421,21 +367,19 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 
        dw_pcie_dbi_ro_wr_en(pci);
 
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
-
-       reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
-       val = dw_pcie_readw_dbi(pci, reg);
+       reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+       val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
        val &= ~PCI_MSIX_FLAGS_QSIZE;
        val |= interrupts;
        dw_pcie_writew_dbi(pci, reg, val);
 
-       reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
+       reg = ep_func->msix_cap + PCI_MSIX_TABLE;
        val = offset | bir;
-       dw_pcie_writel_dbi(pci, reg, val);
+       dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
 
-       reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;
+       reg = ep_func->msix_cap + PCI_MSIX_PBA;
        val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
-       dw_pcie_writel_dbi(pci, reg, val);
+       dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
 
        dw_pcie_dbi_ro_wr_dis(pci);
 
@@ -443,7 +387,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
 }
 
 static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
-                               enum pci_epc_irq_type type, u16 interrupt_num)
+                               unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 
@@ -496,56 +440,53 @@ static const struct pci_epc_ops epc_ops = {
        .get_features           = dw_pcie_ep_get_features,
 };
 
-int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        struct device *dev = pci->dev;
 
-       dev_err(dev, "EP cannot trigger legacy IRQs\n");
+       dev_err(dev, "EP cannot raise INTX IRQs\n");
 
        return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
 
 int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
                             u8 interrupt_num)
 {
-       struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+       u32 msg_addr_lower, msg_addr_upper, reg;
        struct dw_pcie_ep_func *ep_func;
        struct pci_epc *epc = ep->epc;
        unsigned int aligned_offset;
-       unsigned int func_offset = 0;
        u16 msg_ctrl, msg_data;
-       u32 msg_addr_lower, msg_addr_upper, reg;
-       u64 msg_addr;
        bool has_upper;
+       u64 msg_addr;
        int ret;
 
        ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
        if (!ep_func || !ep_func->msi_cap)
                return -EINVAL;
 
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
-
        /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
-       reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
-       msg_ctrl = dw_pcie_readw_dbi(pci, reg);
+       reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+       msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
        has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
-       reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;
-       msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
+       reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
+       msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
        if (has_upper) {
-               reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;
-               msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
-               reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;
-               msg_data = dw_pcie_readw_dbi(pci, reg);
+               reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
+               msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
+               reg = ep_func->msi_cap + PCI_MSI_DATA_64;
+               msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
        } else {
                msg_addr_upper = 0;
-               reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;
-               msg_data = dw_pcie_readw_dbi(pci, reg);
+               reg = ep_func->msi_cap + PCI_MSI_DATA_32;
+               msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
        }
-       aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
-       msg_addr = ((u64)msg_addr_upper) << 32 |
-                       (msg_addr_lower & ~aligned_offset);
+       msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
+
+       aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+       msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
        ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
                                  epc->mem->window.page_size);
        if (ret)
@@ -582,10 +523,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
                              u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-       struct dw_pcie_ep_func *ep_func;
        struct pci_epf_msix_tbl *msix_tbl;
+       struct dw_pcie_ep_func *ep_func;
        struct pci_epc *epc = ep->epc;
-       unsigned int func_offset = 0;
        u32 reg, msg_data, vec_ctrl;
        unsigned int aligned_offset;
        u32 tbl_offset;
@@ -597,10 +537,8 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
        if (!ep_func || !ep_func->msix_cap)
                return -EINVAL;
 
-       func_offset = dw_pcie_ep_func_select(ep, func_no);
-
-       reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
-       tbl_offset = dw_pcie_readl_dbi(pci, reg);
+       reg = ep_func->msix_cap + PCI_MSIX_TABLE;
+       tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
        bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
        tbl_offset &= PCI_MSIX_TABLE_OFFSET;
 
@@ -615,6 +553,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
        }
 
        aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+       msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
        ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
                                  epc->mem->window.page_size);
        if (ret)
@@ -794,8 +733,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
                list_add_tail(&ep_func->list, &ep->func_list);
        }
 
-       if (ep->ops->ep_init)
-               ep->ops->ep_init(ep);
+       if (ep->ops->init)
+               ep->ops->init(ep);
 
        ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
                               ep->page_size);
index 7991f0e179b215a46726f6d8f2693409d49358a5..d5fc31f8345f70f587a2052db681f3b023d08b59 100644 (file)
@@ -441,14 +441,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
        bridge->ops = &dw_pcie_ops;
        bridge->child_ops = &dw_child_pcie_ops;
 
-       if (pp->ops->host_init) {
-               ret = pp->ops->host_init(pp);
+       if (pp->ops->init) {
+               ret = pp->ops->init(pp);
                if (ret)
                        return ret;
        }
 
        if (pci_msi_enabled()) {
-               pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
+               pp->has_msi_ctrl = !(pp->ops->msi_init ||
                                     of_property_read_bool(np, "msi-parent") ||
                                     of_property_read_bool(np, "msi-map"));
 
@@ -464,8 +464,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
                        goto err_deinit_host;
                }
 
-               if (pp->ops->msi_host_init) {
-                       ret = pp->ops->msi_host_init(pp);
+               if (pp->ops->msi_init) {
+                       ret = pp->ops->msi_init(pp);
                        if (ret < 0)
                                goto err_deinit_host;
                } else if (pp->has_msi_ctrl) {
@@ -502,8 +502,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
        if (ret)
                goto err_stop_link;
 
-       if (pp->ops->host_post_init)
-               pp->ops->host_post_init(pp);
+       if (pp->ops->post_init)
+               pp->ops->post_init(pp);
 
        return 0;
 
@@ -518,8 +518,8 @@ err_free_msi:
                dw_pcie_free_msi(pp);
 
 err_deinit_host:
-       if (pp->ops->host_deinit)
-               pp->ops->host_deinit(pp);
+       if (pp->ops->deinit)
+               pp->ops->deinit(pp);
 
        return ret;
 }
@@ -539,8 +539,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
        if (pp->has_msi_ctrl)
                dw_pcie_free_msi(pp);
 
-       if (pp->ops->host_deinit)
-               pp->ops->host_deinit(pp);
+       if (pp->ops->deinit)
+               pp->ops->deinit(pp);
 }
 EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
 
@@ -842,8 +842,8 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
                return ret;
        }
 
-       if (pci->pp.ops->host_deinit)
-               pci->pp.ops->host_deinit(&pci->pp);
+       if (pci->pp.ops->deinit)
+               pci->pp.ops->deinit(&pci->pp);
 
        pci->suspended = true;
 
@@ -860,8 +860,8 @@ int dw_pcie_resume_noirq(struct dw_pcie *pci)
 
        pci->suspended = false;
 
-       if (pci->pp.ops->host_init) {
-               ret = pci->pp.ops->host_init(&pci->pp);
+       if (pci->pp.ops->init) {
+               ret = pci->pp.ops->init(&pci->pp);
                if (ret) {
                        dev_err(pci->dev, "Host init failed: %d\n", ret);
                        return ret;
index b625841e98aa0e69f1c8d3e7a999c8ec0d0fe72d..778588b4be706357002f83fb3862999db1f6963a 100644 (file)
@@ -42,17 +42,16 @@ static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
 }
 
 static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                                    enum pci_epc_irq_type type,
-                                    u16 interrupt_num)
+                                    unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               return dw_pcie_ep_raise_legacy_irq(ep, func_no);
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_INTX:
+               return dw_pcie_ep_raise_intx_irq(ep, func_no);
+       case PCI_IRQ_MSI:
                return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
-       case PCI_EPC_IRQ_MSIX:
+       case PCI_IRQ_MSIX:
                return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
        default:
                dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -74,7 +73,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
 }
 
 static const struct dw_pcie_ep_ops pcie_ep_ops = {
-       .ep_init = dw_plat_pcie_ep_init,
+       .init = dw_plat_pcie_ep_init,
        .raise_irq = dw_plat_pcie_ep_raise_irq,
        .get_features = dw_plat_pcie_get_features,
 };
index 55ff76e3d384665fda4f538b3bc71c711a51d4c4..26dae483746277e932e22d03d297fb2c25a44b7b 100644 (file)
@@ -300,10 +300,10 @@ enum dw_pcie_ltssm {
 };
 
 struct dw_pcie_host_ops {
-       int (*host_init)(struct dw_pcie_rp *pp);
-       void (*host_deinit)(struct dw_pcie_rp *pp);
-       void (*host_post_init)(struct dw_pcie_rp *pp);
-       int (*msi_host_init)(struct dw_pcie_rp *pp);
+       int (*init)(struct dw_pcie_rp *pp);
+       void (*deinit)(struct dw_pcie_rp *pp);
+       void (*post_init)(struct dw_pcie_rp *pp);
+       int (*msi_init)(struct dw_pcie_rp *pp);
        void (*pme_turn_off)(struct dw_pcie_rp *pp);
 };
 
@@ -332,10 +332,10 @@ struct dw_pcie_rp {
 
 struct dw_pcie_ep_ops {
        void    (*pre_init)(struct dw_pcie_ep *ep);
-       void    (*ep_init)(struct dw_pcie_ep *ep);
+       void    (*init)(struct dw_pcie_ep *ep);
        void    (*deinit)(struct dw_pcie_ep *ep);
        int     (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
-                            enum pci_epc_irq_type type, u16 interrupt_num);
+                            unsigned int type, u16 interrupt_num);
        const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
        /*
         * Provide a method to implement the different func config space
@@ -344,7 +344,7 @@ struct dw_pcie_ep_ops {
         * return a 0, and implement code in callback function of platform
         * driver.
         */
-       unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no);
+       unsigned int (*get_dbi_offset)(struct dw_pcie_ep *ep, u8 func_no);
        unsigned int (*get_dbi2_offset)(struct dw_pcie_ep *ep, u8 func_no);
 };
 
@@ -486,6 +486,99 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
        dw_pcie_write_dbi2(pci, reg, 0x4, val);
 }
 
+static inline unsigned int dw_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
+                                                    u8 func_no)
+{
+       unsigned int dbi_offset = 0;
+
+       if (ep->ops->get_dbi_offset)
+               dbi_offset = ep->ops->get_dbi_offset(ep, func_no);
+
+       return dbi_offset;
+}
+
+static inline u32 dw_pcie_ep_read_dbi(struct dw_pcie_ep *ep, u8 func_no,
+                                     u32 reg, size_t size)
+{
+       unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no);
+       struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+       return dw_pcie_read_dbi(pci, offset + reg, size);
+}
+
+static inline void dw_pcie_ep_write_dbi(struct dw_pcie_ep *ep, u8 func_no,
+                                       u32 reg, size_t size, u32 val)
+{
+       unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no);
+       struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+       dw_pcie_write_dbi(pci, offset + reg, size, val);
+}
+
+static inline void dw_pcie_ep_writel_dbi(struct dw_pcie_ep *ep, u8 func_no,
+                                        u32 reg, u32 val)
+{
+       dw_pcie_ep_write_dbi(ep, func_no, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_ep_readl_dbi(struct dw_pcie_ep *ep, u8 func_no,
+                                      u32 reg)
+{
+       return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x4);
+}
+
+static inline void dw_pcie_ep_writew_dbi(struct dw_pcie_ep *ep, u8 func_no,
+                                        u32 reg, u16 val)
+{
+       dw_pcie_ep_write_dbi(ep, func_no, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_ep_readw_dbi(struct dw_pcie_ep *ep, u8 func_no,
+                                      u32 reg)
+{
+       return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x2);
+}
+
+static inline void dw_pcie_ep_writeb_dbi(struct dw_pcie_ep *ep, u8 func_no,
+                                        u32 reg, u8 val)
+{
+       dw_pcie_ep_write_dbi(ep, func_no, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_ep_readb_dbi(struct dw_pcie_ep *ep, u8 func_no,
+                                     u32 reg)
+{
+       return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x1);
+}
+
+static inline unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
+                                                     u8 func_no)
+{
+       unsigned int dbi2_offset = 0;
+
+       if (ep->ops->get_dbi2_offset)
+               dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no);
+       else if (ep->ops->get_dbi_offset)     /* for backward compatibility */
+               dbi2_offset = ep->ops->get_dbi_offset(ep, func_no);
+
+       return dbi2_offset;
+}
+
+static inline void dw_pcie_ep_write_dbi2(struct dw_pcie_ep *ep, u8 func_no,
+                                        u32 reg, size_t size, u32 val)
+{
+       unsigned int offset = dw_pcie_ep_get_dbi2_offset(ep, func_no);
+       struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+       dw_pcie_write_dbi2(pci, offset + reg, size, val);
+}
+
+static inline void dw_pcie_ep_writel_dbi2(struct dw_pcie_ep *ep, u8 func_no,
+                                         u32 reg, u32 val)
+{
+       dw_pcie_ep_write_dbi2(ep, func_no, reg, 0x4, val);
+}
+
 static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
 {
        u32 reg;
@@ -580,7 +673,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep);
 int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
 void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
 void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
-int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
+int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
 int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
                             u8 interrupt_num);
 int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
@@ -613,7 +706,7 @@ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
 {
 }
 
-static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+static inline int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
 {
        return 0;
 }
index 2fe42c70097fdbae7a733f500e3e9f6a0e800448..d6842141d384d9c7f25d0743457ffeb73f8f963a 100644 (file)
@@ -72,7 +72,7 @@ static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
        writel_relaxed(val, rockchip->apb_base + reg);
 }
 
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+static void rockchip_pcie_intx_handler(struct irq_desc *desc)
 {
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
@@ -202,7 +202,7 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
        if (ret < 0)
                dev_err(dev, "failed to init irq domain\n");
 
-       irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler,
+       irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
                                         rockchip);
 
        /* LTSSM enable control mode */
@@ -215,7 +215,7 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
-       .host_init = rockchip_pcie_host_init,
+       .init = rockchip_pcie_host_init,
 };
 
 static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
index 1e9b44b8bba48e8930c2ae8de101356f0339b729..66367252032b84fd42c719287057471b1fe44e9a 100644 (file)
@@ -279,7 +279,7 @@ static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops fu740_pcie_host_ops = {
-       .host_init = fu740_pcie_host_init,
+       .init = fu740_pcie_host_init,
 };
 
 static const struct dw_pcie_ops dw_pcie_ops = {
index fd484cc7c481dc8e54efeb4e469350aae80bcb7d..7a11c618b9d9c46a851ca71eb10a88681d9e0892 100644 (file)
@@ -198,7 +198,7 @@ static int histb_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops histb_pcie_host_ops = {
-       .host_init = histb_pcie_host_init,
+       .init = histb_pcie_host_init,
 };
 
 static void histb_pcie_host_disable(struct histb_pcie *hipcie)
index c9c93524e01dc393a35f0219b1f580ba69a9506e..acbe4f6d3291d829636093746fa2c339668334b9 100644 (file)
@@ -391,7 +391,7 @@ static const struct dw_pcie_ops intel_pcie_ops = {
 };
 
 static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
-       .host_init =            intel_pcie_rc_init,
+       .init = intel_pcie_rc_init,
 };
 
 static int intel_pcie_probe(struct platform_device *pdev)
index 289bff99d76282e388d9562e03546a6af344dcf1..208d3b0ba196021aa9e0377c87136f07367594f8 100644 (file)
@@ -289,19 +289,18 @@ static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
 }
 
 static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                                    enum pci_epc_irq_type type,
-                                    u16 interrupt_num)
+                                    unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               /* Legacy interrupts are not supported in Keem Bay */
-               dev_err(pci->dev, "Legacy IRQ is not supported\n");
+       case PCI_IRQ_INTX:
+               /* INTx interrupts are not supported in Keem Bay */
+               dev_err(pci->dev, "INTx IRQ is not supported\n");
                return -EINVAL;
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_MSI:
                return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
-       case PCI_EPC_IRQ_MSIX:
+       case PCI_IRQ_MSIX:
                return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
        default:
                dev_err(pci->dev, "Unknown IRQ type %d\n", type);
@@ -325,7 +324,7 @@ keembay_pcie_get_features(struct dw_pcie_ep *ep)
 }
 
 static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
-       .ep_init        = keembay_pcie_ep_init,
+       .init           = keembay_pcie_ep_init,
        .raise_irq      = keembay_pcie_ep_raise_irq,
        .get_features   = keembay_pcie_get_features,
 };
index 2ee146767971c3d41af3e9e3916c6a3f29090afc..d5523f3021024cc96c6b239a2ca4a7bffde8b2e3 100644 (file)
@@ -366,7 +366,6 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
                                      struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       char name[32];
        int ret, i;
 
        /* This is an optional property */
@@ -387,9 +386,8 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
                if (pcie->gpio_id_clkreq[i] < 0)
                        return pcie->gpio_id_clkreq[i];
 
-               sprintf(name, "pcie_clkreq_%d", i);
-               pcie->clkreq_names[i] = devm_kstrdup_const(dev, name,
-                                                           GFP_KERNEL);
+               pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+                                                      "pcie_clkreq_%d", i);
                if (!pcie->clkreq_names[i])
                        return -ENOMEM;
        }
@@ -404,7 +402,6 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
        struct device *dev = &pdev->dev;
        struct device_node *parent, *child;
        int ret, slot, i;
-       char name[32];
 
        for_each_available_child_of_node(node, parent) {
                for_each_available_child_of_node(parent, child) {
@@ -430,9 +427,9 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
 
                        slot = PCI_SLOT(ret);
 
-                       sprintf(name, "pcie_perst_%d", slot);
-                       pcie->reset_names[i] = devm_kstrdup_const(dev, name,
-                                                               GFP_KERNEL);
+                       pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+                                                             "pcie_perst_%d",
+                                                             slot);
                        if (!pcie->reset_names[i]) {
                                ret = -ENOMEM;
                                goto put_node;
@@ -672,7 +669,7 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = {
 };
 
 static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
-       .host_init = kirin_pcie_host_init,
+       .init = kirin_pcie_host_init,
 };
 
 static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
@@ -741,15 +738,13 @@ err:
        return ret;
 }
 
-static int kirin_pcie_remove(struct platform_device *pdev)
+static void kirin_pcie_remove(struct platform_device *pdev)
 {
        struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
 
        dw_pcie_host_deinit(&kirin_pcie->pci->pp);
 
        kirin_pcie_power_off(kirin_pcie);
-
-       return 0;
 }
 
 struct kirin_pcie_data {
@@ -818,7 +813,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
 
 static struct platform_driver kirin_pcie_driver = {
        .probe                  = kirin_pcie_probe,
-       .remove                 = kirin_pcie_remove,
+       .remove_new             = kirin_pcie_remove,
        .driver                 = {
                .name                   = "kirin-pcie",
                .of_match_table         = kirin_pcie_match,
index 9e58f055199ad7071229088addb0196bad68377b..36e5e80cd22f59536d74c6c188736371fd6a99ce 100644 (file)
@@ -726,14 +726,14 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
 }
 
 static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                                 enum pci_epc_irq_type type, u16 interrupt_num)
+                                 unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               return dw_pcie_ep_raise_legacy_irq(ep, func_no);
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_INTX:
+               return dw_pcie_ep_raise_intx_irq(ep, func_no);
+       case PCI_IRQ_MSI:
                return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
        default:
                dev_err(pci->dev, "Unknown IRQ type\n");
@@ -796,7 +796,7 @@ static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
 }
 
 static const struct dw_pcie_ep_ops pci_ep_ops = {
-       .ep_init = qcom_pcie_ep_init,
+       .init = qcom_pcie_ep_init,
        .raise_irq = qcom_pcie_ep_raise_irq,
        .get_features = qcom_pcie_epc_get_features,
 };
index 11c80555d97543990fdd17cf9326cf58826ab457..2ce2a3bd932bd7e3824b69cc9450135895b7a89e 100644 (file)
@@ -972,7 +972,7 @@ static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
         * Downstream devices need to be in D0 state before enabling PCI PM
         * substates.
         */
-       pci_set_power_state(pdev, PCI_D0);
+       pci_set_power_state_locked(pdev, PCI_D0);
        pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
 
        return 0;
@@ -1247,9 +1247,9 @@ static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
-       .host_init      = qcom_pcie_host_init,
-       .host_deinit    = qcom_pcie_host_deinit,
-       .host_post_init = qcom_pcie_host_post_init,
+       .init           = qcom_pcie_host_init,
+       .deinit         = qcom_pcie_host_deinit,
+       .post_init      = qcom_pcie_host_post_init,
 };
 
 /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
index 3bc45e513b3d53a84acab1266942b6ffb86844ff..e9166619b1f9f656a83d60330515e7b20f34d7ca 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -307,8 +307,8 @@ static void rcar_gen4_pcie_host_deinit(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops rcar_gen4_pcie_host_ops = {
-       .host_init = rcar_gen4_pcie_host_init,
-       .host_deinit = rcar_gen4_pcie_host_deinit,
+       .init = rcar_gen4_pcie_host_init,
+       .deinit = rcar_gen4_pcie_host_deinit,
 };
 
 static int rcar_gen4_add_dw_pcie_rp(struct rcar_gen4_pcie *rcar)
@@ -362,15 +362,14 @@ static void rcar_gen4_pcie_ep_deinit(struct dw_pcie_ep *ep)
 }
 
 static int rcar_gen4_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                                      enum pci_epc_irq_type type,
-                                      u16 interrupt_num)
+                                      unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               return dw_pcie_ep_raise_legacy_irq(ep, func_no);
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_INTX:
+               return dw_pcie_ep_raise_intx_irq(ep, func_no);
+       case PCI_IRQ_MSI:
                return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
        default:
                dev_err(dw->dev, "Unknown IRQ type\n");
@@ -394,7 +393,7 @@ rcar_gen4_pcie_ep_get_features(struct dw_pcie_ep *ep)
        return &rcar_gen4_pcie_epc_features;
 }
 
-static unsigned int rcar_gen4_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
+static unsigned int rcar_gen4_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
                                                       u8 func_no)
 {
        return func_no * RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET;
@@ -408,11 +407,11 @@ static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
 
 static const struct dw_pcie_ep_ops pcie_ep_ops = {
        .pre_init = rcar_gen4_pcie_ep_pre_init,
-       .ep_init = rcar_gen4_pcie_ep_init,
+       .init = rcar_gen4_pcie_ep_init,
        .deinit = rcar_gen4_pcie_ep_deinit,
        .raise_irq = rcar_gen4_pcie_ep_raise_irq,
        .get_features = rcar_gen4_pcie_ep_get_features,
-       .func_conf_select = rcar_gen4_pcie_ep_func_conf_select,
+       .get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset,
        .get_dbi2_offset = rcar_gen4_pcie_ep_get_dbi2_offset,
 };
 
@@ -436,7 +435,7 @@ static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
 /* Common */
 static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar)
 {
-       rcar->mode = (enum dw_pcie_device_mode)of_device_get_match_data(&rcar->pdev->dev);
+       rcar->mode = (uintptr_t)of_device_get_match_data(&rcar->pdev->dev);
 
        switch (rcar->mode) {
        case DW_PCIE_RC_TYPE:
index 99d47ae80331f4662087c1fb2cb1ff9b4399b27b..201dced209f0822fcd7faebbca2a7686bf016c8d 100644 (file)
@@ -148,7 +148,7 @@ static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
-       .host_init = spear13xx_pcie_host_init,
+       .init = spear13xx_pcie_host_init,
 };
 
 static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
index 0fe113598ebbc766566b0db805a29bd3fa9910ef..7afa9e9aabe2165828aff5eaed0dd5033b08fc4c 100644 (file)
@@ -773,13 +773,13 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
                           val_w);
 }
 
-static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
+static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp)
 {
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
        u32 val;
 
-       /* Enable legacy interrupt generation */
+       /* Enable INTX interrupt generation */
        val = appl_readl(pcie, APPL_INTR_EN_L0_0);
        val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
        val |= APPL_INTR_EN_L0_0_INT_INT_EN;
@@ -830,7 +830,7 @@ static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
 
        tegra_pcie_enable_system_interrupts(pp);
-       tegra_pcie_enable_legacy_interrupts(pp);
+       tegra_pcie_enable_intx_interrupts(pp);
        if (IS_ENABLED(CONFIG_PCI_MSI))
                tegra_pcie_enable_msi_interrupts(pp);
 }
@@ -1060,7 +1060,7 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {
 };
 
 static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
-       .host_init = tegra_pcie_dw_host_init,
+       .init = tegra_pcie_dw_host_init,
 };
 
 static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
@@ -1947,7 +1947,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
 {
        /* Tegra194 supports only INTA */
        if (irq > 1)
@@ -1979,20 +1979,19 @@ static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
 }
 
 static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                                  enum pci_epc_irq_type type,
-                                  u16 interrupt_num)
+                                  unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
+       case PCI_IRQ_INTX:
+               return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num);
 
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_MSI:
                return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
 
-       case PCI_EPC_IRQ_MSIX:
+       case PCI_IRQ_MSIX:
                return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
 
        default:
index cba3c88fcf39519607bd173378f7f12b0e0e73bb..3fced0d3e85125067024f948717713278422f6d4 100644 (file)
@@ -212,7 +212,7 @@ static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
                dw_pcie_ep_reset_bar(pci, bar);
 }
 
-static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep)
+static int uniphier_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
@@ -256,15 +256,14 @@ static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
 }
 
 static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-                                     enum pci_epc_irq_type type,
-                                     u16 interrupt_num)
+                                     unsigned int type, u16 interrupt_num)
 {
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               return uniphier_pcie_ep_raise_legacy_irq(ep);
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_INTX:
+               return uniphier_pcie_ep_raise_intx_irq(ep);
+       case PCI_IRQ_MSI:
                return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
                                                      interrupt_num);
        default:
@@ -284,7 +283,7 @@ uniphier_pcie_get_features(struct dw_pcie_ep *ep)
 }
 
 static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
-       .ep_init = uniphier_pcie_ep_init,
+       .init = uniphier_pcie_ep_init,
        .raise_irq = uniphier_pcie_ep_raise_irq,
        .get_features = uniphier_pcie_get_features,
 };
index 48c3eba817b43cae49b887be2c95a2bec63dfdb8..5757ca3803c99c6f44e94ecb54b87f64663f75f9 100644 (file)
@@ -67,7 +67,7 @@ struct uniphier_pcie {
        struct clk *clk;
        struct reset_control *rst;
        struct phy *phy;
-       struct irq_domain *legacy_irq_domain;
+       struct irq_domain *intx_irq_domain;
 };
 
 #define to_uniphier_pcie(x)    dev_get_drvdata((x)->dev)
@@ -253,12 +253,12 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
        reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
 
        for_each_set_bit(bit, &reg, PCI_NUM_INTX)
-               generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+               generic_handle_domain_irq(pcie->intx_irq_domain, bit);
 
        chained_irq_exit(chip, desc);
 }
 
-static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
+static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp)
 {
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
@@ -279,9 +279,9 @@ static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
                goto out_put_node;
        }
 
-       pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+       pcie->intx_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
                                                &uniphier_intx_domain_ops, pp);
-       if (!pcie->legacy_irq_domain) {
+       if (!pcie->intx_irq_domain) {
                dev_err(pci->dev, "Failed to get INTx domain\n");
                ret = -ENODEV;
                goto out_put_node;
@@ -301,7 +301,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
        struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
        int ret;
 
-       ret = uniphier_pcie_config_legacy_irq(pp);
+       ret = uniphier_pcie_config_intx_irq(pp);
        if (ret)
                return ret;
 
@@ -311,7 +311,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
-       .host_init = uniphier_pcie_host_init,
+       .init = uniphier_pcie_host_init,
 };
 
 static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie)
index 71026fefa36680a2e7ebd230db84488d87d63f04..318c278e65c898fd6a1675956403a8ad3f71db78 100644 (file)
@@ -236,7 +236,7 @@ static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
 }
 
 static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
-       .host_init = visconti_pcie_host_init,
+       .init = visconti_pcie_host_init,
 };
 
 static int visconti_get_resources(struct platform_device *pdev,
index 6be3266cd7b5b2f803c60a71686ce8ffbf20be6d..45b71806182d2b8f9ae7cd876465684f6418abca 100644 (file)
@@ -85,7 +85,7 @@ int pci_host_common_probe(struct platform_device *pdev)
 }
 EXPORT_SYMBOL_GPL(pci_host_common_probe);
 
-int pci_host_common_remove(struct platform_device *pdev)
+void pci_host_common_remove(struct platform_device *pdev)
 {
        struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
 
@@ -93,8 +93,6 @@ int pci_host_common_remove(struct platform_device *pdev)
        pci_stop_root_bus(bridge->bus);
        pci_remove_root_bus(bridge->bus);
        pci_unlock_rescan_remove();
-
-       return 0;
 }
 EXPORT_SYMBOL_GPL(pci_host_common_remove);
 
index 63865aeb636b88d3f39e28095cb219dbc4522358..41cb6a057f6e4578c6f3460e80c8aaa677e9c0a4 100644 (file)
@@ -82,7 +82,7 @@ static struct platform_driver gen_pci_driver = {
                .of_match_table = gen_pci_of_match,
        },
        .probe = pci_host_common_probe,
-       .remove = pci_host_common_remove,
+       .remove_new = pci_host_common_remove,
 };
 module_platform_driver(gen_pci_driver);
 
index f9dd6622fe109566fc292e9d2d935691c5b8cc23..5b0730c3891b84226896b6865ddb50a5bd377c11 100644 (file)
@@ -48,6 +48,9 @@
 #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY                      0x04dc
 #define  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK   0xc00
 
+#define PCIE_RC_CFG_PRIV1_ROOT_CAP                     0x4f8
+#define  PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK     0xf8
+
 #define PCIE_RC_DL_MDIO_ADDR                           0x1100
 #define PCIE_RC_DL_MDIO_WR_DATA                                0x1104
 #define PCIE_RC_DL_MDIO_RD_DATA                                0x1108
 
 #define PCIE_MISC_HARD_PCIE_HARD_DEBUG                                 0x4204
 #define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK       0x2
+#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK               0x200000
 #define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK               0x08000000
 #define  PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK         0x00800000
-
+#define  PCIE_CLKREQ_MASK \
+         (PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
+          PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)
 
 #define PCIE_INTR2_CPU_BASE            0x4300
 #define PCIE_MSI_INTR2_BASE            0x4500
@@ -1028,13 +1034,89 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
        return 0;
 }
 
+/*
+ * This extends the timeout period for an access to an internal bus.  This
+ * access timeout may occur during L1SS sleep periods, even without the
+ * presence of a PCIe access.
+ */
+static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
+{
+       /* TIMEOUT register is two registers before RGR1_SW_INIT_1 */
+       const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
+       u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
+
+       /* Each unit in timeout register is 1/216,000,000 seconds */
+       writel(216 * timeout_us, pcie->base + REG_OFFSET);
+}
+
+static void brcm_config_clkreq(struct brcm_pcie *pcie)
+{
+       static const char err_msg[] = "invalid 'brcm,clkreq-mode' DT string\n";
+       const char *mode = "default";
+       u32 clkreq_cntl;
+       int ret, tmp;
+
+       ret = of_property_read_string(pcie->np, "brcm,clkreq-mode", &mode);
+       if (ret && ret != -EINVAL) {
+               dev_err(pcie->dev, err_msg);
+               mode = "safe";
+       }
+
+       /* Start out assuming safe mode (both mode bits cleared) */
+       clkreq_cntl = readl(pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+       clkreq_cntl &= ~PCIE_CLKREQ_MASK;
+
+       if (strcmp(mode, "no-l1ss") == 0) {
+               /*
+                * "no-l1ss" -- Provides Clock Power Management, L0s, and
+                * L1, but cannot provide L1 substate (L1SS) power
+                * savings. If the downstream device connected to the RC is
+                * L1SS capable AND the OS enables L1SS, all PCIe traffic
+                * may abruptly halt, potentially hanging the system.
+                */
+               clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
+               /*
+                * We want to un-advertise L1 substates because if the OS
+                * tries to configure the controller into using L1 substate
+                * power savings it may fail or hang when the RC HW is in
+                * "no-l1ss" mode.
+                */
+               tmp = readl(pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
+               u32p_replace_bits(&tmp, 2, PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK);
+               writel(tmp, pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
+
+       } else if (strcmp(mode, "default") == 0) {
+               /*
+                * "default" -- Provides L0s, L1, and L1SS, but not
+                * compliant to provide Clock Power Management;
+                * specifically, may not be able to meet the Tclron max
+                * timing of 400ns as specified in "Dynamic Clock Control",
+                * section 3.2.5.2.2 of the PCIe spec.  This situation is
+                * atypical and should happen only with older devices.
+                */
+               clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK;
+               brcm_extend_rbus_timeout(pcie);
+
+       } else {
+               /*
+                * "safe" -- No power savings; refclk is driven by RC
+                * unconditionally.
+                */
+               if (strcmp(mode, "safe") != 0)
+                       dev_err(pcie->dev, err_msg);
+               mode = "safe";
+       }
+       writel(clkreq_cntl, pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+       dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
+}
+
 static int brcm_pcie_start_link(struct brcm_pcie *pcie)
 {
        struct device *dev = pcie->dev;
        void __iomem *base = pcie->base;
        u16 nlw, cls, lnksta;
        bool ssc_good = false;
-       u32 tmp;
        int ret, i;
 
        /* Unassert the fundamental reset */
@@ -1059,6 +1141,8 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
                return -ENODEV;
        }
 
+       brcm_config_clkreq(pcie);
+
        if (pcie->gen)
                brcm_pcie_set_gen(pcie, pcie->gen);
 
@@ -1077,14 +1161,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
                 pci_speed_string(pcie_link_speed[cls]), nlw,
                 ssc_good ? "(SSC)" : "(!SSC)");
 
-       /*
-        * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
-        * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
-        */
-       tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
-       tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
-       writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
-
        return 0;
 }
 
index acdc583d29802ca3a33e62aec8c7de02b6fbc32c..4e6aa882a567abb00b1b94a6550586bd6df47452 100644 (file)
@@ -52,7 +52,7 @@ static int iproc_pltfm_pcie_probe(struct platform_device *pdev)
        pcie = pci_host_bridge_priv(bridge);
 
        pcie->dev = dev;
-       pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev);
+       pcie->type = (uintptr_t)of_device_get_match_data(dev);
 
        ret = of_address_to_resource(np, 0, &reg);
        if (ret < 0) {
index e0e27645fdf4c8da2ecedaf8f552c1195f1165a2..975b3024fb08cd1f4c6ea55915dd3da830a81845 100644 (file)
@@ -245,35 +245,60 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
                                    resource_size_t cpu_addr,
                                    resource_size_t pci_addr,
                                    resource_size_t size,
-                                   unsigned long type, int num)
+                                   unsigned long type, int *num)
 {
+       resource_size_t remaining = size;
+       resource_size_t table_size;
+       resource_size_t addr_align;
+       const char *range_type;
        void __iomem *table;
        u32 val;
 
-       if (num >= PCIE_MAX_TRANS_TABLES) {
-               dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
-                       (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
-               return -ENODEV;
-       }
+       while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
+               /* Table size needs to be a power of 2 */
+               table_size = BIT(fls(remaining) - 1);
+
+               if (cpu_addr > 0) {
+                       addr_align = BIT(ffs(cpu_addr) - 1);
+                       table_size = min(table_size, addr_align);
+               }
+
+               /* Minimum size of translate table is 4KiB */
+               if (table_size < 0x1000) {
+                       dev_err(pcie->dev, "illegal table size %#llx\n",
+                               (unsigned long long)table_size);
+                       return -EINVAL;
+               }
 
-       table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
-               num * PCIE_ATR_TLB_SET_OFFSET;
+               table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
+               writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
+               writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
+               writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
+               writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
 
-       writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
-                      table);
-       writel_relaxed(upper_32_bits(cpu_addr),
-                      table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
-       writel_relaxed(lower_32_bits(pci_addr),
-                      table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
-       writel_relaxed(upper_32_bits(pci_addr),
-                      table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
+               if (type == IORESOURCE_IO) {
+                       val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
+                       range_type = "IO";
+               } else {
+                       val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+                       range_type = "MEM";
+               }
 
-       if (type == IORESOURCE_IO)
-               val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
-       else
-               val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+               writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
 
-       writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+               dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+                       range_type, *num, (unsigned long long)cpu_addr,
+                       (unsigned long long)pci_addr, (unsigned long long)table_size);
+
+               cpu_addr += table_size;
+               pci_addr += table_size;
+               remaining -= table_size;
+               (*num)++;
+       }
+
+       if (remaining)
+               dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
+                        (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
 
        return 0;
 }
@@ -380,30 +405,20 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
                resource_size_t cpu_addr;
                resource_size_t pci_addr;
                resource_size_t size;
-               const char *range_type;
 
-               if (type == IORESOURCE_IO) {
+               if (type == IORESOURCE_IO)
                        cpu_addr = pci_pio_to_address(res->start);
-                       range_type = "IO";
-               } else if (type == IORESOURCE_MEM) {
+               else if (type == IORESOURCE_MEM)
                        cpu_addr = res->start;
-                       range_type = "MEM";
-               } else {
+               else
                        continue;
-               }
 
                pci_addr = res->start - entry->offset;
                size = resource_size(res);
                err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
-                                              type, table_index);
+                                              type, &table_index);
                if (err)
                        return err;
-
-               dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
-                       range_type, table_index, (unsigned long long)cpu_addr,
-                       (unsigned long long)pci_addr, (unsigned long long)size);
-
-               table_index++;
        }
 
        return 0;
index 66a8f73296fc8b7b91ae84d72824550671a45e4d..48372013f26d23b6a9e197c61c4ef4eeaece9cb7 100644 (file)
@@ -617,12 +617,18 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
                if (status & MSI_STATUS){
                        unsigned long imsi_status;
 
+                       /*
+                        * The interrupt status can be cleared even if the
+                        * MSI status remains pending. As such, given the
+                        * edge-triggered interrupt type, its status should
+                        * be cleared before being dispatched to the
+                        * handler of the underlying device.
+                        */
+                       writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
                        while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
                                for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
                                        generic_handle_domain_irq(port->inner_domain, bit);
                        }
-                       /* Clear MSI interrupt status */
-                       writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
                }
        }
 
index 7034c0ff23d0d917b600fb8e058a06c7bb01104e..e6909271def798e5332dfa02b47befdcee5f610d 100644 (file)
@@ -402,16 +402,15 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
 }
 
 static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
-                                 enum pci_epc_irq_type type,
-                                 u16 interrupt_num)
+                                 unsigned int type, u16 interrupt_num)
 {
        struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
+       case PCI_IRQ_INTX:
                return rcar_pcie_ep_assert_intx(ep, fn, 0);
 
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_MSI:
                return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);
 
        default:
index bf7cc0b6a695736f8b9a27af05fe23c3778369e3..996077ab7cfdb88f2061c82370822b8f3f5776e7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
 
 #include "pcie-rcar.h"
 
@@ -953,14 +954,22 @@ static const struct of_device_id rcar_pcie_of_match[] = {
        {},
 };
 
+/* Design note 346 from Linear Technology says order is not important. */
+static const char * const rcar_pcie_supplies[] = {
+       "vpcie1v5",
+       "vpcie3v3",
+       "vpcie12v",
+};
+
 static int rcar_pcie_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
+       struct pci_host_bridge *bridge;
        struct rcar_pcie_host *host;
        struct rcar_pcie *pcie;
+       unsigned int i;
        u32 data;
        int err;
-       struct pci_host_bridge *bridge;
 
        bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
        if (!bridge)
@@ -971,6 +980,13 @@ static int rcar_pcie_probe(struct platform_device *pdev)
        pcie->dev = dev;
        platform_set_drvdata(pdev, host);
 
+       for (i = 0; i < ARRAY_SIZE(rcar_pcie_supplies); i++) {
+               err = devm_regulator_get_enable_optional(dev, rcar_pcie_supplies[i]);
+               if (err < 0 && err != -ENODEV)
+                       return dev_err_probe(dev, err, "failed to enable regulator: %s\n",
+                                            rcar_pcie_supplies[i]);
+       }
+
        pm_runtime_enable(pcie->dev);
        err = pm_runtime_get_sync(pcie->dev);
        if (err < 0) {
index 0af0e965fb57eaafda768bd2e0aed42ed036a362..c9046e97a1d27fe2bd93d8b60e7f354bedabcba8 100644 (file)
  * @max_regions: maximum number of regions supported by hardware
  * @ob_region_map: bitmask of mapped outbound regions
  * @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
  *                dedicated outbound regions is mapped.
  * @irq_cpu_addr: base address in the CPU space where a write access triggers
- *               the sending of a memory write (MSI) / normal message (legacy
+ *               the sending of a memory write (MSI) / normal message (INTX
  *               IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
  *               dedicated outbound region.
  * @irq_pci_fn: the latest PCI function that has updated the mapping of
- *             the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
+ *             the MSI/INTX IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted INTX IRQs.
  */
 struct rockchip_pcie_ep {
        struct rockchip_pcie    rockchip;
@@ -325,8 +325,8 @@ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
        }
 }
 
-static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
-                                           u8 intx)
+static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn,
+                                         u8 intx)
 {
        u16 cmd;
 
@@ -407,15 +407,14 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
 }
 
 static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
-                                     enum pci_epc_irq_type type,
-                                     u16 interrupt_num)
+                                     unsigned int type, u16 interrupt_num)
 {
        struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
 
        switch (type) {
-       case PCI_EPC_IRQ_LEGACY:
-               return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
-       case PCI_EPC_IRQ_MSI:
+       case PCI_IRQ_INTX:
+               return rockchip_pcie_ep_send_intx_irq(ep, fn, 0);
+       case PCI_IRQ_MSI:
                return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
        default:
                return -EINVAL;
index afbbdccd195d97b0e19e920b4e57e1fa56adf65e..300b9dc85ecc7b02b726d9d929c360926f8d1d7d 100644 (file)
@@ -505,7 +505,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+static void rockchip_pcie_intx_handler(struct irq_desc *desc)
 {
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
@@ -553,7 +553,7 @@ static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
                return irq;
 
        irq_set_chained_handler_and_data(irq,
-                                        rockchip_pcie_legacy_int_handler,
+                                        rockchip_pcie_intx_handler,
                                         rockchip);
 
        irq = platform_get_irq_byname(pdev, "client");
index 2f7d676c683cca03b3cd0e46efdea3ca98e5f8bb..5be5dfd8398f2a79e28164a1375e1d0221bdf268 100644 (file)
@@ -576,7 +576,7 @@ static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
                                                  &intx_domain_ops, port);
        if (!port->intx_domain) {
                dev_err(dev, "Failed to get a INTx IRQ domain\n");
-               return PTR_ERR(port->intx_domain);
+               return -ENOMEM;
        }
 
        irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
@@ -635,14 +635,14 @@ static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
        err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow,
                               IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
        if (err) {
-               dev_err(dev, "Failed to request INTx IRQ %d\n", irq);
+               dev_err(dev, "Failed to request INTx IRQ %d\n", port->intx_irq);
                return err;
        }
 
        err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow,
                               IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
        if (err) {
-               dev_err(dev, "Failed to request event IRQ %d\n", irq);
+               dev_err(dev, "Failed to request event IRQ %d\n", port->irq);
                return err;
        }
 
@@ -684,10 +684,8 @@ static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
        int ret;
 
        port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
-       if (port->msi.irq_msi0 <= 0) {
-               dev_err(dev, "Unable to find msi0 IRQ line\n");
+       if (port->msi.irq_msi0 <= 0)
                return port->msi.irq_msi0;
-       }
 
        ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
                               IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
@@ -698,10 +696,8 @@ static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
        }
 
        port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
-       if (port->msi.irq_msi1 <= 0) {
-               dev_err(dev, "Unable to find msi1 IRQ line\n");
+       if (port->msi.irq_msi1 <= 0)
                return port->msi.irq_msi1;
-       }
 
        ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
                               IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
index e307aceba5c977c8c93440b4a7d70df5a8823c96..0408f4d612b5af4702e3fd6c67b2dce3f81bf148 100644 (file)
@@ -166,7 +166,7 @@ struct nwl_pcie {
        int irq_intx;
        int irq_misc;
        struct nwl_msi msi;
-       struct irq_domain *legacy_irq_domain;
+       struct irq_domain *intx_irq_domain;
        struct clk *clk;
        raw_spinlock_t leg_mask_lock;
 };
@@ -324,7 +324,7 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
        while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
                                MSGF_LEG_SR_MASKALL) != 0) {
                for_each_set_bit(bit, &status, PCI_NUM_INTX)
-                       generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+                       generic_handle_domain_irq(pcie->intx_irq_domain, bit);
        }
 
        chained_irq_exit(chip, desc);
@@ -364,7 +364,7 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-static void nwl_mask_leg_irq(struct irq_data *data)
+static void nwl_mask_intx_irq(struct irq_data *data)
 {
        struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long flags;
@@ -378,7 +378,7 @@ static void nwl_mask_leg_irq(struct irq_data *data)
        raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
 }
 
-static void nwl_unmask_leg_irq(struct irq_data *data)
+static void nwl_unmask_intx_irq(struct irq_data *data)
 {
        struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long flags;
@@ -392,26 +392,26 @@ static void nwl_unmask_leg_irq(struct irq_data *data)
        raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
 }
 
-static struct irq_chip nwl_leg_irq_chip = {
+static struct irq_chip nwl_intx_irq_chip = {
        .name = "nwl_pcie:legacy",
-       .irq_enable = nwl_unmask_leg_irq,
-       .irq_disable = nwl_mask_leg_irq,
-       .irq_mask = nwl_mask_leg_irq,
-       .irq_unmask = nwl_unmask_leg_irq,
+       .irq_enable = nwl_unmask_intx_irq,
+       .irq_disable = nwl_mask_intx_irq,
+       .irq_mask = nwl_mask_intx_irq,
+       .irq_unmask = nwl_unmask_intx_irq,
 };
 
-static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
-                         irq_hw_number_t hwirq)
+static int nwl_intx_map(struct irq_domain *domain, unsigned int irq,
+                       irq_hw_number_t hwirq)
 {
-       irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
+       irq_set_chip_and_handler(irq, &nwl_intx_irq_chip, handle_level_irq);
        irq_set_chip_data(irq, domain->host_data);
        irq_set_status_flags(irq, IRQ_LEVEL);
 
        return 0;
 }
 
-static const struct irq_domain_ops legacy_domain_ops = {
-       .map = nwl_legacy_map,
+static const struct irq_domain_ops intx_domain_ops = {
+       .map = nwl_intx_map,
        .xlate = pci_irqd_intx_xlate,
 };
 
@@ -525,20 +525,20 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 {
        struct device *dev = pcie->dev;
        struct device_node *node = dev->of_node;
-       struct device_node *legacy_intc_node;
+       struct device_node *intc_node;
 
-       legacy_intc_node = of_get_next_child(node, NULL);
-       if (!legacy_intc_node) {
+       intc_node = of_get_next_child(node, NULL);
+       if (!intc_node) {
                dev_err(dev, "No legacy intc node found\n");
                return -EINVAL;
        }
 
-       pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
-                                                       PCI_NUM_INTX,
-                                                       &legacy_domain_ops,
-                                                       pcie);
-       of_node_put(legacy_intc_node);
-       if (!pcie->legacy_irq_domain) {
+       pcie->intx_irq_domain = irq_domain_add_linear(intc_node,
+                                                     PCI_NUM_INTX,
+                                                     &intx_domain_ops,
+                                                     pcie);
+       of_node_put(intc_node);
+       if (!pcie->intx_irq_domain) {
                dev_err(dev, "failed to create IRQ domain\n");
                return -ENOMEM;
        }
@@ -710,14 +710,14 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
        /* Enable all misc interrupts */
        nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
 
-       /* Disable all legacy interrupts */
+       /* Disable all INTX interrupts */
        nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
 
-       /* Clear pending legacy interrupts */
+       /* Clear pending INTX interrupts */
        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
                          MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
 
-       /* Enable all legacy interrupts */
+       /* Enable all INTX interrupts */
        nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
 
        /* Enable the bridge config interrupt */
index 0452cbc362eef7b8707337a2d6efde15a01e9dcc..87b7856f375abf48676e4f892d05fd92a3b5d5a9 100644 (file)
@@ -984,7 +984,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
                return -ENOMEM;
 
        vmd->dev = dev;
-       vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL);
+       vmd->instance = ida_alloc(&vmd_instance_ida, GFP_KERNEL);
        if (vmd->instance < 0)
                return vmd->instance;
 
@@ -1026,7 +1026,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
        return 0;
 
  out_release_instance:
-       ida_simple_remove(&vmd_instance_ida, vmd->instance);
+       ida_free(&vmd_instance_ida, vmd->instance);
        return err;
 }
 
@@ -1048,7 +1048,7 @@ static void vmd_remove(struct pci_dev *dev)
        vmd_cleanup_srcu(vmd);
        vmd_detach_resources(vmd);
        vmd_remove_irq_domain(vmd);
-       ida_simple_remove(&vmd_instance_ida, vmd->instance);
+       ida_free(&vmd_instance_ida, vmd->instance);
 }
 
 static void vmd_shutdown(struct pci_dev *dev)
index b7b9d3e21f97d368893066d2ea10ef99483eadb9..1c3e4ea76bd2578e47397d0ddbcfa96a8f705574 100644 (file)
 /* Platform specific flags */
 #define MHI_EPF_USE_DMA BIT(0)
 
+struct pci_epf_mhi_dma_transfer {
+       struct pci_epf_mhi *epf_mhi;
+       struct mhi_ep_buf_info buf_info;
+       struct list_head node;
+       dma_addr_t paddr;
+       enum dma_data_direction dir;
+       size_t size;
+};
+
 struct pci_epf_mhi_ep_info {
        const struct mhi_ep_cntrl_config *config;
        struct pci_epf_header *epf_header;
@@ -124,6 +133,10 @@ struct pci_epf_mhi {
        resource_size_t mmio_phys;
        struct dma_chan *dma_chan_tx;
        struct dma_chan *dma_chan_rx;
+       struct workqueue_struct *dma_wq;
+       struct work_struct dma_work;
+       struct list_head dma_list;
+       spinlock_t list_lock;
        u32 mmio_size;
        int irq;
 };
@@ -205,63 +218,69 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
         * MHI supplies 0 based MSI vectors but the API expects the vector
         * number to start from 1, so we need to increment the vector by 1.
         */
-       pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI,
+       pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_IRQ_MSI,
                          vector + 1);
 }
 
-static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
-                                void *to, size_t size)
+static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
+                                struct mhi_ep_buf_info *buf_info)
 {
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
-       size_t offset = get_align_offset(epf_mhi, from);
+       size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
        void __iomem *tre_buf;
        phys_addr_t tre_phys;
        int ret;
 
        mutex_lock(&epf_mhi->lock);
 
-       ret = __pci_epf_mhi_alloc_map(mhi_cntrl, from, &tre_phys, &tre_buf,
-                                     offset, size);
+       ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
+                                     &tre_buf, offset, buf_info->size);
        if (ret) {
                mutex_unlock(&epf_mhi->lock);
                return ret;
        }
 
-       memcpy_fromio(to, tre_buf, size);
+       memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);
 
-       __pci_epf_mhi_unmap_free(mhi_cntrl, from, tre_phys, tre_buf, offset,
-                                size);
+       __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
+                                tre_buf, offset, buf_info->size);
 
        mutex_unlock(&epf_mhi->lock);
 
+       if (buf_info->cb)
+               buf_info->cb(buf_info);
+
        return 0;
 }
 
 static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
-                                 void *from, u64 to, size_t size)
+                                 struct mhi_ep_buf_info *buf_info)
 {
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
-       size_t offset = get_align_offset(epf_mhi, to);
+       size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
        void __iomem *tre_buf;
        phys_addr_t tre_phys;
        int ret;
 
        mutex_lock(&epf_mhi->lock);
 
-       ret = __pci_epf_mhi_alloc_map(mhi_cntrl, to, &tre_phys, &tre_buf,
-                                     offset, size);
+       ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
+                                     &tre_buf, offset, buf_info->size);
        if (ret) {
                mutex_unlock(&epf_mhi->lock);
                return ret;
        }
 
-       memcpy_toio(tre_buf, from, size);
+       memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);
 
-       __pci_epf_mhi_unmap_free(mhi_cntrl, to, tre_phys, tre_buf, offset,
-                                size);
+       __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
+                                tre_buf, offset, buf_info->size);
 
        mutex_unlock(&epf_mhi->lock);
 
+       if (buf_info->cb)
+               buf_info->cb(buf_info);
+
        return 0;
 }
 
@@ -270,8 +289,8 @@ static void pci_epf_mhi_dma_callback(void *param)
        complete(param);
 }
 
-static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
-                                void *to, size_t size)
+static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
+                                struct mhi_ep_buf_info *buf_info)
 {
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
@@ -284,13 +303,13 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
        dma_addr_t dst_addr;
        int ret;
 
-       if (size < SZ_4K)
-               return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size);
+       if (buf_info->size < SZ_4K)
+               return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);
 
        mutex_lock(&epf_mhi->lock);
 
        config.direction = DMA_DEV_TO_MEM;
-       config.src_addr = from;
+       config.src_addr = buf_info->host_addr;
 
        ret = dmaengine_slave_config(chan, &config);
        if (ret) {
@@ -298,14 +317,16 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
                goto err_unlock;
        }
 
-       dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
+       dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+                                 DMA_FROM_DEVICE);
        ret = dma_mapping_error(dma_dev, dst_addr);
        if (ret) {
                dev_err(dev, "Failed to map remote memory\n");
                goto err_unlock;
        }
 
-       desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
+       desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+                                          DMA_DEV_TO_MEM,
                                           DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(dev, "Failed to prepare DMA\n");
@@ -332,15 +353,15 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
        }
 
 err_unmap:
-       dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);
+       dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
 err_unlock:
        mutex_unlock(&epf_mhi->lock);
 
        return ret;
 }
 
-static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
-                                 u64 to, size_t size)
+static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
+                                 struct mhi_ep_buf_info *buf_info)
 {
        struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
        struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
@@ -353,13 +374,13 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
        dma_addr_t src_addr;
        int ret;
 
-       if (size < SZ_4K)
-               return pci_epf_mhi_iatu_write(mhi_cntrl, from, to, size);
+       if (buf_info->size < SZ_4K)
+               return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);
 
        mutex_lock(&epf_mhi->lock);
 
        config.direction = DMA_MEM_TO_DEV;
-       config.dst_addr = to;
+       config.dst_addr = buf_info->host_addr;
 
        ret = dmaengine_slave_config(chan, &config);
        if (ret) {
@@ -367,14 +388,16 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
                goto err_unlock;
        }
 
-       src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE);
+       src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+                                 DMA_TO_DEVICE);
        ret = dma_mapping_error(dma_dev, src_addr);
        if (ret) {
                dev_err(dev, "Failed to map remote memory\n");
                goto err_unlock;
        }
 
-       desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV,
+       desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+                                          DMA_MEM_TO_DEV,
                                           DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(dev, "Failed to prepare DMA\n");
@@ -401,7 +424,199 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
        }
 
 err_unmap:
-       dma_unmap_single(dma_dev, src_addr, size, DMA_FROM_DEVICE);
+       dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+err_unlock:
+       mutex_unlock(&epf_mhi->lock);
+
+       return ret;
+}
+
+static void pci_epf_mhi_dma_worker(struct work_struct *work)
+{
+       struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work);
+       struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+       struct pci_epf_mhi_dma_transfer *itr, *tmp;
+       struct mhi_ep_buf_info *buf_info;
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&epf_mhi->list_lock, flags);
+       list_splice_tail_init(&epf_mhi->dma_list, &head);
+       spin_unlock_irqrestore(&epf_mhi->list_lock, flags);
+
+       list_for_each_entry_safe(itr, tmp, &head, node) {
+               list_del(&itr->node);
+               dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir);
+               buf_info = &itr->buf_info;
+               buf_info->cb(buf_info);
+               kfree(itr);
+       }
+}
+
+static void pci_epf_mhi_dma_async_callback(void *param)
+{
+       struct pci_epf_mhi_dma_transfer *transfer = param;
+       struct pci_epf_mhi *epf_mhi = transfer->epf_mhi;
+
+       spin_lock(&epf_mhi->list_lock);
+       list_add_tail(&transfer->node, &epf_mhi->dma_list);
+       spin_unlock(&epf_mhi->list_lock);
+
+       queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work);
+}
+
+static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
+                                      struct mhi_ep_buf_info *buf_info)
+{
+       struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+       struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+       struct pci_epf_mhi_dma_transfer *transfer = NULL;
+       struct dma_chan *chan = epf_mhi->dma_chan_rx;
+       struct device *dev = &epf_mhi->epf->dev;
+       DECLARE_COMPLETION_ONSTACK(complete);
+       struct dma_async_tx_descriptor *desc;
+       struct dma_slave_config config = {};
+       dma_cookie_t cookie;
+       dma_addr_t dst_addr;
+       int ret;
+
+       mutex_lock(&epf_mhi->lock);
+
+       config.direction = DMA_DEV_TO_MEM;
+       config.src_addr = buf_info->host_addr;
+
+       ret = dmaengine_slave_config(chan, &config);
+       if (ret) {
+               dev_err(dev, "Failed to configure DMA channel\n");
+               goto err_unlock;
+       }
+
+       dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+                                 DMA_FROM_DEVICE);
+       ret = dma_mapping_error(dma_dev, dst_addr);
+       if (ret) {
+               dev_err(dev, "Failed to map remote memory\n");
+               goto err_unlock;
+       }
+
+       desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+                                          DMA_DEV_TO_MEM,
+                                          DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+       if (!desc) {
+               dev_err(dev, "Failed to prepare DMA\n");
+               ret = -EIO;
+               goto err_unmap;
+       }
+
+       transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
+       if (!transfer) {
+               ret = -ENOMEM;
+               goto err_unmap;
+       }
+
+       transfer->epf_mhi = epf_mhi;
+       transfer->paddr = dst_addr;
+       transfer->size = buf_info->size;
+       transfer->dir = DMA_FROM_DEVICE;
+       memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
+
+       desc->callback = pci_epf_mhi_dma_async_callback;
+       desc->callback_param = transfer;
+
+       cookie = dmaengine_submit(desc);
+       ret = dma_submit_error(cookie);
+       if (ret) {
+               dev_err(dev, "Failed to do DMA submit\n");
+               goto err_free_transfer;
+       }
+
+       dma_async_issue_pending(chan);
+
+       goto err_unlock;
+
+err_free_transfer:
+       kfree(transfer);
+err_unmap:
+       dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+err_unlock:
+       mutex_unlock(&epf_mhi->lock);
+
+       return ret;
+}
+
+static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
+                                       struct mhi_ep_buf_info *buf_info)
+{
+       struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+       struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+       struct pci_epf_mhi_dma_transfer *transfer = NULL;
+       struct dma_chan *chan = epf_mhi->dma_chan_tx;
+       struct device *dev = &epf_mhi->epf->dev;
+       DECLARE_COMPLETION_ONSTACK(complete);
+       struct dma_async_tx_descriptor *desc;
+       struct dma_slave_config config = {};
+       dma_cookie_t cookie;
+       dma_addr_t src_addr;
+       int ret;
+
+       mutex_lock(&epf_mhi->lock);
+
+       config.direction = DMA_MEM_TO_DEV;
+       config.dst_addr = buf_info->host_addr;
+
+       ret = dmaengine_slave_config(chan, &config);
+       if (ret) {
+               dev_err(dev, "Failed to configure DMA channel\n");
+               goto err_unlock;
+       }
+
+       src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+                                 DMA_TO_DEVICE);
+       ret = dma_mapping_error(dma_dev, src_addr);
+       if (ret) {
+               dev_err(dev, "Failed to map remote memory\n");
+               goto err_unlock;
+       }
+
+       desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+                                          DMA_MEM_TO_DEV,
+                                          DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+       if (!desc) {
+               dev_err(dev, "Failed to prepare DMA\n");
+               ret = -EIO;
+               goto err_unmap;
+       }
+
+       transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
+       if (!transfer) {
+               ret = -ENOMEM;
+               goto err_unmap;
+       }
+
+       transfer->epf_mhi = epf_mhi;
+       transfer->paddr = src_addr;
+       transfer->size = buf_info->size;
+       transfer->dir = DMA_TO_DEVICE;
+       memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
+
+       desc->callback = pci_epf_mhi_dma_async_callback;
+       desc->callback_param = transfer;
+
+       cookie = dmaengine_submit(desc);
+       ret = dma_submit_error(cookie);
+       if (ret) {
+               dev_err(dev, "Failed to do DMA submit\n");
+               goto err_free_transfer;
+       }
+
+       dma_async_issue_pending(chan);
+
+       goto err_unlock;
+
+err_free_transfer:
+       kfree(transfer);
+err_unmap:
+       dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
 err_unlock:
        mutex_unlock(&epf_mhi->lock);
 
@@ -431,6 +646,7 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
        struct device *dev = &epf_mhi->epf->dev;
        struct epf_dma_filter filter;
        dma_cap_mask_t mask;
+       int ret;
 
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
@@ -449,16 +665,35 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
                                                   &filter);
        if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
                dev_err(dev, "Failed to request rx channel\n");
-               dma_release_channel(epf_mhi->dma_chan_tx);
-               epf_mhi->dma_chan_tx = NULL;
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err_release_tx;
+       }
+
+       epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0);
+       if (!epf_mhi->dma_wq) {
+               ret = -ENOMEM;
+               goto err_release_rx;
        }
 
+       INIT_LIST_HEAD(&epf_mhi->dma_list);
+       INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker);
+       spin_lock_init(&epf_mhi->list_lock);
+
        return 0;
+
+err_release_rx:
+       dma_release_channel(epf_mhi->dma_chan_rx);
+       epf_mhi->dma_chan_rx = NULL;
+err_release_tx:
+       dma_release_channel(epf_mhi->dma_chan_tx);
+       epf_mhi->dma_chan_tx = NULL;
+
+       return ret;
 }
 
 static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
 {
+       destroy_workqueue(epf_mhi->dma_wq);
        dma_release_channel(epf_mhi->dma_chan_tx);
        dma_release_channel(epf_mhi->dma_chan_rx);
        epf_mhi->dma_chan_tx = NULL;
@@ -531,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
        mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
        mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
        mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
+       mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
+       mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
        if (info->flags & MHI_EPF_USE_DMA) {
-               mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
-               mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
-       } else {
-               mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
-               mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
+               mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
+               mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
+               mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
+               mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
        }
 
        /* Register the MHI EP controller */
@@ -644,7 +880,7 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
        pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
 }
 
-static struct pci_epc_event_ops pci_epf_mhi_event_ops = {
+static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
        .core_init = pci_epf_mhi_core_init,
        .link_up = pci_epf_mhi_link_up,
        .link_down = pci_epf_mhi_link_down,
@@ -682,7 +918,7 @@ static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
        {},
 };
 
-static struct pci_epf_ops pci_epf_mhi_ops = {
+static const struct pci_epf_ops pci_epf_mhi_ops = {
        .unbind = pci_epf_mhi_unbind,
        .bind   = pci_epf_mhi_bind,
 };
index 9aac2c6f3bb99ddd2c3515a74b59479f77f8ad4b..0553946005c4d92990af6ca8ce6962ea51e74772 100644 (file)
@@ -140,9 +140,9 @@ static struct pci_epf_header epf_ntb_header = {
 static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
 {
        enum pci_epc_interface_type type;
-       enum pci_epc_irq_type irq_type;
        struct epf_ntb_epc *ntb_epc;
        struct epf_ntb_ctrl *ctrl;
+       unsigned int irq_type;
        struct pci_epc *epc;
        u8 func_no, vfunc_no;
        bool is_msix;
@@ -159,7 +159,7 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
                        ctrl->link_status |= LINK_STATUS_UP;
                else
                        ctrl->link_status &= ~LINK_STATUS_UP;
-               irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
+               irq_type = is_msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
                ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
                if (ret) {
                        dev_err(&epc->dev,
@@ -2099,7 +2099,7 @@ static int epf_ntb_probe(struct pci_epf *epf,
        return 0;
 }
 
-static struct pci_epf_ops epf_ntb_ops = {
+static const struct pci_epf_ops epf_ntb_ops = {
        .bind   = epf_ntb_bind,
        .unbind = epf_ntb_unbind,
        .add_cfs = epf_ntb_add_cfs,
index 1f0d2b84296a34f081648dcc783cdb96fe8accb7..18c80002d3bd57c1116e04937371eb7077582835 100644 (file)
 #include <linux/pci-epf.h>
 #include <linux/pci_regs.h>
 
-#define IRQ_TYPE_LEGACY                        0
+#define IRQ_TYPE_INTX                  0
 #define IRQ_TYPE_MSI                   1
 #define IRQ_TYPE_MSIX                  2
 
-#define COMMAND_RAISE_LEGACY_IRQ       BIT(0)
+#define COMMAND_RAISE_INTX_IRQ         BIT(0)
 #define COMMAND_RAISE_MSI_IRQ          BIT(1)
 #define COMMAND_RAISE_MSIX_IRQ         BIT(2)
 #define COMMAND_READ                   BIT(3)
@@ -600,9 +600,9 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
        WRITE_ONCE(reg->status, status);
 
        switch (reg->irq_type) {
-       case IRQ_TYPE_LEGACY:
+       case IRQ_TYPE_INTX:
                pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
-                                 PCI_EPC_IRQ_LEGACY, 0);
+                                 PCI_IRQ_INTX, 0);
                break;
        case IRQ_TYPE_MSI:
                count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
@@ -612,7 +612,7 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
                        return;
                }
                pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
-                                 PCI_EPC_IRQ_MSI, reg->irq_number);
+                                 PCI_IRQ_MSI, reg->irq_number);
                break;
        case IRQ_TYPE_MSIX:
                count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
@@ -622,7 +622,7 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
                        return;
                }
                pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
-                                 PCI_EPC_IRQ_MSIX, reg->irq_number);
+                                 PCI_IRQ_MSIX, reg->irq_number);
                break;
        default:
                dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -659,7 +659,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
        }
 
        switch (command) {
-       case COMMAND_RAISE_LEGACY_IRQ:
+       case COMMAND_RAISE_INTX_IRQ:
        case COMMAND_RAISE_MSI_IRQ:
        case COMMAND_RAISE_MSIX_IRQ:
                pci_epf_test_raise_irq(epf_test, reg);
@@ -973,7 +973,7 @@ static int pci_epf_test_probe(struct pci_epf *epf,
        return 0;
 }
 
-static struct pci_epf_ops ops = {
+static const struct pci_epf_ops ops = {
        .unbind = pci_epf_test_unbind,
        .bind   = pci_epf_test_bind,
 };
index 3f60128560ed0f3b71e6ba160495f13de5f6a6b7..e75a2af77328ea47efb67ea358b4f847d4275687 100644 (file)
@@ -1172,11 +1172,8 @@ static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
        func_no = ntb->epf->func_no;
        vfunc_no = ntb->epf->vfunc_no;
 
-       ret = pci_epc_raise_irq(ntb->epf->epc,
-                               func_no,
-                               vfunc_no,
-                               PCI_EPC_IRQ_MSI,
-                               interrupt_num + 1);
+       ret = pci_epc_raise_irq(ntb->epf->epc, func_no, vfunc_no,
+                               PCI_IRQ_MSI, interrupt_num + 1);
        if (ret)
                dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");
 
@@ -1387,7 +1384,7 @@ static void epf_ntb_unbind(struct pci_epf *epf)
 }
 
 // EPF driver probe
-static struct pci_epf_ops epf_ntb_ops = {
+static const struct pci_epf_ops epf_ntb_ops = {
        .bind   = epf_ntb_bind,
        .unbind = epf_ntb_unbind,
        .add_cfs = epf_ntb_add_cfs,
index 56e1184bc6c24f9e36097f2a167258ab89361fb9..dcd4e66430c10a9328f05a11f9c28c88b2c5c4bf 100644 (file)
@@ -211,13 +211,13 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
  * @epc: the EPC device which has to interrupt the host
  * @func_no: the physical endpoint function number in the EPC device
  * @vfunc_no: the virtual endpoint function number in the physical function
- * @type: specify the type of interrupt; legacy, MSI or MSI-X
+ * @type: specify the type of interrupt; INTX, MSI or MSI-X
  * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
  *
- * Invoke to raise an legacy, MSI or MSI-X interrupt
+ * Invoke to raise an INTX, MSI or MSI-X interrupt
  */
 int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
-                     enum pci_epc_irq_type type, u16 interrupt_num)
+                     unsigned int type, u16 interrupt_num)
 {
        int ret;
 
index 25dbe85c4217585505510aeb19cadb7f7f491bdb..aaa33e8dc4c97734b5b1a678418de69426e2f05f 100644 (file)
@@ -745,6 +745,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
        u16 ctrl, total;
        struct pci_sriov *iov;
        struct resource *res;
+       const char *res_name;
        struct pci_dev *pdev;
 
        pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
@@ -785,6 +786,8 @@ found:
        nres = 0;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = &dev->resource[i + PCI_IOV_RESOURCES];
+               res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES);
+
                /*
                 * If it is already FIXED, don't change it, something
                 * (perhaps EA or header fixups) wants it this way.
@@ -802,8 +805,8 @@ found:
                }
                iov->barsz[i] = resource_size(res);
                res->end = res->start + resource_size(res) * total - 1;
-               pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
-                        i, res, i, total);
+               pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
+                        res_name, res, i, total);
                i += bar64;
                nres++;
        }
index ae550d71b815c4f9bd5d9127de6b835e173fcfb5..9ab9b1008d8b9de897f1a1372f128e849b8a73a2 100644 (file)
@@ -850,6 +850,66 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
 }
 EXPORT_SYMBOL(pci_find_resource);
 
+/**
+ * pci_resource_name - Return the name of the PCI resource
+ * @dev: PCI device to query
+ * @i: index of the resource
+ *
+ * Return the standard PCI resource (BAR) name according to their index.
+ */
+const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
+{
+       static const char * const bar_name[] = {
+               "BAR 0",
+               "BAR 1",
+               "BAR 2",
+               "BAR 3",
+               "BAR 4",
+               "BAR 5",
+               "ROM",
+#ifdef CONFIG_PCI_IOV
+               "VF BAR 0",
+               "VF BAR 1",
+               "VF BAR 2",
+               "VF BAR 3",
+               "VF BAR 4",
+               "VF BAR 5",
+#endif
+               "bridge window",        /* "io" included in %pR */
+               "bridge window",        /* "mem" included in %pR */
+               "bridge window",        /* "mem pref" included in %pR */
+       };
+       static const char * const cardbus_name[] = {
+               "BAR 1",
+               "unknown",
+               "unknown",
+               "unknown",
+               "unknown",
+               "unknown",
+#ifdef CONFIG_PCI_IOV
+               "unknown",
+               "unknown",
+               "unknown",
+               "unknown",
+               "unknown",
+               "unknown",
+#endif
+               "CardBus bridge window 0",      /* I/O */
+               "CardBus bridge window 1",      /* I/O */
+               "CardBus bridge window 0",      /* mem */
+               "CardBus bridge window 1",      /* mem */
+       };
+
+       if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
+           i < ARRAY_SIZE(cardbus_name))
+               return cardbus_name[i];
+
+       if (i < ARRAY_SIZE(bar_name))
+               return bar_name[i];
+
+       return "unknown";
+}
+
 /**
  * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
  * @dev: the PCI device to operate on
@@ -1294,6 +1354,7 @@ end:
 /**
  * pci_set_full_power_state - Put a PCI device into D0 and update its state
  * @dev: PCI device to power up
+ * @locked: whether pci_bus_sem is held
  *
  * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
  * to confirm the state change, restore its BARs if they might be lost and
@@ -1303,7 +1364,7 @@ end:
  * to D0, it is more efficient to use pci_power_up() directly instead of this
  * function.
  */
-static int pci_set_full_power_state(struct pci_dev *dev)
+static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
 {
        u16 pmcsr;
        int ret;
@@ -1339,7 +1400,7 @@ static int pci_set_full_power_state(struct pci_dev *dev)
        }
 
        if (dev->bus->self)
-               pcie_aspm_pm_state_change(dev->bus->self);
+               pcie_aspm_pm_state_change(dev->bus->self, locked);
 
        return 0;
 }
@@ -1368,10 +1429,22 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
                pci_walk_bus(bus, __pci_dev_set_current_state, &state);
 }
 
+static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
+{
+       if (!bus)
+               return;
+
+       if (locked)
+               pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
+       else
+               pci_walk_bus(bus, __pci_dev_set_current_state, &state);
+}
+
 /**
  * pci_set_low_power_state - Put a PCI device into a low-power state.
  * @dev: PCI device to handle.
  * @state: PCI power state (D1, D2, D3hot) to put the device into.
+ * @locked: whether pci_bus_sem is held
  *
  * Use the device's PCI_PM_CTRL register to put it into a low-power state.
  *
@@ -1382,7 +1455,7 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
  * 0 if device already is in the requested state.
  * 0 if device's power state has been successfully changed.
  */
-static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
 {
        u16 pmcsr;
 
@@ -1436,29 +1509,12 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
                                     pci_power_name(state));
 
        if (dev->bus->self)
-               pcie_aspm_pm_state_change(dev->bus->self);
+               pcie_aspm_pm_state_change(dev->bus->self, locked);
 
        return 0;
 }
 
-/**
- * pci_set_power_state - Set the power state of a PCI device
- * @dev: PCI device to handle.
- * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
- *
- * Transition a device to a new power state, using the platform firmware and/or
- * the device's PCI PM registers.
- *
- * RETURN VALUE:
- * -EINVAL if the requested state is invalid.
- * -EIO if device does not support PCI PM or its PM capabilities register has a
- * wrong version, or device doesn't support the requested state.
- * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
- * 0 if device already is in the requested state.
- * 0 if the transition is to D3 but D3 is not supported.
- * 0 if device's power state has been successfully changed.
- */
-int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
 {
        int error;
 
@@ -1482,7 +1538,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
                return 0;
 
        if (state == PCI_D0)
-               return pci_set_full_power_state(dev);
+               return pci_set_full_power_state(dev, locked);
 
        /*
         * This device is quirked not to be put into D3, so don't put it in
@@ -1496,16 +1552,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
                 * To put the device in D3cold, put it into D3hot in the native
                 * way, then put it into D3cold using platform ops.
                 */
-               error = pci_set_low_power_state(dev, PCI_D3hot);
+               error = pci_set_low_power_state(dev, PCI_D3hot, locked);
 
                if (pci_platform_power_transition(dev, PCI_D3cold))
                        return error;
 
                /* Powering off a bridge may power off the whole hierarchy */
                if (dev->current_state == PCI_D3cold)
-                       pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+                       __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
        } else {
-               error = pci_set_low_power_state(dev, state);
+               error = pci_set_low_power_state(dev, state, locked);
 
                if (pci_platform_power_transition(dev, state))
                        return error;
@@ -1513,8 +1569,38 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 
        return 0;
 }
+
+/**
+ * pci_set_power_state - Set the power state of a PCI device
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ *
+ * Transition a device to a new power state, using the platform firmware and/or
+ * the device's PCI PM registers.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
+ * 0 if device already is in the requested state.
+ * 0 if the transition is to D3 but D3 is not supported.
+ * 0 if device's power state has been successfully changed.
+ */
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+       return __pci_set_power_state(dev, state, false);
+}
 EXPORT_SYMBOL(pci_set_power_state);
 
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{
+       lockdep_assert_held(&pci_bus_sem);
+
+       return __pci_set_power_state(dev, state, true);
+}
+EXPORT_SYMBOL(pci_set_power_state_locked);
+
 #define PCI_EXP_SAVE_REGS      7
 
 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
@@ -3299,6 +3385,7 @@ static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
 static int pci_ea_read(struct pci_dev *dev, int offset)
 {
        struct resource *res;
+       const char *res_name;
        int ent_size, ent_offset = offset;
        resource_size_t start, end;
        unsigned long flags;
@@ -3328,6 +3415,7 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
                goto out;
 
        res = pci_ea_get_resource(dev, bei, prop);
+       res_name = pci_resource_name(dev, bei);
        if (!res) {
                pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
                goto out;
@@ -3401,16 +3489,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
        res->flags = flags;
 
        if (bei <= PCI_EA_BEI_BAR5)
-               pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
-                          bei, res, prop);
+               pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
+                        res_name, res, prop);
        else if (bei == PCI_EA_BEI_ROM)
-               pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
-                          res, prop);
+               pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
+                        res_name, res, prop);
        else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
-               pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
-                          bei - PCI_EA_BEI_VF_BAR0, res, prop);
+               pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
+                        res_name, res, prop);
        else
-               pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
+               pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
                           bei, res, prop);
 
 out:
@@ -6233,6 +6321,41 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
 }
 EXPORT_SYMBOL(pcie_set_mps);
 
+static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
+{
+       return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
+}
+
+int pcie_link_speed_mbps(struct pci_dev *pdev)
+{
+       u16 lnksta;
+       int err;
+
+       err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
+       if (err)
+               return err;
+
+       switch (to_pcie_link_speed(lnksta)) {
+       case PCIE_SPEED_2_5GT:
+               return 2500;
+       case PCIE_SPEED_5_0GT:
+               return 5000;
+       case PCIE_SPEED_8_0GT:
+               return 8000;
+       case PCIE_SPEED_16_0GT:
+               return 16000;
+       case PCIE_SPEED_32_0GT:
+               return 32000;
+       case PCIE_SPEED_64_0GT:
+               return 64000;
+       default:
+               break;
+       }
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL(pcie_link_speed_mbps);
+
 /**
  * pcie_bandwidth_available - determine minimum link settings of a PCIe
  *                           device and its bandwidth limitation
@@ -6266,8 +6389,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
        while (dev) {
                pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
 
-               next_speed = pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS,
-                                                      lnksta)];
+               next_speed = to_pcie_link_speed(lnksta);
                next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
 
                next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
@@ -6698,14 +6820,15 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
                                           resource_size_t align, bool resize)
 {
        struct resource *r = &dev->resource[bar];
+       const char *r_name = pci_resource_name(dev, bar);
        resource_size_t size;
 
        if (!(r->flags & IORESOURCE_MEM))
                return;
 
        if (r->flags & IORESOURCE_PCI_FIXED) {
-               pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
-                        bar, r, (unsigned long long)align);
+               pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
+                        r_name, r, (unsigned long long)align);
                return;
        }
 
@@ -6741,8 +6864,8 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
         * devices and we use the second.
         */
 
-       pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
-                bar, r, (unsigned long long)align);
+       pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
+                r_name, r, (unsigned long long)align);
 
        if (resize) {
                r->start = 0;
index f43873049d52cf08e71fcf26e9dcc74afb7ef6d8..e9750b1b19bad5bfc500909f390f1d890f5eab73 100644 (file)
@@ -255,6 +255,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
                                struct list_head *fail_head);
 bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
 
+const char *pci_resource_name(struct pci_dev *dev, unsigned int i);
+
 void pci_reassigndev_resource_alignment(struct pci_dev *dev);
 void pci_disable_bridge_window(struct pci_dev *dev);
 struct pci_bus *pci_bus_get(struct pci_bus *bus);
@@ -272,7 +274,7 @@ void pci_bus_put(struct pci_bus *bus);
 
 /* PCIe speed to Mb/s reduced by encoding overhead */
 #define PCIE_SPEED2MBS_ENC(speed) \
-       ((speed) == PCIE_SPEED_64_0GT ? 64000*128/130 : \
+       ((speed) == PCIE_SPEED_64_0GT ? 64000*1/1 : \
         (speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \
         (speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
         (speed) == PCIE_SPEED_8_0GT  ?  8000*128/130 : \
@@ -569,12 +571,12 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt);
 #ifdef CONFIG_PCIEASPM
 void pcie_aspm_init_link_state(struct pci_dev *pdev);
 void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
 void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
 #else
 static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
 static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
+static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
 static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
 #endif
 
index 42a3bd35a3e118d8eb656d1d24b9f0fa0f4afaf7..05fc30bb5134d9ae31e681375d843b0f49738c30 100644 (file)
@@ -41,8 +41,8 @@
 #define AER_MAX_TYPEOF_UNCOR_ERRS      27      /* as per PCI_ERR_UNCOR_STATUS*/
 
 struct aer_err_source {
-       unsigned int status;
-       unsigned int id;
+       u32 status;                     /* PCI_ERR_ROOT_STATUS */
+       u32 id;                         /* PCI_ERR_ROOT_ERR_SRC */
 };
 
 struct aer_rpc {
@@ -435,10 +435,10 @@ void pci_aer_exit(struct pci_dev *dev)
 /*
  * AER error strings
  */
-static const char *aer_error_severity_string[] = {
-       "Uncorrected (Non-Fatal)",
-       "Uncorrected (Fatal)",
-       "Corrected"
+static const char * const aer_error_severity_string[] = {
+       "Uncorrectable (Non-Fatal)",
+       "Uncorrectable (Fatal)",
+       "Correctable"
 };
 
 static const char *aer_error_layer[] = {
@@ -740,7 +740,7 @@ static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
        u8 bus = info->id >> 8;
        u8 devfn = info->id & 0xff;
 
-       pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n",
+       pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d\n",
                 info->multi_error_valid ? "Multiple " : "",
                 aer_error_severity_string[info->severity],
                 pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
@@ -929,7 +929,12 @@ static bool find_source_device(struct pci_dev *parent,
                pci_walk_bus(parent->subordinate, find_device_iter, e_info);
 
        if (!e_info->error_dev_num) {
-               pci_info(parent, "can't find device of ID%04x\n", e_info->id);
+               u8 bus = e_info->id >> 8;
+               u8 devfn = e_info->id & 0xff;
+
+               pci_info(parent, "found no error details for %04x:%02x:%02x.%d\n",
+                        pci_domain_nr(parent->bus), bus, PCI_SLOT(devfn),
+                        PCI_FUNC(devfn));
                return false;
        }
        return true;
index 5a0066ecc3c5adcc97e14f08f166c783f254f6e9..bc0bd86695ec62a2d43428b69eb562f771334bb3 100644 (file)
@@ -1003,8 +1003,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
        up_read(&pci_bus_sem);
 }
 
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
+/*
+ * @pdev: the root port or switch downstream port
+ * @locked: whether pci_bus_sem is held
+ */
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
 {
        struct pcie_link_state *link = pdev->link_state;
 
@@ -1014,12 +1017,14 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
         * Devices changed PM state, we should recheck if latency
         * meets all functions' requirement
         */
-       down_read(&pci_bus_sem);
+       if (!locked)
+               down_read(&pci_bus_sem);
        mutex_lock(&aspm_lock);
        pcie_update_aspm_capable(link->root);
        pcie_config_aspm_path(link);
        mutex_unlock(&aspm_lock);
-       up_read(&pci_bus_sem);
+       if (!locked)
+               up_read(&pci_bus_sem);
 }
 
 void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
index ed6b7f48736ad8b3c1a0cd83b2c0d213bffce4f5..b7335be56008f76ce0da493b56b40a35928af756 100644 (file)
@@ -180,6 +180,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
        u64 l64, sz64, mask64;
        u16 orig_cmd;
        struct pci_bus_region region, inverted_region;
+       const char *res_name = pci_resource_name(dev, res - dev->resource);
 
        mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
 
@@ -254,8 +255,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 
        sz64 = pci_size(l64, sz64, mask64);
        if (!sz64) {
-               pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
-                        pos);
+               pci_info(dev, FW_BUG "%s: invalid; can't size\n", res_name);
                goto fail;
        }
 
@@ -265,8 +265,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                        res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
                        res->start = 0;
                        res->end = 0;
-                       pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
-                               pos, (unsigned long long)sz64);
+                       pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n",
+                               res_name, (unsigned long long)sz64);
                        goto out;
                }
 
@@ -275,8 +275,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                        res->flags |= IORESOURCE_UNSET;
                        res->start = 0;
                        res->end = sz64 - 1;
-                       pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
-                                pos, (unsigned long long)l64);
+                       pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n",
+                                res_name, (unsigned long long)l64);
                        goto out;
                }
        }
@@ -302,8 +302,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                res->flags |= IORESOURCE_UNSET;
                res->start = 0;
                res->end = region.end - region.start;
-               pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
-                        pos, (unsigned long long)region.start);
+               pci_info(dev, "%s: initial BAR value %#010llx invalid\n",
+                        res_name, (unsigned long long)region.start);
        }
 
        goto out;
@@ -313,7 +313,7 @@ fail:
        res->flags = 0;
 out:
        if (res->flags)
-               pci_info(dev, "reg 0x%x: %pR\n", pos, res);
+               pci_info(dev, "%s %pR\n", res_name, res);
 
        return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
 }
@@ -344,64 +344,12 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
        }
 }
 
-static void pci_read_bridge_windows(struct pci_dev *bridge)
+static void pci_read_bridge_io(struct pci_dev *dev, struct resource *res,
+                              bool log)
 {
-       u16 io;
-       u32 pmem, tmp;
-
-       pci_read_config_word(bridge, PCI_IO_BASE, &io);
-       if (!io) {
-               pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
-               pci_read_config_word(bridge, PCI_IO_BASE, &io);
-               pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
-       }
-       if (io)
-               bridge->io_window = 1;
-
-       /*
-        * DECchip 21050 pass 2 errata: the bridge may miss an address
-        * disconnect boundary by one PCI data phase.  Workaround: do not
-        * use prefetching on this device.
-        */
-       if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
-               return;
-
-       pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
-       if (!pmem) {
-               pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
-                                              0xffe0fff0);
-               pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
-               pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
-       }
-       if (!pmem)
-               return;
-
-       bridge->pref_window = 1;
-
-       if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
-
-               /*
-                * Bridge claims to have a 64-bit prefetchable memory
-                * window; verify that the upper bits are actually
-                * writable.
-                */
-               pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
-               pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
-                                      0xffffffff);
-               pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
-               pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
-               if (tmp)
-                       bridge->pref_64_window = 1;
-       }
-}
-
-static void pci_read_bridge_io(struct pci_bus *child)
-{
-       struct pci_dev *dev = child->self;
        u8 io_base_lo, io_limit_lo;
        unsigned long io_mask, io_granularity, base, limit;
        struct pci_bus_region region;
-       struct resource *res;
 
        io_mask = PCI_IO_RANGE_MASK;
        io_granularity = 0x1000;
@@ -411,7 +359,6 @@ static void pci_read_bridge_io(struct pci_bus *child)
                io_granularity = 0x400;
        }
 
-       res = child->resource[0];
        pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
        pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
        base = (io_base_lo & io_mask) << 8;
@@ -431,19 +378,18 @@ static void pci_read_bridge_io(struct pci_bus *child)
                region.start = base;
                region.end = limit + io_granularity - 1;
                pcibios_bus_to_resource(dev->bus, res, &region);
-               pci_info(dev, "  bridge window %pR\n", res);
+               if (log)
+                       pci_info(dev, "  bridge window %pR\n", res);
        }
 }
 
-static void pci_read_bridge_mmio(struct pci_bus *child)
+static void pci_read_bridge_mmio(struct pci_dev *dev, struct resource *res,
+                                bool log)
 {
-       struct pci_dev *dev = child->self;
        u16 mem_base_lo, mem_limit_lo;
        unsigned long base, limit;
        struct pci_bus_region region;
-       struct resource *res;
 
-       res = child->resource[1];
        pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
        pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
        base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
@@ -453,20 +399,19 @@ static void pci_read_bridge_mmio(struct pci_bus *child)
                region.start = base;
                region.end = limit + 0xfffff;
                pcibios_bus_to_resource(dev->bus, res, &region);
-               pci_info(dev, "  bridge window %pR\n", res);
+               if (log)
+                       pci_info(dev, "  bridge window %pR\n", res);
        }
 }
 
-static void pci_read_bridge_mmio_pref(struct pci_bus *child)
+static void pci_read_bridge_mmio_pref(struct pci_dev *dev, struct resource *res,
+                                     bool log)
 {
-       struct pci_dev *dev = child->self;
        u16 mem_base_lo, mem_limit_lo;
        u64 base64, limit64;
        pci_bus_addr_t base, limit;
        struct pci_bus_region region;
-       struct resource *res;
 
-       res = child->resource[2];
        pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
        pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
        base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
@@ -506,10 +451,77 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
                region.start = base;
                region.end = limit + 0xfffff;
                pcibios_bus_to_resource(dev->bus, res, &region);
-               pci_info(dev, "  bridge window %pR\n", res);
+               if (log)
+                       pci_info(dev, "  bridge window %pR\n", res);
        }
 }
 
+static void pci_read_bridge_windows(struct pci_dev *bridge)
+{
+       u32 buses;
+       u16 io;
+       u32 pmem, tmp;
+       struct resource res;
+
+       pci_read_config_dword(bridge, PCI_PRIMARY_BUS, &buses);
+       res.flags = IORESOURCE_BUS;
+       res.start = (buses >> 8) & 0xff;
+       res.end = (buses >> 16) & 0xff;
+       pci_info(bridge, "PCI bridge to %pR%s\n", &res,
+                bridge->transparent ? " (subtractive decode)" : "");
+
+       pci_read_config_word(bridge, PCI_IO_BASE, &io);
+       if (!io) {
+               pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
+               pci_read_config_word(bridge, PCI_IO_BASE, &io);
+               pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
+       }
+       if (io) {
+               bridge->io_window = 1;
+               pci_read_bridge_io(bridge, &res, true);
+       }
+
+       pci_read_bridge_mmio(bridge, &res, true);
+
+       /*
+        * DECchip 21050 pass 2 errata: the bridge may miss an address
+        * disconnect boundary by one PCI data phase.  Workaround: do not
+        * use prefetching on this device.
+        */
+       if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
+               return;
+
+       pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+       if (!pmem) {
+               pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
+                                              0xffe0fff0);
+               pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+               pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
+       }
+       if (!pmem)
+               return;
+
+       bridge->pref_window = 1;
+
+       if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
+
+               /*
+                * Bridge claims to have a 64-bit prefetchable memory
+                * window; verify that the upper bits are actually
+                * writable.
+                */
+               pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
+               pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+                                      0xffffffff);
+               pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+               pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
+               if (tmp)
+                       bridge->pref_64_window = 1;
+       }
+
+       pci_read_bridge_mmio_pref(bridge, &res, true);
+}
+
 void pci_read_bridge_bases(struct pci_bus *child)
 {
        struct pci_dev *dev = child->self;
@@ -527,9 +539,9 @@ void pci_read_bridge_bases(struct pci_bus *child)
        for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
                child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
 
-       pci_read_bridge_io(child);
-       pci_read_bridge_mmio(child);
-       pci_read_bridge_mmio_pref(child);
+       pci_read_bridge_io(child->self, child->resource[0], false);
+       pci_read_bridge_mmio(child->self, child->resource[1], false);
+       pci_read_bridge_mmio_pref(child->self, child->resource[2], false);
 
        if (dev->transparent) {
                pci_bus_for_each_resource(child->parent, res) {
@@ -1817,6 +1829,43 @@ static void early_dump_pci_device(struct pci_dev *pdev)
                       value, 256, false);
 }
 
+/*
+ * pci_type_str - return a human-readable name for a device's PCI/PCIe type.
+ *
+ * For PCIe devices the name is looked up from the Device/Port Type field
+ * of the PCIe Capability (via pci_pcie_type()); for conventional PCI the
+ * header type is used instead.  Returned strings are static; callers must
+ * not free or modify them.
+ */
+static const char *pci_type_str(struct pci_dev *dev)
+{
+       /*
+        * Indexed by the PCI_EXP_TYPE_* encoding (PCIe spec Device/Port
+        * Type, values 0..10).  Encodings 2 and 3 are reserved, hence
+        * the two "PCIe unknown" placeholders.
+        */
+       static const char * const str[] = {
+               "PCIe Endpoint",
+               "PCIe Legacy Endpoint",
+               "PCIe unknown",
+               "PCIe unknown",
+               "PCIe Root Port",
+               "PCIe Switch Upstream Port",
+               "PCIe Switch Downstream Port",
+               "PCIe to PCI/PCI-X bridge",
+               "PCI/PCI-X to PCIe bridge",
+               "PCIe Root Complex Integrated Endpoint",
+               "PCIe Root Complex Event Collector",
+       };
+       int type;
+
+       if (pci_is_pcie(dev)) {
+               type = pci_pcie_type(dev);
+               if (type < ARRAY_SIZE(str))
+                       return str[type];
+
+               /* Type encodings beyond the table (future spec revisions) */
+               return "PCIe unknown";
+       }
+
+       /* Conventional PCI: classify by configuration header type */
+       switch (dev->hdr_type) {
+       case PCI_HEADER_TYPE_NORMAL:
+               return "conventional PCI endpoint";
+       case PCI_HEADER_TYPE_BRIDGE:
+               return "conventional PCI bridge";
+       case PCI_HEADER_TYPE_CARDBUS:
+               return "CardBus bridge";
+       default:
+               return "conventional PCI";
+       }
+}
+
 /**
  * pci_setup_device - Fill in class and map information of a device
  * @dev: the device structure to fill
@@ -1887,8 +1936,9 @@ int pci_setup_device(struct pci_dev *dev)
 
        pci_set_removable(dev);
 
-       pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
-                dev->vendor, dev->device, dev->hdr_type, dev->class);
+       pci_info(dev, "[%04x:%04x] type %02x class %#08x %s\n",
+                dev->vendor, dev->device, dev->hdr_type, dev->class,
+                pci_type_str(dev));
 
        /* Device class may be changed after fixup */
        class = dev->class >> 8;
@@ -1929,14 +1979,14 @@ int pci_setup_device(struct pci_dev *dev)
                                res = &dev->resource[0];
                                res->flags = LEGACY_IO_RESOURCE;
                                pcibios_bus_to_resource(dev->bus, res, &region);
-                               pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n",
+                               pci_info(dev, "BAR 0 %pR: legacy IDE quirk\n",
                                         res);
                                region.start = 0x3F6;
                                region.end = 0x3F6;
                                res = &dev->resource[1];
                                res->flags = LEGACY_IO_RESOURCE;
                                pcibios_bus_to_resource(dev->bus, res, &region);
-                               pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n",
+                               pci_info(dev, "BAR 1 %pR: legacy IDE quirk\n",
                                         res);
                        }
                        if ((progif & 4) == 0) {
@@ -1945,14 +1995,14 @@ int pci_setup_device(struct pci_dev *dev)
                                res = &dev->resource[2];
                                res->flags = LEGACY_IO_RESOURCE;
                                pcibios_bus_to_resource(dev->bus, res, &region);
-                               pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n",
+                               pci_info(dev, "BAR 2 %pR: legacy IDE quirk\n",
                                         res);
                                region.start = 0x376;
                                region.end = 0x376;
                                res = &dev->resource[3];
                                res->flags = LEGACY_IO_RESOURCE;
                                pcibios_bus_to_resource(dev->bus, res, &region);
-                               pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n",
+                               pci_info(dev, "BAR 3 %pR: legacy IDE quirk\n",
                                         res);
                        }
                }
index d208047d1b8f22f972b59218e1ba870a4c462cc2..d797df6e5f3e917d08dfce4e40f594e322a5782e 100644 (file)
@@ -570,13 +570,14 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
 
        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                struct resource *r = &dev->resource[i];
+               const char *r_name = pci_resource_name(dev, i);
 
                if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
                        r->end = PAGE_SIZE - 1;
                        r->start = 0;
                        r->flags |= IORESOURCE_UNSET;
-                       pci_info(dev, "expanded BAR %d to page size: %pR\n",
-                                i, r);
+                       pci_info(dev, "%s %pR: expanded to page size\n",
+                                r_name, r);
                }
        }
 }
@@ -605,6 +606,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
        u32 region;
        struct pci_bus_region bus_region;
        struct resource *res = dev->resource + pos;
+       const char *res_name = pci_resource_name(dev, pos);
 
        pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
 
@@ -622,8 +624,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
        bus_region.end = region + size - 1;
        pcibios_bus_to_resource(dev->bus, res, &bus_region);
 
-       pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
-                name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
+       pci_info(dev, FW_BUG "%s %pR: %s quirk\n", res_name, res, name);
 }
 
 /*
@@ -670,6 +671,12 @@ static void quirk_io_region(struct pci_dev *dev, int port,
        bus_region.end = region + size - 1;
        pcibios_bus_to_resource(dev->bus, res, &bus_region);
 
+       /*
+        * "res" is typically a bridge window resource that's not being
+        * used for a bridge window, so it's just a place to stash this
+        * non-standard resource.  Printing "nr" or pci_resource_name() of
+        * it doesn't really make sense.
+        */
        if (!pci_claim_resource(dev, nr))
                pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
 }
@@ -702,10 +709,13 @@ static void quirk_amd_dwc_class(struct pci_dev *pdev)
 {
        u32 class = pdev->class;
 
-       /* Use "USB Device (not host controller)" class */
-       pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
-       pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
-                class, pdev->class);
+       if (class != PCI_CLASS_SERIAL_USB_DEVICE) {
+               /* Use "USB Device (not host controller)" class */
+               pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
+               pci_info(pdev,
+                       "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
+                       class, pdev->class);
+       }
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
                quirk_amd_dwc_class);
@@ -4712,17 +4722,21 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
  * But the implementation could block peer-to-peer transactions between them
  * and provide ACS-like functionality.
  */
-static int  pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
+static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
 {
        if (!pci_is_pcie(dev) ||
            ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
             (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
                return -ENOTTY;
 
+       /*
+        * Future Zhaoxin Root Ports and Switch Downstream Ports will
+        * implement ACS capability in accordance with the PCIe Spec.
+        */
        switch (dev->device) {
        case 0x0710 ... 0x071e:
        case 0x0721:
-       case 0x0723 ... 0x0732:
+       case 0x0723 ... 0x0752:
                return pci_acs_ctrl_enabled(acs_flags,
                        PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
        }
index fd74f1c99dbae766ab67e3cd9d3550b4452b495e..909e6a7c3cc31b46e7d68c8cff2edbfd4b2420d6 100644 (file)
@@ -213,6 +213,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
                                      struct list_head *head)
 {
        struct resource *res;
+       const char *res_name;
        struct pci_dev_resource *add_res, *tmp;
        struct pci_dev_resource *dev_res;
        resource_size_t add_size, align;
@@ -222,6 +223,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
                bool found_match = false;
 
                res = add_res->res;
+
                /* Skip resource that has been reset */
                if (!res->flags)
                        goto out;
@@ -237,6 +239,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
                        continue;
 
                idx = res - &add_res->dev->resource[0];
+               res_name = pci_resource_name(add_res->dev, idx);
                add_size = add_res->add_size;
                align = add_res->min_align;
                if (!resource_size(res)) {
@@ -249,9 +252,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
                                 (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
                        if (pci_reassign_resource(add_res->dev, idx,
                                                  add_size, align))
-                               pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n",
-                                        (unsigned long long) add_size, idx,
-                                        res);
+                               pci_info(add_res->dev, "%s %pR: failed to add %llx\n",
+                                        res_name, res,
+                                        (unsigned long long) add_size);
                }
 out:
                list_del(&add_res->list);
@@ -571,6 +574,7 @@ EXPORT_SYMBOL(pci_setup_cardbus);
 static void pci_setup_bridge_io(struct pci_dev *bridge)
 {
        struct resource *res;
+       const char *res_name;
        struct pci_bus_region region;
        unsigned long io_mask;
        u8 io_base_lo, io_limit_lo;
@@ -583,6 +587,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
 
        /* Set up the top and bottom of the PCI I/O segment for this bus */
        res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
+       res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW);
        pcibios_resource_to_bus(bridge->bus, &region, res);
        if (res->flags & IORESOURCE_IO) {
                pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -591,7 +596,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
                l = ((u16) io_limit_lo << 8) | io_base_lo;
                /* Set up upper 16 bits of I/O base/limit */
                io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
-               pci_info(bridge, "  bridge window %pR\n", res);
+               pci_info(bridge, "  %s %pR\n", res_name, res);
        } else {
                /* Clear upper 16 bits of I/O base/limit */
                io_upper16 = 0;
@@ -608,16 +613,18 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
 static void pci_setup_bridge_mmio(struct pci_dev *bridge)
 {
        struct resource *res;
+       const char *res_name;
        struct pci_bus_region region;
        u32 l;
 
        /* Set up the top and bottom of the PCI Memory segment for this bus */
        res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+       res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW);
        pcibios_resource_to_bus(bridge->bus, &region, res);
        if (res->flags & IORESOURCE_MEM) {
                l = (region.start >> 16) & 0xfff0;
                l |= region.end & 0xfff00000;
-               pci_info(bridge, "  bridge window %pR\n", res);
+               pci_info(bridge, "  %s %pR\n", res_name, res);
        } else {
                l = 0x0000fff0;
        }
@@ -627,6 +634,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
 static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
 {
        struct resource *res;
+       const char *res_name;
        struct pci_bus_region region;
        u32 l, bu, lu;
 
@@ -640,6 +648,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
        /* Set up PREF base/limit */
        bu = lu = 0;
        res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+       res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW);
        pcibios_resource_to_bus(bridge->bus, &region, res);
        if (res->flags & IORESOURCE_PREFETCH) {
                l = (region.start >> 16) & 0xfff0;
@@ -648,7 +657,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
                        bu = upper_32_bits(region.start);
                        lu = upper_32_bits(region.end);
                }
-               pci_info(bridge, "  bridge window %pR\n", res);
+               pci_info(bridge, "  %s %pR\n", res_name, res);
        } else {
                l = 0x0000fff0;
        }
@@ -1013,6 +1022,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
                int i;
 
                pci_dev_for_each_resource(dev, r, i) {
+                       const char *r_name = pci_resource_name(dev, i);
                        resource_size_t r_size;
 
                        if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
@@ -1043,8 +1053,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
                        if (order < 0)
                                order = 0;
                        if (order >= ARRAY_SIZE(aligns)) {
-                               pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
-                                        i, r, (unsigned long long) align);
+                               pci_warn(dev, "%s %pR: disabling; bad alignment %#llx\n",
+                                        r_name, r, (unsigned long long) align);
                                r->flags = 0;
                                continue;
                        }
@@ -2235,6 +2245,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
                for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
                     i++) {
                        struct resource *res = &bridge->resource[i];
+                       const char *res_name = pci_resource_name(bridge, i);
 
                        if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
                                continue;
@@ -2247,8 +2258,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
                        if (ret)
                                goto cleanup;
 
-                       pci_info(bridge, "BAR %d: releasing %pR\n",
-                                i, res);
+                       pci_info(bridge, "%s %pR: releasing\n", res_name, res);
 
                        if (res->parent)
                                release_resource(res);
index ceaa69491f5efe7f06af3ee8b1fe7d0a9cce38f6..c6d933ddfd46491c60bc2e9501bf5ac7610cbcd8 100644 (file)
@@ -30,6 +30,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
        u32 new, check, mask;
        int reg;
        struct resource *res = dev->resource + resno;
+       const char *res_name = pci_resource_name(dev, resno);
 
        /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
        if (dev->is_virtfn)
@@ -104,8 +105,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
        pci_read_config_dword(dev, reg, &check);
 
        if ((new ^ check) & mask) {
-               pci_err(dev, "BAR %d: error updating (%#010x != %#010x)\n",
-                       resno, new, check);
+               pci_err(dev, "%s: error updating (%#010x != %#010x)\n",
+                       res_name, new, check);
        }
 
        if (res->flags & IORESOURCE_MEM_64) {
@@ -113,8 +114,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
                pci_write_config_dword(dev, reg + 4, new);
                pci_read_config_dword(dev, reg + 4, &check);
                if (check != new) {
-                       pci_err(dev, "BAR %d: error updating (high %#010x != %#010x)\n",
-                               resno, new, check);
+                       pci_err(dev, "%s: error updating (high %#010x != %#010x)\n",
+                               res_name, new, check);
                }
        }
 
@@ -135,11 +136,12 @@ void pci_update_resource(struct pci_dev *dev, int resno)
 int pci_claim_resource(struct pci_dev *dev, int resource)
 {
        struct resource *res = &dev->resource[resource];
+       const char *res_name = pci_resource_name(dev, resource);
        struct resource *root, *conflict;
 
        if (res->flags & IORESOURCE_UNSET) {
-               pci_info(dev, "can't claim BAR %d %pR: no address assigned\n",
-                        resource, res);
+               pci_info(dev, "%s %pR: can't claim; no address assigned\n",
+                        res_name, res);
                return -EINVAL;
        }
 
@@ -153,16 +155,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
 
        root = pci_find_parent_resource(dev, res);
        if (!root) {
-               pci_info(dev, "can't claim BAR %d %pR: no compatible bridge window\n",
-                        resource, res);
+               pci_info(dev, "%s %pR: can't claim; no compatible bridge window\n",
+                        res_name, res);
                res->flags |= IORESOURCE_UNSET;
                return -EINVAL;
        }
 
        conflict = request_resource_conflict(root, res);
        if (conflict) {
-               pci_info(dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
-                        resource, res, conflict->name, conflict);
+               pci_info(dev, "%s %pR: can't claim; address conflict with %s %pR\n",
+                        res_name, res, conflict->name, conflict);
                res->flags |= IORESOURCE_UNSET;
                return -EBUSY;
        }
@@ -201,6 +203,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
 {
        struct resource *root, *conflict;
        resource_size_t fw_addr, start, end;
+       const char *res_name = pci_resource_name(dev, resno);
 
        fw_addr = pcibios_retrieve_fw_addr(dev, resno);
        if (!fw_addr)
@@ -231,12 +234,11 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
                        root = &iomem_resource;
        }
 
-       pci_info(dev, "BAR %d: trying firmware assignment %pR\n",
-                resno, res);
+       pci_info(dev, "%s: trying firmware assignment %pR\n", res_name, res);
        conflict = request_resource_conflict(root, res);
        if (conflict) {
-               pci_info(dev, "BAR %d: %pR conflicts with %s %pR\n",
-                        resno, res, conflict->name, conflict);
+               pci_info(dev, "%s %pR: conflicts with %s %pR\n", res_name, res,
+                        conflict->name, conflict);
                res->start = start;
                res->end = end;
                res->flags |= IORESOURCE_UNSET;
@@ -325,6 +327,7 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno,
 int pci_assign_resource(struct pci_dev *dev, int resno)
 {
        struct resource *res = dev->resource + resno;
+       const char *res_name = pci_resource_name(dev, resno);
        resource_size_t align, size;
        int ret;
 
@@ -334,8 +337,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
        res->flags |= IORESOURCE_UNSET;
        align = pci_resource_alignment(dev, res);
        if (!align) {
-               pci_info(dev, "BAR %d: can't assign %pR (bogus alignment)\n",
-                        resno, res);
+               pci_info(dev, "%s %pR: can't assign; bogus alignment\n",
+                        res_name, res);
                return -EINVAL;
        }
 
@@ -348,18 +351,18 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
         * working, which is better than just leaving it disabled.
         */
        if (ret < 0) {
-               pci_info(dev, "BAR %d: no space for %pR\n", resno, res);
+               pci_info(dev, "%s %pR: can't assign; no space\n", res_name, res);
                ret = pci_revert_fw_address(res, dev, resno, size);
        }
 
        if (ret < 0) {
-               pci_info(dev, "BAR %d: failed to assign %pR\n", resno, res);
+               pci_info(dev, "%s %pR: failed to assign\n", res_name, res);
                return ret;
        }
 
        res->flags &= ~IORESOURCE_UNSET;
        res->flags &= ~IORESOURCE_STARTALIGN;
-       pci_info(dev, "BAR %d: assigned %pR\n", resno, res);
+       pci_info(dev, "%s %pR: assigned\n", res_name, res);
        if (resno < PCI_BRIDGE_RESOURCES)
                pci_update_resource(dev, resno);
 
@@ -367,10 +370,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
 }
 EXPORT_SYMBOL(pci_assign_resource);
 
-int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
-                       resource_size_t min_align)
+int pci_reassign_resource(struct pci_dev *dev, int resno,
+                         resource_size_t addsize, resource_size_t min_align)
 {
        struct resource *res = dev->resource + resno;
+       const char *res_name = pci_resource_name(dev, resno);
        unsigned long flags;
        resource_size_t new_size;
        int ret;
@@ -381,8 +385,8 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
        flags = res->flags;
        res->flags |= IORESOURCE_UNSET;
        if (!res->parent) {
-               pci_info(dev, "BAR %d: can't reassign an unassigned resource %pR\n",
-                        resno, res);
+               pci_info(dev, "%s %pR: can't reassign; unassigned resource\n",
+                        res_name, res);
                return -EINVAL;
        }
 
@@ -391,15 +395,15 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
        ret = _pci_assign_resource(dev, resno, new_size, min_align);
        if (ret) {
                res->flags = flags;
-               pci_info(dev, "BAR %d: %pR (failed to expand by %#llx)\n",
-                        resno, res, (unsigned long long) addsize);
+               pci_info(dev, "%s %pR: failed to expand by %#llx\n",
+                        res_name, res, (unsigned long long) addsize);
                return ret;
        }
 
        res->flags &= ~IORESOURCE_UNSET;
        res->flags &= ~IORESOURCE_STARTALIGN;
-       pci_info(dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
-                resno, res, (unsigned long long) addsize);
+       pci_info(dev, "%s %pR: reassigned; expanded by %#llx\n",
+                res_name, res, (unsigned long long) addsize);
        if (resno < PCI_BRIDGE_RESOURCES)
                pci_update_resource(dev, resno);
 
@@ -409,8 +413,9 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
 void pci_release_resource(struct pci_dev *dev, int resno)
 {
        struct resource *res = dev->resource + resno;
+       const char *res_name = pci_resource_name(dev, resno);
 
-       pci_info(dev, "BAR %d: releasing %pR\n", resno, res);
+       pci_info(dev, "%s %pR: releasing\n", res_name, res);
 
        if (!res->parent)
                return;
@@ -480,6 +485,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
        u16 cmd, old_cmd;
        int i;
        struct resource *r;
+       const char *r_name;
 
        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        old_cmd = cmd;
@@ -488,6 +494,8 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
                if (!(mask & (1 << i)))
                        continue;
 
+               r_name = pci_resource_name(dev, i);
+
                if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
                        continue;
                if ((i == PCI_ROM_RESOURCE) &&
@@ -495,14 +503,14 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
                        continue;
 
                if (r->flags & IORESOURCE_UNSET) {
-                       pci_err(dev, "can't enable device: BAR %d %pR not assigned\n",
-                               i, r);
+                       pci_err(dev, "%s %pR: not assigned; can't enable device\n",
+                               r_name, r);
                        return -EINVAL;
                }
 
                if (!r->parent) {
-                       pci_err(dev, "can't enable device: BAR %d %pR not claimed\n",
-                               i, r);
+                       pci_err(dev, "%s %pR: not claimed; can't enable device\n",
+                               r_name, r);
                        return -EINVAL;
                }
 
index 5b921387eca61a7444f20ec3ae9b531c01fdb6c4..1804794d0e686f22febe508c58aa340f03eecd23 100644 (file)
@@ -1308,13 +1308,6 @@ static void stdev_release(struct device *dev)
 {
        struct switchtec_dev *stdev = to_stdev(dev);
 
-       if (stdev->dma_mrpc) {
-               iowrite32(0, &stdev->mmio_mrpc->dma_en);
-               flush_wc_buf(stdev);
-               writeq(0, &stdev->mmio_mrpc->dma_addr);
-               dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
-                               stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
-       }
        kfree(stdev);
 }
 
@@ -1358,7 +1351,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
                return ERR_PTR(-ENOMEM);
 
        stdev->alive = true;
-       stdev->pdev = pdev;
+       stdev->pdev = pci_dev_get(pdev);
        INIT_LIST_HEAD(&stdev->mrpc_queue);
        mutex_init(&stdev->mrpc_mutex);
        stdev->mrpc_busy = 0;
@@ -1391,6 +1384,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
        return stdev;
 
 err_put:
+       pci_dev_put(stdev->pdev);
        put_device(&stdev->dev);
        return ERR_PTR(rc);
 }
@@ -1644,6 +1638,18 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
        return 0;
 }
 
+/*
+ * switchtec_exit_pci - tear down the DMA MRPC machinery set up at probe.
+ *
+ * Counterpart of the DMA MRPC setup in switchtec_init_pci(); called from
+ * switchtec_pci_remove().  The hardware is told to stop DMA (dma_en = 0,
+ * dma_addr = 0) and write-combining buffers are flushed BEFORE the
+ * coherent buffer is freed, so the device cannot write into freed memory.
+ * Clearing stdev->dma_mrpc afterwards makes the function idempotent and
+ * guards against a double free.
+ */
+static void switchtec_exit_pci(struct switchtec_dev *stdev)
+{
+       if (stdev->dma_mrpc) {
+               /* Disable device-side DMA first, then flush posted writes */
+               iowrite32(0, &stdev->mmio_mrpc->dma_en);
+               flush_wc_buf(stdev);
+               writeq(0, &stdev->mmio_mrpc->dma_addr);
+               dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
+                                 stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
+               stdev->dma_mrpc = NULL;
+       }
+}
+
 static int switchtec_pci_probe(struct pci_dev *pdev,
                               const struct pci_device_id *id)
 {
@@ -1703,6 +1709,9 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
        ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
        dev_info(&stdev->dev, "unregistered.\n");
        stdev_kill(stdev);
+       switchtec_exit_pci(stdev);
+       pci_dev_put(stdev->pdev);
+       stdev->pdev = NULL;
        put_device(&stdev->dev);
 }
 
index dd3c26099048773bd9a9d3d0ebbc4b12a56a50bf..a5414441834ad00e78527e1c53ccce11dc690cb9 100644 (file)
@@ -437,7 +437,7 @@ err:
        return ret;
 }
 
-static int bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
+static void bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
 {
        struct bcm63xx_pcmcia_socket *skt;
        struct resource *res;
@@ -449,12 +449,11 @@ static int bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
        res = skt->reg_res;
        release_mem_region(res->start, resource_size(res));
        kfree(skt);
-       return 0;
 }
 
 struct platform_driver bcm63xx_pcmcia_driver = {
        .probe  = bcm63xx_drv_pcmcia_probe,
-       .remove = bcm63xx_drv_pcmcia_remove,
+       .remove_new = bcm63xx_drv_pcmcia_remove,
        .driver = {
                .name   = "bcm63xx_pcmcia",
                .owner  = THIS_MODULE,
index 87a33ecc2cf14eaaec9e7556cde36879d8dee1e1..509713b9a502b7ed57caa1c451043a8669fc3f2c 100644 (file)
@@ -577,7 +577,7 @@ out0:
        return ret;
 }
 
-static int db1x_pcmcia_socket_remove(struct platform_device *pdev)
+static void db1x_pcmcia_socket_remove(struct platform_device *pdev)
 {
        struct db1x_pcmcia_sock *sock = platform_get_drvdata(pdev);
 
@@ -585,8 +585,6 @@ static int db1x_pcmcia_socket_remove(struct platform_device *pdev)
        pcmcia_unregister_socket(&sock->socket);
        iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
        kfree(sock);
-
-       return 0;
 }
 
 static struct platform_driver db1x_pcmcia_socket_driver = {
@@ -594,7 +592,7 @@ static struct platform_driver db1x_pcmcia_socket_driver = {
                .name   = "db1xxx_pcmcia",
        },
        .probe          = db1x_pcmcia_socket_probe,
-       .remove         = db1x_pcmcia_socket_remove,
+       .remove_new     = db1x_pcmcia_socket_remove,
 };
 
 module_platform_driver(db1x_pcmcia_socket_driver);
index efc27bc15152e3ededfa9a60fb6766340553eeee..5ae826e548116cbdfbf5e17845cee6074274f13f 100644 (file)
@@ -307,7 +307,7 @@ out_free_cf:
 
 }
 
-static int electra_cf_remove(struct platform_device *ofdev)
+static void electra_cf_remove(struct platform_device *ofdev)
 {
        struct device *device = &ofdev->dev;
        struct electra_cf_socket *cf;
@@ -326,8 +326,6 @@ static int electra_cf_remove(struct platform_device *ofdev)
        release_region(cf->io_base, cf->io_size);
 
        kfree(cf);
-
-       return 0;
 }
 
 static const struct of_device_id electra_cf_match[] = {
@@ -344,7 +342,7 @@ static struct platform_driver electra_cf_driver = {
                .of_match_table = electra_cf_match,
        },
        .probe    = electra_cf_probe,
-       .remove   = electra_cf_remove,
+       .remove_new = electra_cf_remove,
 };
 
 module_platform_driver(electra_cf_driver);
index e613818dc0bc90b006a9e81bc1fcbaa4ebd69401..80137c7afe0d9bcf08741280ff2924a8cb3ced16 100644 (file)
@@ -290,7 +290,7 @@ fail0:
        return status;
 }
 
-static int __exit omap_cf_remove(struct platform_device *pdev)
+static void __exit omap_cf_remove(struct platform_device *pdev)
 {
        struct omap_cf_socket *cf = platform_get_drvdata(pdev);
 
@@ -300,14 +300,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
        release_mem_region(cf->phys_cf, SZ_8K);
        free_irq(cf->irq, cf);
        kfree(cf);
-       return 0;
 }
 
 static struct platform_driver omap_cf_driver = {
        .driver = {
                .name   = driver_name,
        },
-       .remove         = __exit_p(omap_cf_remove),
+       .remove_new     = __exit_p(omap_cf_remove),
 };
 
 static int __init omap_cf_init(void)
index 5254028354f401b72a5a258ab391c2edb75576be..457fb81b497a10de69ec58973df4247bad244bbc 100644 (file)
@@ -313,15 +313,13 @@ err0:
        return ret;
 }
 
-static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
+static void pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
 {
        struct skt_dev_info *sinfo = platform_get_drvdata(dev);
        int i;
 
        for (i = 0; i < sinfo->nskt; i++)
                soc_pcmcia_remove_one(&sinfo->skt[i]);
-
-       return 0;
 }
 
 static int pxa2xx_drv_pcmcia_resume(struct device *dev)
@@ -338,7 +336,7 @@ static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
 
 static struct platform_driver pxa2xx_pcmcia_driver = {
        .probe          = pxa2xx_drv_pcmcia_probe,
-       .remove         = pxa2xx_drv_pcmcia_remove,
+       .remove_new     = pxa2xx_drv_pcmcia_remove,
        .driver         = {
                .name   = "pxa2xx-pcmcia",
                .pm     = &pxa2xx_drv_pcmcia_pm_ops,
index 89d4ba58c891354c17bd49ec1f5a1736b92b57e1..ccb219c3876170fe7e6580c791472a7e716aa461 100644 (file)
@@ -158,20 +158,18 @@ static int sa11x0_drv_pcmcia_probe(struct platform_device *pdev)
        return sa11xx_drv_pcmcia_add_one(skt);
 }
 
-static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
+static void sa11x0_drv_pcmcia_remove(struct platform_device *dev)
 {
        struct soc_pcmcia_socket *skt;
 
        if (dev->id == -1) {
                sa11x0_drv_pcmcia_legacy_remove(dev);
-               return 0;
+               return;
        }
 
        skt = platform_get_drvdata(dev);
 
        soc_pcmcia_remove_one(skt);
-
-       return 0;
 }
 
 static struct platform_driver sa11x0_pcmcia_driver = {
@@ -179,7 +177,7 @@ static struct platform_driver sa11x0_pcmcia_driver = {
                .name           = "sa11x0-pcmcia",
        },
        .probe          = sa11x0_drv_pcmcia_probe,
-       .remove         = sa11x0_drv_pcmcia_remove,
+       .remove_new     = sa11x0_drv_pcmcia_remove,
 };
 
 /* sa11x0_pcmcia_init()
index b11c7abb1dc07e9a21487cffed56dec7b43a35d8..2a93fbbd128da99a422ed6f86c11d033bd6af45c 100644 (file)
@@ -301,7 +301,7 @@ out0:
        return ret;
 }
 
-static int xxs1500_pcmcia_remove(struct platform_device *pdev)
+static void xxs1500_pcmcia_remove(struct platform_device *pdev)
 {
        struct xxs1500_pcmcia_sock *sock = platform_get_drvdata(pdev);
 
@@ -309,8 +309,6 @@ static int xxs1500_pcmcia_remove(struct platform_device *pdev)
        free_irq(gpio_to_irq(GPIO_CDA), sock);
        iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
        kfree(sock);
-
-       return 0;
 }
 
 static struct platform_driver xxs1500_pcmcia_socket_driver = {
@@ -318,7 +316,7 @@ static struct platform_driver xxs1500_pcmcia_socket_driver = {
                .name   = "xxs1500_pcmcia",
        },
        .probe          = xxs1500_pcmcia_probe,
-       .remove         = xxs1500_pcmcia_remove,
+       .remove_new     = xxs1500_pcmcia_remove,
 };
 
 module_platform_driver(xxs1500_pcmcia_socket_driver);
index 05eab9014132fc9ff782e855e2c2351e8cedb64d..a4746f6cb8a187e2a1172fd5fc2dd1f7ceee7bb1 100644 (file)
 #define P3D_RG_CDR_BIR_LTD1            GENMASK(28, 24)
 #define P3D_RG_CDR_BIR_LTD0            GENMASK(12, 8)
 
+#define U3P_U3_PHYD_TOP1               0x100
+#define P3D_RG_PHY_MODE                        GENMASK(2, 1)
+#define P3D_RG_FORCE_PHY_MODE          BIT(0)
+
 #define U3P_U3_PHYD_RXDET1             0x128
 #define P3D_RG_RXDET_STB2_SET          GENMASK(17, 9)
 
@@ -327,6 +331,7 @@ struct mtk_phy_instance {
        int discth;
        int pre_emphasis;
        bool bc12_en;
+       bool type_force_mode;
 };
 
 struct mtk_tphy {
@@ -768,6 +773,23 @@ static void u3_phy_instance_init(struct mtk_tphy *tphy,
        void __iomem *phya = u3_banks->phya;
        void __iomem *phyd = u3_banks->phyd;
 
+       if (instance->type_force_mode) {
+               /* force phy as usb mode, default is pcie rc mode */
+               mtk_phy_update_field(phyd + U3P_U3_PHYD_TOP1, P3D_RG_PHY_MODE, 1);
+               mtk_phy_set_bits(phyd + U3P_U3_PHYD_TOP1, P3D_RG_FORCE_PHY_MODE);
+               /* power down phy by ip and pipe reset */
+               mtk_phy_set_bits(u3_banks->chip + U3P_U3_CHIP_GPIO_CTLD,
+                                P3C_FORCE_IP_SW_RST | P3C_MCU_BUS_CK_GATE_EN);
+               mtk_phy_set_bits(u3_banks->chip + U3P_U3_CHIP_GPIO_CTLE,
+                                P3C_RG_SWRST_U3_PHYD | P3C_RG_SWRST_U3_PHYD_FORCE_EN);
+               udelay(10);
+               /* power on phy again */
+               mtk_phy_clear_bits(u3_banks->chip + U3P_U3_CHIP_GPIO_CTLD,
+                                  P3C_FORCE_IP_SW_RST | P3C_MCU_BUS_CK_GATE_EN);
+               mtk_phy_clear_bits(u3_banks->chip + U3P_U3_CHIP_GPIO_CTLE,
+                                  P3C_RG_SWRST_U3_PHYD | P3C_RG_SWRST_U3_PHYD_FORCE_EN);
+       }
+
        /* gating PCIe Analog XTAL clock */
        mtk_phy_set_bits(u3_banks->spllc + U3P_SPLLC_XTALCTL3,
                         XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD);
@@ -1120,6 +1142,9 @@ static void phy_parse_property(struct mtk_tphy *tphy,
 {
        struct device *dev = &instance->phy->dev;
 
+       if (instance->type == PHY_TYPE_USB3)
+               instance->type_force_mode = device_property_read_bool(dev, "mediatek,force-mode");
+
        if (instance->type != PHY_TYPE_USB2)
                return;
 
index c1a41b6cd29b1d8f785134627547d68e38079747..b5ac2b7995e7156b73e348814ce4e29b6f71a874 100644 (file)
@@ -96,6 +96,8 @@ static const struct serdes_mux lan966x_serdes_muxes[] = {
        SERDES_MUX_SGMII(SERDES6G(1), 3, HSIO_HW_CFG_SD6G_1_CFG,
                         HSIO_HW_CFG_SD6G_1_CFG_SET(1)),
 
+       SERDES_MUX_SGMII(SERDES6G(2), 4, 0, 0),
+
        SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG |
                         HSIO_HW_CFG_RGMII_ENA |
                         HSIO_HW_CFG_GMII_ENA,
index 840b7f8a31c5fd743af6611673fed4072c99fbd8..ee4ce42496985d28baf9a7e3172dd902cc7254f9 100644 (file)
@@ -6,11 +6,11 @@
  *
  */
 #include <linux/of.h>
-#include<linux/phy/phy.h>
-#include<linux/platform_device.h>
-#include<linux/module.h>
-#include<linux/gpio.h>
-#include<linux/gpio/consumer.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/mux/consumer.h>
 
 struct can_transceiver_data {
index 96a0b1e111f34997f664d63151038cf582c2fb67..d9be6a4d538387fac816d4d46c781ebd84cc29de 100644 (file)
@@ -959,7 +959,7 @@ struct phy *phy_create(struct device *dev, struct device_node *node,
        if (!phy)
                return ERR_PTR(-ENOMEM);
 
-       id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
+       id = ida_alloc(&phy_ida, GFP_KERNEL);
        if (id < 0) {
                dev_err(dev, "unable to get id\n");
                ret = id;
@@ -1232,7 +1232,7 @@ static void phy_release(struct device *dev)
        dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
        debugfs_remove_recursive(phy->debugfs);
        regulator_put(phy->pwr);
-       ida_simple_remove(&phy_ida, phy->id);
+       ida_free(&phy_ida, phy->id);
        kfree(phy);
 }
 
index f6c727249104fdd3dfc016ec908d792f3dd65316..1ad10110dd2544b77ae38a1459497ae6e2905b84 100644 (file)
@@ -1203,6 +1203,127 @@ static const struct qmp_phy_init_tbl sc8280xp_usb43dp_pcs_tbl[] = {
        QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
 };
 
+static const struct qmp_phy_init_tbl x1e80100_usb43dp_serdes_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x62),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0xc2),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x03),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0xc2),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x03),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x1a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x08),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x1a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x41),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x82),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MSB_MODE0, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x82),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MSB_MODE1, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x55),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0x55),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x03),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0x55),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0x55),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x03),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE0, 0xba),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE0, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE1, 0xba),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE1, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x13),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0xa0),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x76),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO_MODE1, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x20),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE1, 0x20),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_INITVAL2, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAXVAL2, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb43dp_tx_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_LANE_MODE_1, 0x05),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_LANE_MODE_2, 0x50),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_LANE_MODE_3, 0x50),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb43dp_rx_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_SIGDET_CNTRL, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_SIGDET_ENABLES, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B0, 0xc3),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B1, 0xc3),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B2, 0xd8),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B3, 0x9e),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B4, 0x36),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B5, 0xb6),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B6, 0x64),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B0, 0xd6),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B1, 0xee),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B2, 0x18),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B3, 0x9a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B4, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B5, 0x36),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B6, 0xe3),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_IVCM_CAL_CODE_OVERRIDE, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_RX_IVCM_CAL_CTRL2, 0x80),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_RX_SUMMER_CAL_SPD_MODE, 0x2f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x08),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_PI_CONTROLS, 0x15),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_PI_CTRL1, 0xd0),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_PI_CTRL2, 0x48),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_SB2_GAIN2_RATE2, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_RX_IVCM_POSTCAL_OFFSET, 0x7c),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_VGA_CAL_CNTRL1, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_VGA_CAL_MAN_VAL, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_DFE_DAC_ENABLE1, 0x88),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_DFE_3, 0x45),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_GM_CAL, 0x0d),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2, 0x09),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2, 0x05),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x2f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_RX_BKUP_CTRL1, 0x14),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = {
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x55),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x30),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = {
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
 /* list of regulators */
 struct qmp_regulator_data {
        const char *name;
@@ -1682,6 +1803,51 @@ static const struct qmp_phy_cfg sc8280xp_usb43dpphy_cfg = {
        .regs                   = qmp_v5_5nm_usb3phy_regs_layout,
 };
 
+static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
+       .offsets                = &qmp_combo_offsets_v5,
+
+       .serdes_tbl             = x1e80100_usb43dp_serdes_tbl,
+       .serdes_tbl_num         = ARRAY_SIZE(x1e80100_usb43dp_serdes_tbl),
+       .tx_tbl                 = x1e80100_usb43dp_tx_tbl,
+       .tx_tbl_num             = ARRAY_SIZE(x1e80100_usb43dp_tx_tbl),
+       .rx_tbl                 = x1e80100_usb43dp_rx_tbl,
+       .rx_tbl_num             = ARRAY_SIZE(x1e80100_usb43dp_rx_tbl),
+       .pcs_tbl                = x1e80100_usb43dp_pcs_tbl,
+       .pcs_tbl_num            = ARRAY_SIZE(x1e80100_usb43dp_pcs_tbl),
+       .pcs_usb_tbl            = x1e80100_usb43dp_pcs_usb_tbl,
+       .pcs_usb_tbl_num        = ARRAY_SIZE(x1e80100_usb43dp_pcs_usb_tbl),
+
+       .dp_serdes_tbl          = qmp_v6_dp_serdes_tbl,
+       .dp_serdes_tbl_num      = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
+       .dp_tx_tbl              = qmp_v6_dp_tx_tbl,
+       .dp_tx_tbl_num          = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+
+       .serdes_tbl_rbr         = qmp_v6_dp_serdes_tbl_rbr,
+       .serdes_tbl_rbr_num     = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
+       .serdes_tbl_hbr         = qmp_v6_dp_serdes_tbl_hbr,
+       .serdes_tbl_hbr_num     = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
+       .serdes_tbl_hbr2        = qmp_v6_dp_serdes_tbl_hbr2,
+       .serdes_tbl_hbr2_num    = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
+       .serdes_tbl_hbr3        = qmp_v6_dp_serdes_tbl_hbr3,
+       .serdes_tbl_hbr3_num    = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+
+       .swing_hbr_rbr          = &qmp_dp_v5_voltage_swing_hbr_rbr,
+       .pre_emphasis_hbr_rbr   = &qmp_dp_v5_pre_emphasis_hbr_rbr,
+       .swing_hbr3_hbr2        = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+       .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+
+       .dp_aux_init            = qmp_v4_dp_aux_init,
+       .configure_dp_tx        = qmp_v4_configure_dp_tx,
+       .configure_dp_phy       = qmp_v4_configure_dp_phy,
+       .calibrate_dp_phy       = qmp_v4_calibrate_dp_phy,
+
+       .reset_list             = msm8996_usb3phy_reset_l,
+       .num_resets             = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+       .vreg_list              = qmp_phy_vreg_l,
+       .num_vregs              = ARRAY_SIZE(qmp_phy_vreg_l),
+       .regs                   = qmp_v45_usb3phy_regs_layout,
+};
+
 static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
        .offsets                = &qmp_combo_offsets_v3,
 
@@ -3518,6 +3684,14 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
                .compatible = "qcom,sm8550-qmp-usb3-dp-phy",
                .data = &sm8550_usb3dpphy_cfg,
        },
+       {
+               .compatible = "qcom,sm8650-qmp-usb3-dp-phy",
+               .data = &sm8550_usb3dpphy_cfg,
+       },
+       {
+               .compatible = "qcom,x1e80100-qmp-usb3-dp-phy",
+               .data = &x1e80100_usb3dpphy_cfg,
+       },
        { }
 };
 MODULE_DEVICE_TABLE(of, qmp_combo_of_match_table);
index b64598ac59f4d9d928105a17af0186b0038e1c55..2af7115ef96891ea33443bbcbf823c3d3c03fafd 100644 (file)
@@ -1909,6 +1909,35 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
        QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5, 0xf2),
 };
 
+static const struct qmp_phy_init_tbl sm8650_qmp_gen4x2_pcie_rx_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_3, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_PI_CONTROLS, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_CAL_CTRL2, 0x82),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_3, 0x05),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_VGA_CAL_MAN_VAL, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_GM_CAL, 0x0d),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_SIGDET_ENABLES, 0x1c),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_PHPRE_CTRL, 0x20),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B0, 0xd3),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B1, 0xd3),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B2, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B3, 0x9a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B4, 0x06),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B5, 0xb6),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B6, 0xee),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B0, 0x23),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B1, 0x9b),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B2, 0x60),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B3, 0xdf),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B4, 0x43),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B5, 0x76),
+       QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B6, 0xff),
+};
+
 static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl[] = {
        QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
        QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
@@ -3047,6 +3076,36 @@ static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
        .has_nocsr_reset        = true,
 };
 
+static const struct qmp_phy_cfg sm8650_qmp_gen4x2_pciephy_cfg = {
+       .lanes = 2,
+
+       .offsets                = &qmp_pcie_offsets_v6_20,
+
+       .tbls = {
+               .serdes                 = sm8550_qmp_gen4x2_pcie_serdes_tbl,
+               .serdes_num             = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_serdes_tbl),
+               .tx                     = sm8550_qmp_gen4x2_pcie_tx_tbl,
+               .tx_num                 = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_tx_tbl),
+               .rx                     = sm8650_qmp_gen4x2_pcie_rx_tbl,
+               .rx_num                 = ARRAY_SIZE(sm8650_qmp_gen4x2_pcie_rx_tbl),
+               .pcs                    = sm8550_qmp_gen4x2_pcie_pcs_tbl,
+               .pcs_num                = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_pcs_tbl),
+               .pcs_misc               = sm8550_qmp_gen4x2_pcie_pcs_misc_tbl,
+               .pcs_misc_num           = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_pcs_misc_tbl),
+               .ln_shrd                = sm8550_qmp_gen4x2_pcie_ln_shrd_tbl,
+               .ln_shrd_num            = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_ln_shrd_tbl),
+       },
+       .reset_list             = sdm845_pciephy_reset_l,
+       .num_resets             = ARRAY_SIZE(sdm845_pciephy_reset_l),
+       .vreg_list              = sm8550_qmp_phy_vreg_l,
+       .num_vregs              = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
+       .regs                   = pciephy_v5_regs_layout,
+
+       .pwrdn_ctrl             = SW_PWRDN | REFCLK_DRV_DSBL,
+       .phy_status             = PHYSTATUS_4_20,
+       .has_nocsr_reset        = true,
+};
+
 static const struct qmp_phy_cfg sa8775p_qmp_gen4x2_pciephy_cfg = {
        .lanes                  = 2,
        .offsets                = &qmp_pcie_offsets_v5_20,
@@ -3820,6 +3879,12 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
        }, {
                .compatible = "qcom,sm8550-qmp-gen4x2-pcie-phy",
                .data = &sm8550_qmp_gen4x2_pciephy_cfg,
+       }, {
+               .compatible = "qcom,sm8650-qmp-gen3x2-pcie-phy",
+               .data = &sm8550_qmp_gen3x2_pciephy_cfg,
+       }, {
+               .compatible = "qcom,sm8650-qmp-gen4x2-pcie-phy",
+               .data = &sm8650_qmp_gen4x2_pciephy_cfg,
        },
        { },
 };
index c23d5e41e25b552a7b7e64a48587cdaf162c678f..fe6c450f612382b281ccceab453a086373a27ab0 100644 (file)
@@ -12,6 +12,7 @@
 #define QPHY_V6_PCS_UFS_SW_RESET                       0x008
 #define QPHY_V6_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB   0x00c
 #define QPHY_V6_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB   0x010
+#define QPHY_V6_PCS_UFS_PCS_CTRL1                      0x020
 #define QPHY_V6_PCS_UFS_PLL_CNTL                       0x02c
 #define QPHY_V6_PCS_UFS_TX_LARGE_AMP_DRV_LVL           0x030
 #define QPHY_V6_PCS_UFS_TX_SMALL_AMP_DRV_LVL           0x038
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v7.h
new file mode 100644 (file)
index 0000000..24368d4
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_USB_V7_H_
+#define QCOM_PHY_QMP_PCS_USB_V7_H_
+
+#define QPHY_V7_PCS_USB3_POWER_STATE_CONFIG1           0x00
+#define QPHY_V7_PCS_USB3_AUTONOMOUS_MODE_CTRL          0x08
+#define QPHY_V7_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR         0x14
+#define QPHY_V7_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL       0x18
+#define QPHY_V7_PCS_USB3_RXEQTRAINING_DFE_TIME_S2      0x3c
+#define QPHY_V7_PCS_USB3_RCVR_DTCT_DLY_U3_L            0x40
+#define QPHY_V7_PCS_USB3_RCVR_DTCT_DLY_U3_H            0x44
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h
new file mode 100644 (file)
index 0000000..c775989
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V7_H_
+#define QCOM_PHY_QMP_PCS_V7_H_
+
+/* Only for QMP V7 PHY - USB/PCIe PCS registers */
+#define QPHY_V7_PCS_SW_RESET                   0x000
+#define QPHY_V7_PCS_PCS_STATUS1                        0x014
+#define QPHY_V7_PCS_POWER_DOWN_CONTROL         0x040
+#define QPHY_V7_PCS_START_CONTROL              0x044
+#define QPHY_V7_PCS_POWER_STATE_CONFIG1                0x090
+#define QPHY_V7_PCS_LOCK_DETECT_CONFIG1                0x0c4
+#define QPHY_V7_PCS_LOCK_DETECT_CONFIG2                0x0c8
+#define QPHY_V7_PCS_LOCK_DETECT_CONFIG3                0x0cc
+#define QPHY_V7_PCS_LOCK_DETECT_CONFIG6                0x0d8
+#define QPHY_V7_PCS_REFGEN_REQ_CONFIG1         0x0dc
+#define QPHY_V7_PCS_RX_SIGDET_LVL              0x188
+#define QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_L       0x190
+#define QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_H       0x194
+#define QPHY_V7_PCS_RATE_SLEW_CNTRL1           0x198
+#define QPHY_V7_PCS_CDR_RESET_TIME             0x1b0
+#define QPHY_V7_PCS_ALIGN_DETECT_CONFIG1       0x1c0
+#define QPHY_V7_PCS_ALIGN_DETECT_CONFIG2       0x1c4
+#define QPHY_V7_PCS_PCS_TX_RX_CONFIG           0x1d0
+#define QPHY_V7_PCS_EQ_CONFIG1                 0x1dc
+#define QPHY_V7_PCS_EQ_CONFIG2                 0x1e0
+#define QPHY_V7_PCS_EQ_CONFIG5                 0x1ec
+
+#endif
index f420f8faf16a7aa5e47b60eadd4a8d2abc09c673..ec7291424dd1f1bb7f706bbb5b77419f76d0bfda 100644 (file)
@@ -22,6 +22,8 @@
 #define QSERDES_V6_COM_DIV_FRAC_START2_MODE1                   0x34
 #define QSERDES_V6_COM_DIV_FRAC_START3_MODE1                   0x38
 #define QSERDES_V6_COM_HSCLK_SEL_1                             0x3c
+#define QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE1                   0x40
+#define QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE1                   0x44
 #define QSERDES_V6_COM_VCO_TUNE1_MODE1                         0x48
 #define QSERDES_V6_COM_VCO_TUNE2_MODE1                         0x4c
 #define QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE1              0x50
@@ -48,6 +50,7 @@
 #define QSERDES_V6_COM_VCO_TUNE2_MODE0                         0xac
 #define QSERDES_V6_COM_BG_TIMER                                        0xbc
 #define QSERDES_V6_COM_SSC_EN_CENTER                           0xc0
+#define QSERDES_V6_COM_SSC_ADJ_PER1                            0xc4
 #define QSERDES_V6_COM_SSC_PER1                                        0xcc
 #define QSERDES_V6_COM_SSC_PER2                                        0xd0
 #define QSERDES_V6_COM_PLL_POST_DIV_MUX                                0xd8
@@ -56,6 +59,7 @@
 #define QSERDES_V6_COM_SYS_CLK_CTRL                            0xe4
 #define QSERDES_V6_COM_SYSCLK_BUF_ENABLE                       0xe8
 #define QSERDES_V6_COM_PLL_IVCO                                        0xf4
+#define QSERDES_V6_COM_PLL_IVCO_MODE1                          0xf8
 #define QSERDES_V6_COM_SYSCLK_EN_SEL                           0x110
 #define QSERDES_V6_COM_RESETSM_CNTRL                           0x118
 #define QSERDES_V6_COM_LOCK_CMP_EN                             0x120
@@ -63,6 +67,7 @@
 #define QSERDES_V6_COM_VCO_TUNE_CTRL                           0x13c
 #define QSERDES_V6_COM_VCO_TUNE_MAP                            0x140
 #define QSERDES_V6_COM_VCO_TUNE_INITVAL2                       0x148
+#define QSERDES_V6_COM_VCO_TUNE_MAXVAL2                                0x158
 #define QSERDES_V6_COM_CLK_SELECT                              0x164
 #define QSERDES_V6_COM_CORE_CLK_EN                             0x170
 #define QSERDES_V6_COM_CMN_CONFIG_1                            0x174
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v7.h
new file mode 100644 (file)
index 0000000..7430f49
--- /dev/null
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_COM_V7_H_
+#define QCOM_PHY_QMP_QSERDES_COM_V7_H_
+
+/* Only for QMP V7 PHY - QSERDES COM registers */
+
+#define QSERDES_V7_COM_SSC_STEP_SIZE1_MODE1                    0x00
+#define QSERDES_V7_COM_SSC_STEP_SIZE2_MODE1                    0x04
+#define QSERDES_V7_COM_CP_CTRL_MODE1                           0x10
+#define QSERDES_V7_COM_PLL_RCTRL_MODE1                         0x14
+#define QSERDES_V7_COM_PLL_CCTRL_MODE1                         0x18
+#define QSERDES_V7_COM_CORECLK_DIV_MODE1                       0x1c
+#define QSERDES_V7_COM_LOCK_CMP1_MODE1                         0x20
+#define QSERDES_V7_COM_LOCK_CMP2_MODE1                         0x24
+#define QSERDES_V7_COM_DEC_START_MODE1                         0x28
+#define QSERDES_V7_COM_DEC_START_MSB_MODE1                     0x2c
+#define QSERDES_V7_COM_DIV_FRAC_START1_MODE1                   0x30
+#define QSERDES_V7_COM_DIV_FRAC_START2_MODE1                   0x34
+#define QSERDES_V7_COM_DIV_FRAC_START3_MODE1                   0x38
+#define QSERDES_V7_COM_HSCLK_SEL_1                             0x3c
+#define QSERDES_V7_COM_INTEGLOOP_GAIN0_MODE1                   0x40
+#define QSERDES_V7_COM_INTEGLOOP_GAIN1_MODE1                   0x44
+#define QSERDES_V7_COM_VCO_TUNE1_MODE1                         0x48
+#define QSERDES_V7_COM_VCO_TUNE2_MODE1                         0x4c
+#define QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE1_MODE1              0x50
+#define QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE2_MODE1              0x54
+#define QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE1_MODE0              0x58
+#define QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE2_MODE0              0x5c
+#define QSERDES_V7_COM_SSC_STEP_SIZE1_MODE0                    0x60
+#define QSERDES_V7_COM_SSC_STEP_SIZE2_MODE0                    0x64
+#define QSERDES_V7_COM_CP_CTRL_MODE0                           0x70
+#define QSERDES_V7_COM_PLL_RCTRL_MODE0                         0x74
+#define QSERDES_V7_COM_PLL_CCTRL_MODE0                         0x78
+#define QSERDES_V7_COM_PLL_CORE_CLK_DIV_MODE0                  0x7c
+#define QSERDES_V7_COM_LOCK_CMP1_MODE0                         0x80
+#define QSERDES_V7_COM_LOCK_CMP2_MODE0                         0x84
+#define QSERDES_V7_COM_DEC_START_MODE0                         0x88
+#define QSERDES_V7_COM_DEC_START_MSB_MODE0                     0x8c
+#define QSERDES_V7_COM_DIV_FRAC_START1_MODE0                   0x90
+#define QSERDES_V7_COM_DIV_FRAC_START2_MODE0                   0x94
+#define QSERDES_V7_COM_DIV_FRAC_START3_MODE0                   0x98
+#define QSERDES_V7_COM_HSCLK_HS_SWITCH_SEL_1                   0x9c
+#define QSERDES_V7_COM_INTEGLOOP_GAIN0_MODE0                   0xa0
+#define QSERDES_V7_COM_INTEGLOOP_GAIN1_MODE0                   0xa4
+#define QSERDES_V7_COM_VCO_TUNE1_MODE0                         0xa8
+#define QSERDES_V7_COM_VCO_TUNE2_MODE0                         0xac
+#define QSERDES_V7_COM_BG_TIMER                                        0xbc
+#define QSERDES_V7_COM_SSC_EN_CENTER                           0xc0
+#define QSERDES_V7_COM_SSC_ADJ_PER1                            0xc4
+#define QSERDES_V7_COM_SSC_PER1                                        0xcc
+#define QSERDES_V7_COM_SSC_PER2                                        0xd0
+#define QSERDES_V7_COM_PLL_POST_DIV_MUX                                0xd8
+#define QSERDES_V7_COM_PLL_BIAS_EN_CLK_BUFLR_EN                        0xdc
+#define QSERDES_V7_COM_CLK_ENABLE1                             0xe0
+#define QSERDES_V7_COM_SYS_CLK_CTRL                            0xe4
+#define QSERDES_V7_COM_SYSCLK_BUF_ENABLE                       0xe8
+#define QSERDES_V7_COM_PLL_IVCO                                        0xf4
+#define QSERDES_V7_COM_PLL_IVCO_MODE1                          0xf8
+#define QSERDES_V7_COM_SYSCLK_EN_SEL                           0x110
+#define QSERDES_V7_COM_RESETSM_CNTRL                           0x118
+#define QSERDES_V7_COM_LOCK_CMP_EN                             0x120
+#define QSERDES_V7_COM_LOCK_CMP_CFG                            0x124
+#define QSERDES_V7_COM_VCO_TUNE_CTRL                           0x13c
+#define QSERDES_V7_COM_VCO_TUNE_MAP                            0x140
+#define QSERDES_V7_COM_VCO_TUNE_INITVAL2                       0x148
+#define QSERDES_V7_COM_VCO_TUNE_MAXVAL2                                0x158
+#define QSERDES_V7_COM_CLK_SELECT                              0x164
+#define QSERDES_V7_COM_CORE_CLK_EN                             0x170
+#define QSERDES_V7_COM_CMN_CONFIG_1                            0x174
+#define QSERDES_V7_COM_SVS_MODE_CLK_SEL                                0x17c
+#define QSERDES_V7_COM_CMN_MISC_1                              0x184
+#define QSERDES_V7_COM_CMN_MODE                                        0x188
+#define QSERDES_V7_COM_PLL_VCO_DC_LEVEL_CTRL                   0x198
+#define QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_1                    0x1a4
+#define QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_2                    0x1a8
+#define QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_3                    0x1ac
+#define QSERDES_V7_COM_ADDITIONAL_MISC                         0x1b4
+#define QSERDES_V7_COM_ADDITIONAL_MISC_2                       0x1b8
+#define QSERDES_V7_COM_ADDITIONAL_MISC_3                       0x1bc
+#define QSERDES_V7_COM_CMN_STATUS                              0x1d0
+#define QSERDES_V7_COM_C_READY_STATUS                          0x1f8
+
+#endif
index 15bcb4ba91399894b8c934b584d9ae76c083eafc..35d497fd9f9a4420e8c02ae8dd91de011c94c14a 100644 (file)
 #define QSERDES_UFS_V6_TX_RES_CODE_LANE_RX                     0x2c
 #define QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_TX              0x30
 #define QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_RX              0x34
+#define QSERDES_UFS_V6_TX_LANE_MODE_1                          0x7c
+#define QSERDES_UFS_V6_TX_FR_DCC_CTRL                          0x108
 
 #define QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE2          0x08
 #define QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE4          0x10
+#define QSERDES_UFS_V6_RX_UCDR_SO_SATURATION                   0x28
+#define QSERDES_UFS_V6_RX_UCDR_PI_CTRL1                                0x58
+#define QSERDES_UFS_V6_RX_RX_TERM_BW_CTRL0                     0xc4
+#define QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE2                   0xd4
+#define QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE4                   0xdc
 #define QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL                      0x178
+#define QSERDES_UFS_V6_RX_INTERFACE_MODE                       0x1e0
 #define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0                     0x208
 #define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1                     0x20c
 #define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B3                     0x214
index 8883e1de730eff83a8ca3cd7065e78fe93e4add8..23ffcfae9efab4a9e081414f9b3bbd0079d34f18 100644 (file)
@@ -23,6 +23,7 @@
 #define QSERDES_V6_TX_PARRATE_REC_DETECT_IDLE_EN               0x60
 #define QSERDES_V6_TX_BIST_PATTERN7                            0x7c
 #define QSERDES_V6_TX_LANE_MODE_1                              0x84
+#define QSERDES_V6_TX_LANE_MODE_2                              0x88
 #define QSERDES_V6_TX_LANE_MODE_3                              0x8c
 #define QSERDES_V6_TX_LANE_MODE_4                              0x90
 #define QSERDES_V6_TX_LANE_MODE_5                              0x94
index 5385a8b60970748373f890a6f17c5a528ae421dc..6ed5339fd2ea86dd4a69df077887ea9a0713dcdd 100644 (file)
 
 #define QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2                   0x08
 #define QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_3                   0x0c
+#define QSERDES_V6_20_RX_UCDR_SO_GAIN_RATE_2                   0x18
 #define QSERDES_V6_20_RX_UCDR_PI_CONTROLS                      0x20
 #define QSERDES_V6_20_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3         0x34
 #define QSERDES_V6_20_RX_IVCM_CAL_CTRL2                                0x9c
 #define QSERDES_V6_20_RX_IVCM_POSTCAL_OFFSET                   0xa0
+#define QSERDES_V6_20_RX_DFE_1                                 0xac
+#define QSERDES_V6_20_RX_DFE_2                                 0xb0
 #define QSERDES_V6_20_RX_DFE_3                                 0xb4
 #define QSERDES_V6_20_RX_VGA_CAL_MAN_VAL                       0xe8
 #define QSERDES_V6_20_RX_GM_CAL                                        0x10c
@@ -41,5 +44,6 @@
 #define QSERDES_V6_20_RX_MODE_RATE3_B4                         0x220
 #define QSERDES_V6_20_RX_MODE_RATE3_B5                         0x224
 #define QSERDES_V6_20_RX_MODE_RATE3_B6                         0x228
+#define QSERDES_V6_20_RX_BKUP_CTRL1                            0x22c
 
 #endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
new file mode 100644 (file)
index 0000000..a814ad1
--- /dev/null
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
+
+#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX       0x30
+#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX       0x34
+#define QSERDES_V6_N4_TX_LANE_MODE_1                   0x78
+#define QSERDES_V6_N4_TX_LANE_MODE_2                   0x7c
+#define QSERDES_V6_N4_TX_LANE_MODE_3                   0x80
+
+#define QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2            0x8
+#define QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2            0x18
+#define QSERDES_V6_N4_RX_UCDR_PI_CONTROLS              0x20
+#define QSERDES_V6_N4_RX_IVCM_CAL_CODE_OVERRIDE                0x94
+#define QSERDES_V6_N4_RX_RX_IVCM_CAL_CTRL2             0x9c
+#define QSERDES_V6_N4_RX_RX_IVCM_POSTCAL_OFFSET                0xa0
+#define QSERDES_V6_N4_RX_DFE_3                         0xb4
+#define QSERDES_V6_N4_RX_VGA_CAL_CNTRL1                        0xe0
+#define QSERDES_V6_N4_RX_VGA_CAL_MAN_VAL               0xe8
+#define QSERDES_V6_N4_RX_GM_CAL                                0x10c
+#define QSERDES_V6_N4_RX_SIGDET_ENABLES                        0x148
+#define QSERDES_V6_N4_RX_SIGDET_CNTRL                  0x14c
+#define QSERDES_V6_N4_RX_SIGDET_DEGLITCH_CNTRL         0x154
+#define QSERDES_V6_N4_RX_DFE_CTLE_POST_CAL_OFFSET      0x194
+#define QSERDES_V6_N4_RX_Q_PI_INTRINSIC_BIAS_RATE32    0x1dc
+#define QSERDES_V6_N4_RX_UCDR_PI_CTRL1                 0x23c
+#define QSERDES_V6_N4_RX_UCDR_PI_CTRL2                 0x240
+#define QSERDES_V6_N4_RX_UCDR_SB2_GAIN2_RATE2          0x27c
+#define QSERDES_V6_N4_RX_DFE_DAC_ENABLE1               0x298
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B0              0x2b8
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B1              0x2bc
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B2              0x2c0
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B3              0x2c4
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B4              0x2c8
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B5              0x2cc
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B6              0x2d0
+#define QSERDES_V6_N4_RX_MODE_RATE2_B0                 0x2d4
+#define QSERDES_V6_N4_RX_MODE_RATE2_B1                 0x2d8
+#define QSERDES_V6_N4_RX_MODE_RATE2_B2                 0x2dc
+#define QSERDES_V6_N4_RX_MODE_RATE2_B3                 0x2e0
+#define QSERDES_V6_N4_RX_MODE_RATE2_B4                 0x2e4
+#define QSERDES_V6_N4_RX_MODE_RATE2_B5                 0x2e8
+#define QSERDES_V6_N4_RX_MODE_RATE2_B6                 0x2ec
+#define QSERDES_V6_N4_RX_RX_SUMMER_CAL_SPD_MODE                0x30c
+#define QSERDES_V6_N4_RX_RX_BKUP_CTRL1                 0x310
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h
new file mode 100644 (file)
index 0000000..91f865b
--- /dev/null
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V7_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V7_H_
+
+#define QSERDES_V7_TX_CLKBUF_ENABLE                            0x08
+#define QSERDES_V7_TX_RESET_TSYNC_EN                           0x1c
+#define QSERDES_V7_TX_PRE_STALL_LDO_BOOST_EN                   0x20
+#define QSERDES_V7_TX_TX_BAND                                  0x24
+#define QSERDES_V7_TX_INTERFACE_SELECT                         0x2c
+#define QSERDES_V7_TX_RES_CODE_LANE_TX                         0x34
+#define QSERDES_V7_TX_RES_CODE_LANE_RX                         0x38
+#define QSERDES_V7_TX_RES_CODE_LANE_OFFSET_TX                  0x3c
+#define QSERDES_V7_TX_RES_CODE_LANE_OFFSET_RX                  0x40
+#define QSERDES_V7_TX_PARRATE_REC_DETECT_IDLE_EN               0x60
+#define QSERDES_V7_TX_BIST_PATTERN7                            0x7c
+#define QSERDES_V7_TX_LANE_MODE_1                              0x84
+#define QSERDES_V7_TX_LANE_MODE_2                              0x88
+#define QSERDES_V7_TX_LANE_MODE_3                              0x8c
+#define QSERDES_V7_TX_LANE_MODE_4                              0x90
+#define QSERDES_V7_TX_LANE_MODE_5                              0x94
+#define QSERDES_V7_TX_RCV_DETECT_LVL_2                         0xa4
+#define QSERDES_V7_TX_TRAN_DRVR_EMP_EN                         0xc0
+#define QSERDES_V7_TX_TX_INTERFACE_MODE                                0xc4
+#define QSERDES_V7_TX_VMODE_CTRL1                              0xc8
+#define QSERDES_V7_TX_PI_QEC_CTRL                              0xe4
+
+#define QSERDES_V7_RX_UCDR_FO_GAIN                             0x08
+#define QSERDES_V7_RX_UCDR_SO_GAIN                             0x14
+#define QSERDES_V7_RX_UCDR_FASTLOCK_FO_GAIN                    0x30
+#define QSERDES_V7_RX_UCDR_SO_SATURATION_AND_ENABLE            0x34
+#define QSERDES_V7_RX_UCDR_FASTLOCK_COUNT_LOW                  0x3c
+#define QSERDES_V7_RX_UCDR_FASTLOCK_COUNT_HIGH                 0x40
+#define QSERDES_V7_RX_UCDR_PI_CONTROLS                         0x44
+#define QSERDES_V7_RX_UCDR_SB2_THRESH1                         0x4c
+#define QSERDES_V7_RX_UCDR_SB2_THRESH2                         0x50
+#define QSERDES_V7_RX_UCDR_SB2_GAIN1                           0x54
+#define QSERDES_V7_RX_UCDR_SB2_GAIN2                           0x58
+#define QSERDES_V7_RX_AUX_DATA_TCOARSE_TFINE                   0x60
+#define QSERDES_V7_RX_TX_ADAPT_POST_THRESH                     0xcc
+#define QSERDES_V7_RX_VGA_CAL_CNTRL1                           0xd4
+#define QSERDES_V7_RX_VGA_CAL_CNTRL2                           0xd8
+#define QSERDES_V7_RX_GM_CAL                                   0xdc
+#define QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL2                    0xec
+#define QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL3                    0xf0
+#define QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL4                    0xf4
+#define QSERDES_V7_RX_RX_IDAC_TSETTLE_LOW                      0xf8
+#define QSERDES_V7_RX_RX_IDAC_TSETTLE_HIGH                     0xfc
+#define QSERDES_V7_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1              0x110
+#define QSERDES_V7_RX_SIDGET_ENABLES                           0x118
+#define QSERDES_V7_RX_SIGDET_CNTRL                             0x11c
+#define QSERDES_V7_RX_SIGDET_DEGLITCH_CNTRL                    0x124
+#define QSERDES_V7_RX_RX_MODE_00_LOW                           0x15c
+#define QSERDES_V7_RX_RX_MODE_00_HIGH                          0x160
+#define QSERDES_V7_RX_RX_MODE_00_HIGH2                         0x164
+#define QSERDES_V7_RX_RX_MODE_00_HIGH3                         0x168
+#define QSERDES_V7_RX_RX_MODE_00_HIGH4                         0x16c
+#define QSERDES_V7_RX_RX_MODE_01_LOW                           0x170
+#define QSERDES_V7_RX_RX_MODE_01_HIGH                          0x174
+#define QSERDES_V7_RX_RX_MODE_01_HIGH2                         0x178
+#define QSERDES_V7_RX_RX_MODE_01_HIGH3                         0x17c
+#define QSERDES_V7_RX_RX_MODE_01_HIGH4                         0x180
+#define QSERDES_V7_RX_RX_MODE_10_LOW                           0x184
+#define QSERDES_V7_RX_RX_MODE_10_HIGH                          0x188
+#define QSERDES_V7_RX_RX_MODE_10_HIGH2                         0x18c
+#define QSERDES_V7_RX_RX_MODE_10_HIGH3                         0x190
+#define QSERDES_V7_RX_RX_MODE_10_HIGH4                         0x194
+#define QSERDES_V7_RX_DFE_EN_TIMER                             0x1a0
+#define QSERDES_V7_RX_DFE_CTLE_POST_CAL_OFFSET                 0x1a4
+#define QSERDES_V7_RX_DCC_CTRL1                                        0x1a8
+#define QSERDES_V7_RX_VTH_CODE                                 0x1b0
+#define QSERDES_V7_RX_SIGDET_CAL_CTRL1                         0x1e4
+#define QSERDES_V7_RX_SIGDET_CAL_TRIM                          0x1f8
+
+#endif
index 514fa14df63452dd5505b50697fde7953caa6592..3c2e6255e26f66d21fec72595c680e5c2cccc9c4 100644 (file)
@@ -763,22 +763,26 @@ static const struct qmp_phy_init_tbl sm8550_ufsphy_serdes[] = {
        QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
        QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x7f),
        QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x06),
-       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x4c),
-       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x0a),
-       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x18),
-       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
-       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x99),
-       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x4c),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x18),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x14),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x99),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_hs_b_serdes[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x44),
 };
 
 static const struct qmp_phy_init_tbl sm8550_ufsphy_tx[] = {
-       QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_1, 0x05),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_LANE_MODE_1, 0x05),
        QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x07),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_FR_DCC_CTRL, 0x4c),
 };
 
 static const struct qmp_phy_init_tbl sm8550_ufsphy_rx[] = {
-       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE2, 0x0c),
-       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE4, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE2, 0x0c),
        QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL, 0x0e),
 
        QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0, 0xc2),
@@ -801,6 +805,69 @@ static const struct qmp_phy_init_tbl sm8550_ufsphy_pcs[] = {
        QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
        QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x2b),
        QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x04),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x04),
+};
+
+static const struct qmp_phy_init_tbl sm8650_ufsphy_serdes[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0xd9),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x11),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x44),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_INITVAL2, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x18),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x7f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x06),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x4c),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x18),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x14),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x99),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8650_ufsphy_tx[] = {
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_LANE_MODE_1, 0x05),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8650_ufsphy_rx[] = {
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE4, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL, 0x0e),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0, 0xc2),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1, 0xc2),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B3, 0x1a),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B6, 0x60),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE2_B3, 0x9e),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE2_B6, 0x60),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B3, 0x9e),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B4, 0x0e),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B5, 0x36),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B8, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B3, 0xb9),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B6, 0xff),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_SO_SATURATION, 0x1f),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_PI_CTRL1, 0x94),
+       QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_RX_TERM_BW_CTRL0, 0xfa),
+};
+
+static const struct qmp_phy_init_tbl sm8650_ufsphy_pcs[] = {
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x00),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PCS_CTRL1, 0xc1),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x33),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x04),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x04),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0f),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_SIGDET_CTRL2, 0x69),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
 };
 
 struct qmp_ufs_offsets {
@@ -1296,6 +1363,32 @@ static const struct qmp_phy_cfg sm8550_ufsphy_cfg = {
                .pcs            = sm8550_ufsphy_pcs,
                .pcs_num        = ARRAY_SIZE(sm8550_ufsphy_pcs),
        },
+       .tbls_hs_b = {
+               .serdes         = sm8550_ufsphy_hs_b_serdes,
+               .serdes_num     = ARRAY_SIZE(sm8550_ufsphy_hs_b_serdes),
+       },
+       .clk_list               = sdm845_ufs_phy_clk_l,
+       .num_clks               = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+       .vreg_list              = qmp_phy_vreg_l,
+       .num_vregs              = ARRAY_SIZE(qmp_phy_vreg_l),
+       .regs                   = ufsphy_v6_regs_layout,
+};
+
+static const struct qmp_phy_cfg sm8650_ufsphy_cfg = {
+       .lanes                  = 2,
+
+       .offsets                = &qmp_ufs_offsets_v6,
+
+       .tbls = {
+               .serdes         = sm8650_ufsphy_serdes,
+               .serdes_num     = ARRAY_SIZE(sm8650_ufsphy_serdes),
+               .tx             = sm8650_ufsphy_tx,
+               .tx_num         = ARRAY_SIZE(sm8650_ufsphy_tx),
+               .rx             = sm8650_ufsphy_rx,
+               .rx_num         = ARRAY_SIZE(sm8650_ufsphy_rx),
+               .pcs            = sm8650_ufsphy_pcs,
+               .pcs_num        = ARRAY_SIZE(sm8650_ufsphy_pcs),
+       },
        .clk_list               = sdm845_ufs_phy_clk_l,
        .num_clks               = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
        .vreg_list              = qmp_phy_vreg_l,
@@ -1826,6 +1919,9 @@ static const struct of_device_id qmp_ufs_of_match_table[] = {
        }, {
                .compatible = "qcom,sm8550-qmp-ufs-phy",
                .data = &sm8550_ufsphy_cfg,
+       }, {
+               .compatible = "qcom,sm8650-qmp-ufs-phy",
+               .data = &sm8650_ufsphy_cfg,
        },
        { },
 };
index 02f156298e77ce2761c1d14e70e13873fc6c420d..6621246e4ddf0c567f58abdb6fb6799a08ff594e 100644 (file)
@@ -24,6 +24,8 @@
 #include "phy-qcom-qmp-pcs-misc-v4.h"
 #include "phy-qcom-qmp-pcs-usb-v4.h"
 #include "phy-qcom-qmp-pcs-usb-v5.h"
+#include "phy-qcom-qmp-pcs-usb-v6.h"
+#include "phy-qcom-qmp-pcs-usb-v7.h"
 
 /* QPHY_SW_RESET bit */
 #define SW_RESET                               BIT(0)
@@ -151,6 +153,28 @@ static const unsigned int qmp_v5_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
        [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
 };
 
+static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+       [QPHY_SW_RESET]                 = QPHY_V6_PCS_SW_RESET,
+       [QPHY_START_CTRL]               = QPHY_V6_PCS_START_CONTROL,
+       [QPHY_PCS_STATUS]               = QPHY_V6_PCS_PCS_STATUS1,
+       [QPHY_PCS_POWER_DOWN_CONTROL]   = QPHY_V6_PCS_POWER_DOWN_CONTROL,
+
+       /* In PCS_USB */
+       [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+       [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+};
+
+static const unsigned int qmp_v7_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+       [QPHY_SW_RESET]                 = QPHY_V7_PCS_SW_RESET,
+       [QPHY_START_CTRL]               = QPHY_V7_PCS_START_CONTROL,
+       [QPHY_PCS_STATUS]               = QPHY_V7_PCS_PCS_STATUS1,
+       [QPHY_PCS_POWER_DOWN_CONTROL]   = QPHY_V7_PCS_POWER_DOWN_CONTROL,
+
+       /* In PCS_USB */
+       [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V7_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+       [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V7_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+};
+
 static const struct qmp_phy_init_tbl ipq9574_usb3_serdes_tbl[] = {
        QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a),
        QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
@@ -871,6 +895,134 @@ static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = {
        QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
 };
 
+static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_serdes_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x9e),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x2e),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x82),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x82),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0xab),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0xea),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE1, 0x25),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE1, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xb7),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xb7),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x9e),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x06),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x12),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x34),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x82),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0xab),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xea),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE0, 0x25),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE0, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0e),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x31),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x1a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x14),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x20),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_1, 0xb6),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_2, 0x4b),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_3, 0x37),
+       QMP_PHY_INIT_CFG(QSERDES_V6_COM_ADDITIONAL_MISC, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_tx_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_TX, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_RX, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+       QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_1, 0xf5),
+       QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_3, 0x3f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_4, 0x3f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_5, 0x5f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_TX_RCV_DETECT_LVL_2, 0x12),
+       QMP_PHY_INIT_CFG(QSERDES_V6_TX_PI_QEC_CTRL, 0x21),
+};
+
+static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_rx_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FO_GAIN, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SO_GAIN, 0x06),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_PI_CONTROLS, 0x99),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_THRESH1, 0x08),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_THRESH2, 0x08),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_GAIN1, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_GAIN2, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_VGA_CAL_CNTRL1, 0x54),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_VGA_CAL_CNTRL2, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_GM_CAL, 0x13),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_IDAC_TSETTLE_LOW, 0x07),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_CNTRL, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_LOW, 0x3f),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH, 0xbf),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH2, 0xff),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH3, 0xdf),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH4, 0xed),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_LOW, 0xdc),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH, 0x5c),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH2, 0x9c),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH3, 0x1d),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH4, 0x09),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_DFE_EN_TIMER, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_DCC_CTRL1, 0x0c),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_VTH_CODE, 0x10),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_CAL_CTRL1, 0x14),
+       QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_CAL_TRIM, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_pcs_tbl[] = {
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0xaa),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_pcs_usb_tbl[] = {
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+       QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+};
+
 static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_tx_tbl[] = {
        QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
        QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
@@ -1161,6 +1313,134 @@ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_usb_tbl[] = {
        QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
 };
 
+static const struct qmp_phy_init_tbl x1e80100_usb3_uniphy_serdes_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE1_MODE1, 0xc0),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE2_MODE1, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_CP_CTRL_MODE1, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_RCTRL_MODE1, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_CCTRL_MODE1, 0x36),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_CORECLK_DIV_MODE1, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP1_MODE1, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP2_MODE1, 0x41),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_DEC_START_MODE1, 0x41),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START1_MODE1, 0x55),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START2_MODE1, 0x75),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START3_MODE1, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_HSCLK_SEL_1, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE1_MODE1, 0x25),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE2_MODE1, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x5c),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x5c),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE1_MODE0, 0xc0),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_CP_CTRL_MODE0, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_RCTRL_MODE0, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_CCTRL_MODE0, 0x36),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP1_MODE0, 0x08),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP2_MODE0, 0x1a),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_DEC_START_MODE0, 0x41),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START1_MODE0, 0x55),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START2_MODE0, 0x75),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START3_MODE0, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE1_MODE0, 0x25),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE2_MODE0, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_BG_TIMER, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_EN_CENTER, 0x01),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_PER1, 0x62),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_PER2, 0x02),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_SYSCLK_BUF_ENABLE, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_SYSCLK_EN_SEL, 0x1a),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP_CFG, 0x14),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE_MAP, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_CORE_CLK_EN, 0x20),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_CMN_CONFIG_1, 0x16),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_1, 0xb6),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_2, 0x4b),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_3, 0x37),
+       QMP_PHY_INIT_CFG(QSERDES_V7_COM_ADDITIONAL_MISC, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb3_uniphy_tx_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_TX, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_RX, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+       QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_1, 0xf5),
+       QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_3, 0x3f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_4, 0x3f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_5, 0x5f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_TX_RCV_DETECT_LVL_2, 0x12),
+       QMP_PHY_INIT_CFG(QSERDES_V7_TX_PI_QEC_CTRL, 0x21),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb3_uniphy_rx_tbl[] = {
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_FO_GAIN, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SO_GAIN, 0x06),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_PI_CONTROLS, 0x99),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_THRESH1, 0x08),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_THRESH2, 0x08),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_GAIN1, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_GAIN2, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_VGA_CAL_CNTRL1, 0x54),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_VGA_CAL_CNTRL2, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_GM_CAL, 0x13),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_IDAC_TSETTLE_LOW, 0x07),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_CNTRL, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_LOW, 0x3f),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH, 0xbf),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH2, 0xff),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH3, 0xdf),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH4, 0xed),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_LOW, 0xdc),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH, 0x5c),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH2, 0x9c),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH3, 0x1d),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH4, 0x09),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_DFE_EN_TIMER, 0x04),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_DCC_CTRL1, 0x0c),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_VTH_CODE, 0x10),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_CAL_CTRL1, 0x14),
+       QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_CAL_TRIM, 0x08),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb3_uniphy_pcs_tbl[] = {
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_LOCK_DETECT_CONFIG2, 0x89),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_LOCK_DETECT_CONFIG3, 0x20),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_LOCK_DETECT_CONFIG6, 0x13),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_REFGEN_REQ_CONFIG1, 0x21),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_RX_SIGDET_LVL, 0xaa),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_CDR_RESET_TIME, 0x0a),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_PCS_TX_RX_CONFIG, 0x0c),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_EQ_CONFIG1, 0x4b),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb3_uniphy_pcs_usb_tbl[] = {
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+       QMP_PHY_INIT_CFG(QPHY_V7_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+};
+
 struct qmp_usb_offsets {
        u16 serdes;
        u16 pcs;
@@ -1276,6 +1556,14 @@ static const char * const qmp_phy_vreg_l[] = {
        "vdda-phy", "vdda-pll",
 };
 
+static const struct qmp_usb_offsets qmp_usb_offsets_ipq8074 = {
+       .serdes         = 0,
+       .pcs            = 0x800,
+       .pcs_misc       = 0x600,
+       .tx             = 0x200,
+       .rx             = 0x400,
+};
+
 static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
        .serdes         = 0,
        .pcs            = 0x800,
@@ -1317,10 +1605,44 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v5 = {
        .rx             = 0x1000,
 };
 
+static const struct qmp_usb_offsets qmp_usb_offsets_v6 = {
+       .serdes         = 0,
+       .pcs            = 0x0200,
+       .pcs_usb        = 0x1200,
+       .tx             = 0x0e00,
+       .rx             = 0x1000,
+};
+
+static const struct qmp_usb_offsets qmp_usb_offsets_v7 = {
+       .serdes         = 0,
+       .pcs            = 0x0200,
+       .pcs_usb        = 0x1200,
+       .tx             = 0x0e00,
+       .rx             = 0x1000,
+};
+
+static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
+       .lanes                  = 1,
+
+       .offsets                = &qmp_usb_offsets_ipq8074,
+
+       .serdes_tbl             = ipq9574_usb3_serdes_tbl,
+       .serdes_tbl_num         = ARRAY_SIZE(ipq9574_usb3_serdes_tbl),
+       .tx_tbl                 = msm8996_usb3_tx_tbl,
+       .tx_tbl_num             = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+       .rx_tbl                 = ipq8074_usb3_rx_tbl,
+       .rx_tbl_num             = ARRAY_SIZE(ipq8074_usb3_rx_tbl),
+       .pcs_tbl                = ipq8074_usb3_pcs_tbl,
+       .pcs_tbl_num            = ARRAY_SIZE(ipq8074_usb3_pcs_tbl),
+       .vreg_list              = qmp_phy_vreg_l,
+       .num_vregs              = ARRAY_SIZE(qmp_phy_vreg_l),
+       .regs                   = qmp_v3_usb3phy_regs_layout,
+};
+
 static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
        .lanes                  = 1,
 
-       .offsets                = &qmp_usb_offsets_v3,
+       .offsets                = &qmp_usb_offsets_ipq8074,
 
        .serdes_tbl             = ipq8074_usb3_serdes_tbl,
        .serdes_tbl_num         = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
@@ -1541,6 +1863,28 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
        .has_pwrdn_delay        = true,
 };
 
+static const struct qmp_phy_cfg sdx75_usb3_uniphy_cfg = {
+       .lanes                  = 1,
+       .offsets                = &qmp_usb_offsets_v6,
+
+       .serdes_tbl             = sdx75_usb3_uniphy_serdes_tbl,
+       .serdes_tbl_num         = ARRAY_SIZE(sdx75_usb3_uniphy_serdes_tbl),
+       .tx_tbl                 = sdx75_usb3_uniphy_tx_tbl,
+       .tx_tbl_num             = ARRAY_SIZE(sdx75_usb3_uniphy_tx_tbl),
+       .rx_tbl                 = sdx75_usb3_uniphy_rx_tbl,
+       .rx_tbl_num             = ARRAY_SIZE(sdx75_usb3_uniphy_rx_tbl),
+       .pcs_tbl                = sdx75_usb3_uniphy_pcs_tbl,
+       .pcs_tbl_num            = ARRAY_SIZE(sdx75_usb3_uniphy_pcs_tbl),
+       .pcs_usb_tbl            = sdx75_usb3_uniphy_pcs_usb_tbl,
+       .pcs_usb_tbl_num        = ARRAY_SIZE(sdx75_usb3_uniphy_pcs_usb_tbl),
+       .vreg_list              = qmp_phy_vreg_l,
+       .num_vregs              = ARRAY_SIZE(qmp_phy_vreg_l),
+       .regs                   = qmp_v6_usb3phy_regs_layout,
+       .pcs_usb_offset         = 0x1000,
+
+       .has_pwrdn_delay        = true,
+};
+
 static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
        .lanes                  = 1,
 
@@ -1582,6 +1926,26 @@ static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
        .regs                   = qmp_v3_usb3phy_regs_layout_qcm2290,
 };
 
+static const struct qmp_phy_cfg x1e80100_usb3_uniphy_cfg = {
+       .lanes                  = 1,
+
+       .offsets                = &qmp_usb_offsets_v7,
+
+       .serdes_tbl             = x1e80100_usb3_uniphy_serdes_tbl,
+       .serdes_tbl_num         = ARRAY_SIZE(x1e80100_usb3_uniphy_serdes_tbl),
+       .tx_tbl                 = x1e80100_usb3_uniphy_tx_tbl,
+       .tx_tbl_num             = ARRAY_SIZE(x1e80100_usb3_uniphy_tx_tbl),
+       .rx_tbl                 = x1e80100_usb3_uniphy_rx_tbl,
+       .rx_tbl_num             = ARRAY_SIZE(x1e80100_usb3_uniphy_rx_tbl),
+       .pcs_tbl                = x1e80100_usb3_uniphy_pcs_tbl,
+       .pcs_tbl_num            = ARRAY_SIZE(x1e80100_usb3_uniphy_pcs_tbl),
+       .pcs_usb_tbl            = x1e80100_usb3_uniphy_pcs_usb_tbl,
+       .pcs_usb_tbl_num        = ARRAY_SIZE(x1e80100_usb3_uniphy_pcs_usb_tbl),
+       .vreg_list              = qmp_phy_vreg_l,
+       .num_vregs              = ARRAY_SIZE(qmp_phy_vreg_l),
+       .regs                   = qmp_v7_usb3phy_regs_layout,
+};
+
 static void qmp_usb_configure_lane(void __iomem *base,
                                        const struct qmp_phy_init_tbl tbl[],
                                        int num,
@@ -2225,7 +2589,7 @@ err_node_put:
 static const struct of_device_id qmp_usb_of_match_table[] = {
        {
                .compatible = "qcom,ipq6018-qmp-usb3-phy",
-               .data = &ipq8074_usb3phy_cfg,
+               .data = &ipq6018_usb3phy_cfg,
        }, {
                .compatible = "qcom,ipq8074-qmp-usb3-phy",
                .data = &ipq8074_usb3phy_cfg,
@@ -2256,6 +2620,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
        }, {
                .compatible = "qcom,sdx65-qmp-usb3-uni-phy",
                .data = &sdx65_usb3_uniphy_cfg,
+       }, {
+               .compatible = "qcom,sdx75-qmp-usb3-uni-phy",
+               .data = &sdx75_usb3_uniphy_cfg,
        }, {
                .compatible = "qcom,sm6115-qmp-usb3-phy",
                .data = &qcm2290_usb3phy_cfg,
@@ -2268,6 +2635,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
        }, {
                .compatible = "qcom,sm8350-qmp-usb3-uni-phy",
                .data = &sm8350_usb3_uniphy_cfg,
+       }, {
+               .compatible = "qcom,x1e80100-qmp-usb3-uni-phy",
+               .data = &x1e80100_usb3_uniphy_cfg,
        },
        { },
 };
index 71f063f4a56e3d6234792af5332fd4104a6e9e25..6923496cbfee21c2bef6d1fa342254f806dc26fd 100644 (file)
 #include "phy-qcom-qmp-qserdes-com-v6.h"
 #include "phy-qcom-qmp-qserdes-txrx-v6.h"
 #include "phy-qcom-qmp-qserdes-txrx-v6_20.h"
+#include "phy-qcom-qmp-qserdes-txrx-v6_n4.h"
 #include "phy-qcom-qmp-qserdes-ln-shrd-v6.h"
 
+#include "phy-qcom-qmp-qserdes-com-v7.h"
+#include "phy-qcom-qmp-qserdes-txrx-v7.h"
+
 #include "phy-qcom-qmp-qserdes-pll.h"
 
 #include "phy-qcom-qmp-pcs-v2.h"
@@ -44,6 +48,8 @@
 
 #include "phy-qcom-qmp-pcs-v6_20.h"
 
+#include "phy-qcom-qmp-pcs-v7.h"
+
 /* Only for QMP V3 & V4 PHY - DP COM registers */
 #define QPHY_V3_DP_COM_PHY_MODE_CTRL                   0x00
 #define QPHY_V3_DP_COM_SW_RESET                                0x04
index 36505fc5f386e2ca17b3efa8b30c1eb554b3e9a7..e342eef0640b78a47dd766ead8772604a02b24ed 100644 (file)
@@ -13,7 +13,7 @@ config PHY_R8A779F0_ETHERNET_SERDES
 config PHY_RCAR_GEN2
        tristate "Renesas R-Car generation 2 USB PHY driver"
        depends on ARCH_RENESAS
-       depends on GENERIC_PHY
+       select GENERIC_PHY
        help
          Support for USB PHY found on Renesas R-Car generation 2 SoCs.
 
index e53eace7c91e372e60d0fcbb6032e2e8fd510595..6387c0d34c551c0e4e28e09af0792cee69eb2952 100644 (file)
@@ -673,8 +673,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
        channel->irq = platform_get_irq_optional(pdev, 0);
        channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
        if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
-               int ret;
-
                channel->is_otg_channel = true;
                channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
                                                        "renesas,no-otg-pins");
@@ -738,8 +736,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
                ret = PTR_ERR(provider);
                goto error;
        } else if (channel->is_otg_channel) {
-               int ret;
-
                ret = device_create_file(dev, &dev_attr_role);
                if (ret < 0)
                        goto error;
index a24d2af154df7807adab8070d637860b1b4aa2a9..4f71373ae6e1a3782ffdaf4c32cd2c21befa5908 100644 (file)
@@ -123,9 +123,12 @@ struct rockchip_chg_det_reg {
  * @disrise_en: host disconnect rise edge detection enable.
  * @disrise_st: host disconnect rise edge detection state.
  * @disrise_clr: host disconnect rise edge detection clear.
- * @id_det_en: id detection enable register.
- * @id_det_st: id detection state register.
- * @id_det_clr: id detection clear register.
+ * @idfall_det_en: id detection enable register, falling edge
+ * @idfall_det_st: id detection state register, falling edge
+ * @idfall_det_clr: id detection clear register, falling edge
+ * @idrise_det_en: id detection enable register, rising edge
+ * @idrise_det_st: id detection state register, rising edge
+ * @idrise_det_clr: id detection clear register, rising edge
  * @ls_det_en: linestate detection enable register.
  * @ls_det_st: linestate detection state register.
  * @ls_det_clr: linestate detection clear register.
@@ -146,9 +149,12 @@ struct rockchip_usb2phy_port_cfg {
        struct usb2phy_reg      disrise_en;
        struct usb2phy_reg      disrise_st;
        struct usb2phy_reg      disrise_clr;
-       struct usb2phy_reg      id_det_en;
-       struct usb2phy_reg      id_det_st;
-       struct usb2phy_reg      id_det_clr;
+       struct usb2phy_reg      idfall_det_en;
+       struct usb2phy_reg      idfall_det_st;
+       struct usb2phy_reg      idfall_det_clr;
+       struct usb2phy_reg      idrise_det_en;
+       struct usb2phy_reg      idrise_det_st;
+       struct usb2phy_reg      idrise_det_clr;
        struct usb2phy_reg      ls_det_en;
        struct usb2phy_reg      ls_det_st;
        struct usb2phy_reg      ls_det_clr;
@@ -488,15 +494,27 @@ static int rockchip_usb2phy_init(struct phy *phy)
                        if (ret)
                                goto out;
 
-                       /* clear id status and enable id detect irq */
+                       /* clear id status and enable id detect irqs */
                        ret = property_enable(rphy->grf,
-                                             &rport->port_cfg->id_det_clr,
+                                             &rport->port_cfg->idfall_det_clr,
                                              true);
                        if (ret)
                                goto out;
 
                        ret = property_enable(rphy->grf,
-                                             &rport->port_cfg->id_det_en,
+                                             &rport->port_cfg->idrise_det_clr,
+                                             true);
+                       if (ret)
+                               goto out;
+
+                       ret = property_enable(rphy->grf,
+                                             &rport->port_cfg->idfall_det_en,
+                                             true);
+                       if (ret)
+                               goto out;
+
+                       ret = property_enable(rphy->grf,
+                                             &rport->port_cfg->idrise_det_en,
                                              true);
                        if (ret)
                                goto out;
@@ -1030,11 +1048,16 @@ static irqreturn_t rockchip_usb2phy_id_irq(int irq, void *data)
        struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
        bool id;
 
-       if (!property_enabled(rphy->grf, &rport->port_cfg->id_det_st))
+       if (!property_enabled(rphy->grf, &rport->port_cfg->idfall_det_st) &&
+           !property_enabled(rphy->grf, &rport->port_cfg->idrise_det_st))
                return IRQ_NONE;
 
        /* clear id detect irq pending status */
-       property_enable(rphy->grf, &rport->port_cfg->id_det_clr, true);
+       if (property_enabled(rphy->grf, &rport->port_cfg->idfall_det_st))
+               property_enable(rphy->grf, &rport->port_cfg->idfall_det_clr, true);
+
+       if (property_enabled(rphy->grf, &rport->port_cfg->idrise_det_st))
+               property_enable(rphy->grf, &rport->port_cfg->idrise_det_clr, true);
 
        id = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
        extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !id);
@@ -1464,6 +1487,14 @@ put_child:
        return ret;
 }
 
+static int rk3128_usb2phy_tuning(struct rockchip_usb2phy *rphy)
+{
+       /* Turn off differential receiver in suspend mode */
+       return regmap_write_bits(rphy->grf, 0x298,
+                               BIT(2) << BIT_WRITEABLE_SHIFT | BIT(2),
+                               BIT(2) << BIT_WRITEABLE_SHIFT | 0);
+}
+
 static int rk3588_usb2phy_tuning(struct rockchip_usb2phy *rphy)
 {
        int ret;
@@ -1513,6 +1544,54 @@ static int rk3588_usb2phy_tuning(struct rockchip_usb2phy *rphy)
        return ret;
 }
 
+static const struct rockchip_usb2phy_cfg rk3128_phy_cfgs[] = {
+       {
+               .reg = 0x17c,
+               .num_ports      = 2,
+               .phy_tuning     = rk3128_usb2phy_tuning,
+               .clkout_ctl     = { 0x0190, 15, 15, 1, 0 },
+               .port_cfgs      = {
+                       [USB2PHY_PORT_OTG] = {
+                               .phy_sus        = { 0x017c, 8, 0, 0, 0x1d1 },
+                               .bvalid_det_en  = { 0x017c, 14, 14, 0, 1 },
+                               .bvalid_det_st  = { 0x017c, 15, 15, 0, 1 },
+                               .bvalid_det_clr = { 0x017c, 15, 15, 0, 1 },
+                               .idfall_det_en  = { 0x01a0, 2, 2, 0, 1 },
+                               .idfall_det_st  = { 0x01a0, 3, 3, 0, 1 },
+                               .idfall_det_clr = { 0x01a0, 3, 3, 0, 1 },
+                               .idrise_det_en  = { 0x01a0, 0, 0, 0, 1 },
+                               .idrise_det_st  = { 0x01a0, 1, 1, 0, 1 },
+                               .idrise_det_clr = { 0x01a0, 1, 1, 0, 1 },
+                               .ls_det_en      = { 0x017c, 12, 12, 0, 1 },
+                               .ls_det_st      = { 0x017c, 13, 13, 0, 1 },
+                               .ls_det_clr     = { 0x017c, 13, 13, 0, 1 },
+                               .utmi_bvalid    = { 0x014c, 5, 5, 0, 1 },
+                               .utmi_id        = { 0x014c, 8, 8, 0, 1 },
+                               .utmi_ls        = { 0x014c, 7, 6, 0, 1 },
+                       },
+                       [USB2PHY_PORT_HOST] = {
+                               .phy_sus        = { 0x0194, 8, 0, 0, 0x1d1 },
+                               .ls_det_en      = { 0x0194, 14, 14, 0, 1 },
+                               .ls_det_st      = { 0x0194, 15, 15, 0, 1 },
+                               .ls_det_clr     = { 0x0194, 15, 15, 0, 1 }
+                       }
+               },
+               .chg_det = {
+                       .opmode         = { 0x017c, 3, 0, 5, 1 },
+                       .cp_det         = { 0x02c0, 6, 6, 0, 1 },
+                       .dcp_det        = { 0x02c0, 5, 5, 0, 1 },
+                       .dp_det         = { 0x02c0, 7, 7, 0, 1 },
+                       .idm_sink_en    = { 0x0184, 8, 8, 0, 1 },
+                       .idp_sink_en    = { 0x0184, 7, 7, 0, 1 },
+                       .idp_src_en     = { 0x0184, 9, 9, 0, 1 },
+                       .rdm_pdwn_en    = { 0x0184, 10, 10, 0, 1 },
+                       .vdm_src_en     = { 0x0184, 12, 12, 0, 1 },
+                       .vdp_src_en     = { 0x0184, 11, 11, 0, 1 },
+               },
+       },
+       { /* sentinel */ }
+};
+
 static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
        {
                .reg = 0x760,
@@ -1524,9 +1603,12 @@ static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
                                .bvalid_det_en  = { 0x0680, 3, 3, 0, 1 },
                                .bvalid_det_st  = { 0x0690, 3, 3, 0, 1 },
                                .bvalid_det_clr = { 0x06a0, 3, 3, 0, 1 },
-                               .id_det_en      = { 0x0680, 6, 5, 0, 3 },
-                               .id_det_st      = { 0x0690, 6, 5, 0, 3 },
-                               .id_det_clr     = { 0x06a0, 6, 5, 0, 3 },
+                               .idfall_det_en  = { 0x0680, 6, 6, 0, 1 },
+                               .idfall_det_st  = { 0x0690, 6, 6, 0, 1 },
+                               .idfall_det_clr = { 0x06a0, 6, 6, 0, 1 },
+                               .idrise_det_en  = { 0x0680, 5, 5, 0, 1 },
+                               .idrise_det_st  = { 0x0690, 5, 5, 0, 1 },
+                               .idrise_det_clr = { 0x06a0, 5, 5, 0, 1 },
                                .ls_det_en      = { 0x0680, 2, 2, 0, 1 },
                                .ls_det_st      = { 0x0690, 2, 2, 0, 1 },
                                .ls_det_clr     = { 0x06a0, 2, 2, 0, 1 },
@@ -1587,9 +1669,12 @@ static const struct rockchip_usb2phy_cfg rk3308_phy_cfgs[] = {
                                .bvalid_det_en  = { 0x3020, 3, 2, 0, 3 },
                                .bvalid_det_st  = { 0x3024, 3, 2, 0, 3 },
                                .bvalid_det_clr = { 0x3028, 3, 2, 0, 3 },
-                               .id_det_en      = { 0x3020, 5, 4, 0, 3 },
-                               .id_det_st      = { 0x3024, 5, 4, 0, 3 },
-                               .id_det_clr     = { 0x3028, 5, 4, 0, 3 },
+                               .idfall_det_en  = { 0x3020, 5, 5, 0, 1 },
+                               .idfall_det_st  = { 0x3024, 5, 5, 0, 1 },
+                               .idfall_det_clr = { 0x3028, 5, 5, 0, 1 },
+                               .idrise_det_en  = { 0x3020, 4, 4, 0, 1 },
+                               .idrise_det_st  = { 0x3024, 4, 4, 0, 1 },
+                               .idrise_det_clr = { 0x3028, 4, 4, 0, 1 },
                                .ls_det_en      = { 0x3020, 0, 0, 0, 1 },
                                .ls_det_st      = { 0x3024, 0, 0, 0, 1 },
                                .ls_det_clr     = { 0x3028, 0, 0, 0, 1 },
@@ -1634,9 +1719,12 @@ static const struct rockchip_usb2phy_cfg rk3328_phy_cfgs[] = {
                                .bvalid_det_en  = { 0x0110, 3, 2, 0, 3 },
                                .bvalid_det_st  = { 0x0114, 3, 2, 0, 3 },
                                .bvalid_det_clr = { 0x0118, 3, 2, 0, 3 },
-                               .id_det_en      = { 0x0110, 5, 4, 0, 3 },
-                               .id_det_st      = { 0x0114, 5, 4, 0, 3 },
-                               .id_det_clr     = { 0x0118, 5, 4, 0, 3 },
+                               .idfall_det_en  = { 0x0110, 5, 5, 0, 1 },
+                               .idfall_det_st  = { 0x0114, 5, 5, 0, 1 },
+                               .idfall_det_clr = { 0x0118, 5, 5, 0, 1 },
+                               .idrise_det_en  = { 0x0110, 4, 4, 0, 1 },
+                               .idrise_det_st  = { 0x0114, 4, 4, 0, 1 },
+                               .idrise_det_clr = { 0x0118, 4, 4, 0, 1 },
                                .ls_det_en      = { 0x0110, 0, 0, 0, 1 },
                                .ls_det_st      = { 0x0114, 0, 0, 0, 1 },
                                .ls_det_clr     = { 0x0118, 0, 0, 0, 1 },
@@ -1700,9 +1788,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
                                .bvalid_det_en  = { 0xe3c0, 3, 3, 0, 1 },
                                .bvalid_det_st  = { 0xe3e0, 3, 3, 0, 1 },
                                .bvalid_det_clr = { 0xe3d0, 3, 3, 0, 1 },
-                               .id_det_en      = { 0xe3c0, 5, 4, 0, 3 },
-                               .id_det_st      = { 0xe3e0, 5, 4, 0, 3 },
-                               .id_det_clr     = { 0xe3d0, 5, 4, 0, 3 },
+                               .idfall_det_en  = { 0xe3c0, 5, 5, 0, 1 },
+                               .idfall_det_st  = { 0xe3e0, 5, 5, 0, 1 },
+                               .idfall_det_clr = { 0xe3d0, 5, 5, 0, 1 },
+                               .idrise_det_en  = { 0xe3c0, 4, 4, 0, 1 },
+                               .idrise_det_st  = { 0xe3e0, 4, 4, 0, 1 },
+                               .idrise_det_clr = { 0xe3d0, 4, 4, 0, 1 },
                                .utmi_avalid    = { 0xe2ac, 7, 7, 0, 1 },
                                .utmi_bvalid    = { 0xe2ac, 12, 12, 0, 1 },
                                .utmi_id        = { 0xe2ac, 8, 8, 0, 1 },
@@ -1739,9 +1830,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
                                .bvalid_det_en  = { 0xe3c0, 8, 8, 0, 1 },
                                .bvalid_det_st  = { 0xe3e0, 8, 8, 0, 1 },
                                .bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 },
-                               .id_det_en      = { 0xe3c0, 10, 9, 0, 3 },
-                               .id_det_st      = { 0xe3e0, 10, 9, 0, 3 },
-                               .id_det_clr     = { 0xe3d0, 10, 9, 0, 3 },
+                               .idfall_det_en  = { 0xe3c0, 10, 10, 0, 1 },
+                               .idfall_det_st  = { 0xe3e0, 10, 10, 0, 1 },
+                               .idfall_det_clr = { 0xe3d0, 10, 10, 0, 1 },
+                               .idrise_det_en  = { 0xe3c0, 9, 9, 0, 1 },
+                               .idrise_det_st  = { 0xe3e0, 9, 9, 0, 1 },
+                               .idrise_det_clr = { 0xe3d0, 9, 9, 0, 1 },
                                .utmi_avalid    = { 0xe2ac, 10, 10, 0, 1 },
                                .utmi_bvalid    = { 0xe2ac, 16, 16, 0, 1 },
                                .utmi_id        = { 0xe2ac, 11, 11, 0, 1 },
@@ -1770,9 +1864,12 @@ static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
                                .bvalid_det_en  = { 0x0080, 3, 2, 0, 3 },
                                .bvalid_det_st  = { 0x0084, 3, 2, 0, 3 },
                                .bvalid_det_clr = { 0x0088, 3, 2, 0, 3 },
-                               .id_det_en      = { 0x0080, 5, 4, 0, 3 },
-                               .id_det_st      = { 0x0084, 5, 4, 0, 3 },
-                               .id_det_clr     = { 0x0088, 5, 4, 0, 3 },
+                               .idfall_det_en  = { 0x0080, 5, 5, 0, 1 },
+                               .idfall_det_st  = { 0x0084, 5, 5, 0, 1 },
+                               .idfall_det_clr = { 0x0088, 5, 5, 0, 1 },
+                               .idrise_det_en  = { 0x0080, 4, 4, 0, 1 },
+                               .idrise_det_st  = { 0x0084, 4, 4, 0, 1 },
+                               .idrise_det_clr = { 0x0088, 4, 4, 0, 1 },
                                .utmi_avalid    = { 0x00c0, 10, 10, 0, 1 },
                                .utmi_bvalid    = { 0x00c0, 9, 9, 0, 1 },
                                .utmi_id        = { 0x00c0, 6, 6, 0, 1 },
@@ -1990,6 +2087,7 @@ static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
 
 static const struct of_device_id rockchip_usb2phy_dt_match[] = {
        { .compatible = "rockchip,px30-usb2phy", .data = &rk3328_phy_cfgs },
+       { .compatible = "rockchip,rk3128-usb2phy", .data = &rk3128_phy_cfgs },
        { .compatible = "rockchip,rk3228-usb2phy", .data = &rk3228_phy_cfgs },
        { .compatible = "rockchip,rk3308-usb2phy", .data = &rk3308_phy_cfgs },
        { .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
index bc847d3879f79c0684693a475a3c4664f2792794..0f4818adb440022d0d9b21a723e36ca062091268 100644 (file)
@@ -248,7 +248,7 @@ static const
 struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j784s4 = {
        .use_of_data = true,
        .regfields = phy_gmii_sel_fields_am654,
-       .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) |
+       .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
                       BIT(PHY_INTERFACE_MODE_USXGMII),
        .num_ports = 8,
        .num_qsgmii_main_ports = 2,
index fc3cd98c60ff42e1a0aa30698e68319b49009063..00d7e6a6de03a2cb85c26eeba29f90d57096f592 100644 (file)
@@ -1240,6 +1240,7 @@ static int wiz_phy_fullrt_div(struct wiz *wiz, int lane)
        case J721E_WIZ_10G:
        case J7200_WIZ_10G:
        case J721S2_WIZ_10G:
+       case J784S4_WIZ_10G:
                if (wiz->lane_phy_type[lane] == PHY_TYPE_SGMII)
                        return regmap_field_write(wiz->p0_fullrt_div[lane], 0x2);
                break;
index dd2913ac0fa28cea0cabf82c491e2ba49dfcb80e..78e19b128962a9a504986c7d0e8135da50527aa3 100644 (file)
@@ -117,7 +117,7 @@ static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
 {
        struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
 
-       if (!phy->comparator)
+       if (!phy->comparator || !phy->comparator->set_vbus)
                return -ENODEV;
 
        return phy->comparator->set_vbus(phy->comparator, enabled);
@@ -127,7 +127,7 @@ static int omap_usb_start_srp(struct usb_otg *otg)
 {
        struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
 
-       if (!phy->comparator)
+       if (!phy->comparator || !phy->comparator->start_srp)
                return -ENODEV;
 
        return phy->comparator->start_srp(phy->comparator);
index b11144bb448c90c325d2264483afb2c221095ccd..8163a5983166a8f925b302ff92f027f4b351ac3a 100644 (file)
@@ -484,6 +484,22 @@ config PINCTRL_TB10X
        depends on OF && ARC_PLAT_TB10X
        select GPIOLIB
 
+config PINCTRL_TPS6594
+       tristate "Pinctrl and GPIO driver for TI TPS6594 PMIC"
+       depends on OF && MFD_TPS6594
+       default MFD_TPS6594
+       select PINMUX
+       select GPIOLIB
+       select REGMAP
+       select GPIO_REGMAP
+       select GENERIC_PINCONF
+       help
+         Say Y to select the pinmuxing and GPIOs driver for the TPS6594
+         PMICs chip family.
+
+         This driver can also be built as a module
+         called tps6594-pinctrl.
+
 config PINCTRL_ZYNQ
        bool "Pinctrl driver for Xilinx Zynq"
        depends on ARCH_ZYNQ
index 7ac5d59c83e7846378024649c34892e769b48df0..1071f301cc70b6705a5b7e8cc4c0f9e18ddf84dc 100644 (file)
@@ -49,6 +49,7 @@ obj-$(CONFIG_PINCTRL_ST)      += pinctrl-st.o
 obj-$(CONFIG_PINCTRL_STMFX)    += pinctrl-stmfx.o
 obj-$(CONFIG_PINCTRL_SX150X)   += pinctrl-sx150x.o
 obj-$(CONFIG_PINCTRL_TB10X)    += pinctrl-tb10x.o
+obj-$(CONFIG_PINCTRL_TPS6594)  += pinctrl-tps6594.o
 obj-$(CONFIG_PINCTRL_ZYNQMP)   += pinctrl-zynqmp.o
 obj-$(CONFIG_PINCTRL_ZYNQ)     += pinctrl-zynq.o
 
index d099a7f25f64c9f816918c6b584b2e0322b1c78c..6bb2b461950bef5c420f1b250d48fcf6e907b98f 100644 (file)
@@ -171,8 +171,8 @@ static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
        if (!group)
                return -EINVAL;
 
-       for (i = 0; i < group->num_pins; i++)
-               unset |= BIT(group->pins[i]);
+       for (i = 0; i < group->grp.npins; i++)
+               unset |= BIT(group->grp.pins[i]);
 
        tmp = readl(ns_pinctrl->base);
        tmp &= ~unset;
index f2977eb65522e26192d456752f3e3e2b7010b586..ee56856cb80c33e4733f2b7f2a43fb681c89fc61 100644 (file)
@@ -13,6 +13,7 @@
 #define pr_fmt(fmt) "pinctrl core: " fmt
 
 #include <linux/array_size.h>
+#include <linux/cleanup.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -23,6 +24,7 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 
+#include <linux/gpio.h>
 #include <linux/gpio/driver.h>
 
 #include <linux/pinctrl/consumer.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinctrl.h>
 
-#ifdef CONFIG_GPIOLIB
-#include "../gpio/gpiolib.h"
-#endif
-
 #include "core.h"
 #include "devicetree.h"
 #include "pinconf.h"
@@ -145,7 +143,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np)
  */
 int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
 {
-       unsigned i, pin;
+       unsigned int i, pin;
 
        /* The pin number can be retrived from the pin controller descriptor */
        for (i = 0; i < pctldev->desc->npins; i++) {
@@ -166,7 +164,7 @@ int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
  * @pctldev: the pin control device to lookup the pin on
  * @pin: pin number/id to look up
  */
-const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin)
+const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned int pin)
 {
        const struct pin_desc *desc;
 
@@ -184,7 +182,7 @@ EXPORT_SYMBOL_GPL(pin_get_name);
 /* Deletes a range of pin descriptors */
 static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
                                  const struct pinctrl_pin_desc *pins,
-                                 unsigned num_pins)
+                                 unsigned int num_pins)
 {
        int i;
 
@@ -252,9 +250,9 @@ failed:
 
 static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
                                 const struct pinctrl_pin_desc *pins,
-                                unsigned num_descs)
+                                unsigned int num_descs)
 {
-       unsigned i;
+       unsigned int i;
        int ret = 0;
 
        for (i = 0; i < num_descs; i++) {
@@ -428,7 +426,7 @@ EXPORT_SYMBOL_GPL(pinctrl_add_gpio_range);
 
 void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
                             struct pinctrl_gpio_range *ranges,
-                            unsigned nranges)
+                            unsigned int nranges)
 {
        int i;
 
@@ -459,7 +457,7 @@ struct pinctrl_dev *pinctrl_find_and_add_gpio_range(const char *devname,
 EXPORT_SYMBOL_GPL(pinctrl_find_and_add_gpio_range);
 
 int pinctrl_get_group_pins(struct pinctrl_dev *pctldev, const char *pin_group,
-                               const unsigned **pins, unsigned *num_pins)
+                          const unsigned int **pins, unsigned int *num_pins)
 {
        const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
        int gs;
@@ -559,7 +557,7 @@ const char *pinctrl_generic_get_group_name(struct pinctrl_dev *pctldev,
        if (!group)
                return NULL;
 
-       return group->name;
+       return group->grp.name;
 }
 EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_name);
 
@@ -585,8 +583,8 @@ int pinctrl_generic_get_group_pins(struct pinctrl_dev *pctldev,
                return -EINVAL;
        }
 
-       *pins = group->pins;
-       *num_pins = group->num_pins;
+       *pins = group->grp.pins;
+       *num_pins = group->grp.npins;
 
        return 0;
 }
@@ -642,7 +640,7 @@ static int pinctrl_generic_group_name_to_selector(struct pinctrl_dev *pctldev,
  * Note that the caller must take care of locking.
  */
 int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
-                             int *pins, int num_pins, void *data)
+                             const unsigned int *pins, int num_pins, void *data)
 {
        struct group_desc *group;
        int selector, error;
@@ -660,10 +658,7 @@ int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
        if (!group)
                return -ENOMEM;
 
-       group->name = name;
-       group->pins = pins;
-       group->num_pins = num_pins;
-       group->data = data;
+       *group = PINCTRL_GROUP_DESC(name, pins, num_pins, data);
 
        error = radix_tree_insert(&pctldev->pin_group_tree, selector, group);
        if (error)
@@ -734,8 +729,8 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
                               const char *pin_group)
 {
        const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
-       unsigned ngroups = pctlops->get_groups_count(pctldev);
-       unsigned group_selector = 0;
+       unsigned int ngroups = pctlops->get_groups_count(pctldev);
+       unsigned int group_selector = 0;
 
        while (group_selector < ngroups) {
                const char *gname = pctlops->get_group_name(pctldev,
@@ -1432,7 +1427,7 @@ EXPORT_SYMBOL_GPL(devm_pinctrl_put);
  * @num_maps: the number of maps in the mapping table
  */
 int pinctrl_register_mappings(const struct pinctrl_map *maps,
-                             unsigned num_maps)
+                             unsigned int num_maps)
 {
        int i, ret;
        struct pinctrl_maps *maps_node;
@@ -1647,10 +1642,10 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
 {
        struct pinctrl_dev *pctldev = s->private;
        const struct pinctrl_ops *ops = pctldev->desc->pctlops;
-       unsigned i, pin;
+       unsigned int i, pin;
 #ifdef CONFIG_GPIOLIB
+       struct gpio_device *gdev __free(gpio_device_put) = NULL;
        struct pinctrl_gpio_range *range;
-       struct gpio_chip *chip;
        int gpio_num;
 #endif
 
@@ -1685,11 +1680,11 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
                         * we need to get rid of the range->base eventually and
                         * get the descriptor directly from the gpio_chip.
                         */
-                       chip = gpiod_to_chip(gpio_to_desc(gpio_num));
-               else
-                       chip = NULL;
-               if (chip)
-                       seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
+                       gdev = gpiod_to_gpio_device(gpio_to_desc(gpio_num));
+               if (gdev)
+                       seq_printf(s, "%u:%s ",
+                                  gpio_num - gpio_device_get_base(gdev),
+                                  gpio_device_get_label(gdev));
                else
                        seq_puts(s, "0:? ");
 #endif
@@ -1711,7 +1706,7 @@ static int pinctrl_groups_show(struct seq_file *s, void *what)
 {
        struct pinctrl_dev *pctldev = s->private;
        const struct pinctrl_ops *ops = pctldev->desc->pctlops;
-       unsigned ngroups, selector = 0;
+       unsigned int ngroups, selector = 0;
 
        mutex_lock(&pctldev->mutex);
 
@@ -1719,8 +1714,8 @@ static int pinctrl_groups_show(struct seq_file *s, void *what)
 
        seq_puts(s, "registered pin groups:\n");
        while (selector < ngroups) {
-               const unsigned *pins = NULL;
-               unsigned num_pins = 0;
+               const unsigned int *pins = NULL;
+               unsigned int num_pins = 0;
                const char *gname = ops->get_group_name(pctldev, selector);
                const char *pname;
                int ret = 0;
index 530370443c191ff5111c90a151f8a412559594fd..837fd5bd903df811dd0731917d43f7f8bc906e4f 100644 (file)
@@ -111,8 +111,8 @@ struct pinctrl_state {
  * @func: the function selector to program
  */
 struct pinctrl_setting_mux {
-       unsigned group;
-       unsigned func;
+       unsigned int group;
+       unsigned int func;
 };
 
 /**
@@ -124,9 +124,9 @@ struct pinctrl_setting_mux {
  * @num_configs: the number of entries in array @configs
  */
 struct pinctrl_setting_configs {
-       unsigned group_or_pin;
+       unsigned int group_or_pin;
        unsigned long *configs;
-       unsigned num_configs;
+       unsigned int num_configs;
 };
 
 /**
@@ -173,7 +173,7 @@ struct pin_desc {
        void *drv_data;
        /* These fields only added when supporting pinmux drivers */
 #ifdef CONFIG_PINMUX
-       unsigned mux_usecount;
+       unsigned int mux_usecount;
        const char *mux_owner;
        const struct pinctrl_setting_mux *mux_setting;
        const char *gpio_owner;
@@ -189,25 +189,30 @@ struct pin_desc {
 struct pinctrl_maps {
        struct list_head node;
        const struct pinctrl_map *maps;
-       unsigned num_maps;
+       unsigned int num_maps;
 };
 
 #ifdef CONFIG_GENERIC_PINCTRL_GROUPS
 
+#include <linux/pinctrl/pinctrl.h>
+
 /**
  * struct group_desc - generic pin group descriptor
- * @name: name of the pin group
- * @pins: array of pins that belong to the group
- * @num_pins: number of pins in the group
+ * @grp: generic data of the pin group (name and pins)
  * @data: pin controller driver specific data
  */
 struct group_desc {
-       const char *name;
-       int *pins;
-       int num_pins;
+       struct pingroup grp;
        void *data;
 };
 
+/* Convenience macro to define a generic pin group descriptor */
+#define PINCTRL_GROUP_DESC(_name, _pins, _num_pins, _data)     \
+(struct group_desc) {                                          \
+       .grp = PINCTRL_PINGROUP(_name, _pins, _num_pins),       \
+       .data = _data,                                          \
+}
+
 int pinctrl_generic_get_group_count(struct pinctrl_dev *pctldev);
 
 const char *pinctrl_generic_get_group_name(struct pinctrl_dev *pctldev,
@@ -222,7 +227,7 @@ struct group_desc *pinctrl_generic_get_group(struct pinctrl_dev *pctldev,
                                             unsigned int group_selector);
 
 int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
-                             int *gpins, int ngpins, void *data);
+                             const unsigned int *pins, int num_pins, void *data);
 
 int pinctrl_generic_remove_group(struct pinctrl_dev *pctldev,
                                 unsigned int group_selector);
@@ -232,7 +237,7 @@ int pinctrl_generic_remove_group(struct pinctrl_dev *pctldev,
 struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name);
 struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np);
 int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
-const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin);
+const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned int pin);
 int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
                               const char *pin_group);
 
index 6e0a40962f384ad05688550993c3cfa6d9980a70..df1efc2e5202591d5f9f23d95f6008853b91e046 100644 (file)
@@ -24,11 +24,11 @@ struct pinctrl_dt_map {
        struct list_head node;
        struct pinctrl_dev *pctldev;
        struct pinctrl_map *map;
-       unsigned num_maps;
+       unsigned int num_maps;
 };
 
 static void dt_free_map(struct pinctrl_dev *pctldev,
-                    struct pinctrl_map *map, unsigned num_maps)
+                       struct pinctrl_map *map, unsigned int num_maps)
 {
        int i;
 
@@ -64,7 +64,7 @@ void pinctrl_dt_free_maps(struct pinctrl *p)
 
 static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
                                   struct pinctrl_dev *pctldev,
-                                  struct pinctrl_map *map, unsigned num_maps)
+                                  struct pinctrl_map *map, unsigned int num_maps)
 {
        int i;
        struct pinctrl_dt_map *dt_map;
@@ -116,7 +116,7 @@ static int dt_to_map_one_config(struct pinctrl *p,
        const struct pinctrl_ops *ops;
        int ret;
        struct pinctrl_map *map;
-       unsigned num_maps;
+       unsigned int num_maps;
        bool allow_default = false;
 
        /* Find the pin controller containing np_config */
index 9bc16943014f035ddff80500938c17b5a94e1d86..2d3d80921c0d1b14afc934d3296efbbb8f591005 100644 (file)
@@ -42,7 +42,7 @@ static inline const struct group_desc *imx_pinctrl_find_group_by_name(
 
        for (i = 0; i < pctldev->num_groups; i++) {
                grp = pinctrl_generic_get_group(pctldev, i);
-               if (grp && !strcmp(grp->name, name))
+               if (grp && !strcmp(grp->grp.name, name))
                        break;
        }
 
@@ -79,9 +79,9 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
        }
 
        if (info->flags & IMX_USE_SCU) {
-               map_num += grp->num_pins;
+               map_num += grp->grp.npins;
        } else {
-               for (i = 0; i < grp->num_pins; i++) {
+               for (i = 0; i < grp->grp.npins; i++) {
                        pin = &((struct imx_pin *)(grp->data))[i];
                        if (!(pin->conf.mmio.config & IMX_NO_PAD_CTL))
                                map_num++;
@@ -109,7 +109,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
 
        /* create config map */
        new_map++;
-       for (i = j = 0; i < grp->num_pins; i++) {
+       for (i = j = 0; i < grp->grp.npins; i++) {
                pin = &((struct imx_pin *)(grp->data))[i];
 
                /*
@@ -263,10 +263,10 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
        if (!func)
                return -EINVAL;
 
-       npins = grp->num_pins;
+       npins = grp->grp.npins;
 
        dev_dbg(ipctl->dev, "enable function %s group %s\n",
-               func->name, grp->name);
+               func->name, grp->grp.name);
 
        for (i = 0; i < npins; i++) {
                /*
@@ -423,7 +423,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
        if (!grp)
                return;
 
-       for (i = 0; i < grp->num_pins; i++) {
+       for (i = 0; i < grp->grp.npins; i++) {
                struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
 
                name = pin_get_name(pctldev, pin->pin);
@@ -511,6 +511,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
 {
        const struct imx_pinctrl_soc_info *info = ipctl->info;
        struct imx_pin *pin;
+       unsigned int *pins;
        int size, pin_size;
        const __be32 *list;
        int i;
@@ -525,7 +526,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
                pin_size = FSL_PIN_SIZE;
 
        /* Initialise group */
-       grp->name = np->name;
+       grp->grp.name = np->name;
 
        /*
         * the binding format is fsl,pins = <PIN_FUNC_ID CONFIG ...>,
@@ -553,24 +554,22 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
                return -EINVAL;
        }
 
-       grp->num_pins = size / pin_size;
-       grp->data = devm_kcalloc(ipctl->dev,
-                                grp->num_pins, sizeof(struct imx_pin),
-                                GFP_KERNEL);
-       grp->pins = devm_kcalloc(ipctl->dev,
-                                grp->num_pins, sizeof(unsigned int),
-                                GFP_KERNEL);
-       if (!grp->pins || !grp->data)
+       grp->grp.npins = size / pin_size;
+       grp->data = devm_kcalloc(ipctl->dev, grp->grp.npins, sizeof(*pin), GFP_KERNEL);
+       if (!grp->data)
                return -ENOMEM;
 
-       for (i = 0; i < grp->num_pins; i++) {
+       pins = devm_kcalloc(ipctl->dev, grp->grp.npins, sizeof(*pins), GFP_KERNEL);
+       if (!pins)
+               return -ENOMEM;
+       grp->grp.pins = pins;
+
+       for (i = 0; i < grp->grp.npins; i++) {
                pin = &((struct imx_pin *)(grp->data))[i];
                if (info->flags & IMX_USE_SCU)
-                       info->imx_pinctrl_parse_pin(ipctl, &grp->pins[i],
-                                                 pin, &list);
+                       info->imx_pinctrl_parse_pin(ipctl, &pins[i], pin, &list);
                else
-                       imx_pinctrl_parse_pin_mmio(ipctl, &grp->pins[i],
-                                                  pin, &list, np);
+                       imx_pinctrl_parse_pin_mmio(ipctl, &pins[i], pin, &list, np);
        }
 
        return 0;
@@ -612,8 +611,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
 
        i = 0;
        for_each_child_of_node(np, child) {
-               grp = devm_kzalloc(ipctl->dev, sizeof(struct group_desc),
-                                  GFP_KERNEL);
+               grp = devm_kzalloc(ipctl->dev, sizeof(*grp), GFP_KERNEL);
                if (!grp) {
                        of_node_put(child);
                        return -ENOMEM;
index d66f4f6932d8d2d468223f29a4404f72ad903e14..2101d30bd66c15908fc74f929529cdbf3fc8adef 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 # Intel pin control drivers
 menu "Intel pinctrl drivers"
-       depends on ACPI && (X86 || COMPILE_TEST)
+       depends on (ACPI && X86) || COMPILE_TEST
 
 config PINCTRL_BAYTRAIL
        bool "Intel Baytrail GPIO pin control"
@@ -37,6 +37,16 @@ config PINCTRL_INTEL
        select GPIOLIB
        select GPIOLIB_IRQCHIP
 
+config PINCTRL_INTEL_PLATFORM
+       tristate "Intel pinctrl and GPIO platform driver"
+       depends on ACPI
+       select PINCTRL_INTEL
+       help
+         This pinctrl driver provides an interface that allows configuring
+         of Intel PCH pins and using them as GPIOs. Currently the following
+         Intel SoCs / platforms require this to be functional:
+         - Lunar Lake
+
 config PINCTRL_ALDERLAKE
        tristate "Intel Alder Lake pinctrl and GPIO driver"
        select PINCTRL_INTEL
@@ -128,6 +138,15 @@ config PINCTRL_METEORLAKE
          This pinctrl driver provides an interface that allows configuring
          of Intel Meteor Lake pins and using them as GPIOs.
 
+config PINCTRL_METEORPOINT
+       tristate "Intel Meteor Point pinctrl and GPIO driver"
+       depends on ACPI
+       select PINCTRL_INTEL
+       help
+         Meteor Point is the PCH of Intel Meteor Lake. This pinctrl driver
+         provides an interface that allows configuring of PCH pins and
+         using them as GPIOs.
+
 config PINCTRL_SUNRISEPOINT
        tristate "Intel Sunrisepoint pinctrl and GPIO driver"
        select PINCTRL_INTEL
index f6d30f2d973a01028890fa1bc3963f854a2f07ab..d0d868c9a6229c9a67c94311c3db4427a7c3eba9 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_PINCTRL_TANGIER)           += pinctrl-tangier.o
 obj-$(CONFIG_PINCTRL_MERRIFIELD)       += pinctrl-merrifield.o
 obj-$(CONFIG_PINCTRL_MOOREFIELD)       += pinctrl-moorefield.o
 obj-$(CONFIG_PINCTRL_INTEL)            += pinctrl-intel.o
+obj-$(CONFIG_PINCTRL_INTEL_PLATFORM)   += pinctrl-intel-platform.o
 obj-$(CONFIG_PINCTRL_ALDERLAKE)                += pinctrl-alderlake.o
 obj-$(CONFIG_PINCTRL_BROXTON)          += pinctrl-broxton.o
 obj-$(CONFIG_PINCTRL_CANNONLAKE)       += pinctrl-cannonlake.o
@@ -21,5 +22,6 @@ obj-$(CONFIG_PINCTRL_JASPERLAKE)      += pinctrl-jasperlake.o
 obj-$(CONFIG_PINCTRL_LAKEFIELD)                += pinctrl-lakefield.o
 obj-$(CONFIG_PINCTRL_LEWISBURG)                += pinctrl-lewisburg.o
 obj-$(CONFIG_PINCTRL_METEORLAKE)       += pinctrl-meteorlake.o
+obj-$(CONFIG_PINCTRL_METEORPOINT)      += pinctrl-meteorpoint.o
 obj-$(CONFIG_PINCTRL_SUNRISEPOINT)     += pinctrl-sunrisepoint.o
 obj-$(CONFIG_PINCTRL_TIGERLAKE)                += pinctrl-tigerlake.o
index 4a37dc273d63518a2292e211fd68dfceea15d412..7d9948e5f422f6bd029861af6b209e2084e04abc 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -733,14 +734,12 @@ static const struct acpi_device_id adl_pinctrl_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, adl_pinctrl_acpi_match);
 
-static INTEL_PINCTRL_PM_OPS(adl_pinctrl_pm_ops);
-
 static struct platform_driver adl_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_hid,
        .driver = {
                .name = "alderlake-pinctrl",
                .acpi_match_table = adl_pinctrl_acpi_match,
-               .pm = &adl_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 module_platform_driver(adl_pinctrl_driver);
index 3c8c02043481683474aa90597a071cac4cba1dcd..ac97724c59bae9705e89c93246ec3f09580d663d 100644 (file)
@@ -588,10 +588,9 @@ static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
                                     const struct intel_pingroup group,
                                     unsigned int func)
 {
-       unsigned long flags;
        int i;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
 
        for (i = 0; i < group.grp.npins; i++) {
                void __iomem *padcfg0;
@@ -609,18 +608,15 @@ static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
                value |= func;
                writel(value, padcfg0);
        }
-
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
 static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
                                    const struct intel_pingroup group,
                                    const unsigned int *func)
 {
-       unsigned long flags;
        int i;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
 
        for (i = 0; i < group.grp.npins; i++) {
                void __iomem *padcfg0;
@@ -638,8 +634,6 @@ static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
                value |= func[i];
                writel(value, padcfg0);
        }
-
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
 static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
@@ -677,10 +671,10 @@ static u32 byt_get_gpio_mux(struct intel_pinctrl *vg, unsigned int offset)
 static void byt_gpio_clear_triggering(struct intel_pinctrl *vg, unsigned int offset)
 {
        void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
-       unsigned long flags;
        u32 value;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
+
        value = readl(reg);
 
        /* Do not clear direct-irq enabled IRQs (from gpio_disable_free) */
@@ -688,7 +682,6 @@ static void byt_gpio_clear_triggering(struct intel_pinctrl *vg, unsigned int off
                value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
 
        writel(value, reg);
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
 static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
@@ -698,9 +691,8 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
        struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
        void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        u32 value, gpio_mux;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
 
        /*
         * In most cases, func pin mux 000 means GPIO function.
@@ -713,15 +705,14 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
         */
        value = readl(reg) & BYT_PIN_MUX;
        gpio_mux = byt_get_gpio_mux(vg, offset);
-       if (gpio_mux != value) {
-               value = readl(reg) & ~BYT_PIN_MUX;
-               value |= gpio_mux;
-               writel(value, reg);
+       if (gpio_mux == value)
+               return 0;
 
-               dev_warn(vg->dev, FW_BUG "Pin %i: forcibly re-configured as GPIO\n", offset);
-       }
+       value = readl(reg) & ~BYT_PIN_MUX;
+       value |= gpio_mux;
+       writel(value, reg);
 
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
+       dev_warn(vg->dev, FW_BUG "Pin %i: forcibly re-configured as GPIO\n", offset);
 
        return 0;
 }
@@ -759,10 +750,9 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
 {
        struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
-       unsigned long flags;
        u32 value;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
 
        value = readl(val_reg);
        value &= ~BYT_DIR_MASK;
@@ -773,8 +763,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
 
        writel(value, val_reg);
 
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
-
        return 0;
 }
 
@@ -811,6 +799,7 @@ static int byt_set_pull_strength(u32 *reg, u16 strength)
        *reg &= ~BYT_PULL_STR_MASK;
 
        switch (strength) {
+       case 1: /* Set default strength value in case none is given */
        case 2000:
                *reg |= BYT_PULL_STR_2K;
                break;
@@ -830,6 +819,24 @@ static int byt_set_pull_strength(u32 *reg, u16 strength)
        return 0;
 }
 
+static void byt_gpio_force_input_mode(struct intel_pinctrl *vg, unsigned int offset)
+{
+       void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       u32 value;
+
+       value = readl(reg);
+       if (!(value & BYT_INPUT_EN))
+               return;
+
+       /*
+        * Pull assignment is only applicable in input mode. If
+        * chip is not in input mode, set it and warn about it.
+        */
+       value &= ~BYT_INPUT_EN;
+       writel(value, reg);
+       dev_warn(vg->dev, "Pin %i: forcibly set to input mode\n", offset);
+}
+
 static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
                              unsigned long *config)
 {
@@ -838,15 +845,15 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
        void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
-       unsigned long flags;
        u32 conf, pull, val, debounce;
        u16 arg = 0;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
-       conf = readl(conf_reg);
+       scoped_guard(raw_spinlock_irqsave, &byt_lock) {
+               conf = readl(conf_reg);
+               val = readl(val_reg);
+       }
+
        pull = conf & BYT_PULL_ASSIGN_MASK;
-       val = readl(val_reg);
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
 
        switch (param) {
        case PIN_CONFIG_BIAS_DISABLE:
@@ -873,9 +880,8 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
                if (!(conf & BYT_DEBOUNCE_EN))
                        return -EINVAL;
 
-               raw_spin_lock_irqsave(&byt_lock, flags);
-               debounce = readl(db_reg);
-               raw_spin_unlock_irqrestore(&byt_lock, flags);
+               scoped_guard(raw_spinlock_irqsave, &byt_lock)
+                       debounce = readl(db_reg);
 
                switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
                case BYT_DEBOUNCE_PULSE_375US:
@@ -919,18 +925,16 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
                              unsigned int num_configs)
 {
        struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
-       unsigned int param, arg;
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
-       void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
        void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
-       u32 conf, val, db_pulse, debounce;
-       unsigned long flags;
-       int i, ret = 0;
+       u32 conf, db_pulse, debounce;
+       enum pin_config_param param;
+       int i, ret;
+       u32 arg;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
 
        conf = readl(conf_reg);
-       val = readl(val_reg);
 
        for (i = 0; i < num_configs; i++) {
                param = pinconf_to_config_param(configs[i]);
@@ -941,59 +945,30 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
                        conf &= ~BYT_PULL_ASSIGN_MASK;
                        break;
                case PIN_CONFIG_BIAS_PULL_DOWN:
-                       /* Set default strength value in case none is given */
-                       if (arg == 1)
-                               arg = 2000;
-
-                       /*
-                        * Pull assignment is only applicable in input mode. If
-                        * chip is not in input mode, set it and warn about it.
-                        */
-                       if (val & BYT_INPUT_EN) {
-                               val &= ~BYT_INPUT_EN;
-                               writel(val, val_reg);
-                               dev_warn(vg->dev, "Pin %i: forcibly set to input mode\n", offset);
-                       }
+                       byt_gpio_force_input_mode(vg, offset);
 
                        conf &= ~BYT_PULL_ASSIGN_MASK;
                        conf |= BYT_PULL_ASSIGN_DOWN;
                        ret = byt_set_pull_strength(&conf, arg);
+                       if (ret)
+                               return ret;
 
                        break;
                case PIN_CONFIG_BIAS_PULL_UP:
-                       /* Set default strength value in case none is given */
-                       if (arg == 1)
-                               arg = 2000;
-
-                       /*
-                        * Pull assignment is only applicable in input mode. If
-                        * chip is not in input mode, set it and warn about it.
-                        */
-                       if (val & BYT_INPUT_EN) {
-                               val &= ~BYT_INPUT_EN;
-                               writel(val, val_reg);
-                               dev_warn(vg->dev, "Pin %i: forcibly set to input mode\n", offset);
-                       }
+                       byt_gpio_force_input_mode(vg, offset);
 
                        conf &= ~BYT_PULL_ASSIGN_MASK;
                        conf |= BYT_PULL_ASSIGN_UP;
                        ret = byt_set_pull_strength(&conf, arg);
+                       if (ret)
+                               return ret;
 
                        break;
                case PIN_CONFIG_INPUT_DEBOUNCE:
-                       if (arg) {
-                               conf |= BYT_DEBOUNCE_EN;
-                       } else {
-                               conf &= ~BYT_DEBOUNCE_EN;
-
-                               /*
-                                * No need to update the pulse value.
-                                * Debounce is going to be disabled.
-                                */
-                               break;
-                       }
-
                        switch (arg) {
+                       case 0:
+                               db_pulse = 0;
+                               break;
                        case 375:
                                db_pulse = BYT_DEBOUNCE_PULSE_375US;
                                break;
@@ -1016,33 +991,28 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
                                db_pulse = BYT_DEBOUNCE_PULSE_24MS;
                                break;
                        default:
-                               if (arg)
-                                       ret = -EINVAL;
-                               break;
+                               return -EINVAL;
                        }
 
-                       if (ret)
-                               break;
+                       if (db_pulse) {
+                               debounce = readl(db_reg);
+                               debounce = (debounce & ~BYT_DEBOUNCE_PULSE_MASK) | db_pulse;
+                               writel(debounce, db_reg);
 
-                       debounce = readl(db_reg);
-                       debounce = (debounce & ~BYT_DEBOUNCE_PULSE_MASK) | db_pulse;
-                       writel(debounce, db_reg);
+                               conf |= BYT_DEBOUNCE_EN;
+                       } else {
+                               conf &= ~BYT_DEBOUNCE_EN;
+                       }
 
                        break;
                default:
-                       ret = -ENOTSUPP;
+                       return -ENOTSUPP;
                }
-
-               if (ret)
-                       break;
        }
 
-       if (!ret)
-               writel(conf, conf_reg);
-
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
+       writel(conf, conf_reg);
 
-       return ret;
+       return 0;
 }
 
 static const struct pinconf_ops byt_pinconf_ops = {
@@ -1062,12 +1032,10 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
 {
        struct intel_pinctrl *vg = gpiochip_get_data(chip);
        void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
-       unsigned long flags;
        u32 val;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
-       val = readl(reg);
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
+       scoped_guard(raw_spinlock_irqsave, &byt_lock)
+               val = readl(reg);
 
        return !!(val & BYT_LEVEL);
 }
@@ -1075,35 +1043,34 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
 static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
 {
        struct intel_pinctrl *vg = gpiochip_get_data(chip);
-       void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
-       unsigned long flags;
+       void __iomem *reg;
        u32 old_val;
 
+       reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
        if (!reg)
                return;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
+
        old_val = readl(reg);
        if (value)
                writel(old_val | BYT_LEVEL, reg);
        else
                writel(old_val & ~BYT_LEVEL, reg);
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
 static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 {
        struct intel_pinctrl *vg = gpiochip_get_data(chip);
-       void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
-       unsigned long flags;
+       void __iomem *reg;
        u32 value;
 
+       reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
        if (!reg)
                return -EINVAL;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
-       value = readl(reg);
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
+       scoped_guard(raw_spinlock_irqsave, &byt_lock)
+               value = readl(reg);
 
        if (!(value & BYT_OUTPUT_EN))
                return GPIO_LINE_DIRECTION_OUT;
@@ -1117,17 +1084,15 @@ static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
 {
        struct intel_pinctrl *vg = gpiochip_get_data(chip);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
-       unsigned long flags;
        u32 reg;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
 
        reg = readl(val_reg);
        reg &= ~BYT_DIR_MASK;
        reg |= BYT_OUTPUT_EN;
        writel(reg, val_reg);
 
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
        return 0;
 }
 
@@ -1142,10 +1107,9 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
 {
        struct intel_pinctrl *vg = gpiochip_get_data(chip);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
-       unsigned long flags;
        u32 reg;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
 
        byt_gpio_direct_irq_check(vg, offset);
 
@@ -1158,7 +1122,6 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
 
        writel(reg, val_reg);
 
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
        return 0;
 }
 
@@ -1173,7 +1136,6 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
                void __iomem *conf_reg, *val_reg;
                const char *pull_str = NULL;
                const char *pull = NULL;
-               unsigned long flags;
                unsigned int pin;
 
                pin = vg->soc->pins[i].number;
@@ -1190,10 +1152,10 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
                        continue;
                }
 
-               raw_spin_lock_irqsave(&byt_lock, flags);
-               conf0 = readl(conf_reg);
-               val = readl(val_reg);
-               raw_spin_unlock_irqrestore(&byt_lock, flags);
+               scoped_guard(raw_spinlock_irqsave, &byt_lock) {
+                       conf0 = readl(conf_reg);
+                       val = readl(val_reg);
+               }
 
                comm = intel_get_community(vg, pin);
                if (!comm) {
@@ -1278,9 +1240,9 @@ static void byt_irq_ack(struct irq_data *d)
        if (!reg)
                return;
 
-       raw_spin_lock(&byt_lock);
+       guard(raw_spinlock)(&byt_lock);
+
        writel(BIT(hwirq % 32), reg);
-       raw_spin_unlock(&byt_lock);
 }
 
 static void byt_irq_mask(struct irq_data *d)
@@ -1298,7 +1260,6 @@ static void byt_irq_unmask(struct irq_data *d)
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct intel_pinctrl *vg = gpiochip_get_data(gc);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
-       unsigned long flags;
        void __iomem *reg;
        u32 value;
 
@@ -1308,7 +1269,8 @@ static void byt_irq_unmask(struct irq_data *d)
        if (!reg)
                return;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
+
        value = readl(reg);
 
        switch (irqd_get_trigger_type(d)) {
@@ -1330,23 +1292,21 @@ static void byt_irq_unmask(struct irq_data *d)
        }
 
        writel(value, reg);
-
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
 }
 
 static int byt_irq_type(struct irq_data *d, unsigned int type)
 {
        struct intel_pinctrl *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
-       u32 value;
-       unsigned long flags;
        void __iomem *reg;
+       u32 value;
 
        reg = byt_gpio_reg(vg, hwirq, BYT_CONF0_REG);
        if (!reg)
                return -EINVAL;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
+
        value = readl(reg);
 
        WARN(value & BYT_DIRECT_IRQ_EN,
@@ -1368,8 +1328,6 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
        else if (type & IRQ_TYPE_LEVEL_MASK)
                irq_set_handler_locked(d, handle_level_irq);
 
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
-
        return 0;
 }
 
@@ -1401,9 +1359,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
                        continue;
                }
 
-               raw_spin_lock(&byt_lock);
-               pending = readl(reg);
-               raw_spin_unlock(&byt_lock);
+               scoped_guard(raw_spinlock, &byt_lock)
+                       pending = readl(reg);
                for_each_set_bit(pin, &pending, 32)
                        generic_handle_domain_irq(vg->chip.irq.domain, base + pin);
        }
@@ -1666,10 +1623,9 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
 static int byt_gpio_suspend(struct device *dev)
 {
        struct intel_pinctrl *vg = dev_get_drvdata(dev);
-       unsigned long flags;
        int i;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
 
        for (i = 0; i < vg->soc->npins; i++) {
                void __iomem *reg;
@@ -1693,17 +1649,15 @@ static int byt_gpio_suspend(struct device *dev)
                vg->context.pads[i].val = value;
        }
 
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
        return 0;
 }
 
 static int byt_gpio_resume(struct device *dev)
 {
        struct intel_pinctrl *vg = dev_get_drvdata(dev);
-       unsigned long flags;
        int i;
 
-       raw_spin_lock_irqsave(&byt_lock, flags);
+       guard(raw_spinlock_irqsave)(&byt_lock);
 
        for (i = 0; i < vg->soc->npins; i++) {
                void __iomem *reg;
@@ -1743,7 +1697,6 @@ static int byt_gpio_resume(struct device *dev)
                }
        }
 
-       raw_spin_unlock_irqrestore(&byt_lock, flags);
        return 0;
 }
 
index 3118c7c8842fe1e2191627ba8c2c955d4049a3fe..d99541676630deb8b405774f2de9042035aa9f55 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -1000,14 +1001,12 @@ static const struct platform_device_id bxt_pinctrl_platform_ids[] = {
 };
 MODULE_DEVICE_TABLE(platform, bxt_pinctrl_platform_ids);
 
-static INTEL_PINCTRL_PM_OPS(bxt_pinctrl_pm_ops);
-
 static struct platform_driver bxt_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_uid,
        .driver = {
                .name = "broxton-pinctrl",
                .acpi_match_table = bxt_pinctrl_acpi_match,
-               .pm = &bxt_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
        .id_table = bxt_pinctrl_platform_ids,
 };
index 95976abfb7850f95e2f3e7d05926182a38c1c3ee..1aa09f950be1ebacef7c574f410c9e40ff0b3b76 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -824,14 +825,12 @@ static const struct acpi_device_id cnl_pinctrl_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, cnl_pinctrl_acpi_match);
 
-static INTEL_PINCTRL_PM_OPS(cnl_pinctrl_pm_ops);
-
 static struct platform_driver cnl_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_hid,
        .driver = {
                .name = "cannonlake-pinctrl",
                .acpi_match_table = cnl_pinctrl_acpi_match,
-               .pm = &cnl_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 module_platform_driver(cnl_pinctrl_driver);
index a20465eb2dc68db905646ad8f0cfb5dac275ded4..48af8930dd1f951241d3a32a9cd820076c8b6165 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -319,8 +320,6 @@ static const struct intel_pinctrl_soc_data cdf_soc_data = {
        .ncommunities = ARRAY_SIZE(cdf_communities),
 };
 
-static INTEL_PINCTRL_PM_OPS(cdf_pinctrl_pm_ops);
-
 static const struct acpi_device_id cdf_pinctrl_acpi_match[] = {
        { "INTC3001", (kernel_ulong_t)&cdf_soc_data },
        { }
@@ -332,7 +331,7 @@ static struct platform_driver cdf_pinctrl_driver = {
        .driver = {
                .name = "cedarfork-pinctrl",
                .acpi_match_table = cdf_pinctrl_acpi_match,
-               .pm = &cdf_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 
index 562a4f9188e4bdaae43b8c481a68b79b1a689d82..666507f54f2761a6d08e24d6f4fedd6e9640a832 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -249,8 +250,6 @@ static const struct intel_pinctrl_soc_data dnv_soc_data = {
        .ncommunities = ARRAY_SIZE(dnv_communities),
 };
 
-static INTEL_PINCTRL_PM_OPS(dnv_pinctrl_pm_ops);
-
 static const struct acpi_device_id dnv_pinctrl_acpi_match[] = {
        { "INTC3000", (kernel_ulong_t)&dnv_soc_data },
        { }
@@ -268,7 +267,7 @@ static struct platform_driver dnv_pinctrl_driver = {
        .driver = {
                .name = "denverton-pinctrl",
                .acpi_match_table = dnv_pinctrl_acpi_match,
-               .pm = &dnv_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
        .id_table = dnv_pinctrl_platform_ids,
 };
index 81581ab8531675cef74dd5e956caa8e3897ff42a..1678634ebc06c6d3e25f34a52d04df5945ff7270 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -485,14 +486,12 @@ static const struct acpi_device_id ehl_pinctrl_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, ehl_pinctrl_acpi_match);
 
-static INTEL_PINCTRL_PM_OPS(ehl_pinctrl_pm_ops);
-
 static struct platform_driver ehl_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_uid,
        .driver = {
                .name = "elkhartlake-pinctrl",
                .acpi_match_table = ehl_pinctrl_acpi_match,
-               .pm = &ehl_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 module_platform_driver(ehl_pinctrl_driver);
index 099ec8351d5fad192dd596539cd4a38eb8a3bd19..e4798d32492c5888e4052553e73f28a5b422690c 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -358,14 +359,12 @@ static const struct acpi_device_id ebg_pinctrl_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, ebg_pinctrl_acpi_match);
 
-static INTEL_PINCTRL_PM_OPS(ebg_pinctrl_pm_ops);
-
 static struct platform_driver ebg_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_hid,
        .driver = {
                .name = "emmitsburg-pinctrl",
                .acpi_match_table = ebg_pinctrl_acpi_match,
-               .pm = &ebg_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 module_platform_driver(ebg_pinctrl_driver);
index 9effa06b61e91c6406204d3318a0b74c6b5012a8..6dcf0ac2059f5da3ef6c898093c7c05e2c8f6e7c 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -447,14 +448,12 @@ static const struct acpi_device_id glk_pinctrl_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, glk_pinctrl_acpi_match);
 
-static INTEL_PINCTRL_PM_OPS(glk_pinctrl_pm_ops);
-
 static struct platform_driver glk_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_uid,
        .driver = {
                .name = "geminilake-pinctrl",
                .acpi_match_table = glk_pinctrl_acpi_match,
-               .pm = &glk_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 
index 300e1538c8d0551d21e6215ba7c931ad82dd48a8..fe3042de891a61d41b17dce76b471c37887150a5 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -668,8 +669,6 @@ static const struct intel_pinctrl_soc_data icln_soc_data = {
        .ncommunities = ARRAY_SIZE(icln_communities),
 };
 
-static INTEL_PINCTRL_PM_OPS(icl_pinctrl_pm_ops);
-
 static const struct acpi_device_id icl_pinctrl_acpi_match[] = {
        { "INT3455", (kernel_ulong_t)&icllp_soc_data },
        { "INT34C3", (kernel_ulong_t)&icln_soc_data },
@@ -682,7 +681,7 @@ static struct platform_driver icl_pinctrl_driver = {
        .driver = {
                .name = "icelake-pinctrl",
                .acpi_match_table = icl_pinctrl_acpi_match,
-               .pm = &icl_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 module_platform_driver(icl_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-intel-platform.c b/drivers/pinctrl/intel/pinctrl-intel-platform.c
new file mode 100644 (file)
index 0000000..4a19ab3
--- /dev/null
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2021-2023, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/string_helpers.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+struct intel_platform_pins {
+       struct pinctrl_pin_desc *pins;
+       size_t npins;
+};
+
+static int intel_platform_pinctrl_prepare_pins(struct device *dev, size_t base,
+                                              const char *name, u32 size,
+                                              struct intel_platform_pins *pins)
+{
+       struct pinctrl_pin_desc *descs;
+       char **pin_names;
+       unsigned int i;
+
+       pin_names = devm_kasprintf_strarray(dev, name, size);
+       if (IS_ERR(pin_names))
+               return PTR_ERR(pin_names);
+
+       descs = devm_krealloc_array(dev, pins->pins, base + size, sizeof(*descs), GFP_KERNEL);
+       if (!descs)
+               return -ENOMEM;
+
+       for (i = 0; i < size; i++) {
+               unsigned int pin_number = base + i;
+               char *pin_name = pin_names[i];
+               struct pinctrl_pin_desc *desc;
+
+               /* Unify delimiter for pin name */
+               strreplace(pin_name, '-', '_');
+
+               desc = &descs[pin_number];
+               desc->number = pin_number;
+               desc->name = pin_name;
+       }
+
+       pins->pins = descs;
+       pins->npins = base + size;
+
+       return 0;
+}
+
+static int intel_platform_pinctrl_prepare_group(struct device *dev,
+                                               struct fwnode_handle *child,
+                                               struct intel_padgroup *gpp,
+                                               struct intel_platform_pins *pins)
+{
+       size_t base = pins->npins;
+       const char *name;
+       u32 size;
+       int ret;
+
+       ret = fwnode_property_read_string(child, "intc-gpio-group-name", &name);
+       if (ret)
+               return ret;
+
+       ret = fwnode_property_read_u32(child, "intc-gpio-pad-count", &size);
+       if (ret)
+               return ret;
+
+       ret = intel_platform_pinctrl_prepare_pins(dev, base, name, size, pins);
+       if (ret)
+               return ret;
+
+       gpp->base = base;
+       gpp->size = size;
+       gpp->gpio_base = INTEL_GPIO_BASE_MATCH;
+
+       return 0;
+}
+
+static int intel_platform_pinctrl_prepare_community(struct device *dev,
+                                                   struct intel_community *community,
+                                                   struct intel_platform_pins *pins)
+{
+       struct fwnode_handle *child;
+       struct intel_padgroup *gpps;
+       unsigned int group;
+       size_t ngpps;
+       u32 offset;
+       int ret;
+
+       ret = device_property_read_u32(dev, "intc-gpio-pad-ownership-offset", &offset);
+       if (ret)
+               return ret;
+       community->padown_offset = offset;
+
+       ret = device_property_read_u32(dev, "intc-gpio-pad-configuration-lock-offset", &offset);
+       if (ret)
+               return ret;
+       community->padcfglock_offset = offset;
+
+       ret = device_property_read_u32(dev, "intc-gpio-host-software-pad-ownership-offset", &offset);
+       if (ret)
+               return ret;
+       community->hostown_offset = offset;
+
+       ret = device_property_read_u32(dev, "intc-gpio-gpi-interrupt-status-offset", &offset);
+       if (ret)
+               return ret;
+       community->is_offset = offset;
+
+       ret = device_property_read_u32(dev, "intc-gpio-gpi-interrupt-enable-offset", &offset);
+       if (ret)
+               return ret;
+       community->ie_offset = offset;
+
+       ngpps = device_get_child_node_count(dev);
+       if (!ngpps)
+               return -ENODEV;
+
+       gpps = devm_kcalloc(dev, ngpps, sizeof(*gpps), GFP_KERNEL);
+       if (!gpps)
+               return -ENOMEM;
+
+       group = 0;
+       device_for_each_child_node(dev, child) {
+               struct intel_padgroup *gpp = &gpps[group];
+
+               gpp->reg_num = group;
+
+               ret = intel_platform_pinctrl_prepare_group(dev, child, gpp, pins);
+               if (ret)
+                       return ret;
+
+               group++;
+       }
+
+       community->ngpps = ngpps;
+       community->gpps = gpps;
+
+       return 0;
+}
+
+static int intel_platform_pinctrl_prepare_soc_data(struct device *dev,
+                                                  struct intel_pinctrl_soc_data *data)
+{
+       struct intel_platform_pins pins = {};
+       struct intel_community *communities;
+       size_t ncommunities;
+       unsigned int i;
+       int ret;
+
+       /* Version 1.0 of the specification assumes only a single community per device node */
+       ncommunities = 1;
+       communities = devm_kcalloc(dev, ncommunities, sizeof(*communities), GFP_KERNEL);
+       if (!communities)
+               return -ENOMEM;
+
+       for (i = 0; i < ncommunities; i++) {
+               struct intel_community *community = &communities[i];
+
+               community->barno = i;
+               community->pin_base = pins.npins;
+
+               ret = intel_platform_pinctrl_prepare_community(dev, community, &pins);
+               if (ret)
+                       return ret;
+
+               community->npins = pins.npins - community->pin_base;
+       }
+
+       data->ncommunities = ncommunities;
+       data->communities = communities;
+
+       data->npins = pins.npins;
+       data->pins = pins.pins;
+
+       return 0;
+}
+
+static int intel_platform_pinctrl_probe(struct platform_device *pdev)
+{
+       struct intel_pinctrl_soc_data *data;
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       ret = intel_platform_pinctrl_prepare_soc_data(dev, data);
+       if (ret)
+               return ret;
+
+       return intel_pinctrl_probe(pdev, data);
+}
+
+static const struct acpi_device_id intel_platform_pinctrl_acpi_match[] = {
+       { "INTC105F" },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, intel_platform_pinctrl_acpi_match);
+
+static struct platform_driver intel_platform_pinctrl_driver = {
+       .probe = intel_platform_pinctrl_probe,
+       .driver = {
+               .name = "intel-pinctrl",
+               .acpi_match_table = intel_platform_pinctrl_acpi_match,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
+       },
+};
+module_platform_driver(intel_platform_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(PINCTRL_INTEL);
index 652ba451f885f0a40cd5a5aeea35493c4a498b24..d6f29e6faab79b9059f06b5ae7d933da4d28584c 100644 (file)
@@ -677,10 +677,6 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
        u32 term = 0, up = 0, value;
        void __iomem *padcfg1;
 
-       /* Set default strength value in case none is given */
-       if (arg == 1)
-               arg = 5000;
-
        switch (param) {
        case PIN_CONFIG_BIAS_DISABLE:
                break;
@@ -690,6 +686,7 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
                case 20000:
                        term = PADCFG1_TERM_20K;
                        break;
+               case 1: /* Set default strength value in case none is given */
                case 5000:
                        term = PADCFG1_TERM_5K;
                        break;
@@ -716,6 +713,7 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
                case 20000:
                        term = PADCFG1_TERM_20K;
                        break;
+               case 1: /* Set default strength value in case none is given */
                case 5000:
                        term = PADCFG1_TERM_5K;
                        break;
@@ -899,7 +897,7 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
  *
  * Return: a GPIO offset, or negative error code if translation can't be done.
  */
-static __maybe_unused int intel_pin_to_gpio(struct intel_pinctrl *pctrl, int pin)
+static int intel_pin_to_gpio(struct intel_pinctrl *pctrl, int pin)
 {
        const struct intel_community *community;
        const struct intel_padgroup *padgrp;
@@ -1506,8 +1504,8 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl,
        return PTR_ERR_OR_ZERO(pwm);
 }
 
-static int intel_pinctrl_probe(struct platform_device *pdev,
-                              const struct intel_pinctrl_soc_data *soc_data)
+int intel_pinctrl_probe(struct platform_device *pdev,
+                       const struct intel_pinctrl_soc_data *soc_data)
 {
        struct device *dev = &pdev->dev;
        struct intel_pinctrl *pctrl;
@@ -1625,6 +1623,7 @@ static int intel_pinctrl_probe(struct platform_device *pdev,
 
        return 0;
 }
+EXPORT_SYMBOL_NS_GPL(intel_pinctrl_probe, PINCTRL_INTEL);
 
 int intel_pinctrl_probe_by_hid(struct platform_device *pdev)
 {
@@ -1653,7 +1652,7 @@ EXPORT_SYMBOL_NS_GPL(intel_pinctrl_probe_by_uid, PINCTRL_INTEL);
 const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev)
 {
        const struct intel_pinctrl_soc_data * const *table;
-       const struct intel_pinctrl_soc_data *data = NULL;
+       const struct intel_pinctrl_soc_data *data;
        struct device *dev = &pdev->dev;
 
        table = device_get_match_data(dev);
@@ -1662,11 +1661,10 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
                unsigned int i;
 
                for (i = 0; table[i]; i++) {
-                       if (acpi_dev_uid_match(adev, table[i]->uid)) {
-                               data = table[i];
+                       if (acpi_dev_uid_match(adev, table[i]->uid))
                                break;
-                       }
                }
+               data = table[i];
        } else {
                const struct platform_device_id *id;
 
@@ -1682,7 +1680,6 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
 }
 EXPORT_SYMBOL_NS_GPL(intel_pinctrl_get_soc_data, PINCTRL_INTEL);
 
-#ifdef CONFIG_PM_SLEEP
 static bool __intel_gpio_is_direct_irq(u32 value)
 {
        return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
@@ -1728,7 +1725,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
        return false;
 }
 
-int intel_pinctrl_suspend_noirq(struct device *dev)
+static int intel_pinctrl_suspend_noirq(struct device *dev)
 {
        struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
        struct intel_community_context *communities;
@@ -1771,7 +1768,6 @@ int intel_pinctrl_suspend_noirq(struct device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq);
 
 static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
 {
@@ -1838,7 +1834,7 @@ static void intel_restore_padcfg(struct intel_pinctrl *pctrl, unsigned int pin,
        dev_dbg(dev, "restored pin %u padcfg%u %#08x\n", pin, n, readl(padcfg));
 }
 
-int intel_pinctrl_resume_noirq(struct device *dev)
+static int intel_pinctrl_resume_noirq(struct device *dev)
 {
        struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
        const struct intel_community_context *communities;
@@ -1882,8 +1878,10 @@ int intel_pinctrl_resume_noirq(struct device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(intel_pinctrl_resume_noirq);
-#endif
+
+EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(intel_pinctrl_pm_ops, PINCTRL_INTEL) = {
+       NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend_noirq, intel_pinctrl_resume_noirq)
+};
 
 MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
index 2bb553598e8bd0d4dda73697234d68eb35597a55..fde65e18cd145ecb1eb809f87ecec289067e44e8 100644 (file)
@@ -252,19 +252,13 @@ struct intel_pinctrl {
        int irq;
 };
 
+int intel_pinctrl_probe(struct platform_device *pdev,
+                       const struct intel_pinctrl_soc_data *soc_data);
+
 int intel_pinctrl_probe_by_hid(struct platform_device *pdev);
 int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
 
-#ifdef CONFIG_PM_SLEEP
-int intel_pinctrl_suspend_noirq(struct device *dev);
-int intel_pinctrl_resume_noirq(struct device *dev);
-#endif
-
-#define INTEL_PINCTRL_PM_OPS(_name)                                    \
-const struct dev_pm_ops _name = {                                      \
-       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend_noirq,      \
-                                     intel_pinctrl_resume_noirq)       \
-}
+extern const struct dev_pm_ops intel_pinctrl_pm_ops;
 
 struct intel_community *intel_get_community(struct intel_pinctrl *pctrl, unsigned int pin);
 
index 50f137deed9c169b714ed35e4f5f4e00838b3541..3525480428ea66b226d896dbe51d9926738fddd5 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -326,14 +327,12 @@ static const struct acpi_device_id jsl_pinctrl_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, jsl_pinctrl_acpi_match);
 
-static INTEL_PINCTRL_PM_OPS(jsl_pinctrl_pm_ops);
-
 static struct platform_driver jsl_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_hid,
        .driver = {
                .name = "jasperlake-pinctrl",
                .acpi_match_table = jsl_pinctrl_acpi_match,
-               .pm = &jsl_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 module_platform_driver(jsl_pinctrl_driver);
index 0b94e11b78ac2aef544afafa5855b392f3701116..adef85db82ca7626732973ac3c768a496bd4a5f3 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -347,14 +348,12 @@ static const struct acpi_device_id lkf_pinctrl_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, lkf_pinctrl_acpi_match);
 
-static INTEL_PINCTRL_PM_OPS(lkf_pinctrl_pm_ops);
-
 static struct platform_driver lkf_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_hid,
        .driver = {
                .name = "lakefield-pinctrl",
                .acpi_match_table = lkf_pinctrl_acpi_match,
-               .pm = &lkf_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 module_platform_driver(lkf_pinctrl_driver);
index aa725a5d62b9c9ff2ddd26d85adc57c93b701a76..a304d30ea9ed33503f9486ca18bcf2d58493df88 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -301,8 +302,6 @@ static const struct intel_pinctrl_soc_data lbg_soc_data = {
        .ncommunities = ARRAY_SIZE(lbg_communities),
 };
 
-static INTEL_PINCTRL_PM_OPS(lbg_pinctrl_pm_ops);
-
 static const struct acpi_device_id lbg_pinctrl_acpi_match[] = {
        { "INT3536", (kernel_ulong_t)&lbg_soc_data },
        { }
@@ -314,7 +313,7 @@ static struct platform_driver lbg_pinctrl_driver = {
        .driver = {
                .name = "lewisburg-pinctrl",
                .acpi_match_table = lbg_pinctrl_acpi_match,
-               .pm = &lbg_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 module_platform_driver(lbg_pinctrl_driver);
index e6878e4cf20cb508d46ae51f74f4403ad3fee105..1fb0bba8b386b9114f240e2767f13888f24821ae 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/acpi.h>
 #include <linux/array_size.h>
 #include <linux/bitops.h>
+#include <linux/cleanup.h>
 #include <linux/gpio/driver.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -291,10 +292,9 @@ static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
 {
        struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
        const struct intel_pingroup *grp = &lg->soc->groups[group];
-       unsigned long flags;
        int i;
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
+       guard(raw_spinlock_irqsave)(&lg->lock);
 
        /* Now enable the mux setting for each pin in the group */
        for (i = 0; i < grp->grp.npins; i++) {
@@ -312,8 +312,6 @@ static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
                iowrite32(value, reg);
        }
 
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
-
        return 0;
 }
 
@@ -334,10 +332,9 @@ static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
        struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
        void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
        void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
-       unsigned long flags;
        u32 value;
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
+       guard(raw_spinlock_irqsave)(&lg->lock);
 
        /*
         * Reconfigure pin to GPIO mode if needed and issue a warning,
@@ -352,8 +349,6 @@ static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
        /* Enable input sensing */
        lp_gpio_enable_input(conf2);
 
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
-
        return 0;
 }
 
@@ -363,14 +358,11 @@ static void lp_gpio_disable_free(struct pinctrl_dev *pctldev,
 {
        struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
        void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
+       guard(raw_spinlock_irqsave)(&lg->lock);
 
        /* Disable input sensing */
        lp_gpio_disable_input(conf2);
-
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
 }
 
 static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -379,10 +371,9 @@ static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
 {
        struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
        void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
-       unsigned long flags;
        u32 value;
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
+       guard(raw_spinlock_irqsave)(&lg->lock);
 
        value = ioread32(reg);
        value &= ~DIR_BIT;
@@ -400,8 +391,6 @@ static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
        }
        iowrite32(value, reg);
 
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
-
        return 0;
 }
 
@@ -421,13 +410,11 @@ static int lp_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
        struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
        void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
        enum pin_config_param param = pinconf_to_config_param(*config);
-       unsigned long flags;
        u32 value, pull;
        u16 arg;
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
-       value = ioread32(conf2);
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
+       scoped_guard(raw_spinlock_irqsave, &lg->lock)
+               value = ioread32(conf2);
 
        pull = value & GPIWP_MASK;
 
@@ -464,11 +451,10 @@ static int lp_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
        struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
        void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
        enum pin_config_param param;
-       unsigned long flags;
-       int i, ret = 0;
+       unsigned int i;
        u32 value;
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
+       guard(raw_spinlock_irqsave)(&lg->lock);
 
        value = ioread32(conf2);
 
@@ -489,19 +475,13 @@ static int lp_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
                        value |= GPIWP_UP;
                        break;
                default:
-                       ret = -ENOTSUPP;
+                       return -ENOTSUPP;
                }
-
-               if (ret)
-                       break;
        }
 
-       if (!ret)
-               iowrite32(value, conf2);
+       iowrite32(value, conf2);
 
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
-
-       return ret;
+       return 0;
 }
 
 static const struct pinconf_ops lptlp_pinconf_ops = {
@@ -527,16 +507,13 @@ static void lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
 {
        struct intel_pinctrl *lg = gpiochip_get_data(chip);
        void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
+       guard(raw_spinlock_irqsave)(&lg->lock);
 
        if (value)
                iowrite32(ioread32(reg) | OUT_LVL_BIT, reg);
        else
                iowrite32(ioread32(reg) & ~OUT_LVL_BIT, reg);
-
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
 }
 
 static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -592,11 +569,10 @@ static void lp_irq_ack(struct irq_data *d)
        struct intel_pinctrl *lg = gpiochip_get_data(gc);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_STAT);
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
+       guard(raw_spinlock_irqsave)(&lg->lock);
+
        iowrite32(BIT(hwirq % 32), reg);
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
 }
 
 static void lp_irq_unmask(struct irq_data *d)
@@ -613,13 +589,11 @@ static void lp_irq_enable(struct irq_data *d)
        struct intel_pinctrl *lg = gpiochip_get_data(gc);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
-       unsigned long flags;
 
        gpiochip_enable_irq(gc, hwirq);
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
-       iowrite32(ioread32(reg) | BIT(hwirq % 32), reg);
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
+       scoped_guard(raw_spinlock_irqsave, &lg->lock)
+               iowrite32(ioread32(reg) | BIT(hwirq % 32), reg);
 }
 
 static void lp_irq_disable(struct irq_data *d)
@@ -628,11 +602,9 @@ static void lp_irq_disable(struct irq_data *d)
        struct intel_pinctrl *lg = gpiochip_get_data(gc);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
-       iowrite32(ioread32(reg) & ~BIT(hwirq % 32), reg);
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
+       scoped_guard(raw_spinlock_irqsave, &lg->lock)
+               iowrite32(ioread32(reg) & ~BIT(hwirq % 32), reg);
 
        gpiochip_disable_irq(gc, hwirq);
 }
@@ -642,7 +614,6 @@ static int lp_irq_set_type(struct irq_data *d, unsigned int type)
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct intel_pinctrl *lg = gpiochip_get_data(gc);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
-       unsigned long flags;
        void __iomem *reg;
        u32 value;
 
@@ -656,7 +627,8 @@ static int lp_irq_set_type(struct irq_data *d, unsigned int type)
                return -EBUSY;
        }
 
-       raw_spin_lock_irqsave(&lg->lock, flags);
+       guard(raw_spinlock_irqsave)(&lg->lock);
+
        value = ioread32(reg);
 
        /* set both TRIG_SEL and INV bits to 0 for rising edge */
@@ -682,8 +654,6 @@ static int lp_irq_set_type(struct irq_data *d, unsigned int type)
        else if (type & IRQ_TYPE_LEVEL_MASK)
                irq_set_handler_locked(d, handle_level_irq);
 
-       raw_spin_unlock_irqrestore(&lg->lock, flags);
-
        return 0;
 }
 
index 7ced2b402dce04a06127d4c1863a2a2b1d58c719..cc44890c6699dcffb14d5ccee205b89417cae5d5 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -589,14 +590,12 @@ static const struct acpi_device_id mtl_pinctrl_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, mtl_pinctrl_acpi_match);
 
-static INTEL_PINCTRL_PM_OPS(mtl_pinctrl_pm_ops);
-
 static struct platform_driver mtl_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_hid,
        .driver = {
                .name = "meteorlake-pinctrl",
                .acpi_match_table = mtl_pinctrl_acpi_match,
-               .pm = &mtl_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 module_platform_driver(mtl_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-meteorpoint.c b/drivers/pinctrl/intel/pinctrl-meteorpoint.c
new file mode 100644 (file)
index 0000000..77e9777
--- /dev/null
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Meteor Point PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2022-2023, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define MTP_PAD_OWN    0x0b0
+#define MTP_PADCFGLOCK 0x110
+#define MTP_HOSTSW_OWN 0x150
+#define MTP_GPI_IS     0x200
+#define MTP_GPI_IE     0x220
+
+#define MTP_GPP(r, s, e, g)                            \
+       {                                               \
+               .reg_num = (r),                         \
+               .base = (s),                            \
+               .size = ((e) - (s) + 1),                \
+               .gpio_base = (g),                       \
+       }
+
+#define MTP_COMMUNITY(b, s, e, g)                      \
+       INTEL_COMMUNITY_GPPS(b, s, e, g, MTP)
+
+/* Meteor Point-S */
+static const struct pinctrl_pin_desc mtps_pins[] = {
+       /* GPP_D */
+       PINCTRL_PIN(0, "GPP_D_0"),
+       PINCTRL_PIN(1, "GPP_D_1"),
+       PINCTRL_PIN(2, "GPP_D_2"),
+       PINCTRL_PIN(3, "GPP_D_3"),
+       PINCTRL_PIN(4, "GPP_D_4"),
+       PINCTRL_PIN(5, "CNV_RF_RESET_B"),
+       PINCTRL_PIN(6, "CRF_CLKREQ"),
+       PINCTRL_PIN(7, "GPP_D_7"),
+       PINCTRL_PIN(8, "GPP_D_8"),
+       PINCTRL_PIN(9, "SML0CLK"),
+       PINCTRL_PIN(10, "SML0DATA"),
+       PINCTRL_PIN(11, "GPP_D_11"),
+       PINCTRL_PIN(12, "GPP_D_12"),
+       PINCTRL_PIN(13, "GPP_D_13"),
+       PINCTRL_PIN(14, "GPP_D_14"),
+       PINCTRL_PIN(15, "GPP_D_15"),
+       PINCTRL_PIN(16, "GPP_D_16"),
+       PINCTRL_PIN(17, "GPP_D_17"),
+       PINCTRL_PIN(18, "GPP_D_18"),
+       PINCTRL_PIN(19, "GPP_D_19"),
+       PINCTRL_PIN(20, "GPP_D_20"),
+       PINCTRL_PIN(21, "GPP_D_21"),
+       PINCTRL_PIN(22, "GPP_D_22"),
+       PINCTRL_PIN(23, "GPP_D_23"),
+       PINCTRL_PIN(24, "GSPI3_CLK_LOOPBK"),
+       /* GPP_R */
+       PINCTRL_PIN(25, "HDA_BCLK"),
+       PINCTRL_PIN(26, "HDA_SYNC"),
+       PINCTRL_PIN(27, "HDA_SDO"),
+       PINCTRL_PIN(28, "HDA_SDI_0"),
+       PINCTRL_PIN(29, "HDA_RSTB"),
+       PINCTRL_PIN(30, "GPP_R_5"),
+       PINCTRL_PIN(31, "GPP_R_6"),
+       PINCTRL_PIN(32, "GPP_R_7"),
+       PINCTRL_PIN(33, "GPP_R_8"),
+       PINCTRL_PIN(34, "GPP_R_9"),
+       PINCTRL_PIN(35, "GPP_R_10"),
+       PINCTRL_PIN(36, "GPP_R_11"),
+       PINCTRL_PIN(37, "GPP_R_12"),
+       PINCTRL_PIN(38, "GSPI2_CLK_LOOPBK"),
+       /* GPP_J */
+       PINCTRL_PIN(39, "GPP_J_0"),
+       PINCTRL_PIN(40, "CNV_BRI_DT"),
+       PINCTRL_PIN(41, "CNV_BRI_RSP"),
+       PINCTRL_PIN(42, "CNV_RGI_DT"),
+       PINCTRL_PIN(43, "CNV_RGI_RSP"),
+       PINCTRL_PIN(44, "GPP_J_5"),
+       PINCTRL_PIN(45, "GPP_J_6"),
+       PINCTRL_PIN(46, "BOOTHALT_B"),
+       PINCTRL_PIN(47, "RTCCLKOUT"),
+       PINCTRL_PIN(48, "BPKI3C_SDA"),
+       PINCTRL_PIN(49, "BPKI3C_SCL"),
+       PINCTRL_PIN(50, "DAM"),
+       PINCTRL_PIN(51, "HDACPU_SDI"),
+       PINCTRL_PIN(52, "HDACPU_SDO"),
+       PINCTRL_PIN(53, "HDACPU_BCLK"),
+       PINCTRL_PIN(54, "AUX_PWRGD"),
+       PINCTRL_PIN(55, "GLB_RST_WARN_B"),
+       PINCTRL_PIN(56, "RESET_SYNCB"),
+       /* vGPIO */
+       PINCTRL_PIN(57, "CNV_BTEN"),
+       PINCTRL_PIN(58, "CNV_BT_HOST_WAKEB"),
+       PINCTRL_PIN(59, "CNV_BT_IF_SELECT"),
+       PINCTRL_PIN(60, "vCNV_BT_UART_TXD"),
+       PINCTRL_PIN(61, "vCNV_BT_UART_RXD"),
+       PINCTRL_PIN(62, "vCNV_BT_UART_CTS_B"),
+       PINCTRL_PIN(63, "vCNV_BT_UART_RTS_B"),
+       PINCTRL_PIN(64, "vCNV_MFUART1_TXD"),
+       PINCTRL_PIN(65, "vCNV_MFUART1_RXD"),
+       PINCTRL_PIN(66, "vCNV_MFUART1_CTS_B"),
+       PINCTRL_PIN(67, "vCNV_MFUART1_RTS_B"),
+       PINCTRL_PIN(68, "vUART0_TXD"),
+       PINCTRL_PIN(69, "vUART0_RXD"),
+       PINCTRL_PIN(70, "vUART0_CTS_B"),
+       PINCTRL_PIN(71, "vUART0_RTS_B"),
+       PINCTRL_PIN(72, "vISH_UART0_TXD"),
+       PINCTRL_PIN(73, "vISH_UART0_RXD"),
+       PINCTRL_PIN(74, "vISH_UART0_CTS_B"),
+       PINCTRL_PIN(75, "vISH_UART0_RTS_B"),
+       PINCTRL_PIN(76, "vCNV_BT_I2S_BCLK"),
+       PINCTRL_PIN(77, "vCNV_BT_I2S_WS_SYNC"),
+       PINCTRL_PIN(78, "vCNV_BT_I2S_SDO"),
+       PINCTRL_PIN(79, "vCNV_BT_I2S_SDI"),
+       PINCTRL_PIN(80, "vI2S2_SCLK"),
+       PINCTRL_PIN(81, "vI2S2_SFRM"),
+       PINCTRL_PIN(82, "vI2S2_TXD"),
+       PINCTRL_PIN(83, "vI2S2_RXD"),
+       PINCTRL_PIN(84, "THC0_WOT_INT"),
+       PINCTRL_PIN(85, "THC1_WOT_INT"),
+       PINCTRL_PIN(86, "THC0_WHC_INT"),
+       PINCTRL_PIN(87, "THC1_WHC_INT"),
+       /* GPP_A */
+       PINCTRL_PIN(88, "ESPI_IO_0"),
+       PINCTRL_PIN(89, "ESPI_IO_1"),
+       PINCTRL_PIN(90, "ESPI_IO_2"),
+       PINCTRL_PIN(91, "ESPI_IO_3"),
+       PINCTRL_PIN(92, "ESPI_CS0B"),
+       PINCTRL_PIN(93, "ESPI_CLK"),
+       PINCTRL_PIN(94, "ESPI_RESETB"),
+       PINCTRL_PIN(95, "ESPI_CS1B"),
+       PINCTRL_PIN(96, "ESPI_CS2B"),
+       PINCTRL_PIN(97, "ESPI_CS3B"),
+       PINCTRL_PIN(98, "ESPI_ALERT0B"),
+       PINCTRL_PIN(99, "ESPI_ALERT1B"),
+       PINCTRL_PIN(100, "ESPI_ALERT2B"),
+       PINCTRL_PIN(101, "ESPI_ALERT3B"),
+       PINCTRL_PIN(102, "ESPI_CLK_LOOPBK"),
+       /* DIR_ESPI */
+       PINCTRL_PIN(103, "PWRBTNB_OUT"),
+       PINCTRL_PIN(104, "DMI_PERSTB"),
+       PINCTRL_PIN(105, "DMI_CLKREQB"),
+       PINCTRL_PIN(106, "DIR_ESPI_IO_0"),
+       PINCTRL_PIN(107, "DIR_ESPI_IO_1"),
+       PINCTRL_PIN(108, "DIR_ESPI_IO_2"),
+       PINCTRL_PIN(109, "DIR_ESPI_IO_3"),
+       PINCTRL_PIN(110, "DIR_ESPI_CSB"),
+       PINCTRL_PIN(111, "DIR_ESPI_RESETB"),
+       PINCTRL_PIN(112, "DIR_ESPI_CLK"),
+       PINCTRL_PIN(113, "DIR_ESPI_RCLK"),
+       PINCTRL_PIN(114, "DIR_ESPI_ALERTB"),
+       /* GPP_B */
+       PINCTRL_PIN(115, "GPP_B_0"),
+       PINCTRL_PIN(116, "GPP_B_1"),
+       PINCTRL_PIN(117, "GPP_B_2"),
+       PINCTRL_PIN(118, "GPP_B_3"),
+       PINCTRL_PIN(119, "GPP_B_4"),
+       PINCTRL_PIN(120, "GPP_B_5"),
+       PINCTRL_PIN(121, "CLKOUT_48"),
+       PINCTRL_PIN(122, "GPP_B_7"),
+       PINCTRL_PIN(123, "GPP_B_8"),
+       PINCTRL_PIN(124, "GPP_B_9"),
+       PINCTRL_PIN(125, "GPP_B_10"),
+       PINCTRL_PIN(126, "GPP_B_11"),
+       PINCTRL_PIN(127, "SLP_S0B"),
+       PINCTRL_PIN(128, "PLTRSTB"),
+       PINCTRL_PIN(129, "GPP_B_14"),
+       PINCTRL_PIN(130, "GPP_B_15"),
+       PINCTRL_PIN(131, "GPP_B_16"),
+       PINCTRL_PIN(132, "GPP_B_17"),
+       PINCTRL_PIN(133, "GPP_B_18"),
+       PINCTRL_PIN(134, "FUSA_DIAGTEST_EN"),
+       PINCTRL_PIN(135, "FUSA_DIAGTEST_MODE"),
+       PINCTRL_PIN(136, "GPP_B_21"),
+       /* SPI0 */
+       PINCTRL_PIN(137, "SPI0_IO_2"),
+       PINCTRL_PIN(138, "SPI0_IO_3"),
+       PINCTRL_PIN(139, "SPI0_MOSI_IO_0"),
+       PINCTRL_PIN(140, "SPI0_MISO_IO_1"),
+       PINCTRL_PIN(141, "SPI0_TPM_CSB"),
+       PINCTRL_PIN(142, "SPI0_FLASH_0_CSB"),
+       PINCTRL_PIN(143, "SPI0_FLASH_1_CSB"),
+       PINCTRL_PIN(144, "SPI0_CLK"),
+       PINCTRL_PIN(145, "SPI0_CLK_LOOPBK"),
+       /* GPP_C */
+       PINCTRL_PIN(146, "SMBCLK"),
+       PINCTRL_PIN(147, "SMBDATA"),
+       PINCTRL_PIN(148, "SMBALERTB"),
+       PINCTRL_PIN(149, "GPP_C_3"),
+       PINCTRL_PIN(150, "GPP_C_4"),
+       PINCTRL_PIN(151, "GPP_C_5"),
+       PINCTRL_PIN(152, "GPP_C_6"),
+       PINCTRL_PIN(153, "GPP_C_7"),
+       PINCTRL_PIN(154, "GPP_C_8"),
+       PINCTRL_PIN(155, "GPP_C_9"),
+       PINCTRL_PIN(156, "GPP_C_10"),
+       PINCTRL_PIN(157, "GPP_C_11"),
+       PINCTRL_PIN(158, "GPP_C_12"),
+       PINCTRL_PIN(159, "GPP_C_13"),
+       PINCTRL_PIN(160, "GPP_C_14"),
+       PINCTRL_PIN(161, "GPP_C_15"),
+       PINCTRL_PIN(162, "GPP_C_16"),
+       PINCTRL_PIN(163, "GPP_C_17"),
+       PINCTRL_PIN(164, "GPP_C_18"),
+       PINCTRL_PIN(165, "GPP_C_19"),
+       PINCTRL_PIN(166, "GPP_C_20"),
+       PINCTRL_PIN(167, "GPP_C_21"),
+       PINCTRL_PIN(168, "GPP_C_22"),
+       PINCTRL_PIN(169, "GPP_C_23"),
+       /* GPP_H */
+       PINCTRL_PIN(170, "GPP_H_0"),
+       PINCTRL_PIN(171, "GPP_H_1"),
+       PINCTRL_PIN(172, "GPP_H_2"),
+       PINCTRL_PIN(173, "GPP_H_3"),
+       PINCTRL_PIN(174, "GPP_H_4"),
+       PINCTRL_PIN(175, "GPP_H_5"),
+       PINCTRL_PIN(176, "GPP_H_6"),
+       PINCTRL_PIN(177, "GPP_H_7"),
+       PINCTRL_PIN(178, "GPP_H_8"),
+       PINCTRL_PIN(179, "GPP_H_9"),
+       PINCTRL_PIN(180, "GPP_H_10"),
+       PINCTRL_PIN(181, "GPP_H_11"),
+       PINCTRL_PIN(182, "GPP_H_12"),
+       PINCTRL_PIN(183, "GPP_H_13"),
+       PINCTRL_PIN(184, "GPP_H_14"),
+       PINCTRL_PIN(185, "GPP_H_15"),
+       PINCTRL_PIN(186, "GPP_H_16"),
+       PINCTRL_PIN(187, "GPP_H_17"),
+       PINCTRL_PIN(188, "GPP_H_18"),
+       PINCTRL_PIN(189, "GPP_H_19"),
+       /* vGPIO_3 */
+       PINCTRL_PIN(190, "CPU_PCIE_LNK_DN_0"),
+       PINCTRL_PIN(191, "CPU_PCIE_LNK_DN_1"),
+       PINCTRL_PIN(192, "CPU_PCIE_LNK_DN_2"),
+       PINCTRL_PIN(193, "CPU_PCIE_LNK_DN_3"),
+       /* vGPIO_0 */
+       PINCTRL_PIN(194, "ESPI_USB_OCB_0"),
+       PINCTRL_PIN(195, "ESPI_USB_OCB_1"),
+       PINCTRL_PIN(196, "ESPI_USB_OCB_2"),
+       PINCTRL_PIN(197, "ESPI_USB_OCB_3"),
+       PINCTRL_PIN(198, "USB_CPU_OCB_0"),
+       PINCTRL_PIN(199, "USB_CPU_OCB_1"),
+       PINCTRL_PIN(200, "USB_CPU_OCB_2"),
+       PINCTRL_PIN(201, "USB_CPU_OCB_3"),
+       /* vGPIO_4 */
+       PINCTRL_PIN(202, "ESPI_ISCLK_XTAL_CLKREQ"),
+       PINCTRL_PIN(203, "ISCLK_ESPI_XTAL_CLKACK"),
+       PINCTRL_PIN(204, "ME_SLPC_FTPM_ENABLE"),
+       PINCTRL_PIN(205, "GP_SLPC_DTFUS_CORE_SPITPM_DIS"),
+       PINCTRL_PIN(206, "GP_SLPC_SPI_STRAP_TOS"),
+       PINCTRL_PIN(207, "GP_SLPC_DTFUS_CORE_SPITPM_DIS_L01"),
+       PINCTRL_PIN(208, "GP_SLPC_SPI_STRAP_TOS_L01"),
+       PINCTRL_PIN(209, "LPC_PRR_TS_OVR"),
+       PINCTRL_PIN(210, "ITSS_KU1_SHTDWN"),
+       PINCTRL_PIN(211, "vGPIO_SPARE_0"),
+       PINCTRL_PIN(212, "vGPIO_SPARE_1"),
+       PINCTRL_PIN(213, "vGPIO_SPARE_2"),
+       PINCTRL_PIN(214, "vGPIO_SPARE_3"),
+       PINCTRL_PIN(215, "vGPIO_SPARE_4"),
+       PINCTRL_PIN(216, "vGPIO_SPARE_5"),
+       PINCTRL_PIN(217, "vGPIO_SPARE_6"),
+       PINCTRL_PIN(218, "vGPIO_SPARE_7"),
+       PINCTRL_PIN(219, "vGPIO_SPARE_8"),
+       PINCTRL_PIN(220, "vGPIO_SPARE_9"),
+       PINCTRL_PIN(221, "vGPIO_SPARE_10"),
+       PINCTRL_PIN(222, "vGPIO_SPARE_11"),
+       PINCTRL_PIN(223, "vGPIO_SPARE_12"),
+       PINCTRL_PIN(224, "vGPIO_SPARE_13"),
+       PINCTRL_PIN(225, "vGPIO_SPARE_14"),
+       PINCTRL_PIN(226, "vGPIO_SPARE_15"),
+       PINCTRL_PIN(227, "vGPIO_SPARE_16"),
+       PINCTRL_PIN(228, "vGPIO_SPARE_17"),
+       PINCTRL_PIN(229, "vGPIO_SPARE_18"),
+       PINCTRL_PIN(230, "vGPIO_SPARE_19"),
+       PINCTRL_PIN(231, "vGPIO_SPARE_20"),
+       PINCTRL_PIN(232, "vGPIO_SPARE_21"),
+       /* GPP_S */
+       PINCTRL_PIN(233, "GPP_S_0"),
+       PINCTRL_PIN(234, "GPP_S_1"),
+       PINCTRL_PIN(235, "GPP_S_2"),
+       PINCTRL_PIN(236, "GPP_S_3"),
+       PINCTRL_PIN(237, "GPP_S_4"),
+       PINCTRL_PIN(238, "GPP_S_5"),
+       PINCTRL_PIN(239, "GPP_S_6"),
+       PINCTRL_PIN(240, "GPP_S_7"),
+       /* GPP_E */
+       PINCTRL_PIN(241, "GPP_E_0"),
+       PINCTRL_PIN(242, "GPP_E_1"),
+       PINCTRL_PIN(243, "GPP_E_2"),
+       PINCTRL_PIN(244, "GPP_E_3"),
+       PINCTRL_PIN(245, "GPP_E_4"),
+       PINCTRL_PIN(246, "GPP_E_5"),
+       PINCTRL_PIN(247, "GPP_E_6"),
+       PINCTRL_PIN(248, "GPP_E_7"),
+       PINCTRL_PIN(249, "GPP_E_8"),
+       PINCTRL_PIN(250, "GPP_E_9"),
+       PINCTRL_PIN(251, "GPP_E_10"),
+       PINCTRL_PIN(252, "GPP_E_11"),
+       PINCTRL_PIN(253, "GPP_E_12"),
+       PINCTRL_PIN(254, "GPP_E_13"),
+       PINCTRL_PIN(255, "GPP_E_14"),
+       PINCTRL_PIN(256, "GPP_E_15"),
+       PINCTRL_PIN(257, "GPP_E_16"),
+       PINCTRL_PIN(258, "GPP_E_17"),
+       PINCTRL_PIN(259, "GPP_E_18"),
+       PINCTRL_PIN(260, "GPP_E_19"),
+       PINCTRL_PIN(261, "GPP_E_20"),
+       PINCTRL_PIN(262, "GPP_E_21"),
+       PINCTRL_PIN(263, "SPI1_CLK_LOOPBK"),
+       /* GPP_K */
+       PINCTRL_PIN(264, "GPP_K_0"),
+       PINCTRL_PIN(265, "GPP_K_1"),
+       PINCTRL_PIN(266, "GPP_K_2"),
+       PINCTRL_PIN(267, "GPP_K_3"),
+       PINCTRL_PIN(268, "GPP_K_4"),
+       PINCTRL_PIN(269, "GPP_K_5"),
+       PINCTRL_PIN(270, "FUSE_SORT_BUMP_0"),
+       PINCTRL_PIN(271, "FUSE_SORT_BUMP_1"),
+       PINCTRL_PIN(272, "CORE_VID_0"),
+       PINCTRL_PIN(273, "CORE_VID_1"),
+       PINCTRL_PIN(274, "FUSE_SORT_BUMP_2"),
+       PINCTRL_PIN(275, "MISC_SPARE"),
+       PINCTRL_PIN(276, "SYS_RESETB"),
+       PINCTRL_PIN(277, "MLK_RSTB"),
+       /* GPP_F */
+       PINCTRL_PIN(278, "SATAXPCIE_3"),
+       PINCTRL_PIN(279, "SATAXPCIE_4"),
+       PINCTRL_PIN(280, "SATAXPCIE_5"),
+       PINCTRL_PIN(281, "SATAXPCIE_6"),
+       PINCTRL_PIN(282, "SATAXPCIE_7"),
+       PINCTRL_PIN(283, "SATA_DEVSLP_3"),
+       PINCTRL_PIN(284, "SATA_DEVSLP_4"),
+       PINCTRL_PIN(285, "SATA_DEVSLP_5"),
+       PINCTRL_PIN(286, "SATA_DEVSLP_6"),
+       PINCTRL_PIN(287, "GPP_F_9"),
+       PINCTRL_PIN(288, "GPP_F_10"),
+       PINCTRL_PIN(289, "GPP_F_11"),
+       PINCTRL_PIN(290, "GPP_F_12"),
+       PINCTRL_PIN(291, "GPP_F_13"),
+       PINCTRL_PIN(292, "GPP_F_14"),
+       PINCTRL_PIN(293, "GPP_F_15"),
+       PINCTRL_PIN(294, "GPP_F_16"),
+       PINCTRL_PIN(295, "GPP_F_17"),
+       PINCTRL_PIN(296, "GPP_F_18"),
+       PINCTRL_PIN(297, "DNX_FORCE_RELOAD"),
+       PINCTRL_PIN(298, "GPP_F_20"),
+       PINCTRL_PIN(299, "GPP_F_21"),
+       PINCTRL_PIN(300, "GPP_F_22"),
+       PINCTRL_PIN(301, "GPP_F_23"),
+       /* GPP_I */
+       PINCTRL_PIN(302, "GPP_I_0"),
+       PINCTRL_PIN(303, "GPP_I_1"),
+       PINCTRL_PIN(304, "GPP_I_2"),
+       PINCTRL_PIN(305, "GPP_I_3"),
+       PINCTRL_PIN(306, "GPP_I_4"),
+       PINCTRL_PIN(307, "GPP_I_5"),
+       PINCTRL_PIN(308, "GPP_I_6"),
+       PINCTRL_PIN(309, "GPP_I_7"),
+       PINCTRL_PIN(310, "GPP_I_8"),
+       PINCTRL_PIN(311, "GPP_I_9"),
+       PINCTRL_PIN(312, "GPP_I_10"),
+       PINCTRL_PIN(313, "GPP_I_11"),
+       PINCTRL_PIN(314, "GPP_I_12"),
+       PINCTRL_PIN(315, "GPP_I_13"),
+       PINCTRL_PIN(316, "GPP_I_14"),
+       PINCTRL_PIN(317, "GPP_I_15"),
+       PINCTRL_PIN(318, "GPP_I_16"),
+       PINCTRL_PIN(319, "GSPI0_CLK_LOOPBK"),
+       PINCTRL_PIN(320, "GSPI1_CLK_LOOPBK"),
+       PINCTRL_PIN(321, "ISH_I3C0_CLK_LOOPBK"),
+       PINCTRL_PIN(322, "I3C0_CLK_LOOPBK"),
+       /* JTAG_CPU */
+       PINCTRL_PIN(323, "JTAG_TDO"),
+       PINCTRL_PIN(324, "JTAGX"),
+       PINCTRL_PIN(325, "PRDYB"),
+       PINCTRL_PIN(326, "PREQB"),
+       PINCTRL_PIN(327, "JTAG_TDI"),
+       PINCTRL_PIN(328, "JTAG_TMS"),
+       PINCTRL_PIN(329, "JTAG_TCK"),
+       PINCTRL_PIN(330, "DBG_PMODE"),
+       PINCTRL_PIN(331, "CPU_TRSTB"),
+       PINCTRL_PIN(332, "CPUPWRGD"),
+       PINCTRL_PIN(333, "PM_SPARE0"),
+       PINCTRL_PIN(334, "PM_SPARE1"),
+       PINCTRL_PIN(335, "CRASHLOG_TRIG_N"),
+       PINCTRL_PIN(336, "TRIGGER_IN"),
+       PINCTRL_PIN(337, "TRIGGER_OUT"),
+       PINCTRL_PIN(338, "FBRK_OUT_N"),
+};
+
+static const struct intel_padgroup mtps_community0_gpps[] = {
+       MTP_GPP(0, 0, 24, 0),           /* GPP_D */
+       MTP_GPP(1, 25, 38, 32),         /* GPP_R */
+       MTP_GPP(2, 39, 56, 64),         /* GPP_J */
+       MTP_GPP(3, 57, 87, 96),         /* vGPIO */
+};
+
+static const struct intel_padgroup mtps_community1_gpps[] = {
+       MTP_GPP(0, 88, 102, 128),       /* GPP_A */
+       MTP_GPP(1, 103, 114, 160),      /* DIR_ESPI */
+       MTP_GPP(2, 115, 136, 192),      /* GPP_B */
+};
+
+static const struct intel_padgroup mtps_community3_gpps[] = {
+       MTP_GPP(0, 137, 145, 224),      /* SPI0 */
+       MTP_GPP(1, 146, 169, 256),      /* GPP_C */
+       MTP_GPP(2, 170, 189, 288),      /* GPP_H */
+       MTP_GPP(3, 190, 193, 320),      /* vGPIO_3 */
+       MTP_GPP(4, 194, 201, 352),      /* vGPIO_0 */
+       MTP_GPP(5, 202, 232, 384),      /* vGPIO_4 */
+};
+
+static const struct intel_padgroup mtps_community4_gpps[] = {
+       MTP_GPP(0, 233, 240, 416),      /* GPP_S */
+       MTP_GPP(1, 241, 263, 448),      /* GPP_E */
+       MTP_GPP(2, 264, 277, 480),      /* GPP_K */
+       MTP_GPP(3, 278, 301, 512),      /* GPP_F */
+};
+
+static const struct intel_padgroup mtps_community5_gpps[] = {
+       MTP_GPP(0, 302, 322, 544),      /* GPP_I */
+       MTP_GPP(1, 323, 338, 576),      /* JTAG_CPU */
+};
+
+static const struct intel_community mtps_communities[] = {
+       MTP_COMMUNITY(0, 0, 87, mtps_community0_gpps),
+       MTP_COMMUNITY(1, 88, 136, mtps_community1_gpps),
+       MTP_COMMUNITY(2, 137, 232, mtps_community3_gpps),
+       MTP_COMMUNITY(3, 233, 301, mtps_community4_gpps),
+       MTP_COMMUNITY(4, 302, 338, mtps_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data mtps_soc_data = {
+       .pins = mtps_pins,
+       .npins = ARRAY_SIZE(mtps_pins),
+       .communities = mtps_communities,
+       .ncommunities = ARRAY_SIZE(mtps_communities),
+};
+
+static const struct acpi_device_id mtp_pinctrl_acpi_match[] = {
+       { "INTC1084", (kernel_ulong_t)&mtps_soc_data },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, mtp_pinctrl_acpi_match);
+
+static struct platform_driver mtp_pinctrl_driver = {
+       .probe = intel_pinctrl_probe_by_hid,
+       .driver = {
+               .name = "meteorpoint-pinctrl",
+               .acpi_match_table = mtp_pinctrl_acpi_match,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
+       },
+};
+module_platform_driver(mtp_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Meteor Point PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(PINCTRL_INTEL);
index b7a40ab0bca8f52a3a884eb26e0fd157c18e5d7e..55df9d2cfb1beead5f6ca290386046886c26ac27 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -579,14 +580,12 @@ static const struct acpi_device_id spt_pinctrl_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, spt_pinctrl_acpi_match);
 
-static INTEL_PINCTRL_PM_OPS(spt_pinctrl_pm_ops);
-
 static struct platform_driver spt_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_hid,
        .driver = {
                .name = "sunrisepoint-pinctrl",
                .acpi_match_table = spt_pinctrl_acpi_match,
-               .pm = &spt_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 
index 40dd60c9e5267e6c1c23f7d08cf017b0031bc73f..2cb0b4758269ee1d7808ec56b16a61209cc95985 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/bits.h>
+#include <linux/cleanup.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -220,7 +221,6 @@ static int tng_pinmux_set_mux(struct pinctrl_dev *pctldev,
        const struct intel_pingroup *grp = &tp->groups[group];
        u32 bits = grp->mode << BUFCFG_PINMODE_SHIFT;
        u32 mask = BUFCFG_PINMODE_MASK;
-       unsigned long flags;
        unsigned int i;
 
        /*
@@ -232,11 +232,11 @@ static int tng_pinmux_set_mux(struct pinctrl_dev *pctldev,
                        return -EBUSY;
        }
 
+       guard(raw_spinlock_irqsave)(&tp->lock);
+
        /* Now enable the mux setting for each pin in the group */
-       raw_spin_lock_irqsave(&tp->lock, flags);
        for (i = 0; i < grp->grp.npins; i++)
                tng_update_bufcfg(tp, grp->grp.pins[i], bits, mask);
-       raw_spin_unlock_irqrestore(&tp->lock, flags);
 
        return 0;
 }
@@ -248,14 +248,13 @@ static int tng_gpio_request_enable(struct pinctrl_dev *pctldev,
        struct tng_pinctrl *tp = pinctrl_dev_get_drvdata(pctldev);
        u32 bits = BUFCFG_PINMODE_GPIO << BUFCFG_PINMODE_SHIFT;
        u32 mask = BUFCFG_PINMODE_MASK;
-       unsigned long flags;
 
        if (!tng_buf_available(tp, pin))
                return -EBUSY;
 
-       raw_spin_lock_irqsave(&tp->lock, flags);
+       guard(raw_spinlock_irqsave)(&tp->lock);
+
        tng_update_bufcfg(tp, pin, bits, mask);
-       raw_spin_unlock_irqrestore(&tp->lock, flags);
 
        return 0;
 }
@@ -360,7 +359,6 @@ static int tng_config_set_pin(struct tng_pinctrl *tp, unsigned int pin,
        unsigned int param = pinconf_to_config_param(config);
        unsigned int arg = pinconf_to_config_argument(config);
        u32 mask, term, value = 0;
-       unsigned long flags;
 
        switch (param) {
        case PIN_CONFIG_BIAS_DISABLE:
@@ -368,20 +366,20 @@ static int tng_config_set_pin(struct tng_pinctrl *tp, unsigned int pin,
                break;
 
        case PIN_CONFIG_BIAS_PULL_UP:
-               /* Set default strength value in case none is given */
-               if (arg == 1)
-                       arg = 20000;
-
                switch (arg) {
                case 50000:
                        term = BUFCFG_PUPD_VAL_50K;
                        break;
+               case 1: /* Set default strength value in case none is given */
                case 20000:
                        term = BUFCFG_PUPD_VAL_20K;
                        break;
                case 2000:
                        term = BUFCFG_PUPD_VAL_2K;
                        break;
+               case 910:
+                       term = BUFCFG_PUPD_VAL_910;
+                       break;
                default:
                        return -EINVAL;
                }
@@ -391,20 +389,20 @@ static int tng_config_set_pin(struct tng_pinctrl *tp, unsigned int pin,
                break;
 
        case PIN_CONFIG_BIAS_PULL_DOWN:
-               /* Set default strength value in case none is given */
-               if (arg == 1)
-                       arg = 20000;
-
                switch (arg) {
                case 50000:
                        term = BUFCFG_PUPD_VAL_50K;
                        break;
+               case 1: /* Set default strength value in case none is given */
                case 20000:
                        term = BUFCFG_PUPD_VAL_20K;
                        break;
                case 2000:
                        term = BUFCFG_PUPD_VAL_2K;
                        break;
+               case 910:
+                       term = BUFCFG_PUPD_VAL_910;
+                       break;
                default:
                        return -EINVAL;
                }
@@ -432,9 +430,9 @@ static int tng_config_set_pin(struct tng_pinctrl *tp, unsigned int pin,
                return -EINVAL;
        }
 
-       raw_spin_lock_irqsave(&tp->lock, flags);
+       guard(raw_spinlock_irqsave)(&tp->lock);
+
        tng_update_bufcfg(tp, pin, value, mask);
-       raw_spin_unlock_irqrestore(&tp->lock, flags);
 
        return 0;
 }
index 4768a69a9258876404ad4888e78e45a8d5e28cf9..80cd7a06fe5ad68d6e096bf28c646d5111eb75f9 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 
 #include <linux/pinctrl/pinctrl.h>
 
@@ -743,14 +744,12 @@ static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
 };
 MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
 
-static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
-
 static struct platform_driver tgl_pinctrl_driver = {
        .probe = intel_pinctrl_probe_by_hid,
        .driver = {
                .name = "tigerlake-pinctrl",
                .acpi_match_table = tgl_pinctrl_acpi_match,
-               .pm = &tgl_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
        },
 };
 module_platform_driver(tgl_pinctrl_driver);
index c3d59eddd994aa2e00f125875bb7530ff89c1a1e..d972584c0519b7324cc560b85b81b2e4c9e6f60d 100644 (file)
@@ -56,12 +56,12 @@ static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
                return -EINVAL;
 
        dev_dbg(pctldev->dev, "enable function %s group %s\n",
-               func->name, grp->name);
+               func->name, grp->grp.name);
 
-       for (i = 0; i < grp->num_pins; i++) {
+       for (i = 0; i < grp->grp.npins; i++) {
                const struct mtk_pin_desc *desc;
                int *pin_modes = grp->data;
-               int pin = grp->pins[i];
+               int pin = grp->grp.pins[i];
 
                desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
                if (!desc->name)
@@ -602,13 +602,12 @@ static int mtk_build_groups(struct mtk_pinctrl *hw)
 
        for (i = 0; i < hw->soc->ngrps; i++) {
                const struct group_desc *group = hw->soc->grps + i;
+               const struct pingroup *grp = &group->grp;
 
-               err = pinctrl_generic_add_group(hw->pctrl, group->name,
-                                               group->pins, group->num_pins,
+               err = pinctrl_generic_add_group(hw->pctrl, grp->name, grp->pins, grp->npins,
                                                group->data);
                if (err < 0) {
-                       dev_err(hw->dev, "Failed to register group %s\n",
-                               group->name);
+                       dev_err(hw->dev, "Failed to register group %s\n", grp->name);
                        return err;
                }
        }
index e1b4b82b9d3db47f238c0d3a5a83ef9ad7553f6b..e0313e7a1fe00e48e36289804de91ba34fc5811d 100644 (file)
                .funcs = NULL,                          \
        }
 
-#define PINCTRL_PIN_GROUP(name, id)                    \
-       {                                               \
-               name,                                   \
-               id##_pins,                              \
-               ARRAY_SIZE(id##_pins),                  \
-               id##_funcs,                             \
+#define PINCTRL_PIN_GROUP(_name_, id)                                                  \
+       {                                                                               \
+               .grp = PINCTRL_PINGROUP(_name_, id##_pins, ARRAY_SIZE(id##_pins)),      \
+               .data = id##_funcs,                                                     \
        }
 
 int mtk_moore_pinctrl_probe(struct platform_device *pdev,
index 5fb377c1668bb74c3b242e9e8e450fcd8a93613a..6b1c7122b0fb996cd9c03281123c9a28b35253ed 100644 (file)
@@ -533,7 +533,7 @@ static struct platform_driver mtk_pinctrl_driver = {
        .driver = {
                .name = "mediatek-mt2701-pinctrl",
                .of_match_table = mt2701_pctrl_match,
-               .pm = &mtk_eint_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
        },
 };
 
index 8a6daa0db54b0af4b9ad25b1d53afa64852d87ac..bb7394ae252b489988f1161269c09648165537e1 100644 (file)
@@ -581,7 +581,7 @@ static struct platform_driver mtk_pinctrl_driver = {
        .driver = {
                .name = "mediatek-mt2712-pinctrl",
                .of_match_table = mt2712_pctrl_match,
-               .pm = &mtk_eint_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
        },
 };
 
index 01e855ccd4dd9d61b062ae8b520fe6e05ddf52d8..ee3ae3d2fa7e878db169344a9c17fd3f16133855 100644 (file)
@@ -612,7 +612,7 @@ static struct platform_driver mt6795_pinctrl_driver = {
        .driver = {
                .name = "mt6795-pinctrl",
                .of_match_table = mt6795_pctrl_match,
-               .pm = &mtk_paris_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
        },
        .probe = mtk_paris_pinctrl_probe,
 };
index ba7f30c3296fecf60d26978e15fe112f7e36a9ab..143c2662227252519a14954fb9e801eacb31eb07 100644 (file)
@@ -334,7 +334,7 @@ static struct platform_driver mtk_pinctrl_driver = {
        .driver = {
                .name = "mediatek-mt8167-pinctrl",
                .of_match_table = mt8167_pctrl_match,
-               .pm = &mtk_eint_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
        },
 };
 
index 455eec018f93e6b5058bb20b59ed283f5da3320b..b214deeafbf1d6d160e90532cc2ccef8cfd411e9 100644 (file)
@@ -347,7 +347,7 @@ static struct platform_driver mtk_pinctrl_driver = {
        .driver = {
                .name = "mediatek-mt8173-pinctrl",
                .of_match_table = mt8173_pctrl_match,
-               .pm = &mtk_eint_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
        },
 };
 
index ddc48b725c22d99a03f3ddd2d3af2e446491ae1d..93e482c6b5fda99bd78c1aa9d367a00f39321af0 100644 (file)
@@ -576,7 +576,7 @@ static struct platform_driver mt8183_pinctrl_driver = {
        .driver = {
                .name = "mt8183-pinctrl",
                .of_match_table = mt8183_pinctrl_of_match,
-               .pm = &mtk_paris_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
        },
        .probe = mtk_paris_pinctrl_probe,
 };
index a02f7c3269707ee9adf3b0dc75706f97289892b6..7be591591cce54d04efde09957537cf91c33506c 100644 (file)
@@ -1255,7 +1255,7 @@ static struct platform_driver mt8186_pinctrl_driver = {
        .driver = {
                .name = "mt8186-pinctrl",
                .of_match_table = mt8186_pinctrl_of_match,
-               .pm = &mtk_paris_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
        },
        .probe = mtk_paris_pinctrl_probe,
 };
index c067e043e6192d1d15264b4887246a639401b3b1..3975e99d9cf4034c430a00aeb08ff29baa24a9ee 100644 (file)
@@ -1658,7 +1658,7 @@ static struct platform_driver mt8188_pinctrl_driver = {
        .driver = {
                .name = "mt8188-pinctrl",
                .of_match_table = mt8188_pinctrl_of_match,
-               .pm = &mtk_paris_pinctrl_pm_ops
+               .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops)
        },
        .probe = mtk_paris_pinctrl_probe,
 };
index dee1b3aefd36ec96d25b32504f4696534d468fa3..e3a76381f7f4eeef03b2c808ada00f9761f5f322 100644 (file)
@@ -1420,7 +1420,7 @@ static struct platform_driver mt8192_pinctrl_driver = {
        .driver = {
                .name = "mt8192-pinctrl",
                .of_match_table = mt8192_pinctrl_of_match,
-               .pm = &mtk_paris_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
        },
        .probe = mtk_paris_pinctrl_probe,
 };
index 09c4dcef933838038969cc3f7a65008c6c87fa12..83345c52b2fa2cde1797da63120ceb8cba557b59 100644 (file)
@@ -968,7 +968,7 @@ static struct platform_driver mt8195_pinctrl_driver = {
        .driver = {
                .name = "mt8195-pinctrl",
                .of_match_table = mt8195_pinctrl_of_match,
-               .pm = &mtk_paris_pinctrl_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
        },
        .probe = mtk_paris_pinctrl_probe,
 };
index 1db04bbdb42369a5ceb2241dd0ce512482b7e1b7..e3e0d66cfbbfcbd182adc4c57003cc1096c5422d 100644 (file)
@@ -484,7 +484,7 @@ static struct platform_driver mtk_pinctrl_driver = {
        .driver = {
                .name = "mediatek-mt8365-pinctrl",
                .of_match_table = mt8365_pctrl_match,
-               .pm = &mtk_eint_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
        },
 };
 
index 950275c47122bb917cd0d828ac264e889691ead8..abda75d4354e282279135d937576c2a15794fa78 100644 (file)
@@ -334,7 +334,7 @@ static struct platform_driver mtk_pinctrl_driver = {
        .driver = {
                .name = "mediatek-mt8516-pinctrl",
                .of_match_table = mt8516_pctrl_match,
-               .pm = &mtk_eint_pm_ops,
+               .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
        },
 };
 
index e79d66a0419401ee7097d7ea9694b6fe4df16bb5..d39afc122516f12b9965208bcdf11c42565d7205 100644 (file)
@@ -914,9 +914,8 @@ static int mtk_eint_resume(struct device *device)
        return mtk_eint_do_resume(pctl->eint);
 }
 
-const struct dev_pm_ops mtk_eint_pm_ops = {
-       .suspend_noirq = mtk_eint_suspend,
-       .resume_noirq = mtk_eint_resume,
+EXPORT_GPL_DEV_SLEEP_PM_OPS(mtk_eint_pm_ops) = {
+       NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_eint_suspend, mtk_eint_resume)
 };
 
 static int mtk_pctrl_build_state(struct platform_device *pdev)
index 6392f1e05d028b56697e08b163c17a3dbbe92da7..b6bc31abd2b068695dcfd5025c0f31ea0b3c679e 100644 (file)
@@ -1131,9 +1131,8 @@ static int mtk_paris_pinctrl_resume(struct device *device)
        return mtk_eint_do_resume(pctl->eint);
 }
 
-const struct dev_pm_ops mtk_paris_pinctrl_pm_ops = {
-       .suspend_noirq = mtk_paris_pinctrl_suspend,
-       .resume_noirq = mtk_paris_pinctrl_resume,
+EXPORT_GPL_DEV_SLEEP_PM_OPS(mtk_paris_pinctrl_pm_ops) = {
+       NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_paris_pinctrl_suspend, mtk_paris_pinctrl_resume)
 };
 
 MODULE_LICENSE("GPL v2");
index 8762ac59932927c14d27d9e58542a68335299ecc..948ce126aa0cb1dab87e5c8b258cd89ce3f2ed73 100644 (file)
                        __VA_ARGS__, { } },                             \
        }
 
-#define PINCTRL_PIN_GROUP(name, id)                    \
-       {                                               \
-               name,                                   \
-               id##_pins,                              \
-               ARRAY_SIZE(id##_pins),                  \
-               id##_funcs,                             \
+#define PINCTRL_PIN_GROUP(_name_, id)                                                  \
+       {                                                                               \
+               .grp = PINCTRL_PINGROUP(_name_,id##_pins, ARRAY_SIZE(id##_pins)),       \
+               .data = id##_funcs,                                                     \
        }
 
 int mtk_paris_pinctrl_probe(struct platform_device *pdev);
index 1e658721aaba51fa3ffd5de21f1dcf36c38af439..62a46d824b4659d6f2c4bb4b80bbfba51ad0c4cf 100644 (file)
@@ -1790,8 +1790,8 @@ static int npcm7xx_config_set_one(struct npcm7xx_pinctrl *npcm,
                bank->direction_input(&bank->gc, pin % bank->gc.ngpio);
                break;
        case PIN_CONFIG_OUTPUT:
-               iowrite32(gpio, bank->base + NPCM7XX_GP_N_OES);
                bank->direction_output(&bank->gc, pin % bank->gc.ngpio, arg);
+               iowrite32(gpio, bank->base + NPCM7XX_GP_N_OES);
                break;
        case PIN_CONFIG_DRIVE_PUSH_PULL:
                npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_OTYP, gpio);
index 0cff44b07b292f6e8cd6c7959c1ff2e59fd142fb..4589900244c7990548015979a5182a236471b3e8 100644 (file)
@@ -474,9 +474,8 @@ enum {
 #undef WPCM450_GRP
 };
 
-static struct group_desc wpcm450_groups[] = {
-#define WPCM450_GRP(x) { .name = #x, .pins = x ## _pins, \
-                       .num_pins = ARRAY_SIZE(x ## _pins) }
+static struct pingroup wpcm450_groups[] = {
+#define WPCM450_GRP(x) PINCTRL_PINGROUP(#x, x ## _pins, ARRAY_SIZE(x ## _pins))
        WPCM450_GRPS
 #undef WPCM450_GRP
 };
@@ -852,7 +851,7 @@ static int wpcm450_get_group_pins(struct pinctrl_dev *pctldev,
                                  const unsigned int **pins,
                                  unsigned int *npins)
 {
-       *npins = wpcm450_groups[selector].num_pins;
+       *npins = wpcm450_groups[selector].npins;
        *pins  = wpcm450_groups[selector].pins;
 
        return 0;
@@ -901,7 +900,7 @@ static int wpcm450_pinmux_set_mux(struct pinctrl_dev *pctldev,
        struct wpcm450_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
 
        wpcm450_setfunc(pctrl->gcr_regmap, wpcm450_groups[group].pins,
-                       wpcm450_groups[group].num_pins, function);
+                       wpcm450_groups[group].npins, function);
 
        return 0;
 }
index 8313cb5f3b3cd4119f83a5378584d51576cfbe18..cada5d18ffae14d9db2812cf48ad14d53a66dd9c 100644 (file)
@@ -57,7 +57,7 @@ static const struct pin_config_item conf_items[] = {
 
 static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
                                     struct seq_file *s, const char *gname,
-                                    unsigned pin,
+                                    unsigned int pin,
                                     const struct pin_config_item *items,
                                     int nitems, int *print_sep)
 {
@@ -110,7 +110,7 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
  * to be specified the other can be NULL/0.
  */
 void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev, struct seq_file *s,
-                              const char *gname, unsigned pin)
+                              const char *gname, unsigned int pin)
 {
        const struct pinconf_ops *ops = pctldev->desc->confops;
        int print_sep = 0;
@@ -295,15 +295,15 @@ EXPORT_SYMBOL_GPL(pinconf_generic_parse_dt_config);
 
 int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
                struct device_node *np, struct pinctrl_map **map,
-               unsigned *reserved_maps, unsigned *num_maps,
+               unsigned int *reserved_maps, unsigned int *num_maps,
                enum pinctrl_map_type type)
 {
        int ret;
        const char *function;
        struct device *dev = pctldev->dev;
        unsigned long *configs = NULL;
-       unsigned num_configs = 0;
-       unsigned reserve, strings_count;
+       unsigned int num_configs = 0;
+       unsigned int reserve, strings_count;
        struct property *prop;
        const char *group;
        const char *subnode_target_type = "pins";
@@ -379,9 +379,9 @@ EXPORT_SYMBOL_GPL(pinconf_generic_dt_subnode_to_map);
 
 int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
                struct device_node *np_config, struct pinctrl_map **map,
-               unsigned *num_maps, enum pinctrl_map_type type)
+               unsigned int *num_maps, enum pinctrl_map_type type)
 {
-       unsigned reserved_maps;
+       unsigned int reserved_maps;
        struct device_node *np;
        int ret;
 
@@ -412,7 +412,7 @@ EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map);
 
 void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
                                 struct pinctrl_map *map,
-                                unsigned num_maps)
+                                unsigned int num_maps)
 {
        pinctrl_utils_free_map(pctldev, map, num_maps);
 }
index 96d853a8f339b41be9859fd92a26b2b963286c7f..dca963633b5d136b9459d8389a509c8874f518b5 100644 (file)
@@ -55,7 +55,7 @@ int pinconf_validate_map(const struct pinctrl_map *map, int i)
        return 0;
 }
 
-int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned int pin,
                           unsigned long *config)
 {
        const struct pinconf_ops *ops = pctldev->desc->confops;
@@ -199,7 +199,7 @@ int pinconf_apply_setting(const struct pinctrl_setting *setting)
        return 0;
 }
 
-int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned int pin,
                       unsigned long *configs, size_t nconfigs)
 {
        const struct pinconf_ops *ops;
@@ -214,7 +214,7 @@ int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
 #ifdef CONFIG_DEBUG_FS
 
 static void pinconf_show_config(struct seq_file *s, struct pinctrl_dev *pctldev,
-                     unsigned long *configs, unsigned num_configs)
+                               unsigned long *configs, unsigned int num_configs)
 {
        const struct pinconf_ops *confops;
        int i;
@@ -304,7 +304,7 @@ static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
 static int pinconf_pins_show(struct seq_file *s, void *what)
 {
        struct pinctrl_dev *pctldev = s->private;
-       unsigned i, pin;
+       unsigned int i, pin;
 
        seq_puts(s, "Pin config settings per pin\n");
        seq_puts(s, "Format: pin (name): configs\n");
@@ -333,7 +333,7 @@ static int pinconf_pins_show(struct seq_file *s, void *what)
 }
 
 static void pinconf_dump_group(struct pinctrl_dev *pctldev,
-                              struct seq_file *s, unsigned selector,
+                              struct seq_file *s, unsigned int selector,
                               const char *gname)
 {
        const struct pinconf_ops *ops = pctldev->desc->confops;
@@ -348,8 +348,8 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
 {
        struct pinctrl_dev *pctldev = s->private;
        const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
-       unsigned ngroups = pctlops->get_groups_count(pctldev);
-       unsigned selector = 0;
+       unsigned int ngroups = pctlops->get_groups_count(pctldev);
+       unsigned int selector = 0;
 
        seq_puts(s, "Pin config settings per pin group\n");
        seq_puts(s, "Format: group (name): configs\n");
index 694bfc9961faf1fdfbf737ba3c2fd699a297cd42..a14c950bc70041f135b16a17f3747c61bdbf0f84 100644 (file)
@@ -29,14 +29,14 @@ int pinconf_map_to_setting(const struct pinctrl_map *map,
 void pinconf_free_setting(const struct pinctrl_setting *setting);
 int pinconf_apply_setting(const struct pinctrl_setting *setting);
 
-int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned int pin,
                       unsigned long *configs, size_t nconfigs);
 
 /*
  * You will only be interested in these if you're using PINCONF
  * so don't supply any stubs for these.
  */
-int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned int pin,
                           unsigned long *config);
 int pin_config_group_get(const char *dev_name, const char *pin_group,
                         unsigned long *config);
@@ -68,7 +68,7 @@ static inline int pinconf_apply_setting(const struct pinctrl_setting *setting)
        return 0;
 }
 
-static inline int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+static inline int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned int pin,
                                     unsigned long *configs, size_t nconfigs)
 {
        return -ENOTSUPP;
@@ -112,7 +112,7 @@ static inline void pinconf_init_device_debugfs(struct dentry *devroot,
 
 void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev,
                               struct seq_file *s, const char *gname,
-                              unsigned pin);
+                              unsigned int pin);
 
 void pinconf_generic_dump_config(struct pinctrl_dev *pctldev,
                                 struct seq_file *s, unsigned long config);
@@ -120,7 +120,7 @@ void pinconf_generic_dump_config(struct pinctrl_dev *pctldev,
 
 static inline void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev,
                                             struct seq_file *s,
-                                            const char *gname, unsigned pin)
+                                            const char *gname, unsigned int pin)
 {
        return;
 }
index 03ecb3d1aaf60da974f32bb344203b418969064f..49f89b70dcecb4a4465b62aecded05aa3e0b19f7 100644 (file)
@@ -1159,7 +1159,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
        }
 
        ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
-                              IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
+                              IRQF_SHARED | IRQF_ONESHOT, KBUILD_MODNAME, gpio_dev);
        if (ret)
                goto out2;
 
index 6a5f23cf7a2a2119b6a9c4e43815f5a2094414d2..0d8c75ce20eda97627aef773342a0b63ff6114a1 100644 (file)
@@ -542,7 +542,6 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
 
        as_pci->dev = &pdev->dev;
        as_pci->as3722 = dev_get_drvdata(pdev->dev.parent);
-       platform_set_drvdata(pdev, as_pci);
 
        as_pci->pins = as3722_pins_desc;
        as_pci->num_pins = ARRAY_SIZE(as3722_pins_desc);
@@ -562,7 +561,7 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
 
        as_pci->gpio_chip = as3722_gpio_chip;
        as_pci->gpio_chip.parent = &pdev->dev;
-       ret = gpiochip_add_data(&as_pci->gpio_chip, as_pci);
+       ret = devm_gpiochip_add_data(&pdev->dev, &as_pci->gpio_chip, as_pci);
        if (ret < 0) {
                dev_err(&pdev->dev, "Couldn't register gpiochip, %d\n", ret);
                return ret;
@@ -572,21 +571,10 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
                                0, 0, AS3722_PIN_NUM);
        if (ret < 0) {
                dev_err(&pdev->dev, "Couldn't add pin range, %d\n", ret);
-               goto fail_range_add;
+               return ret;
        }
 
        return 0;
-
-fail_range_add:
-       gpiochip_remove(&as_pci->gpio_chip);
-       return ret;
-}
-
-static void as3722_pinctrl_remove(struct platform_device *pdev)
-{
-       struct as3722_pctrl_info *as_pci = platform_get_drvdata(pdev);
-
-       gpiochip_remove(&as_pci->gpio_chip);
 }
 
 static const struct of_device_id as3722_pinctrl_of_match[] = {
@@ -601,7 +589,6 @@ static struct platform_driver as3722_pinctrl_driver = {
                .of_match_table = as3722_pinctrl_of_match,
        },
        .probe = as3722_pinctrl_probe,
-       .remove_new = as3722_pinctrl_remove,
 };
 module_platform_driver(as3722_pinctrl_driver);
 
index fe9545c630a2fa4418a579c5751afee237f13853..67b5d160c027ed3e429d1f246c3dc1028c4c52c7 100644 (file)
 
 #define CY8C95X0_PIN_TO_OFFSET(x) (((x) >= 20) ? ((x) + 4) : (x))
 
+#define CY8C95X0_MUX_REGMAP_TO_PORT(x) ((x) / MUXED_STRIDE)
+#define CY8C95X0_MUX_REGMAP_TO_REG(x) (((x) % MUXED_STRIDE) + CY8C95X0_INTMASK)
+#define CY8C95X0_MUX_REGMAP_TO_OFFSET(x, p) ((x) - CY8C95X0_INTMASK + (p) * MUXED_STRIDE)
+
 static const struct i2c_device_id cy8c95x0_id[] = {
        { "cy8c9520", 20, },
        { "cy8c9540", 40, },
@@ -119,12 +123,13 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
 #define MAX_BANK 8
 #define BANK_SZ 8
 #define MAX_LINE       (MAX_BANK * BANK_SZ)
-
+#define MUXED_STRIDE   16
 #define CY8C95X0_GPIO_MASK             GENMASK(7, 0)
 
 /**
  * struct cy8c95x0_pinctrl - driver data
- * @regmap:         Device's regmap
+ * @regmap:         Device's regmap. Only direct access registers.
+ * @muxed_regmap:   Regmap for all muxed registers.
  * @irq_lock:       IRQ bus lock
  * @i2c_lock:       Mutex for the device internal mux register
  * @irq_mask:       I/O bits affected by interrupts
@@ -147,6 +152,7 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
  */
 struct cy8c95x0_pinctrl {
        struct regmap *regmap;
+       struct regmap *muxed_regmap;
        struct mutex irq_lock;
        struct mutex i2c_lock;
        DECLARE_BITMAP(irq_mask, MAX_LINE);
@@ -379,6 +385,54 @@ static bool cy8c95x0_precious_register(struct device *dev, unsigned int reg)
        }
 }
 
+static bool cy8c95x0_muxed_register(unsigned int reg)
+{
+       switch (reg) {
+       case CY8C95X0_INTMASK:
+       case CY8C95X0_PWMSEL:
+       case CY8C95X0_INVERT:
+       case CY8C95X0_DIRECTION:
+       case CY8C95X0_DRV_PU:
+       case CY8C95X0_DRV_PD:
+       case CY8C95X0_DRV_ODH:
+       case CY8C95X0_DRV_ODL:
+       case CY8C95X0_DRV_PP_FAST:
+       case CY8C95X0_DRV_PP_SLOW:
+       case CY8C95X0_DRV_HIZ:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool cy8c95x0_wc_register(unsigned int reg)
+{
+       switch (reg) {
+       case CY8C95X0_DRV_PU:
+       case CY8C95X0_DRV_PD:
+       case CY8C95X0_DRV_ODH:
+       case CY8C95X0_DRV_ODL:
+       case CY8C95X0_DRV_PP_FAST:
+       case CY8C95X0_DRV_PP_SLOW:
+       case CY8C95X0_DRV_HIZ:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool cy8c95x0_quick_path_register(unsigned int reg)
+{
+       switch (reg) {
+       case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
+       case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
+       case CY8C95X0_OUTPUT_(0) ... CY8C95X0_OUTPUT_(7):
+               return true;
+       default:
+               return false;
+       }
+}
+
 static const struct reg_default cy8c95x0_reg_defaults[] = {
        { CY8C95X0_OUTPUT_(0), GENMASK(7, 0) },
        { CY8C95X0_OUTPUT_(1), GENMASK(7, 0) },
@@ -392,7 +446,89 @@ static const struct reg_default cy8c95x0_reg_defaults[] = {
        { CY8C95X0_PWMSEL, 0 },
 };
 
+static int
+cy8c95x0_mux_reg_read(void *context, unsigned int off, unsigned int *val)
+{
+       struct cy8c95x0_pinctrl *chip = context;
+       u8 port = CY8C95X0_MUX_REGMAP_TO_PORT(off);
+       int ret, reg = CY8C95X0_MUX_REGMAP_TO_REG(off);
+
+       mutex_lock(&chip->i2c_lock);
+       /* Select the correct bank */
+       ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+       if (ret < 0)
+               goto out;
+
+       /*
+        * Read the register through direct access regmap. The target range
+        * is marked volatile.
+        */
+       ret = regmap_read(chip->regmap, reg, val);
+out:
+       mutex_unlock(&chip->i2c_lock);
+
+       return ret;
+}
+
+static int
+cy8c95x0_mux_reg_write(void *context, unsigned int off, unsigned int val)
+{
+       struct cy8c95x0_pinctrl *chip = context;
+       u8 port = CY8C95X0_MUX_REGMAP_TO_PORT(off);
+       int ret, reg = CY8C95X0_MUX_REGMAP_TO_REG(off);
+
+       mutex_lock(&chip->i2c_lock);
+       /* Select the correct bank */
+       ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+       if (ret < 0)
+               goto out;
+
+       /*
+        * Write the register through direct access regmap. The target range
+        * is marked volatile.
+        */
+       ret = regmap_write(chip->regmap, reg, val);
+out:
+       mutex_unlock(&chip->i2c_lock);
+
+       return ret;
+}
+
+static bool cy8c95x0_mux_accessible_register(struct device *dev, unsigned int off)
+{
+       struct i2c_client *i2c = to_i2c_client(dev);
+       struct cy8c95x0_pinctrl *chip = i2c_get_clientdata(i2c);
+       u8 port = CY8C95X0_MUX_REGMAP_TO_PORT(off);
+       u8 reg = CY8C95X0_MUX_REGMAP_TO_REG(off);
+
+       if (port >= chip->nport)
+               return false;
+
+       return cy8c95x0_muxed_register(reg);
+}
+
+static struct regmap_bus cy8c95x0_regmap_bus = {
+       .reg_read = cy8c95x0_mux_reg_read,
+       .reg_write = cy8c95x0_mux_reg_write,
+};
+
+/* Regmap for muxed registers CY8C95X0_INTMASK - CY8C95X0_DRV_HIZ */
+static const struct regmap_config cy8c95x0_muxed_regmap = {
+       .name = "muxed",
+       .reg_bits = 8,
+       .val_bits = 8,
+       .cache_type = REGCACHE_FLAT,
+       .use_single_read = true,
+       .use_single_write = true,
+       .max_register = MUXED_STRIDE * BANK_SZ,
+       .num_reg_defaults_raw = MUXED_STRIDE * BANK_SZ,
+       .readable_reg = cy8c95x0_mux_accessible_register,
+       .writeable_reg = cy8c95x0_mux_accessible_register,
+};
+
+/* Direct access regmap */
 static const struct regmap_config cy8c95x0_i2c_regmap = {
+       .name = "direct",
        .reg_bits = 8,
        .val_bits = 8,
 
@@ -408,6 +544,147 @@ static const struct regmap_config cy8c95x0_i2c_regmap = {
        .max_register = CY8C95X0_COMMAND,
 };
 
+static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
+                                                  unsigned int reg,
+                                                  unsigned int port,
+                                                  unsigned int mask,
+                                                  unsigned int val,
+                                                  bool *change, bool async,
+                                                  bool force)
+{
+       struct regmap *regmap;
+       int ret, off, i, read_val;
+
+       /* Caller should never modify PORTSEL directly */
+       if (reg == CY8C95X0_PORTSEL)
+               return -EINVAL;
+
+       /* Registers behind the PORTSEL mux have their own regmap */
+       if (cy8c95x0_muxed_register(reg)) {
+               regmap = chip->muxed_regmap;
+               off = CY8C95X0_MUX_REGMAP_TO_OFFSET(reg, port);
+       } else {
+               regmap = chip->regmap;
+               /* Quick path direct access registers honor the port argument */
+               if (cy8c95x0_quick_path_register(reg))
+                       off = reg + port;
+               else
+                       off = reg;
+       }
+
+       ret = regmap_update_bits_base(regmap, off, mask, val, change, async, force);
+       if (ret < 0)
+               return ret;
+
+       /* Update the cache when a WC bit is written */
+       if (cy8c95x0_wc_register(reg) && (mask & val)) {
+               for (i = CY8C95X0_DRV_PU; i <= CY8C95X0_DRV_HIZ; i++) {
+                       if (i == reg)
+                               continue;
+                       off = CY8C95X0_MUX_REGMAP_TO_OFFSET(i, port);
+
+                       ret = regmap_read(regmap, off, &read_val);
+                       if (ret < 0)
+                               continue;
+
+                       if (!(read_val & mask & val))
+                               continue;
+
+                       regcache_cache_only(regmap, true);
+                       regmap_update_bits(regmap, off, mask & val, 0);
+                       regcache_cache_only(regmap, false);
+               }
+       }
+
+       return ret;
+}
+
+/**
+ * cy8c95x0_regmap_write_bits() - writes a register using the regmap cache
+ * @chip: The pinctrl to work on
+ * @reg: The register to write to. Can be direct access or muxed register.
+ *       MUST NOT be the PORTSEL register.
+ * @port: The port to be used for muxed registers or quick path direct access
+ *        registers. Otherwise unused.
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * This function handles the register writes to the direct access registers and
+ * the muxed registers while caching all register accesses, internally handling
+ * the correct state of the PORTSEL register and protecting the access to muxed
+ * registers.
+ * The caller must only use this function to change registers behind the PORTSEL mux.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int cy8c95x0_regmap_write_bits(struct cy8c95x0_pinctrl *chip, unsigned int reg,
+                                     unsigned int port, unsigned int mask, unsigned int val)
+{
+       return cy8c95x0_regmap_update_bits_base(chip, reg, port, mask, val, NULL, false, true);
+}
+
+/**
+ * cy8c95x0_regmap_update_bits() - updates a register using the regmap cache
+ * @chip: The pinctrl to work on
+ * @reg: The register to write to. Can be direct access or muxed register.
+ *       MUST NOT be the PORTSEL register.
+ * @port: The port to be used for muxed registers or quick path direct access
+ *        registers. Otherwise unused.
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * This function handles the register updates to the direct access registers and
+ * the muxed registers while caching all register accesses, internally handling
+ * the correct state of the PORTSEL register and protecting the access to muxed
+ * registers.
+ * The caller must only use this function to change registers behind the PORTSEL mux.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int cy8c95x0_regmap_update_bits(struct cy8c95x0_pinctrl *chip, unsigned int reg,
+                                      unsigned int port, unsigned int mask, unsigned int val)
+{
+       return cy8c95x0_regmap_update_bits_base(chip, reg, port, mask, val, NULL, false, false);
+}
+
+/**
+ * cy8c95x0_regmap_read() - reads a register using the regmap cache
+ * @chip: The pinctrl to work on
+ * @reg: The register to read from. Can be direct access or muxed register.
+ * @port: The port to be used for muxed registers or quick path direct access
+ *        registers. Otherwise unused.
+ * @read_val: Value read from hardware or cache
+ *
+ * This function handles the register reads from the direct access registers and
+ * the muxed registers while caching all register accesses, internally handling
+ * the correct state of the PORTSEL register and protecting the access to muxed
+ * registers.
+ * The caller must only use this function to read registers behind the PORTSEL mux.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int cy8c95x0_regmap_read(struct cy8c95x0_pinctrl *chip, unsigned int reg,
+                               unsigned int port, unsigned int *read_val)
+{
+       struct regmap *regmap;
+       int off;
+
+       /* Registers behind the PORTSEL mux have their own regmap */
+       if (cy8c95x0_muxed_register(reg)) {
+               regmap = chip->muxed_regmap;
+               off = CY8C95X0_MUX_REGMAP_TO_OFFSET(reg, port);
+       } else {
+               regmap = chip->regmap;
+               /* Quick path direct access registers honor the port argument */
+               if (cy8c95x0_quick_path_register(reg))
+                       off = reg + port;
+               else
+                       off = reg;
+       }
+
+       return regmap_read(regmap, off, read_val);
+}
+
 static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
                                    unsigned long *val, unsigned long *mask)
 {
@@ -415,7 +692,7 @@ static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
        DECLARE_BITMAP(tval, MAX_LINE);
        int write_val;
        int ret = 0;
-       int i, off = 0;
+       int i;
        u8 bits;
 
        /* Add the 4 bit gap of Gport2 */
@@ -427,53 +704,22 @@ static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
        bitmap_shift_left(tval, tval, 4, MAX_LINE);
        bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3);
 
-       mutex_lock(&chip->i2c_lock);
        for (i = 0; i < chip->nport; i++) {
                /* Skip over unused banks */
                bits = bitmap_get_value8(tmask, i * BANK_SZ);
                if (!bits)
                        continue;
 
-               switch (reg) {
-               /* Muxed registers */
-               case CY8C95X0_INTMASK:
-               case CY8C95X0_PWMSEL:
-               case CY8C95X0_INVERT:
-               case CY8C95X0_DIRECTION:
-               case CY8C95X0_DRV_PU:
-               case CY8C95X0_DRV_PD:
-               case CY8C95X0_DRV_ODH:
-               case CY8C95X0_DRV_ODL:
-               case CY8C95X0_DRV_PP_FAST:
-               case CY8C95X0_DRV_PP_SLOW:
-               case CY8C95X0_DRV_HIZ:
-                       ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, i);
-                       if (ret < 0)
-                               goto out;
-                       off = reg;
-                       break;
-               /* Direct access registers */
-               case CY8C95X0_INPUT:
-               case CY8C95X0_OUTPUT:
-               case CY8C95X0_INTSTATUS:
-                       off = reg + i;
-                       break;
-               default:
-                       ret = -EINVAL;
-                       goto out;
-               }
-
                write_val = bitmap_get_value8(tval, i * BANK_SZ);
 
-               ret = regmap_update_bits(chip->regmap, off, bits, write_val);
+               ret = cy8c95x0_regmap_update_bits(chip, reg, i, bits, write_val);
                if (ret < 0)
                        goto out;
        }
 out:
-       mutex_unlock(&chip->i2c_lock);
 
        if (ret < 0)
-               dev_err(chip->dev, "failed writing register %d: err %d\n", off, ret);
+               dev_err(chip->dev, "failed writing register %d, port %d: err %d\n", reg, i, ret);
 
        return ret;
 }
@@ -486,7 +732,7 @@ static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
        DECLARE_BITMAP(tmp, MAX_LINE);
        int read_val;
        int ret = 0;
-       int i, off = 0;
+       int i;
        u8 bits;
 
        /* Add the 4 bit gap of Gport2 */
@@ -498,43 +744,13 @@ static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
        bitmap_shift_left(tval, tval, 4, MAX_LINE);
        bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3);
 
-       mutex_lock(&chip->i2c_lock);
        for (i = 0; i < chip->nport; i++) {
                /* Skip over unused banks */
                bits = bitmap_get_value8(tmask, i * BANK_SZ);
                if (!bits)
                        continue;
 
-               switch (reg) {
-               /* Muxed registers */
-               case CY8C95X0_INTMASK:
-               case CY8C95X0_PWMSEL:
-               case CY8C95X0_INVERT:
-               case CY8C95X0_DIRECTION:
-               case CY8C95X0_DRV_PU:
-               case CY8C95X0_DRV_PD:
-               case CY8C95X0_DRV_ODH:
-               case CY8C95X0_DRV_ODL:
-               case CY8C95X0_DRV_PP_FAST:
-               case CY8C95X0_DRV_PP_SLOW:
-               case CY8C95X0_DRV_HIZ:
-                       ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, i);
-                       if (ret < 0)
-                               goto out;
-                       off = reg;
-                       break;
-               /* Direct access registers */
-               case CY8C95X0_INPUT:
-               case CY8C95X0_OUTPUT:
-               case CY8C95X0_INTSTATUS:
-                       off = reg + i;
-                       break;
-               default:
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               ret = regmap_read(chip->regmap, off, &read_val);
+               ret = cy8c95x0_regmap_read(chip, reg, i, &read_val);
                if (ret < 0)
                        goto out;
 
@@ -548,10 +764,8 @@ static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
        bitmap_replace(val, tmp, tval, chip->shiftmask, MAX_LINE);
 
 out:
-       mutex_unlock(&chip->i2c_lock);
-
        if (ret < 0)
-               dev_err(chip->dev, "failed reading register %d: err %d\n", off, ret);
+               dev_err(chip->dev, "failed reading register %d, port %d: err %d\n", reg, i, ret);
 
        return ret;
 }
@@ -566,12 +780,11 @@ static int cy8c95x0_gpio_direction_output(struct gpio_chip *gc,
 {
        struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
        u8 port = cypress_get_port(chip, off);
-       u8 outreg = CY8C95X0_OUTPUT_(port);
        u8 bit = cypress_get_pin_mask(chip, off);
        int ret;
 
        /* Set output level */
-       ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+       ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit, val ? bit : 0);
        if (ret)
                return ret;
 
@@ -581,12 +794,12 @@ static int cy8c95x0_gpio_direction_output(struct gpio_chip *gc,
 static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off)
 {
        struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
-       u8 inreg = CY8C95X0_INPUT_(cypress_get_port(chip, off));
+       u8 port = cypress_get_port(chip, off);
        u8 bit = cypress_get_pin_mask(chip, off);
        u32 reg_val;
        int ret;
 
-       ret = regmap_read(chip->regmap, inreg, &reg_val);
+       ret = cy8c95x0_regmap_read(chip, CY8C95X0_INPUT, port, &reg_val);
        if (ret < 0) {
                /*
                 * NOTE:
@@ -604,10 +817,10 @@ static void cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off,
                                    int val)
 {
        struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
-       u8 outreg = CY8C95X0_OUTPUT_(cypress_get_port(chip, off));
+       u8 port = cypress_get_port(chip, off);
        u8 bit = cypress_get_pin_mask(chip, off);
 
-       regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+       cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit, val ? bit : 0);
 }
 
 static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
@@ -618,24 +831,15 @@ static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
        u32 reg_val;
        int ret;
 
-       mutex_lock(&chip->i2c_lock);
-
-       ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_read(chip->regmap, CY8C95X0_DIRECTION, &reg_val);
+       ret = cy8c95x0_regmap_read(chip, CY8C95X0_DIRECTION, port, &reg_val);
        if (ret < 0)
                goto out;
 
-       mutex_unlock(&chip->i2c_lock);
-
        if (reg_val & bit)
                return GPIO_LINE_DIRECTION_IN;
 
        return GPIO_LINE_DIRECTION_OUT;
 out:
-       mutex_unlock(&chip->i2c_lock);
        return ret;
 }
 
@@ -651,13 +855,6 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
        u16 arg = 0;
        int ret;
 
-       mutex_lock(&chip->i2c_lock);
-
-       /* Select port */
-       ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
-       if (ret < 0)
-               goto out;
-
        switch (param) {
        case PIN_CONFIG_BIAS_PULL_UP:
                reg = CY8C95X0_DRV_PU;
@@ -684,7 +881,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
                reg = CY8C95X0_PWMSEL;
                break;
        case PIN_CONFIG_OUTPUT:
-               reg = CY8C95X0_OUTPUT_(port);
+               reg = CY8C95X0_OUTPUT;
                break;
        case PIN_CONFIG_OUTPUT_ENABLE:
                reg = CY8C95X0_DIRECTION;
@@ -712,7 +909,10 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
         * Writing 1 to one of the drive mode registers will automatically
         * clear conflicting set bits in the other drive mode registers.
         */
-       ret = regmap_read(chip->regmap, reg, &reg_val);
+       ret = cy8c95x0_regmap_read(chip, reg, port, &reg_val);
+       if (ret < 0)
+               goto out;
+
        if (reg_val & bit)
                arg = 1;
        if (param == PIN_CONFIG_OUTPUT_ENABLE)
@@ -720,8 +920,6 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
 
        *config = pinconf_to_config_packed(param, (u16)arg);
 out:
-       mutex_unlock(&chip->i2c_lock);
-
        return ret;
 }
 
@@ -736,13 +934,6 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
        unsigned int reg;
        int ret;
 
-       mutex_lock(&chip->i2c_lock);
-
-       /* Select port */
-       ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
-       if (ret < 0)
-               goto out;
-
        switch (param) {
        case PIN_CONFIG_BIAS_PULL_UP:
                __clear_bit(off, chip->push_pull);
@@ -785,10 +976,8 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
         * Writing 1 to one of the drive mode registers will automatically
         * clear conflicting set bits in the other drive mode registers.
         */
-       ret = regmap_write_bits(chip->regmap, reg, bit, bit);
-
+       ret = cy8c95x0_regmap_write_bits(chip, reg, port, bit, bit);
 out:
-       mutex_unlock(&chip->i2c_lock);
        return ret;
 }
 
@@ -1105,14 +1294,8 @@ static int cy8c95x0_set_mode(struct cy8c95x0_pinctrl *chip, unsigned int off, bo
 {
        u8 port = cypress_get_port(chip, off);
        u8 bit = cypress_get_pin_mask(chip, off);
-       int ret;
 
-       /* Select port */
-       ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
-       if (ret < 0)
-               return ret;
-
-       return regmap_write_bits(chip->regmap, CY8C95X0_PWMSEL, bit, mode ? bit : 0);
+       return cy8c95x0_regmap_write_bits(chip, CY8C95X0_PWMSEL, port, bit, mode ? bit : 0);
 }
 
 static int cy8c95x0_pinmux_mode(struct cy8c95x0_pinctrl *chip,
@@ -1130,24 +1313,19 @@ static int cy8c95x0_pinmux_mode(struct cy8c95x0_pinctrl *chip,
                return 0;
 
        /* Set direction to output & set output to 1 so that PWM can work */
-       ret = regmap_write_bits(chip->regmap, CY8C95X0_DIRECTION, bit, bit);
+       ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_DIRECTION, port, bit, bit);
        if (ret < 0)
                return ret;
 
-       return regmap_write_bits(chip->regmap, CY8C95X0_OUTPUT_(port), bit, bit);
+       return cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit, bit);
 }
 
 static int cy8c95x0_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
                            unsigned int group)
 {
        struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
-       int ret;
 
-       mutex_lock(&chip->i2c_lock);
-       ret = cy8c95x0_pinmux_mode(chip, selector, group);
-       mutex_unlock(&chip->i2c_lock);
-
-       return ret;
+       return cy8c95x0_pinmux_mode(chip, selector, group);
 }
 
 static int cy8c95x0_gpio_request_enable(struct pinctrl_dev *pctldev,
@@ -1155,13 +1333,8 @@ static int cy8c95x0_gpio_request_enable(struct pinctrl_dev *pctldev,
                                        unsigned int pin)
 {
        struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
-       int ret;
-
-       mutex_lock(&chip->i2c_lock);
-       ret = cy8c95x0_set_mode(chip, pin, false);
-       mutex_unlock(&chip->i2c_lock);
 
-       return ret;
+       return cy8c95x0_set_mode(chip, pin, false);
 }
 
 static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip,
@@ -1171,13 +1344,7 @@ static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip,
        u8 bit = cypress_get_pin_mask(chip, pin);
        int ret;
 
-       /* Select port... */
-       ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
-       if (ret)
-               return ret;
-
-       /* ...then direction */
-       ret = regmap_write_bits(chip->regmap, CY8C95X0_DIRECTION, bit, input ? bit : 0);
+       ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_DIRECTION, port, bit, input ? bit : 0);
        if (ret)
                return ret;
 
@@ -1186,7 +1353,7 @@ static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip,
         * the direction register isn't sufficient in Push-Pull mode.
         */
        if (input && test_bit(pin, chip->push_pull)) {
-               ret = regmap_write_bits(chip->regmap, CY8C95X0_DRV_HIZ, bit, bit);
+               ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_DRV_HIZ, port, bit, bit);
                if (ret)
                        return ret;
 
@@ -1201,13 +1368,8 @@ static int cy8c95x0_gpio_set_direction(struct pinctrl_dev *pctldev,
                                       unsigned int pin, bool input)
 {
        struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
-       int ret;
 
-       mutex_lock(&chip->i2c_lock);
-       ret = cy8c95x0_pinmux_direction(chip, pin, input);
-       mutex_unlock(&chip->i2c_lock);
-
-       return ret;
+       return cy8c95x0_pinmux_direction(chip, pin, input);
 }
 
 static const struct pinmux_ops cy8c95x0_pmxops = {
@@ -1409,12 +1571,22 @@ static int cy8c95x0_probe(struct i2c_client *client)
                gpiod_set_consumer_name(chip->gpio_reset, "CY8C95X0 RESET");
        }
 
+       /* Generic regmap for direct access registers */
        chip->regmap = devm_regmap_init_i2c(client, &cy8c95x0_i2c_regmap);
        if (IS_ERR(chip->regmap)) {
                ret = PTR_ERR(chip->regmap);
                goto err_exit;
        }
 
+       /* Port specific regmap behind PORTSEL mux */
+       chip->muxed_regmap = devm_regmap_init(&client->dev, &cy8c95x0_regmap_bus,
+                                             chip, &cy8c95x0_muxed_regmap);
+       if (IS_ERR(chip->muxed_regmap)) {
+               ret = dev_err_probe(&client->dev, PTR_ERR(chip->muxed_regmap),
+                                   "Failed to register muxed regmap\n");
+               goto err_exit;
+       }
+
        bitmap_zero(chip->push_pull, MAX_LINE);
        bitmap_zero(chip->shiftmask, MAX_LINE);
        bitmap_set(chip->shiftmask, 0, 20);
index 5b5ddf7e5d0eb10a9ec8cc4154f29e8fe895e7ee..6e1be38865c32cd1820042535de30016d872525a 100644 (file)
@@ -331,8 +331,8 @@ static int eqbr_pinmux_set_mux(struct pinctrl_dev *pctldev,
                return -EINVAL;
 
        pinmux = grp->data;
-       for (i = 0; i < grp->num_pins; i++)
-               eqbr_set_pin_mux(pctl, pinmux[i], grp->pins[i]);
+       for (i = 0; i < grp->grp.npins; i++)
+               eqbr_set_pin_mux(pctl, pinmux[i], grp->grp.pins[i]);
 
        return 0;
 }
@@ -704,8 +704,8 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
 {
        struct device *dev = drvdata->dev;
        struct device_node *node = dev->of_node;
-       unsigned int *pinmux, pin_id, pinmux_id;
-       struct group_desc group;
+       unsigned int *pins, *pinmux, pin_id, pinmux_id;
+       struct pingroup group, *grp = &group;
        struct device_node *np;
        struct property *prop;
        int j, err;
@@ -715,55 +715,55 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
                if (!prop)
                        continue;
 
-               group.num_pins = of_property_count_u32_elems(np, "pins");
-               if (group.num_pins < 0) {
+               err = of_property_count_u32_elems(np, "pins");
+               if (err < 0) {
                        dev_err(dev, "No pins in the group: %s\n", prop->name);
                        of_node_put(np);
-                       return -EINVAL;
+                       return err;
                }
-               group.name = prop->value;
-               group.pins = devm_kcalloc(dev, group.num_pins,
-                                         sizeof(*(group.pins)), GFP_KERNEL);
-               if (!group.pins) {
+               grp->npins = err;
+               grp->name = prop->value;
+               pins = devm_kcalloc(dev, grp->npins, sizeof(*pins), GFP_KERNEL);
+               if (!pins) {
                        of_node_put(np);
                        return -ENOMEM;
                }
+               grp->pins = pins;
 
-               pinmux = devm_kcalloc(dev, group.num_pins, sizeof(*pinmux),
-                                     GFP_KERNEL);
+               pinmux = devm_kcalloc(dev, grp->npins, sizeof(*pinmux), GFP_KERNEL);
                if (!pinmux) {
                        of_node_put(np);
                        return -ENOMEM;
                }
 
-               for (j = 0; j < group.num_pins; j++) {
+               for (j = 0; j < grp->npins; j++) {
                        if (of_property_read_u32_index(np, "pins", j, &pin_id)) {
                                dev_err(dev, "Group %s: Read intel pins id failed\n",
-                                       group.name);
+                                       grp->name);
                                of_node_put(np);
                                return -EINVAL;
                        }
                        if (pin_id >= drvdata->pctl_desc.npins) {
                                dev_err(dev, "Group %s: Invalid pin ID, idx: %d, pin %u\n",
-                                       group.name, j, pin_id);
+                                       grp->name, j, pin_id);
                                of_node_put(np);
                                return -EINVAL;
                        }
-                       group.pins[j] = pin_id;
+                       pins[j] = pin_id;
                        if (of_property_read_u32_index(np, "pinmux", j, &pinmux_id)) {
                                dev_err(dev, "Group %s: Read intel pinmux id failed\n",
-                                       group.name);
+                                       grp->name);
                                of_node_put(np);
                                return -EINVAL;
                        }
                        pinmux[j] = pinmux_id;
                }
 
-               err = pinctrl_generic_add_group(drvdata->pctl_dev, group.name,
-                                               group.pins, group.num_pins,
+               err = pinctrl_generic_add_group(drvdata->pctl_dev,
+                                               grp->name, grp->pins, grp->npins,
                                                pinmux);
                if (err < 0) {
-                       dev_err(dev, "Failed to register group %s\n", group.name);
+                       dev_err(dev, "Failed to register group %s\n", grp->name);
                        of_node_put(np);
                        return err;
                }
index ee718f6e25566a437382623a17e370e2e70abab9..bc6358a686fcde9c2808eb4fdf226f51a5d389f9 100644 (file)
 #define PINS_PER_GPIO_CHIP                     32
 #define JZ4730_PINS_PER_PAIRED_REG     16
 
-#define INGENIC_PIN_GROUP_FUNCS(name, id, funcs)               \
-       {                                               \
-               name,                                   \
-               id##_pins,                              \
-               ARRAY_SIZE(id##_pins),                  \
-               funcs,                                  \
+#define INGENIC_PIN_GROUP_FUNCS(_name_, id, funcs)                                     \
+       {                                                                               \
+               .grp = PINCTRL_PINGROUP(_name_, id##_pins, ARRAY_SIZE(id##_pins)),      \
+               .data = funcs,                                                          \
        }
 
-#define INGENIC_PIN_GROUP(name, id, func)              \
-       INGENIC_PIN_GROUP_FUNCS(name, id, (void *)(func))
+#define INGENIC_PIN_GROUP(_name_, id, func)                                            \
+       {                                                                               \
+               .grp = PINCTRL_PINGROUP(_name_, id##_pins, ARRAY_SIZE(id##_pins)),      \
+               .data = (void *)func,                                                   \
+       }
 
 enum jz_version {
        ID_JZ4730,
@@ -3761,17 +3762,17 @@ static int ingenic_pinmux_set_mux(struct pinctrl_dev *pctldev,
                return -EINVAL;
 
        dev_dbg(pctldev->dev, "enable function %s group %s\n",
-               func->name, grp->name);
+               func->name, grp->grp.name);
 
        mode = (uintptr_t)grp->data;
        if (mode <= 3) {
-               for (i = 0; i < grp->num_pins; i++)
-                       ingenic_pinmux_set_pin_fn(jzpc, grp->pins[i], mode);
+               for (i = 0; i < grp->grp.npins; i++)
+                       ingenic_pinmux_set_pin_fn(jzpc, grp->grp.pins[i], mode);
        } else {
                pin_modes = grp->data;
 
-               for (i = 0; i < grp->num_pins; i++)
-                       ingenic_pinmux_set_pin_fn(jzpc, grp->pins[i], pin_modes[i]);
+               for (i = 0; i < grp->grp.npins; i++)
+                       ingenic_pinmux_set_pin_fn(jzpc, grp->grp.pins[i], pin_modes[i]);
        }
 
        return 0;
@@ -4298,12 +4299,12 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
 
        for (i = 0; i < chip_info->num_groups; i++) {
                const struct group_desc *group = &chip_info->groups[i];
+               const struct pingroup *grp = &group->grp;
 
-               err = pinctrl_generic_add_group(jzpc->pctl, group->name,
-                               group->pins, group->num_pins, group->data);
+               err = pinctrl_generic_add_group(jzpc->pctl, grp->name, grp->pins, grp->npins,
+                                               group->data);
                if (err < 0) {
-                       dev_err(dev, "Failed to register group %s\n",
-                                       group->name);
+                       dev_err(dev, "Failed to register group %s\n", grp->name);
                        return err;
                }
        }
index 152c35bce8ecc0f119fa83e56c8ca793709e589d..b1349ee22799de0e141eb4692de062bd2885be7c 100644 (file)
@@ -945,7 +945,7 @@ static int keembay_set_mux(struct pinctrl_dev *pctldev, unsigned int fun_sel,
                return -EINVAL;
 
        /* Change modes for pins in the selected group */
-       pin = *grp->pins;
+       pin = *grp->grp.pins;
        pin_mode = *(u8 *)(func->data);
 
        val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
@@ -1517,7 +1517,7 @@ static int keembay_gpiochip_probe(struct keembay_pinctrl *kpc,
 
 static int keembay_build_groups(struct keembay_pinctrl *kpc)
 {
-       struct group_desc *grp;
+       struct pingroup *grp;
        unsigned int i;
 
        kpc->ngroups = kpc->npins;
@@ -1528,7 +1528,7 @@ static int keembay_build_groups(struct keembay_pinctrl *kpc)
        /* Each pin is categorised as one group */
        for (i = 0; i < kpc->ngroups; i++) {
                const struct pinctrl_pin_desc *pdesc = keembay_pins + i;
-               struct group_desc *kmb_grp = grp + i;
+               struct pingroup *kmb_grp = grp + i;
 
                kmb_grp->name = pdesc->name;
                kmb_grp->pins = (int *)&pdesc->number;
index 8267be7696352722e89a0e02927af2cb1376b08e..19cc0db771a5a1485e750f89e91862a3e5c8a480 100644 (file)
@@ -1955,6 +1955,10 @@ static const struct pcs_soc_data pinctrl_single_am654 = {
        .irq_status_mask = (1 << 30),   /* WKUP_EVT */
 };
 
+static const struct pcs_soc_data pinctrl_single_j7200 = {
+       .flags = PCS_CONTEXT_LOSS_OFF,
+};
+
 static const struct pcs_soc_data pinctrl_single = {
 };
 
@@ -1969,6 +1973,7 @@ static const struct of_device_id pcs_of_match[] = {
        { .compatible = "ti,omap3-padconf", .data = &pinctrl_single_omap_wkup },
        { .compatible = "ti,omap4-padconf", .data = &pinctrl_single_omap_wkup },
        { .compatible = "ti,omap5-padconf", .data = &pinctrl_single_omap_wkup },
+       { .compatible = "ti,j7200-padconf", .data = &pinctrl_single_j7200 },
        { .compatible = "pinctrl-single", .data = &pinctrl_single },
        { .compatible = "pinconf-single", .data = &pinconf_single },
        { },
diff --git a/drivers/pinctrl/pinctrl-tps6594.c b/drivers/pinctrl/pinctrl-tps6594.c
new file mode 100644 (file)
index 0000000..66985e5
--- /dev/null
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinmux and GPIO driver for tps6594 PMIC
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+
+#include <linux/mfd/tps6594.h>
+
+#define TPS6594_PINCTRL_PINS_NB 11
+
+#define TPS6594_PINCTRL_GPIO_FUNCTION 0
+#define TPS6594_PINCTRL_SCL_I2C2_CS_SPI_FUNCTION 1
+#define TPS6594_PINCTRL_TRIG_WDOG_FUNCTION 1
+#define TPS6594_PINCTRL_CLK32KOUT_FUNCTION 1
+#define TPS6594_PINCTRL_SCLK_SPMI_FUNCTION 1
+#define TPS6594_PINCTRL_SDATA_SPMI_FUNCTION 1
+#define TPS6594_PINCTRL_NERR_MCU_FUNCTION 1
+#define TPS6594_PINCTRL_PDOG_FUNCTION 1
+#define TPS6594_PINCTRL_SYNCCLKIN_FUNCTION 1
+#define TPS6594_PINCTRL_NRSTOUT_SOC_FUNCTION 2
+#define TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION 2
+#define TPS6594_PINCTRL_SDA_I2C2_SDO_SPI_FUNCTION 2
+#define TPS6594_PINCTRL_NERR_SOC_FUNCTION 2
+#define TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION 3
+#define TPS6594_PINCTRL_NSLEEP1_FUNCTION 4
+#define TPS6594_PINCTRL_NSLEEP2_FUNCTION 5
+#define TPS6594_PINCTRL_WKUP1_FUNCTION 6
+#define TPS6594_PINCTRL_WKUP2_FUNCTION 7
+
+/* Special muxval for recalcitrant pins */
+#define TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION_GPIO8 2
+#define TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION_GPIO8 3
+#define TPS6594_PINCTRL_CLK32KOUT_FUNCTION_GPIO9 3
+
+#define TPS6594_OFFSET_GPIO_SEL 5
+
+#define FUNCTION(fname, v)                                                                     \
+{                                                                                      \
+       .pinfunction = PINCTRL_PINFUNCTION(#fname,                                      \
+                                       tps6594_##fname##_func_group_names,             \
+                                       ARRAY_SIZE(tps6594_##fname##_func_group_names)),\
+       .muxval = v,                                                                    \
+}
+
+static const struct pinctrl_pin_desc tps6594_pins[TPS6594_PINCTRL_PINS_NB] = {
+       PINCTRL_PIN(0, "GPIO0"),   PINCTRL_PIN(1, "GPIO1"),
+       PINCTRL_PIN(2, "GPIO2"),   PINCTRL_PIN(3, "GPIO3"),
+       PINCTRL_PIN(4, "GPIO4"),   PINCTRL_PIN(5, "GPIO5"),
+       PINCTRL_PIN(6, "GPIO6"),   PINCTRL_PIN(7, "GPIO7"),
+       PINCTRL_PIN(8, "GPIO8"),   PINCTRL_PIN(9, "GPIO9"),
+       PINCTRL_PIN(10, "GPIO10"),
+};
+
+static const char *const tps6594_gpio_func_group_names[] = {
+       "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+       "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10",
+};
+
+static const char *const tps6594_nsleep1_func_group_names[] = {
+       "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+       "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10",
+};
+
+static const char *const tps6594_nsleep2_func_group_names[] = {
+       "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+       "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10",
+};
+
+static const char *const tps6594_wkup1_func_group_names[] = {
+       "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+       "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10",
+};
+
+static const char *const tps6594_wkup2_func_group_names[] = {
+       "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+       "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10",
+};
+
+static const char *const tps6594_scl_i2c2_cs_spi_func_group_names[] = {
+       "GPIO0",
+       "GPIO1",
+};
+
+static const char *const tps6594_nrstout_soc_func_group_names[] = {
+       "GPIO0",
+       "GPIO10",
+};
+
+static const char *const tps6594_trig_wdog_func_group_names[] = {
+       "GPIO1",
+       "GPIO10",
+};
+
+static const char *const tps6594_sda_i2c2_sdo_spi_func_group_names[] = {
+       "GPIO1",
+};
+
+static const char *const tps6594_clk32kout_func_group_names[] = {
+       "GPIO2",
+       "GPIO3",
+       "GPIO7",
+};
+
+static const char *const tps6594_nerr_soc_func_group_names[] = {
+       "GPIO2",
+};
+
+static const char *const tps6594_sclk_spmi_func_group_names[] = {
+       "GPIO4",
+};
+
+static const char *const tps6594_sdata_spmi_func_group_names[] = {
+       "GPIO5",
+};
+
+static const char *const tps6594_nerr_mcu_func_group_names[] = {
+       "GPIO6",
+};
+
+static const char *const tps6594_syncclkout_func_group_names[] = {
+       "GPIO7",
+       "GPIO9",
+};
+
+static const char *const tps6594_disable_wdog_func_group_names[] = {
+       "GPIO7",
+       "GPIO8",
+};
+
+static const char *const tps6594_pdog_func_group_names[] = {
+       "GPIO8",
+};
+
+static const char *const tps6594_syncclkin_func_group_names[] = {
+       "GPIO9",
+};
+
+struct tps6594_pinctrl_function {
+       struct pinfunction pinfunction;
+       u8 muxval;
+};
+
+static const struct tps6594_pinctrl_function pinctrl_functions[] = {
+       FUNCTION(gpio, TPS6594_PINCTRL_GPIO_FUNCTION),
+       FUNCTION(nsleep1, TPS6594_PINCTRL_NSLEEP1_FUNCTION),
+       FUNCTION(nsleep2, TPS6594_PINCTRL_NSLEEP2_FUNCTION),
+       FUNCTION(wkup1, TPS6594_PINCTRL_WKUP1_FUNCTION),
+       FUNCTION(wkup2, TPS6594_PINCTRL_WKUP2_FUNCTION),
+       FUNCTION(scl_i2c2_cs_spi, TPS6594_PINCTRL_SCL_I2C2_CS_SPI_FUNCTION),
+       FUNCTION(nrstout_soc, TPS6594_PINCTRL_NRSTOUT_SOC_FUNCTION),
+       FUNCTION(trig_wdog, TPS6594_PINCTRL_TRIG_WDOG_FUNCTION),
+       FUNCTION(sda_i2c2_sdo_spi, TPS6594_PINCTRL_SDA_I2C2_SDO_SPI_FUNCTION),
+       FUNCTION(clk32kout, TPS6594_PINCTRL_CLK32KOUT_FUNCTION),
+       FUNCTION(nerr_soc, TPS6594_PINCTRL_NERR_SOC_FUNCTION),
+       FUNCTION(sclk_spmi, TPS6594_PINCTRL_SCLK_SPMI_FUNCTION),
+       FUNCTION(sdata_spmi, TPS6594_PINCTRL_SDATA_SPMI_FUNCTION),
+       FUNCTION(nerr_mcu, TPS6594_PINCTRL_NERR_MCU_FUNCTION),
+       FUNCTION(syncclkout, TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION),
+       FUNCTION(disable_wdog, TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION),
+       FUNCTION(pdog, TPS6594_PINCTRL_PDOG_FUNCTION),
+       FUNCTION(syncclkin, TPS6594_PINCTRL_SYNCCLKIN_FUNCTION),
+};
+
+struct tps6594_pinctrl {
+       struct tps6594 *tps;
+       struct gpio_regmap *gpio_regmap;
+       struct pinctrl_dev *pctl_dev;
+       const struct tps6594_pinctrl_function *funcs;
+       const struct pinctrl_pin_desc *pins;
+};
+
+static int tps6594_gpio_regmap_xlate(struct gpio_regmap *gpio,
+                                    unsigned int base, unsigned int offset,
+                                    unsigned int *reg, unsigned int *mask)
+{
+       unsigned int line = offset % 8;
+       unsigned int stride = offset / 8;
+
+       switch (base) {
+       case TPS6594_REG_GPIOX_CONF(0):
+               *reg = TPS6594_REG_GPIOX_CONF(offset);
+               *mask = TPS6594_BIT_GPIO_DIR;
+               return 0;
+       case TPS6594_REG_GPIO_IN_1:
+       case TPS6594_REG_GPIO_OUT_1:
+               *reg = base + stride;
+               *mask = BIT(line);
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int tps6594_pmx_func_cnt(struct pinctrl_dev *pctldev)
+{
+       return ARRAY_SIZE(pinctrl_functions);
+}
+
+static const char *tps6594_pmx_func_name(struct pinctrl_dev *pctldev,
+                                        unsigned int selector)
+{
+       struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+       return pinctrl->funcs[selector].pinfunction.name;
+}
+
+static int tps6594_pmx_func_groups(struct pinctrl_dev *pctldev,
+                                  unsigned int selector,
+                                  const char *const **groups,
+                                  unsigned int *num_groups)
+{
+       struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+       *groups = pinctrl->funcs[selector].pinfunction.groups;
+       *num_groups = pinctrl->funcs[selector].pinfunction.ngroups;
+
+       return 0;
+}
+
+static int tps6594_pmx_set(struct tps6594_pinctrl *pinctrl, unsigned int pin,
+                          u8 muxval)
+{
+       u8 mux_sel_val = muxval << TPS6594_OFFSET_GPIO_SEL;
+
+       return regmap_update_bits(pinctrl->tps->regmap,
+                                 TPS6594_REG_GPIOX_CONF(pin),
+                                 TPS6594_MASK_GPIO_SEL, mux_sel_val);
+}
+
+static int tps6594_pmx_set_mux(struct pinctrl_dev *pctldev,
+                              unsigned int function, unsigned int group)
+{
+       struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+       u8 muxval = pinctrl->funcs[function].muxval;
+
+       /* Some pins don't have the same muxval for the same function... */
+       if (group == 8) {
+               if (muxval == TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION)
+                       muxval = TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION_GPIO8;
+               else if (muxval == TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION)
+                       muxval = TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION_GPIO8;
+       } else if (group == 9) {
+               if (muxval == TPS6594_PINCTRL_CLK32KOUT_FUNCTION)
+                       muxval = TPS6594_PINCTRL_CLK32KOUT_FUNCTION_GPIO9;
+       }
+
+       return tps6594_pmx_set(pinctrl, group, muxval);
+}
+
+static int tps6594_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+                                         struct pinctrl_gpio_range *range,
+                                         unsigned int offset, bool input)
+{
+       struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+       u8 muxval = pinctrl->funcs[TPS6594_PINCTRL_GPIO_FUNCTION].muxval;
+
+       return tps6594_pmx_set(pinctrl, offset, muxval);
+}
+
+static const struct pinmux_ops tps6594_pmx_ops = {
+       .get_functions_count = tps6594_pmx_func_cnt,
+       .get_function_name = tps6594_pmx_func_name,
+       .get_function_groups = tps6594_pmx_func_groups,
+       .set_mux = tps6594_pmx_set_mux,
+       .gpio_set_direction = tps6594_pmx_gpio_set_direction,
+       .strict = true,
+};
+
+static int tps6594_groups_cnt(struct pinctrl_dev *pctldev)
+{
+       return ARRAY_SIZE(tps6594_pins);
+}
+
+static int tps6594_group_pins(struct pinctrl_dev *pctldev,
+                             unsigned int selector, const unsigned int **pins,
+                             unsigned int *num_pins)
+{
+       struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+       *pins = &pinctrl->pins[selector].number;
+       *num_pins = 1;
+
+       return 0;
+}
+
+static const char *tps6594_group_name(struct pinctrl_dev *pctldev,
+                                     unsigned int selector)
+{
+       struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+       return pinctrl->pins[selector].name;
+}
+
+static const struct pinctrl_ops tps6594_pctrl_ops = {
+       .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+       .dt_free_map = pinconf_generic_dt_free_map,
+       .get_groups_count = tps6594_groups_cnt,
+       .get_group_name = tps6594_group_name,
+       .get_group_pins = tps6594_group_pins,
+};
+
+static int tps6594_pinctrl_probe(struct platform_device *pdev)
+{
+       struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
+       struct device *dev = &pdev->dev;
+       struct tps6594_pinctrl *pinctrl;
+       struct pinctrl_desc *pctrl_desc;
+       struct gpio_regmap_config config = {};
+
+       pctrl_desc = devm_kzalloc(dev, sizeof(*pctrl_desc), GFP_KERNEL);
+       if (!pctrl_desc)
+               return -ENOMEM;
+       pctrl_desc->name = dev_name(dev);
+       pctrl_desc->owner = THIS_MODULE;
+       pctrl_desc->pins = tps6594_pins;
+       pctrl_desc->npins = ARRAY_SIZE(tps6594_pins);
+       pctrl_desc->pctlops = &tps6594_pctrl_ops;
+       pctrl_desc->pmxops = &tps6594_pmx_ops;
+
+       pinctrl = devm_kzalloc(dev, sizeof(*pinctrl), GFP_KERNEL);
+       if (!pinctrl)
+               return -ENOMEM;
+       pinctrl->tps = dev_get_drvdata(dev->parent);
+       pinctrl->funcs = pinctrl_functions;
+       pinctrl->pins = tps6594_pins;
+       pinctrl->pctl_dev = devm_pinctrl_register(dev, pctrl_desc, pinctrl);
+       if (IS_ERR(pinctrl->pctl_dev))
+               return dev_err_probe(dev, PTR_ERR(pinctrl->pctl_dev),
+                                    "Couldn't register pinctrl driver\n");
+
+       config.parent = tps->dev;
+       config.regmap = tps->regmap;
+       config.ngpio = TPS6594_PINCTRL_PINS_NB;
+       config.ngpio_per_reg = 8;
+       config.reg_dat_base = TPS6594_REG_GPIO_IN_1;
+       config.reg_set_base = TPS6594_REG_GPIO_OUT_1;
+       config.reg_dir_out_base = TPS6594_REG_GPIOX_CONF(0);
+       config.reg_mask_xlate = tps6594_gpio_regmap_xlate;
+
+       pinctrl->gpio_regmap = devm_gpio_regmap_register(dev, &config);
+       if (IS_ERR(pinctrl->gpio_regmap))
+               return dev_err_probe(dev, PTR_ERR(pinctrl->gpio_regmap),
+                                    "Couldn't register gpio_regmap driver\n");
+
+       return 0;
+}
+
+static const struct platform_device_id tps6594_pinctrl_id_table[] = {
+       { "tps6594-pinctrl", },
+       {}
+};
+MODULE_DEVICE_TABLE(platform, tps6594_pinctrl_id_table);
+
+static struct platform_driver tps6594_pinctrl_driver = {
+       .probe = tps6594_pinctrl_probe,
+       .driver = {
+               .name = "tps6594-pinctrl",
+       },
+       .id_table = tps6594_pinctrl_id_table,
+};
+module_platform_driver(tps6594_pinctrl_driver);
+
+MODULE_AUTHOR("Esteban Blanc <eblanc@baylibre.com>");
+MODULE_DESCRIPTION("TPS6594 pinctrl and GPIO driver");
+MODULE_LICENSE("GPL");
index 40862f7bd6ca077e6d842a3afad34141a44932a7..d81d7b46116cc516260a7257fc73f4c0cfd3699a 100644 (file)
 #include "pinctrl-utils.h"
 
 int pinctrl_utils_reserve_map(struct pinctrl_dev *pctldev,
-               struct pinctrl_map **map, unsigned *reserved_maps,
-               unsigned *num_maps, unsigned reserve)
+               struct pinctrl_map **map, unsigned int *reserved_maps,
+               unsigned int *num_maps, unsigned int reserve)
 {
-       unsigned old_num = *reserved_maps;
-       unsigned new_num = *num_maps + reserve;
+       unsigned int old_num = *reserved_maps;
+       unsigned int new_num = *num_maps + reserve;
        struct pinctrl_map *new_map;
 
        if (old_num >= new_num)
@@ -43,8 +43,8 @@ int pinctrl_utils_reserve_map(struct pinctrl_dev *pctldev,
 EXPORT_SYMBOL_GPL(pinctrl_utils_reserve_map);
 
 int pinctrl_utils_add_map_mux(struct pinctrl_dev *pctldev,
-               struct pinctrl_map **map, unsigned *reserved_maps,
-               unsigned *num_maps, const char *group,
+               struct pinctrl_map **map, unsigned int *reserved_maps,
+               unsigned int *num_maps, const char *group,
                const char *function)
 {
        if (WARN_ON(*num_maps == *reserved_maps))
@@ -60,9 +60,9 @@ int pinctrl_utils_add_map_mux(struct pinctrl_dev *pctldev,
 EXPORT_SYMBOL_GPL(pinctrl_utils_add_map_mux);
 
 int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
-               struct pinctrl_map **map, unsigned *reserved_maps,
-               unsigned *num_maps, const char *group,
-               unsigned long *configs, unsigned num_configs,
+               struct pinctrl_map **map, unsigned int *reserved_maps,
+               unsigned int *num_maps, const char *group,
+               unsigned long *configs, unsigned int num_configs,
                enum pinctrl_map_type type)
 {
        unsigned long *dup_configs;
@@ -86,11 +86,11 @@ int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
 EXPORT_SYMBOL_GPL(pinctrl_utils_add_map_configs);
 
 int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
-               unsigned long **configs, unsigned *num_configs,
+               unsigned long **configs, unsigned int *num_configs,
                unsigned long config)
 {
-       unsigned old_num = *num_configs;
-       unsigned new_num = old_num + 1;
+       unsigned int old_num = *num_configs;
+       unsigned int new_num = old_num + 1;
        unsigned long *new_configs;
 
        new_configs = krealloc(*configs, sizeof(*new_configs) * new_num,
@@ -110,7 +110,7 @@ int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
 EXPORT_SYMBOL_GPL(pinctrl_utils_add_config);
 
 void pinctrl_utils_free_map(struct pinctrl_dev *pctldev,
-             struct pinctrl_map *map, unsigned num_maps)
+             struct pinctrl_map *map, unsigned int num_maps)
 {
        int i;
 
index 4108ee2dd6d02d66e0491b9b30f3bd03bc6de732..203fba257d71798a99e0e3fc0446986dc7eb967c 100644 (file)
@@ -15,21 +15,21 @@ struct pinctrl_dev;
 struct pinctrl_map;
 
 int pinctrl_utils_reserve_map(struct pinctrl_dev *pctldev,
-               struct pinctrl_map **map, unsigned *reserved_maps,
-               unsigned *num_maps, unsigned reserve);
+               struct pinctrl_map **map, unsigned int *reserved_maps,
+               unsigned int *num_maps, unsigned int reserve);
 int pinctrl_utils_add_map_mux(struct pinctrl_dev *pctldev,
-               struct pinctrl_map **map, unsigned *reserved_maps,
-               unsigned *num_maps, const char *group,
+               struct pinctrl_map **map, unsigned int *reserved_maps,
+               unsigned int *num_maps, const char *group,
                const char *function);
 int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
-               struct pinctrl_map **map, unsigned *reserved_maps,
-               unsigned *num_maps, const char *group,
-               unsigned long *configs, unsigned num_configs,
+               struct pinctrl_map **map, unsigned int *reserved_maps,
+               unsigned int *num_maps, const char *group,
+               unsigned long *configs, unsigned int num_configs,
                enum pinctrl_map_type type);
 int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
-               unsigned long **configs, unsigned *num_configs,
+               unsigned long **configs, unsigned int *num_configs,
                unsigned long config);
 void pinctrl_utils_free_map(struct pinctrl_dev *pctldev,
-               struct pinctrl_map *map, unsigned num_maps);
+               struct pinctrl_map *map, unsigned int num_maps);
 
 #endif /* __PINCTRL_UTILS_H__ */
index 23d2da0b99b928eaa93b9db9433c452ec754f6aa..abbb044d6acec66b299fea281c38cfb9f3480818 100644 (file)
@@ -35,8 +35,8 @@
 int pinmux_check_ops(struct pinctrl_dev *pctldev)
 {
        const struct pinmux_ops *ops = pctldev->desc->pmxops;
-       unsigned nfuncs;
-       unsigned selector = 0;
+       unsigned int nfuncs;
+       unsigned int selector = 0;
 
        /* Check that we implement required operations */
        if (!ops ||
@@ -84,7 +84,7 @@ int pinmux_validate_map(const struct pinctrl_map *map, int i)
  * Controllers not defined as strict will always return true,
  * menaning that the gpio can be used.
  */
-bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin)
+bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned int pin)
 {
        struct pin_desc *desc = pin_desc_get(pctldev, pin);
        const struct pinmux_ops *ops = pctldev->desc->pmxops;
@@ -262,7 +262,7 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
  */
 int pinmux_request_gpio(struct pinctrl_dev *pctldev,
                        struct pinctrl_gpio_range *range,
-                       unsigned pin, unsigned gpio)
+                       unsigned int pin, unsigned int gpio)
 {
        const char *owner;
        int ret;
@@ -285,7 +285,7 @@ int pinmux_request_gpio(struct pinctrl_dev *pctldev,
  * @pin: the affected currently GPIO-muxed in pin
  * @range: applicable GPIO range
  */
-void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned pin,
+void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned int pin,
                      struct pinctrl_gpio_range *range)
 {
        const char *owner;
@@ -303,7 +303,7 @@ void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned pin,
  */
 int pinmux_gpio_direction(struct pinctrl_dev *pctldev,
                          struct pinctrl_gpio_range *range,
-                         unsigned pin, bool input)
+                         unsigned int pin, bool input)
 {
        const struct pinmux_ops *ops;
        int ret;
@@ -322,8 +322,8 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
                                        const char *function)
 {
        const struct pinmux_ops *ops = pctldev->desc->pmxops;
-       unsigned nfuncs = ops->get_functions_count(pctldev);
-       unsigned selector = 0;
+       unsigned int nfuncs = ops->get_functions_count(pctldev);
+       unsigned int selector = 0;
 
        /* See if this pctldev has this function */
        while (selector < nfuncs) {
@@ -344,7 +344,7 @@ int pinmux_map_to_setting(const struct pinctrl_map *map,
        struct pinctrl_dev *pctldev = setting->pctldev;
        const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
        char const * const *groups;
-       unsigned num_groups;
+       unsigned int num_groups;
        int ret;
        const char *group;
 
@@ -409,8 +409,8 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting)
        const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
        const struct pinmux_ops *ops = pctldev->desc->pmxops;
        int ret = 0;
-       const unsigned *pins = NULL;
-       unsigned num_pins = 0;
+       const unsigned int *pins = NULL;
+       unsigned int num_pins = 0;
        int i;
        struct pin_desc *desc;
 
@@ -489,8 +489,8 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting)
        struct pinctrl_dev *pctldev = setting->pctldev;
        const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
        int ret = 0;
-       const unsigned *pins = NULL;
-       unsigned num_pins = 0;
+       const unsigned int *pins = NULL;
+       unsigned int num_pins = 0;
        int i;
        struct pin_desc *desc;
 
@@ -541,8 +541,8 @@ static int pinmux_functions_show(struct seq_file *s, void *what)
 {
        struct pinctrl_dev *pctldev = s->private;
        const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
-       unsigned nfuncs;
-       unsigned func_selector = 0;
+       unsigned int nfuncs;
+       unsigned int func_selector = 0;
 
        if (!pmxops)
                return 0;
@@ -553,7 +553,7 @@ static int pinmux_functions_show(struct seq_file *s, void *what)
                const char *func = pmxops->get_function_name(pctldev,
                                                          func_selector);
                const char * const *groups;
-               unsigned num_groups;
+               unsigned int num_groups;
                int ret;
                int i;
 
@@ -584,7 +584,7 @@ static int pinmux_pins_show(struct seq_file *s, void *what)
        struct pinctrl_dev *pctldev = s->private;
        const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
        const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
-       unsigned i, pin;
+       unsigned int i, pin;
 
        if (!pmxops)
                return 0;
@@ -818,7 +818,7 @@ EXPORT_SYMBOL_GPL(pinmux_generic_get_function_name);
 int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
                                       unsigned int selector,
                                       const char * const **groups,
-                                      unsigned * const num_groups)
+                                      unsigned int * const num_groups)
 {
        struct function_desc *function;
 
index ea6f99c24aa5e1eb441afb5d7200ff3d99928cbb..7c8aa25ccc80680ddb15c7d32ce721a5db5f43a1 100644 (file)
@@ -26,16 +26,16 @@ int pinmux_check_ops(struct pinctrl_dev *pctldev);
 
 int pinmux_validate_map(const struct pinctrl_map *map, int i);
 
-bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin);
+bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned int pin);
 
 int pinmux_request_gpio(struct pinctrl_dev *pctldev,
                        struct pinctrl_gpio_range *range,
-                       unsigned pin, unsigned gpio);
-void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned pin,
+                       unsigned int pin, unsigned int gpio);
+void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned int pin,
                      struct pinctrl_gpio_range *range);
 int pinmux_gpio_direction(struct pinctrl_dev *pctldev,
                          struct pinctrl_gpio_range *range,
-                         unsigned pin, bool input);
+                         unsigned int pin, bool input);
 
 int pinmux_map_to_setting(const struct pinctrl_map *map,
                          struct pinctrl_setting *setting);
@@ -56,27 +56,27 @@ static inline int pinmux_validate_map(const struct pinctrl_map *map, int i)
 }
 
 static inline bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev,
-                                              unsigned pin)
+                                              unsigned int pin)
 {
        return true;
 }
 
 static inline int pinmux_request_gpio(struct pinctrl_dev *pctldev,
                        struct pinctrl_gpio_range *range,
-                       unsigned pin, unsigned gpio)
+                       unsigned int pin, unsigned int gpio)
 {
        return 0;
 }
 
 static inline void pinmux_free_gpio(struct pinctrl_dev *pctldev,
-                                   unsigned pin,
+                                   unsigned int pin,
                                    struct pinctrl_gpio_range *range)
 {
 }
 
 static inline int pinmux_gpio_direction(struct pinctrl_dev *pctldev,
                                        struct pinctrl_gpio_range *range,
-                                       unsigned pin, bool input)
+                                       unsigned int pin, bool input)
 {
        return 0;
 }
@@ -154,7 +154,7 @@ pinmux_generic_get_function_name(struct pinctrl_dev *pctldev,
 int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
                                       unsigned int selector,
                                       const char * const **groups,
-                                      unsigned * const num_groups);
+                                      unsigned int * const num_groups);
 
 struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
                                                  unsigned int selector);
@@ -162,7 +162,7 @@ struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
 int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
                                const char *name,
                                const char * const *groups,
-                               unsigned const num_groups,
+                               unsigned int const num_groups,
                                void *data);
 
 int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
index f84c0d3b79517b0d0fbd6be265d3065e5868c654..e0f2829c15d6af163e4c7db0d6700212865653bc 100644 (file)
@@ -124,4 +124,14 @@ config PINCTRL_SM8550_LPASS_LPI
          (Low Power Island) found on the Qualcomm Technologies Inc SM8550
          platform.
 
+config PINCTRL_SM8650_LPASS_LPI
+       tristate "Qualcomm Technologies Inc SM8650 LPASS LPI pin controller driver"
+       depends on ARM64 || COMPILE_TEST
+       depends on PINCTRL_LPASS_LPI
+       help
+         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+         Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+         (Low Power Island) found on the Qualcomm Technologies Inc SM8650
+         platform.
+
 endif
index 01dd7b1343541eefcb67e04c1abe6ff85ef4806b..8fe459d082ede63039e32133d11762516469d285 100644 (file)
@@ -286,6 +286,14 @@ config PINCTRL_SDX75
          Qualcomm Technologies Inc TLMM block found on the Qualcomm
          Technologies Inc SDX75 platform.
 
+config PINCTRL_SM4450
+       tristate "Qualcomm Technologies Inc SM4450 pin controller driver"
+       depends on ARM64 || COMPILE_TEST
+       help
+         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+         Qualcomm Technologies Inc TLMM block found on the Qualcomm
+         Technologies Inc SM4450 platform.
+
 config PINCTRL_SM6115
        tristate "Qualcomm Technologies Inc SM6115,SM4250 pin controller driver"
        depends on ARM64 || COMPILE_TEST
@@ -366,4 +374,22 @@ config PINCTRL_SM8550
          Qualcomm Technologies Inc TLMM block found on the Qualcomm
          Technologies Inc SM8550 platform.
 
+config PINCTRL_SM8650
+       tristate "Qualcomm Technologies Inc SM8650 pin controller driver"
+       depends on ARM64 || COMPILE_TEST
+       help
+         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+         Qualcomm Technologies Inc TLMM block found on the Qualcomm
+         Technologies Inc SM8650 platform.
+
+config PINCTRL_X1E80100
+       tristate "Qualcomm Technologies Inc X1E80100 pin controller driver"
+       depends on ARM64 || COMPILE_TEST
+       help
+         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+         Qualcomm Technologies Inc Top Level Mode Multiplexer block (TLMM)
+         block found on the Qualcomm Technologies Inc X1E80100 platform.
+         Say Y here to compile statically, or M here to compile it as a module.
+         If unsure, say N.
+
 endif
index 5910e08c84cecafbce1f918f35ba3cecdf91ea2f..e2e76071d26844183cb9f2ef67b6001eac3078e5 100644 (file)
@@ -43,6 +43,7 @@ obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
 obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
 obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o
 obj-$(CONFIG_PINCTRL_SDX75) += pinctrl-sdx75.o
+obj-$(CONFIG_PINCTRL_SM4450) += pinctrl-sm4450.o
 obj-$(CONFIG_PINCTRL_SM6115) += pinctrl-sm6115.o
 obj-$(CONFIG_PINCTRL_SM6115_LPASS_LPI) += pinctrl-sm6115-lpass-lpi.o
 obj-$(CONFIG_PINCTRL_SM6125) += pinctrl-sm6125.o
@@ -58,5 +59,8 @@ obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o
 obj-$(CONFIG_PINCTRL_SM8450_LPASS_LPI) += pinctrl-sm8450-lpass-lpi.o
 obj-$(CONFIG_PINCTRL_SM8550) += pinctrl-sm8550.o
 obj-$(CONFIG_PINCTRL_SM8550_LPASS_LPI) += pinctrl-sm8550-lpass-lpi.o
+obj-$(CONFIG_PINCTRL_SM8650) += pinctrl-sm8650.o
+obj-$(CONFIG_PINCTRL_SM8650_LPASS_LPI) += pinctrl-sm8650-lpass-lpi.o
 obj-$(CONFIG_PINCTRL_SC8280XP_LPASS_LPI) += pinctrl-sc8280xp-lpass-lpi.o
 obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
+obj-$(CONFIG_PINCTRL_X1E80100) += pinctrl-x1e80100.o
index 9651aed048cf4bb64670cdc93120356e009c7f76..0d98008e33eea1cc053c3adb4d7f0dbc15e1fd98 100644 (file)
@@ -186,6 +186,41 @@ static int lpi_config_get(struct pinctrl_dev *pctldev,
        return 0;
 }
 
+static int lpi_config_set_slew_rate(struct lpi_pinctrl *pctrl,
+                                   const struct lpi_pingroup *g,
+                                   unsigned int group, unsigned int slew)
+{
+       unsigned long sval;
+       void __iomem *reg;
+       int slew_offset;
+
+       if (slew > LPI_SLEW_RATE_MAX) {
+               dev_err(pctrl->dev, "invalid slew rate %u for pin: %d\n",
+                       slew, group);
+               return -EINVAL;
+       }
+
+       slew_offset = g->slew_offset;
+       if (slew_offset == LPI_NO_SLEW)
+               return 0;
+
+       if (pctrl->data->flags & LPI_FLAG_SLEW_RATE_SAME_REG)
+               reg = pctrl->tlmm_base + LPI_TLMM_REG_OFFSET * group + LPI_GPIO_CFG_REG;
+       else
+               reg = pctrl->slew_base + LPI_SLEW_RATE_CTL_REG;
+
+       mutex_lock(&pctrl->lock);
+
+       sval = ioread32(reg);
+       sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
+       sval |= slew << slew_offset;
+       iowrite32(sval, reg);
+
+       mutex_unlock(&pctrl->lock);
+
+       return 0;
+}
+
 static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
                          unsigned long *configs, unsigned int nconfs)
 {
@@ -193,8 +228,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
        unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2;
        bool value, output_enabled = false;
        const struct lpi_pingroup *g;
-       unsigned long sval;
-       int i, slew_offset;
+       int i, ret;
        u32 val;
 
        g = &pctrl->data->groups[group];
@@ -226,24 +260,9 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
                        strength = arg;
                        break;
                case PIN_CONFIG_SLEW_RATE:
-                       if (arg > LPI_SLEW_RATE_MAX) {
-                               dev_err(pctldev->dev, "invalid slew rate %u for pin: %d\n",
-                                       arg, group);
-                               return -EINVAL;
-                       }
-
-                       slew_offset = g->slew_offset;
-                       if (slew_offset == LPI_NO_SLEW)
-                               break;
-
-                       mutex_lock(&pctrl->lock);
-
-                       sval = ioread32(pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
-                       sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
-                       sval |= arg << slew_offset;
-                       iowrite32(sval, pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
-
-                       mutex_unlock(&pctrl->lock);
+                       ret = lpi_config_set_slew_rate(pctrl, g, group, arg);
+                       if (ret)
+                               return ret;
                        break;
                default:
                        return -EINVAL;
@@ -319,7 +338,6 @@ static void lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
 }
 
 #ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
 
 static unsigned int lpi_regval_to_drive(u32 val)
 {
@@ -439,10 +457,12 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
                return dev_err_probe(dev, PTR_ERR(pctrl->tlmm_base),
                                     "TLMM resource not provided\n");
 
-       pctrl->slew_base = devm_platform_ioremap_resource(pdev, 1);
-       if (IS_ERR(pctrl->slew_base))
-               return dev_err_probe(dev, PTR_ERR(pctrl->slew_base),
-                                    "Slew resource not provided\n");
+       if (!(data->flags & LPI_FLAG_SLEW_RATE_SAME_REG)) {
+               pctrl->slew_base = devm_platform_ioremap_resource(pdev, 1);
+               if (IS_ERR(pctrl->slew_base))
+                       return dev_err_probe(dev, PTR_ERR(pctrl->slew_base),
+                                            "Slew resource not provided\n");
+       }
 
        ret = devm_clk_bulk_get_optional(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
        if (ret)
index 387d83ee95b5df7d9e3337dd91de1f211d721a2d..a9b2f65c1ebe0f8fb5d7814f8ef8b723c617c85b 100644 (file)
@@ -6,8 +6,8 @@
 #ifndef __PINCTRL_LPASS_LPI_H__
 #define __PINCTRL_LPASS_LPI_H__
 
+#include <linux/array_size.h>
 #include <linux/bits.h>
-#include <linux/kernel.h>
 
 #include "../core.h"
 
@@ -45,11 +45,8 @@ struct pinctrl_pin_desc;
 
 #define LPI_PINGROUP(id, soff, f1, f2, f3, f4)         \
        {                                               \
-               .group.name = "gpio" #id,                       \
-               .group.pins = gpio##id##_pins,          \
                .pin = id,                              \
                .slew_offset = soff,                    \
-               .group.num_pins = ARRAY_SIZE(gpio##id##_pins),  \
                .funcs = (int[]){                       \
                        LPI_MUX_gpio,                   \
                        LPI_MUX_##f1,                   \
@@ -60,8 +57,13 @@ struct pinctrl_pin_desc;
                .nfuncs = 5,                            \
        }
 
+/*
+ * Slew rate control is done in the same register as rest of the
+ * pin configuration.
+ */
+#define LPI_FLAG_SLEW_RATE_SAME_REG                    BIT(0)
+
 struct lpi_pingroup {
-       struct group_desc group;
        unsigned int pin;
        /* Bit offset in slew register for SoundWire pins only */
        int slew_offset;
@@ -82,6 +84,7 @@ struct lpi_pinctrl_variant_data {
        int ngroups;
        const struct lpi_function *functions;
        int nfunctions;
+       unsigned int flags;
 };
 
 int lpi_pinctrl_probe(struct platform_device *pdev);
index 395040346d0f4df8cf8def0810828e2e88313daf..aeaf0d1958f56a614dfbbf6658f7676129657e8c 100644 (file)
@@ -358,6 +358,10 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
        int ret;
        u32 val;
 
+       /* Pin information can only be requested from valid pin groups */
+       if (!gpiochip_line_is_valid(&pctrl->chip, group))
+               return -EINVAL;
+
        g = &pctrl->soc->groups[group];
 
        ret = msm_config_reg(pctrl, g, param, &mask, &bit);
@@ -1196,6 +1200,8 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+       const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq];
+       unsigned long flags;
        int ret;
 
        if (!try_module_get(gc->owner))
@@ -1221,6 +1227,28 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
         */
        irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
 
+       /*
+        * If the wakeup_enable bit is present and marked as available for the
+        * requested GPIO, it should be enabled when the GPIO is marked as
+        * wake irq in order to allow the interrupt event to be transferred to
+        * the PDC HW.
+        * While the name implies only the wakeup event, it's also required for
+        * the interrupt event.
+        */
+       if (test_bit(d->hwirq, pctrl->skip_wake_irqs) && g->intr_wakeup_present_bit) {
+               u32 intr_cfg;
+
+               raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+               intr_cfg = msm_readl_intr_cfg(pctrl, g);
+               if (intr_cfg & BIT(g->intr_wakeup_present_bit)) {
+                       intr_cfg |= BIT(g->intr_wakeup_enable_bit);
+                       msm_writel_intr_cfg(intr_cfg, pctrl, g);
+               }
+
+               raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+       }
+
        return 0;
 out:
        module_put(gc->owner);
@@ -1230,6 +1258,24 @@ out:
 static void msm_gpio_irq_relres(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+       const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq];
+       unsigned long flags;
+
+       /* Disable the wakeup_enable bit if it has been set in msm_gpio_irq_reqres() */
+       if (test_bit(d->hwirq, pctrl->skip_wake_irqs) && g->intr_wakeup_present_bit) {
+               u32 intr_cfg;
+
+               raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+               intr_cfg = msm_readl_intr_cfg(pctrl, g);
+               if (intr_cfg & BIT(g->intr_wakeup_present_bit)) {
+                       intr_cfg &= ~BIT(g->intr_wakeup_enable_bit);
+                       msm_writel_intr_cfg(intr_cfg, pctrl, g);
+               }
+
+               raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+       }
 
        gpiochip_unlock_as_irq(gc, d->hwirq);
        module_put(gc->owner);
index 4968d08a384da90917dd3c1797311c5742d36ccd..63852ed702957b8111b364267af17737b7a582a1 100644 (file)
@@ -58,6 +58,9 @@ struct pinctrl_pin_desc;
  * @intr_enable_bit:      Offset in @intr_cfg_reg for enabling the interrupt for this group.
  * @intr_status_bit:      Offset in @intr_status_reg for reading and acking the interrupt
  *                        status.
+ * @intr_wakeup_present_bit: Offset in @intr_target_reg specifying the GPIO can generate
+ *                       wakeup events.
+ * @intr_wakeup_enable_bit: Offset in @intr_target_reg to enable wakeup events for the GPIO.
  * @intr_target_bit:      Offset in @intr_target_reg for configuring the interrupt routing.
  * @intr_target_width:    Number of bits used for specifying interrupt routing target.
  * @intr_target_kpss_val: Value in @intr_target_bit for specifying that the interrupt from
@@ -100,6 +103,8 @@ struct msm_pingroup {
        unsigned intr_status_bit:5;
        unsigned intr_ack_high:1;
 
+       unsigned intr_wakeup_present_bit:5;
+       unsigned intr_wakeup_enable_bit:5;
        unsigned intr_target_bit:5;
        unsigned intr_target_width:5;
        unsigned intr_target_kpss_val:5;
index 99156217c6a5a0e84e76104aacfb1281b7845227..6bb39812e1d8b31a002e84a6df44a8a28d62d11c 100644 (file)
@@ -36,22 +36,6 @@ enum lpass_lpi_functions {
        LPI_MUX__,
 };
 
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-
 static const struct pinctrl_pin_desc sc7280_lpi_pins[] = {
        PINCTRL_PIN(0, "gpio0"),
        PINCTRL_PIN(1, "gpio1"),
index b33483056f42337ed9e661766bd641c94b70836b..c0369baf33989629405c92df9e14f9524715475e 100644 (file)
@@ -45,26 +45,6 @@ enum lpass_lpi_functions {
        LPI_MUX__,
 };
 
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-static int gpio15_pins[] = { 15 };
-static int gpio16_pins[] = { 16 };
-static int gpio17_pins[] = { 17 };
-static int gpio18_pins[] = { 18 };
-
 static const struct pinctrl_pin_desc sc8280xp_lpi_pins[] = {
        PINCTRL_PIN(0, "gpio0"),
        PINCTRL_PIN(1, "gpio1"),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm4450.c b/drivers/pinctrl/qcom/pinctrl-sm4450.c
new file mode 100644 (file)
index 0000000..27317b8
--- /dev/null
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-msm.h"
+
+#define REG_SIZE 0x1000
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9)       \
+       {                                               \
+               .grp = PINCTRL_PINGROUP("gpio" #id,     \
+                       gpio##id##_pins,                \
+                       ARRAY_SIZE(gpio##id##_pins)),   \
+               .funcs = (int[]){                       \
+                       msm_mux_gpio, /* gpio mode */   \
+                       msm_mux_##f1,                   \
+                       msm_mux_##f2,                   \
+                       msm_mux_##f3,                   \
+                       msm_mux_##f4,                   \
+                       msm_mux_##f5,                   \
+                       msm_mux_##f6,                   \
+                       msm_mux_##f7,                   \
+                       msm_mux_##f8,                   \
+                       msm_mux_##f9                    \
+               },                                      \
+               .nfuncs = 10,                           \
+               .ctl_reg = REG_SIZE * id,                       \
+               .io_reg = 0x4 + REG_SIZE * id,          \
+               .intr_cfg_reg = 0x8 + REG_SIZE * id,            \
+               .intr_status_reg = 0xc + REG_SIZE * id, \
+               .intr_target_reg = 0x8 + REG_SIZE * id, \
+               .mux_bit = 2,                   \
+               .pull_bit = 0,                  \
+               .drv_bit = 6,                   \
+               .egpio_enable = 12,             \
+               .egpio_present = 11,            \
+               .oe_bit = 9,                    \
+               .in_bit = 0,                    \
+               .out_bit = 1,                   \
+               .intr_enable_bit = 0,           \
+               .intr_status_bit = 0,           \
+               .intr_target_bit = 5,           \
+               .intr_target_kpss_val = 3,      \
+               .intr_raw_status_bit = 4,       \
+               .intr_polarity_bit = 1,         \
+               .intr_detection_bit = 2,        \
+               .intr_detection_width = 2,      \
+       }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)     \
+       {                                               \
+               .grp = PINCTRL_PINGROUP(#pg_name,       \
+                       pg_name##_pins,                 \
+                       ARRAY_SIZE(pg_name##_pins)),    \
+               .ctl_reg = ctl,                         \
+               .io_reg = 0,                            \
+               .intr_cfg_reg = 0,                      \
+               .intr_status_reg = 0,                   \
+               .intr_target_reg = 0,                   \
+               .mux_bit = -1,                          \
+               .pull_bit = pull,                       \
+               .drv_bit = drv,                         \
+               .oe_bit = -1,                           \
+               .in_bit = -1,                           \
+               .out_bit = -1,                          \
+               .intr_enable_bit = -1,                  \
+               .intr_status_bit = -1,                  \
+               .intr_target_bit = -1,                  \
+               .intr_raw_status_bit = -1,              \
+               .intr_polarity_bit = -1,                \
+               .intr_detection_bit = -1,               \
+               .intr_detection_width = -1,             \
+       }
+
+#define UFS_RESET(pg_name, offset)                             \
+       {                                               \
+               .grp = PINCTRL_PINGROUP(#pg_name,       \
+                       pg_name##_pins,                 \
+                       ARRAY_SIZE(pg_name##_pins)),    \
+               .ctl_reg = offset,                      \
+               .io_reg = offset + 0x4,                 \
+               .intr_cfg_reg = 0,                      \
+               .intr_status_reg = 0,                   \
+               .intr_target_reg = 0,                   \
+               .mux_bit = -1,                          \
+               .pull_bit = 3,                          \
+               .drv_bit = 0,                           \
+               .oe_bit = -1,                           \
+               .in_bit = -1,                           \
+               .out_bit = 0,                           \
+               .intr_enable_bit = -1,                  \
+               .intr_status_bit = -1,                  \
+               .intr_target_bit = -1,                  \
+               .intr_raw_status_bit = -1,              \
+               .intr_polarity_bit = -1,                \
+               .intr_detection_bit = -1,               \
+               .intr_detection_width = -1,             \
+       }
+
+#define QUP_I3C(qup_mode, qup_offset)                  \
+       {                                               \
+               .mode = qup_mode,                       \
+               .offset = qup_offset,                   \
+       }
+
+
+static const struct pinctrl_pin_desc sm4450_pins[] = {
+       PINCTRL_PIN(0, "GPIO_0"),
+       PINCTRL_PIN(1, "GPIO_1"),
+       PINCTRL_PIN(2, "GPIO_2"),
+       PINCTRL_PIN(3, "GPIO_3"),
+       PINCTRL_PIN(4, "GPIO_4"),
+       PINCTRL_PIN(5, "GPIO_5"),
+       PINCTRL_PIN(6, "GPIO_6"),
+       PINCTRL_PIN(7, "GPIO_7"),
+       PINCTRL_PIN(8, "GPIO_8"),
+       PINCTRL_PIN(9, "GPIO_9"),
+       PINCTRL_PIN(10, "GPIO_10"),
+       PINCTRL_PIN(11, "GPIO_11"),
+       PINCTRL_PIN(12, "GPIO_12"),
+       PINCTRL_PIN(13, "GPIO_13"),
+       PINCTRL_PIN(14, "GPIO_14"),
+       PINCTRL_PIN(15, "GPIO_15"),
+       PINCTRL_PIN(16, "GPIO_16"),
+       PINCTRL_PIN(17, "GPIO_17"),
+       PINCTRL_PIN(18, "GPIO_18"),
+       PINCTRL_PIN(19, "GPIO_19"),
+       PINCTRL_PIN(20, "GPIO_20"),
+       PINCTRL_PIN(21, "GPIO_21"),
+       PINCTRL_PIN(22, "GPIO_22"),
+       PINCTRL_PIN(23, "GPIO_23"),
+       PINCTRL_PIN(24, "GPIO_24"),
+       PINCTRL_PIN(25, "GPIO_25"),
+       PINCTRL_PIN(26, "GPIO_26"),
+       PINCTRL_PIN(27, "GPIO_27"),
+       PINCTRL_PIN(28, "GPIO_28"),
+       PINCTRL_PIN(29, "GPIO_29"),
+       PINCTRL_PIN(30, "GPIO_30"),
+       PINCTRL_PIN(31, "GPIO_31"),
+       PINCTRL_PIN(32, "GPIO_32"),
+       PINCTRL_PIN(33, "GPIO_33"),
+       PINCTRL_PIN(34, "GPIO_34"),
+       PINCTRL_PIN(35, "GPIO_35"),
+       PINCTRL_PIN(36, "GPIO_36"),
+       PINCTRL_PIN(37, "GPIO_37"),
+       PINCTRL_PIN(38, "GPIO_38"),
+       PINCTRL_PIN(39, "GPIO_39"),
+       PINCTRL_PIN(40, "GPIO_40"),
+       PINCTRL_PIN(41, "GPIO_41"),
+       PINCTRL_PIN(42, "GPIO_42"),
+       PINCTRL_PIN(43, "GPIO_43"),
+       PINCTRL_PIN(44, "GPIO_44"),
+       PINCTRL_PIN(45, "GPIO_45"),
+       PINCTRL_PIN(46, "GPIO_46"),
+       PINCTRL_PIN(47, "GPIO_47"),
+       PINCTRL_PIN(48, "GPIO_48"),
+       PINCTRL_PIN(49, "GPIO_49"),
+       PINCTRL_PIN(50, "GPIO_50"),
+       PINCTRL_PIN(51, "GPIO_51"),
+       PINCTRL_PIN(52, "GPIO_52"),
+       PINCTRL_PIN(53, "GPIO_53"),
+       PINCTRL_PIN(54, "GPIO_54"),
+       PINCTRL_PIN(55, "GPIO_55"),
+       PINCTRL_PIN(56, "GPIO_56"),
+       PINCTRL_PIN(57, "GPIO_57"),
+       PINCTRL_PIN(58, "GPIO_58"),
+       PINCTRL_PIN(59, "GPIO_59"),
+       PINCTRL_PIN(60, "GPIO_60"),
+       PINCTRL_PIN(61, "GPIO_61"),
+       PINCTRL_PIN(62, "GPIO_62"),
+       PINCTRL_PIN(63, "GPIO_63"),
+       PINCTRL_PIN(64, "GPIO_64"),
+       PINCTRL_PIN(65, "GPIO_65"),
+       PINCTRL_PIN(66, "GPIO_66"),
+       PINCTRL_PIN(67, "GPIO_67"),
+       PINCTRL_PIN(68, "GPIO_68"),
+       PINCTRL_PIN(69, "GPIO_69"),
+       PINCTRL_PIN(70, "GPIO_70"),
+       PINCTRL_PIN(71, "GPIO_71"),
+       PINCTRL_PIN(72, "GPIO_72"),
+       PINCTRL_PIN(73, "GPIO_73"),
+       PINCTRL_PIN(74, "GPIO_74"),
+       PINCTRL_PIN(75, "GPIO_75"),
+       PINCTRL_PIN(76, "GPIO_76"),
+       PINCTRL_PIN(77, "GPIO_77"),
+       PINCTRL_PIN(78, "GPIO_78"),
+       PINCTRL_PIN(79, "GPIO_79"),
+       PINCTRL_PIN(80, "GPIO_80"),
+       PINCTRL_PIN(81, "GPIO_81"),
+       PINCTRL_PIN(82, "GPIO_82"),
+       PINCTRL_PIN(83, "GPIO_83"),
+       PINCTRL_PIN(84, "GPIO_84"),
+       PINCTRL_PIN(85, "GPIO_85"),
+       PINCTRL_PIN(86, "GPIO_86"),
+       PINCTRL_PIN(87, "GPIO_87"),
+       PINCTRL_PIN(88, "GPIO_88"),
+       PINCTRL_PIN(89, "GPIO_89"),
+       PINCTRL_PIN(90, "GPIO_90"),
+       PINCTRL_PIN(91, "GPIO_91"),
+       PINCTRL_PIN(92, "GPIO_92"),
+       PINCTRL_PIN(93, "GPIO_93"),
+       PINCTRL_PIN(94, "GPIO_94"),
+       PINCTRL_PIN(95, "GPIO_95"),
+       PINCTRL_PIN(96, "GPIO_96"),
+       PINCTRL_PIN(97, "GPIO_97"),
+       PINCTRL_PIN(98, "GPIO_98"),
+       PINCTRL_PIN(99, "GPIO_99"),
+       PINCTRL_PIN(100, "GPIO_100"),
+       PINCTRL_PIN(101, "GPIO_101"),
+       PINCTRL_PIN(102, "GPIO_102"),
+       PINCTRL_PIN(103, "GPIO_103"),
+       PINCTRL_PIN(104, "GPIO_104"),
+       PINCTRL_PIN(105, "GPIO_105"),
+       PINCTRL_PIN(106, "GPIO_106"),
+       PINCTRL_PIN(107, "GPIO_107"),
+       PINCTRL_PIN(108, "GPIO_108"),
+       PINCTRL_PIN(109, "GPIO_109"),
+       PINCTRL_PIN(110, "GPIO_110"),
+       PINCTRL_PIN(111, "GPIO_111"),
+       PINCTRL_PIN(112, "GPIO_112"),
+       PINCTRL_PIN(113, "GPIO_113"),
+       PINCTRL_PIN(114, "GPIO_114"),
+       PINCTRL_PIN(115, "GPIO_115"),
+       PINCTRL_PIN(116, "GPIO_116"),
+       PINCTRL_PIN(117, "GPIO_117"),
+       PINCTRL_PIN(118, "GPIO_118"),
+       PINCTRL_PIN(119, "GPIO_119"),
+       PINCTRL_PIN(120, "GPIO_120"),
+       PINCTRL_PIN(121, "GPIO_121"),
+       PINCTRL_PIN(122, "GPIO_122"),
+       PINCTRL_PIN(123, "GPIO_123"),
+       PINCTRL_PIN(124, "GPIO_124"),
+       PINCTRL_PIN(125, "GPIO_125"),
+       PINCTRL_PIN(126, "GPIO_126"),
+       PINCTRL_PIN(127, "GPIO_127"),
+       PINCTRL_PIN(128, "GPIO_128"),
+       PINCTRL_PIN(129, "GPIO_129"),
+       PINCTRL_PIN(130, "GPIO_130"),
+       PINCTRL_PIN(131, "GPIO_131"),
+       PINCTRL_PIN(132, "GPIO_132"),
+       PINCTRL_PIN(133, "GPIO_133"),
+       PINCTRL_PIN(134, "GPIO_134"),
+       PINCTRL_PIN(135, "GPIO_135"),
+       PINCTRL_PIN(136, "UFS_RESET"),
+       PINCTRL_PIN(137, "SDC1_RCLK"),
+       PINCTRL_PIN(138, "SDC1_CLK"),
+       PINCTRL_PIN(139, "SDC1_CMD"),
+       PINCTRL_PIN(140, "SDC1_DATA"),
+       PINCTRL_PIN(141, "SDC2_CLK"),
+       PINCTRL_PIN(142, "SDC2_CMD"),
+       PINCTRL_PIN(143, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+       static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+
+static const unsigned int ufs_reset_pins[] = { 136 };
+static const unsigned int sdc1_rclk_pins[] = { 137 };
+static const unsigned int sdc1_clk_pins[] = { 138 };
+static const unsigned int sdc1_cmd_pins[] = { 139 };
+static const unsigned int sdc1_data_pins[] = { 140 };
+static const unsigned int sdc2_clk_pins[] = { 141 };
+static const unsigned int sdc2_cmd_pins[] = { 142 };
+static const unsigned int sdc2_data_pins[] = { 143 };
+
+enum sm4450_functions {
+       msm_mux_gpio,
+       msm_mux_atest_char,
+       msm_mux_atest_usb0,
+       msm_mux_audio_ref_clk,
+       msm_mux_cam_mclk,
+       msm_mux_cci_async_in0,
+       msm_mux_cci_i2c,
+       msm_mux_cci,
+       msm_mux_cmu_rng,
+       msm_mux_coex_uart1_rx,
+       msm_mux_coex_uart1_tx,
+       msm_mux_cri_trng,
+       msm_mux_dbg_out_clk,
+       msm_mux_ddr_bist,
+       msm_mux_ddr_pxi0_test,
+       msm_mux_ddr_pxi1_test,
+       msm_mux_gcc_gp1_clk,
+       msm_mux_gcc_gp2_clk,
+       msm_mux_gcc_gp3_clk,
+       msm_mux_host2wlan_sol,
+       msm_mux_ibi_i3c_qup0,
+       msm_mux_ibi_i3c_qup1,
+       msm_mux_jitter_bist_ref,
+       msm_mux_mdp_vsync0_out,
+       msm_mux_mdp_vsync1_out,
+       msm_mux_mdp_vsync2_out,
+       msm_mux_mdp_vsync3_out,
+       msm_mux_mdp_vsync,
+       msm_mux_nav,
+       msm_mux_pcie0_clk_req,
+       msm_mux_phase_flag,
+       msm_mux_pll_bist_sync,
+       msm_mux_pll_clk_aux,
+       msm_mux_prng_rosc,
+       msm_mux_qdss_cti_trig0,
+       msm_mux_qdss_cti_trig1,
+       msm_mux_qdss_gpio,
+       msm_mux_qlink0_enable,
+       msm_mux_qlink0_request,
+       msm_mux_qlink0_wmss_reset,
+       msm_mux_qup0_se0,
+       msm_mux_qup0_se1,
+       msm_mux_qup0_se2,
+       msm_mux_qup0_se3,
+       msm_mux_qup0_se4,
+       msm_mux_qup1_se0,
+       msm_mux_qup1_se1,
+       msm_mux_qup1_se2,
+       msm_mux_qup1_se3,
+       msm_mux_qup1_se4,
+       msm_mux_sd_write_protect,
+       msm_mux_tb_trig_sdc1,
+       msm_mux_tb_trig_sdc2,
+       msm_mux_tgu_ch0_trigout,
+       msm_mux_tgu_ch1_trigout,
+       msm_mux_tgu_ch2_trigout,
+       msm_mux_tgu_ch3_trigout,
+       msm_mux_tmess_prng,
+       msm_mux_tsense_pwm1_out,
+       msm_mux_tsense_pwm2_out,
+       msm_mux_uim0,
+       msm_mux_uim1,
+       msm_mux_usb0_hs_ac,
+       msm_mux_usb0_phy_ps,
+       msm_mux_vfr_0_mira,
+       msm_mux_vfr_0_mirb,
+       msm_mux_vfr_1,
+       msm_mux_vsense_trigger_mirnat,
+       msm_mux_wlan1_adc_dtest0,
+       msm_mux_wlan1_adc_dtest1,
+       msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+       "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+       "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+       "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+       "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+       "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+       "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+       "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+       "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+       "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+       "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+       "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+       "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+       "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+       "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+       "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+       "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+       "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+       "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+       "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+       "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+       "gpio135",
+};
+static const char * const atest_char_groups[] = {
+       "gpio95", "gpio97", "gpio98", "gpio99", "gpio100",
+};
+static const char * const atest_usb0_groups[] = {
+       "gpio75", "gpio10", "gpio78", "gpio79", "gpio80",
+};
+static const char * const audio_ref_clk_groups[] = {
+       "gpio71",
+};
+static const char * const cam_mclk_groups[] = {
+       "gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const cci_async_in0_groups[] = {
+       "gpio40",
+};
+static const char * const cci_i2c_groups[] = {
+       "gpio45", "gpio47", "gpio49", "gpio44",
+       "gpio46", "gpio48",
+};
+static const char * const cci_groups[] = {
+       "gpio40", "gpio41", "gpio42", "gpio43",
+};
+static const char * const cmu_rng_groups[] = {
+       "gpio28", "gpio3", "gpio1", "gpio0",
+};
+static const char * const coex_uart1_rx_groups[] = {
+       "gpio54",
+};
+static const char * const coex_uart1_tx_groups[] = {
+       "gpio55",
+};
+static const char * const cri_trng_groups[] = {
+       "gpio42", "gpio40", "gpio41",
+};
+static const char * const dbg_out_clk_groups[] = {
+       "gpio80",
+};
+static const char * const ddr_bist_groups[] = {
+       "gpio32", "gpio29", "gpio30", "gpio31",
+};
+static const char * const ddr_pxi0_test_groups[] = {
+       "gpio90", "gpio127",
+};
+static const char * const ddr_pxi1_test_groups[] = {
+       "gpio118", "gpio122",
+};
+static const char * const gcc_gp1_clk_groups[] = {
+       "gpio37", "gpio48",
+};
+static const char * const gcc_gp2_clk_groups[] = {
+       "gpio30", "gpio49",
+};
+static const char * const gcc_gp3_clk_groups[] = {
+       "gpio3", "gpio50",
+};
+static const char * const host2wlan_sol_groups[] = {
+       "gpio106",
+};
+static const char * const ibi_i3c_qup0_groups[] = {
+       "gpio4", "gpio5",
+};
+static const char * const ibi_i3c_qup1_groups[] = {
+       "gpio0", "gpio1",
+};
+static const char * const jitter_bist_ref_groups[] = {
+       "gpio90",
+};
+static const char * const mdp_vsync0_out_groups[] = {
+       "gpio93",
+};
+static const char * const mdp_vsync1_out_groups[] = {
+       "gpio93",
+};
+static const char * const mdp_vsync2_out_groups[] = {
+       "gpio22",
+};
+static const char * const mdp_vsync3_out_groups[] = {
+       "gpio22",
+};
+static const char * const mdp_vsync_groups[] = {
+       "gpio26", "gpio22", "gpio30", "gpio34", "gpio93", "gpio97",
+};
+static const char * const nav_groups[] = {
+       "gpio81", "gpio83", "gpio84",
+};
+static const char * const pcie0_clk_req_groups[] = {
+       "gpio107",
+};
+static const char * const phase_flag_groups[] = {
+       "gpio7", "gpio8", "gpio9", "gpio11", "gpio13", "gpio14", "gpio15",
+       "gpio17", "gpio18", "gpio19", "gpio21", "gpio24", "gpio25", "gpio31",
+       "gpio32", "gpio33", "gpio35", "gpio61", "gpio72", "gpio82", "gpio91",
+       "gpio95", "gpio97", "gpio98", "gpio99", "gpio100", "gpio105", "gpio115",
+       "gpio116", "gpio117", "gpio133", "gpio135",
+};
+static const char * const pll_bist_sync_groups[] = {
+       "gpio73",
+};
+static const char * const pll_clk_aux_groups[] = {
+       "gpio108",
+};
+static const char * const prng_rosc_groups[] = {
+       "gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const qdss_cti_trig0_groups[] = {
+       "gpio26", "gpio60", "gpio113", "gpio114",
+};
+static const char * const qdss_cti_trig1_groups[] = {
+       "gpio6", "gpio27", "gpio57", "gpio58",
+};
+static const char * const qdss_gpio_groups[] = {
+       "gpio0", "gpio1", "gpio3", "gpio4", "gpio5", "gpio7", "gpio8",
+       "gpio9", "gpio14", "gpio15", "gpio17", "gpio23", "gpio31", "gpio32",
+       "gpio33", "gpio35", "gpio36", "gpio37", "gpio38", "gpio39", "gpio40",
+       "gpio41", "gpio42", "gpio43", "gpio44", "gpio45", "gpio46", "gpio47",
+       "gpio49",  "gpio59", "gpio62", "gpio118", "gpio121", "gpio122", "gpio126",
+       "gpio127",
+};
+static const char * const qlink0_enable_groups[] = {
+       "gpio88",
+};
+static const char * const qlink0_request_groups[] = {
+       "gpio87",
+};
+static const char * const qlink0_wmss_reset_groups[] = {
+       "gpio89",
+};
+static const char * const qup0_se0_groups[] = {
+       "gpio4", "gpio5", "gpio34", "gpio35",
+};
+static const char * const qup0_se1_groups[] = {
+       "gpio10", "gpio11", "gpio12", "gpio13",
+};
+static const char * const qup0_se2_groups[] = {
+       "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const qup0_se3_groups[] = {
+       "gpio18", "gpio19", "gpio20", "gpio21",
+};
+static const char * const qup0_se4_groups[] = {
+       "gpio6", "gpio7", "gpio8", "gpio9",
+       "gpio26", "gpio27", "gpio34",
+};
+static const char * const qup1_se0_groups[] = {
+       "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qup1_se1_groups[] = {
+       "gpio26", "gpio27", "gpio50", "gpio51",
+};
+static const char * const qup1_se2_groups[] = {
+       "gpio22", "gpio23", "gpio31", "gpio32",
+};
+static const char * const qup1_se3_groups[] = {
+       "gpio24", "gpio25", "gpio51", "gpio50",
+};
+static const char * const qup1_se4_groups[] = {
+       "gpio43", "gpio48", "gpio49", "gpio90",
+       "gpio91",
+};
+static const char * const sd_write_protect_groups[] = {
+       "gpio102",
+};
+static const char * const tb_trig_sdc1_groups[] = {
+       "gpio128",
+};
+static const char * const tb_trig_sdc2_groups[] = {
+       "gpio51",
+};
+static const char * const tgu_ch0_trigout_groups[] = {
+       "gpio20",
+};
+static const char * const tgu_ch1_trigout_groups[] = {
+       "gpio21",
+};
+static const char * const tgu_ch2_trigout_groups[] = {
+       "gpio22",
+};
+static const char * const tgu_ch3_trigout_groups[] = {
+       "gpio23",
+};
+static const char * const tmess_prng_groups[] = {
+       "gpio57", "gpio58", "gpio59", "gpio60",
+};
+static const char * const tsense_pwm1_out_groups[] = {
+       "gpio134",
+};
+static const char * const tsense_pwm2_out_groups[] = {
+       "gpio134",
+};
+static const char * const uim0_groups[] = {
+       "gpio64", "gpio63", "gpio66", "gpio65",
+};
+static const char * const uim1_groups[] = {
+       "gpio68", "gpio67", "gpio69", "gpio70",
+};
+static const char * const usb0_hs_ac_groups[] = {
+       "gpio99",
+};
+static const char * const usb0_phy_ps_groups[] = {
+       "gpio94",
+};
+static const char * const vfr_0_mira_groups[] = {
+       "gpio19",
+};
+static const char * const vfr_0_mirb_groups[] = {
+       "gpio100",
+};
+static const char * const vfr_1_groups[] = {
+       "gpio84",
+};
+static const char * const vsense_trigger_mirnat_groups[] = {
+       "gpio75",
+};
+static const char * const wlan1_adc_dtest0_groups[] = {
+       "gpio79",
+};
+static const char * const wlan1_adc_dtest1_groups[] = {
+       "gpio80",
+};
+
+static const struct pinfunction sm4450_functions[] = {
+       MSM_PIN_FUNCTION(gpio),
+       MSM_PIN_FUNCTION(atest_char),
+       MSM_PIN_FUNCTION(atest_usb0),
+       MSM_PIN_FUNCTION(audio_ref_clk),
+       MSM_PIN_FUNCTION(cam_mclk),
+       MSM_PIN_FUNCTION(cci_async_in0),
+       MSM_PIN_FUNCTION(cci_i2c),
+       MSM_PIN_FUNCTION(cci),
+       MSM_PIN_FUNCTION(cmu_rng),
+       MSM_PIN_FUNCTION(coex_uart1_rx),
+       MSM_PIN_FUNCTION(coex_uart1_tx),
+       MSM_PIN_FUNCTION(cri_trng),
+       MSM_PIN_FUNCTION(dbg_out_clk),
+       MSM_PIN_FUNCTION(ddr_bist),
+       MSM_PIN_FUNCTION(ddr_pxi0_test),
+       MSM_PIN_FUNCTION(ddr_pxi1_test),
+       MSM_PIN_FUNCTION(gcc_gp1_clk),
+       MSM_PIN_FUNCTION(gcc_gp2_clk),
+       MSM_PIN_FUNCTION(gcc_gp3_clk),
+       MSM_PIN_FUNCTION(host2wlan_sol),
+       MSM_PIN_FUNCTION(ibi_i3c_qup0),
+       MSM_PIN_FUNCTION(ibi_i3c_qup1),
+       MSM_PIN_FUNCTION(jitter_bist_ref),
+       MSM_PIN_FUNCTION(mdp_vsync0_out),
+       MSM_PIN_FUNCTION(mdp_vsync1_out),
+       MSM_PIN_FUNCTION(mdp_vsync2_out),
+       MSM_PIN_FUNCTION(mdp_vsync3_out),
+       MSM_PIN_FUNCTION(mdp_vsync),
+       MSM_PIN_FUNCTION(nav),
+       MSM_PIN_FUNCTION(pcie0_clk_req),
+       MSM_PIN_FUNCTION(phase_flag),
+       MSM_PIN_FUNCTION(pll_bist_sync),
+       MSM_PIN_FUNCTION(pll_clk_aux),
+       MSM_PIN_FUNCTION(prng_rosc),
+       MSM_PIN_FUNCTION(qdss_cti_trig0),
+       MSM_PIN_FUNCTION(qdss_cti_trig1),
+       MSM_PIN_FUNCTION(qdss_gpio),
+       MSM_PIN_FUNCTION(qlink0_enable),
+       MSM_PIN_FUNCTION(qlink0_request),
+       MSM_PIN_FUNCTION(qlink0_wmss_reset),
+       MSM_PIN_FUNCTION(qup0_se0),
+       MSM_PIN_FUNCTION(qup0_se1),
+       MSM_PIN_FUNCTION(qup0_se2),
+       MSM_PIN_FUNCTION(qup0_se3),
+       MSM_PIN_FUNCTION(qup0_se4),
+       MSM_PIN_FUNCTION(qup1_se0),
+       MSM_PIN_FUNCTION(qup1_se1),
+       MSM_PIN_FUNCTION(qup1_se2),
+       MSM_PIN_FUNCTION(qup1_se3),
+       MSM_PIN_FUNCTION(qup1_se4),
+       MSM_PIN_FUNCTION(sd_write_protect),
+       MSM_PIN_FUNCTION(tb_trig_sdc1),
+       MSM_PIN_FUNCTION(tb_trig_sdc2),
+       MSM_PIN_FUNCTION(tgu_ch0_trigout),
+       MSM_PIN_FUNCTION(tgu_ch1_trigout),
+       MSM_PIN_FUNCTION(tgu_ch2_trigout),
+       MSM_PIN_FUNCTION(tgu_ch3_trigout),
+       MSM_PIN_FUNCTION(tmess_prng),
+       MSM_PIN_FUNCTION(tsense_pwm1_out),
+       MSM_PIN_FUNCTION(tsense_pwm2_out),
+       MSM_PIN_FUNCTION(uim0),
+       MSM_PIN_FUNCTION(uim1),
+       MSM_PIN_FUNCTION(usb0_hs_ac),
+       MSM_PIN_FUNCTION(usb0_phy_ps),
+       MSM_PIN_FUNCTION(vfr_0_mira),
+       MSM_PIN_FUNCTION(vfr_0_mirb),
+       MSM_PIN_FUNCTION(vfr_1),
+       MSM_PIN_FUNCTION(vsense_trigger_mirnat),
+       MSM_PIN_FUNCTION(wlan1_adc_dtest0),
+       MSM_PIN_FUNCTION(wlan1_adc_dtest1),
+};
+
+/*
+ * Every pin is maintained as a single group, and missing or non-existing pin
+ * would be maintained as dummy group to synchronize pin group index with
+ * pin descriptor registered with pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
+static const struct msm_pingroup sm4450_groups[] = {
+       [0] = PINGROUP(0, qup1_se0, ibi_i3c_qup1, cmu_rng, qdss_gpio, _, _, _, _, _),
+       [1] = PINGROUP(1, qup1_se0, ibi_i3c_qup1, cmu_rng, qdss_gpio, _, _, _, _, _),
+       [2] = PINGROUP(2, qup1_se0, _, _, _, _, _, _, _, _),
+       [3] = PINGROUP(3, qup1_se0, gcc_gp3_clk, cmu_rng, qdss_gpio, _, _, _, _, _),
+       [4] = PINGROUP(4, qup0_se0, ibi_i3c_qup0, qdss_gpio, _, _, _, _, _, _),
+       [5] = PINGROUP(5, qup0_se0, ibi_i3c_qup0, qdss_gpio, _, _, _, _, _, _),
+       [6] = PINGROUP(6, qup0_se4, qdss_cti_trig1, _, _, _, _, _, _, _),
+       [7] = PINGROUP(7, qup0_se4, _, phase_flag, qdss_gpio, _, _, _, _, _),
+       [8] = PINGROUP(8, qup0_se4, _, phase_flag, qdss_gpio, _, _, _, _, _),
+       [9] = PINGROUP(9, qup0_se4, _, phase_flag, qdss_gpio, _, _, _, _, _),
+       [10] = PINGROUP(10, qup0_se1, _, atest_usb0, _, _, _, _, _, _),
+       [11] = PINGROUP(11, qup0_se1, _, phase_flag, _, _, _, _, _, _),
+       [12] = PINGROUP(12, qup0_se1, _, _, _, _, _, _, _, _),
+       [13] = PINGROUP(13, qup0_se1, _, phase_flag, _, _, _, _, _, _),
+       [14] = PINGROUP(14, qup0_se2, _, phase_flag, _, qdss_gpio, _, _, _, _),
+       [15] = PINGROUP(15, qup0_se2, _, phase_flag, _, qdss_gpio, _, _, _, _),
+       [16] = PINGROUP(16, qup0_se2, _, _, _, _, _, _, _, _),
+       [17] = PINGROUP(17, qup0_se2, _, phase_flag, _, qdss_gpio, _, _, _, _),
+       [18] = PINGROUP(18, qup0_se3, _, phase_flag, _, _, _, _, _, _),
+       [19] = PINGROUP(19, qup0_se3, vfr_0_mira, _, phase_flag, _, _, _, _, _),
+       [20] = PINGROUP(20, qup0_se3, tgu_ch0_trigout, _, _, _, _, _, _, _),
+       [21] = PINGROUP(21, qup0_se3, _, phase_flag, tgu_ch1_trigout, _, _, _, _, _),
+       [22] = PINGROUP(22, qup1_se2, mdp_vsync, mdp_vsync2_out, mdp_vsync3_out, tgu_ch2_trigout, _, _, _, _),
+       [23] = PINGROUP(23, qup1_se2, tgu_ch3_trigout, qdss_gpio, _, _, _, _, _, _),
+       [24] = PINGROUP(24, qup1_se3, _, phase_flag, _, _, _, _, _, _),
+       [25] = PINGROUP(25, qup1_se3, _, phase_flag, _, _, _, _, _, _),
+       [26] = PINGROUP(26, qup1_se1, mdp_vsync, qup0_se4, qdss_cti_trig0, _, _, _, _, _),
+       [27] = PINGROUP(27, qup1_se1, qup0_se4, qdss_cti_trig1, _, _, _, _, _, _),
+       [28] = PINGROUP(28, cmu_rng, _, _, _, _, _, _, _, _),
+       [29] = PINGROUP(29, ddr_bist, _, _, _, _, _, _, _, _),
+       [30] = PINGROUP(30, mdp_vsync, gcc_gp2_clk, ddr_bist, _, _, _, _, _, _),
+       [31] = PINGROUP(31, qup1_se2, _, phase_flag, ddr_bist, qdss_gpio, _, _, _, _),
+       [32] = PINGROUP(32, qup1_se2, _, phase_flag, ddr_bist, qdss_gpio, _, _, _, _),
+       [33] = PINGROUP(33, _, phase_flag, qdss_gpio, _, _, _, _, _, _),
+       [34] = PINGROUP(34, qup0_se0, qup0_se4, mdp_vsync, _, _, _, _, _, _),
+       [35] = PINGROUP(35, qup0_se0, _, phase_flag, qdss_gpio, _, _, _, _, _),
+       [36] = PINGROUP(36, cam_mclk, prng_rosc, qdss_gpio, _, _, _, _, _, _),
+       [37] = PINGROUP(37, cam_mclk, gcc_gp1_clk, prng_rosc, qdss_gpio, _, _, _, _, _),
+       [38] = PINGROUP(38, cam_mclk, prng_rosc, qdss_gpio, _, _, _, _, _, _),
+       [39] = PINGROUP(39, cam_mclk, prng_rosc, qdss_gpio, _, _, _, _, _, _),
+       [40] = PINGROUP(40, cci, cci_async_in0, cri_trng, qdss_gpio, _, _, _, _, _),
+       [41] = PINGROUP(41, cci, cri_trng, qdss_gpio, _, _, _, _, _, _),
+       [42] = PINGROUP(42, cci, cri_trng, qdss_gpio, _, _, _, _, _, _),
+       [43] = PINGROUP(43, cci, qup1_se4, qdss_gpio, _, _, _, _, _, _),
+       [44] = PINGROUP(44, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+       [45] = PINGROUP(45, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+       [46] = PINGROUP(46, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+       [47] = PINGROUP(47, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+       [48] = PINGROUP(48, cci_i2c, qup1_se4, gcc_gp1_clk, _, _, _, _, _, _),
+       [49] = PINGROUP(49, cci_i2c, qup1_se4, gcc_gp2_clk, qdss_gpio, _, _, _, _, _),
+       [50] = PINGROUP(50, qup1_se1, qup1_se3, _, gcc_gp3_clk, _, _, _, _, _),
+       [51] = PINGROUP(51, qup1_se1, qup1_se3, _, tb_trig_sdc2, _, _, _, _, _),
+       [52] = PINGROUP(52, _, _, _, _, _, _, _, _, _),
+       [53] = PINGROUP(53, _, _, _, _, _, _, _, _, _),
+       [54] = PINGROUP(54, coex_uart1_rx, _, _, _, _, _, _, _, _),
+       [55] = PINGROUP(55, coex_uart1_tx, _, _, _, _, _, _, _, _),
+       [56] = PINGROUP(56, _, _, _, _, _, _, _, _, _),
+       [57] = PINGROUP(57, tmess_prng, qdss_cti_trig1, _, _, _, _, _, _, _),
+       [58] = PINGROUP(58, tmess_prng, qdss_cti_trig1, _, _, _, _, _, _, _),
+       [59] = PINGROUP(59, tmess_prng, qdss_gpio, _, _, _, _, _, _, _),
+       [60] = PINGROUP(60, tmess_prng, qdss_cti_trig0, _, _, _, _, _, _, _),
+       [61] = PINGROUP(61, _, phase_flag, _, _, _, _, _, _, _),
+       [62] = PINGROUP(62, qdss_gpio, _, _, _, _, _, _, _, _),
+       [63] = PINGROUP(63, uim0, _, _, _, _, _, _, _, _),
+       [64] = PINGROUP(64, uim0, _, _, _, _, _, _, _, _),
+       [65] = PINGROUP(65, uim0, _, _, _, _, _, _, _, _),
+       [66] = PINGROUP(66, uim0, _, _, _, _, _, _, _, _),
+       [67] = PINGROUP(67, uim1, _, _, _, _, _, _, _, _),
+       [68] = PINGROUP(68, uim1, _, _, _, _, _, _, _, _),
+       [69] = PINGROUP(69, uim1, _, _, _, _, _, _, _, _),
+       [70] = PINGROUP(70, uim1, _, _, _, _, _, _, _, _),
+       [71] = PINGROUP(71, _, _, _, audio_ref_clk, _, _, _, _, _),
+       [72] = PINGROUP(72, _, _, _, phase_flag, _, _, _, _, _),
+       [73] = PINGROUP(73, _, _, _, pll_bist_sync, _, _, _, _, _),
+       [74] = PINGROUP(74, _, _, _, _, _, _, _, _, _),
+       [75] = PINGROUP(75, _, _, _, vsense_trigger_mirnat, atest_usb0, _, _, _, _),
+       [76] = PINGROUP(76, _, _, _, _, _, _, _, _, _),
+       [77] = PINGROUP(77, _, _, _, _, _, _, _, _, _),
+       [78] = PINGROUP(78, _, _, _, atest_usb0, _, _, _, _, _),
+       [79] = PINGROUP(79, _, _, _, wlan1_adc_dtest0, atest_usb0, _, _, _, _),
+       [80] = PINGROUP(80, _, _, dbg_out_clk, wlan1_adc_dtest1, atest_usb0, _, _, _, _),
+       [81] = PINGROUP(81, _, nav, _, _, _, _, _, _, _),
+       [82] = PINGROUP(82, _, _, phase_flag, _, _, _, _, _, _),
+       [83] = PINGROUP(83, nav, _, _, _, _, _, _, _, _),
+       [84] = PINGROUP(84, nav, vfr_1, _, _, _, _, _, _, _),
+       [85] = PINGROUP(85, _, _, _, _, _, _, _, _, _),
+       [86] = PINGROUP(86, _, _, _, _, _, _, _, _, _),
+       [87] = PINGROUP(87, qlink0_request, _, _, _, _, _, _, _, _),
+       [88] = PINGROUP(88, qlink0_enable, _, _, _, _, _, _, _, _),
+       [89] = PINGROUP(89, qlink0_wmss_reset, _, _, _, _, _, _, _, _),
+       [90] = PINGROUP(90, qup1_se4, jitter_bist_ref, ddr_pxi0_test, _, _, _, _, _, _),
+       [91] = PINGROUP(91, qup1_se4, _, phase_flag, _, _, _, _, _, _),
+       [92] = PINGROUP(92, _, _, _, _, _, _, _, _, _),
+       [93] = PINGROUP(93, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out, _, _, _, _, _, _),
+       [94] = PINGROUP(94, usb0_phy_ps, _, _, _, _, _, _, _, _),
+       [95] = PINGROUP(95, _, phase_flag, atest_char, _, _, _, _, _, _),
+       [96] = PINGROUP(96, _, _, _, _, _, _, _, _, _),
+       [97] = PINGROUP(97, mdp_vsync, _, phase_flag, atest_char, _, _, _, _, _),
+       [98] = PINGROUP(98, _, phase_flag, atest_char, _, _, _, _, _, _),
+       [99] = PINGROUP(99, usb0_hs_ac, _, phase_flag, atest_char, _, _, _, _, _),
+       [100] = PINGROUP(100, vfr_0_mirb, _, phase_flag, atest_char, _, _, _, _, _),
+       [101] = PINGROUP(101, _, _, _, _, _, _, _, _, _),
+       [102] = PINGROUP(102, sd_write_protect, _, _, _, _, _, _, _, _),
+       [103] = PINGROUP(103, _, _, _, _, _, _, _, _, _),
+       [104] = PINGROUP(104, _, _, _, _, _, _, _, _, _),
+       [105] = PINGROUP(105, _, phase_flag, _, _, _, _, _, _, _),
+       [106] = PINGROUP(106, host2wlan_sol, _, _, _, _, _, _, _, _),
+       [107] = PINGROUP(107, pcie0_clk_req, _, _, _, _, _, _, _, _),
+       [108] = PINGROUP(108, pll_clk_aux, _, _, _, _, _, _, _, _),
+       [109] = PINGROUP(109, _, _, _, _, _, _, _, _, _),
+       [110] = PINGROUP(110, _, _, _, _, _, _, _, _, _),
+       [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _),
+       [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _),
+       [113] = PINGROUP(113, qdss_cti_trig0, _, _, _, _, _, _, _, _),
+       [114] = PINGROUP(114, qdss_cti_trig0, _, _, _, _, _, _, _, _),
+       [115] = PINGROUP(115, _, phase_flag, _, _, _, _, _, _, _),
+       [116] = PINGROUP(116, _, phase_flag, _, _, _, _, _, _, _),
+       [117] = PINGROUP(117, _, phase_flag, _, _, _, _, _, _, _),
+       [118] = PINGROUP(118, qdss_gpio, _, ddr_pxi1_test, _, _, _, _, _, _),
+       [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _),
+       [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _),
+       [121] = PINGROUP(121, qdss_gpio, _, _, _, _, _, _, _, _),
+       [122] = PINGROUP(122, qdss_gpio, _, ddr_pxi1_test, _, _, _, _, _, _),
+       [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _),
+       [124] = PINGROUP(124, _, _, _, _, _, _, _, _, _),
+       [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _),
+       [126] = PINGROUP(126, qdss_gpio, _, _, _, _, _, _, _, _),
+       [127] = PINGROUP(127, qdss_gpio, ddr_pxi0_test, _, _, _, _, _, _, _),
+       [128] = PINGROUP(128, tb_trig_sdc1, _, _, _, _, _, _, _, _),
+       [129] = PINGROUP(129, _, _, _, _, _, _, _, _, _),
+       [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _),
+       [131] = PINGROUP(131, _, _, _, _, _, _, _, _, _),
+       [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _),
+       [133] = PINGROUP(133, _, phase_flag, _, _, _, _, _, _, _),
+       [134] = PINGROUP(134, tsense_pwm1_out, tsense_pwm2_out, _, _, _, _, _, _, _),
+       [135] = PINGROUP(135, _, phase_flag, _, _, _, _, _, _, _),
+       [136] = UFS_RESET(ufs_reset, 0x97000),
+       [137] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x8c004, 0, 0),
+       [138] = SDC_QDSD_PINGROUP(sdc1_clk, 0x8c000, 13, 6),
+       [139] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x8c000, 11, 3),
+       [140] = SDC_QDSD_PINGROUP(sdc1_data, 0x8c000, 9, 0),
+       [141] = SDC_QDSD_PINGROUP(sdc2_clk, 0x8f000, 14, 6),
+       [142] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x8f000, 11, 3),
+       [143] = SDC_QDSD_PINGROUP(sdc2_data, 0x8f000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map sm4450_pdc_map[] = {
+       { 0, 67 }, { 3, 82 }, { 4, 69 }, { 5, 70 }, { 6, 44 }, { 7, 43 },
+       { 8, 71 }, { 9, 86 }, { 10, 48 }, { 11, 77 }, { 12, 90 },
+       { 13, 54 }, { 14, 91 }, { 17, 97 }, { 18, 102 }, { 21, 103 },
+       { 22, 104 }, { 23, 105 }, { 24, 53 }, { 25, 106 }, { 26, 65 },
+       { 27, 55 }, { 28, 89 }, { 30, 80 }, { 31, 109 }, { 33, 87 },
+       { 34, 81 }, { 35, 75 }, { 40, 88 }, { 41, 98 }, { 42, 110 },
+       { 43, 95 }, { 47, 118 }, { 50, 111 }, { 52, 52 }, { 53, 114 },
+       { 54, 115 }, { 55, 99 }, { 56, 45 }, { 57, 85 }, { 58, 56 },
+       { 59, 84 }, { 60, 83 }, { 61, 96 }, { 62, 93 }, { 66, 116 },
+       { 67, 113 }, { 70, 42 }, { 71, 122 }, { 73, 119 }, { 75, 121 },
+       { 77, 120 }, { 79, 123 }, { 81, 124 }, { 83, 64 }, { 84, 128 },
+       { 86, 129 }, { 87, 63 }, { 91, 92 }, { 92, 66 }, { 93, 125 },
+       { 94, 76 }, { 95, 62 }, { 96, 132 }, { 97, 135 }, { 98, 73 },
+       { 99, 133 }, { 101, 46 }, { 102, 134 }, { 103, 49 }, { 105, 58 },
+       { 107, 94 }, { 110, 59 }, { 113, 57 }, { 114, 60 }, { 118, 107 },
+       { 120, 61 }, { 121, 108 }, { 123, 68 }, { 125, 72 }, { 128, 112 },
+};
+
+static const struct msm_pinctrl_soc_data sm4450_tlmm = {
+       .pins = sm4450_pins,
+       .npins = ARRAY_SIZE(sm4450_pins),
+       .functions = sm4450_functions,
+       .nfunctions = ARRAY_SIZE(sm4450_functions),
+       .groups = sm4450_groups,
+       .ngroups = ARRAY_SIZE(sm4450_groups),
+       .ngpios = 137,
+       .wakeirq_map = sm4450_pdc_map,
+       .nwakeirq_map = ARRAY_SIZE(sm4450_pdc_map),
+};
+
+static int sm4450_tlmm_probe(struct platform_device *pdev)
+{
+       return msm_pinctrl_probe(pdev, &sm4450_tlmm);
+}
+
+static const struct of_device_id sm4450_tlmm_of_match[] = {
+       { .compatible = "qcom,sm4450-tlmm", },
+       { }
+};
+
+static struct platform_driver sm4450_tlmm_driver = {
+       .driver = {
+               .name = "sm4450-tlmm",
+               .of_match_table = sm4450_tlmm_of_match,
+       },
+       .probe = sm4450_tlmm_probe,
+       .remove_new = msm_pinctrl_remove,
+};
+MODULE_DEVICE_TABLE(of, sm4450_tlmm_of_match);
+
+static int __init sm4450_tlmm_init(void)
+{
+       return platform_driver_register(&sm4450_tlmm_driver);
+}
+arch_initcall(sm4450_tlmm_init);
+
+static void __exit sm4450_tlmm_exit(void)
+{
+       platform_driver_unregister(&sm4450_tlmm_driver);
+}
+module_exit(sm4450_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM4450 TLMM driver");
+MODULE_LICENSE("GPL");
index e8a6f6f6af547b10458e9732e9e292afa61e50b1..316d6fc69131b56ec0131adbd27245e0ee577b22 100644 (file)
@@ -36,26 +36,6 @@ enum lpass_lpi_functions {
        LPI_MUX__,
 };
 
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-static int gpio15_pins[] = { 15 };
-static int gpio16_pins[] = { 16 };
-static int gpio17_pins[] = { 17 };
-static int gpio18_pins[] = { 18 };
-
 static const struct pinctrl_pin_desc sm6115_lpi_pins[] = {
        PINCTRL_PIN(0, "gpio0"),
        PINCTRL_PIN(1, "gpio1"),
index cb10ce8d5d28f8f0cc3bfb851edbc5c24c24437c..9791d9ba5087c2bf2c11468d507c322172256cbf 100644 (file)
@@ -36,21 +36,6 @@ enum lpass_lpi_functions {
        LPI_MUX__,
 };
 
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-
 static const struct pinctrl_pin_desc sm8250_lpi_pins[] = {
        PINCTRL_PIN(0, "gpio0"),
        PINCTRL_PIN(1, "gpio1"),
index 297cc95ac3c013e8892f5e78f3c81aaa511e0487..5b9a2cb216bd809f1a187112f0c469ac082436ad 100644 (file)
@@ -36,22 +36,6 @@ enum lpass_lpi_functions {
        LPI_MUX__,
 };
 
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-
 static const struct pinctrl_pin_desc sm8350_lpi_pins[] = {
        PINCTRL_PIN(0, "gpio0"),
        PINCTRL_PIN(1, "gpio1"),
index 2e7896791fc0c43a6f0b0efdc8b1d8d0f8b45c19..a028cbb4994725fb0775a28aa924ca660e2c2871 100644 (file)
@@ -52,30 +52,6 @@ enum lpass_lpi_functions {
        LPI_MUX__,
 };
 
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-static int gpio15_pins[] = { 15 };
-static int gpio16_pins[] = { 16 };
-static int gpio17_pins[] = { 17 };
-static int gpio18_pins[] = { 18 };
-static int gpio19_pins[] = { 19 };
-static int gpio20_pins[] = { 20 };
-static int gpio21_pins[] = { 21 };
-static int gpio22_pins[] = { 22 };
-
 static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
        PINCTRL_PIN(0, "gpio0"),
        PINCTRL_PIN(1, "gpio1"),
index 64458c3fbe5e68dc816a68c312ede313f0351a7d..852192b044e170ef44ca6c971cd333d3624309d6 100644 (file)
@@ -52,30 +52,6 @@ enum lpass_lpi_functions {
        LPI_MUX__,
 };
 
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-static int gpio15_pins[] = { 15 };
-static int gpio16_pins[] = { 16 };
-static int gpio17_pins[] = { 17 };
-static int gpio18_pins[] = { 18 };
-static int gpio19_pins[] = { 19 };
-static int gpio20_pins[] = { 20 };
-static int gpio21_pins[] = { 21 };
-static int gpio22_pins[] = { 22 };
-
 static const struct pinctrl_pin_desc sm8550_lpi_pins[] = {
        PINCTRL_PIN(0, "gpio0"),
        PINCTRL_PIN(1, "gpio1"),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c
new file mode 100644 (file)
index 0000000..04400c8
--- /dev/null
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Linaro Ltd.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+       LPI_MUX_dmic1_clk,
+       LPI_MUX_dmic1_data,
+       LPI_MUX_dmic2_clk,
+       LPI_MUX_dmic2_data,
+       LPI_MUX_dmic3_clk,
+       LPI_MUX_dmic3_data,
+       LPI_MUX_dmic4_clk,
+       LPI_MUX_dmic4_data,
+       LPI_MUX_i2s0_clk,
+       LPI_MUX_i2s0_data,
+       LPI_MUX_i2s0_ws,
+       LPI_MUX_i2s1_clk,
+       LPI_MUX_i2s1_data,
+       LPI_MUX_i2s1_ws,
+       LPI_MUX_i2s2_clk,
+       LPI_MUX_i2s2_data,
+       LPI_MUX_i2s2_ws,
+       LPI_MUX_i2s3_clk,
+       LPI_MUX_i2s3_data,
+       LPI_MUX_i2s3_ws,
+       LPI_MUX_i2s4_clk,
+       LPI_MUX_i2s4_data,
+       LPI_MUX_i2s4_ws,
+       LPI_MUX_qca_swr_clk,
+       LPI_MUX_qca_swr_data,
+       LPI_MUX_slimbus_clk,
+       LPI_MUX_slimbus_data,
+       LPI_MUX_swr_rx_clk,
+       LPI_MUX_swr_rx_data,
+       LPI_MUX_swr_tx_clk,
+       LPI_MUX_swr_tx_data,
+       LPI_MUX_wsa_swr_clk,
+       LPI_MUX_wsa_swr_data,
+       LPI_MUX_wsa2_swr_clk,
+       LPI_MUX_wsa2_swr_data,
+       LPI_MUX_ext_mclk1_a,
+       LPI_MUX_ext_mclk1_b,
+       LPI_MUX_ext_mclk1_c,
+       LPI_MUX_ext_mclk1_d,
+       LPI_MUX_ext_mclk1_e,
+       LPI_MUX_gpio,
+       LPI_MUX__,
+};
+
+static const struct pinctrl_pin_desc sm8650_lpi_pins[] = {
+       PINCTRL_PIN(0, "gpio0"),
+       PINCTRL_PIN(1, "gpio1"),
+       PINCTRL_PIN(2, "gpio2"),
+       PINCTRL_PIN(3, "gpio3"),
+       PINCTRL_PIN(4, "gpio4"),
+       PINCTRL_PIN(5, "gpio5"),
+       PINCTRL_PIN(6, "gpio6"),
+       PINCTRL_PIN(7, "gpio7"),
+       PINCTRL_PIN(8, "gpio8"),
+       PINCTRL_PIN(9, "gpio9"),
+       PINCTRL_PIN(10, "gpio10"),
+       PINCTRL_PIN(11, "gpio11"),
+       PINCTRL_PIN(12, "gpio12"),
+       PINCTRL_PIN(13, "gpio13"),
+       PINCTRL_PIN(14, "gpio14"),
+       PINCTRL_PIN(15, "gpio15"),
+       PINCTRL_PIN(16, "gpio16"),
+       PINCTRL_PIN(17, "gpio17"),
+       PINCTRL_PIN(18, "gpio18"),
+       PINCTRL_PIN(19, "gpio19"),
+       PINCTRL_PIN(20, "gpio20"),
+       PINCTRL_PIN(21, "gpio21"),
+       PINCTRL_PIN(22, "gpio22"),
+};
+
+static const char * const gpio_groups[] = {
+       "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+       "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+       "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+       "gpio22",
+};
+
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const dmic4_clk_groups[] = { "gpio17" };
+static const char * const dmic4_data_groups[] = { "gpio18" };
+static const char * const i2s0_clk_groups[] = { "gpio0" };
+static const char * const i2s0_ws_groups[] = { "gpio1" };
+static const char * const i2s0_data_groups[] = { "gpio2", "gpio3", "gpio4", "gpio5" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const i2s2_data_groups[] = { "gpio15", "gpio16" };
+static const char * const i2s3_clk_groups[] = { "gpio12" };
+static const char * const i2s3_ws_groups[] = { "gpio13" };
+static const char * const i2s3_data_groups[] = { "gpio17", "gpio18" };
+static const char * const i2s4_clk_groups[] = { "gpio19"};
+static const char * const i2s4_ws_groups[] = { "gpio20"};
+static const char * const i2s4_data_groups[] = { "gpio21", "gpio22"};
+static const char * const qca_swr_clk_groups[] = { "gpio19" };
+static const char * const qca_swr_data_groups[] = { "gpio20" };
+static const char * const slimbus_clk_groups[] = { "gpio19"};
+static const char * const slimbus_data_groups[] = { "gpio20"};
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const wsa2_swr_clk_groups[] = { "gpio15" };
+static const char * const wsa2_swr_data_groups[] = { "gpio16" };
+static const char * const ext_mclk1_c_groups[] = { "gpio5" };
+static const char * const ext_mclk1_b_groups[] = { "gpio9" };
+static const char * const ext_mclk1_a_groups[] = { "gpio13" };
+static const char * const ext_mclk1_d_groups[] = { "gpio14" };
+static const char * const ext_mclk1_e_groups[] = { "gpio22" };
+
+static const struct lpi_pingroup sm8650_groups[] = {
+       LPI_PINGROUP(0, 11, swr_tx_clk, i2s0_clk, _, _),
+       LPI_PINGROUP(1, 11, swr_tx_data, i2s0_ws, _, _),
+       LPI_PINGROUP(2, 11, swr_tx_data, i2s0_data, _, _),
+       LPI_PINGROUP(3, 11, swr_rx_clk, i2s0_data, _, _),
+       LPI_PINGROUP(4, 11, swr_rx_data, i2s0_data, _, _),
+       LPI_PINGROUP(5, 11, swr_rx_data, ext_mclk1_c, i2s0_data, _),
+       LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _,  _),
+       LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _),
+       LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _),
+       LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, ext_mclk1_b, _),
+       LPI_PINGROUP(10, 11, i2s2_clk, wsa_swr_clk, _, _),
+       LPI_PINGROUP(11, 11, i2s2_ws, wsa_swr_data, _, _),
+       LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s3_clk, _, _),
+       LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s3_ws, ext_mclk1_a, _),
+       LPI_PINGROUP(14, 11, swr_tx_data, ext_mclk1_d, _, _),
+       LPI_PINGROUP(15, 11, i2s2_data, wsa2_swr_clk, _, _),
+       LPI_PINGROUP(16, 11, i2s2_data, wsa2_swr_data, _, _),
+       LPI_PINGROUP(17, LPI_NO_SLEW, dmic4_clk, i2s3_data, _, _),
+       LPI_PINGROUP(18, LPI_NO_SLEW, dmic4_data, i2s3_data, _, _),
+       LPI_PINGROUP(19, 11, i2s4_clk, slimbus_clk, qca_swr_clk, _),
+       LPI_PINGROUP(20, 11, i2s4_ws, slimbus_data, qca_swr_data, _),
+       LPI_PINGROUP(21, LPI_NO_SLEW, i2s4_data, _, _, _),
+       LPI_PINGROUP(22, LPI_NO_SLEW, i2s4_data, ext_mclk1_e, _, _),
+};
+
+static const struct lpi_function sm8650_functions[] = {
+       LPI_FUNCTION(gpio),
+       LPI_FUNCTION(dmic1_clk),
+       LPI_FUNCTION(dmic1_data),
+       LPI_FUNCTION(dmic2_clk),
+       LPI_FUNCTION(dmic2_data),
+       LPI_FUNCTION(dmic3_clk),
+       LPI_FUNCTION(dmic3_data),
+       LPI_FUNCTION(dmic4_clk),
+       LPI_FUNCTION(dmic4_data),
+       LPI_FUNCTION(i2s0_clk),
+       LPI_FUNCTION(i2s0_data),
+       LPI_FUNCTION(i2s0_ws),
+       LPI_FUNCTION(i2s1_clk),
+       LPI_FUNCTION(i2s1_data),
+       LPI_FUNCTION(i2s1_ws),
+       LPI_FUNCTION(i2s2_clk),
+       LPI_FUNCTION(i2s2_data),
+       LPI_FUNCTION(i2s2_ws),
+       LPI_FUNCTION(i2s3_clk),
+       LPI_FUNCTION(i2s3_data),
+       LPI_FUNCTION(i2s3_ws),
+       LPI_FUNCTION(i2s4_clk),
+       LPI_FUNCTION(i2s4_data),
+       LPI_FUNCTION(i2s4_ws),
+       LPI_FUNCTION(qca_swr_clk),
+       LPI_FUNCTION(qca_swr_data),
+       LPI_FUNCTION(slimbus_clk),
+       LPI_FUNCTION(slimbus_data),
+       LPI_FUNCTION(swr_rx_clk),
+       LPI_FUNCTION(swr_rx_data),
+       LPI_FUNCTION(swr_tx_clk),
+       LPI_FUNCTION(swr_tx_data),
+       LPI_FUNCTION(wsa_swr_clk),
+       LPI_FUNCTION(wsa_swr_data),
+       LPI_FUNCTION(wsa2_swr_clk),
+       LPI_FUNCTION(wsa2_swr_data),
+       LPI_FUNCTION(ext_mclk1_a),
+       LPI_FUNCTION(ext_mclk1_b),
+       LPI_FUNCTION(ext_mclk1_c),
+       LPI_FUNCTION(ext_mclk1_d),
+       LPI_FUNCTION(ext_mclk1_e),
+};
+
+static const struct lpi_pinctrl_variant_data sm8650_lpi_data = {
+       .pins = sm8650_lpi_pins,
+       .npins = ARRAY_SIZE(sm8650_lpi_pins),
+       .groups = sm8650_groups,
+       .ngroups = ARRAY_SIZE(sm8650_groups),
+       .functions = sm8650_functions,
+       .nfunctions = ARRAY_SIZE(sm8650_functions),
+       .flags = LPI_FLAG_SLEW_RATE_SAME_REG,
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+       {
+              .compatible = "qcom,sm8650-lpass-lpi-pinctrl",
+              .data = &sm8650_lpi_data,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+       .driver = {
+                  .name = "qcom-sm8650-lpass-lpi-pinctrl",
+                  .of_match_table = lpi_pinctrl_of_match,
+       },
+       .probe = lpi_pinctrl_probe,
+       .remove_new = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("Qualcomm SM8650 LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650.c b/drivers/pinctrl/qcom/pinctrl-sm8650.c
new file mode 100644 (file)
index 0000000..adaddd7
--- /dev/null
@@ -0,0 +1,1762 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-msm.h"
+
+#define REG_SIZE 0x1000
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10)  \
+       {                                               \
+               .grp = PINCTRL_PINGROUP("gpio" #id,     \
+                       gpio##id##_pins,                \
+                       ARRAY_SIZE(gpio##id##_pins)),   \
+               .funcs = (int[]){                       \
+                       msm_mux_gpio, /* gpio mode */   \
+                       msm_mux_##f1,                   \
+                       msm_mux_##f2,                   \
+                       msm_mux_##f3,                   \
+                       msm_mux_##f4,                   \
+                       msm_mux_##f5,                   \
+                       msm_mux_##f6,                   \
+                       msm_mux_##f7,                   \
+                       msm_mux_##f8,                   \
+                       msm_mux_##f9,                   \
+                       msm_mux_##f10                   \
+               },                                      \
+               .nfuncs = 11,                           \
+               .ctl_reg = REG_SIZE * id,                       \
+               .io_reg = 0x4 + REG_SIZE * id,          \
+               .intr_cfg_reg = 0x8 + REG_SIZE * id,            \
+               .intr_status_reg = 0xc + REG_SIZE * id, \
+               .intr_target_reg = 0x8 + REG_SIZE * id, \
+               .mux_bit = 2,                   \
+               .pull_bit = 0,                  \
+               .drv_bit = 6,                   \
+               .i2c_pull_bit = 13,             \
+               .egpio_enable = 12,             \
+               .egpio_present = 11,            \
+               .oe_bit = 9,                    \
+               .in_bit = 0,                    \
+               .out_bit = 1,                   \
+               .intr_enable_bit = 0,           \
+               .intr_status_bit = 0,           \
+               .intr_wakeup_present_bit = 6,   \
+               .intr_wakeup_enable_bit = 7,    \
+               .intr_target_bit = 8,           \
+               .intr_target_kpss_val = 3,      \
+               .intr_raw_status_bit = 4,       \
+               .intr_polarity_bit = 1,         \
+               .intr_detection_bit = 2,        \
+               .intr_detection_width = 2,      \
+       }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)     \
+       {                                               \
+               .grp = PINCTRL_PINGROUP(#pg_name,       \
+                       pg_name##_pins,                 \
+                       ARRAY_SIZE(pg_name##_pins)),    \
+               .ctl_reg = ctl,                         \
+               .io_reg = 0,                            \
+               .intr_cfg_reg = 0,                      \
+               .intr_status_reg = 0,                   \
+               .intr_target_reg = 0,                   \
+               .mux_bit = -1,                          \
+               .pull_bit = pull,                       \
+               .drv_bit = drv,                         \
+               .oe_bit = -1,                           \
+               .in_bit = -1,                           \
+               .out_bit = -1,                          \
+               .intr_enable_bit = -1,                  \
+               .intr_status_bit = -1,                  \
+               .intr_target_bit = -1,                  \
+               .intr_raw_status_bit = -1,              \
+               .intr_polarity_bit = -1,                \
+               .intr_detection_bit = -1,               \
+               .intr_detection_width = -1,             \
+       }
+
+#define UFS_RESET(pg_name, ctl, io)                    \
+       {                                               \
+               .grp = PINCTRL_PINGROUP(#pg_name,       \
+                       pg_name##_pins,                 \
+                       ARRAY_SIZE(pg_name##_pins)),    \
+               .ctl_reg = ctl,                         \
+               .io_reg = io,                           \
+               .intr_cfg_reg = 0,                      \
+               .intr_status_reg = 0,                   \
+               .intr_target_reg = 0,                   \
+               .mux_bit = -1,                          \
+               .pull_bit = 3,                          \
+               .drv_bit = 0,                           \
+               .oe_bit = -1,                           \
+               .in_bit = -1,                           \
+               .out_bit = 0,                           \
+               .intr_enable_bit = -1,                  \
+               .intr_status_bit = -1,                  \
+               .intr_target_bit = -1,                  \
+               .intr_raw_status_bit = -1,              \
+               .intr_polarity_bit = -1,                \
+               .intr_detection_bit = -1,               \
+               .intr_detection_width = -1,             \
+       }
+
+static const struct pinctrl_pin_desc sm8650_pins[] = {
+       PINCTRL_PIN(0, "GPIO_0"),
+       PINCTRL_PIN(1, "GPIO_1"),
+       PINCTRL_PIN(2, "GPIO_2"),
+       PINCTRL_PIN(3, "GPIO_3"),
+       PINCTRL_PIN(4, "GPIO_4"),
+       PINCTRL_PIN(5, "GPIO_5"),
+       PINCTRL_PIN(6, "GPIO_6"),
+       PINCTRL_PIN(7, "GPIO_7"),
+       PINCTRL_PIN(8, "GPIO_8"),
+       PINCTRL_PIN(9, "GPIO_9"),
+       PINCTRL_PIN(10, "GPIO_10"),
+       PINCTRL_PIN(11, "GPIO_11"),
+       PINCTRL_PIN(12, "GPIO_12"),
+       PINCTRL_PIN(13, "GPIO_13"),
+       PINCTRL_PIN(14, "GPIO_14"),
+       PINCTRL_PIN(15, "GPIO_15"),
+       PINCTRL_PIN(16, "GPIO_16"),
+       PINCTRL_PIN(17, "GPIO_17"),
+       PINCTRL_PIN(18, "GPIO_18"),
+       PINCTRL_PIN(19, "GPIO_19"),
+       PINCTRL_PIN(20, "GPIO_20"),
+       PINCTRL_PIN(21, "GPIO_21"),
+       PINCTRL_PIN(22, "GPIO_22"),
+       PINCTRL_PIN(23, "GPIO_23"),
+       PINCTRL_PIN(24, "GPIO_24"),
+       PINCTRL_PIN(25, "GPIO_25"),
+       PINCTRL_PIN(26, "GPIO_26"),
+       PINCTRL_PIN(27, "GPIO_27"),
+       PINCTRL_PIN(28, "GPIO_28"),
+       PINCTRL_PIN(29, "GPIO_29"),
+       PINCTRL_PIN(30, "GPIO_30"),
+       PINCTRL_PIN(31, "GPIO_31"),
+       PINCTRL_PIN(32, "GPIO_32"),
+       PINCTRL_PIN(33, "GPIO_33"),
+       PINCTRL_PIN(34, "GPIO_34"),
+       PINCTRL_PIN(35, "GPIO_35"),
+       PINCTRL_PIN(36, "GPIO_36"),
+       PINCTRL_PIN(37, "GPIO_37"),
+       PINCTRL_PIN(38, "GPIO_38"),
+       PINCTRL_PIN(39, "GPIO_39"),
+       PINCTRL_PIN(40, "GPIO_40"),
+       PINCTRL_PIN(41, "GPIO_41"),
+       PINCTRL_PIN(42, "GPIO_42"),
+       PINCTRL_PIN(43, "GPIO_43"),
+       PINCTRL_PIN(44, "GPIO_44"),
+       PINCTRL_PIN(45, "GPIO_45"),
+       PINCTRL_PIN(46, "GPIO_46"),
+       PINCTRL_PIN(47, "GPIO_47"),
+       PINCTRL_PIN(48, "GPIO_48"),
+       PINCTRL_PIN(49, "GPIO_49"),
+       PINCTRL_PIN(50, "GPIO_50"),
+       PINCTRL_PIN(51, "GPIO_51"),
+       PINCTRL_PIN(52, "GPIO_52"),
+       PINCTRL_PIN(53, "GPIO_53"),
+       PINCTRL_PIN(54, "GPIO_54"),
+       PINCTRL_PIN(55, "GPIO_55"),
+       PINCTRL_PIN(56, "GPIO_56"),
+       PINCTRL_PIN(57, "GPIO_57"),
+       PINCTRL_PIN(58, "GPIO_58"),
+       PINCTRL_PIN(59, "GPIO_59"),
+       PINCTRL_PIN(60, "GPIO_60"),
+       PINCTRL_PIN(61, "GPIO_61"),
+       PINCTRL_PIN(62, "GPIO_62"),
+       PINCTRL_PIN(63, "GPIO_63"),
+       PINCTRL_PIN(64, "GPIO_64"),
+       PINCTRL_PIN(65, "GPIO_65"),
+       PINCTRL_PIN(66, "GPIO_66"),
+       PINCTRL_PIN(67, "GPIO_67"),
+       PINCTRL_PIN(68, "GPIO_68"),
+       PINCTRL_PIN(69, "GPIO_69"),
+       PINCTRL_PIN(70, "GPIO_70"),
+       PINCTRL_PIN(71, "GPIO_71"),
+       PINCTRL_PIN(72, "GPIO_72"),
+       PINCTRL_PIN(73, "GPIO_73"),
+       PINCTRL_PIN(74, "GPIO_74"),
+       PINCTRL_PIN(75, "GPIO_75"),
+       PINCTRL_PIN(76, "GPIO_76"),
+       PINCTRL_PIN(77, "GPIO_77"),
+       PINCTRL_PIN(78, "GPIO_78"),
+       PINCTRL_PIN(79, "GPIO_79"),
+       PINCTRL_PIN(80, "GPIO_80"),
+       PINCTRL_PIN(81, "GPIO_81"),
+       PINCTRL_PIN(82, "GPIO_82"),
+       PINCTRL_PIN(83, "GPIO_83"),
+       PINCTRL_PIN(84, "GPIO_84"),
+       PINCTRL_PIN(85, "GPIO_85"),
+       PINCTRL_PIN(86, "GPIO_86"),
+       PINCTRL_PIN(87, "GPIO_87"),
+       PINCTRL_PIN(88, "GPIO_88"),
+       PINCTRL_PIN(89, "GPIO_89"),
+       PINCTRL_PIN(90, "GPIO_90"),
+       PINCTRL_PIN(91, "GPIO_91"),
+       PINCTRL_PIN(92, "GPIO_92"),
+       PINCTRL_PIN(93, "GPIO_93"),
+       PINCTRL_PIN(94, "GPIO_94"),
+       PINCTRL_PIN(95, "GPIO_95"),
+       PINCTRL_PIN(96, "GPIO_96"),
+       PINCTRL_PIN(97, "GPIO_97"),
+       PINCTRL_PIN(98, "GPIO_98"),
+       PINCTRL_PIN(99, "GPIO_99"),
+       PINCTRL_PIN(100, "GPIO_100"),
+       PINCTRL_PIN(101, "GPIO_101"),
+       PINCTRL_PIN(102, "GPIO_102"),
+       PINCTRL_PIN(103, "GPIO_103"),
+       PINCTRL_PIN(104, "GPIO_104"),
+       PINCTRL_PIN(105, "GPIO_105"),
+       PINCTRL_PIN(106, "GPIO_106"),
+       PINCTRL_PIN(107, "GPIO_107"),
+       PINCTRL_PIN(108, "GPIO_108"),
+       PINCTRL_PIN(109, "GPIO_109"),
+       PINCTRL_PIN(110, "GPIO_110"),
+       PINCTRL_PIN(111, "GPIO_111"),
+       PINCTRL_PIN(112, "GPIO_112"),
+       PINCTRL_PIN(113, "GPIO_113"),
+       PINCTRL_PIN(114, "GPIO_114"),
+       PINCTRL_PIN(115, "GPIO_115"),
+       PINCTRL_PIN(116, "GPIO_116"),
+       PINCTRL_PIN(117, "GPIO_117"),
+       PINCTRL_PIN(118, "GPIO_118"),
+       PINCTRL_PIN(119, "GPIO_119"),
+       PINCTRL_PIN(120, "GPIO_120"),
+       PINCTRL_PIN(121, "GPIO_121"),
+       PINCTRL_PIN(122, "GPIO_122"),
+       PINCTRL_PIN(123, "GPIO_123"),
+       PINCTRL_PIN(124, "GPIO_124"),
+       PINCTRL_PIN(125, "GPIO_125"),
+       PINCTRL_PIN(126, "GPIO_126"),
+       PINCTRL_PIN(127, "GPIO_127"),
+       PINCTRL_PIN(128, "GPIO_128"),
+       PINCTRL_PIN(129, "GPIO_129"),
+       PINCTRL_PIN(130, "GPIO_130"),
+       PINCTRL_PIN(131, "GPIO_131"),
+       PINCTRL_PIN(132, "GPIO_132"),
+       PINCTRL_PIN(133, "GPIO_133"),
+       PINCTRL_PIN(134, "GPIO_134"),
+       PINCTRL_PIN(135, "GPIO_135"),
+       PINCTRL_PIN(136, "GPIO_136"),
+       PINCTRL_PIN(137, "GPIO_137"),
+       PINCTRL_PIN(138, "GPIO_138"),
+       PINCTRL_PIN(139, "GPIO_139"),
+       PINCTRL_PIN(140, "GPIO_140"),
+       PINCTRL_PIN(141, "GPIO_141"),
+       PINCTRL_PIN(142, "GPIO_142"),
+       PINCTRL_PIN(143, "GPIO_143"),
+       PINCTRL_PIN(144, "GPIO_144"),
+       PINCTRL_PIN(145, "GPIO_145"),
+       PINCTRL_PIN(146, "GPIO_146"),
+       PINCTRL_PIN(147, "GPIO_147"),
+       PINCTRL_PIN(148, "GPIO_148"),
+       PINCTRL_PIN(149, "GPIO_149"),
+       PINCTRL_PIN(150, "GPIO_150"),
+       PINCTRL_PIN(151, "GPIO_151"),
+       PINCTRL_PIN(152, "GPIO_152"),
+       PINCTRL_PIN(153, "GPIO_153"),
+       PINCTRL_PIN(154, "GPIO_154"),
+       PINCTRL_PIN(155, "GPIO_155"),
+       PINCTRL_PIN(156, "GPIO_156"),
+       PINCTRL_PIN(157, "GPIO_157"),
+       PINCTRL_PIN(158, "GPIO_158"),
+       PINCTRL_PIN(159, "GPIO_159"),
+       PINCTRL_PIN(160, "GPIO_160"),
+       PINCTRL_PIN(161, "GPIO_161"),
+       PINCTRL_PIN(162, "GPIO_162"),
+       PINCTRL_PIN(163, "GPIO_163"),
+       PINCTRL_PIN(164, "GPIO_164"),
+       PINCTRL_PIN(165, "GPIO_165"),
+       PINCTRL_PIN(166, "GPIO_166"),
+       PINCTRL_PIN(167, "GPIO_167"),
+       PINCTRL_PIN(168, "GPIO_168"),
+       PINCTRL_PIN(169, "GPIO_169"),
+       PINCTRL_PIN(170, "GPIO_170"),
+       PINCTRL_PIN(171, "GPIO_171"),
+       PINCTRL_PIN(172, "GPIO_172"),
+       PINCTRL_PIN(173, "GPIO_173"),
+       PINCTRL_PIN(174, "GPIO_174"),
+       PINCTRL_PIN(175, "GPIO_175"),
+       PINCTRL_PIN(176, "GPIO_176"),
+       PINCTRL_PIN(177, "GPIO_177"),
+       PINCTRL_PIN(178, "GPIO_178"),
+       PINCTRL_PIN(179, "GPIO_179"),
+       PINCTRL_PIN(180, "GPIO_180"),
+       PINCTRL_PIN(181, "GPIO_181"),
+       PINCTRL_PIN(182, "GPIO_182"),
+       PINCTRL_PIN(183, "GPIO_183"),
+       PINCTRL_PIN(184, "GPIO_184"),
+       PINCTRL_PIN(185, "GPIO_185"),
+       PINCTRL_PIN(186, "GPIO_186"),
+       PINCTRL_PIN(187, "GPIO_187"),
+       PINCTRL_PIN(188, "GPIO_188"),
+       PINCTRL_PIN(189, "GPIO_189"),
+       PINCTRL_PIN(190, "GPIO_190"),
+       PINCTRL_PIN(191, "GPIO_191"),
+       PINCTRL_PIN(192, "GPIO_192"),
+       PINCTRL_PIN(193, "GPIO_193"),
+       PINCTRL_PIN(194, "GPIO_194"),
+       PINCTRL_PIN(195, "GPIO_195"),
+       PINCTRL_PIN(196, "GPIO_196"),
+       PINCTRL_PIN(197, "GPIO_197"),
+       PINCTRL_PIN(198, "GPIO_198"),
+       PINCTRL_PIN(199, "GPIO_199"),
+       PINCTRL_PIN(200, "GPIO_200"),
+       PINCTRL_PIN(201, "GPIO_201"),
+       PINCTRL_PIN(202, "GPIO_202"),
+       PINCTRL_PIN(203, "GPIO_203"),
+       PINCTRL_PIN(204, "GPIO_204"),
+       PINCTRL_PIN(205, "GPIO_205"),
+       PINCTRL_PIN(206, "GPIO_206"),
+       PINCTRL_PIN(207, "GPIO_207"),
+       PINCTRL_PIN(208, "GPIO_208"),
+       PINCTRL_PIN(209, "GPIO_209"),
+       PINCTRL_PIN(210, "UFS_RESET"),
+       PINCTRL_PIN(211, "SDC2_CLK"),
+       PINCTRL_PIN(212, "SDC2_CMD"),
+       PINCTRL_PIN(213, "SDC2_DATA"),
+};
+
+/*
+ * DECLARE_MSM_GPIO_PINS(n) emits
+ *     static const unsigned int gpioN_pins[] = { n };
+ * i.e. the one-element pin-number array backing each "gpioN" group
+ * name referenced by the *_groups[] tables below.
+ */
+#define DECLARE_MSM_GPIO_PINS(pin) \
+       static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+DECLARE_MSM_GPIO_PINS(190);
+DECLARE_MSM_GPIO_PINS(191);
+DECLARE_MSM_GPIO_PINS(192);
+DECLARE_MSM_GPIO_PINS(193);
+DECLARE_MSM_GPIO_PINS(194);
+DECLARE_MSM_GPIO_PINS(195);
+DECLARE_MSM_GPIO_PINS(196);
+DECLARE_MSM_GPIO_PINS(197);
+DECLARE_MSM_GPIO_PINS(198);
+DECLARE_MSM_GPIO_PINS(199);
+DECLARE_MSM_GPIO_PINS(200);
+DECLARE_MSM_GPIO_PINS(201);
+DECLARE_MSM_GPIO_PINS(202);
+DECLARE_MSM_GPIO_PINS(203);
+DECLARE_MSM_GPIO_PINS(204);
+DECLARE_MSM_GPIO_PINS(205);
+DECLARE_MSM_GPIO_PINS(206);
+DECLARE_MSM_GPIO_PINS(207);
+DECLARE_MSM_GPIO_PINS(208);
+DECLARE_MSM_GPIO_PINS(209);
+
+/* Dedicated (non-muxable) pads: pins 210-213 of sm8650_pins[] above. */
+static const unsigned int ufs_reset_pins[] = { 210 };
+static const unsigned int sdc2_clk_pins[] = { 211 };
+static const unsigned int sdc2_cmd_pins[] = { 212 };
+static const unsigned int sdc2_data_pins[] = { 213 };
+
+/*
+ * Function selector identifiers; order matches the sm8650_functions[]
+ * table below.  msm_mux__ is the trailing placeholder entry (the usual
+ * qcom pinctrl "no function" sentinel) and must stay last.
+ */
+enum sm8650_functions {
+       msm_mux_gpio,
+       msm_mux_aoss_cti,
+       msm_mux_atest_char,
+       msm_mux_atest_usb,
+       msm_mux_audio_ext_mclk0,
+       msm_mux_audio_ext_mclk1,
+       msm_mux_audio_ref_clk,
+       msm_mux_cam_aon_mclk2,
+       msm_mux_cam_aon_mclk4,
+       msm_mux_cam_mclk,
+       msm_mux_cci_async_in,
+       msm_mux_cci_i2c_scl,
+       msm_mux_cci_i2c_sda,
+       msm_mux_cci_timer,
+       msm_mux_cmu_rng,
+       msm_mux_coex_uart1_rx,
+       msm_mux_coex_uart1_tx,
+       msm_mux_coex_uart2_rx,
+       msm_mux_coex_uart2_tx,
+       msm_mux_cri_trng,
+       msm_mux_dbg_out_clk,
+       msm_mux_ddr_bist_complete,
+       msm_mux_ddr_bist_fail,
+       msm_mux_ddr_bist_start,
+       msm_mux_ddr_bist_stop,
+       msm_mux_ddr_pxi0,
+       msm_mux_ddr_pxi1,
+       msm_mux_ddr_pxi2,
+       msm_mux_ddr_pxi3,
+       msm_mux_do_not,
+       msm_mux_dp_hot,
+       msm_mux_egpio,
+       msm_mux_gcc_gp1,
+       msm_mux_gcc_gp2,
+       msm_mux_gcc_gp3,
+       msm_mux_gnss_adc0,
+       msm_mux_gnss_adc1,
+       msm_mux_i2chub0_se0,
+       msm_mux_i2chub0_se1,
+       msm_mux_i2chub0_se2,
+       msm_mux_i2chub0_se3,
+       msm_mux_i2chub0_se4,
+       msm_mux_i2chub0_se5,
+       msm_mux_i2chub0_se6,
+       msm_mux_i2chub0_se7,
+       msm_mux_i2chub0_se8,
+       msm_mux_i2chub0_se9,
+       msm_mux_i2s0_data0,
+       msm_mux_i2s0_data1,
+       msm_mux_i2s0_sck,
+       msm_mux_i2s0_ws,
+       msm_mux_i2s1_data0,
+       msm_mux_i2s1_data1,
+       msm_mux_i2s1_sck,
+       msm_mux_i2s1_ws,
+       msm_mux_ibi_i3c,
+       msm_mux_jitter_bist,
+       msm_mux_mdp_vsync,
+       msm_mux_mdp_vsync0_out,
+       msm_mux_mdp_vsync1_out,
+       msm_mux_mdp_vsync2_out,
+       msm_mux_mdp_vsync3_out,
+       msm_mux_mdp_vsync_e,
+       msm_mux_nav_gpio0,
+       msm_mux_nav_gpio1,
+       msm_mux_nav_gpio2,
+       msm_mux_nav_gpio3,
+       msm_mux_pcie0_clk_req_n,
+       msm_mux_pcie1_clk_req_n,
+       msm_mux_phase_flag,
+       msm_mux_pll_bist_sync,
+       msm_mux_pll_clk_aux,
+       msm_mux_prng_rosc0,
+       msm_mux_prng_rosc1,
+       msm_mux_prng_rosc2,
+       msm_mux_prng_rosc3,
+       msm_mux_qdss_cti,
+       msm_mux_qdss_gpio,
+       msm_mux_qlink_big_enable,
+       msm_mux_qlink_big_request,
+       msm_mux_qlink_little_enable,
+       msm_mux_qlink_little_request,
+       msm_mux_qlink_wmss,
+       msm_mux_qspi0,
+       msm_mux_qspi1,
+       msm_mux_qspi2,
+       msm_mux_qspi3,
+       msm_mux_qspi_clk,
+       msm_mux_qspi_cs,
+       msm_mux_qup1_se0,
+       msm_mux_qup1_se1,
+       msm_mux_qup1_se2,
+       msm_mux_qup1_se3,
+       msm_mux_qup1_se4,
+       msm_mux_qup1_se5,
+       msm_mux_qup1_se6,
+       msm_mux_qup1_se7,
+       msm_mux_qup2_se0,
+       msm_mux_qup2_se1,
+       msm_mux_qup2_se2,
+       msm_mux_qup2_se3,
+       msm_mux_qup2_se4,
+       msm_mux_qup2_se5,
+       msm_mux_qup2_se6,
+       msm_mux_qup2_se7,
+       msm_mux_sd_write_protect,
+       msm_mux_sdc40,
+       msm_mux_sdc41,
+       msm_mux_sdc42,
+       msm_mux_sdc43,
+       msm_mux_sdc4_clk,
+       msm_mux_sdc4_cmd,
+       msm_mux_tb_trig_sdc2,
+       msm_mux_tb_trig_sdc4,
+       msm_mux_tgu_ch0_trigout,
+       msm_mux_tgu_ch1_trigout,
+       msm_mux_tgu_ch2_trigout,
+       msm_mux_tgu_ch3_trigout,
+       msm_mux_tmess_prng0,
+       msm_mux_tmess_prng1,
+       msm_mux_tmess_prng2,
+       msm_mux_tmess_prng3,
+       msm_mux_tsense_pwm1,
+       msm_mux_tsense_pwm2,
+       msm_mux_tsense_pwm3,
+       msm_mux_uim0_clk,
+       msm_mux_uim0_data,
+       msm_mux_uim0_present,
+       msm_mux_uim0_reset,
+       msm_mux_uim1_clk,
+       msm_mux_uim1_data,
+       msm_mux_uim1_present,
+       msm_mux_uim1_reset,
+       msm_mux_usb1_hs,
+       msm_mux_usb_phy,
+       msm_mux_vfr_0,
+       msm_mux_vfr_1,
+       msm_mux_vsense_trigger_mirnat,
+       msm_mux__,
+};
+
+/* All 210 muxable pads (gpio0-gpio209) can select the plain GPIO function. */
+static const char *const gpio_groups[] = {
+       "gpio0",   "gpio1",   "gpio2",   "gpio3",
+       "gpio4",   "gpio5",   "gpio6",   "gpio7",
+       "gpio8",   "gpio9",   "gpio10",  "gpio11",
+       "gpio12",  "gpio13",  "gpio14",  "gpio15",
+       "gpio16",  "gpio17",  "gpio18",  "gpio19",
+       "gpio20",  "gpio21",  "gpio22",  "gpio23",
+       "gpio24",  "gpio25",  "gpio26",  "gpio27",
+       "gpio28",  "gpio29",  "gpio30",  "gpio31",
+       "gpio32",  "gpio33",  "gpio34",  "gpio35",
+       "gpio36",  "gpio37",  "gpio38",  "gpio39",
+       "gpio40",  "gpio41",  "gpio42",  "gpio43",
+       "gpio44",  "gpio45",  "gpio46",  "gpio47",
+       "gpio48",  "gpio49",  "gpio50",  "gpio51",
+       "gpio52",  "gpio53",  "gpio54",  "gpio55",
+       "gpio56",  "gpio57",  "gpio58",  "gpio59",
+       "gpio60",  "gpio61",  "gpio62",  "gpio63",
+       "gpio64",  "gpio65",  "gpio66",  "gpio67",
+       "gpio68",  "gpio69",  "gpio70",  "gpio71",
+       "gpio72",  "gpio73",  "gpio74",  "gpio75",
+       "gpio76",  "gpio77",  "gpio78",  "gpio79",
+       "gpio80",  "gpio81",  "gpio82",  "gpio83",
+       "gpio84",  "gpio85",  "gpio86",  "gpio87",
+       "gpio88",  "gpio89",  "gpio90",  "gpio91",
+       "gpio92",  "gpio93",  "gpio94",  "gpio95",
+       "gpio96",  "gpio97",  "gpio98",  "gpio99",
+       "gpio100", "gpio101", "gpio102", "gpio103",
+       "gpio104", "gpio105", "gpio106", "gpio107",
+       "gpio108", "gpio109", "gpio110", "gpio111",
+       "gpio112", "gpio113", "gpio114", "gpio115",
+       "gpio116", "gpio117", "gpio118", "gpio119",
+       "gpio120", "gpio121", "gpio122", "gpio123",
+       "gpio124", "gpio125", "gpio126", "gpio127",
+       "gpio128", "gpio129", "gpio130", "gpio131",
+       "gpio132", "gpio133", "gpio134", "gpio135",
+       "gpio136", "gpio137", "gpio138", "gpio139",
+       "gpio140", "gpio141", "gpio142", "gpio143",
+       "gpio144", "gpio145", "gpio146", "gpio147",
+       "gpio148", "gpio149", "gpio150", "gpio151",
+       "gpio152", "gpio153", "gpio154", "gpio155",
+       "gpio156", "gpio157", "gpio158", "gpio159",
+       "gpio160", "gpio161", "gpio162", "gpio163",
+       "gpio164", "gpio165", "gpio166", "gpio167",
+       "gpio168", "gpio169", "gpio170", "gpio171",
+       "gpio172", "gpio173", "gpio174", "gpio175",
+       "gpio176", "gpio177", "gpio178", "gpio179",
+       "gpio180", "gpio181", "gpio182", "gpio183",
+       "gpio184", "gpio185", "gpio186", "gpio187",
+       "gpio188", "gpio189", "gpio190", "gpio191",
+       "gpio192", "gpio193", "gpio194", "gpio195",
+       "gpio196", "gpio197", "gpio198", "gpio199",
+       "gpio200", "gpio201", "gpio202", "gpio203",
+       "gpio204", "gpio205", "gpio206", "gpio207",
+       "gpio208", "gpio209",
+};
+
+/*
+ * NOTE(review): presumably the eGPIO-capable pads — confirm against the
+ * SM8650 TLMM documentation.
+ * Declarator spacing normalized to "*const" to match every other
+ * *_groups[] table in this file.
+ */
+static const char *const egpio_groups[] = {
+       "gpio0",   "gpio1",   "gpio2",   "gpio3",   "gpio4",   "gpio5",
+       "gpio6",   "gpio7",   "gpio165", "gpio166", "gpio167", "gpio168",
+       "gpio169", "gpio170", "gpio171", "gpio172", "gpio173", "gpio174",
+       "gpio175", "gpio176", "gpio177", "gpio178", "gpio179", "gpio180",
+       "gpio181", "gpio182", "gpio183", "gpio184", "gpio185", "gpio186",
+       "gpio187", "gpio188", "gpio189", "gpio190", "gpio191", "gpio192",
+       "gpio193", "gpio194", "gpio195", "gpio196", "gpio197", "gpio198",
+       "gpio199", "gpio200", "gpio201", "gpio202", "gpio203", "gpio204",
+       "gpio205", "gpio206", "gpio207", "gpio208", "gpio209",
+};
+
+/* Declarator spacing normalized to "*const" for consistency with siblings. */
+static const char *const aoss_cti_groups[] = {
+       "gpio50", "gpio51", "gpio60", "gpio61",
+};
+
+/*
+ * Pin group membership tables: each array lists the gpio groups that can
+ * mux to the named function (consumed via MSM_PIN_FUNCTION() in
+ * sm8650_functions[] below).
+ */
+static const char *const atest_char_groups[] = {
+       "gpio130", "gpio131", "gpio132", "gpio133",
+       "gpio137",
+};
+
+static const char *const atest_usb_groups[] = {
+       "gpio71", "gpio72", "gpio74", "gpio130",
+       "gpio131",
+};
+
+static const char *const audio_ext_mclk0_groups[] = {
+       "gpio125",
+};
+
+static const char *const audio_ext_mclk1_groups[] = {
+       "gpio124",
+};
+
+static const char *const audio_ref_clk_groups[] = {
+       "gpio124",
+};
+
+static const char *const cam_aon_mclk2_groups[] = {
+       "gpio102",
+};
+
+static const char *const cam_aon_mclk4_groups[] = {
+       "gpio104",
+};
+
+static const char *const cam_mclk_groups[] = {
+       "gpio100", "gpio101", "gpio103", "gpio105",
+       "gpio106", "gpio108",
+};
+
+static const char *const cci_async_in_groups[] = {
+       "gpio15", "gpio163", "gpio164",
+};
+
+static const char *const cci_i2c_scl_groups[] = {
+       "gpio13",  "gpio114", "gpio116", "gpio118",
+       "gpio120", "gpio153",
+};
+
+static const char *const cci_i2c_sda_groups[] = {
+       "gpio12",  "gpio112", "gpio113", "gpio115",
+       "gpio117", "gpio119",
+};
+
+static const char *const cci_timer_groups[] = {
+       "gpio10", "gpio11", "gpio109", "gpio110",
+       "gpio111",
+};
+
+/* NOTE(review): "gpio127" is listed before "gpio122" — verify the ordering
+ * is intentional (mux-slot order) rather than a transcription slip. */
+static const char *const cmu_rng_groups[] = {
+       "gpio95",  "gpio96", "gpio112", "gpio127",
+       "gpio122", "gpio128",
+};
+
+static const char *const coex_uart1_rx_groups[] = {
+       "gpio148",
+};
+
+static const char *const coex_uart1_tx_groups[] = {
+       "gpio149",
+};
+
+static const char *const coex_uart2_rx_groups[] = {
+       "gpio150",
+};
+
+static const char *const coex_uart2_tx_groups[] = {
+       "gpio151",
+};
+
+static const char *const cri_trng_groups[] = {
+       "gpio187",
+};
+
+static const char *const dbg_out_clk_groups[] = {
+       "gpio92",
+};
+
+static const char *const ddr_bist_complete_groups[] = {
+       "gpio44",
+};
+
+static const char *const ddr_bist_fail_groups[] = {
+       "gpio40",
+};
+
+static const char *const ddr_bist_start_groups[] = {
+       "gpio41",
+};
+
+static const char *const ddr_bist_stop_groups[] = {
+       "gpio45",
+};
+
+static const char *const ddr_pxi0_groups[] = {
+       "gpio75", "gpio76",
+};
+
+static const char *const ddr_pxi1_groups[] = {
+       "gpio44", "gpio45",
+};
+
+static const char *const ddr_pxi2_groups[] = {
+       "gpio51", "gpio62",
+};
+
+static const char *const ddr_pxi3_groups[] = {
+       "gpio46", "gpio47",
+};
+
+static const char *const do_not_groups[] = {
+       "gpio36", "gpio37", "gpio38", "gpio39",
+       "gpio134", "gpio135", "gpio136",
+};
+
+static const char *const dp_hot_groups[] = {
+       "gpio47",
+};
+
+static const char *const gcc_gp1_groups[] = {
+       "gpio86", "gpio134",
+};
+
+static const char *const gcc_gp2_groups[] = {
+       "gpio87", "gpio135",
+};
+
+static const char *const gcc_gp3_groups[] = {
+       "gpio88", "gpio136",
+};
+
+static const char *const gnss_adc0_groups[] = {
+       "gpio89", "gpio91",
+};
+
+static const char *const gnss_adc1_groups[] = {
+       "gpio90", "gpio92",
+};
+
+static const char *const i2chub0_se0_groups[] = {
+       "gpio64", "gpio65",
+};
+
+static const char *const i2chub0_se1_groups[] = {
+       "gpio66", "gpio67",
+};
+
+static const char *const i2chub0_se2_groups[] = {
+       "gpio68", "gpio69",
+};
+
+static const char *const i2chub0_se3_groups[] = {
+       "gpio70", "gpio71",
+};
+
+static const char *const i2chub0_se4_groups[] = {
+       "gpio72", "gpio73",
+};
+
+static const char *const i2chub0_se5_groups[] = {
+       "gpio74", "gpio75",
+};
+
+static const char *const i2chub0_se6_groups[] = {
+       "gpio76", "gpio77",
+};
+
+static const char *const i2chub0_se7_groups[] = {
+       "gpio78", "gpio79",
+};
+
+static const char *const i2chub0_se8_groups[] = {
+       "gpio206", "gpio207",
+};
+
+static const char *const i2chub0_se9_groups[] = {
+       "gpio80", "gpio81",
+};
+
+static const char *const i2s0_data0_groups[] = {
+       "gpio127",
+};
+
+static const char *const i2s0_data1_groups[] = {
+       "gpio128",
+};
+
+static const char *const i2s0_sck_groups[] = {
+       "gpio126",
+};
+
+static const char *const i2s0_ws_groups[] = {
+       "gpio129",
+};
+
+static const char *const i2s1_data0_groups[] = {
+       "gpio122",
+};
+
+static const char *const i2s1_data1_groups[] = {
+       "gpio124",
+};
+
+static const char *const i2s1_sck_groups[] = {
+       "gpio121",
+};
+
+static const char *const i2s1_ws_groups[] = {
+       "gpio123",
+};
+
+static const char *const ibi_i3c_groups[] = {
+       "gpio0",  "gpio1",  "gpio4",  "gpio5",
+       "gpio8",  "gpio9",  "gpio12", "gpio13",
+       "gpio32", "gpio33", "gpio36", "gpio37",
+       "gpio48", "gpio49", "gpio56", "gpio57",
+};
+
+/* Group tables: display vsync, NAV, PCIe, PLL/PRNG, QDSS debug and QSPI. */
+static const char *const jitter_bist_groups[] = {
+       "gpio73",
+};
+
+static const char *const mdp_vsync_groups[] = {
+       "gpio86", "gpio87", "gpio133", "gpio137",
+};
+
+static const char *const mdp_vsync0_out_groups[] = {
+       "gpio86",
+};
+
+static const char *const mdp_vsync1_out_groups[] = {
+       "gpio86",
+};
+
+static const char *const mdp_vsync2_out_groups[] = {
+       "gpio87",
+};
+
+static const char *const mdp_vsync3_out_groups[] = {
+       "gpio87",
+};
+
+static const char *const mdp_vsync_e_groups[] = {
+       "gpio88",
+};
+
+static const char *const nav_gpio0_groups[] = {
+       "gpio154",
+};
+
+static const char *const nav_gpio1_groups[] = {
+       "gpio155",
+};
+
+static const char *const nav_gpio2_groups[] = {
+       "gpio152",
+};
+
+static const char *const nav_gpio3_groups[] = {
+       "gpio154",
+};
+
+static const char *const pcie0_clk_req_n_groups[] = {
+       "gpio95",
+};
+
+static const char *const pcie1_clk_req_n_groups[] = {
+       "gpio98",
+};
+
+static const char *const phase_flag_groups[] = {
+       "gpio0",   "gpio1",   "gpio3",   "gpio4",
+       "gpio5",   "gpio7",   "gpio8",   "gpio9",
+       "gpio11",  "gpio12",  "gpio13",  "gpio15",
+       "gpio16",  "gpio17",  "gpio19",  "gpio94",
+       "gpio95",  "gpio96",  "gpio109", "gpio111",
+       "gpio112", "gpio113", "gpio114", "gpio115",
+       "gpio116", "gpio117", "gpio118", "gpio119",
+       "gpio120", "gpio153", "gpio163", "gpio164",
+};
+
+static const char *const pll_bist_sync_groups[] = {
+       "gpio68",
+};
+
+static const char *const pll_clk_aux_groups[] = {
+       "gpio106",
+};
+
+static const char *const prng_rosc0_groups[] = {
+       "gpio186",
+};
+
+static const char *const prng_rosc1_groups[] = {
+       "gpio183",
+};
+
+static const char *const prng_rosc2_groups[] = {
+       "gpio182",
+};
+
+static const char *const prng_rosc3_groups[] = {
+       "gpio181",
+};
+
+static const char *const qdss_cti_groups[] = {
+       "gpio27", "gpio31", "gpio78",  "gpio79",
+       "gpio82", "gpio83", "gpio159", "gpio162",
+};
+
+static const char *const qdss_gpio_groups[] = {
+       "gpio3",   "gpio7",   "gpio8",   "gpio13",
+       "gpio15",  "gpio100", "gpio101", "gpio102",
+       "gpio103", "gpio104", "gpio105", "gpio113",
+       "gpio114", "gpio115", "gpio116", "gpio117",
+       "gpio118", "gpio140", "gpio141", "gpio142",
+       "gpio143", "gpio144", "gpio145", "gpio146",
+       "gpio147", "gpio148", "gpio149", "gpio150",
+       "gpio151", "gpio152", "gpio153", "gpio154",
+       "gpio155", "gpio156", "gpio157", "gpio158",
+};
+
+static const char *const qlink_big_enable_groups[] = {
+       "gpio160",
+};
+
+static const char *const qlink_big_request_groups[] = {
+       "gpio159",
+};
+
+static const char *const qlink_little_enable_groups[] = {
+       "gpio157",
+};
+
+static const char *const qlink_little_request_groups[] = {
+       "gpio156",
+};
+
+static const char *const qlink_wmss_groups[] = {
+       "gpio158",
+};
+
+static const char *const qspi0_groups[] = {
+       "gpio134",
+};
+
+static const char *const qspi1_groups[] = {
+       "gpio136",
+};
+
+static const char *const qspi2_groups[] = {
+       "gpio56",
+};
+
+static const char *const qspi3_groups[] = {
+       "gpio57",
+};
+
+static const char *const qspi_clk_groups[] = {
+       "gpio135",
+};
+
+static const char *const qspi_cs_groups[] = {
+       "gpio58", "gpio59",
+};
+};
+
+/* QUP serial-engine group tables (qup1/qup2, serial engines 0-7). */
+static const char *const qup1_se0_groups[] = {
+       "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char *const qup1_se1_groups[] = {
+       "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char *const qup1_se2_groups[] = {
+       "gpio40", "gpio41", "gpio42", "gpio43",
+       "gpio44", "gpio45", "gpio46",
+};
+
+static const char *const qup1_se3_groups[] = {
+       "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char *const qup1_se4_groups[] = {
+       "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char *const qup1_se5_groups[] = {
+       "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char *const qup1_se6_groups[] = {
+       "gpio56", "gpio57", "gpio58", "gpio59",
+};
+
+static const char *const qup1_se7_groups[] = {
+       "gpio60", "gpio61", "gpio62", "gpio63",
+};
+
+static const char *const qup2_se0_groups[] = {
+       "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char *const qup2_se1_groups[] = {
+       "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char *const qup2_se2_groups[] = {
+       "gpio8",  "gpio9",  "gpio10", "gpio11",
+       "gpio13", "gpio15", "gpio12",
+};
+
+static const char *const qup2_se3_groups[] = {
+       "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char *const qup2_se4_groups[] = {
+       "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+/* NOTE(review): "gpio23" appears twice — presumably qup2_se5 occupies two
+ * mux slots on gpio23; confirm against the SM8650 TLMM function map. */
+static const char *const qup2_se5_groups[] = {
+       "gpio20", "gpio21", "gpio22", "gpio23",
+       "gpio23",
+};
+
+static const char *const qup2_se6_groups[] = {
+       "gpio24", "gpio25", "gpio26", "gpio27",
+};
+
+static const char *const qup2_se7_groups[] = {
+       "gpio28", "gpio29", "gpio30", "gpio31",
+};
+
+/* Group tables: SD/SDC4, TGU triggers, TMESS/tsense, UIM, USB, VFR, vsense. */
+static const char *const sd_write_protect_groups[] = {
+       "gpio93",
+};
+
+static const char *const sdc40_groups[] = {
+       "gpio134",
+};
+
+static const char *const sdc41_groups[] = {
+       "gpio136",
+};
+
+static const char *const sdc42_groups[] = {
+       "gpio56",
+};
+
+static const char *const sdc43_groups[] = {
+       "gpio57",
+};
+
+static const char *const sdc4_clk_groups[] = {
+       "gpio135",
+};
+
+static const char *const sdc4_cmd_groups[] = {
+       "gpio59",
+};
+
+static const char *const tb_trig_sdc2_groups[] = {
+       "gpio8",
+};
+
+static const char *const tb_trig_sdc4_groups[] = {
+       "gpio58",
+};
+
+static const char *const tgu_ch0_trigout_groups[] = {
+       "gpio8",
+};
+
+static const char *const tgu_ch1_trigout_groups[] = {
+       "gpio9",
+};
+
+static const char *const tgu_ch2_trigout_groups[] = {
+       "gpio10",
+};
+
+static const char *const tgu_ch3_trigout_groups[] = {
+       "gpio11",
+};
+
+static const char *const tmess_prng0_groups[] = {
+       "gpio94",
+};
+
+static const char *const tmess_prng1_groups[] = {
+       "gpio95",
+};
+
+static const char *const tmess_prng2_groups[] = {
+       "gpio96",
+};
+
+static const char *const tmess_prng3_groups[] = {
+       "gpio109",
+};
+
+/* NOTE(review): all three tsense_pwm* functions map to gpio58 — presumably
+ * distinct mux slots on the same pad; confirm against the TLMM spec. */
+static const char *const tsense_pwm1_groups[] = {
+       "gpio58",
+};
+
+static const char *const tsense_pwm2_groups[] = {
+       "gpio58",
+};
+
+static const char *const tsense_pwm3_groups[] = {
+       "gpio58",
+};
+
+static const char *const uim0_clk_groups[] = {
+       "gpio131",
+};
+
+static const char *const uim0_data_groups[] = {
+       "gpio130",
+};
+
+static const char *const uim0_present_groups[] = {
+       "gpio47",
+};
+
+static const char *const uim0_reset_groups[] = {
+       "gpio132",
+};
+
+static const char *const uim1_clk_groups[] = {
+       "gpio135",
+};
+
+static const char *const uim1_data_groups[] = {
+       "gpio134",
+};
+
+static const char *const uim1_present_groups[] = {
+       "gpio76",
+};
+
+static const char *const uim1_reset_groups[] = {
+       "gpio136",
+};
+
+static const char *const usb1_hs_groups[] = {
+       "gpio89",
+};
+
+static const char *const usb_phy_groups[] = {
+       "gpio29", "gpio54",
+};
+
+static const char *const vfr_0_groups[] = {
+       "gpio150",
+};
+
+static const char *const vfr_1_groups[] = {
+       "gpio155",
+};
+
+static const char *const vsense_trigger_mirnat_groups[] = {
+       "gpio60",
+};
+
+/*
+ * Master function table consumed by the shared pinctrl-msm core.
+ * NOTE(review): entry order presumably must match the msm_mux_* enum that
+ * the PINGROUP() .funcs arrays index into — the enum is not visible in this
+ * hunk, so verify when adding or reordering functions.
+ */
+static const struct pinfunction sm8650_functions[] = {
+       MSM_PIN_FUNCTION(gpio),
+       MSM_PIN_FUNCTION(aoss_cti),
+       MSM_PIN_FUNCTION(atest_char),
+       MSM_PIN_FUNCTION(atest_usb),
+       MSM_PIN_FUNCTION(audio_ext_mclk0),
+       MSM_PIN_FUNCTION(audio_ext_mclk1),
+       MSM_PIN_FUNCTION(audio_ref_clk),
+       MSM_PIN_FUNCTION(cam_aon_mclk2),
+       MSM_PIN_FUNCTION(cam_aon_mclk4),
+       MSM_PIN_FUNCTION(cam_mclk),
+       MSM_PIN_FUNCTION(cci_async_in),
+       MSM_PIN_FUNCTION(cci_i2c_scl),
+       MSM_PIN_FUNCTION(cci_i2c_sda),
+       MSM_PIN_FUNCTION(cci_timer),
+       MSM_PIN_FUNCTION(cmu_rng),
+       MSM_PIN_FUNCTION(coex_uart1_rx),
+       MSM_PIN_FUNCTION(coex_uart1_tx),
+       MSM_PIN_FUNCTION(coex_uart2_rx),
+       MSM_PIN_FUNCTION(coex_uart2_tx),
+       MSM_PIN_FUNCTION(cri_trng),
+       MSM_PIN_FUNCTION(dbg_out_clk),
+       MSM_PIN_FUNCTION(ddr_bist_complete),
+       MSM_PIN_FUNCTION(ddr_bist_fail),
+       MSM_PIN_FUNCTION(ddr_bist_start),
+       MSM_PIN_FUNCTION(ddr_bist_stop),
+       MSM_PIN_FUNCTION(ddr_pxi0),
+       MSM_PIN_FUNCTION(ddr_pxi1),
+       MSM_PIN_FUNCTION(ddr_pxi2),
+       MSM_PIN_FUNCTION(ddr_pxi3),
+       MSM_PIN_FUNCTION(do_not),
+       MSM_PIN_FUNCTION(dp_hot),
+       MSM_PIN_FUNCTION(egpio),
+       MSM_PIN_FUNCTION(gcc_gp1),
+       MSM_PIN_FUNCTION(gcc_gp2),
+       MSM_PIN_FUNCTION(gcc_gp3),
+       MSM_PIN_FUNCTION(gnss_adc0),
+       MSM_PIN_FUNCTION(gnss_adc1),
+       MSM_PIN_FUNCTION(i2chub0_se0),
+       MSM_PIN_FUNCTION(i2chub0_se1),
+       MSM_PIN_FUNCTION(i2chub0_se2),
+       MSM_PIN_FUNCTION(i2chub0_se3),
+       MSM_PIN_FUNCTION(i2chub0_se4),
+       MSM_PIN_FUNCTION(i2chub0_se5),
+       MSM_PIN_FUNCTION(i2chub0_se6),
+       MSM_PIN_FUNCTION(i2chub0_se7),
+       MSM_PIN_FUNCTION(i2chub0_se8),
+       MSM_PIN_FUNCTION(i2chub0_se9),
+       MSM_PIN_FUNCTION(i2s0_data0),
+       MSM_PIN_FUNCTION(i2s0_data1),
+       MSM_PIN_FUNCTION(i2s0_sck),
+       MSM_PIN_FUNCTION(i2s0_ws),
+       MSM_PIN_FUNCTION(i2s1_data0),
+       MSM_PIN_FUNCTION(i2s1_data1),
+       MSM_PIN_FUNCTION(i2s1_sck),
+       MSM_PIN_FUNCTION(i2s1_ws),
+       MSM_PIN_FUNCTION(ibi_i3c),
+       MSM_PIN_FUNCTION(jitter_bist),
+       MSM_PIN_FUNCTION(mdp_vsync),
+       MSM_PIN_FUNCTION(mdp_vsync0_out),
+       MSM_PIN_FUNCTION(mdp_vsync1_out),
+       MSM_PIN_FUNCTION(mdp_vsync2_out),
+       MSM_PIN_FUNCTION(mdp_vsync3_out),
+       MSM_PIN_FUNCTION(mdp_vsync_e),
+       MSM_PIN_FUNCTION(nav_gpio0),
+       MSM_PIN_FUNCTION(nav_gpio1),
+       MSM_PIN_FUNCTION(nav_gpio2),
+       MSM_PIN_FUNCTION(nav_gpio3),
+       MSM_PIN_FUNCTION(pcie0_clk_req_n),
+       MSM_PIN_FUNCTION(pcie1_clk_req_n),
+       MSM_PIN_FUNCTION(phase_flag),
+       MSM_PIN_FUNCTION(pll_bist_sync),
+       MSM_PIN_FUNCTION(pll_clk_aux),
+       MSM_PIN_FUNCTION(prng_rosc0),
+       MSM_PIN_FUNCTION(prng_rosc1),
+       MSM_PIN_FUNCTION(prng_rosc2),
+       MSM_PIN_FUNCTION(prng_rosc3),
+       MSM_PIN_FUNCTION(qdss_cti),
+       MSM_PIN_FUNCTION(qdss_gpio),
+       MSM_PIN_FUNCTION(qlink_big_enable),
+       MSM_PIN_FUNCTION(qlink_big_request),
+       MSM_PIN_FUNCTION(qlink_little_enable),
+       MSM_PIN_FUNCTION(qlink_little_request),
+       MSM_PIN_FUNCTION(qlink_wmss),
+       MSM_PIN_FUNCTION(qspi0),
+       MSM_PIN_FUNCTION(qspi1),
+       MSM_PIN_FUNCTION(qspi2),
+       MSM_PIN_FUNCTION(qspi3),
+       MSM_PIN_FUNCTION(qspi_clk),
+       MSM_PIN_FUNCTION(qspi_cs),
+       MSM_PIN_FUNCTION(qup1_se0),
+       MSM_PIN_FUNCTION(qup1_se1),
+       MSM_PIN_FUNCTION(qup1_se2),
+       MSM_PIN_FUNCTION(qup1_se3),
+       MSM_PIN_FUNCTION(qup1_se4),
+       MSM_PIN_FUNCTION(qup1_se5),
+       MSM_PIN_FUNCTION(qup1_se6),
+       MSM_PIN_FUNCTION(qup1_se7),
+       MSM_PIN_FUNCTION(qup2_se0),
+       MSM_PIN_FUNCTION(qup2_se1),
+       MSM_PIN_FUNCTION(qup2_se2),
+       MSM_PIN_FUNCTION(qup2_se3),
+       MSM_PIN_FUNCTION(qup2_se4),
+       MSM_PIN_FUNCTION(qup2_se5),
+       MSM_PIN_FUNCTION(qup2_se6),
+       MSM_PIN_FUNCTION(qup2_se7),
+       MSM_PIN_FUNCTION(sd_write_protect),
+       MSM_PIN_FUNCTION(sdc40),
+       MSM_PIN_FUNCTION(sdc41),
+       MSM_PIN_FUNCTION(sdc42),
+       MSM_PIN_FUNCTION(sdc43),
+       MSM_PIN_FUNCTION(sdc4_clk),
+       MSM_PIN_FUNCTION(sdc4_cmd),
+       MSM_PIN_FUNCTION(tb_trig_sdc2),
+       MSM_PIN_FUNCTION(tb_trig_sdc4),
+       MSM_PIN_FUNCTION(tgu_ch0_trigout),
+       MSM_PIN_FUNCTION(tgu_ch1_trigout),
+       MSM_PIN_FUNCTION(tgu_ch2_trigout),
+       MSM_PIN_FUNCTION(tgu_ch3_trigout),
+       MSM_PIN_FUNCTION(tmess_prng0),
+       MSM_PIN_FUNCTION(tmess_prng1),
+       MSM_PIN_FUNCTION(tmess_prng2),
+       MSM_PIN_FUNCTION(tmess_prng3),
+       MSM_PIN_FUNCTION(tsense_pwm1),
+       MSM_PIN_FUNCTION(tsense_pwm2),
+       MSM_PIN_FUNCTION(tsense_pwm3),
+       MSM_PIN_FUNCTION(uim0_clk),
+       MSM_PIN_FUNCTION(uim0_data),
+       MSM_PIN_FUNCTION(uim0_present),
+       MSM_PIN_FUNCTION(uim0_reset),
+       MSM_PIN_FUNCTION(uim1_clk),
+       MSM_PIN_FUNCTION(uim1_data),
+       MSM_PIN_FUNCTION(uim1_present),
+       MSM_PIN_FUNCTION(uim1_reset),
+       MSM_PIN_FUNCTION(usb1_hs),
+       MSM_PIN_FUNCTION(usb_phy),
+       MSM_PIN_FUNCTION(vfr_0),
+       MSM_PIN_FUNCTION(vfr_1),
+       MSM_PIN_FUNCTION(vsense_trigger_mirnat),
+};
+
+/*
+ * Every pin is maintained as a single group; missing or non-existent pins
+ * are kept as dummy groups so that the pin group index stays in sync with
+ * the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+/*
+ * Each PINGROUP(pin, f1..f10) entry lists the ten selectable mux functions
+ * for that pin ('_' marks an unused slot; gpio mode is implicit).
+ * Entries 210-213 describe the special UFS reset and SDC2 pad-control pins.
+ */
+static const struct msm_pingroup sm8650_groups[] = {
+       [0] = PINGROUP(0, qup2_se0, ibi_i3c, phase_flag, _, _, _, _, _, _, egpio),
+       [1] = PINGROUP(1, qup2_se0, ibi_i3c, phase_flag, _, _, _, _, _, _, egpio),
+       [2] = PINGROUP(2, qup2_se0, _, _, _, _, _, _, _, _, egpio),
+       [3] = PINGROUP(3, qup2_se0, phase_flag, _, qdss_gpio, _, _, _, _, _, egpio),
+       [4] = PINGROUP(4, qup2_se1, ibi_i3c, phase_flag, _, _, _, _, _, _, egpio),
+       [5] = PINGROUP(5, qup2_se1, ibi_i3c, phase_flag, _, _, _, _, _, _, egpio),
+       [6] = PINGROUP(6, qup2_se1, _, _, _, _, _, _, _, _, egpio),
+       [7] = PINGROUP(7, qup2_se1, phase_flag, _, qdss_gpio, _, _, _, _, _, egpio),
+       [8] = PINGROUP(8, qup2_se2, ibi_i3c, tb_trig_sdc2, phase_flag, tgu_ch0_trigout, _, qdss_gpio, _, _, _),
+       [9] = PINGROUP(9, qup2_se2, ibi_i3c, phase_flag, tgu_ch1_trigout, _, _, _, _, _, _),
+       [10] = PINGROUP(10, qup2_se2, cci_timer, tgu_ch2_trigout, _, _, _, _, _, _, _),
+       [11] = PINGROUP(11, qup2_se2, cci_timer, phase_flag, tgu_ch3_trigout, _, _, _, _, _, _),
+       [12] = PINGROUP(12, qup2_se3, cci_i2c_sda, ibi_i3c, qup2_se2, phase_flag, _, _, _, _, _),
+       [13] = PINGROUP(13, qup2_se3, cci_i2c_scl, ibi_i3c, qup2_se2, phase_flag, _, qdss_gpio, _, _, _),
+       [14] = PINGROUP(14, qup2_se3, _, _, _, _, _, _, _, _, _),
+       [15] = PINGROUP(15, qup2_se3, cci_async_in, qup2_se2, phase_flag, _, qdss_gpio, _, _, _, _),
+       [16] = PINGROUP(16, qup2_se4, phase_flag, _, _, _, _, _, _, _, _),
+       [17] = PINGROUP(17, qup2_se4, phase_flag, _, _, _, _, _, _, _, _),
+       [18] = PINGROUP(18, qup2_se4, _, _, _, _, _, _, _, _, _),
+       [19] = PINGROUP(19, qup2_se4, phase_flag, _, _, _, _, _, _, _, _),
+       [20] = PINGROUP(20, qup2_se5, _, _, _, _, _, _, _, _, _),
+       [21] = PINGROUP(21, qup2_se5, _, _, _, _, _, _, _, _, _),
+       [22] = PINGROUP(22, qup2_se5, _, _, _, _, _, _, _, _, _),
+       [23] = PINGROUP(23, qup2_se5, qup2_se5, _, _, _, _, _, _, _, _), /* NOTE(review): qup2_se5 listed twice — duplicate mux value? confirm against TLMM docs */
+       [24] = PINGROUP(24, qup2_se6, _, _, _, _, _, _, _, _, _),
+       [25] = PINGROUP(25, qup2_se6, _, _, _, _, _, _, _, _, _),
+       [26] = PINGROUP(26, qup2_se6, _, _, _, _, _, _, _, _, _),
+       [27] = PINGROUP(27, qup2_se6, qdss_cti, _, _, _, _, _, _, _, _),
+       [28] = PINGROUP(28, qup2_se7, _, _, _, _, _, _, _, _, _),
+       [29] = PINGROUP(29, qup2_se7, usb_phy, _, _, _, _, _, _, _, _),
+       [30] = PINGROUP(30, qup2_se7, _, _, _, _, _, _, _, _, _),
+       [31] = PINGROUP(31, qup2_se7, qdss_cti, _, _, _, _, _, _, _, _),
+       [32] = PINGROUP(32, qup1_se0, ibi_i3c, _, _, _, _, _, _, _, _),
+       [33] = PINGROUP(33, qup1_se0, ibi_i3c, _, _, _, _, _, _, _, _),
+       [34] = PINGROUP(34, qup1_se0, _, _, _, _, _, _, _, _, _),
+       [35] = PINGROUP(35, qup1_se0, _, _, _, _, _, _, _, _, _),
+       [36] = PINGROUP(36, qup1_se1, do_not, ibi_i3c, _, _, _, _, _, _, _),
+       [37] = PINGROUP(37, qup1_se1, do_not, ibi_i3c, _, _, _, _, _, _, _),
+       [38] = PINGROUP(38, qup1_se1, do_not, _, _, _, _, _, _, _, _),
+       [39] = PINGROUP(39, qup1_se1, do_not, _, _, _, _, _, _, _, _),
+       [40] = PINGROUP(40, qup1_se2, ddr_bist_fail, _, _, _, _, _, _, _, _),
+       [41] = PINGROUP(41, qup1_se2, ddr_bist_start, _, _, _, _, _, _, _, _),
+       [42] = PINGROUP(42, qup1_se2, _, _, _, _, _, _, _, _, _),
+       [43] = PINGROUP(43, qup1_se2, _, _, _, _, _, _, _, _, _),
+       [44] = PINGROUP(44, qup1_se3, qup1_se2, ddr_bist_complete, ddr_pxi1, _, _, _, _, _, _),
+       [45] = PINGROUP(45, qup1_se3, qup1_se2, ddr_bist_stop, ddr_pxi1, _, _, _, _, _, _),
+       [46] = PINGROUP(46, qup1_se3, qup1_se2, ddr_pxi3, _, _, _, _, _, _, _),
+       [47] = PINGROUP(47, qup1_se3, uim0_present, dp_hot, ddr_pxi3, _, _, _, _, _, _),
+       [48] = PINGROUP(48, qup1_se4, ibi_i3c, _, _, _, _, _, _, _, _),
+       [49] = PINGROUP(49, qup1_se4, ibi_i3c, _, _, _, _, _, _, _, _),
+       [50] = PINGROUP(50, qup1_se4, aoss_cti, _, _, _, _, _, _, _, _),
+       [51] = PINGROUP(51, qup1_se4, aoss_cti, ddr_pxi2, _, _, _, _, _, _, _),
+       [52] = PINGROUP(52, qup1_se5, _, _, _, _, _, _, _, _, _),
+       [53] = PINGROUP(53, qup1_se5, _, _, _, _, _, _, _, _, _),
+       [54] = PINGROUP(54, qup1_se5, usb_phy, _, _, _, _, _, _, _, _),
+       [55] = PINGROUP(55, qup1_se5, _, _, _, _, _, _, _, _, _),
+       [56] = PINGROUP(56, qup1_se6, ibi_i3c, qspi2, sdc42, _, _, _, _, _, _),
+       [57] = PINGROUP(57, qup1_se6, ibi_i3c, qspi3, sdc43, _, _, _, _, _, _),
+       [58] = PINGROUP(58, qup1_se6, qspi_cs, tb_trig_sdc4, tsense_pwm1, tsense_pwm2, tsense_pwm3, _, _, _, _),
+       [59] = PINGROUP(59, qup1_se6, _, qspi_cs, sdc4_cmd, _, _, _, _, _, _),
+       [60] = PINGROUP(60, qup1_se7, aoss_cti, vsense_trigger_mirnat, _, _, _, _, _, _, _),
+       [61] = PINGROUP(61, qup1_se7, aoss_cti, _, _, _, _, _, _, _, _),
+       [62] = PINGROUP(62, qup1_se7, ddr_pxi2, _, _, _, _, _, _, _, _),
+       [63] = PINGROUP(63, qup1_se7, _, _, _, _, _, _, _, _, _),
+       [64] = PINGROUP(64, i2chub0_se0, _, _, _, _, _, _, _, _, _),
+       [65] = PINGROUP(65, i2chub0_se0, _, _, _, _, _, _, _, _, _),
+       [66] = PINGROUP(66, i2chub0_se1, _, _, _, _, _, _, _, _, _),
+       [67] = PINGROUP(67, i2chub0_se1, _, _, _, _, _, _, _, _, _),
+       [68] = PINGROUP(68, i2chub0_se2, pll_bist_sync, _, _, _, _, _, _, _, _),
+       [69] = PINGROUP(69, i2chub0_se2, _, _, _, _, _, _, _, _, _),
+       [70] = PINGROUP(70, i2chub0_se3, _, _, _, _, _, _, _, _, _),
+       [71] = PINGROUP(71, i2chub0_se3, _, atest_usb, _, _, _, _, _, _, _),
+       [72] = PINGROUP(72, i2chub0_se4, _, atest_usb, _, _, _, _, _, _, _),
+       [73] = PINGROUP(73, i2chub0_se4, jitter_bist, _, _, _, _, _, _, _, _),
+       [74] = PINGROUP(74, i2chub0_se5, atest_usb, _, _, _, _, _, _, _, _),
+       [75] = PINGROUP(75, i2chub0_se5, ddr_pxi0, _, _, _, _, _, _, _, _),
+       [76] = PINGROUP(76, i2chub0_se6, ddr_pxi0, uim1_present, _, _, _, _, _, _, _),
+       [77] = PINGROUP(77, i2chub0_se6, _, _, _, _, _, _, _, _, _),
+       [78] = PINGROUP(78, i2chub0_se7, qdss_cti, _, _, _, _, _, _, _, _),
+       [79] = PINGROUP(79, i2chub0_se7, qdss_cti, _, _, _, _, _, _, _, _),
+       [80] = PINGROUP(80, i2chub0_se9, _, _, _, _, _, _, _, _, _),
+       [81] = PINGROUP(81, i2chub0_se9, _, _, _, _, _, _, _, _, _),
+       [82] = PINGROUP(82, qdss_cti, _, _, _, _, _, _, _, _, _),
+       [83] = PINGROUP(83, qdss_cti, _, _, _, _, _, _, _, _, _),
+       [84] = PINGROUP(84, _, _, _, _, _, _, _, _, _, _),
+       [85] = PINGROUP(85, _, _, _, _, _, _, _, _, _, _),
+       [86] = PINGROUP(86, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out, gcc_gp1, _, _, _, _, _, _),
+       [87] = PINGROUP(87, mdp_vsync, mdp_vsync2_out, mdp_vsync3_out, gcc_gp2, _, _, _, _, _, _),
+       [88] = PINGROUP(88, mdp_vsync_e, gcc_gp3, _, _, _, _, _, _, _, _),
+       [89] = PINGROUP(89, usb1_hs, gnss_adc0, _, _, _, _, _, _, _, _),
+       [90] = PINGROUP(90, gnss_adc1, _, _, _, _, _, _, _, _, _),
+       [91] = PINGROUP(91, _, gnss_adc0, _, _, _, _, _, _, _, _),
+       [92] = PINGROUP(92, dbg_out_clk, gnss_adc1, _, _, _, _, _, _, _, _),
+       [93] = PINGROUP(93, sd_write_protect, _, _, _, _, _, _, _, _, _),
+       [94] = PINGROUP(94, cmu_rng, phase_flag, tmess_prng0, _, _, _, _, _, _, _),
+       [95] = PINGROUP(95, pcie0_clk_req_n, cmu_rng, phase_flag, tmess_prng1, _, _, _, _, _, _),
+       [96] = PINGROUP(96, cmu_rng, phase_flag, tmess_prng2, _, _, _, _, _, _, _),
+       [97] = PINGROUP(97, _, _, _, _, _, _, _, _, _, _),
+       [98] = PINGROUP(98, pcie1_clk_req_n, _, _, _, _, _, _, _, _, _),
+       [99] = PINGROUP(99, _, _, _, _, _, _, _, _, _, _),
+       [100] = PINGROUP(100, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _),
+       [101] = PINGROUP(101, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _),
+       [102] = PINGROUP(102, cam_aon_mclk2, qdss_gpio, _, _, _, _, _, _, _, _),
+       [103] = PINGROUP(103, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _),
+       [104] = PINGROUP(104, cam_aon_mclk4, qdss_gpio, _, _, _, _, _, _, _, _),
+       [105] = PINGROUP(105, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _),
+       [106] = PINGROUP(106, cam_mclk, pll_clk_aux, _, _, _, _, _, _, _, _),
+       [107] = PINGROUP(107, _, _, _, _, _, _, _, _, _, _),
+       [108] = PINGROUP(108, cam_mclk, _, _, _, _, _, _, _, _, _),
+       [109] = PINGROUP(109, cci_timer, phase_flag, tmess_prng3, _, _, _, _, _, _, _),
+       [110] = PINGROUP(110, cci_timer, _, _, _, _, _, _, _, _, _),
+       [111] = PINGROUP(111, cci_timer, phase_flag, _, _, _, _, _, _, _, _),
+       [112] = PINGROUP(112, cci_i2c_sda, cmu_rng, phase_flag, _, _, _, _, _, _, _),
+       [113] = PINGROUP(113, cci_i2c_sda, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+       [114] = PINGROUP(114, cci_i2c_scl, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+       [115] = PINGROUP(115, cci_i2c_sda, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+       [116] = PINGROUP(116, cci_i2c_scl, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+       [117] = PINGROUP(117, cci_i2c_sda, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+       [118] = PINGROUP(118, cci_i2c_scl, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+       [119] = PINGROUP(119, cci_i2c_sda, phase_flag, _, _, _, _, _, _, _, _),
+       [120] = PINGROUP(120, cci_i2c_scl, phase_flag, _, _, _, _, _, _, _, _),
+       [121] = PINGROUP(121, i2s1_sck, _, _, _, _, _, _, _, _, _),
+       [122] = PINGROUP(122, i2s1_data0, cmu_rng, _, _, _, _, _, _, _, _),
+       [123] = PINGROUP(123, i2s1_ws, _, _, _, _, _, _, _, _, _),
+       [124] = PINGROUP(124, i2s1_data1, audio_ext_mclk1, audio_ref_clk, _, _, _, _, _, _, _),
+       [125] = PINGROUP(125, audio_ext_mclk0, _, _, _, _, _, _, _, _, _),
+       [126] = PINGROUP(126, i2s0_sck, _, _, _, _, _, _, _, _, _),
+       [127] = PINGROUP(127, i2s0_data0, cmu_rng, _, _, _, _, _, _, _, _),
+       [128] = PINGROUP(128, i2s0_data1, cmu_rng, _, _, _, _, _, _, _, _),
+       [129] = PINGROUP(129, i2s0_ws, cmu_rng, _, _, _, _, _, _, _, _),
+       [130] = PINGROUP(130, uim0_data, atest_usb, atest_char, _, _, _, _, _, _, _),
+       [131] = PINGROUP(131, uim0_clk, atest_usb, atest_char, _, _, _, _, _, _, _),
+       [132] = PINGROUP(132, uim0_reset, atest_char, _, _, _, _, _, _, _, _),
+       [133] = PINGROUP(133, mdp_vsync, atest_char, _, _, _, _, _, _, _, _),
+       [134] = PINGROUP(134, uim1_data, do_not, qspi0, sdc40, gcc_gp1, _, _, _, _, _),
+       [135] = PINGROUP(135, uim1_clk, do_not, qspi_clk, sdc4_clk, gcc_gp2, _, _, _, _, _),
+       [136] = PINGROUP(136, uim1_reset, do_not, qspi1, sdc41, gcc_gp3, _, _, _, _, _),
+       [137] = PINGROUP(137, mdp_vsync, atest_char, _, _, _, _, _, _, _, _),
+       [138] = PINGROUP(138, _, _, _, _, _, _, _, _, _, _),
+       [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _, _),
+       [140] = PINGROUP(140, _, _, qdss_gpio, _, _, _, _, _, _, _),
+       [141] = PINGROUP(141, _, _, qdss_gpio, _, _, _, _, _, _, _),
+       [142] = PINGROUP(142, _, _, qdss_gpio, _, _, _, _, _, _, _),
+       [143] = PINGROUP(143, _, _, qdss_gpio, _, _, _, _, _, _, _),
+       [144] = PINGROUP(144, _, qdss_gpio, _, _, _, _, _, _, _, _),
+       [145] = PINGROUP(145, _, qdss_gpio, _, _, _, _, _, _, _, _),
+       [146] = PINGROUP(146, _, qdss_gpio, _, _, _, _, _, _, _, _),
+       [147] = PINGROUP(147, _, qdss_gpio, _, _, _, _, _, _, _, _),
+       [148] = PINGROUP(148, coex_uart1_rx, qdss_gpio, _, _, _, _, _, _, _, _),
+       [149] = PINGROUP(149, coex_uart1_tx, qdss_gpio, _, _, _, _, _, _, _, _),
+       [150] = PINGROUP(150, _, vfr_0, coex_uart2_rx, qdss_gpio, _, _, _, _, _, _),
+       [151] = PINGROUP(151, _, coex_uart2_tx, qdss_gpio, _, _, _, _, _, _, _),
+       [152] = PINGROUP(152, nav_gpio2, _, qdss_gpio, _, _, _, _, _, _, _),
+       [153] = PINGROUP(153, cci_i2c_scl, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+       [154] = PINGROUP(154, nav_gpio0, nav_gpio3, qdss_gpio, _, _, _, _, _, _, _),
+       [155] = PINGROUP(155, nav_gpio1, vfr_1, qdss_gpio, _, _, _, _, _, _, _),
+       [156] = PINGROUP(156, qlink_little_request, qdss_gpio, _, _, _, _, _, _, _, _),
+       [157] = PINGROUP(157, qlink_little_enable, qdss_gpio, _, _, _, _, _, _, _, _),
+       [158] = PINGROUP(158, qlink_wmss, qdss_gpio, _, _, _, _, _, _, _, _),
+       [159] = PINGROUP(159, qlink_big_request, qdss_cti, _, _, _, _, _, _, _, _),
+       [160] = PINGROUP(160, qlink_big_enable, _, _, _, _, _, _, _, _, _),
+       [161] = PINGROUP(161, _, _, _, _, _, _, _, _, _, _),
+       [162] = PINGROUP(162, qdss_cti, _, _, _, _, _, _, _, _, _),
+       [163] = PINGROUP(163, cci_async_in, phase_flag, _, _, _, _, _, _, _, _),
+       [164] = PINGROUP(164, cci_async_in, phase_flag, _, _, _, _, _, _, _, _),
+       [165] = PINGROUP(165, _, _, _, _, _, _, _, _, _, egpio),
+       [166] = PINGROUP(166, _, _, _, _, _, _, _, _, _, egpio),
+       [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _, egpio),
+       [168] = PINGROUP(168, _, _, _, _, _, _, _, _, _, egpio),
+       [169] = PINGROUP(169, _, _, _, _, _, _, _, _, _, egpio),
+       [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _, egpio),
+       [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _, egpio),
+       [172] = PINGROUP(172, _, _, _, _, _, _, _, _, _, egpio),
+       [173] = PINGROUP(173, _, _, _, _, _, _, _, _, _, egpio),
+       [174] = PINGROUP(174, _, _, _, _, _, _, _, _, _, egpio),
+       [175] = PINGROUP(175, _, _, _, _, _, _, _, _, _, egpio),
+       [176] = PINGROUP(176, _, _, _, _, _, _, _, _, _, egpio),
+       [177] = PINGROUP(177, _, _, _, _, _, _, _, _, _, egpio),
+       [178] = PINGROUP(178, _, _, _, _, _, _, _, _, _, egpio),
+       [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _, egpio),
+       [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _, egpio),
+       [181] = PINGROUP(181, prng_rosc3, _, _, _, _, _, _, _, _, egpio),
+       [182] = PINGROUP(182, prng_rosc2, _, _, _, _, _, _, _, _, egpio),
+       [183] = PINGROUP(183, prng_rosc1, _, _, _, _, _, _, _, _, egpio),
+       [184] = PINGROUP(184, _, _, _, _, _, _, _, _, _, egpio),
+       [185] = PINGROUP(185, _, _, _, _, _, _, _, _, _, egpio),
+       [186] = PINGROUP(186, prng_rosc0, _, _, _, _, _, _, _, _, egpio),
+       [187] = PINGROUP(187, cri_trng, _, _, _, _, _, _, _, _, egpio),
+       [188] = PINGROUP(188, _, _, _, _, _, _, _, _, _, egpio),
+       [189] = PINGROUP(189, _, _, _, _, _, _, _, _, _, egpio),
+       [190] = PINGROUP(190, _, _, _, _, _, _, _, _, _, egpio),
+       [191] = PINGROUP(191, _, _, _, _, _, _, _, _, _, egpio),
+       [192] = PINGROUP(192, _, _, _, _, _, _, _, _, _, egpio),
+       [193] = PINGROUP(193, _, _, _, _, _, _, _, _, _, egpio),
+       [194] = PINGROUP(194, _, _, _, _, _, _, _, _, _, egpio),
+       [195] = PINGROUP(195, _, _, _, _, _, _, _, _, _, egpio),
+       [196] = PINGROUP(196, _, _, _, _, _, _, _, _, _, egpio),
+       [197] = PINGROUP(197, _, _, _, _, _, _, _, _, _, egpio),
+       [198] = PINGROUP(198, _, _, _, _, _, _, _, _, _, egpio),
+       [199] = PINGROUP(199, _, _, _, _, _, _, _, _, _, egpio),
+       [200] = PINGROUP(200, _, _, _, _, _, _, _, _, _, egpio),
+       [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _, egpio),
+       [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _, egpio),
+       [203] = PINGROUP(203, _, _, _, _, _, _, _, _, _, egpio),
+       [204] = PINGROUP(204, _, _, _, _, _, _, _, _, _, egpio),
+       [205] = PINGROUP(205, _, _, _, _, _, _, _, _, _, egpio),
+       [206] = PINGROUP(206, i2chub0_se8, _, _, _, _, _, _, _, _, egpio),
+       [207] = PINGROUP(207, i2chub0_se8, _, _, _, _, _, _, _, _, egpio),
+       [208] = PINGROUP(208, _, _, _, _, _, _, _, _, _, egpio),
+       [209] = PINGROUP(209, _, _, _, _, _, _, _, _, _, egpio),
+       [210] = UFS_RESET(ufs_reset, 0xde004, 0xdf000),
+       [211] = SDC_QDSD_PINGROUP(sdc2_clk, 0xd6000, 14, 6),
+       [212] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xd6000, 11, 3),
+       [213] = SDC_QDSD_PINGROUP(sdc2_data, 0xd6000, 9, 0),
+};
+
+/*
+ * { gpio, wakeirq } pairs mapping wakeup-capable TLMM pins to their PDC
+ * (Power Domain Controller) interrupt lines, so these pins can wake the
+ * system while the TLMM itself is powered down.
+ */
+static const struct msm_gpio_wakeirq_map sm8650_pdc_map[] = {
+       { 0, 94 },    { 3, 105 },   { 4, 78 },    { 7, 67 },    { 8, 64 },
+       { 11, 121 },  { 12, 71 },   { 15, 82 },   { 18, 75 },   { 19, 63 },
+       { 20, 114 },  { 23, 84 },   { 27, 61 },   { 29, 112 },  { 31, 113 },
+       { 32, 66 },   { 35, 52 },   { 36, 123 },  { 39, 56 },   { 43, 59 },
+       { 46, 79 },   { 47, 124 },  { 48, 125 },  { 51, 93 },   { 54, 60 },
+       { 55, 104 },  { 56, 72 },   { 57, 77 },   { 59, 51 },   { 63, 85 },
+       { 64, 107 },  { 65, 108 },  { 66, 109 },  { 67, 83 },   { 68, 110 },
+       { 69, 111 },  { 75, 96 },   { 76, 97 },   { 77, 98 },   { 80, 89 },
+       { 81, 90 },   { 84, 106 },  { 85, 100 },  { 86, 87 },   { 87, 88 },
+       { 88, 65 },   { 90, 92 },   { 92, 99 },   { 95, 118 },  { 96, 119 },
+       { 98, 101 },  { 99, 62 },   { 112, 120 }, { 133, 80 },  { 136, 69 },
+       { 137, 81 },  { 148, 57 },  { 150, 58 },  { 152, 127 }, { 153, 74 },
+       { 154, 126 }, { 155, 73 },  { 156, 128 }, { 159, 129 }, { 162, 86 },
+       { 163, 122 }, { 166, 139 }, { 169, 140 }, { 171, 141 }, { 172, 142 },
+       { 174, 102 }, { 176, 143 }, { 177, 55 },  { 181, 144 }, { 182, 145 },
+       { 185, 146 }, { 187, 95 },  { 188, 130 }, { 190, 131 }, { 191, 132 },
+       { 192, 133 }, { 193, 134 }, { 195, 68 },  { 196, 135 }, { 197, 136 },
+       { 198, 54 },  { 199, 103 }, { 200, 53 },  { 201, 137 }, { 202, 70 },
+       { 203, 138 }, { 204, 76 },  { 205, 91 },
+};
+
+/* SoC description handed to the shared pinctrl-msm core at probe time. */
+static const struct msm_pinctrl_soc_data sm8650_tlmm = {
+       .pins = sm8650_pins,
+       .npins = ARRAY_SIZE(sm8650_pins),
+       .functions = sm8650_functions,
+       .nfunctions = ARRAY_SIZE(sm8650_functions),
+       .groups = sm8650_groups,
+       .ngroups = ARRAY_SIZE(sm8650_groups),
+       .ngpios = 211,          /* gpio0..gpio209 plus ufs_reset — TODO(review) confirm count */
+       .wakeirq_map = sm8650_pdc_map,
+       .nwakeirq_map = ARRAY_SIZE(sm8650_pdc_map),
+       .egpio_func = 10,       /* funcs[] index of egpio in the PINGROUP entries */
+};
+
+/* Bind the SM8650 SoC description to the generic MSM pinctrl probe path. */
+static int sm8650_tlmm_probe(struct platform_device *pdev)
+{
+       return msm_pinctrl_probe(pdev, &sm8650_tlmm);
+}
+
+/* Devicetree match table; the list is terminated by the empty sentinel. */
+static const struct of_device_id sm8650_tlmm_of_match[] = {
+       { .compatible = "qcom,sm8650-tlmm", },
+       {},
+};
+
+/* Platform driver; removal is delegated to the common msm_pinctrl_remove. */
+static struct platform_driver sm8650_tlmm_driver = {
+       .driver = {
+               .name = "sm8650-tlmm",
+               .of_match_table = sm8650_tlmm_of_match,
+       },
+       .probe = sm8650_tlmm_probe,
+       .remove_new = msm_pinctrl_remove,
+};
+
+/*
+ * Registered at arch_initcall level (earlier than module/device initcalls),
+ * presumably so pin control is available before dependent devices probe.
+ */
+static int __init sm8650_tlmm_init(void)
+{
+       return platform_driver_register(&sm8650_tlmm_driver);
+}
+arch_initcall(sm8650_tlmm_init);
+
+/* Module unload: unregister the platform driver registered at init. */
+static void __exit sm8650_tlmm_exit(void)
+{
+       platform_driver_unregister(&sm8650_tlmm_driver);
+}
+module_exit(sm8650_tlmm_exit);
+
+/* Module metadata; the device table enables OF-alias-based autoloading. */
+MODULE_DESCRIPTION("QTI SM8650 TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, sm8650_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
new file mode 100644 (file)
index 0000000..e30e938
--- /dev/null
@@ -0,0 +1,1876 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-msm.h"
+
+/* Stride between consecutive per-GPIO register windows (used as REG_SIZE * id). */
+#define REG_SIZE 0x1000
+
+/*
+ * PINGROUP() describes one regular TLMM GPIO for the x1e80100: nine
+ * selectable functions f1..f9 (gpio mode is implicitly funcs[0]) plus the
+ * register/bit layout consumed by the common pinctrl-msm code.  All register
+ * offsets are computed as a base plus REG_SIZE * id, i.e. each pin owns its
+ * own register window.
+ *
+ * NOTE(review): .intr_target_reg deliberately aliases .intr_cfg_reg (both
+ * 0x8 + REG_SIZE * id) — the target field appears to share the interrupt
+ * config register on this SoC; confirm against the TLMM register map.
+ */
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9)       \
+       {                                               \
+               .grp = PINCTRL_PINGROUP("gpio" #id,     \
+                       gpio##id##_pins,                \
+                       ARRAY_SIZE(gpio##id##_pins)),   \
+               .funcs = (int[]){                       \
+                       msm_mux_gpio, /* gpio mode */   \
+                       msm_mux_##f1,                   \
+                       msm_mux_##f2,                   \
+                       msm_mux_##f3,                   \
+                       msm_mux_##f4,                   \
+                       msm_mux_##f5,                   \
+                       msm_mux_##f6,                   \
+                       msm_mux_##f7,                   \
+                       msm_mux_##f8,                   \
+                       msm_mux_##f9                    \
+               },                                      \
+               .nfuncs = 10,                           \
+               .ctl_reg = REG_SIZE * id,                       \
+               .io_reg = 0x4 + REG_SIZE * id,          \
+               .intr_cfg_reg = 0x8 + REG_SIZE * id,            \
+               .intr_status_reg = 0xc + REG_SIZE * id, \
+               .intr_target_reg = 0x8 + REG_SIZE * id, \
+               .mux_bit = 2,                   \
+               .pull_bit = 0,                  \
+               .drv_bit = 6,                   \
+               .i2c_pull_bit = 13,             \
+               .egpio_enable = 12,             \
+               .egpio_present = 11,            \
+               .oe_bit = 9,                    \
+               .in_bit = 0,                    \
+               .out_bit = 1,                   \
+               .intr_enable_bit = 0,           \
+               .intr_status_bit = 0,           \
+               .intr_target_bit = 5,           \
+               .intr_target_kpss_val = 3,      \
+               .intr_raw_status_bit = 4,       \
+               .intr_polarity_bit = 1,         \
+               .intr_detection_bit = 2,        \
+               .intr_detection_width = 2,      \
+       }
+
+/*
+ * SDC_QDSD_PINGROUP() describes an SDC pad-control "pin": only pull and
+ * drive-strength fields are programmable (at the given bit positions in the
+ * single ctl register); mux, I/O and interrupt fields are disabled (-1/0).
+ */
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)     \
+       {                                               \
+               .grp = PINCTRL_PINGROUP(#pg_name,       \
+                       pg_name##_pins,                 \
+                       ARRAY_SIZE(pg_name##_pins)),    \
+               .ctl_reg = ctl,                         \
+               .io_reg = 0,                            \
+               .intr_cfg_reg = 0,                      \
+               .intr_status_reg = 0,                   \
+               .intr_target_reg = 0,                   \
+               .mux_bit = -1,                          \
+               .pull_bit = pull,                       \
+               .drv_bit = drv,                         \
+               .oe_bit = -1,                           \
+               .in_bit = -1,                           \
+               .out_bit = -1,                          \
+               .intr_enable_bit = -1,                  \
+               .intr_status_bit = -1,                  \
+               .intr_target_bit = -1,                  \
+               .intr_raw_status_bit = -1,              \
+               .intr_polarity_bit = -1,                \
+               .intr_detection_bit = -1,               \
+               .intr_detection_width = -1,             \
+       }
+
+/*
+ * UFS_RESET() describes the dedicated UFS reset pin: output-only (out_bit 0
+ * in io_reg at offset + 0x4), with pull (bit 3) and drive strength (bit 0)
+ * in the ctl register; no mux, input or interrupt capability (-1/0 fields).
+ */
+#define UFS_RESET(pg_name, offset)                             \
+       {                                               \
+               .grp = PINCTRL_PINGROUP(#pg_name,       \
+                       pg_name##_pins,                 \
+                       ARRAY_SIZE(pg_name##_pins)),    \
+               .ctl_reg = offset,                      \
+               .io_reg = offset + 0x4,                 \
+               .intr_cfg_reg = 0,                      \
+               .intr_status_reg = 0,                   \
+               .intr_target_reg = 0,                   \
+               .mux_bit = -1,                          \
+               .pull_bit = 3,                          \
+               .drv_bit = 0,                           \
+               .oe_bit = -1,                           \
+               .in_bit = -1,                           \
+               .out_bit = 0,                           \
+               .intr_enable_bit = -1,                  \
+               .intr_status_bit = -1,                  \
+               .intr_target_bit = -1,                  \
+               .intr_raw_status_bit = -1,              \
+               .intr_polarity_bit = -1,                \
+               .intr_detection_bit = -1,               \
+               .intr_detection_width = -1,             \
+       }
+
+static const struct pinctrl_pin_desc x1e80100_pins[] = {
+       PINCTRL_PIN(0, "GPIO_0"),
+       PINCTRL_PIN(1, "GPIO_1"),
+       PINCTRL_PIN(2, "GPIO_2"),
+       PINCTRL_PIN(3, "GPIO_3"),
+       PINCTRL_PIN(4, "GPIO_4"),
+       PINCTRL_PIN(5, "GPIO_5"),
+       PINCTRL_PIN(6, "GPIO_6"),
+       PINCTRL_PIN(7, "GPIO_7"),
+       PINCTRL_PIN(8, "GPIO_8"),
+       PINCTRL_PIN(9, "GPIO_9"),
+       PINCTRL_PIN(10, "GPIO_10"),
+       PINCTRL_PIN(11, "GPIO_11"),
+       PINCTRL_PIN(12, "GPIO_12"),
+       PINCTRL_PIN(13, "GPIO_13"),
+       PINCTRL_PIN(14, "GPIO_14"),
+       PINCTRL_PIN(15, "GPIO_15"),
+       PINCTRL_PIN(16, "GPIO_16"),
+       PINCTRL_PIN(17, "GPIO_17"),
+       PINCTRL_PIN(18, "GPIO_18"),
+       PINCTRL_PIN(19, "GPIO_19"),
+       PINCTRL_PIN(20, "GPIO_20"),
+       PINCTRL_PIN(21, "GPIO_21"),
+       PINCTRL_PIN(22, "GPIO_22"),
+       PINCTRL_PIN(23, "GPIO_23"),
+       PINCTRL_PIN(24, "GPIO_24"),
+       PINCTRL_PIN(25, "GPIO_25"),
+       PINCTRL_PIN(26, "GPIO_26"),
+       PINCTRL_PIN(27, "GPIO_27"),
+       PINCTRL_PIN(28, "GPIO_28"),
+       PINCTRL_PIN(29, "GPIO_29"),
+       PINCTRL_PIN(30, "GPIO_30"),
+       PINCTRL_PIN(31, "GPIO_31"),
+       PINCTRL_PIN(32, "GPIO_32"),
+       PINCTRL_PIN(33, "GPIO_33"),
+       PINCTRL_PIN(34, "GPIO_34"),
+       PINCTRL_PIN(35, "GPIO_35"),
+       PINCTRL_PIN(36, "GPIO_36"),
+       PINCTRL_PIN(37, "GPIO_37"),
+       PINCTRL_PIN(38, "GPIO_38"),
+       PINCTRL_PIN(39, "GPIO_39"),
+       PINCTRL_PIN(40, "GPIO_40"),
+       PINCTRL_PIN(41, "GPIO_41"),
+       PINCTRL_PIN(42, "GPIO_42"),
+       PINCTRL_PIN(43, "GPIO_43"),
+       PINCTRL_PIN(44, "GPIO_44"),
+       PINCTRL_PIN(45, "GPIO_45"),
+       PINCTRL_PIN(46, "GPIO_46"),
+       PINCTRL_PIN(47, "GPIO_47"),
+       PINCTRL_PIN(48, "GPIO_48"),
+       PINCTRL_PIN(49, "GPIO_49"),
+       PINCTRL_PIN(50, "GPIO_50"),
+       PINCTRL_PIN(51, "GPIO_51"),
+       PINCTRL_PIN(52, "GPIO_52"),
+       PINCTRL_PIN(53, "GPIO_53"),
+       PINCTRL_PIN(54, "GPIO_54"),
+       PINCTRL_PIN(55, "GPIO_55"),
+       PINCTRL_PIN(56, "GPIO_56"),
+       PINCTRL_PIN(57, "GPIO_57"),
+       PINCTRL_PIN(58, "GPIO_58"),
+       PINCTRL_PIN(59, "GPIO_59"),
+       PINCTRL_PIN(60, "GPIO_60"),
+       PINCTRL_PIN(61, "GPIO_61"),
+       PINCTRL_PIN(62, "GPIO_62"),
+       PINCTRL_PIN(63, "GPIO_63"),
+       PINCTRL_PIN(64, "GPIO_64"),
+       PINCTRL_PIN(65, "GPIO_65"),
+       PINCTRL_PIN(66, "GPIO_66"),
+       PINCTRL_PIN(67, "GPIO_67"),
+       PINCTRL_PIN(68, "GPIO_68"),
+       PINCTRL_PIN(69, "GPIO_69"),
+       PINCTRL_PIN(70, "GPIO_70"),
+       PINCTRL_PIN(71, "GPIO_71"),
+       PINCTRL_PIN(72, "GPIO_72"),
+       PINCTRL_PIN(73, "GPIO_73"),
+       PINCTRL_PIN(74, "GPIO_74"),
+       PINCTRL_PIN(75, "GPIO_75"),
+       PINCTRL_PIN(76, "GPIO_76"),
+       PINCTRL_PIN(77, "GPIO_77"),
+       PINCTRL_PIN(78, "GPIO_78"),
+       PINCTRL_PIN(79, "GPIO_79"),
+       PINCTRL_PIN(80, "GPIO_80"),
+       PINCTRL_PIN(81, "GPIO_81"),
+       PINCTRL_PIN(82, "GPIO_82"),
+       PINCTRL_PIN(83, "GPIO_83"),
+       PINCTRL_PIN(84, "GPIO_84"),
+       PINCTRL_PIN(85, "GPIO_85"),
+       PINCTRL_PIN(86, "GPIO_86"),
+       PINCTRL_PIN(87, "GPIO_87"),
+       PINCTRL_PIN(88, "GPIO_88"),
+       PINCTRL_PIN(89, "GPIO_89"),
+       PINCTRL_PIN(90, "GPIO_90"),
+       PINCTRL_PIN(91, "GPIO_91"),
+       PINCTRL_PIN(92, "GPIO_92"),
+       PINCTRL_PIN(93, "GPIO_93"),
+       PINCTRL_PIN(94, "GPIO_94"),
+       PINCTRL_PIN(95, "GPIO_95"),
+       PINCTRL_PIN(96, "GPIO_96"),
+       PINCTRL_PIN(97, "GPIO_97"),
+       PINCTRL_PIN(98, "GPIO_98"),
+       PINCTRL_PIN(99, "GPIO_99"),
+       PINCTRL_PIN(100, "GPIO_100"),
+       PINCTRL_PIN(101, "GPIO_101"),
+       PINCTRL_PIN(102, "GPIO_102"),
+       PINCTRL_PIN(103, "GPIO_103"),
+       PINCTRL_PIN(104, "GPIO_104"),
+       PINCTRL_PIN(105, "GPIO_105"),
+       PINCTRL_PIN(106, "GPIO_106"),
+       PINCTRL_PIN(107, "GPIO_107"),
+       PINCTRL_PIN(108, "GPIO_108"),
+       PINCTRL_PIN(109, "GPIO_109"),
+       PINCTRL_PIN(110, "GPIO_110"),
+       PINCTRL_PIN(111, "GPIO_111"),
+       PINCTRL_PIN(112, "GPIO_112"),
+       PINCTRL_PIN(113, "GPIO_113"),
+       PINCTRL_PIN(114, "GPIO_114"),
+       PINCTRL_PIN(115, "GPIO_115"),
+       PINCTRL_PIN(116, "GPIO_116"),
+       PINCTRL_PIN(117, "GPIO_117"),
+       PINCTRL_PIN(118, "GPIO_118"),
+       PINCTRL_PIN(119, "GPIO_119"),
+       PINCTRL_PIN(120, "GPIO_120"),
+       PINCTRL_PIN(121, "GPIO_121"),
+       PINCTRL_PIN(122, "GPIO_122"),
+       PINCTRL_PIN(123, "GPIO_123"),
+       PINCTRL_PIN(124, "GPIO_124"),
+       PINCTRL_PIN(125, "GPIO_125"),
+       PINCTRL_PIN(126, "GPIO_126"),
+       PINCTRL_PIN(127, "GPIO_127"),
+       PINCTRL_PIN(128, "GPIO_128"),
+       PINCTRL_PIN(129, "GPIO_129"),
+       PINCTRL_PIN(130, "GPIO_130"),
+       PINCTRL_PIN(131, "GPIO_131"),
+       PINCTRL_PIN(132, "GPIO_132"),
+       PINCTRL_PIN(133, "GPIO_133"),
+       PINCTRL_PIN(134, "GPIO_134"),
+       PINCTRL_PIN(135, "GPIO_135"),
+       PINCTRL_PIN(136, "GPIO_136"),
+       PINCTRL_PIN(137, "GPIO_137"),
+       PINCTRL_PIN(138, "GPIO_138"),
+       PINCTRL_PIN(139, "GPIO_139"),
+       PINCTRL_PIN(140, "GPIO_140"),
+       PINCTRL_PIN(141, "GPIO_141"),
+       PINCTRL_PIN(142, "GPIO_142"),
+       PINCTRL_PIN(143, "GPIO_143"),
+       PINCTRL_PIN(144, "GPIO_144"),
+       PINCTRL_PIN(145, "GPIO_145"),
+       PINCTRL_PIN(146, "GPIO_146"),
+       PINCTRL_PIN(147, "GPIO_147"),
+       PINCTRL_PIN(148, "GPIO_148"),
+       PINCTRL_PIN(149, "GPIO_149"),
+       PINCTRL_PIN(150, "GPIO_150"),
+       PINCTRL_PIN(151, "GPIO_151"),
+       PINCTRL_PIN(152, "GPIO_152"),
+       PINCTRL_PIN(153, "GPIO_153"),
+       PINCTRL_PIN(154, "GPIO_154"),
+       PINCTRL_PIN(155, "GPIO_155"),
+       PINCTRL_PIN(156, "GPIO_156"),
+       PINCTRL_PIN(157, "GPIO_157"),
+       PINCTRL_PIN(158, "GPIO_158"),
+       PINCTRL_PIN(159, "GPIO_159"),
+       PINCTRL_PIN(160, "GPIO_160"),
+       PINCTRL_PIN(161, "GPIO_161"),
+       PINCTRL_PIN(162, "GPIO_162"),
+       PINCTRL_PIN(163, "GPIO_163"),
+       PINCTRL_PIN(164, "GPIO_164"),
+       PINCTRL_PIN(165, "GPIO_165"),
+       PINCTRL_PIN(166, "GPIO_166"),
+       PINCTRL_PIN(167, "GPIO_167"),
+       PINCTRL_PIN(168, "GPIO_168"),
+       PINCTRL_PIN(169, "GPIO_169"),
+       PINCTRL_PIN(170, "GPIO_170"),
+       PINCTRL_PIN(171, "GPIO_171"),
+       PINCTRL_PIN(172, "GPIO_172"),
+       PINCTRL_PIN(173, "GPIO_173"),
+       PINCTRL_PIN(174, "GPIO_174"),
+       PINCTRL_PIN(175, "GPIO_175"),
+       PINCTRL_PIN(176, "GPIO_176"),
+       PINCTRL_PIN(177, "GPIO_177"),
+       PINCTRL_PIN(178, "GPIO_178"),
+       PINCTRL_PIN(179, "GPIO_179"),
+       PINCTRL_PIN(180, "GPIO_180"),
+       PINCTRL_PIN(181, "GPIO_181"),
+       PINCTRL_PIN(182, "GPIO_182"),
+       PINCTRL_PIN(183, "GPIO_183"),
+       PINCTRL_PIN(184, "GPIO_184"),
+       PINCTRL_PIN(185, "GPIO_185"),
+       PINCTRL_PIN(186, "GPIO_186"),
+       PINCTRL_PIN(187, "GPIO_187"),
+       PINCTRL_PIN(188, "GPIO_188"),
+       PINCTRL_PIN(189, "GPIO_189"),
+       PINCTRL_PIN(190, "GPIO_190"),
+       PINCTRL_PIN(191, "GPIO_191"),
+       PINCTRL_PIN(192, "GPIO_192"),
+       PINCTRL_PIN(193, "GPIO_193"),
+       PINCTRL_PIN(194, "GPIO_194"),
+       PINCTRL_PIN(195, "GPIO_195"),
+       PINCTRL_PIN(196, "GPIO_196"),
+       PINCTRL_PIN(197, "GPIO_197"),
+       PINCTRL_PIN(198, "GPIO_198"),
+       PINCTRL_PIN(199, "GPIO_199"),
+       PINCTRL_PIN(200, "GPIO_200"),
+       PINCTRL_PIN(201, "GPIO_201"),
+       PINCTRL_PIN(202, "GPIO_202"),
+       PINCTRL_PIN(203, "GPIO_203"),
+       PINCTRL_PIN(204, "GPIO_204"),
+       PINCTRL_PIN(205, "GPIO_205"),
+       PINCTRL_PIN(206, "GPIO_206"),
+       PINCTRL_PIN(207, "GPIO_207"),
+       PINCTRL_PIN(208, "GPIO_208"),
+       PINCTRL_PIN(209, "GPIO_209"),
+       PINCTRL_PIN(210, "GPIO_210"),
+       PINCTRL_PIN(211, "GPIO_211"),
+       PINCTRL_PIN(212, "GPIO_212"),
+       PINCTRL_PIN(213, "GPIO_213"),
+       PINCTRL_PIN(214, "GPIO_214"),
+       PINCTRL_PIN(215, "GPIO_215"),
+       PINCTRL_PIN(216, "GPIO_216"),
+       PINCTRL_PIN(217, "GPIO_217"),
+       PINCTRL_PIN(218, "GPIO_218"),
+       PINCTRL_PIN(219, "GPIO_219"),
+       PINCTRL_PIN(220, "GPIO_220"),
+       PINCTRL_PIN(221, "GPIO_221"),
+       PINCTRL_PIN(222, "GPIO_222"),
+       PINCTRL_PIN(223, "GPIO_223"),
+       PINCTRL_PIN(224, "GPIO_224"),
+       PINCTRL_PIN(225, "GPIO_225"),
+       PINCTRL_PIN(226, "GPIO_226"),
+       PINCTRL_PIN(227, "GPIO_227"),
+       PINCTRL_PIN(228, "GPIO_228"),
+       PINCTRL_PIN(229, "GPIO_229"),
+       PINCTRL_PIN(230, "GPIO_230"),
+       PINCTRL_PIN(231, "GPIO_231"),
+       PINCTRL_PIN(232, "GPIO_232"),
+       PINCTRL_PIN(233, "GPIO_233"),
+       PINCTRL_PIN(234, "GPIO_234"),
+       PINCTRL_PIN(235, "GPIO_235"),
+       PINCTRL_PIN(236, "GPIO_236"),
+       PINCTRL_PIN(237, "GPIO_237"),
+       PINCTRL_PIN(238, "UFS_RESET"),
+       PINCTRL_PIN(239, "SDC2_CLK"),
+       PINCTRL_PIN(240, "SDC2_CMD"),
+       PINCTRL_PIN(241, "SDC2_DATA"),
+};
+
+/*
+ * Emit a one-entry pin-index array "gpioN_pins" for each GPIO pad;
+ * these arrays are referenced by the group tables further down.
+ * Pins 238-241 are the special (non-muxable) pads named in the pin
+ * descriptor table above: UFS reset and the SDC2 clk/cmd/data lines.
+ */
+#define DECLARE_MSM_GPIO_PINS(pin) \
+       static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+DECLARE_MSM_GPIO_PINS(190);
+DECLARE_MSM_GPIO_PINS(191);
+DECLARE_MSM_GPIO_PINS(192);
+DECLARE_MSM_GPIO_PINS(193);
+DECLARE_MSM_GPIO_PINS(194);
+DECLARE_MSM_GPIO_PINS(195);
+DECLARE_MSM_GPIO_PINS(196);
+DECLARE_MSM_GPIO_PINS(197);
+DECLARE_MSM_GPIO_PINS(198);
+DECLARE_MSM_GPIO_PINS(199);
+DECLARE_MSM_GPIO_PINS(200);
+DECLARE_MSM_GPIO_PINS(201);
+DECLARE_MSM_GPIO_PINS(202);
+DECLARE_MSM_GPIO_PINS(203);
+DECLARE_MSM_GPIO_PINS(204);
+DECLARE_MSM_GPIO_PINS(205);
+DECLARE_MSM_GPIO_PINS(206);
+DECLARE_MSM_GPIO_PINS(207);
+DECLARE_MSM_GPIO_PINS(208);
+DECLARE_MSM_GPIO_PINS(209);
+DECLARE_MSM_GPIO_PINS(210);
+DECLARE_MSM_GPIO_PINS(211);
+DECLARE_MSM_GPIO_PINS(212);
+DECLARE_MSM_GPIO_PINS(213);
+DECLARE_MSM_GPIO_PINS(214);
+DECLARE_MSM_GPIO_PINS(215);
+DECLARE_MSM_GPIO_PINS(216);
+DECLARE_MSM_GPIO_PINS(217);
+DECLARE_MSM_GPIO_PINS(218);
+DECLARE_MSM_GPIO_PINS(219);
+DECLARE_MSM_GPIO_PINS(220);
+DECLARE_MSM_GPIO_PINS(221);
+DECLARE_MSM_GPIO_PINS(222);
+DECLARE_MSM_GPIO_PINS(223);
+DECLARE_MSM_GPIO_PINS(224);
+DECLARE_MSM_GPIO_PINS(225);
+DECLARE_MSM_GPIO_PINS(226);
+DECLARE_MSM_GPIO_PINS(227);
+DECLARE_MSM_GPIO_PINS(228);
+DECLARE_MSM_GPIO_PINS(229);
+DECLARE_MSM_GPIO_PINS(230);
+DECLARE_MSM_GPIO_PINS(231);
+DECLARE_MSM_GPIO_PINS(232);
+DECLARE_MSM_GPIO_PINS(233);
+DECLARE_MSM_GPIO_PINS(234);
+DECLARE_MSM_GPIO_PINS(235);
+DECLARE_MSM_GPIO_PINS(236);
+DECLARE_MSM_GPIO_PINS(237);
+
+/* Index arrays for the special pads (pin numbers 238-241 above). */
+static const unsigned int ufs_reset_pins[] = { 238 };
+static const unsigned int sdc2_clk_pins[] = { 239 };
+static const unsigned int sdc2_cmd_pins[] = { 240 };
+static const unsigned int sdc2_data_pins[] = { 241 };
+
+/*
+ * Mux function selectors for the X1E80100 TLMM.  Entries are kept in
+ * alphabetical order; msm_mux__ is the sentinel placed last.
+ * NOTE(review): msm_mux_RESOUT_GPIO breaks the all-lowercase naming of
+ * the other entries — presumably kept to match the hardware signal
+ * name; confirm before renaming (callers elsewhere reference it).
+ */
+enum x1e80100_functions {
+       msm_mux_gpio,
+       msm_mux_RESOUT_GPIO,
+       msm_mux_aon_cci,
+       msm_mux_aoss_cti,
+       msm_mux_atest_char,
+       msm_mux_atest_char0,
+       msm_mux_atest_char1,
+       msm_mux_atest_char2,
+       msm_mux_atest_char3,
+       msm_mux_atest_usb,
+       msm_mux_audio_ext,
+       msm_mux_audio_ref,
+       msm_mux_cam_aon,
+       msm_mux_cam_mclk,
+       msm_mux_cci_async,
+       msm_mux_cci_i2c,
+       msm_mux_cci_timer0,
+       msm_mux_cci_timer1,
+       msm_mux_cci_timer2,
+       msm_mux_cci_timer3,
+       msm_mux_cci_timer4,
+       msm_mux_cmu_rng0,
+       msm_mux_cmu_rng1,
+       msm_mux_cmu_rng2,
+       msm_mux_cmu_rng3,
+       msm_mux_cri_trng,
+       msm_mux_dbg_out,
+       msm_mux_ddr_bist,
+       msm_mux_ddr_pxi0,
+       msm_mux_ddr_pxi1,
+       msm_mux_ddr_pxi2,
+       msm_mux_ddr_pxi3,
+       msm_mux_ddr_pxi4,
+       msm_mux_ddr_pxi5,
+       msm_mux_ddr_pxi6,
+       msm_mux_ddr_pxi7,
+       msm_mux_edp0_hot,
+       msm_mux_edp0_lcd,
+       msm_mux_edp1_hot,
+       msm_mux_edp1_lcd,
+       msm_mux_eusb0_ac,
+       msm_mux_eusb1_ac,
+       msm_mux_eusb2_ac,
+       msm_mux_eusb3_ac,
+       msm_mux_eusb5_ac,
+       msm_mux_eusb6_ac,
+       msm_mux_gcc_gp1,
+       msm_mux_gcc_gp2,
+       msm_mux_gcc_gp3,
+       msm_mux_i2s0_data0,
+       msm_mux_i2s0_data1,
+       msm_mux_i2s0_sck,
+       msm_mux_i2s0_ws,
+       msm_mux_i2s1_data0,
+       msm_mux_i2s1_data1,
+       msm_mux_i2s1_sck,
+       msm_mux_i2s1_ws,
+       msm_mux_ibi_i3c,
+       msm_mux_jitter_bist,
+       msm_mux_mdp_vsync0,
+       msm_mux_mdp_vsync1,
+       msm_mux_mdp_vsync2,
+       msm_mux_mdp_vsync3,
+       msm_mux_mdp_vsync4,
+       msm_mux_mdp_vsync5,
+       msm_mux_mdp_vsync6,
+       msm_mux_mdp_vsync7,
+       msm_mux_mdp_vsync8,
+       msm_mux_pcie3_clk,
+       msm_mux_pcie4_clk,
+       msm_mux_pcie5_clk,
+       msm_mux_pcie6a_clk,
+       msm_mux_pcie6b_clk,
+       msm_mux_phase_flag,
+       msm_mux_pll_bist,
+       msm_mux_pll_clk,
+       msm_mux_prng_rosc0,
+       msm_mux_prng_rosc1,
+       msm_mux_prng_rosc2,
+       msm_mux_prng_rosc3,
+       msm_mux_qdss_cti,
+       msm_mux_qdss_gpio,
+       msm_mux_qspi00,
+       msm_mux_qspi01,
+       msm_mux_qspi02,
+       msm_mux_qspi03,
+       msm_mux_qspi0_clk,
+       msm_mux_qspi0_cs0,
+       msm_mux_qspi0_cs1,
+       msm_mux_qup0_se0,
+       msm_mux_qup0_se1,
+       msm_mux_qup0_se2,
+       msm_mux_qup0_se3,
+       msm_mux_qup0_se4,
+       msm_mux_qup0_se5,
+       msm_mux_qup0_se6,
+       msm_mux_qup0_se7,
+       msm_mux_qup1_se0,
+       msm_mux_qup1_se1,
+       msm_mux_qup1_se2,
+       msm_mux_qup1_se3,
+       msm_mux_qup1_se4,
+       msm_mux_qup1_se5,
+       msm_mux_qup1_se6,
+       msm_mux_qup1_se7,
+       msm_mux_qup2_se0,
+       msm_mux_qup2_se1,
+       msm_mux_qup2_se2,
+       msm_mux_qup2_se3,
+       msm_mux_qup2_se4,
+       msm_mux_qup2_se5,
+       msm_mux_qup2_se6,
+       msm_mux_qup2_se7,
+       msm_mux_sd_write,
+       msm_mux_sdc4_clk,
+       msm_mux_sdc4_cmd,
+       msm_mux_sdc4_data0,
+       msm_mux_sdc4_data1,
+       msm_mux_sdc4_data2,
+       msm_mux_sdc4_data3,
+       msm_mux_sys_throttle,
+       msm_mux_tb_trig,
+       msm_mux_tgu_ch0,
+       msm_mux_tgu_ch1,
+       msm_mux_tgu_ch2,
+       msm_mux_tgu_ch3,
+       msm_mux_tgu_ch4,
+       msm_mux_tgu_ch5,
+       msm_mux_tgu_ch6,
+       msm_mux_tgu_ch7,
+       msm_mux_tmess_prng0,
+       msm_mux_tmess_prng1,
+       msm_mux_tmess_prng2,
+       msm_mux_tmess_prng3,
+       msm_mux_tsense_pwm1,
+       msm_mux_tsense_pwm2,
+       msm_mux_tsense_pwm3,
+       msm_mux_tsense_pwm4,
+       msm_mux_usb0_dp,
+       msm_mux_usb0_phy,
+       msm_mux_usb0_sbrx,
+       msm_mux_usb0_sbtx,
+       msm_mux_usb1_dp,
+       msm_mux_usb1_phy,
+       msm_mux_usb1_sbrx,
+       msm_mux_usb1_sbtx,
+       msm_mux_usb2_dp,
+       msm_mux_usb2_phy,
+       msm_mux_usb2_sbrx,
+       msm_mux_usb2_sbtx,
+       msm_mux_vsense_trigger,
+       msm_mux__,
+};
+
+/*
+ * Groups selectable as plain GPIO: every muxable pad (gpio0-gpio237).
+ * The special pads (UFS_RESET, SDC2_*) are deliberately absent.
+ */
+static const char * const gpio_groups[] = {
+       "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+       "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+       "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+       "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+       "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+       "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+       "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+       "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+       "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+       "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+       "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+       "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+       "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+       "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+       "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+       "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+       "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+       "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+       "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+       "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+       "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+       "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+       "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+       "gpio153", "gpio154", "gpio155", "gpio156", "gpio157", "gpio158",
+       "gpio159", "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+       "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
+       "gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
+       "gpio177", "gpio178", "gpio179", "gpio180", "gpio181", "gpio182",
+       "gpio183", "gpio184", "gpio185", "gpio186", "gpio187", "gpio188",
+       "gpio189", "gpio190", "gpio191", "gpio192", "gpio193", "gpio194",
+       "gpio195", "gpio196", "gpio197", "gpio198", "gpio199", "gpio200",
+       "gpio201", "gpio202", "gpio203", "gpio204", "gpio205", "gpio206",
+       "gpio207", "gpio208", "gpio209", "gpio210", "gpio211", "gpio212",
+       "gpio213", "gpio214", "gpio215", "gpio216", "gpio217", "gpio218",
+       "gpio219", "gpio220", "gpio221", "gpio222", "gpio223", "gpio224",
+       "gpio225", "gpio226", "gpio227", "gpio228", "gpio229", "gpio230",
+       "gpio231", "gpio232", "gpio233", "gpio234", "gpio235", "gpio236",
+       "gpio237",
+};
+
+/*
+ * Per-function group tables: each <func>_groups[] lists the pin groups
+ * on which that mux function can be selected.  Values are taken from
+ * the SoC pin-mux map and cannot be derived from this file — verify
+ * any change against the X1E80100 TLMM documentation.
+ */
+static const char * const RESOUT_GPIO_groups[] = {
+       "gpio160",
+};
+
+static const char * const aon_cci_groups[] = {
+       "gpio235", "gpio236",
+};
+
+static const char * const aoss_cti_groups[] = {
+       "gpio60", "gpio61", "gpio62", "gpio63",
+};
+
+static const char * const atest_char_groups[] = {
+       "gpio181",
+};
+
+static const char * const atest_char0_groups[] = {
+       "gpio185",
+};
+
+static const char * const atest_char1_groups[] = {
+       "gpio184",
+};
+
+static const char * const atest_char2_groups[] = {
+       "gpio188",
+};
+
+static const char * const atest_char3_groups[] = {
+       "gpio182",
+};
+
+static const char * const atest_usb_groups[] = {
+       "gpio9", "gpio10", "gpio35", "gpio38", "gpio41", "gpio42",
+       "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48",
+       "gpio49", "gpio50", "gpio51", "gpio52", "gpio53", "gpio54",
+       "gpio58", "gpio59", "gpio65", "gpio66", "gpio67", "gpio72",
+       "gpio73", "gpio74", "gpio75", "gpio80", "gpio81", "gpio83",
+};
+
+static const char * const audio_ext_groups[] = {
+       "gpio134", "gpio142",
+};
+
+static const char * const audio_ref_groups[] = {
+       "gpio142",
+};
+
+static const char * const cam_aon_groups[] = {
+       "gpio100",
+};
+
+static const char * const cam_mclk_groups[] = {
+       "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const cci_async_groups[] = {
+       "gpio111", "gpio112", "gpio113",
+};
+
+static const char * const cci_i2c_groups[] = {
+       "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106",
+};
+
+static const char * const cci_timer0_groups[] = {
+       "gpio109",
+};
+
+static const char * const cci_timer1_groups[] = {
+       "gpio110",
+};
+
+static const char * const cci_timer2_groups[] = {
+       "gpio111",
+};
+
+static const char * const cci_timer3_groups[] = {
+       "gpio112",
+};
+
+static const char * const cci_timer4_groups[] = {
+       "gpio113",
+};
+
+static const char * const cmu_rng0_groups[] = {
+       "gpio48",
+};
+
+static const char * const cmu_rng1_groups[] = {
+       "gpio47",
+};
+
+static const char * const cmu_rng2_groups[] = {
+       "gpio46",
+};
+
+static const char * const cmu_rng3_groups[] = {
+       "gpio45",
+};
+
+static const char * const cri_trng_groups[] = {
+       "gpio187",
+};
+
+static const char * const dbg_out_groups[] = {
+       "gpio51",
+};
+
+static const char * const ddr_bist_groups[] = {
+       "gpio54", "gpio55", "gpio56", "gpio57",
+};
+
+static const char * const ddr_pxi0_groups[] = {
+       "gpio9", "gpio38",
+};
+
+static const char * const ddr_pxi1_groups[] = {
+       "gpio10", "gpio41",
+};
+
+static const char * const ddr_pxi2_groups[] = {
+       "gpio42", "gpio43",
+};
+
+static const char * const ddr_pxi3_groups[] = {
+       "gpio44", "gpio45",
+};
+
+static const char * const ddr_pxi4_groups[] = {
+       "gpio46", "gpio47",
+};
+
+static const char * const ddr_pxi5_groups[] = {
+       "gpio48", "gpio49",
+};
+
+static const char * const ddr_pxi6_groups[] = {
+       "gpio50", "gpio51",
+};
+
+static const char * const ddr_pxi7_groups[] = {
+       "gpio52", "gpio53",
+};
+
+static const char * const edp0_hot_groups[] = {
+       "gpio119",
+};
+
+static const char * const edp0_lcd_groups[] = {
+       "gpio120",
+};
+
+static const char * const edp1_hot_groups[] = {
+       "gpio120",
+};
+
+static const char * const edp1_lcd_groups[] = {
+       "gpio115", "gpio119",
+};
+
+static const char * const eusb0_ac_groups[] = {
+       "gpio168",
+};
+
+static const char * const eusb1_ac_groups[] = {
+       "gpio177",
+};
+
+static const char * const eusb2_ac_groups[] = {
+       "gpio186",
+};
+
+static const char * const eusb3_ac_groups[] = {
+       "gpio169",
+};
+
+static const char * const eusb5_ac_groups[] = {
+       "gpio187",
+};
+
+static const char * const eusb6_ac_groups[] = {
+       "gpio178",
+};
+
+static const char * const gcc_gp1_groups[] = {
+       "gpio71", "gpio72",
+};
+
+static const char * const gcc_gp2_groups[] = {
+       "gpio64", "gpio73",
+};
+
+static const char * const gcc_gp3_groups[] = {
+       "gpio74", "gpio82",
+};
+
+static const char * const i2s0_data0_groups[] = {
+       "gpio136",
+};
+
+static const char * const i2s0_data1_groups[] = {
+       "gpio137",
+};
+
+static const char * const i2s0_sck_groups[] = {
+       "gpio135",
+};
+
+static const char * const i2s0_ws_groups[] = {
+       "gpio138",
+};
+
+static const char * const i2s1_data0_groups[] = {
+       "gpio140",
+};
+
+static const char * const i2s1_data1_groups[] = {
+       "gpio142",
+};
+
+static const char * const i2s1_sck_groups[] = {
+       "gpio139",
+};
+
+static const char * const i2s1_ws_groups[] = {
+       "gpio141",
+};
+
+static const char * const ibi_i3c_groups[] = {
+       "gpio0", "gpio1", "gpio32", "gpio33", "gpio36", "gpio37", "gpio68",
+       "gpio69",
+};
+
+static const char * const jitter_bist_groups[] = {
+       "gpio42",
+};
+
+static const char * const mdp_vsync0_groups[] = {
+       "gpio114",
+};
+
+static const char * const mdp_vsync1_groups[] = {
+       "gpio114",
+};
+
+static const char * const mdp_vsync2_groups[] = {
+       "gpio115",
+};
+
+static const char * const mdp_vsync3_groups[] = {
+       "gpio115",
+};
+
+static const char * const mdp_vsync4_groups[] = {
+       "gpio109",
+};
+
+static const char * const mdp_vsync5_groups[] = {
+       "gpio110",
+};
+
+static const char * const mdp_vsync6_groups[] = {
+       "gpio111",
+};
+
+static const char * const mdp_vsync7_groups[] = {
+       "gpio112",
+};
+
+static const char * const mdp_vsync8_groups[] = {
+       "gpio113",
+};
+
+static const char * const pcie3_clk_groups[] = {
+       "gpio144",
+};
+
+static const char * const pcie4_clk_groups[] = {
+       "gpio147",
+};
+
+static const char * const pcie5_clk_groups[] = {
+       "gpio150",
+};
+
+static const char * const pcie6a_clk_groups[] = {
+       "gpio153",
+};
+
+static const char * const pcie6b_clk_groups[] = {
+       "gpio156",
+};
+
+static const char * const phase_flag_groups[] = {
+       "gpio6", "gpio7", "gpio8", "gpio11", "gpio12", "gpio13",
+       "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19",
+       "gpio20", "gpio21", "gpio22", "gpio23", "gpio24", "gpio25",
+       "gpio26", "gpio27", "gpio39", "gpio40", "gpio76", "gpio77",
+       "gpio78", "gpio181", "gpio182", "gpio184", "gpio185",
+       "gpio186", "gpio187", "gpio188",
+};
+
+static const char * const pll_bist_groups[] = {
+       "gpio28",
+};
+
+static const char * const pll_clk_groups[] = {
+       "gpio35",
+};
+
+static const char * const prng_rosc0_groups[] = {
+       "gpio186",
+};
+
+static const char * const prng_rosc1_groups[] = {
+       "gpio188",
+};
+
+static const char * const prng_rosc2_groups[] = {
+       "gpio182",
+};
+
+static const char * const prng_rosc3_groups[] = {
+       "gpio181",
+};
+
+static const char * const qdss_cti_groups[] = {
+       "gpio18", "gpio19", "gpio23", "gpio27", "gpio161", "gpio162",
+       "gpio215", "gpio217",
+};
+
+static const char * const qdss_gpio_groups[] = {
+       "gpio96", "gpio97", "gpio98", "gpio99", "gpio100", "gpio101",
+       "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+       "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113",
+       "gpio219", "gpio220", "gpio221", "gpio222", "gpio223", "gpio224",
+       "gpio225", "gpio226", "gpio227", "gpio228", "gpio229", "gpio230",
+       "gpio231", "gpio232", "gpio233", "gpio234", "gpio235", "gpio236",
+};
+
+static const char * const qspi00_groups[] = {
+       "gpio128",
+};
+
+static const char * const qspi01_groups[] = {
+       "gpio129",
+};
+
+static const char * const qspi02_groups[] = {
+       "gpio130",
+};
+
+static const char * const qspi03_groups[] = {
+       "gpio131",
+};
+
+static const char * const qspi0_clk_groups[] = {
+       "gpio127",
+};
+
+static const char * const qspi0_cs0_groups[] = {
+       "gpio132",
+};
+
+static const char * const qspi0_cs1_groups[] = {
+       "gpio133",
+};
+
+static const char * const qup0_se0_groups[] = {
+       "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const qup0_se1_groups[] = {
+       "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const qup0_se2_groups[] = {
+       "gpio8", "gpio9", "gpio10", "gpio11", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const qup0_se3_groups[] = {
+       "gpio12", "gpio13", "gpio14", "gpio15", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qup0_se4_groups[] = {
+       "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const qup0_se5_groups[] = {
+       "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qup0_se6_groups[] = {
+       "gpio24", "gpio25", "gpio26", "gpio27",
+};
+
+static const char * const qup0_se7_groups[] = {
+       "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const qup1_se0_groups[] = {
+       "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char * const qup1_se1_groups[] = {
+       "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char * const qup1_se2_groups[] = {
+       "gpio40", "gpio41", "gpio42", "gpio43", "gpio49", "gpio50", "gpio51",
+};
+
+static const char * const qup1_se3_groups[] = {
+       "gpio33", "gpio34", "gpio35", "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char * const qup1_se4_groups[] = {
+       "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char * const qup1_se5_groups[] = {
+       "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char * const qup1_se6_groups[] = {
+       "gpio56", "gpio57", "gpio58", "gpio59",
+};
+
+static const char * const qup1_se7_groups[] = {
+       "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char * const qup2_se0_groups[] = {
+       "gpio64", "gpio65", "gpio66", "gpio67",
+};
+
+static const char * const qup2_se1_groups[] = {
+       "gpio68", "gpio69", "gpio70", "gpio71",
+};
+
+static const char * const qup2_se2_groups[] = {
+       "gpio72", "gpio73", "gpio74", "gpio75", "gpio81", "gpio82", "gpio83",
+};
+
+static const char * const qup2_se3_groups[] = {
+       "gpio65", "gpio66", "gpio67", "gpio76", "gpio77", "gpio78", "gpio79",
+};
+
+static const char * const qup2_se4_groups[] = {
+       "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char * const qup2_se5_groups[] = {
+       "gpio84", "gpio85", "gpio86", "gpio87",
+};
+
+static const char * const qup2_se6_groups[] = {
+       "gpio88", "gpio89", "gpio90", "gpio91",
+};
+
+static const char * const qup2_se7_groups[] = {
+       "gpio84", "gpio85", "gpio86", "gpio87",
+};
+
+static const char * const sd_write_groups[] = {
+       "gpio162",
+};
+
+static const char * const sdc4_clk_groups[] = {
+       "gpio127",
+};
+
+static const char * const sdc4_cmd_groups[] = {
+       "gpio132",
+};
+
+static const char * const sdc4_data0_groups[] = {
+       "gpio128",
+};
+
+static const char * const sdc4_data1_groups[] = {
+       "gpio129",
+};
+
+static const char * const sdc4_data2_groups[] = {
+       "gpio130",
+};
+
+static const char * const sdc4_data3_groups[] = {
+       "gpio131",
+};
+
+static const char * const sys_throttle_groups[] = {
+       "gpio39", "gpio94",
+};
+
+static const char * const tb_trig_groups[] = {
+       "gpio133", "gpio137",
+};
+
+static const char * const tgu_ch0_groups[] = {
+       "gpio81",
+};
+
+static const char * const tgu_ch1_groups[] = {
+       "gpio65",
+};
+
+static const char * const tgu_ch2_groups[] = {
+       "gpio66",
+};
+
+static const char * const tgu_ch3_groups[] = {
+       "gpio67",
+};
+
+static const char * const tgu_ch4_groups[] = {
+       "gpio68",
+};
+
+static const char * const tgu_ch5_groups[] = {
+       "gpio69",
+};
+
+static const char * const tgu_ch6_groups[] = {
+       "gpio83",
+};
+
+static const char * const tgu_ch7_groups[] = {
+       "gpio80",
+};
+
+static const char * const tmess_prng0_groups[] = {
+       "gpio92",
+};
+
+static const char * const tmess_prng1_groups[] = {
+       "gpio93",
+};
+
+static const char * const tmess_prng2_groups[] = {
+       "gpio94",
+};
+
+static const char * const tmess_prng3_groups[] = {
+       "gpio95",
+};
+
+static const char * const tsense_pwm1_groups[] = {
+       "gpio34",
+};
+
+static const char * const tsense_pwm2_groups[] = {
+       "gpio34",
+};
+
+static const char * const tsense_pwm3_groups[] = {
+       "gpio34",
+};
+
+static const char * const tsense_pwm4_groups[] = {
+       "gpio34",
+};
+
+static const char * const usb0_dp_groups[] = {
+       "gpio122",
+};
+
+static const char * const usb0_phy_groups[] = {
+       "gpio121",
+};
+
+static const char * const usb0_sbrx_groups[] = {
+       "gpio163",
+};
+
+static const char * const usb0_sbtx_groups[] = {
+       "gpio164", "gpio165",
+};
+
+static const char * const usb1_dp_groups[] = {
+       "gpio124",
+};
+
+static const char * const usb1_phy_groups[] = {
+       "gpio123",
+};
+
+static const char * const usb1_sbrx_groups[] = {
+       "gpio172",
+};
+
+static const char * const usb1_sbtx_groups[] = {
+       "gpio173", "gpio174",
+};
+
+static const char * const usb2_dp_groups[] = {
+       "gpio126",
+};
+
+static const char * const usb2_phy_groups[] = {
+       "gpio125",
+};
+
+static const char * const usb2_sbrx_groups[] = {
+       "gpio181",
+};
+
+static const char * const usb2_sbtx_groups[] = {
+       "gpio182", "gpio183",
+};
+
+static const char * const vsense_trigger_groups[] = {
+       "gpio38",
+};
+
+static const struct pinfunction x1e80100_functions[] = {
+       MSM_PIN_FUNCTION(gpio),
+       MSM_PIN_FUNCTION(RESOUT_GPIO),
+       MSM_PIN_FUNCTION(aon_cci),
+       MSM_PIN_FUNCTION(aoss_cti),
+       MSM_PIN_FUNCTION(atest_char),
+       MSM_PIN_FUNCTION(atest_char0),
+       MSM_PIN_FUNCTION(atest_char1),
+       MSM_PIN_FUNCTION(atest_char2),
+       MSM_PIN_FUNCTION(atest_char3),
+       MSM_PIN_FUNCTION(atest_usb),
+       MSM_PIN_FUNCTION(audio_ext),
+       MSM_PIN_FUNCTION(audio_ref),
+       MSM_PIN_FUNCTION(cam_aon),
+       MSM_PIN_FUNCTION(cam_mclk),
+       MSM_PIN_FUNCTION(cci_async),
+       MSM_PIN_FUNCTION(cci_i2c),
+       MSM_PIN_FUNCTION(cci_timer0),
+       MSM_PIN_FUNCTION(cci_timer1),
+       MSM_PIN_FUNCTION(cci_timer2),
+       MSM_PIN_FUNCTION(cci_timer3),
+       MSM_PIN_FUNCTION(cci_timer4),
+       MSM_PIN_FUNCTION(cmu_rng0),
+       MSM_PIN_FUNCTION(cmu_rng1),
+       MSM_PIN_FUNCTION(cmu_rng2),
+       MSM_PIN_FUNCTION(cmu_rng3),
+       MSM_PIN_FUNCTION(cri_trng),
+       MSM_PIN_FUNCTION(dbg_out),
+       MSM_PIN_FUNCTION(ddr_bist),
+       MSM_PIN_FUNCTION(ddr_pxi0),
+       MSM_PIN_FUNCTION(ddr_pxi1),
+       MSM_PIN_FUNCTION(ddr_pxi2),
+       MSM_PIN_FUNCTION(ddr_pxi3),
+       MSM_PIN_FUNCTION(ddr_pxi4),
+       MSM_PIN_FUNCTION(ddr_pxi5),
+       MSM_PIN_FUNCTION(ddr_pxi6),
+       MSM_PIN_FUNCTION(ddr_pxi7),
+       MSM_PIN_FUNCTION(edp0_hot),
+       MSM_PIN_FUNCTION(edp0_lcd),
+       MSM_PIN_FUNCTION(edp1_hot),
+       MSM_PIN_FUNCTION(edp1_lcd),
+       MSM_PIN_FUNCTION(eusb0_ac),
+       MSM_PIN_FUNCTION(eusb1_ac),
+       MSM_PIN_FUNCTION(eusb2_ac),
+       MSM_PIN_FUNCTION(eusb3_ac),
+       MSM_PIN_FUNCTION(eusb5_ac),
+       MSM_PIN_FUNCTION(eusb6_ac),
+       MSM_PIN_FUNCTION(gcc_gp1),
+       MSM_PIN_FUNCTION(gcc_gp2),
+       MSM_PIN_FUNCTION(gcc_gp3),
+       MSM_PIN_FUNCTION(i2s0_data0),
+       MSM_PIN_FUNCTION(i2s0_data1),
+       MSM_PIN_FUNCTION(i2s0_sck),
+       MSM_PIN_FUNCTION(i2s0_ws),
+       MSM_PIN_FUNCTION(i2s1_data0),
+       MSM_PIN_FUNCTION(i2s1_data1),
+       MSM_PIN_FUNCTION(i2s1_sck),
+       MSM_PIN_FUNCTION(i2s1_ws),
+       MSM_PIN_FUNCTION(ibi_i3c),
+       MSM_PIN_FUNCTION(jitter_bist),
+       MSM_PIN_FUNCTION(mdp_vsync0),
+       MSM_PIN_FUNCTION(mdp_vsync1),
+       MSM_PIN_FUNCTION(mdp_vsync2),
+       MSM_PIN_FUNCTION(mdp_vsync3),
+       MSM_PIN_FUNCTION(mdp_vsync4),
+       MSM_PIN_FUNCTION(mdp_vsync5),
+       MSM_PIN_FUNCTION(mdp_vsync6),
+       MSM_PIN_FUNCTION(mdp_vsync7),
+       MSM_PIN_FUNCTION(mdp_vsync8),
+       MSM_PIN_FUNCTION(pcie3_clk),
+       MSM_PIN_FUNCTION(pcie4_clk),
+       MSM_PIN_FUNCTION(pcie5_clk),
+       MSM_PIN_FUNCTION(pcie6a_clk),
+       MSM_PIN_FUNCTION(pcie6b_clk),
+       MSM_PIN_FUNCTION(phase_flag),
+       MSM_PIN_FUNCTION(pll_bist),
+       MSM_PIN_FUNCTION(pll_clk),
+       MSM_PIN_FUNCTION(prng_rosc0),
+       MSM_PIN_FUNCTION(prng_rosc1),
+       MSM_PIN_FUNCTION(prng_rosc2),
+       MSM_PIN_FUNCTION(prng_rosc3),
+       MSM_PIN_FUNCTION(qdss_cti),
+       MSM_PIN_FUNCTION(qdss_gpio),
+       MSM_PIN_FUNCTION(qspi00),
+       MSM_PIN_FUNCTION(qspi01),
+       MSM_PIN_FUNCTION(qspi02),
+       MSM_PIN_FUNCTION(qspi03),
+       MSM_PIN_FUNCTION(qspi0_clk),
+       MSM_PIN_FUNCTION(qspi0_cs0),
+       MSM_PIN_FUNCTION(qspi0_cs1),
+       MSM_PIN_FUNCTION(qup0_se0),
+       MSM_PIN_FUNCTION(qup0_se1),
+       MSM_PIN_FUNCTION(qup0_se2),
+       MSM_PIN_FUNCTION(qup0_se3),
+       MSM_PIN_FUNCTION(qup0_se4),
+       MSM_PIN_FUNCTION(qup0_se5),
+       MSM_PIN_FUNCTION(qup0_se6),
+       MSM_PIN_FUNCTION(qup0_se7),
+       MSM_PIN_FUNCTION(qup1_se0),
+       MSM_PIN_FUNCTION(qup1_se1),
+       MSM_PIN_FUNCTION(qup1_se2),
+       MSM_PIN_FUNCTION(qup1_se3),
+       MSM_PIN_FUNCTION(qup1_se4),
+       MSM_PIN_FUNCTION(qup1_se5),
+       MSM_PIN_FUNCTION(qup1_se6),
+       MSM_PIN_FUNCTION(qup1_se7),
+       MSM_PIN_FUNCTION(qup2_se0),
+       MSM_PIN_FUNCTION(qup2_se1),
+       MSM_PIN_FUNCTION(qup2_se2),
+       MSM_PIN_FUNCTION(qup2_se3),
+       MSM_PIN_FUNCTION(qup2_se4),
+       MSM_PIN_FUNCTION(qup2_se5),
+       MSM_PIN_FUNCTION(qup2_se6),
+       MSM_PIN_FUNCTION(qup2_se7),
+       MSM_PIN_FUNCTION(sd_write),
+       MSM_PIN_FUNCTION(sdc4_clk),
+       MSM_PIN_FUNCTION(sdc4_cmd),
+       MSM_PIN_FUNCTION(sdc4_data0),
+       MSM_PIN_FUNCTION(sdc4_data1),
+       MSM_PIN_FUNCTION(sdc4_data2),
+       MSM_PIN_FUNCTION(sdc4_data3),
+       MSM_PIN_FUNCTION(sys_throttle),
+       MSM_PIN_FUNCTION(tb_trig),
+       MSM_PIN_FUNCTION(tgu_ch0),
+       MSM_PIN_FUNCTION(tgu_ch1),
+       MSM_PIN_FUNCTION(tgu_ch2),
+       MSM_PIN_FUNCTION(tgu_ch3),
+       MSM_PIN_FUNCTION(tgu_ch4),
+       MSM_PIN_FUNCTION(tgu_ch5),
+       MSM_PIN_FUNCTION(tgu_ch6),
+       MSM_PIN_FUNCTION(tgu_ch7),
+       MSM_PIN_FUNCTION(tmess_prng0),
+       MSM_PIN_FUNCTION(tmess_prng1),
+       MSM_PIN_FUNCTION(tmess_prng2),
+       MSM_PIN_FUNCTION(tmess_prng3),
+       MSM_PIN_FUNCTION(tsense_pwm1),
+       MSM_PIN_FUNCTION(tsense_pwm2),
+       MSM_PIN_FUNCTION(tsense_pwm3),
+       MSM_PIN_FUNCTION(tsense_pwm4),
+       MSM_PIN_FUNCTION(usb0_dp),
+       MSM_PIN_FUNCTION(usb0_phy),
+       MSM_PIN_FUNCTION(usb0_sbrx),
+       MSM_PIN_FUNCTION(usb0_sbtx),
+       MSM_PIN_FUNCTION(usb1_dp),
+       MSM_PIN_FUNCTION(usb1_phy),
+       MSM_PIN_FUNCTION(usb1_sbrx),
+       MSM_PIN_FUNCTION(usb1_sbtx),
+       MSM_PIN_FUNCTION(usb2_dp),
+       MSM_PIN_FUNCTION(usb2_phy),
+       MSM_PIN_FUNCTION(usb2_sbrx),
+       MSM_PIN_FUNCTION(usb2_sbtx),
+       MSM_PIN_FUNCTION(vsense_trigger),
+};
+
+/*
+ * Every pin is maintained as a single group, and missing or non-existing pin
+ * would be maintained as dummy group to synchronize pin group index with
+ * pin descriptor registered with pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
+static const struct msm_pingroup x1e80100_groups[] = {
+       [0] = PINGROUP(0, qup0_se0, ibi_i3c, _, _, _, _, _, _, _),
+       [1] = PINGROUP(1, qup0_se0, ibi_i3c, _, _, _, _, _, _, _),
+       [2] = PINGROUP(2, qup0_se0, _, _, _, _, _, _, _, _),
+       [3] = PINGROUP(3, qup0_se0, _, _, _, _, _, _, _, _),
+       [4] = PINGROUP(4, qup0_se1, _, _, _, _, _, _, _, _),
+       [5] = PINGROUP(5, qup0_se1, _, _, _, _, _, _, _, _),
+       [6] = PINGROUP(6, qup0_se1, phase_flag, _, _, _, _, _, _, _),
+       [7] = PINGROUP(7, qup0_se1, phase_flag, _, _, _, _, _, _, _),
+       [8] = PINGROUP(8, qup0_se2, phase_flag, _, _, _, _, _, _, _),
+       [9] = PINGROUP(9, qup0_se2, _, atest_usb, ddr_pxi0, _, _, _, _, _),
+       [10] = PINGROUP(10, qup0_se2, _, atest_usb, ddr_pxi1, _, _, _, _, _),
+       [11] = PINGROUP(11, qup0_se2, phase_flag, _, _, _, _, _, _, _),
+       [12] = PINGROUP(12, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _),
+       [13] = PINGROUP(13, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _),
+       [14] = PINGROUP(14, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _),
+       [15] = PINGROUP(15, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _),
+       [16] = PINGROUP(16, qup0_se4, phase_flag, _, _, _, _, _, _, _),
+       [17] = PINGROUP(17, qup0_se4, qup0_se2, phase_flag, _, _, _, _, _, _),
+       [18] = PINGROUP(18, qup0_se4, qup0_se2, phase_flag, _, qdss_cti, _, _, _, _),
+       [19] = PINGROUP(19, qup0_se4, qup0_se2, phase_flag, _, qdss_cti, _, _, _, _),
+       [20] = PINGROUP(20, qup0_se5, _, phase_flag, _, _, _, _, _, _),
+       [21] = PINGROUP(21, qup0_se5, qup0_se3, _, phase_flag, _, _, _, _, _),
+       [22] = PINGROUP(22, qup0_se5, qup0_se3, _, phase_flag, _, _, _, _, _),
+       [23] = PINGROUP(23, qup0_se5, qup0_se3, phase_flag, _, qdss_cti, _, _, _, _),
+       [24] = PINGROUP(24, qup0_se6, phase_flag, _, _, _, _, _, _, _),
+       [25] = PINGROUP(25, qup0_se6, phase_flag, _, _, _, _, _, _, _),
+       [26] = PINGROUP(26, qup0_se6, phase_flag, _, _, _, _, _, _, _),
+       [27] = PINGROUP(27, qup0_se6, phase_flag, _, qdss_cti, _, _, _, _, _),
+       [28] = PINGROUP(28, pll_bist, _, _, _, _, _, _, _, _),
+       [29] = PINGROUP(29, _, _, _, _, _, _, _, _, _),
+       [30] = PINGROUP(30, _, _, _, _, _, _, _, _, _),
+       [31] = PINGROUP(31, _, _, _, _, _, _, _, _, _),
+       [32] = PINGROUP(32, qup1_se0, ibi_i3c, _, _, _, _, _, _, _),
+       [33] = PINGROUP(33, qup1_se0, ibi_i3c, qup1_se3, _, _, _, _, _, _),
+       [34] = PINGROUP(34, qup1_se0, qup1_se3, tsense_pwm1, tsense_pwm2, tsense_pwm3, tsense_pwm4, _, _, _),
+       [35] = PINGROUP(35, qup1_se0, qup1_se3, pll_clk, atest_usb, _, _, _, _, _),
+       [36] = PINGROUP(36, qup1_se1, ibi_i3c, _, _, _, _, _, _, _),
+       [37] = PINGROUP(37, qup1_se1, ibi_i3c, _, _, _, _, _, _, _),
+       [38] = PINGROUP(38, qup1_se1, vsense_trigger, atest_usb, ddr_pxi0, _, _, _, _, _),
+       [39] = PINGROUP(39, qup1_se1, sys_throttle, phase_flag, _, _, _, _, _, _),
+       [40] = PINGROUP(40, qup1_se2, phase_flag, _, _, _, _, _, _, _),
+       [41] = PINGROUP(41, qup1_se2, atest_usb, ddr_pxi1, _, _, _, _, _, _),
+       [42] = PINGROUP(42, qup1_se2, jitter_bist, atest_usb, ddr_pxi2, _, _, _, _, _),
+       [43] = PINGROUP(43, qup1_se2, _, atest_usb, ddr_pxi2, _, _, _, _, _),
+       [44] = PINGROUP(44, qup1_se3, _, atest_usb, ddr_pxi3, _, _, _, _, _),
+       [45] = PINGROUP(45, qup1_se3, cmu_rng3, _, atest_usb, ddr_pxi3, _, _, _, _),
+       [46] = PINGROUP(46, qup1_se3, cmu_rng2, _, atest_usb, ddr_pxi4, _, _, _, _),
+       [47] = PINGROUP(47, qup1_se3, cmu_rng1, _, atest_usb, ddr_pxi4, _, _, _, _),
+       [48] = PINGROUP(48, qup1_se4, cmu_rng0, _, atest_usb, ddr_pxi5, _, _, _, _),
+       [49] = PINGROUP(49, qup1_se4, qup1_se2, _, atest_usb, ddr_pxi5, _, _, _, _),
+       [50] = PINGROUP(50, qup1_se4, qup1_se2, _, atest_usb, ddr_pxi6, _, _, _, _),
+       [51] = PINGROUP(51, qup1_se4, qup1_se2, dbg_out, atest_usb, ddr_pxi6, _, _, _, _),
+       [52] = PINGROUP(52, qup1_se5, qup1_se7, atest_usb, ddr_pxi7, _, _, _, _, _),
+       [53] = PINGROUP(53, qup1_se5, qup1_se7, _, atest_usb, ddr_pxi7, _, _, _, _),
+       [54] = PINGROUP(54, qup1_se5, qup1_se7, ddr_bist, atest_usb, _, _, _, _, _),
+       [55] = PINGROUP(55, qup1_se5, qup1_se7, ddr_bist, _, _, _, _, _, _),
+       [56] = PINGROUP(56, qup1_se6, ddr_bist, _, _, _, _, _, _, _),
+       [57] = PINGROUP(57, qup1_se6, ddr_bist, _, _, _, _, _, _, _),
+       [58] = PINGROUP(58, qup1_se6, atest_usb, _, _, _, _, _, _, _),
+       [59] = PINGROUP(59, qup1_se6, atest_usb, _, _, _, _, _, _, _),
+       [60] = PINGROUP(60, aoss_cti, _, _, _, _, _, _, _, _),
+       [61] = PINGROUP(61, aoss_cti, _, _, _, _, _, _, _, _),
+       [62] = PINGROUP(62, aoss_cti, _, _, _, _, _, _, _, _),
+       [63] = PINGROUP(63, aoss_cti, _, _, _, _, _, _, _, _),
+       [64] = PINGROUP(64, qup2_se0, gcc_gp2, _, _, _, _, _, _, _),
+       [65] = PINGROUP(65, qup2_se0, qup2_se3, tgu_ch1, atest_usb, _, _, _, _, _),
+       [66] = PINGROUP(66, qup2_se0, qup2_se3, tgu_ch2, atest_usb, _, _, _, _, _),
+       [67] = PINGROUP(67, qup2_se0, qup2_se3, tgu_ch3, atest_usb, _, _, _, _, _),
+       [68] = PINGROUP(68, qup2_se1, ibi_i3c, tgu_ch4, _, _, _, _, _, _),
+       [69] = PINGROUP(69, qup2_se1, ibi_i3c, tgu_ch5, _, _, _, _, _, _),
+       [70] = PINGROUP(70, qup2_se1, _, _, _, _, _, _, _, _),
+       [71] = PINGROUP(71, qup2_se1, gcc_gp1, _, _, _, _, _, _, _),
+       [72] = PINGROUP(72, qup2_se2, gcc_gp1, atest_usb, _, _, _, _, _, _),
+       [73] = PINGROUP(73, qup2_se2, gcc_gp2, atest_usb, _, _, _, _, _, _),
+       [74] = PINGROUP(74, qup2_se2, gcc_gp3, atest_usb, _, _, _, _, _, _),
+       [75] = PINGROUP(75, qup2_se2, atest_usb, _, _, _, _, _, _, _),
+       [76] = PINGROUP(76, qup2_se3, phase_flag, _, _, _, _, _, _, _),
+       [77] = PINGROUP(77, qup2_se3, phase_flag, _, _, _, _, _, _, _),
+       [78] = PINGROUP(78, qup2_se3, phase_flag, _, _, _, _, _, _, _),
+       [79] = PINGROUP(79, qup2_se3, _, _, _, _, _, _, _, _),
+       [80] = PINGROUP(80, qup2_se4, tgu_ch7, atest_usb, _, _, _, _, _, _),
+       [81] = PINGROUP(81, qup2_se4, qup2_se2, tgu_ch0, atest_usb, _, _, _, _, _),
+       [82] = PINGROUP(82, qup2_se4, qup2_se2, gcc_gp3, _, _, _, _, _, _),
+       [83] = PINGROUP(83, qup2_se4, qup2_se2, tgu_ch6, atest_usb, _, _, _, _, _),
+       [84] = PINGROUP(84, qup2_se5, qup2_se7, _, _, _, _, _, _, _),
+       [85] = PINGROUP(85, qup2_se5, qup2_se7, _, _, _, _, _, _, _),
+       [86] = PINGROUP(86, qup2_se5, qup2_se7, _, _, _, _, _, _, _),
+       [87] = PINGROUP(87, qup2_se5, qup2_se7, _, _, _, _, _, _, _),
+       [88] = PINGROUP(88, qup2_se6, _, _, _, _, _, _, _, _),
+       [89] = PINGROUP(89, qup2_se6, _, _, _, _, _, _, _, _),
+       [90] = PINGROUP(90, qup2_se6, _, _, _, _, _, _, _, _),
+       [91] = PINGROUP(91, qup2_se6, _, _, _, _, _, _, _, _),
+       [92] = PINGROUP(92, tmess_prng0, _, _, _, _, _, _, _, _),
+       [93] = PINGROUP(93, tmess_prng1, _, _, _, _, _, _, _, _),
+       [94] = PINGROUP(94, sys_throttle, tmess_prng2, _, _, _, _, _, _, _),
+       [95] = PINGROUP(95, tmess_prng3, _, _, _, _, _, _, _, _),
+       [96] = PINGROUP(96, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+       [97] = PINGROUP(97, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+       [98] = PINGROUP(98, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+       [99] = PINGROUP(99, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+       [100] = PINGROUP(100, cam_aon, qdss_gpio, _, _, _, _, _, _, _),
+       [101] = PINGROUP(101, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+       [102] = PINGROUP(102, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+       [103] = PINGROUP(103, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+       [104] = PINGROUP(104, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+       [105] = PINGROUP(105, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+       [106] = PINGROUP(106, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+       [107] = PINGROUP(107, qdss_gpio, _, _, _, _, _, _, _, _),
+       [108] = PINGROUP(108, qdss_gpio, _, _, _, _, _, _, _, _),
+       [109] = PINGROUP(109, cci_timer0, mdp_vsync4, qdss_gpio, _, _, _, _, _, _),
+       [110] = PINGROUP(110, cci_timer1, mdp_vsync5, qdss_gpio, _, _, _, _, _, _),
+       [111] = PINGROUP(111, cci_timer2, cci_async, mdp_vsync6, qdss_gpio, _, _, _, _, _),
+       [112] = PINGROUP(112, cci_timer3, cci_async, mdp_vsync7, qdss_gpio, _, _, _, _, _),
+       [113] = PINGROUP(113, cci_timer4, cci_async, mdp_vsync8, qdss_gpio, _, _, _, _, _),
+       [114] = PINGROUP(114, mdp_vsync0, mdp_vsync1, _, _, _, _, _, _, _),
+       [115] = PINGROUP(115, mdp_vsync3, mdp_vsync2, edp1_lcd, _, _, _, _, _, _),
+       [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _),
+       [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _),
+       [118] = PINGROUP(118, _, _, _, _, _, _, _, _, _),
+       [119] = PINGROUP(119, edp0_hot, edp1_lcd, _, _, _, _, _, _, _),
+       [120] = PINGROUP(120, edp1_hot, edp0_lcd, _, _, _, _, _, _, _),
+       [121] = PINGROUP(121, usb0_phy, _, _, _, _, _, _, _, _),
+       [122] = PINGROUP(122, usb0_dp, _, _, _, _, _, _, _, _),
+       [123] = PINGROUP(123, usb1_phy, _, _, _, _, _, _, _, _),
+       [124] = PINGROUP(124, usb1_dp, _, _, _, _, _, _, _, _),
+       [125] = PINGROUP(125, usb2_phy, _, _, _, _, _, _, _, _),
+       [126] = PINGROUP(126, usb2_dp, _, _, _, _, _, _, _, _),
+       [127] = PINGROUP(127, qspi0_clk, sdc4_clk, _, _, _, _, _, _, _),
+       [128] = PINGROUP(128, qspi00, sdc4_data0, _, _, _, _, _, _, _),
+       [129] = PINGROUP(129, qspi01, sdc4_data1, _, _, _, _, _, _, _),
+       [130] = PINGROUP(130, qspi02, sdc4_data2, _, _, _, _, _, _, _),
+       [131] = PINGROUP(131, qspi03, sdc4_data3, _, _, _, _, _, _, _),
+       [132] = PINGROUP(132, qspi0_cs0, sdc4_cmd, _, _, _, _, _, _, _),
+       [133] = PINGROUP(133, qspi0_cs1, tb_trig, _, _, _, _, _, _, _),
+       [134] = PINGROUP(134, audio_ext, _, _, _, _, _, _, _, _),
+       [135] = PINGROUP(135, i2s0_sck, _, _, _, _, _, _, _, _),
+       [136] = PINGROUP(136, i2s0_data0, _, _, _, _, _, _, _, _),
+       [137] = PINGROUP(137, i2s0_data1, tb_trig, _, _, _, _, _, _, _),
+       [138] = PINGROUP(138, i2s0_ws, _, _, _, _, _, _, _, _),
+       [139] = PINGROUP(139, i2s1_sck, _, _, _, _, _, _, _, _),
+       [140] = PINGROUP(140, i2s1_data0, _, _, _, _, _, _, _, _),
+       [141] = PINGROUP(141, i2s1_ws, _, _, _, _, _, _, _, _),
+       [142] = PINGROUP(142, i2s1_data1, audio_ext, audio_ref, _, _, _, _, _, _),
+       [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
+       [144] = PINGROUP(144, pcie3_clk, _, _, _, _, _, _, _, _),
+       [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
+       [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+       [147] = PINGROUP(147, pcie4_clk, _, _, _, _, _, _, _, _),
+       [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+       [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
+       [150] = PINGROUP(150, pcie5_clk, _, _, _, _, _, _, _, _),
+       [151] = PINGROUP(151, _, _, _, _, _, _, _, _, _),
+       [152] = PINGROUP(152, _, _, _, _, _, _, _, _, _),
+       [153] = PINGROUP(153, pcie6a_clk, _, _, _, _, _, _, _, _),
+       [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _),
+       [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _),
+       [156] = PINGROUP(156, pcie6b_clk, _, _, _, _, _, _, _, _),
+       [157] = PINGROUP(157, _, _, _, _, _, _, _, _, _),
+       [158] = PINGROUP(158, _, _, _, _, _, _, _, _, _),
+       [159] = PINGROUP(159, _, _, _, _, _, _, _, _, _),
+       [160] = PINGROUP(160, RESOUT_GPIO, _, _, _, _, _, _, _, _),
+       [161] = PINGROUP(161, qdss_cti, _, _, _, _, _, _, _, _),
+       [162] = PINGROUP(162, sd_write, qdss_cti, _, _, _, _, _, _, _),
+       [163] = PINGROUP(163, usb0_sbrx, _, _, _, _, _, _, _, _),
+       [164] = PINGROUP(164, usb0_sbtx, _, _, _, _, _, _, _, _),
+       [165] = PINGROUP(165, usb0_sbtx, _, _, _, _, _, _, _, _),
+       [166] = PINGROUP(166, _, _, _, _, _, _, _, _, _),
+       [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _),
+       [168] = PINGROUP(168, eusb0_ac, _, _, _, _, _, _, _, _),
+       [169] = PINGROUP(169, eusb3_ac, _, _, _, _, _, _, _, _),
+       [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _),
+       [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _),
+       [172] = PINGROUP(172, usb1_sbrx, _, _, _, _, _, _, _, _),
+       [173] = PINGROUP(173, usb1_sbtx, _, _, _, _, _, _, _, _),
+       [174] = PINGROUP(174, usb1_sbtx, _, _, _, _, _, _, _, _),
+       [175] = PINGROUP(175, _, _, _, _, _, _, _, _, _),
+       [176] = PINGROUP(176, _, _, _, _, _, _, _, _, _),
+       [177] = PINGROUP(177, eusb1_ac, _, _, _, _, _, _, _, _),
+       [178] = PINGROUP(178, eusb6_ac, _, _, _, _, _, _, _, _),
+       [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _),
+       [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _),
+       [181] = PINGROUP(181, usb2_sbrx, prng_rosc3, phase_flag, _, atest_char, _, _, _, _),
+       [182] = PINGROUP(182, usb2_sbtx, prng_rosc2, phase_flag, _, atest_char3, _, _, _, _),
+       [183] = PINGROUP(183, usb2_sbtx, _, _, _, _, _, _, _, _),
+       [184] = PINGROUP(184, phase_flag, _, atest_char1, _, _, _, _, _, _),
+       [185] = PINGROUP(185, phase_flag, _, atest_char0, _, _, _, _, _, _),
+       [186] = PINGROUP(186, eusb2_ac, prng_rosc0, phase_flag, _, _, _, _, _, _),
+       [187] = PINGROUP(187, eusb5_ac, cri_trng, phase_flag, _, _, _, _, _, _),
+       [188] = PINGROUP(188, prng_rosc1, phase_flag, _, atest_char2, _, _, _, _, _),
+       [189] = PINGROUP(189, _, _, _, _, _, _, _, _, _),
+       [190] = PINGROUP(190, _, _, _, _, _, _, _, _, _),
+       [191] = PINGROUP(191, _, _, _, _, _, _, _, _, _),
+       [192] = PINGROUP(192, _, _, _, _, _, _, _, _, _),
+       [193] = PINGROUP(193, _, _, _, _, _, _, _, _, _),
+       [194] = PINGROUP(194, _, _, _, _, _, _, _, _, _),
+       [195] = PINGROUP(195, _, _, _, _, _, _, _, _, _),
+       [196] = PINGROUP(196, _, _, _, _, _, _, _, _, _),
+       [197] = PINGROUP(197, _, _, _, _, _, _, _, _, _),
+       [198] = PINGROUP(198, _, _, _, _, _, _, _, _, _),
+       [199] = PINGROUP(199, _, _, _, _, _, _, _, _, _),
+       [200] = PINGROUP(200, _, _, _, _, _, _, _, _, _),
+       [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _),
+       [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _),
+       [203] = PINGROUP(203, _, _, _, _, _, _, _, _, _),
+       [204] = PINGROUP(204, _, _, _, _, _, _, _, _, _),
+       [205] = PINGROUP(205, _, _, _, _, _, _, _, _, _),
+       [206] = PINGROUP(206, _, _, _, _, _, _, _, _, _),
+       [207] = PINGROUP(207, _, _, _, _, _, _, _, _, _),
+       [208] = PINGROUP(208, _, _, _, _, _, _, _, _, _),
+       [209] = PINGROUP(209, _, _, _, _, _, _, _, _, _),
+       [210] = PINGROUP(210, _, _, _, _, _, _, _, _, _),
+       [211] = PINGROUP(211, _, _, _, _, _, _, _, _, _),
+       [212] = PINGROUP(212, _, _, _, _, _, _, _, _, _),
+       [213] = PINGROUP(213, _, _, _, _, _, _, _, _, _),
+       [214] = PINGROUP(214, _, _, _, _, _, _, _, _, _),
+       [215] = PINGROUP(215, _, qdss_cti, _, _, _, _, _, _, _),
+       [216] = PINGROUP(216, _, _, _, _, _, _, _, _, _),
+       [217] = PINGROUP(217, _, qdss_cti, _, _, _, _, _, _, _),
+       [218] = PINGROUP(218, _, _, _, _, _, _, _, _, _),
+       [219] = PINGROUP(219, _, qdss_gpio, _, _, _, _, _, _, _),
+       [220] = PINGROUP(220, _, qdss_gpio, _, _, _, _, _, _, _),
+       [221] = PINGROUP(221, _, qdss_gpio, _, _, _, _, _, _, _),
+       [222] = PINGROUP(222, _, qdss_gpio, _, _, _, _, _, _, _),
+       [223] = PINGROUP(223, _, qdss_gpio, _, _, _, _, _, _, _),
+       [224] = PINGROUP(224, _, qdss_gpio, _, _, _, _, _, _, _),
+       [225] = PINGROUP(225, _, qdss_gpio, _, _, _, _, _, _, _),
+       [226] = PINGROUP(226, _, qdss_gpio, _, _, _, _, _, _, _),
+       [227] = PINGROUP(227, _, qdss_gpio, _, _, _, _, _, _, _),
+       [228] = PINGROUP(228, _, qdss_gpio, _, _, _, _, _, _, _),
+       [229] = PINGROUP(229, qdss_gpio, _, _, _, _, _, _, _, _),
+       [230] = PINGROUP(230, qdss_gpio, _, _, _, _, _, _, _, _),
+       [231] = PINGROUP(231, qdss_gpio, _, _, _, _, _, _, _, _),
+       [232] = PINGROUP(232, qdss_gpio, _, _, _, _, _, _, _, _),
+       [233] = PINGROUP(233, qdss_gpio, _, _, _, _, _, _, _, _),
+       [234] = PINGROUP(234, qdss_gpio, _, _, _, _, _, _, _, _),
+       [235] = PINGROUP(235, aon_cci, qdss_gpio, _, _, _, _, _, _, _),
+       [236] = PINGROUP(236, aon_cci, qdss_gpio, _, _, _, _, _, _, _),
+       [237] = PINGROUP(237, _, _, _, _, _, _, _, _, _),
+       [238] = UFS_RESET(ufs_reset, 0x1f9000),
+       [239] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1f2000, 14, 6),
+       [240] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1f2000, 11, 3),
+       [241] = SDC_QDSD_PINGROUP(sdc2_data, 0x1f2000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map x1e80100_pdc_map[] = {
+       { 0, 72 }, { 2, 70 }, { 3, 71 }, { 6, 123 }, { 7, 67 }, { 11, 85 },
+       { 15, 68 }, { 18, 122 }, { 19, 69 }, { 21, 158 }, { 23, 143 }, { 26, 129 },
+       { 27, 144 }, { 28, 77 }, { 29, 78 }, { 30, 92 }, { 32, 145 }, { 33, 115 },
+       { 34, 130 }, { 35, 146 }, { 36, 147 }, { 39, 80 }, { 43, 148 }, { 47, 149 },
+       { 51, 79 }, { 53, 89 }, { 59, 87 }, { 64, 90 }, { 65, 106 }, { 66, 142 },
+       { 67, 88 }, { 71, 91 }, { 75, 152 }, { 79, 153 }, { 80, 125 }, { 81, 128 },
+       { 84, 137 }, { 85, 155 }, { 87, 156 }, { 91, 157 }, { 92, 138 }, { 94, 140 },
+       { 95, 141 }, { 113, 84 }, { 121, 73 }, { 123, 74 }, { 129, 76 }, { 131, 82 },
+       { 134, 83 }, { 141, 93 }, { 144, 94 }, { 147, 96 }, { 148, 97 }, { 150, 102 },
+       { 151, 103 }, { 153, 104 }, { 156, 105 }, { 157, 107 }, { 163, 98 }, { 166, 112 },
+       { 172, 99 }, { 181, 101 }, { 184, 116 }, { 193, 40 }, { 193, 117 }, { 196, 108 },
+       { 203, 133 }, { 212, 120 }, { 213, 150 }, { 214, 121 }, { 215, 118 }, { 217, 109 },
+       { 220, 110 }, { 221, 111 }, { 222, 124 }, { 224, 131 }, { 225, 132 },
+};
+
+static const struct msm_pinctrl_soc_data x1e80100_pinctrl = {
+       .pins = x1e80100_pins,
+       .npins = ARRAY_SIZE(x1e80100_pins),
+       .functions = x1e80100_functions,
+       .nfunctions = ARRAY_SIZE(x1e80100_functions),
+       .groups = x1e80100_groups,
+       .ngroups = ARRAY_SIZE(x1e80100_groups),
+       .ngpios = 239,
+       .wakeirq_map = x1e80100_pdc_map,
+       .nwakeirq_map = ARRAY_SIZE(x1e80100_pdc_map),
+       .egpio_func = 9,
+};
+
+static int x1e80100_pinctrl_probe(struct platform_device *pdev)
+{
+       return msm_pinctrl_probe(pdev, &x1e80100_pinctrl);
+}
+
+static const struct of_device_id x1e80100_pinctrl_of_match[] = {
+       { .compatible = "qcom,x1e80100-tlmm", },
+       { },
+};
+
+static struct platform_driver x1e80100_pinctrl_driver = {
+       .driver = {
+               .name = "x1e80100-tlmm",
+               .of_match_table = x1e80100_pinctrl_of_match,
+       },
+       .probe = x1e80100_pinctrl_probe,
+       .remove_new = msm_pinctrl_remove,
+};
+
+static int __init x1e80100_pinctrl_init(void)
+{
+       return platform_driver_register(&x1e80100_pinctrl_driver);
+}
+arch_initcall(x1e80100_pinctrl_init);
+
+static void __exit x1e80100_pinctrl_exit(void)
+{
+       platform_driver_unregister(&x1e80100_pinctrl_driver);
+}
+module_exit(x1e80100_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI X1E80100 TLMM pinctrl driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, x1e80100_pinctrl_of_match);
index f43f1196fea861f3504fb3eff7888099eac4018b..edcbe7c9ad565e858955fac8b5dd2d78faa445d1 100644 (file)
@@ -1131,7 +1131,7 @@ static int rza1_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
                return -EINVAL;
 
        mux_confs = (struct rza1_mux_conf *)func->data;
-       for (i = 0; i < grp->num_pins; ++i) {
+       for (i = 0; i < grp->grp.npins; ++i) {
                int ret;
 
                ret = rza1_pin_mux_single(rza1_pctl, &mux_confs[i]);
index 990b96d459671bc7824f90dd1a787e6aa4d2fced..af689d7c117f35482487a0e2fa1f5bc45e5fa262 100644 (file)
@@ -447,15 +447,15 @@ static int rza2_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
 
        psel_val = func->data;
 
-       for (i = 0; i < grp->num_pins; ++i) {
+       for (i = 0; i < grp->grp.npins; ++i) {
                dev_dbg(priv->dev, "Setting P%c_%d to PSEL=%d\n",
-                       port_names[RZA2_PIN_ID_TO_PORT(grp->pins[i])],
-                       RZA2_PIN_ID_TO_PIN(grp->pins[i]),
+                       port_names[RZA2_PIN_ID_TO_PORT(grp->grp.pins[i])],
+                       RZA2_PIN_ID_TO_PIN(grp->grp.pins[i]),
                        psel_val[i]);
                rza2_set_pin_function(
                        priv->base,
-                       RZA2_PIN_ID_TO_PORT(grp->pins[i]),
-                       RZA2_PIN_ID_TO_PIN(grp->pins[i]),
+                       RZA2_PIN_ID_TO_PORT(grp->grp.pins[i]),
+                       RZA2_PIN_ID_TO_PIN(grp->grp.pins[i]),
                        psel_val[i]);
        }
 
index 9de350ad7e7d5d2d31eb183cbdd38ee012de809f..80fb5011c7bbc715cabd5b8f852f81c652ee8f0f 100644 (file)
@@ -57,6 +57,7 @@
 #define PIN_CFG_FILCLKSEL              BIT(12)
 #define PIN_CFG_IOLH_C                 BIT(13)
 #define PIN_CFG_SOFT_PS                        BIT(14)
+#define PIN_CFG_OEN                    BIT(15)
 
 #define RZG2L_MPXED_COMMON_PIN_FUNCS(group) \
                                        (PIN_CFG_IOLH_##group | \
 #define IEN(off)               (0x1800 + (off) * 8)
 #define ISEL(off)              (0x2C00 + (off) * 8)
 #define SD_CH(off, ch)         ((off) + (ch) * 4)
+#define ETH_POC(off, ch)       ((off) + (ch) * 4)
 #define QSPI                   (0x3008)
+#define ETH_MODE               (0x3018)
 
+#define PVDD_2500              2       /* I/O domain voltage 2.5V */
 #define PVDD_1800              1       /* I/O domain voltage <= 1.8V */
 #define PVDD_3300              0       /* I/O domain voltage >= 3.3V */
 
 #define PWPR_PFCWE             BIT(6)  /* PFC Register Write Enable */
 
 #define PM_MASK                        0x03
-#define PVDD_MASK              0x01
 #define PFC_MASK               0x07
 #define IEN_MASK               0x01
 #define IOLH_MASK              0x03
  * struct rzg2l_register_offsets - specific register offsets
  * @pwpr: PWPR register offset
  * @sd_ch: SD_CH register offset
+ * @eth_poc: ETH_POC register offset
  */
 struct rzg2l_register_offsets {
        u16 pwpr;
        u16 sd_ch;
+       u16 eth_poc;
 };
 
 /**
@@ -167,6 +172,8 @@ enum rzg2l_iolh_index {
  * @iolh_groupb_oi: IOLH group B output impedance specific values
  * @drive_strength_ua: drive strength in uA is supported (otherwise mA is supported)
  * @func_base: base number for port function (see register PFC)
+ * @oen_max_pin: the maximum pin number supporting output enable
+ * @oen_max_port: the maximum port number supporting output enable
  */
 struct rzg2l_hwcfg {
        const struct rzg2l_register_offsets regs;
@@ -176,6 +183,8 @@ struct rzg2l_hwcfg {
        u16 iolh_groupb_oi[4];
        bool drive_strength_ua;
        u8 func_base;
+       u8 oen_max_pin;
+       u8 oen_max_port;
 };
 
 struct rzg2l_dedicated_configs {
@@ -273,7 +282,7 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
        struct function_desc *func;
        unsigned int i, *psel_val;
        struct group_desc *group;
-       int *pins;
+       const unsigned int *pins;
 
        func = pinmux_generic_get_function(pctldev, func_selector);
        if (!func)
@@ -283,9 +292,9 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
                return -EINVAL;
 
        psel_val = func->data;
-       pins = group->pins;
+       pins = group->grp.pins;
 
-       for (i = 0; i < group->num_pins; i++) {
+       for (i = 0; i < group->grp.npins; i++) {
                unsigned int *pin_data = pctrl->desc.pins[pins[i]].drv_data;
                u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
                u32 pin = RZG2L_PIN_ID_TO_PIN(pins[i]);
@@ -376,8 +385,11 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
                goto done;
        }
 
-       if (num_pinmux)
+       if (num_pinmux) {
                nmaps += 1;
+               if (num_configs)
+                       nmaps += 1;
+       }
 
        if (num_pins)
                nmaps += num_pins;
@@ -462,6 +474,16 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
        maps[idx].data.mux.function = name;
        idx++;
 
+       if (num_configs) {
+               ret = rzg2l_map_add_config(&maps[idx], name,
+                                          PIN_MAP_TYPE_CONFIGS_GROUP,
+                                          configs, num_configs);
+               if (ret < 0)
+                       goto remove_group;
+
+               idx++;
+       }
+
        dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
        ret = 0;
        goto done;
@@ -591,6 +613,10 @@ static int rzg2l_caps_to_pwr_reg(const struct rzg2l_register_offsets *regs, u32
                return SD_CH(regs->sd_ch, 0);
        if (caps & PIN_CFG_IO_VMC_SD1)
                return SD_CH(regs->sd_ch, 1);
+       if (caps & PIN_CFG_IO_VMC_ETH0)
+               return ETH_POC(regs->eth_poc, 0);
+       if (caps & PIN_CFG_IO_VMC_ETH1)
+               return ETH_POC(regs->eth_poc, 1);
        if (caps & PIN_CFG_IO_VMC_QSPI)
                return QSPI;
 
@@ -602,6 +628,7 @@ static int rzg2l_get_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps
        const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
        const struct rzg2l_register_offsets *regs = &hwcfg->regs;
        int pwr_reg;
+       u8 val;
 
        if (caps & PIN_CFG_SOFT_PS)
                return pctrl->settings[pin].power_source;
@@ -610,7 +637,18 @@ static int rzg2l_get_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps
        if (pwr_reg < 0)
                return pwr_reg;
 
-       return (readl(pctrl->base + pwr_reg) & PVDD_MASK) ? 1800 : 3300;
+       val = readb(pctrl->base + pwr_reg);
+       switch (val) {
+       case PVDD_1800:
+               return 1800;
+       case PVDD_2500:
+               return 2500;
+       case PVDD_3300:
+               return 3300;
+       default:
+               /* Should not happen. */
+               return -EINVAL;
+       }
 }
 
 static int rzg2l_set_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps, u32 ps)
@@ -618,17 +656,32 @@ static int rzg2l_set_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps
        const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
        const struct rzg2l_register_offsets *regs = &hwcfg->regs;
        int pwr_reg;
+       u8 val;
 
        if (caps & PIN_CFG_SOFT_PS) {
                pctrl->settings[pin].power_source = ps;
                return 0;
        }
 
+       switch (ps) {
+       case 1800:
+               val = PVDD_1800;
+               break;
+       case 2500:
+               val = PVDD_2500;
+               break;
+       case 3300:
+               val = PVDD_3300;
+               break;
+       default:
+               return -EINVAL;
+       }
+
        pwr_reg = rzg2l_caps_to_pwr_reg(regs, caps);
        if (pwr_reg < 0)
                return pwr_reg;
 
-       writel((ps == 1800) ? PVDD_1800 : PVDD_3300, pctrl->base + pwr_reg);
+       writeb(val, pctrl->base + pwr_reg);
        pctrl->settings[pin].power_source = ps;
 
        return 0;
@@ -735,6 +788,66 @@ static bool rzg2l_ds_is_supported(struct rzg2l_pinctrl *pctrl, u32 caps,
        return false;
 }
 
+static bool rzg2l_oen_is_supported(u32 caps, u8 pin, u8 max_pin)
+{
+       if (!(caps & PIN_CFG_OEN))
+               return false;
+
+       if (pin > max_pin)
+               return false;
+
+       return true;
+}
+
+static u8 rzg2l_pin_to_oen_bit(u32 offset, u8 pin, u8 max_port)
+{
+       if (pin)
+               pin *= 2;
+
+       if (offset / RZG2L_PINS_PER_PORT == max_port)
+               pin += 1;
+
+       return pin;
+}
+
+static u32 rzg2l_read_oen(struct rzg2l_pinctrl *pctrl, u32 caps, u32 offset, u8 pin)
+{
+       u8 max_port = pctrl->data->hwcfg->oen_max_port;
+       u8 max_pin = pctrl->data->hwcfg->oen_max_pin;
+       u8 bit;
+
+       if (!rzg2l_oen_is_supported(caps, pin, max_pin))
+               return 0;
+
+       bit = rzg2l_pin_to_oen_bit(offset, pin, max_port);
+
+       return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
+}
+
+static int rzg2l_write_oen(struct rzg2l_pinctrl *pctrl, u32 caps, u32 offset, u8 pin, u8 oen)
+{
+       u8 max_port = pctrl->data->hwcfg->oen_max_port;
+       u8 max_pin = pctrl->data->hwcfg->oen_max_pin;
+       unsigned long flags;
+       u8 val, bit;
+
+       if (!rzg2l_oen_is_supported(caps, pin, max_pin))
+               return -EINVAL;
+
+       bit = rzg2l_pin_to_oen_bit(offset, pin, max_port);
+
+       spin_lock_irqsave(&pctrl->lock, flags);
+       val = readb(pctrl->base + ETH_MODE);
+       if (oen)
+               val &= ~BIT(bit);
+       else
+               val |= BIT(bit);
+       writeb(val, pctrl->base + ETH_MODE);
+       spin_unlock_irqrestore(&pctrl->lock, flags);
+
+       return 0;
+}
+
 static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
                                     unsigned int _pin,
                                     unsigned long *config)
@@ -772,6 +885,12 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
                        return -EINVAL;
                break;
 
+       case PIN_CONFIG_OUTPUT_ENABLE:
+               arg = rzg2l_read_oen(pctrl, cfg, _pin, bit);
+               if (!arg)
+                       return -EINVAL;
+               break;
+
        case PIN_CONFIG_POWER_SOURCE:
                ret = rzg2l_get_power_source(pctrl, _pin, cfg);
                if (ret < 0)
@@ -842,7 +961,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
        struct rzg2l_pinctrl_pin_settings settings = pctrl->settings[_pin];
        unsigned int *pin_data = pin->drv_data;
        enum pin_config_param param;
-       unsigned int i;
+       unsigned int i, arg, index;
        u32 cfg, off;
        int ret;
        u8 bit;
@@ -864,24 +983,28 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
        for (i = 0; i < num_configs; i++) {
                param = pinconf_to_config_param(_configs[i]);
                switch (param) {
-               case PIN_CONFIG_INPUT_ENABLE: {
-                       unsigned int arg =
-                                       pinconf_to_config_argument(_configs[i]);
+               case PIN_CONFIG_INPUT_ENABLE:
+                       arg = pinconf_to_config_argument(_configs[i]);
 
                        if (!(cfg & PIN_CFG_IEN))
                                return -EINVAL;
 
                        rzg2l_rmw_pin_config(pctrl, IEN(off), bit, IEN_MASK, !!arg);
                        break;
-               }
+
+               case PIN_CONFIG_OUTPUT_ENABLE:
+                       arg = pinconf_to_config_argument(_configs[i]);
+                       ret = rzg2l_write_oen(pctrl, cfg, _pin, bit, !!arg);
+                       if (ret)
+                               return ret;
+                       break;
 
                case PIN_CONFIG_POWER_SOURCE:
                        settings.power_source = pinconf_to_config_argument(_configs[i]);
                        break;
 
-               case PIN_CONFIG_DRIVE_STRENGTH: {
-                       unsigned int arg = pinconf_to_config_argument(_configs[i]);
-                       unsigned int index;
+               case PIN_CONFIG_DRIVE_STRENGTH:
+                       arg = pinconf_to_config_argument(_configs[i]);
 
                        if (!(cfg & PIN_CFG_IOLH_A) || hwcfg->drive_strength_ua)
                                return -EINVAL;
@@ -896,7 +1019,6 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
 
                        rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, index);
                        break;
-               }
 
                case PIN_CONFIG_DRIVE_STRENGTH_UA:
                        if (!(cfg & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C)) ||
@@ -906,9 +1028,8 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
                        settings.drive_strength_ua = pinconf_to_config_argument(_configs[i]);
                        break;
 
-               case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: {
-                       unsigned int arg = pinconf_to_config_argument(_configs[i]);
-                       unsigned int index;
+               case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS:
+                       arg = pinconf_to_config_argument(_configs[i]);
 
                        if (!(cfg & PIN_CFG_IOLH_B) || !hwcfg->iolh_groupb_oi[0])
                                return -EINVAL;
@@ -922,7 +1043,6 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
 
                        rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, index);
                        break;
-               }
 
                default:
                        return -EOPNOTSUPP;
@@ -1323,7 +1443,8 @@ static const u32 r9a07g043_gpio_configs[] = {
 static const u32 r9a08g045_gpio_configs[] = {
        RZG2L_GPIO_PORT_PACK(4, 0x20, RZG3S_MPXED_PIN_FUNCS(A)),                        /* P0  */
        RZG2L_GPIO_PORT_PACK(5, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
-                                                               PIN_CFG_IO_VMC_ETH0)),  /* P1 */
+                                                               PIN_CFG_IO_VMC_ETH0)) |
+                                     PIN_CFG_OEN | PIN_CFG_IEN,                        /* P1 */
        RZG2L_GPIO_PORT_PACK(4, 0x31, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
                                                                PIN_CFG_IO_VMC_ETH0)),  /* P2 */
        RZG2L_GPIO_PORT_PACK(4, 0x32, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
@@ -1333,7 +1454,8 @@ static const u32 r9a08g045_gpio_configs[] = {
        RZG2L_GPIO_PORT_PACK(5, 0x21, RZG3S_MPXED_PIN_FUNCS(A)),                        /* P5  */
        RZG2L_GPIO_PORT_PACK(5, 0x22, RZG3S_MPXED_PIN_FUNCS(A)),                        /* P6  */
        RZG2L_GPIO_PORT_PACK(5, 0x34, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
-                                                               PIN_CFG_IO_VMC_ETH1)),  /* P7 */
+                                                               PIN_CFG_IO_VMC_ETH1)) |
+                                     PIN_CFG_OEN | PIN_CFG_IEN,                        /* P7 */
        RZG2L_GPIO_PORT_PACK(5, 0x35, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
                                                                PIN_CFG_IO_VMC_ETH1)),  /* P8 */
        RZG2L_GPIO_PORT_PACK(4, 0x36, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
@@ -1576,6 +1698,7 @@ static const struct irq_chip rzg2l_gpio_irqchip = {
        .irq_set_type = rzg2l_gpio_irq_set_type,
        .irq_eoi = rzg2l_gpio_irqc_eoi,
        .irq_print_chip = rzg2l_gpio_irq_print_chip,
+       .irq_set_affinity = irq_chip_set_affinity_parent,
        .flags = IRQCHIP_IMMUTABLE,
        GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
@@ -1877,6 +2000,7 @@ static const struct rzg2l_hwcfg rzg2l_hwcfg = {
        .regs = {
                .pwpr = 0x3014,
                .sd_ch = 0x3000,
+               .eth_poc = 0x300c,
        },
        .iolh_groupa_ua = {
                /* 3v3 power source */
@@ -1889,6 +2013,7 @@ static const struct rzg2l_hwcfg rzg3s_hwcfg = {
        .regs = {
                .pwpr = 0x3000,
                .sd_ch = 0x3004,
+               .eth_poc = 0x3010,
        },
        .iolh_groupa_ua = {
                /* 1v8 power source */
@@ -1912,6 +2037,8 @@ static const struct rzg2l_hwcfg rzg3s_hwcfg = {
        },
        .drive_strength_ua = true,
        .func_base = 1,
+       .oen_max_pin = 1, /* Pin 1 of P0 and P7 is the maximum OEN pin. */
+       .oen_max_port = 7, /* P7_1 is the maximum OEN port. */
 };
 
 static struct rzg2l_pinctrl_data r9a07g043_data = {
index 21d7d5ac8c4a711f666033fcd077a63450d7feaa..0767a5ac23e08027e4335f14cdff48f505fb8db4 100644 (file)
@@ -165,7 +165,7 @@ static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev,
        struct function_desc *func;
        unsigned int i, *psel_val;
        struct group_desc *group;
-       int *pins;
+       const unsigned int *pins;
 
        func = pinmux_generic_get_function(pctldev, func_selector);
        if (!func)
@@ -175,9 +175,9 @@ static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev,
                return -EINVAL;
 
        psel_val = func->data;
-       pins = group->pins;
+       pins = group->grp.pins;
 
-       for (i = 0; i < group->num_pins; i++) {
+       for (i = 0; i < group->grp.npins; i++) {
                dev_dbg(pctrl->dev, "port:%u pin: %u PSEL:%u\n",
                        RZV2M_PIN_ID_TO_PORT(pins[i]), RZV2M_PIN_ID_TO_PIN(pins[i]),
                        psel_val[i]);
index cb965cf9370575027126fc8a5120ed0e9c65e22b..5480e0884abecf633b9850582fc4cb916ea00ae0 100644 (file)
@@ -726,6 +726,146 @@ const struct samsung_pinctrl_of_match_data exynosautov9_of_data __initconst = {
        .num_ctrl       = ARRAY_SIZE(exynosautov9_pin_ctrl),
 };
 
+/* pin banks of exynosautov920 pin-controller 0 (ALIVE) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks0[] = {
+       EXYNOSV920_PIN_BANK_EINTW(8, 0x0000, "gpa0", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTW(2, 0x1000, "gpa1", 0x18, 0x20, 0x24),
+       EXYNOS850_PIN_BANK_EINTN(2, 0x2000, "gpq0"),
+};
+
+/* pin banks of exynosautov920 pin-controller 1 (AUD) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks1[] = {
+       EXYNOSV920_PIN_BANK_EINTG(7, 0x0000, "gpb0", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(6, 0x1000, "gpb1", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x2000, "gpb2", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x3000, "gpb3", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x4000, "gpb4", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(5, 0x5000, "gpb5", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(5, 0x6000, "gpb6", 0x18, 0x24, 0x28),
+};
+
+/* pin banks of exynosautov920 pin-controller 2 (HSI0) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks2[] = {
+       EXYNOSV920_PIN_BANK_EINTG(6, 0x0000, "gph0", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(2, 0x1000, "gph1", 0x18, 0x20, 0x24),
+};
+
+/* pin banks of exynosautov920 pin-controller 3 (HSI1) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks3[] = {
+       EXYNOSV920_PIN_BANK_EINTG(7, 0x000, "gph8", 0x18, 0x24, 0x28),
+};
+
+/* pin banks of exynosautov920 pin-controller 4 (HSI2) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks4[] = {
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x0000, "gph3", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(7, 0x1000, "gph4", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x2000, "gph5", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(7, 0x3000, "gph6", 0x18, 0x24, 0x28),
+};
+
+/* pin banks of exynosautov920 pin-controller 5 (HSI2UFS) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks5[] = {
+       EXYNOSV920_PIN_BANK_EINTG(4, 0x000, "gph2", 0x18, 0x20, 0x24),
+};
+
+/* pin banks of exynosautov920 pin-controller 6 (PERIC0) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks6[] = {
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x0000, "gpp0", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x1000, "gpp1", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x2000, "gpp2", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(5, 0x3000, "gpg0", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x4000, "gpp3", 0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(4, 0x5000, "gpp4", 0x18, 0x20, 0x24),
+       EXYNOSV920_PIN_BANK_EINTG(4, 0x6000, "gpg2", 0x18, 0x20, 0x24),
+       EXYNOSV920_PIN_BANK_EINTG(4, 0x7000, "gpg5", 0x18, 0x20, 0x24),
+       EXYNOSV920_PIN_BANK_EINTG(3, 0x8000, "gpg3", 0x18, 0x20, 0x24),
+       EXYNOSV920_PIN_BANK_EINTG(5, 0x9000, "gpg4", 0x18, 0x24, 0x28),
+};
+
+/* pin banks of exynosautov920 pin-controller 7 (PERIC1) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks7[] = {
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x0000, "gpp5",  0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(5, 0x1000, "gpp6",  0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(4, 0x2000, "gpp10", 0x18, 0x20, 0x24),
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x3000, "gpp7",  0x18, 0x24, 0x28),
+       EXYNOSV920_PIN_BANK_EINTG(4, 0x4000, "gpp8",  0x18, 0x20, 0x24),
+       EXYNOSV920_PIN_BANK_EINTG(4, 0x5000, "gpp11", 0x18, 0x20, 0x24),
+       EXYNOSV920_PIN_BANK_EINTG(4, 0x6000, "gpp9",  0x18, 0x20, 0x24),
+       EXYNOSV920_PIN_BANK_EINTG(4, 0x7000, "gpp12", 0x18, 0x20, 0x24),
+       EXYNOSV920_PIN_BANK_EINTG(8, 0x8000, "gpg1",  0x18, 0x24, 0x28),
+};
+
+static const struct samsung_retention_data exynosautov920_retention_data __initconst = {
+       .regs    = NULL,
+       .nr_regs = 0,
+       .value   = 0,
+       .refcnt  = &exynos_shared_retention_refcnt,
+       .init    = exynos_retention_init,
+};
+
+static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = {
+       {
+               /* pin-controller instance 0 ALIVE data */
+               .pin_banks      = exynosautov920_pin_banks0,
+               .nr_banks       = ARRAY_SIZE(exynosautov920_pin_banks0),
+               .eint_wkup_init = exynos_eint_wkup_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+               .retention_data = &exynosautov920_retention_data,
+       }, {
+               /* pin-controller instance 1 AUD data */
+               .pin_banks      = exynosautov920_pin_banks1,
+               .nr_banks       = ARRAY_SIZE(exynosautov920_pin_banks1),
+       }, {
+               /* pin-controller instance 2 HSI0 data */
+               .pin_banks      = exynosautov920_pin_banks2,
+               .nr_banks       = ARRAY_SIZE(exynosautov920_pin_banks2),
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       }, {
+               /* pin-controller instance 3 HSI1 data */
+               .pin_banks      = exynosautov920_pin_banks3,
+               .nr_banks       = ARRAY_SIZE(exynosautov920_pin_banks3),
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       }, {
+               /* pin-controller instance 4 HSI2 data */
+               .pin_banks      = exynosautov920_pin_banks4,
+               .nr_banks       = ARRAY_SIZE(exynosautov920_pin_banks4),
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       }, {
+               /* pin-controller instance 5 HSI2UFS data */
+               .pin_banks      = exynosautov920_pin_banks5,
+               .nr_banks       = ARRAY_SIZE(exynosautov920_pin_banks5),
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       }, {
+               /* pin-controller instance 6 PERIC0 data */
+               .pin_banks      = exynosautov920_pin_banks6,
+               .nr_banks       = ARRAY_SIZE(exynosautov920_pin_banks6),
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       }, {
+               /* pin-controller instance 7 PERIC1 data */
+               .pin_banks      = exynosautov920_pin_banks7,
+               .nr_banks       = ARRAY_SIZE(exynosautov920_pin_banks7),
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       },
+};
+
+const struct samsung_pinctrl_of_match_data exynosautov920_of_data __initconst = {
+       .ctrl           = exynosautov920_pin_ctrl,
+       .num_ctrl       = ARRAY_SIZE(exynosautov920_pin_ctrl),
+};
+
 /*
  * Pinctrl driver data for Tesla FSD SoC. FSD SoC includes three
  * gpio/pin-mux/pinconfig controllers.
@@ -796,3 +936,143 @@ const struct samsung_pinctrl_of_match_data fsd_of_data __initconst = {
        .ctrl           = fsd_pin_ctrl,
        .num_ctrl       = ARRAY_SIZE(fsd_pin_ctrl),
 };
+
+/* pin banks of gs101 pin-controller (ALIVE) */
+static const struct samsung_pin_bank_data gs101_pin_alive[] = {
+       EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00),
+       EXYNOS850_PIN_BANK_EINTW(7, 0x20, "gpa1", 0x04),
+       EXYNOS850_PIN_BANK_EINTW(5, 0x40, "gpa2", 0x08),
+       EXYNOS850_PIN_BANK_EINTW(4, 0x60, "gpa3", 0x0c),
+       EXYNOS850_PIN_BANK_EINTW(4, 0x80, "gpa4", 0x10),
+       EXYNOS850_PIN_BANK_EINTW(7, 0xa0, "gpa5", 0x14),
+       EXYNOS850_PIN_BANK_EINTW(8, 0xc0, "gpa9", 0x18),
+       EXYNOS850_PIN_BANK_EINTW(2, 0xe0, "gpa10", 0x1c),
+};
+
+/* pin banks of gs101 pin-controller (FAR_ALIVE) */
+static const struct samsung_pin_bank_data gs101_pin_far_alive[] = {
+       EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa6", 0x00),
+       EXYNOS850_PIN_BANK_EINTW(4, 0x20, "gpa7", 0x04),
+       EXYNOS850_PIN_BANK_EINTW(8, 0x40, "gpa8", 0x08),
+       EXYNOS850_PIN_BANK_EINTW(2, 0x60, "gpa11", 0x0c),
+};
+
+/* pin banks of gs101 pin-controller (GSACORE) */
+static const struct samsung_pin_bank_data gs101_pin_gsacore[] = {
+       EXYNOS850_PIN_BANK_EINTG(2, 0x0, "gps0", 0x00),
+       EXYNOS850_PIN_BANK_EINTG(8, 0x20, "gps1", 0x04),
+       EXYNOS850_PIN_BANK_EINTG(3, 0x40, "gps2", 0x08),
+};
+
+/* pin banks of gs101 pin-controller (GSACTRL) */
+static const struct samsung_pin_bank_data gs101_pin_gsactrl[] = {
+       EXYNOS850_PIN_BANK_EINTW(6, 0x0, "gps3", 0x00),
+};
+
+/* pin banks of gs101 pin-controller (PERIC0) */
+static const struct samsung_pin_bank_data gs101_pin_peric0[] = {
+       EXYNOS850_PIN_BANK_EINTG(5, 0x0, "gpp0", 0x00),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08),
+       EXYNOS850_PIN_BANK_EINTG(2, 0x60, "gpp3", 0x0c),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10),
+       EXYNOS850_PIN_BANK_EINTG(2, 0xa0, "gpp5", 0x14),
+       EXYNOS850_PIN_BANK_EINTG(4, 0xc0, "gpp6", 0x18),
+       EXYNOS850_PIN_BANK_EINTG(2, 0xe0, "gpp7", 0x1c),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x100, "gpp8", 0x20),
+       EXYNOS850_PIN_BANK_EINTG(2, 0x120, "gpp9", 0x24),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x140, "gpp10", 0x28),
+       EXYNOS850_PIN_BANK_EINTG(2, 0x160, "gpp11", 0x2c),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x180, "gpp12", 0x30),
+       EXYNOS850_PIN_BANK_EINTG(2, 0x1a0, "gpp13", 0x34),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x1c0, "gpp14", 0x38),
+       EXYNOS850_PIN_BANK_EINTG(2, 0x1e0, "gpp15", 0x3c),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x200, "gpp16", 0x40),
+       EXYNOS850_PIN_BANK_EINTG(2, 0x220, "gpp17", 0x44),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x240, "gpp18", 0x48),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x260, "gpp19", 0x4c),
+};
+
+/* pin banks of gs101 pin-controller (PERIC1) */
+static const struct samsung_pin_bank_data gs101_pin_peric1[] = {
+       EXYNOS850_PIN_BANK_EINTG(8, 0x0, "gpp20", 0x00),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp21", 0x04),
+       EXYNOS850_PIN_BANK_EINTG(2, 0x40, "gpp22", 0x08),
+       EXYNOS850_PIN_BANK_EINTG(8, 0x60, "gpp23", 0x0c),
+       EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp24", 0x10),
+       EXYNOS850_PIN_BANK_EINTG(4, 0xa0, "gpp25", 0x14),
+       EXYNOS850_PIN_BANK_EINTG(5, 0xc0, "gpp26", 0x18),
+       EXYNOS850_PIN_BANK_EINTG(4, 0xe0, "gpp27", 0x1c),
+};
+
+/* pin banks of gs101 pin-controller (HSI1) */
+static const struct samsung_pin_bank_data gs101_pin_hsi1[] = {
+       EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph0", 0x00),
+       EXYNOS850_PIN_BANK_EINTG(7, 0x20, "gph1", 0x04),
+};
+
+/* pin banks of gs101 pin-controller (HSI2) */
+static const struct samsung_pin_bank_data gs101_pin_hsi2[] = {
+       EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph2", 0x00),
+       EXYNOS850_PIN_BANK_EINTG(2, 0x20, "gph3", 0x04),
+       EXYNOS850_PIN_BANK_EINTG(6, 0x40, "gph4", 0x08),
+};
+
+static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
+       {
+               /* pin banks of gs101 pin-controller (ALIVE) */
+               .pin_banks      = gs101_pin_alive,
+               .nr_banks       = ARRAY_SIZE(gs101_pin_alive),
+               .eint_wkup_init = exynos_eint_wkup_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       }, {
+               /* pin banks of gs101 pin-controller (FAR_ALIVE) */
+               .pin_banks      = gs101_pin_far_alive,
+               .nr_banks       = ARRAY_SIZE(gs101_pin_far_alive),
+               .eint_wkup_init = exynos_eint_wkup_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       }, {
+               /* pin banks of gs101 pin-controller (GSACORE) */
+               .pin_banks      = gs101_pin_gsacore,
+               .nr_banks       = ARRAY_SIZE(gs101_pin_gsacore),
+       }, {
+               /* pin banks of gs101 pin-controller (GSACTRL) */
+               .pin_banks      = gs101_pin_gsactrl,
+               .nr_banks       = ARRAY_SIZE(gs101_pin_gsactrl),
+       }, {
+               /* pin banks of gs101 pin-controller (PERIC0) */
+               .pin_banks      = gs101_pin_peric0,
+               .nr_banks       = ARRAY_SIZE(gs101_pin_peric0),
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       }, {
+               /* pin banks of gs101 pin-controller (PERIC1) */
+               .pin_banks      = gs101_pin_peric1,
+               .nr_banks       = ARRAY_SIZE(gs101_pin_peric1),
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume = exynos_pinctrl_resume,
+       }, {
+               /* pin banks of gs101 pin-controller (HSI1) */
+               .pin_banks      = gs101_pin_hsi1,
+               .nr_banks       = ARRAY_SIZE(gs101_pin_hsi1),
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       }, {
+               /* pin banks of gs101 pin-controller (HSI2) */
+               .pin_banks      = gs101_pin_hsi2,
+               .nr_banks       = ARRAY_SIZE(gs101_pin_hsi2),
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .suspend        = exynos_pinctrl_suspend,
+               .resume         = exynos_pinctrl_resume,
+       },
+};
+
+const struct samsung_pinctrl_of_match_data gs101_of_data __initconst = {
+       .ctrl           = gs101_pin_ctrl,
+       .num_ctrl       = ARRAY_SIZE(gs101_pin_ctrl),
+};
index 6b58ec84e34b1c14cc9957da1af208d099e40558..871c1eb46ddfd0417a0161912ef2bdc58abc1969 100644 (file)
@@ -52,10 +52,15 @@ static void exynos_irq_mask(struct irq_data *irqd)
        struct irq_chip *chip = irq_data_get_irq_chip(irqd);
        struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
        struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
-       unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
+       unsigned long reg_mask;
        unsigned int mask;
        unsigned long flags;
 
+       if (bank->eint_mask_offset)
+               reg_mask = bank->pctl_offset + bank->eint_mask_offset;
+       else
+               reg_mask = our_chip->eint_mask + bank->eint_offset;
+
        raw_spin_lock_irqsave(&bank->slock, flags);
 
        mask = readl(bank->eint_base + reg_mask);
@@ -70,7 +75,12 @@ static void exynos_irq_ack(struct irq_data *irqd)
        struct irq_chip *chip = irq_data_get_irq_chip(irqd);
        struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
        struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
-       unsigned long reg_pend = our_chip->eint_pend + bank->eint_offset;
+       unsigned long reg_pend;
+
+       if (bank->eint_pend_offset)
+               reg_pend = bank->pctl_offset + bank->eint_pend_offset;
+       else
+               reg_pend = our_chip->eint_pend + bank->eint_offset;
 
        writel(1 << irqd->hwirq, bank->eint_base + reg_pend);
 }
@@ -80,7 +90,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
        struct irq_chip *chip = irq_data_get_irq_chip(irqd);
        struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
        struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
-       unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
+       unsigned long reg_mask;
        unsigned int mask;
        unsigned long flags;
 
@@ -95,6 +105,11 @@ static void exynos_irq_unmask(struct irq_data *irqd)
        if (irqd_get_trigger_type(irqd) & IRQ_TYPE_LEVEL_MASK)
                exynos_irq_ack(irqd);
 
+       if (bank->eint_mask_offset)
+               reg_mask = bank->pctl_offset + bank->eint_mask_offset;
+       else
+               reg_mask = our_chip->eint_mask + bank->eint_offset;
+
        raw_spin_lock_irqsave(&bank->slock, flags);
 
        mask = readl(bank->eint_base + reg_mask);
@@ -111,7 +126,7 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
        struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
        unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
        unsigned int con, trig_type;
-       unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
+       unsigned long reg_con;
 
        switch (type) {
        case IRQ_TYPE_EDGE_RISING:
@@ -139,6 +154,11 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
        else
                irq_set_handler_locked(irqd, handle_level_irq);
 
+       if (bank->eint_con_offset)
+               reg_con = bank->pctl_offset + bank->eint_con_offset;
+       else
+               reg_con = our_chip->eint_con + bank->eint_offset;
+
        con = readl(bank->eint_base + reg_con);
        con &= ~(EXYNOS_EINT_CON_MASK << shift);
        con |= trig_type << shift;
@@ -147,6 +167,19 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
        return 0;
 }
 
+static int exynos_irq_set_affinity(struct irq_data *irqd,
+                                  const struct cpumask *dest, bool force)
+{
+       struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+       struct samsung_pinctrl_drv_data *d = bank->drvdata;
+       struct irq_data *parent = irq_get_irq_data(d->irq);
+
+       if (parent)
+               return parent->chip->irq_set_affinity(parent, dest, force);
+
+       return -EINVAL;
+}
+
 static int exynos_irq_request_resources(struct irq_data *irqd)
 {
        struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
@@ -212,6 +245,7 @@ static const struct exynos_irq_chip exynos_gpio_irq_chip __initconst = {
                .irq_mask = exynos_irq_mask,
                .irq_ack = exynos_irq_ack,
                .irq_set_type = exynos_irq_set_type,
+               .irq_set_affinity = exynos_irq_set_affinity,
                .irq_request_resources = exynos_irq_request_resources,
                .irq_release_resources = exynos_irq_release_resources,
        },
@@ -247,7 +281,10 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
        unsigned int svc, group, pin;
        int ret;
 
-       svc = readl(bank->eint_base + EXYNOS_SVC_OFFSET);
+       if (bank->eint_con_offset)
+               svc = readl(bank->eint_base + EXYNOSAUTO_SVC_OFFSET);
+       else
+               svc = readl(bank->eint_base + EXYNOS_SVC_OFFSET);
        group = EXYNOS_SVC_GROUP(svc);
        pin = svc & EXYNOS_SVC_NUM_MASK;
 
@@ -456,6 +493,22 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
        .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
 };
 
+static const struct exynos_irq_chip exynosautov920_wkup_irq_chip __initconst = {
+       .chip = {
+               .name = "exynosautov920_wkup_irq_chip",
+               .irq_unmask = exynos_irq_unmask,
+               .irq_mask = exynos_irq_mask,
+               .irq_ack = exynos_irq_ack,
+               .irq_set_type = exynos_irq_set_type,
+               .irq_set_wake = exynos_wkup_irq_set_wake,
+               .irq_request_resources = exynos_irq_request_resources,
+               .irq_release_resources = exynos_irq_release_resources,
+       },
+       .eint_wake_mask_value = &eint_wake_mask_value,
+       .eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
+       .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
+};
+
 /* list of external wakeup controllers supported */
 static const struct of_device_id exynos_wkup_irq_ids[] = {
        { .compatible = "samsung,s5pv210-wakeup-eint",
@@ -468,6 +521,8 @@ static const struct of_device_id exynos_wkup_irq_ids[] = {
                        .data = &exynos7_wkup_irq_chip },
        { .compatible = "samsung,exynosautov9-wakeup-eint",
                        .data = &exynos7_wkup_irq_chip },
+       { .compatible = "samsung,exynosautov920-wakeup-eint",
+                       .data = &exynosautov920_wkup_irq_chip },
        { }
 };
 
@@ -638,7 +693,7 @@ static void exynos_pinctrl_suspend_bank(
                                struct samsung_pin_bank *bank)
 {
        struct exynos_eint_gpio_save *save = bank->soc_priv;
-       void __iomem *regs = bank->eint_base;
+       const void __iomem *regs = bank->eint_base;
 
        save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
                                                + bank->eint_offset);
@@ -655,6 +710,19 @@ static void exynos_pinctrl_suspend_bank(
        pr_debug("%s: save    mask %#010x\n", bank->name, save->eint_mask);
 }
 
+static void exynosauto_pinctrl_suspend_bank(struct samsung_pinctrl_drv_data *drvdata,
+                                           struct samsung_pin_bank *bank)
+{
+       struct exynos_eint_gpio_save *save = bank->soc_priv;
+       const void __iomem *regs = bank->eint_base;
+
+       save->eint_con = readl(regs + bank->pctl_offset + bank->eint_con_offset);
+       save->eint_mask = readl(regs + bank->pctl_offset + bank->eint_mask_offset);
+
+       pr_debug("%s: save     con %#010x\n", bank->name, save->eint_con);
+       pr_debug("%s: save    mask %#010x\n", bank->name, save->eint_mask);
+}
+
 void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
 {
        struct samsung_pin_bank *bank = drvdata->pin_banks;
@@ -662,8 +730,12 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
        int i;
 
        for (i = 0; i < drvdata->nr_banks; ++i, ++bank) {
-               if (bank->eint_type == EINT_TYPE_GPIO)
-                       exynos_pinctrl_suspend_bank(drvdata, bank);
+               if (bank->eint_type == EINT_TYPE_GPIO) {
+                       if (bank->eint_con_offset)
+                               exynosauto_pinctrl_suspend_bank(drvdata, bank);
+                       else
+                               exynos_pinctrl_suspend_bank(drvdata, bank);
+               }
                else if (bank->eint_type == EINT_TYPE_WKUP) {
                        if (!irq_chip) {
                                irq_chip = bank->irq_chip;
@@ -704,14 +776,33 @@ static void exynos_pinctrl_resume_bank(
                                                + bank->eint_offset);
 }
 
+static void exynosauto_pinctrl_resume_bank(struct samsung_pinctrl_drv_data *drvdata,
+                                          struct samsung_pin_bank *bank)
+{
+       struct exynos_eint_gpio_save *save = bank->soc_priv;
+       void __iomem *regs = bank->eint_base;
+
+       pr_debug("%s:     con %#010x => %#010x\n", bank->name,
+                readl(regs + bank->pctl_offset + bank->eint_con_offset), save->eint_con);
+       pr_debug("%s:    mask %#010x => %#010x\n", bank->name,
+                readl(regs + bank->pctl_offset + bank->eint_mask_offset), save->eint_mask);
+
+       writel(save->eint_con, regs + bank->pctl_offset + bank->eint_con_offset);
+       writel(save->eint_mask, regs + bank->pctl_offset + bank->eint_mask_offset);
+}
+
 void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
 {
        struct samsung_pin_bank *bank = drvdata->pin_banks;
        int i;
 
        for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
-               if (bank->eint_type == EINT_TYPE_GPIO)
-                       exynos_pinctrl_resume_bank(drvdata, bank);
+               if (bank->eint_type == EINT_TYPE_GPIO) {
+                       if (bank->eint_con_offset)
+                               exynosauto_pinctrl_resume_bank(drvdata, bank);
+                       else
+                               exynos_pinctrl_resume_bank(drvdata, bank);
+               }
 }
 
 static void exynos_retention_enable(struct samsung_pinctrl_drv_data *drvdata)
index 3ac52c2cf9984709df7a5f604acd6d45086f4c1b..305cb1d31de491cd14c57d2aa48f2cdd0e1b4871 100644 (file)
@@ -31,6 +31,7 @@
 #define EXYNOS7_WKUP_EMASK_OFFSET      0x900
 #define EXYNOS7_WKUP_EPEND_OFFSET      0xA00
 #define EXYNOS_SVC_OFFSET              0xB08
+#define EXYNOSAUTO_SVC_OFFSET          0xF008
 
 /* helpers to access interrupt service register */
 #define EXYNOS_SVC_GROUP_SHIFT         3
                .name           = id                            \
        }
 
+#define EXYNOSV920_PIN_BANK_EINTG(pins, reg, id, con_offs, mask_offs, pend_offs)       \
+       {                                                       \
+               .type                   = &exynos850_bank_type_off,     \
+               .pctl_offset            = reg,                          \
+               .nr_pins                = pins,                         \
+               .eint_type              = EINT_TYPE_GPIO,               \
+               .eint_con_offset        = con_offs,                     \
+               .eint_mask_offset       = mask_offs,                    \
+               .eint_pend_offset       = pend_offs,                    \
+               .name                   = id                            \
+       }
+
+#define EXYNOSV920_PIN_BANK_EINTW(pins, reg, id, con_offs, mask_offs, pend_offs)       \
+       {                                                       \
+               .type                   = &exynos850_bank_type_alive,   \
+               .pctl_offset            = reg,                          \
+               .nr_pins                = pins,                         \
+               .eint_type              = EINT_TYPE_WKUP,               \
+               .eint_con_offset        = con_offs,                     \
+               .eint_mask_offset       = mask_offs,                    \
+               .eint_pend_offset       = pend_offs,                    \
+               .name                   = id                            \
+       }
+
 /**
  * struct exynos_weint_data: irq specific data for all the wakeup interrupts
  * generated by the external wakeup interrupt controller.
index 79babbb39cedbae84d6a8ecc564a024afba94b3c..ed07e23e091220a925e7fd4646f3e4a651f999bf 100644 (file)
@@ -565,7 +565,7 @@ static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
 /* gpiolib gpio_get callback function */
 static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
 {
-       void __iomem *reg;
+       const void __iomem *reg;
        u32 data;
        struct samsung_pin_bank *bank = gpiochip_get_data(gc);
        const struct samsung_pin_bank_type *type = bank->type;
@@ -1106,6 +1106,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
                bank->eint_type = bdata->eint_type;
                bank->eint_mask = bdata->eint_mask;
                bank->eint_offset = bdata->eint_offset;
+               bank->eint_con_offset = bdata->eint_con_offset;
+               bank->eint_mask_offset = bdata->eint_mask_offset;
+               bank->eint_pend_offset = bdata->eint_pend_offset;
                bank->name = bdata->name;
 
                raw_spin_lock_init(&bank->slock);
@@ -1201,7 +1204,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
 
        for (i = 0; i < drvdata->nr_banks; i++) {
                struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
-               void __iomem *reg = bank->pctl_base + bank->pctl_offset;
+               const void __iomem *reg = bank->pctl_base + bank->pctl_offset;
                const u8 *offs = bank->type->reg_offset;
                const u8 *widths = bank->type->fld_width;
                enum pincfg_type type;
@@ -1309,6 +1312,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
                .data = &s5pv210_of_data },
 #endif
 #ifdef CONFIG_PINCTRL_EXYNOS_ARM64
+       { .compatible = "google,gs101-pinctrl",
+               .data = &gs101_of_data },
        { .compatible = "samsung,exynos5433-pinctrl",
                .data = &exynos5433_of_data },
        { .compatible = "samsung,exynos7-pinctrl",
@@ -1319,6 +1324,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
                .data = &exynos850_of_data },
        { .compatible = "samsung,exynosautov9-pinctrl",
                .data = &exynosautov9_of_data },
+       { .compatible = "samsung,exynosautov920-pinctrl",
+               .data = &exynosautov920_of_data },
        { .compatible = "tesla,fsd-pinctrl",
                .data = &fsd_of_data },
 #endif
index 9b3db50adef32a59bc95acb47ff5cd508d6f241d..ab791afaabf5510ffbb3df430dd10ffaf04081b0 100644 (file)
@@ -122,6 +122,9 @@ struct samsung_pin_bank_type {
  * @eint_type: type of the external interrupt supported by the bank.
  * @eint_mask: bit mask of pins which support EINT function.
  * @eint_offset: SoC-specific EINT register or interrupt offset of bank.
+ * @eint_con_offset: ExynosAuto SoC-specific EINT control register offset of bank.
+ * @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
+ * @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
  * @name: name to be prefixed for each pin in this pin bank.
  */
 struct samsung_pin_bank_data {
@@ -133,6 +136,9 @@ struct samsung_pin_bank_data {
        enum eint_type  eint_type;
        u32             eint_mask;
        u32             eint_offset;
+       u32             eint_con_offset;
+       u32             eint_mask_offset;
+       u32             eint_pend_offset;
        const char      *name;
 };
 
@@ -147,6 +153,9 @@ struct samsung_pin_bank_data {
  * @eint_type: type of the external interrupt supported by the bank.
  * @eint_mask: bit mask of pins which support EINT function.
  * @eint_offset: SoC-specific EINT register or interrupt offset of bank.
+ * @eint_con_offset: ExynosAuto SoC-specific EINT register or interrupt offset of bank.
+ * @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
+ * @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
  * @name: name to be prefixed for each pin in this pin bank.
  * @id: id of the bank, propagated to the pin range.
  * @pin_base: starting pin number of the bank.
@@ -170,6 +179,9 @@ struct samsung_pin_bank {
        enum eint_type  eint_type;
        u32             eint_mask;
        u32             eint_offset;
+       u32             eint_con_offset;
+       u32             eint_mask_offset;
+       u32             eint_pend_offset;
        const char      *name;
        u32             id;
 
@@ -350,7 +362,9 @@ extern const struct samsung_pinctrl_of_match_data exynos7_of_data;
 extern const struct samsung_pinctrl_of_match_data exynos7885_of_data;
 extern const struct samsung_pinctrl_of_match_data exynos850_of_data;
 extern const struct samsung_pinctrl_of_match_data exynosautov9_of_data;
+extern const struct samsung_pinctrl_of_match_data exynosautov920_of_data;
 extern const struct samsung_pinctrl_of_match_data fsd_of_data;
+extern const struct samsung_pinctrl_of_match_data gs101_of_data;
 extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
 extern const struct samsung_pinctrl_of_match_data s3c2412_of_data;
 extern const struct samsung_pinctrl_of_match_data s3c2416_of_data;
index b29b0ab9892b8eda0c5325d8afd868853d1652ee..6df7a310c7ed62f2dd20fc542900e0187a3e483a 100644 (file)
@@ -654,7 +654,7 @@ static int starfive_set_mux(struct pinctrl_dev *pctldev,
                return -EINVAL;
 
        pinmux = group->data;
-       for (i = 0; i < group->num_pins; i++) {
+       for (i = 0; i < group->grp.npins; i++) {
                u32 v = pinmux[i];
                unsigned int gpio = starfive_pinmux_to_gpio(v);
                u32 dout = starfive_pinmux_to_dout(v);
@@ -797,7 +797,7 @@ static int starfive_pinconf_group_get(struct pinctrl_dev *pctldev,
        if (!group)
                return -EINVAL;
 
-       return starfive_pinconf_get(pctldev, group->pins[0], config);
+       return starfive_pinconf_get(pctldev, group->grp.pins[0], config);
 }
 
 static int starfive_pinconf_group_set(struct pinctrl_dev *pctldev,
@@ -876,8 +876,8 @@ static int starfive_pinconf_group_set(struct pinctrl_dev *pctldev,
                }
        }
 
-       for (i = 0; i < group->num_pins; i++)
-               starfive_padctl_rmw(sfp, group->pins[i], mask, value);
+       for (i = 0; i < group->grp.npins; i++)
+               starfive_padctl_rmw(sfp, group->grp.pins[i], mask, value);
 
        return 0;
 }
index 6de11a4057346dcea3a9f02cf2d3fb82caa99ea9..9609eb1ecc3d8428247fa28f0e12e2a4010e7415 100644 (file)
@@ -307,7 +307,7 @@ static int jh7110_set_mux(struct pinctrl_dev *pctldev,
                return -EINVAL;
 
        pinmux = group->data;
-       for (i = 0; i < group->num_pins; i++) {
+       for (i = 0; i < group->grp.npins; i++) {
                u32 v = pinmux[i];
 
                if (info->jh7110_set_one_pin_mux)
@@ -437,7 +437,7 @@ static int jh7110_pinconf_group_get(struct pinctrl_dev *pctldev,
        if (!group)
                return -EINVAL;
 
-       return jh7110_pinconf_get(pctldev, group->pins[0], config);
+       return jh7110_pinconf_get(pctldev, group->grp.pins[0], config);
 }
 
 static int jh7110_pinconf_group_set(struct pinctrl_dev *pctldev,
@@ -508,8 +508,8 @@ static int jh7110_pinconf_group_set(struct pinctrl_dev *pctldev,
                }
        }
 
-       for (i = 0; i < group->num_pins; i++)
-               jh7110_padcfg_rmw(sfp, group->pins[i], mask, value);
+       for (i = 0; i < group->grp.npins; i++)
+               jh7110_padcfg_rmw(sfp, group->grp.pins[i], mask, value);
 
        return 0;
 }
index 603f900e88c18a50ea01c33b8e52ef8154327880..978ccdbaf3d3f400a61eca4f888e211cb76add66 100644 (file)
@@ -240,9 +240,8 @@ static int stm32_gpio_direction_output(struct gpio_chip *chip,
        struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
 
        __stm32_gpio_set(bank, offset, value);
-       pinctrl_gpio_direction_output(chip, offset);
 
-       return 0;
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 
index 6bf8db424bec2f2668cf7733d8b2b8f03a32fe15..ccfa3870a67d39151b7509777450202d4cd2ff92 100644 (file)
@@ -636,6 +636,14 @@ static void tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
                seq_printf(s, "\n\t%s=%u",
                           strip_prefix(cfg_params[i].property), val);
        }
+
+       if (g->mux_reg >= 0) {
+               /* read pinmux function and dump to seq_file */
+               val = pmx_readl(pmx, g->mux_bank, g->mux_reg);
+               val = g->funcs[(val >> g->mux_bit) & 0x3];
+
+               seq_printf(s, "\n\tfunction=%s", pmx->functions[val].name);
+       }
 }
 
 static void tegra_pinconf_config_dbg_show(struct pinctrl_dev *pctldev,
index 788246559bbba2556d6d980c670bc5edeaf03b30..68d80559fddc25b076648aa6c69f99951c15f262 100644 (file)
@@ -81,9 +81,8 @@ struct cros_ec_uart {
        struct response_info response;
 };
 
-static int cros_ec_uart_rx_bytes(struct serdev_device *serdev,
-                                const u8 *data,
-                                size_t count)
+static ssize_t cros_ec_uart_rx_bytes(struct serdev_device *serdev,
+                                    const u8 *data, size_t count)
 {
        struct ec_host_response *host_response;
        struct cros_ec_device *ec_dev = serdev_device_get_drvdata(serdev);
index 7737d56191d7cca84471fa1c0202c01acbed7ac0..061aa9647c191635880dd988cd99e1331f7c0f14 100644 (file)
@@ -915,12 +915,11 @@ static int goldfish_pipe_probe(struct platform_device *pdev)
        return goldfish_pipe_device_init(pdev, dev);
 }
 
-static int goldfish_pipe_remove(struct platform_device *pdev)
+static void goldfish_pipe_remove(struct platform_device *pdev)
 {
        struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);
 
        goldfish_pipe_device_deinit(pdev, dev);
-       return 0;
 }
 
 static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
@@ -937,7 +936,7 @@ MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
 
 static struct platform_driver goldfish_pipe_driver = {
        .probe = goldfish_pipe_probe,
-       .remove = goldfish_pipe_remove,
+       .remove_new = goldfish_pipe_remove,
        .driver = {
                .name = "goldfish_pipe",
                .of_match_table = goldfish_pipe_of_match,
index 1dd84c7a79de97f44b25c48fd241db068492b4f9..b1995ac268d77a9c56c81ce6e65048e2c465c449 100644 (file)
@@ -1170,7 +1170,7 @@ static int mlxbf_pmc_program_crspace_counter(int blk_num, uint32_t cnt_num,
        int ret;
 
        addr = pmc->block[blk_num].mmio_base +
-               (rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+               ((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
        ret = mlxbf_pmc_readl(addr, &word);
        if (ret)
                return ret;
@@ -1413,7 +1413,7 @@ static int mlxbf_pmc_read_crspace_event(int blk_num, uint32_t cnt_num,
        int ret;
 
        addr = pmc->block[blk_num].mmio_base +
-               (rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+               ((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
        ret = mlxbf_pmc_readl(addr, &word);
        if (ret)
                return ret;
index ed16ec422a7b33e9b529bfe1912d0ff687e4db5d..b8d1e32e97ebafaa1d0091d32d110b9854022ff9 100644 (file)
@@ -47,6 +47,9 @@
 /* Message with data needs at least two words (for header & data). */
 #define MLXBF_TMFIFO_DATA_MIN_WORDS            2
 
+/* Tx timeout in milliseconds. */
+#define TMFIFO_TX_TIMEOUT                      2000
+
 /* ACPI UID for BlueField-3. */
 #define TMFIFO_BF3_UID                         1
 
@@ -62,12 +65,14 @@ struct mlxbf_tmfifo;
  * @drop_desc: dummy desc for packet dropping
  * @cur_len: processed length of the current descriptor
  * @rem_len: remaining length of the pending packet
+ * @rem_padding: remaining bytes to send as paddings
  * @pkt_len: total length of the pending packet
  * @next_avail: next avail descriptor id
  * @num: vring size (number of descriptors)
  * @align: vring alignment size
  * @index: vring index
  * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
+ * @tx_timeout: expire time of last tx packet
  * @fifo: pointer to the tmfifo structure
  */
 struct mlxbf_tmfifo_vring {
@@ -79,12 +84,14 @@ struct mlxbf_tmfifo_vring {
        struct vring_desc drop_desc;
        int cur_len;
        int rem_len;
+       int rem_padding;
        u32 pkt_len;
        u16 next_avail;
        int num;
        int align;
        int index;
        int vdev_id;
+       unsigned long tx_timeout;
        struct mlxbf_tmfifo *fifo;
 };
 
@@ -819,6 +826,50 @@ mlxbf_tmfifo_desc_done:
        return true;
 }
 
+static void mlxbf_tmfifo_check_tx_timeout(struct mlxbf_tmfifo_vring *vring)
+{
+       unsigned long flags;
+
+       /* Only handle Tx timeout for network vdev. */
+       if (vring->vdev_id != VIRTIO_ID_NET)
+               return;
+
+       /* Initialize the timeout or return if not expired. */
+       if (!vring->tx_timeout) {
+               /* Initialize the timeout. */
+               vring->tx_timeout = jiffies +
+                       msecs_to_jiffies(TMFIFO_TX_TIMEOUT);
+               return;
+       } else if (time_before(jiffies, vring->tx_timeout)) {
+               /* Return if not timeout yet. */
+               return;
+       }
+
+       /*
+        * Drop the packet after timeout. The outstanding packet is
+        * released and the remaining bytes will be sent with padding byte 0x00
+        * as a recovery. On the peer(host) side, the padding bytes 0x00 will be
+        * either dropped directly, or appended into existing outstanding packet
+        * thus dropped as corrupted network packet.
+        */
+       vring->rem_padding = round_up(vring->rem_len, sizeof(u64));
+       mlxbf_tmfifo_release_pkt(vring);
+       vring->cur_len = 0;
+       vring->rem_len = 0;
+       vring->fifo->vring[0] = NULL;
+
+       /*
+        * Make sure the load/store are in order before
+        * returning back to virtio.
+        */
+       virtio_mb(false);
+
+       /* Notify upper layer. */
+       spin_lock_irqsave(&vring->fifo->spin_lock[0], flags);
+       vring_interrupt(0, vring->vq);
+       spin_unlock_irqrestore(&vring->fifo->spin_lock[0], flags);
+}
+
 /* Rx & Tx processing of a queue. */
 static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
 {
@@ -841,6 +892,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
                return;
 
        do {
+retry:
                /* Get available FIFO space. */
                if (avail == 0) {
                        if (is_rx)
@@ -851,6 +903,17 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
                                break;
                }
 
+               /* Insert paddings for discarded Tx packet. */
+               if (!is_rx) {
+                       vring->tx_timeout = 0;
+                       while (vring->rem_padding >= sizeof(u64)) {
+                               writeq(0, vring->fifo->tx.data);
+                               vring->rem_padding -= sizeof(u64);
+                               if (--avail == 0)
+                                       goto retry;
+                       }
+               }
+
                /* Console output always comes from the Tx buffer. */
                if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
                        mlxbf_tmfifo_console_tx(fifo, avail);
@@ -860,6 +923,10 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
                /* Handle one descriptor. */
                more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
        } while (more);
+
+       /* Check Tx timeout. */
+       if (avail <= 0 && !is_rx)
+               mlxbf_tmfifo_check_tx_timeout(vring);
 }
 
 /* Handle Rx or Tx queues. */
index 42ccd7f1c9b9c3aac498af13878305724d8aa04b..118caa651bec4267fc2bf1653ef68bfe97ba594b 100644 (file)
@@ -35,6 +35,8 @@ static struct attribute *ssam_device_attrs[] = {
 };
 ATTRIBUTE_GROUPS(ssam_device);
 
+static const struct bus_type ssam_bus_type;
+
 static int ssam_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
 {
        const struct ssam_device *sdev = to_ssam_device(dev);
@@ -329,13 +331,12 @@ static void ssam_bus_remove(struct device *dev)
                sdrv->remove(to_ssam_device(dev));
 }
 
-struct bus_type ssam_bus_type = {
+static const struct bus_type ssam_bus_type = {
        .name   = "surface_aggregator",
        .match  = ssam_bus_match,
        .probe  = ssam_bus_probe,
        .remove = ssam_bus_remove,
 };
-EXPORT_SYMBOL_GPL(ssam_bus_type);
 
 /**
  * __ssam_device_driver_register() - Register a SSAM client device driver.
index f0d987abc51e3cdcacfdbecb5fb284a8682fbd06..f1638c2081e8bbb19826d9a75df3995d93a02e39 100644 (file)
@@ -238,8 +238,8 @@ struct ssam_controller {
  * layer of the controller has been shut down, %-ESHUTDOWN.
  */
 static inline
-int ssam_controller_receive_buf(struct ssam_controller *ctrl,
-                               const unsigned char *buf, size_t n)
+ssize_t ssam_controller_receive_buf(struct ssam_controller *ctrl, const u8 *buf,
+                                   size_t n)
 {
        return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n);
 }
index 6152be38398c48feac4b48a5084faea7448365c1..9591a28bc38a9f40da8f9c53fb49c11a6396a968 100644 (file)
@@ -227,8 +227,8 @@ EXPORT_SYMBOL_GPL(ssam_client_bind);
 
 /* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
 
-static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
-                           size_t n)
+static ssize_t ssam_receive_buf(struct serdev_device *dev, const u8 *buf,
+                               size_t n)
 {
        struct ssam_controller *ctrl;
        int ret;
index def8d7ac541f790f88dfe373ce1c758fb2f3fbe4..d726b1a8631999f74f6ad621ec410ae7060ba258 100644 (file)
@@ -1887,9 +1887,9 @@ int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
  * Return: Returns the number of bytes transferred (positive or zero) on
  * success. Returns %-ESHUTDOWN if the packet layer has been shut down.
  */
-int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
+ssize_t ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
 {
-       int used;
+       size_t used;
 
        if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
                return -ESHUTDOWN;
index 64633522f9716ea60e4ca9f35fa2ae2485c23574..c80e822070dfdd0e675b8fc420846dde0e1e6806 100644 (file)
@@ -162,7 +162,7 @@ void ssh_ptl_shutdown(struct ssh_ptl *ptl);
 int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p);
 void ssh_ptl_cancel(struct ssh_packet *p);
 
-int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n);
+ssize_t ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n);
 
 /**
  * ssh_ptl_tx_wakeup_transfer() - Wake up packet transmitter thread for
index f246252bddd85d97f3232617a53cb437b75a92c0..f4fa8bd8bda832a622078d84e2a7181d5f65cb88 100644 (file)
@@ -10,6 +10,7 @@ config AMD_PMF
        depends on AMD_NB
        select ACPI_PLATFORM_PROFILE
        depends on TEE && AMDTEE
+       depends on AMD_SFH_HID
        help
          This driver provides support for the AMD Platform Management Framework.
          The goal is to enhance end user experience by making AMD PCs smarter,
index a0423942f771e457457cc88cb604913eeb5b2657..a3dec14c30043ecc9c1d109247452d6d19949976 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <acpi/button.h>
+#include <linux/amd-pmf-io.h>
 #include <linux/power_supply.h>
 #include <linux/units.h>
 #include "pmf.h"
@@ -44,6 +45,8 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *
        dev_dbg(dev->dev, "Max C0 Residency: %u\n", in->ev_info.max_c0residency);
        dev_dbg(dev->dev, "GFX Busy: %u\n", in->ev_info.gfx_busy);
        dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open");
+       dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away");
+       dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light);
        dev_dbg(dev->dev, "==== TA inputs END ====\n");
 }
 #else
@@ -147,6 +150,38 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_
        return 0;
 }
 
+static int amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+       struct amd_sfh_info sfh_info;
+       int ret;
+
+       /* Get ALS data */
+       ret = amd_get_sfh_info(&sfh_info, MT_ALS);
+       if (!ret)
+               in->ev_info.ambient_light = sfh_info.ambient_light;
+       else
+               return ret;
+
+       /* get HPD data */
+       ret = amd_get_sfh_info(&sfh_info, MT_HPD);
+       if (ret)
+               return ret;
+
+       switch (sfh_info.user_present) {
+       case SFH_NOT_DETECTED:
+               in->ev_info.user_present = 0xff; /* assume no sensors connected */
+               break;
+       case SFH_USER_PRESENT:
+               in->ev_info.user_present = 1;
+               break;
+       case SFH_USER_AWAY:
+               in->ev_info.user_present = 0;
+               break;
+       }
+
+       return 0;
+}
+
 void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
 {
        /* TA side lid open is 1 and close is 0, hence the ! here */
@@ -155,4 +190,5 @@ void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_tab
        amd_pmf_get_smu_info(dev, in);
        amd_pmf_get_battery_info(dev, in);
        amd_pmf_get_slider_info(dev, in);
+       amd_pmf_get_sensor_info(dev, in);
 }
index 502ce93d5cddac57f2f482080ea8a0800ea5123f..f8c0177afb0dae60d4f67f2876ba98c6100d1ceb 100644 (file)
@@ -298,8 +298,10 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
        if (!new_policy_buf)
                return -ENOMEM;
 
-       if (copy_from_user(new_policy_buf, buf, length))
+       if (copy_from_user(new_policy_buf, buf, length)) {
+               kfree(new_policy_buf);
                return -EFAULT;
+       }
 
        kfree(dev->policy_buf);
        dev->policy_buf = new_policy_buf;
index 848baecc1bb02671f88052b7a8aa42a49a3080ad..93f75ba1dafdffff5c469884b67ed3fbfe2f3ff5 100644 (file)
@@ -136,7 +136,7 @@ static const struct software_node altmodes_node = {
 };
 
 static const struct property_entry dp_altmode_properties[] = {
-       PROPERTY_ENTRY_U32("svid", 0xff01),
+       PROPERTY_ENTRY_U16("svid", 0xff01),
        PROPERTY_ENTRY_U32("vdo", 0x0c0086),
        { }
 };
index a1ee1a74fc3c4cb7e7bc62cda0297acdbe942d54..2cf3b4a8813f9b30cb5a79aaf2ee6acee2474c68 100644 (file)
@@ -399,7 +399,8 @@ int ifs_load_firmware(struct device *dev)
        if (fw->size != expected_size) {
                dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n",
                        expected_size, fw->size);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto release;
        }
 
        ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data);
index 33ab207493e3e62946dc5d76c2eda98805725888..33bb58dc3f78c30a304a7a35595666152c34e908 100644 (file)
@@ -23,23 +23,23 @@ static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned
 static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
 static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
 
-static ssize_t show_domain_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-       struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_dev_attr);
+       struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_kobj_attr);
 
        return sprintf(buf, "%u\n", data->domain_id);
 }
 
-static ssize_t show_fabric_cluster_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_fabric_cluster_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-       struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_dev_attr);
+       struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_kobj_attr);
 
        return sprintf(buf, "%u\n", data->cluster_id);
 }
 
-static ssize_t show_package_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-       struct uncore_data *data = container_of(attr, struct uncore_data, package_id_dev_attr);
+       struct uncore_data *data = container_of(attr, struct uncore_data, package_id_kobj_attr);
 
        return sprintf(buf, "%u\n", data->package_id);
 }
@@ -97,30 +97,30 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
 }
 
 #define store_uncore_min_max(name, min_max)                            \
-       static ssize_t store_##name(struct device *dev,         \
-                                    struct device_attribute *attr,     \
+       static ssize_t store_##name(struct kobject *kobj,               \
+                                    struct kobj_attribute *attr,       \
                                     const char *buf, size_t count)     \
        {                                                               \
-               struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+               struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
                                                                        \
                return store_min_max_freq_khz(data, buf, count, \
                                              min_max);         \
        }
 
 #define show_uncore_min_max(name, min_max)                             \
-       static ssize_t show_##name(struct device *dev,          \
-                                   struct device_attribute *attr, char *buf)\
+       static ssize_t show_##name(struct kobject *kobj,                \
+                                   struct kobj_attribute *attr, char *buf)\
        {                                                               \
-               struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+               struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
                                                                        \
                return show_min_max_freq_khz(data, buf, min_max);       \
        }
 
 #define show_uncore_perf_status(name)                                  \
-       static ssize_t show_##name(struct device *dev,          \
-                                  struct device_attribute *attr, char *buf)\
+       static ssize_t show_##name(struct kobject *kobj,                \
+                                  struct kobj_attribute *attr, char *buf)\
        {                                                               \
-               struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+               struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
                                                                        \
                return show_perf_status_freq_khz(data, buf); \
        }
@@ -134,11 +134,11 @@ show_uncore_min_max(max_freq_khz, 1);
 show_uncore_perf_status(current_freq_khz);
 
 #define show_uncore_data(member_name)                                  \
-       static ssize_t show_##member_name(struct device *dev,   \
-                                          struct device_attribute *attr, char *buf)\
+       static ssize_t show_##member_name(struct kobject *kobj, \
+                                          struct kobj_attribute *attr, char *buf)\
        {                                                               \
                struct uncore_data *data = container_of(attr, struct uncore_data,\
-                                                         member_name##_dev_attr);\
+                                                         member_name##_kobj_attr);\
                                                                        \
                return sysfs_emit(buf, "%u\n",                          \
                                 data->member_name);                    \
@@ -149,29 +149,29 @@ show_uncore_data(initial_max_freq_khz);
 
 #define init_attribute_rw(_name)                                       \
        do {                                                            \
-               sysfs_attr_init(&data->_name##_dev_attr.attr);  \
-               data->_name##_dev_attr.show = show_##_name;             \
-               data->_name##_dev_attr.store = store_##_name;           \
-               data->_name##_dev_attr.attr.name = #_name;              \
-               data->_name##_dev_attr.attr.mode = 0644;                \
+               sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+               data->_name##_kobj_attr.show = show_##_name;            \
+               data->_name##_kobj_attr.store = store_##_name;          \
+               data->_name##_kobj_attr.attr.name = #_name;             \
+               data->_name##_kobj_attr.attr.mode = 0644;               \
        } while (0)
 
 #define init_attribute_ro(_name)                                       \
        do {                                                            \
-               sysfs_attr_init(&data->_name##_dev_attr.attr);  \
-               data->_name##_dev_attr.show = show_##_name;             \
-               data->_name##_dev_attr.store = NULL;                    \
-               data->_name##_dev_attr.attr.name = #_name;              \
-               data->_name##_dev_attr.attr.mode = 0444;                \
+               sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+               data->_name##_kobj_attr.show = show_##_name;            \
+               data->_name##_kobj_attr.store = NULL;                   \
+               data->_name##_kobj_attr.attr.name = #_name;             \
+               data->_name##_kobj_attr.attr.mode = 0444;               \
        } while (0)
 
 #define init_attribute_root_ro(_name)                                  \
        do {                                                            \
-               sysfs_attr_init(&data->_name##_dev_attr.attr);  \
-               data->_name##_dev_attr.show = show_##_name;             \
-               data->_name##_dev_attr.store = NULL;                    \
-               data->_name##_dev_attr.attr.name = #_name;              \
-               data->_name##_dev_attr.attr.mode = 0400;                \
+               sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+               data->_name##_kobj_attr.show = show_##_name;            \
+               data->_name##_kobj_attr.store = NULL;                   \
+               data->_name##_kobj_attr.attr.name = #_name;             \
+               data->_name##_kobj_attr.attr.mode = 0400;               \
        } while (0)
 
 static int create_attr_group(struct uncore_data *data, char *name)
@@ -186,21 +186,21 @@ static int create_attr_group(struct uncore_data *data, char *name)
 
        if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) {
                init_attribute_root_ro(domain_id);
-               data->uncore_attrs[index++] = &data->domain_id_dev_attr.attr;
+               data->uncore_attrs[index++] = &data->domain_id_kobj_attr.attr;
                init_attribute_root_ro(fabric_cluster_id);
-               data->uncore_attrs[index++] = &data->fabric_cluster_id_dev_attr.attr;
+               data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr;
                init_attribute_root_ro(package_id);
-               data->uncore_attrs[index++] = &data->package_id_dev_attr.attr;
+               data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr;
        }
 
-       data->uncore_attrs[index++] = &data->max_freq_khz_dev_attr.attr;
-       data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
-       data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
-       data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
+       data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
+       data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr;
+       data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
+       data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
 
        ret = uncore_read_freq(data, &freq);
        if (!ret)
-               data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+               data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
 
        data->uncore_attrs[index] = NULL;
 
index 7afb69977c7e8c80b0db3ba819799434e41ff60a..0e5bf507e555209a69ba61e8e8eaaf7392209bfa 100644 (file)
  * @instance_id:       Unique instance id to append to directory name
  * @name:              Sysfs entry name for this instance
  * @uncore_attr_group: Attribute group storage
- * @max_freq_khz_dev_attr: Storage for device attribute max_freq_khz
- * @mix_freq_khz_dev_attr: Storage for device attribute min_freq_khz
- * @initial_max_freq_khz_dev_attr: Storage for device attribute initial_max_freq_khz
- * @initial_min_freq_khz_dev_attr: Storage for device attribute initial_min_freq_khz
- * @current_freq_khz_dev_attr: Storage for device attribute current_freq_khz
- * @domain_id_dev_attr: Storage for device attribute domain_id
- * @fabric_cluster_id_dev_attr: Storage for device attribute fabric_cluster_id
- * @package_id_dev_attr: Storage for device attribute package_id
+ * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
+ * @min_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
+ * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
+ * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
+ * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
+ * @domain_id_kobj_attr: Storage for kobject attribute domain_id
+ * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
+ * @package_id_kobj_attr: Storage for kobject attribute package_id
  * @uncore_attrs:      Attribute storage for group creation
  *
  * This structure is used to encapsulate all data related to uncore sysfs
@@ -53,14 +53,14 @@ struct uncore_data {
        char name[32];
 
        struct attribute_group uncore_attr_group;
-       struct device_attribute max_freq_khz_dev_attr;
-       struct device_attribute min_freq_khz_dev_attr;
-       struct device_attribute initial_max_freq_khz_dev_attr;
-       struct device_attribute initial_min_freq_khz_dev_attr;
-       struct device_attribute current_freq_khz_dev_attr;
-       struct device_attribute domain_id_dev_attr;
-       struct device_attribute fabric_cluster_id_dev_attr;
-       struct device_attribute package_id_dev_attr;
+       struct kobj_attribute max_freq_khz_kobj_attr;
+       struct kobj_attribute min_freq_khz_kobj_attr;
+       struct kobj_attribute initial_max_freq_khz_kobj_attr;
+       struct kobj_attribute initial_min_freq_khz_kobj_attr;
+       struct kobj_attribute current_freq_khz_kobj_attr;
+       struct kobj_attribute domain_id_kobj_attr;
+       struct kobj_attribute fabric_cluster_id_kobj_attr;
+       struct kobj_attribute package_id_kobj_attr;
        struct attribute *uncore_attrs[9];
 };
 
index 9cf5ed0f8dc2848b9f85f59dafd3cd43373bf960..040153ad67c1cb7c36fe85616cb97d10f7f99c46 100644 (file)
@@ -32,7 +32,7 @@ static int get_fwu_request(struct device *dev, u32 *out)
                return -ENODEV;
 
        if (obj->type != ACPI_TYPE_INTEGER) {
-               dev_warn(dev, "wmi_query_block returned invalid value\n");
+               dev_warn(dev, "wmidev_block_query returned invalid value\n");
                kfree(obj);
                return -EINVAL;
        }
@@ -55,7 +55,7 @@ static int set_fwu_request(struct device *dev, u32 in)
 
        status = wmidev_block_set(to_wmi_device(dev), 0, &input);
        if (ACPI_FAILURE(status)) {
-               dev_err(dev, "wmi_set_block failed\n");
+               dev_err(dev, "wmidev_block_set failed\n");
                return -ENODEV;
        }
 
index b8d0239192cbf58d50a233a7606cfdb1a8907d24..fd62bf746ebde47d9524d6104b8309d8842f093f 100644 (file)
@@ -435,7 +435,7 @@ static int yogabook_pdev_set_kbd_backlight(struct yogabook_data *data, u8 level)
                .enabled = level,
        };
 
-       pwm_apply_state(data->kbd_bl_pwm, &state);
+       pwm_apply_might_sleep(data->kbd_bl_pwm, &state);
        gpiod_set_value(data->kbd_bl_led_enable, level ? 1 : 0);
        return 0;
 }
index 1cf2471d54ddef765b017fd079864b3ffb868fdc..6bd14d0132dbd73b1ea497679d4dd8297671859e 100644 (file)
@@ -26,6 +26,21 @@ static const struct x86_cpu_id p2sb_cpu_ids[] = {
        {}
 };
 
+/*
+ * Cache BAR0 of P2SB device functions 0 to 7.
+ * TODO: The constant 8 is the number of functions that PCI specification
+ *       defines. Same definitions exist tree-wide. Unify this definition and
+ *       the other definitions then move to include/uapi/linux/pci.h.
+ */
+#define NR_P2SB_RES_CACHE 8
+
+struct p2sb_res_cache {
+       u32 bus_dev_id;
+       struct resource res;
+};
+
+static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
+
 static int p2sb_get_devfn(unsigned int *devfn)
 {
        unsigned int fn = P2SB_DEVFN_DEFAULT;
@@ -39,10 +54,18 @@ static int p2sb_get_devfn(unsigned int *devfn)
        return 0;
 }
 
+static bool p2sb_valid_resource(struct resource *res)
+{
+       if (res->flags)
+               return true;
+
+       return false;
+}
+
 /* Copy resource from the first BAR of the device in question */
-static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
 {
-       struct resource *bar0 = &pdev->resource[0];
+       struct resource *bar0 = pci_resource_n(pdev, 0);
 
        /* Make sure we have no dangling pointers in the output */
        memset(mem, 0, sizeof(*mem));
@@ -56,49 +79,66 @@ static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
        mem->end = bar0->end;
        mem->flags = bar0->flags;
        mem->desc = bar0->desc;
-
-       return 0;
 }
 
-static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
 {
+       struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
        struct pci_dev *pdev;
-       int ret;
 
        pdev = pci_scan_single_device(bus, devfn);
        if (!pdev)
-               return -ENODEV;
+               return;
 
-       ret = p2sb_read_bar0(pdev, mem);
+       p2sb_read_bar0(pdev, &cache->res);
+       cache->bus_dev_id = bus->dev.id;
 
        pci_stop_and_remove_bus_device(pdev);
-       return ret;
 }
 
-/**
- * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
- * @bus: PCI bus to communicate with
- * @devfn: PCI slot and function to communicate with
- * @mem: memory resource to be filled in
- *
- * The BIOS prevents the P2SB device from being enumerated by the PCI
- * subsystem, so we need to unhide and hide it back to lookup the BAR.
- *
- * if @bus is NULL, the bus 0 in domain 0 will be used.
- * If @devfn is 0, it will be replaced by devfn of the P2SB device.
- *
- * Caller must provide a valid pointer to @mem.
- *
- * Locking is handled by pci_rescan_remove_lock mutex.
- *
- * Return:
- * 0 on success or appropriate errno value on error.
- */
-int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
+{
+       unsigned int slot, fn;
+
+       if (PCI_FUNC(devfn) == 0) {
+               /*
+                * When function number of the P2SB device is zero, scan it and
+                * other function numbers, and if devices are available, cache
+                * their BAR0s.
+                */
+               slot = PCI_SLOT(devfn);
+               for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
+                       p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
+       } else {
+               /* Scan the P2SB device and cache its BAR0 */
+               p2sb_scan_and_cache_devfn(bus, devfn);
+       }
+
+       if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
+               return -ENOENT;
+
+       return 0;
+}
+
+static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
+{
+       static struct pci_bus *p2sb_bus;
+
+       bus = bus ?: p2sb_bus;
+       if (bus)
+               return bus;
+
+       /* Assume P2SB is on the bus 0 in domain 0 */
+       p2sb_bus = pci_find_bus(0, 0);
+       return p2sb_bus;
+}
+
+static int p2sb_cache_resources(void)
 {
-       struct pci_dev *pdev_p2sb;
        unsigned int devfn_p2sb;
        u32 value = P2SBC_HIDE;
+       struct pci_bus *bus;
+       u16 class;
        int ret;
 
        /* Get devfn for P2SB device itself */
@@ -106,8 +146,17 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
        if (ret)
                return ret;
 
-       /* if @bus is NULL, use bus 0 in domain 0 */
-       bus = bus ?: pci_find_bus(0, 0);
+       bus = p2sb_get_bus(NULL);
+       if (!bus)
+               return -ENODEV;
+
+       /*
+        * When a device with same devfn exists and its device class is not
+        * PCI_CLASS_MEMORY_OTHER for P2SB, do not touch it.
+        */
+       pci_bus_read_config_word(bus, devfn_p2sb, PCI_CLASS_DEVICE, &class);
+       if (!PCI_POSSIBLE_ERROR(class) && class != PCI_CLASS_MEMORY_OTHER)
+               return -ENODEV;
 
        /*
         * Prevent concurrent PCI bus scan from seeing the P2SB device and
@@ -115,17 +164,16 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
         */
        pci_lock_rescan_remove();
 
-       /* Unhide the P2SB device, if needed */
+       /*
+        * The BIOS prevents the P2SB device from being enumerated by the PCI
+        * subsystem, so we need to unhide and hide it back to lookup the BAR.
+        * Unhide the P2SB device here, if needed.
+        */
        pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
        if (value & P2SBC_HIDE)
                pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
 
-       pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
-       if (devfn)
-               ret = p2sb_scan_and_read(bus, devfn, mem);
-       else
-               ret = p2sb_read_bar0(pdev_p2sb, mem);
-       pci_stop_and_remove_bus_device(pdev_p2sb);
+       ret = p2sb_scan_and_cache(bus, devfn_p2sb);
 
        /* Hide the P2SB device, if it was hidden */
        if (value & P2SBC_HIDE)
@@ -133,12 +181,62 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
 
        pci_unlock_rescan_remove();
 
-       if (ret)
-               return ret;
+       return ret;
+}
+
+/**
+ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+ * @bus: PCI bus to communicate with
+ * @devfn: PCI slot and function to communicate with
+ * @mem: memory resource to be filled in
+ *
+ * If @bus is NULL, the bus 0 in domain 0 will be used.
+ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+ *
+ * Caller must provide a valid pointer to @mem.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
+ */
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+       struct p2sb_res_cache *cache;
+       int ret;
+
+       bus = p2sb_get_bus(bus);
+       if (!bus)
+               return -ENODEV;
+
+       if (!devfn) {
+               ret = p2sb_get_devfn(&devfn);
+               if (ret)
+                       return ret;
+       }
 
-       if (mem->flags == 0)
+       cache = &p2sb_resources[PCI_FUNC(devfn)];
+       if (cache->bus_dev_id != bus->dev.id)
                return -ENODEV;
 
+       if (!p2sb_valid_resource(&cache->res))
+               return -ENOENT;
+
+       memcpy(mem, &cache->res, sizeof(*mem));
        return 0;
 }
 EXPORT_SYMBOL_GPL(p2sb_bar);
+
+static int __init p2sb_fs_init(void)
+{
+       p2sb_cache_resources();
+       return 0;
+}
+
+/*
+ * pci_rescan_remove_lock, which guards access to unhidden P2SB devices,
+ * cannot be taken in the sysfs PCI bus rescan path because that would
+ * deadlock. To avoid this, access the P2SB devices with the lock held at
+ * an early step in kernel initialization and cache required resources.
+ * This should happen after subsys_initcall which initializes the PCI
+ * subsystem and before device_initcall which requires P2SB resources.
+ */
+fs_initcall(p2sb_fs_init);
index 0c6733772698408ef1a23b977d1a0698a19347d5..7aee5e9ff2b8dd5810f83cc0317ed329b4361d2e 100644 (file)
@@ -944,6 +944,32 @@ static const struct ts_dmi_data teclast_tbook11_data = {
        .properties     = teclast_tbook11_props,
 };
 
+static const struct property_entry teclast_x16_plus_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct ts_dmi_data teclast_x16_plus_data = {
+       .embedded_fw = {
+               .name   = "silead/gsl3692-teclast-x16-plus.fw",
+               .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+               .length = 43560,
+               .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
+                           0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
+                           0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
+                           0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
+       },
+       .acpi_name      = "MSSL1680:00",
+       .properties     = teclast_x16_plus_props,
+};
+
 static const struct property_entry teclast_x3_plus_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -1612,6 +1638,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
                },
        },
+       {
+               /* Teclast X16 Plus */
+               .driver_data = (void *)&teclast_x16_plus_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+                       DMI_MATCH(DMI_PRODUCT_SKU, "D3A5_A1"),
+               },
+       },
        {
                /* Teclast X3 Plus */
                .driver_data = (void *)&teclast_x3_plus_data,
index bd271a5730aa51f1c1e6286e2b481e865799b79a..3c288e8f404beb5d4887235c85654e9ac77cd425 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/rwsem.h>
 #include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
@@ -56,7 +57,6 @@ static_assert(__alignof__(struct guid_block) == 1);
 
 enum { /* wmi_block flags */
        WMI_READ_TAKES_NO_ARGS,
-       WMI_PROBED,
 };
 
 struct wmi_block {
@@ -64,8 +64,10 @@ struct wmi_block {
        struct list_head list;
        struct guid_block gblock;
        struct acpi_device *acpi_device;
+       struct rw_semaphore notify_lock;        /* Protects notify callback add/remove */
        wmi_notify_handler handler;
        void *handler_data;
+       bool driver_ready;
        unsigned long flags;
 };
 
@@ -219,6 +221,17 @@ static int wmidev_match_guid(struct device *dev, const void *data)
        return 0;
 }
 
+static int wmidev_match_notify_id(struct device *dev, const void *data)
+{
+       struct wmi_block *wblock = dev_to_wblock(dev);
+       const u32 *notify_id = data;
+
+       if (wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *notify_id)
+               return 1;
+
+       return 0;
+}
+
 static struct bus_type wmi_bus_type;
 
 static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
@@ -238,6 +251,17 @@ static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
        return dev_to_wdev(dev);
 }
 
+static struct wmi_device *wmi_find_event_by_notify_id(const u32 notify_id)
+{
+       struct device *dev;
+
+       dev = bus_find_device(&wmi_bus_type, NULL, &notify_id, wmidev_match_notify_id);
+       if (!dev)
+               return ERR_PTR(-ENODEV);
+
+       return to_wmi_device(dev);
+}
+
 static void wmi_device_put(struct wmi_device *wdev)
 {
        put_device(&wdev->dev);
@@ -572,32 +596,31 @@ acpi_status wmi_install_notify_handler(const char *guid,
                                       wmi_notify_handler handler,
                                       void *data)
 {
-       struct wmi_block *block;
-       acpi_status status = AE_NOT_EXIST;
-       guid_t guid_input;
-
-       if (!guid || !handler)
-               return AE_BAD_PARAMETER;
+       struct wmi_block *wblock;
+       struct wmi_device *wdev;
+       acpi_status status;
 
-       if (guid_parse(guid, &guid_input))
-               return AE_BAD_PARAMETER;
+       wdev = wmi_find_device_by_guid(guid);
+       if (IS_ERR(wdev))
+               return AE_ERROR;
 
-       list_for_each_entry(block, &wmi_block_list, list) {
-               acpi_status wmi_status;
+       wblock = container_of(wdev, struct wmi_block, dev);
 
-               if (guid_equal(&block->gblock.guid, &guid_input)) {
-                       if (block->handler)
-                               return AE_ALREADY_ACQUIRED;
+       down_write(&wblock->notify_lock);
+       if (wblock->handler) {
+               status = AE_ALREADY_ACQUIRED;
+       } else {
+               wblock->handler = handler;
+               wblock->handler_data = data;
 
-                       block->handler = handler;
-                       block->handler_data = data;
+               if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+                       dev_warn(&wblock->dev.dev, "Failed to enable device\n");
 
-                       wmi_status = wmi_method_enable(block, true);
-                       if ((wmi_status != AE_OK) ||
-                           ((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
-                               status = wmi_status;
-               }
+               status = AE_OK;
        }
+       up_write(&wblock->notify_lock);
+
+       wmi_device_put(wdev);
 
        return status;
 }
@@ -613,30 +636,31 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
  */
 acpi_status wmi_remove_notify_handler(const char *guid)
 {
-       struct wmi_block *block;
-       acpi_status status = AE_NOT_EXIST;
-       guid_t guid_input;
+       struct wmi_block *wblock;
+       struct wmi_device *wdev;
+       acpi_status status;
 
-       if (!guid)
-               return AE_BAD_PARAMETER;
+       wdev = wmi_find_device_by_guid(guid);
+       if (IS_ERR(wdev))
+               return AE_ERROR;
 
-       if (guid_parse(guid, &guid_input))
-               return AE_BAD_PARAMETER;
+       wblock = container_of(wdev, struct wmi_block, dev);
 
-       list_for_each_entry(block, &wmi_block_list, list) {
-               acpi_status wmi_status;
+       down_write(&wblock->notify_lock);
+       if (!wblock->handler) {
+               status = AE_NULL_ENTRY;
+       } else {
+               if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+                       dev_warn(&wblock->dev.dev, "Failed to disable device\n");
 
-               if (guid_equal(&block->gblock.guid, &guid_input)) {
-                       if (!block->handler)
-                               return AE_NULL_ENTRY;
+               wblock->handler = NULL;
+               wblock->handler_data = NULL;
 
-                       wmi_status = wmi_method_enable(block, false);
-                       block->handler = NULL;
-                       block->handler_data = NULL;
-                       if (wmi_status != AE_OK || (wmi_status == AE_OK && status == AE_NOT_EXIST))
-                               status = wmi_status;
-               }
+               status = AE_OK;
        }
+       up_write(&wblock->notify_lock);
+
+       wmi_device_put(wdev);
 
        return status;
 }
@@ -655,15 +679,19 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
 acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
 {
        struct wmi_block *wblock;
+       struct wmi_device *wdev;
+       acpi_status status;
 
-       list_for_each_entry(wblock, &wmi_block_list, list) {
-               struct guid_block *gblock = &wblock->gblock;
+       wdev = wmi_find_event_by_notify_id(event);
+       if (IS_ERR(wdev))
+               return AE_NOT_FOUND;
 
-               if ((gblock->flags & ACPI_WMI_EVENT) && gblock->notify_id == event)
-                       return get_event_data(wblock, out);
-       }
+       wblock = container_of(wdev, struct wmi_block, dev);
+       status = get_event_data(wblock, out);
 
-       return AE_NOT_FOUND;
+       wmi_device_put(wdev);
+
+       return status;
 }
 EXPORT_SYMBOL_GPL(wmi_get_event_data);
 
@@ -868,7 +896,7 @@ static int wmi_dev_probe(struct device *dev)
        if (wdriver->probe) {
                ret = wdriver->probe(dev_to_wdev(dev),
                                find_guid_context(wblock, wdriver));
-               if (!ret) {
+               if (ret) {
                        if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
                                dev_warn(dev, "Failed to disable device\n");
 
@@ -876,7 +904,9 @@ static int wmi_dev_probe(struct device *dev)
                }
        }
 
-       set_bit(WMI_PROBED, &wblock->flags);
+       down_write(&wblock->notify_lock);
+       wblock->driver_ready = true;
+       up_write(&wblock->notify_lock);
 
        return 0;
 }
@@ -886,7 +916,9 @@ static void wmi_dev_remove(struct device *dev)
        struct wmi_block *wblock = dev_to_wblock(dev);
        struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
 
-       clear_bit(WMI_PROBED, &wblock->flags);
+       down_write(&wblock->notify_lock);
+       wblock->driver_ready = false;
+       up_write(&wblock->notify_lock);
 
        if (wdriver->remove)
                wdriver->remove(dev_to_wdev(dev));
@@ -999,6 +1031,8 @@ static int wmi_create_device(struct device *wmi_bus_dev,
                wblock->dev.setable = true;
 
  out_init:
+       init_rwsem(&wblock->notify_lock);
+       wblock->driver_ready = false;
        wblock->dev.dev.bus = &wmi_bus_type;
        wblock->dev.dev.parent = wmi_bus_dev;
 
@@ -1171,6 +1205,26 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
        }
 }
 
+static void wmi_notify_driver(struct wmi_block *wblock)
+{
+       struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
+       struct acpi_buffer data = { ACPI_ALLOCATE_BUFFER, NULL };
+       acpi_status status;
+
+       if (!driver->no_notify_data) {
+               status = get_event_data(wblock, &data);
+               if (ACPI_FAILURE(status)) {
+                       dev_warn(&wblock->dev.dev, "Failed to get event data\n");
+                       return;
+               }
+       }
+
+       if (driver->notify)
+               driver->notify(&wblock->dev, data.pointer);
+
+       kfree(data.pointer);
+}
+
 static int wmi_notify_device(struct device *dev, void *data)
 {
        struct wmi_block *wblock = dev_to_wblock(dev);
@@ -1179,28 +1233,17 @@ static int wmi_notify_device(struct device *dev, void *data)
        if (!(wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *event))
                return 0;
 
-       /* If a driver is bound, then notify the driver. */
-       if (test_bit(WMI_PROBED, &wblock->flags) && wblock->dev.dev.driver) {
-               struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
-               struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
-               acpi_status status;
-
-               if (!driver->no_notify_data) {
-                       status = get_event_data(wblock, &evdata);
-                       if (ACPI_FAILURE(status)) {
-                               dev_warn(&wblock->dev.dev, "failed to get event data\n");
-                               return -EIO;
-                       }
-               }
-
-               if (driver->notify)
-                       driver->notify(&wblock->dev, evdata.pointer);
-
-               kfree(evdata.pointer);
-       } else if (wblock->handler) {
-               /* Legacy handler */
-               wblock->handler(*event, wblock->handler_data);
+       down_read(&wblock->notify_lock);
+       /* The WMI driver notify handler conflicts with the legacy WMI handler.
+        * Because of this the WMI driver notify handler takes precedence.
+        */
+       if (wblock->dev.dev.driver && wblock->driver_ready) {
+               wmi_notify_driver(wblock);
+       } else {
+               if (wblock->handler)
+                       wblock->handler(*event, wblock->handler_data);
        }
+       up_read(&wblock->notify_lock);
 
        acpi_bus_generate_netlink_event(wblock->acpi_device->pnp.device_class,
                                        dev_name(&wblock->dev.dev), *event, 0);
index 9ee1bf2ece10ff243bf4b1fbd10bc6fee5858a22..18e232b5ed53d73ab24bd4fe3dab94c69235436d 100644 (file)
@@ -1109,7 +1109,7 @@ static int __init genpd_power_off_unused(void)
 
        return 0;
 }
-late_initcall(genpd_power_off_unused);
+late_initcall_sync(genpd_power_off_unused);
 
 #ifdef CONFIG_PM_SLEEP
 
@@ -2668,7 +2668,7 @@ static void genpd_release_dev(struct device *dev)
        kfree(dev);
 }
 
-static struct bus_type genpd_bus_type = {
+static const struct bus_type genpd_bus_type = {
        .name           = "genpd",
 };
 
index e26dc17d07ad71d8398044670227c93a6bdd4427..e274e3315fe7a60887bec6a1fa85db69156e7fd6 100644 (file)
@@ -561,6 +561,11 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
                        goto err_put_node;
                }
 
+               /* recursive call to add all subdomains */
+               ret = scpsys_add_subdomain(scpsys, child);
+               if (ret)
+                       goto err_put_node;
+
                ret = pm_genpd_add_subdomain(parent_pd, child_pd);
                if (ret) {
                        dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
@@ -570,11 +575,6 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
                        dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
                                child_pd->name);
                }
-
-               /* recursive call to add all subdomains */
-               ret = scpsys_add_subdomain(scpsys, child);
-               if (ret)
-                       goto err_put_node;
        }
 
        return 0;
@@ -588,9 +588,6 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
 {
        int ret;
 
-       if (scpsys_domain_is_on(pd))
-               scpsys_power_off(&pd->genpd);
-
        /*
         * We're in the error cleanup already, so we only complain,
         * but won't emit another error on top of the original one.
@@ -600,6 +597,8 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
                dev_err(pd->scpsys->dev,
                        "failed to remove domain '%s' : %d - state may be inconsistent\n",
                        pd->genpd.name, ret);
+       if (scpsys_domain_is_on(pd))
+               scpsys_power_off(&pd->genpd);
 
        clk_bulk_put(pd->num_clks, pd->clks);
        clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
index 39ca84a67daadd21202e1ba80f13ec6cbc671a7a..621e411fc9991a4050cd6da699695912f18a46b0 100644 (file)
@@ -25,7 +25,8 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
          PD_CPU_NOCR },
        { "ca53-cpu3",  0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
          PD_CPU_NOCR },
-       { "cr7",        0x240, 0, R8A77980_PD_CR7,      R8A77980_PD_ALWAYS_ON },
+       { "cr7",        0x240, 0, R8A77980_PD_CR7,      R8A77980_PD_ALWAYS_ON,
+         PD_CPU_NOCR },
        { "a3ir",       0x180, 0, R8A77980_PD_A3IR,     R8A77980_PD_ALWAYS_ON },
        { "a2ir0",      0x400, 0, R8A77980_PD_A2IR0,    R8A77980_PD_A3IR },
        { "a2ir1",      0x400, 1, R8A77980_PD_A2IR1,    R8A77980_PD_A3IR },
index 46c534f6b1c974c32379c3089e325439c4316847..0a5d0d8befa8408d02b3eaaf278d6a936a83e102 100644 (file)
@@ -256,7 +256,7 @@ static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
        .restore = pnp_bus_resume,
 };
 
-struct bus_type pnp_bus_type = {
+const struct bus_type pnp_bus_type = {
        .name    = "pnp",
        .match   = pnp_bus_match,
        .probe   = pnp_device_probe,
index 829e0dba2fda3beca8f52b00d4f5e12add8c0803..ab3350ce2d6214416a211e3fe3cf11936164688f 100644 (file)
@@ -61,13 +61,11 @@ static int as3722_poweroff_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int as3722_poweroff_remove(struct platform_device *pdev)
+static void as3722_poweroff_remove(struct platform_device *pdev)
 {
        if (pm_power_off == as3722_pm_power_off)
                pm_power_off = NULL;
        as3722_pm_poweroff = NULL;
-
-       return 0;
 }
 
 static struct platform_driver as3722_poweroff_driver = {
@@ -75,7 +73,7 @@ static struct platform_driver as3722_poweroff_driver = {
                .name = "as3722-power-off",
        },
        .probe = as3722_poweroff_probe,
-       .remove = as3722_poweroff_remove,
+       .remove_new = as3722_poweroff_remove,
 };
 
 module_platform_driver(as3722_poweroff_driver);
index dd5399785b6917a3d1e40a878411283eb2c0fce2..93eece0278652207b5fdfd597268f7ef1beaf867 100644 (file)
@@ -57,7 +57,7 @@ static struct shdwc {
        void __iomem *mpddrc_base;
 } at91_shdwc;
 
-static void __init at91_wakeup_status(struct platform_device *pdev)
+static void at91_wakeup_status(struct platform_device *pdev)
 {
        const char *reason;
        u32 reg = readl(at91_shdwc.shdwc_base + AT91_SHDW_SR);
@@ -149,7 +149,7 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
        writel(wakeup_mode | mode, at91_shdwc.shdwc_base + AT91_SHDW_MR);
 }
 
-static int __init at91_poweroff_probe(struct platform_device *pdev)
+static int at91_poweroff_probe(struct platform_device *pdev)
 {
        struct device_node *np;
        u32 ddr_type;
@@ -202,7 +202,7 @@ clk_disable:
        return ret;
 }
 
-static int __exit at91_poweroff_remove(struct platform_device *pdev)
+static void at91_poweroff_remove(struct platform_device *pdev)
 {
        if (pm_power_off == at91_poweroff)
                pm_power_off = NULL;
@@ -211,8 +211,6 @@ static int __exit at91_poweroff_remove(struct platform_device *pdev)
                iounmap(at91_shdwc.mpddrc_base);
 
        clk_disable_unprepare(at91_shdwc.sclk);
-
-       return 0;
 }
 
 static const struct of_device_id at91_poweroff_of_match[] = {
@@ -224,13 +222,14 @@ static const struct of_device_id at91_poweroff_of_match[] = {
 MODULE_DEVICE_TABLE(of, at91_poweroff_of_match);
 
 static struct platform_driver at91_poweroff_driver = {
-       .remove = __exit_p(at91_poweroff_remove),
+       .probe = at91_poweroff_probe,
+       .remove_new = at91_poweroff_remove,
        .driver = {
                .name = "at91-poweroff",
                .of_match_table = at91_poweroff_of_match,
        },
 };
-module_platform_driver_probe(at91_poweroff_driver, at91_poweroff_probe);
+module_platform_driver(at91_poweroff_driver);
 
 MODULE_AUTHOR("Atmel Corporation");
 MODULE_DESCRIPTION("Shutdown driver for Atmel SoCs");
index aa9b012d3d00b8489b4a82539aad03157987b328..16512654295f5c4007860d01859f42154177ec70 100644 (file)
@@ -337,7 +337,7 @@ static int at91_rcdev_init(struct at91_reset *reset,
        return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
 }
 
-static int __init at91_reset_probe(struct platform_device *pdev)
+static int at91_reset_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match;
        struct at91_reset *reset;
@@ -417,24 +417,23 @@ disable_clk:
        return ret;
 }
 
-static int __exit at91_reset_remove(struct platform_device *pdev)
+static void at91_reset_remove(struct platform_device *pdev)
 {
        struct at91_reset *reset = platform_get_drvdata(pdev);
 
        unregister_restart_handler(&reset->nb);
        clk_disable_unprepare(reset->sclk);
-
-       return 0;
 }
 
 static struct platform_driver at91_reset_driver = {
-       .remove = __exit_p(at91_reset_remove),
+       .probe = at91_reset_probe,
+       .remove_new = at91_reset_remove,
        .driver = {
                .name = "at91-reset",
                .of_match_table = at91_reset_of_match,
        },
 };
-module_platform_driver_probe(at91_reset_driver, at91_reset_probe);
+module_platform_driver(at91_reset_driver);
 
 MODULE_AUTHOR("Atmel Corporation");
 MODULE_DESCRIPTION("Reset driver for Atmel SoCs");
index e76b102b57b1fc9c3d5fc274f35520b55158269c..959ce0dbe91d112d006176bd36165cf208e4f810 100644 (file)
@@ -107,7 +107,7 @@ static const unsigned long long sdwc_dbc_period[] = {
        0, 3, 32, 512, 4096, 32768,
 };
 
-static void __init at91_wakeup_status(struct platform_device *pdev)
+static void at91_wakeup_status(struct platform_device *pdev)
 {
        struct shdwc *shdw = platform_get_drvdata(pdev);
        const struct reg_config *rcfg = shdw->rcfg;
@@ -329,7 +329,7 @@ static const struct of_device_id at91_pmc_ids[] = {
        { /* Sentinel. */ }
 };
 
-static int __init at91_shdwc_probe(struct platform_device *pdev)
+static int at91_shdwc_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match;
        struct device_node *np;
@@ -421,7 +421,7 @@ clk_disable:
        return ret;
 }
 
-static int __exit at91_shdwc_remove(struct platform_device *pdev)
+static void at91_shdwc_remove(struct platform_device *pdev)
 {
        struct shdwc *shdw = platform_get_drvdata(pdev);
 
@@ -437,18 +437,17 @@ static int __exit at91_shdwc_remove(struct platform_device *pdev)
        iounmap(shdw->pmc_base);
 
        clk_disable_unprepare(shdw->sclk);
-
-       return 0;
 }
 
 static struct platform_driver at91_shdwc_driver = {
-       .remove = __exit_p(at91_shdwc_remove),
+       .probe = at91_shdwc_probe,
+       .remove_new = at91_shdwc_remove,
        .driver = {
                .name = "at91-shdwc",
                .of_match_table = at91_shdwc_of_match,
        },
 };
-module_platform_driver_probe(at91_shdwc_driver, at91_shdwc_probe);
+module_platform_driver(at91_shdwc_driver);
 
 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
 MODULE_DESCRIPTION("Atmel shutdown controller driver");
index 98f20251a6d18d7cd590ef5d9bd02df5301e0800..b4aa50e9685e1fbb496901a4d75bd9bce64779cb 100644 (file)
@@ -233,7 +233,7 @@ static int atc260x_pwrc_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int atc260x_pwrc_remove(struct platform_device *pdev)
+static void atc260x_pwrc_remove(struct platform_device *pdev)
 {
        struct atc260x_pwrc *priv = platform_get_drvdata(pdev);
 
@@ -243,13 +243,11 @@ static int atc260x_pwrc_remove(struct platform_device *pdev)
        }
 
        unregister_restart_handler(&priv->restart_nb);
-
-       return 0;
 }
 
 static struct platform_driver atc260x_pwrc_driver = {
        .probe = atc260x_pwrc_probe,
-       .remove = atc260x_pwrc_remove,
+       .remove_new = atc260x_pwrc_remove,
        .driver = {
                .name = "atc260x-pwrc",
        },
index 3aa19765772dce4bbe8b8a39dc27725cc8fd12b0..d1e177176fa1f157fae75f1804d5436f19a13c6d 100644 (file)
 
 struct gpio_restart {
        struct gpio_desc *reset_gpio;
-       struct notifier_block restart_handler;
        u32 active_delay_ms;
        u32 inactive_delay_ms;
        u32 wait_delay_ms;
 };
 
-static int gpio_restart_notify(struct notifier_block *this,
-                               unsigned long mode, void *cmd)
+static int gpio_restart_notify(struct sys_off_data *data)
 {
-       struct gpio_restart *gpio_restart =
-               container_of(this, struct gpio_restart, restart_handler);
+       struct gpio_restart *gpio_restart = data->cb_data;
 
        /* drive it active, also inactive->active edge */
        gpiod_direction_output(gpio_restart->reset_gpio, 1);
@@ -52,6 +49,7 @@ static int gpio_restart_probe(struct platform_device *pdev)
 {
        struct gpio_restart *gpio_restart;
        bool open_source = false;
+       int priority = 129;
        u32 property;
        int ret;
 
@@ -71,8 +69,6 @@ static int gpio_restart_probe(struct platform_device *pdev)
                return ret;
        }
 
-       gpio_restart->restart_handler.notifier_call = gpio_restart_notify;
-       gpio_restart->restart_handler.priority = 129;
        gpio_restart->active_delay_ms = 100;
        gpio_restart->inactive_delay_ms = 100;
        gpio_restart->wait_delay_ms = 3000;
@@ -83,7 +79,7 @@ static int gpio_restart_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "Invalid priority property: %u\n",
                                        property);
                else
-                       gpio_restart->restart_handler.priority = property;
+                       priority = property;
        }
 
        of_property_read_u32(pdev->dev.of_node, "active-delay",
@@ -93,9 +89,11 @@ static int gpio_restart_probe(struct platform_device *pdev)
        of_property_read_u32(pdev->dev.of_node, "wait-delay",
                        &gpio_restart->wait_delay_ms);
 
-       platform_set_drvdata(pdev, gpio_restart);
-
-       ret = register_restart_handler(&gpio_restart->restart_handler);
+       ret = devm_register_sys_off_handler(&pdev->dev,
+                                           SYS_OFF_MODE_RESTART,
+                                           priority,
+                                           gpio_restart_notify,
+                                           gpio_restart);
        if (ret) {
                dev_err(&pdev->dev, "%s: cannot register restart handler, %d\n",
                                __func__, ret);
@@ -105,19 +103,6 @@ static int gpio_restart_probe(struct platform_device *pdev)
        return 0;
 }
 
-static void gpio_restart_remove(struct platform_device *pdev)
-{
-       struct gpio_restart *gpio_restart = platform_get_drvdata(pdev);
-       int ret;
-
-       ret = unregister_restart_handler(&gpio_restart->restart_handler);
-       if (ret) {
-               dev_err(&pdev->dev,
-                               "%s: cannot unregister restart handler, %d\n",
-                               __func__, ret);
-       }
-}
-
 static const struct of_device_id of_gpio_restart_match[] = {
        { .compatible = "gpio-restart", },
        {},
@@ -125,7 +110,6 @@ static const struct of_device_id of_gpio_restart_match[] = {
 
 static struct platform_driver gpio_restart_driver = {
        .probe = gpio_restart_probe,
-       .remove_new = gpio_restart_remove,
        .driver = {
                .name = "restart-gpio",
                .of_match_table = of_gpio_restart_match,
index eea05921a054b54e7543c8d549a08a79763fe912..fa25fbd5393433930845cc42de6bd91ea7cb8b81 100644 (file)
@@ -286,7 +286,7 @@ static int ltc2952_poweroff_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int ltc2952_poweroff_remove(struct platform_device *pdev)
+static void ltc2952_poweroff_remove(struct platform_device *pdev)
 {
        struct ltc2952_poweroff *data = platform_get_drvdata(pdev);
 
@@ -295,7 +295,6 @@ static int ltc2952_poweroff_remove(struct platform_device *pdev)
        hrtimer_cancel(&data->timer_wde);
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &data->panic_notifier);
-       return 0;
 }
 
 static const struct of_device_id of_ltc2952_poweroff_match[] = {
@@ -306,7 +305,7 @@ MODULE_DEVICE_TABLE(of, of_ltc2952_poweroff_match);
 
 static struct platform_driver ltc2952_poweroff_driver = {
        .probe = ltc2952_poweroff_probe,
-       .remove = ltc2952_poweroff_remove,
+       .remove_new = ltc2952_poweroff_remove,
        .driver = {
                .name = "ltc2952-poweroff",
                .of_match_table = of_ltc2952_poweroff_match,
index 108167f7738bbca05b8d8443cb146314f44746c1..57a63c0ab7fb702e4c15aea74f3d5b36d21b57fd 100644 (file)
@@ -70,12 +70,10 @@ static int mt6323_pwrc_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int mt6323_pwrc_remove(struct platform_device *pdev)
+static void mt6323_pwrc_remove(struct platform_device *pdev)
 {
        if (pm_power_off == &mt6323_do_pwroff)
                pm_power_off = NULL;
-
-       return 0;
 }
 
 static const struct of_device_id mt6323_pwrc_dt_match[] = {
@@ -86,7 +84,7 @@ MODULE_DEVICE_TABLE(of, mt6323_pwrc_dt_match);
 
 static struct platform_driver mt6323_pwrc_driver = {
        .probe          = mt6323_pwrc_probe,
-       .remove         = mt6323_pwrc_remove,
+       .remove_new     = mt6323_pwrc_remove,
        .driver         = {
                .name   = "mt6323-pwrc",
                .of_match_table = mt6323_pwrc_dt_match,
index de35d24bb7ef3edcf22afbdf597ad3436cba0ae5..1775b318d0ef4187cd96031a5a83af3b1e94358a 100644 (file)
 #include <linux/types.h>
 
 struct pwr_mlxbf {
-       struct work_struct send_work;
+       struct work_struct reboot_work;
+       struct work_struct shutdown_work;
        const char *hid;
 };
 
-static void pwr_mlxbf_send_work(struct work_struct *work)
+static void pwr_mlxbf_reboot_work(struct work_struct *work)
+{
+       acpi_bus_generate_netlink_event("button/reboot.*", "Reboot Button", 0x80, 1);
+}
+
+static void pwr_mlxbf_shutdown_work(struct work_struct *work)
 {
        acpi_bus_generate_netlink_event("button/power.*", "Power Button", 0x80, 1);
 }
@@ -33,10 +39,10 @@ static irqreturn_t pwr_mlxbf_irq(int irq, void *ptr)
        struct pwr_mlxbf *priv = ptr;
 
        if (!strncmp(priv->hid, rst_pwr_hid, 8))
-               emergency_restart();
+               schedule_work(&priv->reboot_work);
 
        if (!strncmp(priv->hid, low_pwr_hid, 8))
-               schedule_work(&priv->send_work);
+               schedule_work(&priv->shutdown_work);
 
        return IRQ_HANDLED;
 }
@@ -64,7 +70,11 @@ static int pwr_mlxbf_probe(struct platform_device *pdev)
        if (irq < 0)
                return dev_err_probe(dev, irq, "Error getting %s irq.\n", priv->hid);
 
-       err = devm_work_autocancel(dev, &priv->send_work, pwr_mlxbf_send_work);
+       err = devm_work_autocancel(dev, &priv->shutdown_work, pwr_mlxbf_shutdown_work);
+       if (err)
+               return err;
+
+       err = devm_work_autocancel(dev, &priv->reboot_work, pwr_mlxbf_reboot_work);
        if (err)
                return err;
 
index 0ddf7f25f7b8749cf92c800498efd37095c97553..e0f2ff6b147c19a932d600513f652b42df1b03fd 100644 (file)
@@ -111,15 +111,14 @@ static int qnap_power_off_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int qnap_power_off_remove(struct platform_device *pdev)
+static void qnap_power_off_remove(struct platform_device *pdev)
 {
        pm_power_off = NULL;
-       return 0;
 }
 
 static struct platform_driver qnap_power_off_driver = {
        .probe  = qnap_power_off_probe,
-       .remove = qnap_power_off_remove,
+       .remove_new = qnap_power_off_remove,
        .driver = {
                .name   = "qnap_power_off",
                .of_match_table = of_match_ptr(qnap_power_off_of_match_table),
index 7f87fbb8b051e23cc17f107efc405302223cc341..15160809c423a5d4e67fa07cea9f9b1c20cdd06f 100644 (file)
@@ -52,12 +52,10 @@ static int regulator_poweroff_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int regulator_poweroff_remove(__maybe_unused struct platform_device *pdev)
+static void regulator_poweroff_remove(struct platform_device *pdev)
 {
        if (pm_power_off == &regulator_poweroff_do_poweroff)
                pm_power_off = NULL;
-
-       return 0;
 }
 
 static const struct of_device_id of_regulator_poweroff_match[] = {
@@ -68,7 +66,7 @@ MODULE_DEVICE_TABLE(of, of_regulator_poweroff_match);
 
 static struct platform_driver regulator_poweroff_driver = {
        .probe = regulator_poweroff_probe,
-       .remove = regulator_poweroff_remove,
+       .remove_new = regulator_poweroff_remove,
        .driver = {
                .name = "poweroff-regulator",
                .of_match_table = of_regulator_poweroff_match,
index 28f1822db162610c2b7c7a5dc4a907a31af43224..f4d6004793d3aa0cd5af28fa05e2de5df234c812 100644 (file)
@@ -33,12 +33,10 @@ static int restart_poweroff_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int restart_poweroff_remove(struct platform_device *pdev)
+static void restart_poweroff_remove(struct platform_device *pdev)
 {
        if (pm_power_off == &restart_poweroff_do_poweroff)
                pm_power_off = NULL;
-
-       return 0;
 }
 
 static const struct of_device_id of_restart_poweroff_match[] = {
@@ -49,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_restart_poweroff_match);
 
 static struct platform_driver restart_poweroff_driver = {
        .probe = restart_poweroff_probe,
-       .remove = restart_poweroff_remove,
+       .remove_new = restart_poweroff_remove,
        .driver = {
                .name = "poweroff-restart",
                .of_match_table = of_restart_poweroff_match,
index bd3b396558e0df8c469ddc18869620289885665e..5df9b41c68c79cc93f9f1a28dc4e3ec85522b992 100644 (file)
@@ -59,11 +59,10 @@ fail_unmap:
        return error;
 }
 
-static int rmobile_reset_remove(struct platform_device *pdev)
+static void rmobile_reset_remove(struct platform_device *pdev)
 {
        unregister_restart_handler(&rmobile_reset_nb);
        iounmap(sysc_base2);
-       return 0;
 }
 
 static const struct of_device_id rmobile_reset_of_match[] = {
@@ -74,7 +73,7 @@ MODULE_DEVICE_TABLE(of, rmobile_reset_of_match);
 
 static struct platform_driver rmobile_reset_driver = {
        .probe = rmobile_reset_probe,
-       .remove = rmobile_reset_remove,
+       .remove_new = rmobile_reset_remove,
        .driver = {
                .name = "rmobile_reset",
                .of_match_table = rmobile_reset_of_match,
index c3aab7f59345a502a31713b54682dbf960dec10f..1b2ce7734260c7170803371449b94e9e53ce7677 100644 (file)
@@ -76,12 +76,10 @@ static int syscon_poweroff_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int syscon_poweroff_remove(struct platform_device *pdev)
+static void syscon_poweroff_remove(struct platform_device *pdev)
 {
        if (pm_power_off == syscon_poweroff)
                pm_power_off = NULL;
-
-       return 0;
 }
 
 static const struct of_device_id syscon_poweroff_of_match[] = {
@@ -91,7 +89,7 @@ static const struct of_device_id syscon_poweroff_of_match[] = {
 
 static struct platform_driver syscon_poweroff_driver = {
        .probe = syscon_poweroff_probe,
-       .remove = syscon_poweroff_remove,
+       .remove_new = syscon_poweroff_remove,
        .driver = {
                .name = "syscon-poweroff",
                .of_match_table = syscon_poweroff_of_match,
index 5ec819eac7da4d1b6535308e6551fa72b3dfe1b7..ee8e9f4b837eaee09f2224c6cda81cfe08f296f8 100644 (file)
@@ -62,19 +62,21 @@ static int tps65086_restart_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int tps65086_restart_remove(struct platform_device *pdev)
+static void tps65086_restart_remove(struct platform_device *pdev)
 {
        struct tps65086_restart *tps65086_restart = platform_get_drvdata(pdev);
        int ret;
 
        ret = unregister_restart_handler(&tps65086_restart->handler);
        if (ret) {
+               /*
+                * tps65086_restart_probe() registered the restart handler. So
+                * unregistering should work fine. Checking the error code
+                * shouldn't be needed, still doing it for completeness.
+                */
                dev_err(&pdev->dev, "%s: cannot unregister restart handler: %d\n",
                        __func__, ret);
-               return -ENODEV;
        }
-
-       return 0;
 }
 
 static const struct platform_device_id tps65086_restart_id_table[] = {
@@ -88,7 +90,7 @@ static struct platform_driver tps65086_restart_driver = {
                .name = "tps65086-restart",
        },
        .probe = tps65086_restart_probe,
-       .remove = tps65086_restart_remove,
+       .remove_new = tps65086_restart_remove,
        .id_table = tps65086_restart_id_table,
 };
 module_platform_driver(tps65086_restart_driver);
index 1db290ee2591adef9e89437eec0dde519e958675..2b393eb5c2820e18d6244fad53efc6ef689613de 100644 (file)
 #define BQ24190_REG_POC_WDT_RESET_SHIFT                6
 #define BQ24190_REG_POC_CHG_CONFIG_MASK                (BIT(5) | BIT(4))
 #define BQ24190_REG_POC_CHG_CONFIG_SHIFT       4
-#define BQ24190_REG_POC_CHG_CONFIG_DISABLE             0x0
-#define BQ24190_REG_POC_CHG_CONFIG_CHARGE              0x1
-#define BQ24190_REG_POC_CHG_CONFIG_OTG                 0x2
-#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT             0x3
+#define BQ24190_REG_POC_CHG_CONFIG_DISABLE     0x0
+#define BQ24190_REG_POC_CHG_CONFIG_CHARGE      0x1
+#define BQ24190_REG_POC_CHG_CONFIG_OTG         0x2
+#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT     0x3
+#define BQ24296_REG_POC_OTG_CONFIG_MASK                BIT(5)
+#define BQ24296_REG_POC_OTG_CONFIG_SHIFT       5
+#define BQ24296_REG_POC_CHG_CONFIG_MASK                BIT(4)
+#define BQ24296_REG_POC_CHG_CONFIG_SHIFT       4
+#define BQ24296_REG_POC_OTG_CONFIG_DISABLE     0x0
+#define BQ24296_REG_POC_OTG_CONFIG_OTG         0x1
 #define BQ24190_REG_POC_SYS_MIN_MASK           (BIT(3) | BIT(2) | BIT(1))
 #define BQ24190_REG_POC_SYS_MIN_SHIFT          1
 #define BQ24190_REG_POC_SYS_MIN_MIN                    3000
 #define BQ24190_REG_F_BAT_FAULT_SHIFT          3
 #define BQ24190_REG_F_NTC_FAULT_MASK           (BIT(2) | BIT(1) | BIT(0))
 #define BQ24190_REG_F_NTC_FAULT_SHIFT          0
+#define BQ24296_REG_F_NTC_FAULT_MASK           (BIT(1) | BIT(0))
+#define BQ24296_REG_F_NTC_FAULT_SHIFT          0
 
 #define BQ24190_REG_VPRS       0x0A /* Vendor/Part/Revision Status */
 #define BQ24190_REG_VPRS_PN_MASK               (BIT(5) | BIT(4) | BIT(3))
 #define BQ24190_REG_VPRS_PN_SHIFT              3
-#define BQ24190_REG_VPRS_PN_24190                      0x4
-#define BQ24190_REG_VPRS_PN_24192                      0x5 /* Also 24193, 24196 */
-#define BQ24190_REG_VPRS_PN_24192I                     0x3
+#define BQ24190_REG_VPRS_PN_24190              0x4
+#define BQ24190_REG_VPRS_PN_24192              0x5 /* Also 24193, 24196 */
+#define BQ24190_REG_VPRS_PN_24192I             0x3
+#define BQ24296_REG_VPRS_PN_MASK               (BIT(7) | BIT(6) | BIT(5))
+#define BQ24296_REG_VPRS_PN_SHIFT              5
+#define BQ24296_REG_VPRS_PN_24296              0x1
 #define BQ24190_REG_VPRS_TS_PROFILE_MASK       BIT(2)
 #define BQ24190_REG_VPRS_TS_PROFILE_SHIFT      2
 #define BQ24190_REG_VPRS_DEV_REG_MASK          (BIT(1) | BIT(0))
 #define BQ24190_REG_VPRS_DEV_REG_SHIFT         0
 
-/*
- * The FAULT register is latched by the bq24190 (except for NTC_FAULT)
- * so the first read after a fault returns the latched value and subsequent
- * reads return the current value.  In order to return the fault status
- * to the user, have the interrupt handler save the reg's value and retrieve
- * it in the appropriate health/status routine.
- */
-struct bq24190_dev_info {
-       struct i2c_client               *client;
-       struct device                   *dev;
-       struct extcon_dev               *edev;
-       struct power_supply             *charger;
-       struct power_supply             *battery;
-       struct delayed_work             input_current_limit_work;
-       char                            model_name[I2C_NAME_SIZE];
-       bool                            initialized;
-       bool                            irq_event;
-       bool                            otg_vbus_enabled;
-       int                             charge_type;
-       u16                             sys_min;
-       u16                             iprechg;
-       u16                             iterm;
-       u32                             ichg;
-       u32                             ichg_max;
-       u32                             vreg;
-       u32                             vreg_max;
-       struct mutex                    f_reg_lock;
-       u8                              f_reg;
-       u8                              ss_reg;
-       u8                              watchdog;
-};
-
-static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
-                                          const union power_supply_propval *val);
-
-static const unsigned int bq24190_usb_extcon_cable[] = {
-       EXTCON_USB,
-       EXTCON_NONE,
-};
-
 /*
  * The tables below provide a 2-way mapping for the value that goes in
  * the register field and the real-world value that it represents.
@@ -211,6 +182,9 @@ static const int bq24190_ccc_ichg_values[] = {
        4096000, 4160000, 4224000, 4288000, 4352000, 4416000, 4480000, 4544000
 };
 
+/* ICHG higher than 3008mA is not supported in BQ24296 */
+#define BQ24296_CCC_ICHG_VALUES_LEN    40
+
 /* REG04[7:2] (VREG) in uV */
 static const int bq24190_cvc_vreg_values[] = {
        3504000, 3520000, 3536000, 3552000, 3568000, 3584000, 3600000, 3616000,
@@ -228,6 +202,68 @@ static const int bq24190_ictrc_treg_values[] = {
        600, 800, 1000, 1200
 };
 
+enum bq24190_chip {
+       BQ24190,
+       BQ24192,
+       BQ24192i,
+       BQ24196,
+       BQ24296,
+};
+
+/*
+ * The FAULT register is latched by the bq24190 (except for NTC_FAULT)
+ * so the first read after a fault returns the latched value and subsequent
+ * reads return the current value.  In order to return the fault status
+ * to the user, have the interrupt handler save the reg's value and retrieve
+ * it in the appropriate health/status routine.
+ */
+struct bq24190_dev_info {
+       struct i2c_client               *client;
+       struct device                   *dev;
+       struct extcon_dev               *edev;
+       struct power_supply             *charger;
+       struct power_supply             *battery;
+       struct delayed_work             input_current_limit_work;
+       char                            model_name[I2C_NAME_SIZE];
+       bool                            initialized;
+       bool                            irq_event;
+       bool                            otg_vbus_enabled;
+       int                             charge_type;
+       u16                             sys_min;
+       u16                             iprechg;
+       u16                             iterm;
+       u32                             ichg;
+       u32                             ichg_max;
+       u32                             vreg;
+       u32                             vreg_max;
+       struct mutex                    f_reg_lock;
+       u8                              f_reg;
+       u8                              ss_reg;
+       u8                              watchdog;
+       const struct bq24190_chip_info  *info;
+};
+
+struct bq24190_chip_info {
+       int ichg_array_size;
+#ifdef CONFIG_REGULATOR
+       const struct regulator_desc *vbus_desc;
+#endif
+       int (*check_chip)(struct bq24190_dev_info *bdi);
+       int (*set_chg_config)(struct bq24190_dev_info *bdi, const u8 chg_config);
+       int (*set_otg_vbus)(struct bq24190_dev_info *bdi, bool enable);
+       u8 ntc_fault_mask;
+       int (*get_ntc_status)(const u8 value);
+};
+
+static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
+                                          const union power_supply_propval *val);
+
+static const unsigned int bq24190_usb_extcon_cable[] = {
+       EXTCON_USB,
+       EXTCON_NONE,
+};
+
+
 /*
  * Return the index in 'tbl' of greatest value that is less than or equal to
  * 'val'.  The index range returned is 0 to 'tbl_size' - 1.  Assumes that
@@ -529,6 +565,43 @@ static int bq24190_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
        return ret;
 }
 
+static int bq24296_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
+{
+       int ret;
+
+       ret = pm_runtime_resume_and_get(bdi->dev);
+       if (ret < 0) {
+               dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
+               return ret;
+       }
+
+       bdi->otg_vbus_enabled = enable;
+       if (enable) {
+               ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+                                        BQ24296_REG_POC_CHG_CONFIG_MASK,
+                                        BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+                                        BQ24190_REG_POC_CHG_CONFIG_DISABLE);
+
+               if (ret < 0)
+                       goto out;
+
+               ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+                                        BQ24296_REG_POC_OTG_CONFIG_MASK,
+                                        BQ24296_REG_POC_OTG_CONFIG_SHIFT,
+                                        BQ24296_REG_POC_OTG_CONFIG_OTG);
+       } else
+               ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+                                        BQ24296_REG_POC_OTG_CONFIG_MASK,
+                                        BQ24296_REG_POC_OTG_CONFIG_SHIFT,
+                                        BQ24296_REG_POC_OTG_CONFIG_DISABLE);
+
+out:
+       pm_runtime_mark_last_busy(bdi->dev);
+       pm_runtime_put_autosuspend(bdi->dev);
+
+       return ret;
+}
+
 #ifdef CONFIG_REGULATOR
 static int bq24190_vbus_enable(struct regulator_dev *dev)
 {
@@ -567,6 +640,43 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev)
        return bdi->otg_vbus_enabled;
 }
 
+static int bq24296_vbus_enable(struct regulator_dev *dev)
+{
+       return bq24296_set_otg_vbus(rdev_get_drvdata(dev), true);
+}
+
+static int bq24296_vbus_disable(struct regulator_dev *dev)
+{
+       return bq24296_set_otg_vbus(rdev_get_drvdata(dev), false);
+}
+
+static int bq24296_vbus_is_enabled(struct regulator_dev *dev)
+{
+       struct bq24190_dev_info *bdi = rdev_get_drvdata(dev);
+       int ret;
+       u8 val;
+
+       ret = pm_runtime_resume_and_get(bdi->dev);
+       if (ret < 0) {
+               dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
+               return ret;
+       }
+
+       ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+                               BQ24296_REG_POC_OTG_CONFIG_MASK,
+                               BQ24296_REG_POC_OTG_CONFIG_SHIFT, &val);
+
+       pm_runtime_mark_last_busy(bdi->dev);
+       pm_runtime_put_autosuspend(bdi->dev);
+
+       if (ret)
+               return ret;
+
+       bdi->otg_vbus_enabled = (val == BQ24296_REG_POC_OTG_CONFIG_OTG);
+
+       return bdi->otg_vbus_enabled;
+}
+
 static const struct regulator_ops bq24190_vbus_ops = {
        .enable = bq24190_vbus_enable,
        .disable = bq24190_vbus_disable,
@@ -583,6 +693,22 @@ static const struct regulator_desc bq24190_vbus_desc = {
        .n_voltages = 1,
 };
 
+static const struct regulator_ops bq24296_vbus_ops = {
+       .enable = bq24296_vbus_enable,
+       .disable = bq24296_vbus_disable,
+       .is_enabled = bq24296_vbus_is_enabled,
+};
+
+static const struct regulator_desc bq24296_vbus_desc = {
+       .name = "usb_otg_vbus",
+       .of_match = "usb-otg-vbus",
+       .type = REGULATOR_VOLTAGE,
+       .owner = THIS_MODULE,
+       .ops = &bq24296_vbus_ops,
+       .fixed_uV = 5000000,
+       .n_voltages = 1,
+};
+
 static const struct regulator_init_data bq24190_vbus_init_data = {
        .constraints = {
                .valid_ops_mask = REGULATOR_CHANGE_STATUS,
@@ -602,7 +728,7 @@ static int bq24190_register_vbus_regulator(struct bq24190_dev_info *bdi)
        else
                cfg.init_data = &bq24190_vbus_init_data;
        cfg.driver_data = bdi;
-       reg = devm_regulator_register(bdi->dev, &bq24190_vbus_desc, &cfg);
+       reg = devm_regulator_register(bdi->dev, bdi->info->vbus_desc, &cfg);
        if (IS_ERR(reg)) {
                ret = PTR_ERR(reg);
                dev_err(bdi->dev, "Can't register regulator: %d\n", ret);
@@ -678,7 +804,7 @@ static int bq24190_set_config(struct bq24190_dev_info *bdi)
                                            BQ24190_REG_CCC_ICHG_MASK,
                                            BQ24190_REG_CCC_ICHG_SHIFT,
                                            bq24190_ccc_ichg_values,
-                                           ARRAY_SIZE(bq24190_ccc_ichg_values),
+                                           bdi->info->ichg_array_size,
                                            bdi->ichg);
                if (ret < 0)
                        return ret;
@@ -777,6 +903,24 @@ static int bq24190_charger_get_charge_type(struct bq24190_dev_info *bdi,
        return 0;
 }
 
+static int bq24190_battery_set_chg_config(struct bq24190_dev_info *bdi,
+               const u8 chg_config)
+{
+       return bq24190_write_mask(bdi, BQ24190_REG_POC,
+                       BQ24190_REG_POC_CHG_CONFIG_MASK,
+                       BQ24190_REG_POC_CHG_CONFIG_SHIFT,
+                       chg_config);
+}
+
+static int bq24296_battery_set_chg_config(struct bq24190_dev_info *bdi,
+               const u8 chg_config)
+{
+       return bq24190_write_mask(bdi, BQ24190_REG_POC,
+                       BQ24296_REG_POC_CHG_CONFIG_MASK,
+                       BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+                       chg_config);
+}
+
 static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
                const union power_supply_propval *val)
 {
@@ -835,9 +979,50 @@ static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
                        return ret;
        }
 
-       return bq24190_write_mask(bdi, BQ24190_REG_POC,
-                       BQ24190_REG_POC_CHG_CONFIG_MASK,
-                       BQ24190_REG_POC_CHG_CONFIG_SHIFT, chg_config);
+       return bdi->info->set_chg_config(bdi, chg_config);
+}
+
+static int bq24190_charger_get_ntc_status(u8 value)
+{
+       int health;
+
+       switch (value >> BQ24190_REG_F_NTC_FAULT_SHIFT & 0x7) {
+       case 0x1: /* TS1  Cold */
+       case 0x3: /* TS2  Cold */
+       case 0x5: /* Both Cold */
+               health = POWER_SUPPLY_HEALTH_COLD;
+               break;
+       case 0x2: /* TS1  Hot */
+       case 0x4: /* TS2  Hot */
+       case 0x6: /* Both Hot */
+               health = POWER_SUPPLY_HEALTH_OVERHEAT;
+               break;
+       default:
+               health = POWER_SUPPLY_HEALTH_UNKNOWN;
+       }
+
+       return health;
+}
+
+static int bq24296_charger_get_ntc_status(u8 value)
+{
+       int health;
+
+       switch (value >> BQ24296_REG_F_NTC_FAULT_SHIFT & 0x3) {
+       case 0x0: /* Normal */
+               health = POWER_SUPPLY_HEALTH_GOOD;
+               break;
+       case 0x1: /* Hot */
+               health = POWER_SUPPLY_HEALTH_OVERHEAT;
+               break;
+       case 0x2: /* Cold */
+               health = POWER_SUPPLY_HEALTH_COLD;
+               break;
+       default:
+               health = POWER_SUPPLY_HEALTH_UNKNOWN;
+       }
+
+       return health;
 }
 
 static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
@@ -850,21 +1035,8 @@ static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
        v = bdi->f_reg;
        mutex_unlock(&bdi->f_reg_lock);
 
-       if (v & BQ24190_REG_F_NTC_FAULT_MASK) {
-               switch (v >> BQ24190_REG_F_NTC_FAULT_SHIFT & 0x7) {
-               case 0x1: /* TS1  Cold */
-               case 0x3: /* TS2  Cold */
-               case 0x5: /* Both Cold */
-                       health = POWER_SUPPLY_HEALTH_COLD;
-                       break;
-               case 0x2: /* TS1  Hot */
-               case 0x4: /* TS2  Hot */
-               case 0x6: /* Both Hot */
-                       health = POWER_SUPPLY_HEALTH_OVERHEAT;
-                       break;
-               default:
-                       health = POWER_SUPPLY_HEALTH_UNKNOWN;
-               }
+       if (v & bdi->info->ntc_fault_mask) {
+               health = bdi->info->get_ntc_status(v);
        } else if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
                health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
        } else if (v & BQ24190_REG_F_CHRG_FAULT_MASK) {
@@ -1015,7 +1187,7 @@ static int bq24190_charger_get_current(struct bq24190_dev_info *bdi,
        ret = bq24190_get_field_val(bdi, BQ24190_REG_CCC,
                        BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
                        bq24190_ccc_ichg_values,
-                       ARRAY_SIZE(bq24190_ccc_ichg_values), &curr);
+                       bdi->info->ichg_array_size, &curr);
        if (ret < 0)
                return ret;
 
@@ -1055,7 +1227,7 @@ static int bq24190_charger_set_current(struct bq24190_dev_info *bdi,
        ret = bq24190_set_field_val(bdi, BQ24190_REG_CCC,
                        BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
                        bq24190_ccc_ichg_values,
-                       ARRAY_SIZE(bq24190_ccc_ichg_values), curr);
+                       bdi->info->ichg_array_size, curr);
        if (ret < 0)
                return ret;
 
@@ -1395,26 +1567,9 @@ static int bq24190_battery_get_health(struct bq24190_dev_info *bdi,
        if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
                health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
        } else {
-               v &= BQ24190_REG_F_NTC_FAULT_MASK;
-               v >>= BQ24190_REG_F_NTC_FAULT_SHIFT;
+               v &= bdi->info->ntc_fault_mask;
 
-               switch (v) {
-               case 0x0: /* Normal */
-                       health = POWER_SUPPLY_HEALTH_GOOD;
-                       break;
-               case 0x1: /* TS1 Cold */
-               case 0x3: /* TS2 Cold */
-               case 0x5: /* Both Cold */
-                       health = POWER_SUPPLY_HEALTH_COLD;
-                       break;
-               case 0x2: /* TS1 Hot */
-               case 0x4: /* TS2 Hot */
-               case 0x6: /* Both Hot */
-                       health = POWER_SUPPLY_HEALTH_OVERHEAT;
-                       break;
-               default:
-                       health = POWER_SUPPLY_HEALTH_UNKNOWN;
-               }
+               health = v ? bdi->info->get_ntc_status(v) : POWER_SUPPLY_HEALTH_GOOD;
        }
 
        val->intval = health;
@@ -1601,12 +1756,13 @@ static int bq24190_configure_usb_otg(struct bq24190_dev_info *bdi, u8 ss_reg)
 static void bq24190_check_status(struct bq24190_dev_info *bdi)
 {
        const u8 battery_mask_ss = BQ24190_REG_SS_CHRG_STAT_MASK;
-       const u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK
-                               | BQ24190_REG_F_NTC_FAULT_MASK;
+       u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK;
        bool alert_charger = false, alert_battery = false;
        u8 ss_reg = 0, f_reg = 0;
        int i, ret;
 
+       battery_mask_f |= bdi->info->ntc_fault_mask;
+
        ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
        if (ret < 0) {
                dev_err(bdi->dev, "Can't read SS reg: %d\n", ret);
@@ -1633,7 +1789,7 @@ static void bq24190_check_status(struct bq24190_dev_info *bdi)
                        !!(f_reg & BQ24190_REG_F_BOOST_FAULT_MASK),
                        !!(f_reg & BQ24190_REG_F_CHRG_FAULT_MASK),
                        !!(f_reg & BQ24190_REG_F_BAT_FAULT_MASK),
-                       !!(f_reg & BQ24190_REG_F_NTC_FAULT_MASK));
+                       !!(f_reg & bdi->info->ntc_fault_mask));
 
                mutex_lock(&bdi->f_reg_lock);
                if ((bdi->f_reg & battery_mask_f) != (f_reg & battery_mask_f))
@@ -1696,12 +1852,11 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static int bq24190_hw_init(struct bq24190_dev_info *bdi)
+static int bq24190_check_chip(struct bq24190_dev_info *bdi)
 {
        u8 v;
        int ret;
 
-       /* First check that the device really is what its supposed to be */
        ret = bq24190_read_mask(bdi, BQ24190_REG_VPRS,
                        BQ24190_REG_VPRS_PN_MASK,
                        BQ24190_REG_VPRS_PN_SHIFT,
@@ -1719,6 +1874,40 @@ static int bq24190_hw_init(struct bq24190_dev_info *bdi)
                return -ENODEV;
        }
 
+       return 0;
+}
+
+static int bq24296_check_chip(struct bq24190_dev_info *bdi)
+{
+       u8 v;
+       int ret;
+
+       ret = bq24190_read_mask(bdi, BQ24190_REG_VPRS,
+                       BQ24296_REG_VPRS_PN_MASK,
+                       BQ24296_REG_VPRS_PN_SHIFT,
+                       &v);
+       if (ret < 0)
+               return ret;
+
+       switch (v) {
+       case BQ24296_REG_VPRS_PN_24296:
+               break;
+       default:
+               dev_err(bdi->dev, "Error unknown model: 0x%02x\n", v);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int bq24190_hw_init(struct bq24190_dev_info *bdi)
+{
+       int ret;
+
+       ret = bdi->info->check_chip(bdi);
+       if (ret < 0)
+               return ret;
+
        ret = bq24190_register_reset(bdi);
        if (ret < 0)
                return ret;
@@ -1736,7 +1925,8 @@ static int bq24190_get_config(struct bq24190_dev_info *bdi)
        struct power_supply_battery_info *info;
        int v, idx;
 
-       idx = ARRAY_SIZE(bq24190_ccc_ichg_values) - 1;
+       idx = bdi->info->ichg_array_size - 1;
+
        bdi->ichg_max = bq24190_ccc_ichg_values[idx];
 
        idx = ARRAY_SIZE(bq24190_cvc_vreg_values) - 1;
@@ -1781,6 +1971,64 @@ static int bq24190_get_config(struct bq24190_dev_info *bdi)
        return 0;
 }
 
+static const struct bq24190_chip_info bq24190_chip_info_tbl[] = {
+       [BQ24190] = {
+               .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+               .vbus_desc = &bq24190_vbus_desc,
+#endif
+               .check_chip = bq24190_check_chip,
+               .set_chg_config = bq24190_battery_set_chg_config,
+               .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+               .get_ntc_status = bq24190_charger_get_ntc_status,
+               .set_otg_vbus = bq24190_set_otg_vbus,
+       },
+       [BQ24192] = {
+               .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+               .vbus_desc = &bq24190_vbus_desc,
+#endif
+               .check_chip = bq24190_check_chip,
+               .set_chg_config = bq24190_battery_set_chg_config,
+               .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+               .get_ntc_status = bq24190_charger_get_ntc_status,
+               .set_otg_vbus = bq24190_set_otg_vbus,
+       },
+       [BQ24192i] = {
+               .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+               .vbus_desc = &bq24190_vbus_desc,
+#endif
+               .check_chip = bq24190_check_chip,
+               .set_chg_config = bq24190_battery_set_chg_config,
+               .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+               .get_ntc_status = bq24190_charger_get_ntc_status,
+               .set_otg_vbus = bq24190_set_otg_vbus,
+       },
+       [BQ24196] = {
+               .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+               .vbus_desc = &bq24190_vbus_desc,
+#endif
+               .check_chip = bq24190_check_chip,
+               .set_chg_config = bq24190_battery_set_chg_config,
+               .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+               .get_ntc_status = bq24190_charger_get_ntc_status,
+               .set_otg_vbus = bq24190_set_otg_vbus,
+       },
+       [BQ24296] = {
+               .ichg_array_size = BQ24296_CCC_ICHG_VALUES_LEN,
+#ifdef CONFIG_REGULATOR
+               .vbus_desc = &bq24296_vbus_desc,
+#endif
+               .check_chip = bq24296_check_chip,
+               .set_chg_config = bq24296_battery_set_chg_config,
+               .ntc_fault_mask = BQ24296_REG_F_NTC_FAULT_MASK,
+               .get_ntc_status = bq24296_charger_get_ntc_status,
+               .set_otg_vbus = bq24296_set_otg_vbus,
+       },
+};
+
 static int bq24190_probe(struct i2c_client *client)
 {
        const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -1804,6 +2052,7 @@ static int bq24190_probe(struct i2c_client *client)
        bdi->client = client;
        bdi->dev = dev;
        strscpy(bdi->model_name, id->name, sizeof(bdi->model_name));
+       bdi->info = i2c_get_match_data(client);
        mutex_init(&bdi->f_reg_lock);
        bdi->charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
        bdi->f_reg = 0;
@@ -1940,7 +2189,7 @@ static void bq24190_shutdown(struct i2c_client *client)
        struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
 
        /* Turn off 5V boost regulator on shutdown */
-       bq24190_set_otg_vbus(bdi, false);
+       bdi->info->set_otg_vbus(bdi, false);
 }
 
 static __maybe_unused int bq24190_runtime_suspend(struct device *dev)
@@ -2029,19 +2278,21 @@ static const struct dev_pm_ops bq24190_pm_ops = {
 };
 
 static const struct i2c_device_id bq24190_i2c_ids[] = {
-       { "bq24190" },
-       { "bq24192" },
-       { "bq24192i" },
-       { "bq24196" },
+       { "bq24190", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24190] },
+       { "bq24192", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24192] },
+       { "bq24192i", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24192i] },
+       { "bq24196", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24196] },
+       { "bq24296", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24296] },
        { },
 };
 MODULE_DEVICE_TABLE(i2c, bq24190_i2c_ids);
 
 static const struct of_device_id bq24190_of_match[] = {
-       { .compatible = "ti,bq24190", },
-       { .compatible = "ti,bq24192", },
-       { .compatible = "ti,bq24192i", },
-       { .compatible = "ti,bq24196", },
+       { .compatible = "ti,bq24190", .data = &bq24190_chip_info_tbl[BQ24190] },
+       { .compatible = "ti,bq24192", .data = &bq24190_chip_info_tbl[BQ24192] },
+       { .compatible = "ti,bq24192i", .data = &bq24190_chip_info_tbl[BQ24192i] },
+       { .compatible = "ti,bq24196", .data = &bq24190_chip_info_tbl[BQ24196] },
+       { .compatible = "ti,bq24296", .data = &bq24190_chip_info_tbl[BQ24296] },
        { },
 };
 MODULE_DEVICE_TABLE(of, bq24190_of_match);
index 789a31bd70c39f2954527bfb565dc53fda0d6c21..1a935bc885108e7769b6fe2c5aa344a7530ed839 100644 (file)
@@ -1574,13 +1574,16 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
                        wd_reg_val = i;
                        break;
                }
-               if (bq->watchdog_timer > bq256xx_watchdog_time[i] &&
+               if (i + 1 < BQ256XX_NUM_WD_VAL &&
+                   bq->watchdog_timer > bq256xx_watchdog_time[i] &&
                    bq->watchdog_timer < bq256xx_watchdog_time[i + 1])
                        wd_reg_val = i;
        }
        ret = regmap_update_bits(bq->regmap, BQ256XX_CHARGER_CONTROL_1,
                                 BQ256XX_WATCHDOG_MASK, wd_reg_val <<
                                                BQ256XX_WDT_BIT_SHIFT);
+       if (ret)
+               return ret;
 
        ret = power_supply_get_battery_info(bq->charger, &bat_info);
        if (ret == -ENOMEM)
index 4296600e8912a3988c45286f58420ffabb547345..1c4a9d1377442ad98f4e3fcb3b1215bf3e49b20b 100644 (file)
@@ -2162,6 +2162,28 @@ void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
 }
 EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
 
+#ifdef CONFIG_PM_SLEEP
+static int bq27xxx_battery_suspend(struct device *dev)
+{
+       struct bq27xxx_device_info *di = dev_get_drvdata(dev);
+
+       cancel_delayed_work(&di->work);
+       return 0;
+}
+
+static int bq27xxx_battery_resume(struct device *dev)
+{
+       struct bq27xxx_device_info *di = dev_get_drvdata(dev);
+
+       schedule_delayed_work(&di->work, 0);
+       return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+SIMPLE_DEV_PM_OPS(bq27xxx_battery_battery_pm_ops,
+                 bq27xxx_battery_suspend, bq27xxx_battery_resume);
+EXPORT_SYMBOL_GPL(bq27xxx_battery_battery_pm_ops);
+
 MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
 MODULE_DESCRIPTION("BQ27xxx battery monitor driver");
 MODULE_LICENSE("GPL");
index 9b5475590518fb23153acdc2a9ceea403bc73fef..3a1798b0c1a79f3ed3a3fd0be4d84f6df390b3b4 100644 (file)
@@ -295,6 +295,7 @@ static struct i2c_driver bq27xxx_battery_i2c_driver = {
        .driver = {
                .name = "bq27xxx-battery",
                .of_match_table = of_match_ptr(bq27xxx_battery_i2c_of_match_table),
+               .pm = &bq27xxx_battery_battery_pm_ops,
        },
        .probe = bq27xxx_battery_i2c_probe,
        .remove = bq27xxx_battery_i2c_remove,
index bb29e9ebd24a8eb2b96f5a1513f02419aa14d043..99f3ccdc30a6a77dc06ba6c7ba54ed47e7358b9c 100644 (file)
@@ -491,7 +491,7 @@ static int cw_battery_get_property(struct power_supply *psy,
 
        case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
                if (cw_battery_valid_time_to_empty(cw_bat))
-                       val->intval = cw_bat->time_to_empty;
+                       val->intval = cw_bat->time_to_empty * 60;
                else
                        val->intval = 0;
                break;
index 73265001dd4b22b9575f0249702e0da798f9c22a..ecef35ac3b7e48550afd0cde054d61f45fab53b6 100644 (file)
@@ -861,44 +861,44 @@ const size_t power_supply_battery_info_properties_size = ARRAY_SIZE(power_supply
 EXPORT_SYMBOL_GPL(power_supply_battery_info_properties_size);
 
 bool power_supply_battery_info_has_prop(struct power_supply_battery_info *info,
-                                       enum power_supply_property psp)
+                                       enum power_supply_property psp)
 {
        if (!info)
                return false;
 
        switch (psp) {
-               case POWER_SUPPLY_PROP_TECHNOLOGY:
-                       return info->technology != POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
-               case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
-                       return info->energy_full_design_uwh >= 0;
-               case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
-                       return info->charge_full_design_uah >= 0;
-               case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
-                       return info->voltage_min_design_uv >= 0;
-               case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
-                       return info->voltage_max_design_uv >= 0;
-               case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
-                       return info->precharge_current_ua >= 0;
-               case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
-                       return info->charge_term_current_ua >= 0;
-               case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
-                       return info->constant_charge_current_max_ua >= 0;
-               case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
-                       return info->constant_charge_voltage_max_uv >= 0;
-               case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
-                       return info->temp_ambient_alert_min > INT_MIN;
-               case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
-                       return info->temp_ambient_alert_max < INT_MAX;
-               case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
-                       return info->temp_alert_min > INT_MIN;
-               case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
-                       return info->temp_alert_max < INT_MAX;
-               case POWER_SUPPLY_PROP_TEMP_MIN:
-                       return info->temp_min > INT_MIN;
-               case POWER_SUPPLY_PROP_TEMP_MAX:
-                       return info->temp_max < INT_MAX;
-               default:
-                       return false;
+       case POWER_SUPPLY_PROP_TECHNOLOGY:
+               return info->technology != POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+       case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+               return info->energy_full_design_uwh >= 0;
+       case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+               return info->charge_full_design_uah >= 0;
+       case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+               return info->voltage_min_design_uv >= 0;
+       case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+               return info->voltage_max_design_uv >= 0;
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+               return info->precharge_current_ua >= 0;
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               return info->charge_term_current_ua >= 0;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+               return info->constant_charge_current_max_ua >= 0;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+               return info->constant_charge_voltage_max_uv >= 0;
+       case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
+               return info->temp_ambient_alert_min > INT_MIN;
+       case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
+               return info->temp_ambient_alert_max < INT_MAX;
+       case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+               return info->temp_alert_min > INT_MIN;
+       case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+               return info->temp_alert_max < INT_MAX;
+       case POWER_SUPPLY_PROP_TEMP_MIN:
+               return info->temp_min > INT_MIN;
+       case POWER_SUPPLY_PROP_TEMP_MAX:
+               return info->temp_max < INT_MAX;
+       default:
+               return false;
        }
 }
 EXPORT_SYMBOL_GPL(power_supply_battery_info_has_prop);
@@ -914,53 +914,53 @@ int power_supply_battery_info_get_prop(struct power_supply_battery_info *info,
                return -EINVAL;
 
        switch (psp) {
-               case POWER_SUPPLY_PROP_TECHNOLOGY:
-                       val->intval = info->technology;
-                       return 0;
-               case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
-                       val->intval = info->energy_full_design_uwh;
-                       return 0;
-               case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
-                       val->intval = info->charge_full_design_uah;
-                       return 0;
-               case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
-                       val->intval = info->voltage_min_design_uv;
-                       return 0;
-               case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
-                       val->intval = info->voltage_max_design_uv;
-                       return 0;
-               case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
-                       val->intval = info->precharge_current_ua;
-                       return 0;
-               case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
-                       val->intval = info->charge_term_current_ua;
-                       return 0;
-               case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
-                       val->intval = info->constant_charge_current_max_ua;
-                       return 0;
-               case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
-                       val->intval = info->constant_charge_voltage_max_uv;
-                       return 0;
-               case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
-                       val->intval = info->temp_ambient_alert_min;
-                       return 0;
-               case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
-                       val->intval = info->temp_ambient_alert_max;
-                       return 0;
-               case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
-                       val->intval = info->temp_alert_min;
-                       return 0;
-               case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
-                       val->intval = info->temp_alert_max;
-                       return 0;
-               case POWER_SUPPLY_PROP_TEMP_MIN:
-                       val->intval = info->temp_min;
-                       return 0;
-               case POWER_SUPPLY_PROP_TEMP_MAX:
-                       val->intval = info->temp_max;
-                       return 0;
-               default:
-                       return -EINVAL;
+       case POWER_SUPPLY_PROP_TECHNOLOGY:
+               val->intval = info->technology;
+               return 0;
+       case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+               val->intval = info->energy_full_design_uwh;
+               return 0;
+       case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+               val->intval = info->charge_full_design_uah;
+               return 0;
+       case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+               val->intval = info->voltage_min_design_uv;
+               return 0;
+       case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+               val->intval = info->voltage_max_design_uv;
+               return 0;
+       case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+               val->intval = info->precharge_current_ua;
+               return 0;
+       case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+               val->intval = info->charge_term_current_ua;
+               return 0;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+               val->intval = info->constant_charge_current_max_ua;
+               return 0;
+       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+               val->intval = info->constant_charge_voltage_max_uv;
+               return 0;
+       case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
+               val->intval = info->temp_ambient_alert_min;
+               return 0;
+       case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
+               val->intval = info->temp_ambient_alert_max;
+               return 0;
+       case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+               val->intval = info->temp_alert_min;
+               return 0;
+       case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+               val->intval = info->temp_alert_max;
+               return 0;
+       case POWER_SUPPLY_PROP_TEMP_MIN:
+               val->intval = info->temp_min;
+               return 0;
+       case POWER_SUPPLY_PROP_TEMP_MAX:
+               val->intval = info->temp_max;
+               return 0;
+       default:
+               return -EINVAL;
        }
 }
 EXPORT_SYMBOL_GPL(power_supply_battery_info_get_prop);
@@ -1255,6 +1255,7 @@ EXPORT_SYMBOL_GPL(power_supply_powers);
 static void power_supply_dev_release(struct device *dev)
 {
        struct power_supply *psy = to_power_supply(dev);
+
        dev_dbg(dev, "%s\n", __func__);
        kfree(psy);
 }
@@ -1636,6 +1637,6 @@ subsys_initcall(power_supply_class_init);
 module_exit(power_supply_class_exit);
 
 MODULE_DESCRIPTION("Universal power supply monitor class");
-MODULE_AUTHOR("Ian Molton <spyro@f2s.com>, "
-             "Szabolcs Gyurko, "
-             "Anton Vorontsov <cbou@mail.ru>");
+MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
+MODULE_AUTHOR("Szabolcs Gyurko");
+MODULE_AUTHOR("Anton Vorontsov <cbou@mail.ru>");
index 8acf63ee6897f15d4b231240162e658fb9af76c1..9bb7774060138ed2149c88c79bc4ec96c6a256f9 100644 (file)
@@ -972,10 +972,14 @@ static int smb2_probe(struct platform_device *pdev)
        supply_config.of_node = pdev->dev.of_node;
 
        desc = devm_kzalloc(chip->dev, sizeof(smb2_psy_desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
        memcpy(desc, &smb2_psy_desc, sizeof(smb2_psy_desc));
        desc->name =
                devm_kasprintf(chip->dev, GFP_KERNEL, "%s-charger",
                               (const char *)device_get_match_data(chip->dev));
+       if (!desc->name)
+               return -ENOMEM;
 
        chip->chg_psy =
                devm_power_supply_register(chip->dev, desc, &supply_config);
index 29078486534d40323015ca255c2b5d5328854b4f..f2728ee787d7a5167e8a9f25e1c8550803c60117 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/acpi.h>
 #include <linux/module.h>
+#include <linux/idr.h>
 #include <linux/of.h>
 #include <linux/pwm.h>
 #include <linux/list.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/pwm.h>
 
-#define MAX_PWMS 1024
-
 static DEFINE_MUTEX(pwm_lookup_lock);
 static LIST_HEAD(pwm_lookup_list);
 
-/* protects access to pwm_chips and allocated_pwms */
+/* protects access to pwm_chips */
 static DEFINE_MUTEX(pwm_lock);
 
-static LIST_HEAD(pwm_chips);
-static DECLARE_BITMAP(allocated_pwms, MAX_PWMS);
-
-/* Called with pwm_lock held */
-static int alloc_pwms(unsigned int count)
-{
-       unsigned int start;
-
-       start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, 0,
-                                          count, 0);
-
-       if (start + count > MAX_PWMS)
-               return -ENOSPC;
-
-       bitmap_set(allocated_pwms, start, count);
-
-       return start;
-}
-
-/* Called with pwm_lock held */
-static void free_pwms(struct pwm_chip *chip)
-{
-       bitmap_clear(allocated_pwms, chip->base, chip->npwm);
-
-       kfree(chip->pwms);
-       chip->pwms = NULL;
-}
+static DEFINE_IDR(pwm_chips);
 
 static struct pwm_chip *pwmchip_find_by_name(const char *name)
 {
        struct pwm_chip *chip;
+       unsigned long id, tmp;
 
        if (!name)
                return NULL;
 
        mutex_lock(&pwm_lock);
 
-       list_for_each_entry(chip, &pwm_chips, list) {
+       idr_for_each_entry_ul(&pwm_chips, chip, tmp, id) {
                const char *chip_name = dev_name(chip->dev);
 
                if (chip_name && strcmp(chip_name, name) == 0) {
@@ -85,22 +59,24 @@ static struct pwm_chip *pwmchip_find_by_name(const char *name)
 static int pwm_device_request(struct pwm_device *pwm, const char *label)
 {
        int err;
+       struct pwm_chip *chip = pwm->chip;
+       const struct pwm_ops *ops = chip->ops;
 
        if (test_bit(PWMF_REQUESTED, &pwm->flags))
                return -EBUSY;
 
-       if (!try_module_get(pwm->chip->owner))
+       if (!try_module_get(chip->owner))
                return -ENODEV;
 
-       if (pwm->chip->ops->request) {
-               err = pwm->chip->ops->request(pwm->chip, pwm);
+       if (ops->request) {
+               err = ops->request(chip, pwm);
                if (err) {
-                       module_put(pwm->chip->owner);
+                       module_put(chip->owner);
                        return err;
                }
        }
 
-       if (pwm->chip->ops->get_state) {
+       if (ops->get_state) {
                /*
                 * Zero-initialize state because most drivers are unaware of
                 * .usage_power. The other members of state are supposed to be
@@ -110,7 +86,7 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
                 */
                struct pwm_state state = { 0, };
 
-               err = pwm->chip->ops->get_state(pwm->chip, pwm, &state);
+               err = ops->get_state(chip, pwm, &state);
                trace_pwm_get(pwm, &state, err);
 
                if (!err)
@@ -176,7 +152,7 @@ of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
        pwm->args.period = args->args[0];
        pwm->args.polarity = PWM_POLARITY_NORMAL;
 
-       if (args->args_count == 2 && args->args[2] & PWM_POLARITY_INVERTED)
+       if (args->args_count == 2 && args->args[1] & PWM_POLARITY_INVERTED)
                pwm->args.polarity = PWM_POLARITY_INVERSED;
 
        return pwm;
@@ -234,7 +210,6 @@ static bool pwm_ops_check(const struct pwm_chip *chip)
  */
 int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
 {
-       struct pwm_device *pwm;
        unsigned int i;
        int ret;
 
@@ -246,31 +221,28 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
 
        chip->owner = owner;
 
-       chip->pwms = kcalloc(chip->npwm, sizeof(*pwm), GFP_KERNEL);
+       chip->pwms = kcalloc(chip->npwm, sizeof(*chip->pwms), GFP_KERNEL);
        if (!chip->pwms)
                return -ENOMEM;
 
        mutex_lock(&pwm_lock);
 
-       ret = alloc_pwms(chip->npwm);
+       ret = idr_alloc(&pwm_chips, chip, 0, 0, GFP_KERNEL);
        if (ret < 0) {
                mutex_unlock(&pwm_lock);
                kfree(chip->pwms);
                return ret;
        }
 
-       chip->base = ret;
+       chip->id = ret;
 
        for (i = 0; i < chip->npwm; i++) {
-               pwm = &chip->pwms[i];
+               struct pwm_device *pwm = &chip->pwms[i];
 
                pwm->chip = chip;
-               pwm->pwm = chip->base + i;
                pwm->hwpwm = i;
        }
 
-       list_add(&chip->list, &pwm_chips);
-
        mutex_unlock(&pwm_lock);
 
        if (IS_ENABLED(CONFIG_OF))
@@ -297,11 +269,11 @@ void pwmchip_remove(struct pwm_chip *chip)
 
        mutex_lock(&pwm_lock);
 
-       list_del_init(&chip->list);
-
-       free_pwms(chip);
+       idr_remove(&pwm_chips, chip->id);
 
        mutex_unlock(&pwm_lock);
+
+       kfree(chip->pwms);
 }
 EXPORT_SYMBOL_GPL(pwmchip_remove);
 
@@ -356,8 +328,8 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
 }
 EXPORT_SYMBOL_GPL(pwm_request_from_chip);
 
-static void pwm_apply_state_debug(struct pwm_device *pwm,
-                                 const struct pwm_state *state)
+static void pwm_apply_debug(struct pwm_device *pwm,
+                           const struct pwm_state *state)
 {
        struct pwm_state *last = &pwm->last;
        struct pwm_chip *chip = pwm->chip;
@@ -463,24 +435,15 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
 }
 
 /**
- * pwm_apply_state() - atomically apply a new state to a PWM device
+ * __pwm_apply() - atomically apply a new state to a PWM device
  * @pwm: PWM device
  * @state: new state to apply
  */
-int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
+static int __pwm_apply(struct pwm_device *pwm, const struct pwm_state *state)
 {
        struct pwm_chip *chip;
        int err;
 
-       /*
-        * Some lowlevel driver's implementations of .apply() make use of
-        * mutexes, also with some drivers only returning when the new
-        * configuration is active calling pwm_apply_state() from atomic context
-        * is a bad idea. So make it explicit that calling this function might
-        * sleep.
-        */
-       might_sleep();
-
        if (!pwm || !state || !state->period ||
            state->duty_cycle > state->period)
                return -EINVAL;
@@ -505,11 +468,60 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
         * only do this after pwm->state was applied as some
         * implementations of .get_state depend on this
         */
-       pwm_apply_state_debug(pwm, state);
+       pwm_apply_debug(pwm, state);
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(pwm_apply_state);
+
+/**
+ * pwm_apply_might_sleep() - atomically apply a new state to a PWM device
+ * Cannot be used in atomic context.
+ * @pwm: PWM device
+ * @state: new state to apply
+ */
+int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state)
+{
+       int err;
+
+       /*
+        * Some lowlevel driver's implementations of .apply() make use of
+        * mutexes, also with some drivers only returning when the new
+        * configuration is active calling pwm_apply_might_sleep() from atomic context
+        * is a bad idea. So make it explicit that calling this function might
+        * sleep.
+        */
+       might_sleep();
+
+       if (IS_ENABLED(CONFIG_PWM_DEBUG) && pwm->chip->atomic) {
+               /*
+                * Catch any drivers that have been marked as atomic but
+                * that will sleep anyway.
+                */
+               non_block_start();
+               err = __pwm_apply(pwm, state);
+               non_block_end();
+       } else {
+               err = __pwm_apply(pwm, state);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(pwm_apply_might_sleep);
+
+/**
+ * pwm_apply_atomic() - apply a new state to a PWM device from atomic context
+ * Not all PWM devices support this function, check with pwm_might_sleep().
+ * @pwm: PWM device
+ * @state: new state to apply
+ */
+int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state)
+{
+       WARN_ONCE(!pwm->chip->atomic,
+                 "sleeping PWM driver used in atomic context\n");
+
+       return __pwm_apply(pwm, state);
+}
+EXPORT_SYMBOL_GPL(pwm_apply_atomic);
 
 /**
  * pwm_capture() - capture and report a PWM signal
@@ -567,7 +579,7 @@ int pwm_adjust_config(struct pwm_device *pwm)
                state.period = pargs.period;
                state.polarity = pargs.polarity;
 
-               return pwm_apply_state(pwm, &state);
+               return pwm_apply_might_sleep(pwm, &state);
        }
 
        /*
@@ -590,17 +602,18 @@ int pwm_adjust_config(struct pwm_device *pwm)
                state.duty_cycle = state.period - state.duty_cycle;
        }
 
-       return pwm_apply_state(pwm, &state);
+       return pwm_apply_might_sleep(pwm, &state);
 }
 EXPORT_SYMBOL_GPL(pwm_adjust_config);
 
 static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode)
 {
        struct pwm_chip *chip;
+       unsigned long id, tmp;
 
        mutex_lock(&pwm_lock);
 
-       list_for_each_entry(chip, &pwm_chips, list)
+       idr_for_each_entry_ul(&pwm_chips, chip, tmp, id)
                if (chip->dev && device_match_fwnode(chip->dev, fwnode)) {
                        mutex_unlock(&pwm_lock);
                        return chip;
@@ -1058,17 +1071,27 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
 
 static void *pwm_seq_start(struct seq_file *s, loff_t *pos)
 {
+       unsigned long id = *pos;
+       void *ret;
+
        mutex_lock(&pwm_lock);
        s->private = "";
 
-       return seq_list_start(&pwm_chips, *pos);
+       ret = idr_get_next_ul(&pwm_chips, &id);
+       *pos = id;
+       return ret;
 }
 
 static void *pwm_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
+       unsigned long id = *pos + 1;
+       void *ret;
+
        s->private = "\n";
 
-       return seq_list_next(v, &pwm_chips, pos);
+       ret = idr_get_next_ul(&pwm_chips, &id);
+       *pos = id;
+       return ret;
 }
 
 static void pwm_seq_stop(struct seq_file *s, void *v)
@@ -1078,9 +1101,10 @@ static void pwm_seq_stop(struct seq_file *s, void *v)
 
 static int pwm_seq_show(struct seq_file *s, void *v)
 {
-       struct pwm_chip *chip = list_entry(v, struct pwm_chip, list);
+       struct pwm_chip *chip = v;
 
-       seq_printf(s, "%s%s/%s, %d PWM device%s\n", (char *)s->private,
+       seq_printf(s, "%s%d: %s/%s, %d PWM device%s\n",
+                  (char *)s->private, chip->id,
                   chip->dev->bus ? chip->dev->bus->name : "no-bus",
                   dev_name(chip->dev), chip->npwm,
                   (chip->npwm != 1) ? "s" : "");
index 07920e0347575aeb420c303c93fdd3ae99dc8448..3f2c5031a3ba85fd756f02a17b63739fd4575256 100644 (file)
@@ -180,7 +180,6 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
        .div1_clk_erratum = true,
 };
 
-#ifdef CONFIG_PM_SLEEP
 static int atmel_hlcdc_pwm_suspend(struct device *dev)
 {
        struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
@@ -210,10 +209,9 @@ static int atmel_hlcdc_pwm_resume(struct device *dev)
        return atmel_hlcdc_pwm_apply(&atmel->chip, &atmel->chip.pwms[0],
                                     &state);
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(atmel_hlcdc_pwm_pm_ops,
-                        atmel_hlcdc_pwm_suspend, atmel_hlcdc_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(atmel_hlcdc_pwm_pm_ops,
+                               atmel_hlcdc_pwm_suspend, atmel_hlcdc_pwm_resume);
 
 static const struct of_device_id atmel_hlcdc_dt_ids[] = {
        {
@@ -297,7 +295,7 @@ static struct platform_driver atmel_hlcdc_pwm_driver = {
        .driver = {
                .name = "atmel-hlcdc-pwm",
                .of_match_table = atmel_hlcdc_pwm_dt_ids,
-               .pm = &atmel_hlcdc_pwm_pm_ops,
+               .pm = pm_ptr(&atmel_hlcdc_pwm_pm_ops),
        },
        .probe = atmel_hlcdc_pwm_probe,
        .remove_new = atmel_hlcdc_pwm_remove,
index 98b33c016c3c76b8a9bd4cd2492b5871a33690aa..d42c897cb85e59231b716b3ec5b9baa8c3f8c270 100644 (file)
@@ -489,7 +489,6 @@ static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
 
-#ifdef CONFIG_PM_SLEEP
 static int atmel_tcb_pwm_suspend(struct device *dev)
 {
        struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
@@ -522,16 +521,15 @@ static int atmel_tcb_pwm_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
-                        atmel_tcb_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
+                               atmel_tcb_pwm_resume);
 
 static struct platform_driver atmel_tcb_pwm_driver = {
        .driver = {
                .name = "atmel-tcb-pwm",
                .of_match_table = atmel_tcb_pwm_dt_ids,
-               .pm = &atmel_tcb_pwm_pm_ops,
+               .pm = pm_ptr(&atmel_tcb_pwm_pm_ops),
        },
        .probe = atmel_tcb_pwm_probe,
        .remove_new = atmel_tcb_pwm_remove,
index 15d6ed03c3ce05a58db5d65e8fb1f3532b35635f..45046a5c20a5e5202598653aa6b0596e99229347 100644 (file)
@@ -260,7 +260,7 @@ static int kona_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                        return err;
        }
 
-       err = kona_pwmc_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = kona_pwmc_config(chip, pwm, state->duty_cycle, state->period);
        if (err && !pwm->state.enabled)
                clk_disable_unprepare(kp->clk);
 
index ab30667f4f951c0d46ccbeddf365437652c269ba..283cf27f25bae01fd6b2702781e04ce77188c924 100644 (file)
@@ -28,6 +28,7 @@ struct bcm2835_pwm {
        struct device *dev;
        void __iomem *base;
        struct clk *clk;
+       unsigned long rate;
 };
 
 static inline struct bcm2835_pwm *to_bcm2835_pwm(struct pwm_chip *chip)
@@ -63,17 +64,11 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 {
 
        struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
-       unsigned long rate = clk_get_rate(pc->clk);
        unsigned long long period_cycles;
        u64 max_period;
 
        u32 val;
 
-       if (!rate) {
-               dev_err(pc->dev, "failed to get clock rate\n");
-               return -EINVAL;
-       }
-
        /*
         * period_cycles must be a 32 bit value, so period * rate / NSEC_PER_SEC
         * must be <= U32_MAX. As U32_MAX * NSEC_PER_SEC < U64_MAX the
@@ -88,13 +83,13 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
         * <=> period < ((U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate
         * <=> period <= ceil((U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate) - 1
         */
-       max_period = DIV_ROUND_UP_ULL((u64)U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC / 2, rate) - 1;
+       max_period = DIV_ROUND_UP_ULL((u64)U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC / 2, pc->rate) - 1;
 
        if (state->period > max_period)
                return -EINVAL;
 
        /* set period */
-       period_cycles = DIV_ROUND_CLOSEST_ULL(state->period * rate, NSEC_PER_SEC);
+       period_cycles = DIV_ROUND_CLOSEST_ULL(state->period * pc->rate, NSEC_PER_SEC);
 
        /* don't accept a period that is too small */
        if (period_cycles < PERIOD_MIN)
@@ -103,7 +98,7 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        writel(period_cycles, pc->base + PERIOD(pwm->hwpwm));
 
        /* set duty cycle */
-       val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle * rate, NSEC_PER_SEC);
+       val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle * pc->rate, NSEC_PER_SEC);
        writel(val, pc->base + DUTY(pwm->hwpwm));
 
        /* set polarity */
@@ -131,6 +126,13 @@ static const struct pwm_ops bcm2835_pwm_ops = {
        .apply = bcm2835_pwm_apply,
 };
 
+static void devm_clk_rate_exclusive_put(void *data)
+{
+       struct clk *clk = data;
+
+       clk_rate_exclusive_put(clk);
+}
+
 static int bcm2835_pwm_probe(struct platform_device *pdev)
 {
        struct bcm2835_pwm *pc;
@@ -151,8 +153,24 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
                return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
                                     "clock not found\n");
 
+       ret = clk_rate_exclusive_get(pc->clk);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret,
+                                    "fail to get exclusive rate\n");
+
+       ret = devm_add_action_or_reset(&pdev->dev, devm_clk_rate_exclusive_put,
+                                      pc->clk);
+       if (ret)
+               return ret;
+
+       pc->rate = clk_get_rate(pc->clk);
+       if (!pc->rate)
+               return dev_err_probe(&pdev->dev, -EINVAL,
+                                    "failed to get clock rate\n");
+
        pc->chip.dev = &pdev->dev;
        pc->chip.ops = &bcm2835_pwm_ops;
+       pc->chip.atomic = true;
        pc->chip.npwm = 2;
 
        platform_set_drvdata(pdev, pc);
index ba2d799917695fc4d767160049dfe45a4dca83d3..442913232dc01efc756406a2e29d02d9aae16ec9 100644 (file)
@@ -226,7 +226,6 @@ static int berlin_pwm_probe(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int berlin_pwm_suspend(struct device *dev)
 {
        struct berlin_pwm_chip *bpc = dev_get_drvdata(dev);
@@ -267,17 +266,16 @@ static int berlin_pwm_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(berlin_pwm_pm_ops, berlin_pwm_suspend,
-                        berlin_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(berlin_pwm_pm_ops, berlin_pwm_suspend,
+                               berlin_pwm_resume);
 
 static struct platform_driver berlin_pwm_driver = {
        .probe = berlin_pwm_probe,
        .driver = {
                .name = "berlin-pwm",
                .of_match_table = berlin_pwm_match,
-               .pm = &berlin_pwm_pm_ops,
+               .pm = pm_ptr(&berlin_pwm_pm_ops),
        },
 };
 module_platform_driver(berlin_pwm_driver);
index b723c2d4f485c762f8e301bae794c4c86c971303..0fdeb0b2dbf3762970702e49cf7e764f0aee5d5a 100644 (file)
@@ -259,7 +259,6 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int brcmstb_pwm_suspend(struct device *dev)
 {
        struct brcmstb_pwm *p = dev_get_drvdata(dev);
@@ -275,17 +274,16 @@ static int brcmstb_pwm_resume(struct device *dev)
 
        return clk_prepare_enable(p->clk);
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(brcmstb_pwm_pm_ops, brcmstb_pwm_suspend,
-                        brcmstb_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmstb_pwm_pm_ops, brcmstb_pwm_suspend,
+                               brcmstb_pwm_resume);
 
 static struct platform_driver brcmstb_pwm_driver = {
        .probe = brcmstb_pwm_probe,
        .driver = {
                .name = "pwm-brcmstb",
                .of_match_table = brcmstb_pwm_of_match,
-               .pm = &brcmstb_pwm_pm_ops,
+               .pm = pm_ptr(&brcmstb_pwm_pm_ops),
        },
 };
 module_platform_driver(brcmstb_pwm_driver);
index 2b0b659eee9797520587ada0537abb2c75329d42..e09358901ab508dd3a434a54285606a83147d220 100644 (file)
@@ -160,22 +160,22 @@ static const struct pwm_ops crc_pwm_ops = {
 
 static int crystalcove_pwm_probe(struct platform_device *pdev)
 {
-       struct crystalcove_pwm *pwm;
+       struct crystalcove_pwm *crc_pwm;
        struct device *dev = pdev->dev.parent;
        struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
 
-       pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
-       if (!pwm)
+       crc_pwm = devm_kzalloc(&pdev->dev, sizeof(*crc_pwm), GFP_KERNEL);
+       if (!crc_pwm)
                return -ENOMEM;
 
-       pwm->chip.dev = &pdev->dev;
-       pwm->chip.ops = &crc_pwm_ops;
-       pwm->chip.npwm = 1;
+       crc_pwm->chip.dev = &pdev->dev;
+       crc_pwm->chip.ops = &crc_pwm_ops;
+       crc_pwm->chip.npwm = 1;
 
        /* get the PMIC regmap */
-       pwm->regmap = pmic->regmap;
+       crc_pwm->regmap = pmic->regmap;
 
-       return devm_pwmchip_add(&pdev->dev, &pwm->chip);
+       return devm_pwmchip_add(&pdev->dev, &crc_pwm->chip);
 }
 
 static struct platform_driver crystalcove_pwm_driver = {
index 4fbd23e4ef693c40ba73bf84ed4851903c8b67bd..5fe303b8656def69218b63d0242df10ace261bbd 100644 (file)
 /**
  * struct cros_ec_pwm_device - Driver data for EC PWM
  *
- * @dev: Device node
  * @ec: Pointer to EC device
  * @chip: PWM controller chip
  * @use_pwm_type: Use PWM types instead of generic channels
  * @channel: array with per-channel data
  */
 struct cros_ec_pwm_device {
-       struct device *dev;
        struct cros_ec_device *ec;
        struct pwm_chip chip;
        bool use_pwm_type;
index bd9cadb497d70e34b1b3eb974aa125bf6d44ab8d..4929354f8cd95c8b1bc37feccf4e92b3e541444c 100644 (file)
@@ -71,7 +71,6 @@ static void dwc_pwm_remove(struct pci_dev *pci)
        pm_runtime_get_noresume(&pci->dev);
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int dwc_pwm_suspend(struct device *dev)
 {
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
@@ -106,9 +105,8 @@ static int dwc_pwm_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(dwc_pwm_pm_ops, dwc_pwm_suspend, dwc_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(dwc_pwm_pm_ops, dwc_pwm_suspend, dwc_pwm_resume);
 
 static const struct pci_device_id dwc_pwm_id_table[] = {
        { PCI_VDEVICE(INTEL, 0x4bb7) }, /* Elkhart Lake */
@@ -122,7 +120,7 @@ static struct pci_driver dwc_pwm_driver = {
        .remove = dwc_pwm_remove,
        .id_table = dwc_pwm_id_table,
        .driver = {
-               .pm = &dwc_pwm_pm_ops,
+               .pm = pm_ptr(&dwc_pwm_pm_ops),
        },
 };
 
index 116fa060e3029942f557fceb4d090794f87d3044..5965ac35b32eac9d560132a9498f3e525373fd61 100644 (file)
@@ -13,9 +13,9 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/pwm.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
@@ -196,7 +196,7 @@ static int img_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = img_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = img_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (err)
                return err;
 
@@ -260,7 +260,6 @@ static int img_pwm_probe(struct platform_device *pdev)
        u64 val;
        unsigned long clk_rate;
        struct img_pwm_chip *imgchip;
-       const struct of_device_id *of_dev_id;
 
        imgchip = devm_kzalloc(&pdev->dev, sizeof(*imgchip), GFP_KERNEL);
        if (!imgchip)
@@ -272,10 +271,7 @@ static int img_pwm_probe(struct platform_device *pdev)
        if (IS_ERR(imgchip->base))
                return PTR_ERR(imgchip->base);
 
-       of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
-       if (!of_dev_id)
-               return -ENODEV;
-       imgchip->data = of_dev_id->data;
+       imgchip->data = device_get_match_data(&pdev->dev);
 
        imgchip->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                               "img,cr-periph");
index dc6aafeb9f7b44eb976cb89edbaf965fb648723a..9fc290e647e1bdbef4bf2f2553d20c1219a12697 100644 (file)
@@ -371,7 +371,7 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
+static int pwm_imx_tpm_suspend(struct device *dev)
 {
        struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev);
 
@@ -390,7 +390,7 @@ static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
        return 0;
 }
 
-static int __maybe_unused pwm_imx_tpm_resume(struct device *dev)
+static int pwm_imx_tpm_resume(struct device *dev)
 {
        struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev);
        int ret = 0;
@@ -402,8 +402,8 @@ static int __maybe_unused pwm_imx_tpm_resume(struct device *dev)
        return ret;
 }
 
-static SIMPLE_DEV_PM_OPS(imx_tpm_pwm_pm,
-                        pwm_imx_tpm_suspend, pwm_imx_tpm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(imx_tpm_pwm_pm,
+                               pwm_imx_tpm_suspend, pwm_imx_tpm_resume);
 
 static const struct of_device_id imx_tpm_pwm_dt_ids[] = {
        { .compatible = "fsl,imx7ulp-pwm", },
@@ -415,7 +415,7 @@ static struct platform_driver imx_tpm_pwm_driver = {
        .driver = {
                .name = "imx7ulp-tpm-pwm",
                .of_match_table = imx_tpm_pwm_dt_ids,
-               .pm = &imx_tpm_pwm_pm,
+               .pm = pm_ptr(&imx_tpm_pwm_pm),
        },
        .probe  = pwm_imx_tpm_probe,
 };
index e9375de60ad6219729e8d0f36073fda1df49cbb8..3933418e551b412b22edbb4b099add56cf6f601b 100644 (file)
@@ -61,9 +61,10 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
        snprintf(name, sizeof(name), "timer%u", pwm->hwpwm);
 
        clk = clk_get(chip->dev, name);
-       if (IS_ERR(clk))
-               return dev_err_probe(chip->dev, PTR_ERR(clk),
-                                    "Failed to get clock\n");
+       if (IS_ERR(clk)) {
+               dev_err(chip->dev, "error %pe: Failed to get clock\n", clk);
+               return PTR_ERR(clk);
+       }
 
        err = clk_prepare_enable(clk);
        if (err < 0) {
@@ -123,7 +124,7 @@ static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                            const struct pwm_state *state)
 {
-       struct jz4740_pwm_chip *jz = to_jz4740(pwm->chip);
+       struct jz4740_pwm_chip *jz = to_jz4740(chip);
        unsigned long long tmp = 0xffffull * NSEC_PER_SEC;
        struct clk *clk = jz->clk[pwm->hwpwm];
        unsigned long period, duty;
@@ -149,7 +150,7 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
         */
        rate = clk_round_rate(clk, tmp);
        if (rate < 0) {
-               dev_err(chip->dev, "Unable to round rate: %ld", rate);
+               dev_err(chip->dev, "Unable to round rate: %ld\n", rate);
                return rate;
        }
 
@@ -170,7 +171,7 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
        err = clk_set_rate(clk, rate);
        if (err) {
-               dev_err(chip->dev, "Unable to set rate: %d", err);
+               dev_err(chip->dev, "Unable to set rate: %d\n", err);
                return err;
        }
 
index ef7d0da137ede3453684322dc3e2312d6ff0e59e..fe891fa71a1ddc7fd50ac7cd444cce53aa35ebff 100644 (file)
@@ -194,7 +194,7 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                              int duty_ns, int period_ns)
 {
        struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
-       int requested_events, i;
+       int requested_events;
 
        if (period_ns < lpc18xx_pwm->min_period_ns ||
            period_ns > lpc18xx_pwm->max_period_ns) {
@@ -223,8 +223,6 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        if ((requested_events <= 2 && lpc18xx_pwm->period_ns != period_ns) ||
            !lpc18xx_pwm->period_ns) {
                lpc18xx_pwm->period_ns = period_ns;
-               for (i = 0; i < chip->npwm; i++)
-                       pwm_set_period(&chip->pwms[i], period_ns);
                lpc18xx_pwm_config_period(chip, period_ns);
        }
 
@@ -328,7 +326,7 @@ static int lpc18xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = lpc18xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = lpc18xx_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (err)
                return err;
 
index 78f664e41e6e3a174f16ed0ecc216f2e0b1ae087..1d9f3e7a2434ae55e515d64acacadb212b3bb776 100644 (file)
@@ -103,7 +103,7 @@ static int lpc32xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = lpc32xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = lpc32xx_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (err)
                return err;
 
index 373abfd25acb28509520e82ce6b3efc13896e9d9..17d290f847af60841b0928602e9df1aa8c1342bc 100644 (file)
@@ -217,7 +217,7 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = pwm_mediatek_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = pwm_mediatek_config(chip, pwm, state->duty_cycle, state->period);
        if (err)
                return err;
 
index 5bea53243ed2fab3c170591f7b385fdb25890658..2971bbf3b5e7c6e7ecf1a2f7659450e28e0edf72 100644 (file)
@@ -468,10 +468,9 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
                channel->mux.hw.init = &init;
 
                err = devm_clk_hw_register(dev, &channel->mux.hw);
-               if (err) {
-                       dev_err(dev, "failed to register %s: %d\n", name, err);
-                       return err;
-               }
+               if (err)
+                       return dev_err_probe(dev, err,
+                                            "failed to register %s\n", name);
 
                snprintf(name, sizeof(name), "%s#div%u", dev_name(dev), i);
 
@@ -491,10 +490,9 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
                channel->div.lock = &meson->lock;
 
                err = devm_clk_hw_register(dev, &channel->div.hw);
-               if (err) {
-                       dev_err(dev, "failed to register %s: %d\n", name, err);
-                       return err;
-               }
+               if (err)
+                       return dev_err_probe(dev, err,
+                                            "failed to register %s\n", name);
 
                snprintf(name, sizeof(name), "%s#gate%u", dev_name(dev), i);
 
@@ -513,17 +511,13 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
                channel->gate.lock = &meson->lock;
 
                err = devm_clk_hw_register(dev, &channel->gate.hw);
-               if (err) {
-                       dev_err(dev, "failed to register %s: %d\n", name, err);
-                       return err;
-               }
+               if (err)
+                       return dev_err_probe(dev, err, "failed to register %s\n", name);
 
                channel->clk = devm_clk_hw_get_clk(dev, &channel->gate.hw, NULL);
-               if (IS_ERR(channel->clk)) {
-                       err = PTR_ERR(channel->clk);
-                       dev_err(dev, "failed to register %s: %d\n", name, err);
-                       return err;
-               }
+               if (IS_ERR(channel->clk))
+                       return dev_err_probe(dev, PTR_ERR(channel->clk),
+                                            "failed to register %s\n", name);
        }
 
        return 0;
@@ -554,10 +548,9 @@ static int meson_pwm_probe(struct platform_device *pdev)
                return err;
 
        err = devm_pwmchip_add(&pdev->dev, &meson->chip);
-       if (err < 0) {
-               dev_err(&pdev->dev, "failed to register PWM chip: %d\n", err);
-               return err;
-       }
+       if (err < 0)
+               return dev_err_probe(&pdev->dev, err,
+                                    "failed to register PWM chip\n");
 
        return 0;
 }
index 13161e08dd6eb38e86bb1863d131517f7e2f1706..496bd73d29fe1dc154a4d3c401e3a059eae33ec9 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <clocksource/timer-ti-dm.h>
@@ -55,7 +54,6 @@
  * struct pwm_omap_dmtimer_chip - Structure representing a pwm chip
  *                               corresponding to omap dmtimer.
  * @chip:              PWM chip structure representing PWM controller
- * @mutex:             Mutex to protect pwm apply state
  * @dm_timer:          Pointer to omap dm timer.
  * @pdata:             Pointer to omap dm timer ops.
  * @dm_timer_pdev:     Pointer to omap dm timer platform device
@@ -63,7 +61,6 @@
 struct pwm_omap_dmtimer_chip {
        struct pwm_chip chip;
        /* Mutex to protect pwm apply state */
-       struct mutex mutex;
        struct omap_dm_timer *dm_timer;
        const struct omap_dm_timer_ops *pdata;
        struct platform_device *dm_timer_pdev;
@@ -277,13 +274,11 @@ static int pwm_omap_dmtimer_apply(struct pwm_chip *chip,
                                  const struct pwm_state *state)
 {
        struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
-       int ret = 0;
-
-       mutex_lock(&omap->mutex);
+       int ret;
 
        if (pwm_omap_dmtimer_is_enabled(omap) && !state->enabled) {
                omap->pdata->stop(omap->dm_timer);
-               goto unlock_mutex;
+               return 0;
        }
 
        if (pwm_omap_dmtimer_polarity(omap) != state->polarity)
@@ -292,7 +287,7 @@ static int pwm_omap_dmtimer_apply(struct pwm_chip *chip,
        ret = pwm_omap_dmtimer_config(chip, pwm, state->duty_cycle,
                                      state->period);
        if (ret)
-               goto unlock_mutex;
+               return ret;
 
        if (!pwm_omap_dmtimer_is_enabled(omap) && state->enabled) {
                omap->pdata->set_pwm(omap->dm_timer,
@@ -303,10 +298,7 @@ static int pwm_omap_dmtimer_apply(struct pwm_chip *chip,
                pwm_omap_dmtimer_start(omap);
        }
 
-unlock_mutex:
-       mutex_unlock(&omap->mutex);
-
-       return ret;
+       return 0;
 }
 
 static const struct pwm_ops pwm_omap_dmtimer_ops = {
@@ -404,8 +396,6 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
        omap->chip.ops = &pwm_omap_dmtimer_ops;
        omap->chip.npwm = 1;
 
-       mutex_init(&omap->mutex);
-
        ret = pwmchip_add(&omap->chip);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to register PWM\n");
@@ -452,8 +442,6 @@ static void pwm_omap_dmtimer_remove(struct platform_device *pdev)
        omap->pdata->free(omap->dm_timer);
 
        put_device(&omap->dm_timer_pdev->dev);
-
-       mutex_destroy(&omap->mutex);
 }
 
 static const struct of_device_id pwm_omap_dmtimer_of_match[] = {
index 4239f2c3e8b2a3288bfe43d2a7e7a2cb98a9b2b5..28265fdfc92a917708e394d6d331d29e72821e0e 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
@@ -416,7 +415,7 @@ static int tpu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = tpu_pwm_config(pwm->chip, pwm,
+       err = tpu_pwm_config(chip, pwm,
                             state->duty_cycle, state->period, enabled);
        if (err)
                return err;
index cce4381e188af0bfb7549db11de1d0819c893be6..a7c647e3783739720c54010e403fe0711d51beeb 100644 (file)
@@ -10,8 +10,8 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/pwm.h>
 #include <linux/time.h>
 
@@ -296,16 +296,11 @@ MODULE_DEVICE_TABLE(of, rockchip_pwm_dt_ids);
 
 static int rockchip_pwm_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *id;
        struct rockchip_pwm_chip *pc;
        u32 enable_conf, ctrl;
        bool enabled;
        int ret, count;
 
-       id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
-       if (!id)
-               return -EINVAL;
-
        pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
        if (!pc)
                return -ENOMEM;
@@ -344,7 +339,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, pc);
 
-       pc->data = id->data;
+       pc->data = device_get_match_data(&pdev->dev);
        pc->chip.dev = &pdev->dev;
        pc->chip.ops = &rockchip_pwm_ops;
        pc->chip.npwm = 1;
index 69d9f4577b3479fd6038b7e71cc56cfff48fa7c2..6e77302f73687f5ac4e63b1a00663b639772264b 100644 (file)
@@ -620,7 +620,6 @@ static void pwm_samsung_remove(struct platform_device *pdev)
        clk_disable_unprepare(our_chip->base_clk);
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int pwm_samsung_resume(struct device *dev)
 {
        struct samsung_pwm_chip *our_chip = dev_get_drvdata(dev);
@@ -653,14 +652,13 @@ static int pwm_samsung_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(pwm_samsung_pm_ops, NULL, pwm_samsung_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(pwm_samsung_pm_ops, NULL, pwm_samsung_resume);
 
 static struct platform_driver pwm_samsung_driver = {
        .driver         = {
                .name   = "samsung-pwm",
-               .pm     = &pwm_samsung_pm_ops,
+               .pm     = pm_ptr(&pwm_samsung_pm_ops),
                .of_match_table = of_match_ptr(samsung_pwm_matches),
        },
        .probe          = pwm_samsung_probe,
index dc92cea31cd0782021c13fe7064e249b111a4efb..6cf55cf34d39f6d9df7ed4164eb8eb1c81720dc8 100644 (file)
@@ -407,7 +407,7 @@ static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = sti_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = sti_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (err)
                return err;
 
index b67974cc18725189fd1bbc33e1449a68d035757a..439068f3eca18f213ef1f2ce72a6f682a886be97 100644 (file)
@@ -218,7 +218,7 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __maybe_unused stm32_pwm_lp_suspend(struct device *dev)
+static int stm32_pwm_lp_suspend(struct device *dev)
 {
        struct stm32_pwm_lp *priv = dev_get_drvdata(dev);
        struct pwm_state state;
@@ -233,13 +233,13 @@ static int __maybe_unused stm32_pwm_lp_suspend(struct device *dev)
        return pinctrl_pm_select_sleep_state(dev);
 }
 
-static int __maybe_unused stm32_pwm_lp_resume(struct device *dev)
+static int stm32_pwm_lp_resume(struct device *dev)
 {
        return pinctrl_pm_select_default_state(dev);
 }
 
-static SIMPLE_DEV_PM_OPS(stm32_pwm_lp_pm_ops, stm32_pwm_lp_suspend,
-                        stm32_pwm_lp_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stm32_pwm_lp_pm_ops, stm32_pwm_lp_suspend,
+                               stm32_pwm_lp_resume);
 
 static const struct of_device_id stm32_pwm_lp_of_match[] = {
        { .compatible = "st,stm32-pwm-lp", },
@@ -252,7 +252,7 @@ static struct platform_driver stm32_pwm_lp_driver = {
        .driver = {
                .name = "stm32-pwm-lp",
                .of_match_table = stm32_pwm_lp_of_match,
-               .pm = &stm32_pwm_lp_pm_ops,
+               .pm = pm_ptr(&stm32_pwm_lp_pm_ops),
        },
 };
 module_platform_driver(stm32_pwm_lp_driver);
index 3303a754ea020fb3300c1e9e2c46346a4709744f..5f10cba492ecba6389f49462033d911c9a197c46 100644 (file)
@@ -52,21 +52,6 @@ static u32 active_channels(struct stm32_pwm *dev)
        return ccer & TIM_CCER_CCXE;
 }
 
-static int write_ccrx(struct stm32_pwm *dev, int ch, u32 value)
-{
-       switch (ch) {
-       case 0:
-               return regmap_write(dev->regmap, TIM_CCR1, value);
-       case 1:
-               return regmap_write(dev->regmap, TIM_CCR2, value);
-       case 2:
-               return regmap_write(dev->regmap, TIM_CCR3, value);
-       case 3:
-               return regmap_write(dev->regmap, TIM_CCR4, value);
-       }
-       return -EINVAL;
-}
-
 #define TIM_CCER_CC12P (TIM_CCER_CC1P | TIM_CCER_CC2P)
 #define TIM_CCER_CC12E (TIM_CCER_CC1E | TIM_CCER_CC2E)
 #define TIM_CCER_CC34P (TIM_CCER_CC3P | TIM_CCER_CC4P)
@@ -323,7 +308,7 @@ unlock:
        return ret;
 }
 
-static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
+static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch,
                            int duty_ns, int period_ns)
 {
        unsigned long long prd, div, dty;
@@ -369,7 +354,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
        dty = prd * duty_ns;
        do_div(dty, period_ns);
 
-       write_ccrx(priv, ch, dty);
+       regmap_write(priv->regmap, TIM_CCR1 + 4 * ch, dty);
 
        /* Configure output mode */
        shift = (ch & 0x1) * CCMR_CHANNEL_SHIFT;
@@ -386,7 +371,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
        return 0;
 }
 
-static int stm32_pwm_set_polarity(struct stm32_pwm *priv, int ch,
+static int stm32_pwm_set_polarity(struct stm32_pwm *priv, unsigned int ch,
                                  enum pwm_polarity polarity)
 {
        u32 mask;
@@ -401,7 +386,7 @@ static int stm32_pwm_set_polarity(struct stm32_pwm *priv, int ch,
        return 0;
 }
 
-static int stm32_pwm_enable(struct stm32_pwm *priv, int ch)
+static int stm32_pwm_enable(struct stm32_pwm *priv, unsigned int ch)
 {
        u32 mask;
        int ret;
@@ -426,7 +411,7 @@ static int stm32_pwm_enable(struct stm32_pwm *priv, int ch)
        return 0;
 }
 
-static void stm32_pwm_disable(struct stm32_pwm *priv, int ch)
+static void stm32_pwm_disable(struct stm32_pwm *priv, unsigned int ch)
 {
        u32 mask;
 
@@ -486,8 +471,50 @@ static int stm32_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *pwm,
        return ret;
 }
 
+static int stm32_pwm_get_state(struct pwm_chip *chip,
+                              struct pwm_device *pwm, struct pwm_state *state)
+{
+       struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
+       int ch = pwm->hwpwm;
+       unsigned long rate;
+       u32 ccer, psc, arr, ccr;
+       u64 dty, prd;
+       int ret;
+
+       mutex_lock(&priv->lock);
+
+       ret = regmap_read(priv->regmap, TIM_CCER, &ccer);
+       if (ret)
+               goto out;
+
+       state->enabled = ccer & (TIM_CCER_CC1E << (ch * 4));
+       state->polarity = (ccer & (TIM_CCER_CC1P << (ch * 4))) ?
+                         PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
+       ret = regmap_read(priv->regmap, TIM_PSC, &psc);
+       if (ret)
+               goto out;
+       ret = regmap_read(priv->regmap, TIM_ARR, &arr);
+       if (ret)
+               goto out;
+       ret = regmap_read(priv->regmap, TIM_CCR1 + 4 * ch, &ccr);
+       if (ret)
+               goto out;
+
+       rate = clk_get_rate(priv->clk);
+
+       prd = (u64)NSEC_PER_SEC * (psc + 1) * (arr + 1);
+       state->period = DIV_ROUND_UP_ULL(prd, rate);
+       dty = (u64)NSEC_PER_SEC * (psc + 1) * ccr;
+       state->duty_cycle = DIV_ROUND_UP_ULL(dty, rate);
+
+out:
+       mutex_unlock(&priv->lock);
+       return ret;
+}
+
 static const struct pwm_ops stm32pwm_ops = {
        .apply = stm32_pwm_apply_locked,
+       .get_state = stm32_pwm_get_state,
        .capture = IS_ENABLED(CONFIG_DMA_ENGINE) ? stm32_pwm_capture : NULL,
 };
 
@@ -578,32 +605,23 @@ static void stm32_pwm_detect_complementary(struct stm32_pwm *priv)
        priv->have_complementary_output = (ccer != 0);
 }
 
-static int stm32_pwm_detect_channels(struct stm32_pwm *priv)
+static unsigned int stm32_pwm_detect_channels(struct stm32_pwm *priv,
+                                             unsigned int *num_enabled)
 {
-       u32 ccer;
-       int npwm = 0;
+       u32 ccer, ccer_backup;
 
        /*
         * If channels enable bits don't exist writing 1 will have no
         * effect so we can detect and count them.
         */
+       regmap_read(priv->regmap, TIM_CCER, &ccer_backup);
        regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE);
        regmap_read(priv->regmap, TIM_CCER, &ccer);
-       regmap_clear_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE);
-
-       if (ccer & TIM_CCER_CC1E)
-               npwm++;
-
-       if (ccer & TIM_CCER_CC2E)
-               npwm++;
+       regmap_write(priv->regmap, TIM_CCER, ccer_backup);
 
-       if (ccer & TIM_CCER_CC3E)
-               npwm++;
+       *num_enabled = hweight32(ccer_backup & TIM_CCER_CCXE);
 
-       if (ccer & TIM_CCER_CC4E)
-               npwm++;
-
-       return npwm;
+       return hweight32(ccer & TIM_CCER_CCXE);
 }
 
 static int stm32_pwm_probe(struct platform_device *pdev)
@@ -612,6 +630,8 @@ static int stm32_pwm_probe(struct platform_device *pdev)
        struct device_node *np = dev->of_node;
        struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
        struct stm32_pwm *priv;
+       unsigned int num_enabled;
+       unsigned int i;
        int ret;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -634,7 +654,11 @@ static int stm32_pwm_probe(struct platform_device *pdev)
 
        priv->chip.dev = dev;
        priv->chip.ops = &stm32pwm_ops;
-       priv->chip.npwm = stm32_pwm_detect_channels(priv);
+       priv->chip.npwm = stm32_pwm_detect_channels(priv, &num_enabled);
+
+       /* Initialize clock refcount to number of enabled PWM channels. */
+       for (i = 0; i < num_enabled; i++)
+               clk_enable(priv->clk);
 
        ret = devm_pwmchip_add(dev, &priv->chip);
        if (ret < 0)
@@ -645,7 +669,7 @@ static int stm32_pwm_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __maybe_unused stm32_pwm_suspend(struct device *dev)
+static int stm32_pwm_suspend(struct device *dev)
 {
        struct stm32_pwm *priv = dev_get_drvdata(dev);
        unsigned int i;
@@ -666,7 +690,7 @@ static int __maybe_unused stm32_pwm_suspend(struct device *dev)
        return pinctrl_pm_select_sleep_state(dev);
 }
 
-static int __maybe_unused stm32_pwm_resume(struct device *dev)
+static int stm32_pwm_resume(struct device *dev)
 {
        struct stm32_pwm *priv = dev_get_drvdata(dev);
        int ret;
@@ -679,7 +703,7 @@ static int __maybe_unused stm32_pwm_resume(struct device *dev)
        return stm32_pwm_apply_breakinputs(priv);
 }
 
-static SIMPLE_DEV_PM_OPS(stm32_pwm_pm_ops, stm32_pwm_suspend, stm32_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stm32_pwm_pm_ops, stm32_pwm_suspend, stm32_pwm_resume);
 
 static const struct of_device_id stm32_pwm_of_match[] = {
        { .compatible = "st,stm32-pwm", },
@@ -692,7 +716,7 @@ static struct platform_driver stm32_pwm_driver = {
        .driver = {
                .name = "stm32-pwm",
                .of_match_table = stm32_pwm_of_match,
-               .pm = &stm32_pwm_pm_ops,
+               .pm = pm_ptr(&stm32_pwm_pm_ops),
        },
 };
 module_platform_driver(stm32_pwm_driver);
index a46f5b4dd81622ce3651f6c35c5fc4ff0b91b83a..19c0c0f39675d3315512f45cc425229e1da120b7 100644 (file)
@@ -44,7 +44,7 @@ static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 
        ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
        if (ret < 0) {
-               dev_err(chip->dev, "error reading PWM#%u control\n",
+               dev_dbg(chip->dev, "error reading PWM#%u control\n",
                        pwm->hwpwm);
                return ret;
        }
@@ -53,7 +53,7 @@ static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 
        ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
        if (ret) {
-               dev_err(chip->dev, "error writing PWM#%u control\n",
+               dev_dbg(chip->dev, "error writing PWM#%u control\n",
                        pwm->hwpwm);
                return ret;
        }
@@ -70,7 +70,7 @@ static int stmpe_24xx_pwm_disable(struct pwm_chip *chip,
 
        ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
        if (ret < 0) {
-               dev_err(chip->dev, "error reading PWM#%u control\n",
+               dev_dbg(chip->dev, "error reading PWM#%u control\n",
                        pwm->hwpwm);
                return ret;
        }
@@ -79,7 +79,7 @@ static int stmpe_24xx_pwm_disable(struct pwm_chip *chip,
 
        ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
        if (ret)
-               dev_err(chip->dev, "error writing PWM#%u control\n",
+               dev_dbg(chip->dev, "error writing PWM#%u control\n",
                        pwm->hwpwm);
        return ret;
 }
@@ -233,7 +233,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 
                ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
                if (ret) {
-                       dev_err(chip->dev, "error writing register %02x: %d\n",
+                       dev_dbg(chip->dev, "error writing register %02x: %d\n",
                                offset, ret);
                        return ret;
                }
@@ -242,7 +242,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 
                ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
                if (ret) {
-                       dev_err(chip->dev, "error writing register %02x: %d\n",
+                       dev_dbg(chip->dev, "error writing register %02x: %d\n",
                                offset, ret);
                        return ret;
                }
@@ -275,7 +275,7 @@ static int stmpe_24xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = stmpe_24xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = stmpe_24xx_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (err)
                return err;
 
index 39ea51e08c946d0ec24a7cc311fc930b3ca1b823..82ee2f0754f965cfd36b0196794985204c412f58 100644 (file)
@@ -256,7 +256,7 @@ static int tegra_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = tegra_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = tegra_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (err)
                return err;
 
index 11e3549cf103445cfd4f6f6de5eca55769ecd1e5..d974f4414ac9ae9d2d29e1283e9b7e718922de4a 100644 (file)
@@ -269,7 +269,6 @@ static void ecap_pwm_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 }
 
-#ifdef CONFIG_PM_SLEEP
 static void ecap_pwm_save_context(struct ecap_pwm_chip *pc)
 {
        pm_runtime_get_sync(pc->chip.dev);
@@ -312,15 +311,14 @@ static int ecap_pwm_resume(struct device *dev)
        ecap_pwm_restore_context(pc);
        return 0;
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ecap_pwm_suspend, ecap_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ecap_pwm_suspend, ecap_pwm_resume);
 
 static struct platform_driver ecap_pwm_driver = {
        .driver = {
                .name = "ecap",
                .of_match_table = ecap_of_match,
-               .pm = &ecap_pwm_pm_ops,
+               .pm = pm_ptr(&ecap_pwm_pm_ops),
        },
        .probe = ecap_pwm_probe,
        .remove_new = ecap_pwm_remove,
index 66ac2655845f783a20bf5efd7f974f061ee5ee25..af231fa74fa9011e50f8d425e9e1cc1b1e433c13 100644 (file)
@@ -521,7 +521,6 @@ static void ehrpwm_pwm_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 }
 
-#ifdef CONFIG_PM_SLEEP
 static void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
 {
        pm_runtime_get_sync(pc->chip.dev);
@@ -589,16 +588,15 @@ static int ehrpwm_pwm_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(ehrpwm_pwm_pm_ops, ehrpwm_pwm_suspend,
-                        ehrpwm_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ehrpwm_pwm_pm_ops, ehrpwm_pwm_suspend,
+                               ehrpwm_pwm_resume);
 
 static struct platform_driver ehrpwm_pwm_driver = {
        .driver = {
                .name = "ehrpwm",
                .of_match_table = ehrpwm_of_match,
-               .pm = &ehrpwm_pwm_pm_ops,
+               .pm = pm_ptr(&ehrpwm_pwm_pm_ops),
        },
        .probe = ehrpwm_pwm_probe,
        .remove_new = ehrpwm_pwm_remove,
index 625233f4703a972462c2d096bd1b7ca79c1fb5db..c670ccb81653609d3c665a4729b377abaf69f1b3 100644 (file)
@@ -172,10 +172,10 @@ static int twl4030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
         * We cannot skip calling ->config even if state->period ==
         * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
         * because we might have exited early in the last call to
-        * pwm_apply_state because of !state->enabled and so the two values in
+        * pwm_apply_might_sleep because of !state->enabled and so the two values in
         * pwm->state might not be configured in hardware.
         */
-       ret = twl4030_pwmled_config(pwm->chip, pwm,
+       ret = twl4030_pwmled_config(chip, pwm,
                                    state->duty_cycle, state->period);
        if (ret)
                return ret;
@@ -275,7 +275,7 @@ static int twl6030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = twl6030_pwmled_config(pwm->chip, pwm,
+       err = twl6030_pwmled_config(chip, pwm,
                                    state->duty_cycle, state->period);
        if (err)
                return err;
index 603d31f27470f6b80b6cc347b2fcc44d0d855bb6..68e02c9a6bf9a1e519b91e537ebf44495813c793 100644 (file)
@@ -294,7 +294,7 @@ static int twl4030_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = twl_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = twl_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (err)
                return err;
 
@@ -319,7 +319,7 @@ static int twl6030_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       err = twl_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = twl_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (err)
                return err;
 
index 5568d5312d3caf4d148692fd4a383053c1576747..7bfeacee05d0faed908689ec7e321c45f6a36dd6 100644 (file)
@@ -206,10 +206,10 @@ static int vt8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
         * We cannot skip calling ->config even if state->period ==
         * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
         * because we might have exited early in the last call to
-        * pwm_apply_state because of !state->enabled and so the two values in
+        * pwm_apply_might_sleep because of !state->enabled and so the two values in
         * pwm->state might not be configured in hardware.
         */
-       err = vt8500_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+       err = vt8500_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (err)
                return err;
 
index 8d1254761e4dd2ab465008875573c71a7ecfa3e7..1698609d91c8ac8b63a0df5ed2863cf35abf9cbc 100644 (file)
@@ -62,7 +62,7 @@ static ssize_t period_store(struct device *child,
        mutex_lock(&export->lock);
        pwm_get_state(pwm, &state);
        state.period = val;
-       ret = pwm_apply_state(pwm, &state);
+       ret = pwm_apply_might_sleep(pwm, &state);
        mutex_unlock(&export->lock);
 
        return ret ? : size;
@@ -97,7 +97,7 @@ static ssize_t duty_cycle_store(struct device *child,
        mutex_lock(&export->lock);
        pwm_get_state(pwm, &state);
        state.duty_cycle = val;
-       ret = pwm_apply_state(pwm, &state);
+       ret = pwm_apply_might_sleep(pwm, &state);
        mutex_unlock(&export->lock);
 
        return ret ? : size;
@@ -144,7 +144,7 @@ static ssize_t enable_store(struct device *child,
                goto unlock;
        }
 
-       ret = pwm_apply_state(pwm, &state);
+       ret = pwm_apply_might_sleep(pwm, &state);
 
 unlock:
        mutex_unlock(&export->lock);
@@ -194,7 +194,7 @@ static ssize_t polarity_store(struct device *child,
        mutex_lock(&export->lock);
        pwm_get_state(pwm, &state);
        state.polarity = polarity;
-       ret = pwm_apply_state(pwm, &state);
+       ret = pwm_apply_might_sleep(pwm, &state);
        mutex_unlock(&export->lock);
 
        return ret ? : size;
@@ -401,7 +401,7 @@ static int pwm_class_apply_state(struct pwm_export *export,
                                 struct pwm_device *pwm,
                                 struct pwm_state *state)
 {
-       int ret = pwm_apply_state(pwm, state);
+       int ret = pwm_apply_might_sleep(pwm, state);
 
        /* release lock taken in pwm_class_get_state */
        mutex_unlock(&export->lock);
@@ -510,7 +510,7 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
         * the kernel it's just not exported.
         */
        parent = device_create(&pwm_class, chip->dev, MKDEV(0, 0), chip,
-                              "pwmchip%d", chip->base);
+                              "pwmchip%d", chip->id);
        if (IS_ERR(parent)) {
                dev_warn(chip->dev,
                         "device_create failed for pwm_chip sysfs export\n");
index bc88a40a88d4cac0bbe61efc4725703b1136654e..830a1c4cd705784687fb424f3bfd23fdc7743fcf 100644 (file)
@@ -392,7 +392,7 @@ static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
                return ret;
 
        if (*val)
-               return regmap_write(map, reg, *val);
+               return regmap_write(map, reg, 0);
 
        return 0;
 }
index 2aff6db748e2c96b84ae9aa02265e3bec32eeb30..60cfcd741c2af31ce7e351cdf8cfb35f996264cf 100644 (file)
@@ -90,7 +90,7 @@ static int pwm_regulator_set_voltage_sel(struct regulator_dev *rdev,
        pwm_set_relative_duty_cycle(&pstate,
                        drvdata->duty_cycle_table[selector].dutycycle, 100);
 
-       ret = pwm_apply_state(drvdata->pwm, &pstate);
+       ret = pwm_apply_might_sleep(drvdata->pwm, &pstate);
        if (ret) {
                dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
                return ret;
@@ -157,7 +157,17 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
 
        pwm_get_state(drvdata->pwm, &pstate);
 
+       if (!pstate.enabled) {
+               if (pstate.polarity == PWM_POLARITY_INVERSED)
+                       pstate.duty_cycle = pstate.period;
+               else
+                       pstate.duty_cycle = 0;
+       }
+
        voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
+       if (voltage < min(max_uV_duty, min_uV_duty) ||
+           voltage > max(max_uV_duty, min_uV_duty))
+               return -ENOTRECOVERABLE;
 
        /*
         * The dutycycle for min_uV might be greater than the one for max_uV.
@@ -216,7 +226,7 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
 
        pwm_set_relative_duty_cycle(&pstate, dutycycle, duty_unit);
 
-       ret = pwm_apply_state(drvdata->pwm, &pstate);
+       ret = pwm_apply_might_sleep(drvdata->pwm, &pstate);
        if (ret) {
                dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
                return ret;
@@ -313,6 +323,32 @@ static int pwm_regulator_init_continuous(struct platform_device *pdev,
        return 0;
 }
 
+static int pwm_regulator_init_boot_on(struct platform_device *pdev,
+                                     struct pwm_regulator_data *drvdata,
+                                     const struct regulator_init_data *init_data)
+{
+       struct pwm_state pstate;
+
+       if (!init_data->constraints.boot_on || drvdata->enb_gpio)
+               return 0;
+
+       pwm_get_state(drvdata->pwm, &pstate);
+       if (pstate.enabled)
+               return 0;
+
+       /*
+        * Update the duty cycle so the output does not change
+        * when the regulator core enables the regulator (and
+        * thus the PWM channel).
+        */
+       if (pstate.polarity == PWM_POLARITY_INVERSED)
+               pstate.duty_cycle = pstate.period;
+       else
+               pstate.duty_cycle = 0;
+
+       return pwm_apply_might_sleep(drvdata->pwm, &pstate);
+}
+
 static int pwm_regulator_probe(struct platform_device *pdev)
 {
        const struct regulator_init_data *init_data;
@@ -372,6 +408,13 @@ static int pwm_regulator_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       ret = pwm_regulator_init_boot_on(pdev, drvdata, init_data);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to apply boot_on settings: %d\n",
+                       ret);
+               return ret;
+       }
+
        regulator = devm_regulator_register(&pdev->dev,
                                            &drvdata->desc, &config);
        if (IS_ERR(regulator)) {
index f48214e2c3b46000eb2e833b422be4474a08f920..04133510e5af7dee68f7d4cb8f10f7af02ff44ab 100644 (file)
@@ -726,9 +726,25 @@ static int ti_abb_probe(struct platform_device *pdev)
                        return PTR_ERR(abb->setup_reg);
        }
 
-       abb->int_base = devm_platform_ioremap_resource_byname(pdev, "int-address");
-       if (IS_ERR(abb->int_base))
-               return PTR_ERR(abb->int_base);
+       pname = "int-address";
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+       if (!res) {
+               dev_err(dev, "Missing '%s' IO resource\n", pname);
+               return -ENODEV;
+       }
+       /*
+        * The MPU interrupt status register (PRM_IRQSTATUS_MPU) is
+        * shared between regulator-abb-{ivahd,dspeve,gpu} driver
+        * instances. Therefore use devm_ioremap() rather than
+        * devm_platform_ioremap_resource_byname() to avoid busy
+        * resource region conflicts.
+        */
+       abb->int_base = devm_ioremap(dev, res->start,
+                                            resource_size(res));
+       if (!abb->int_base) {
+               dev_err(dev, "Unable to map '%s'\n", pname);
+               return -ENOMEM;
+       }
 
        /* Map Optional resources */
        pname = "efuse-address";
index 8fcda9b7454597dec300ae6addb6bcbc656efe7c..a1c62d15f16c6ba00a9edd6edbe2fac490b61b16 100644 (file)
@@ -940,6 +940,7 @@ static const struct rproc_ops imx_dsp_rproc_ops = {
        .kick           = imx_dsp_rproc_kick,
        .load           = imx_dsp_rproc_elf_load_segments,
        .parse_fw       = imx_dsp_rproc_parse_fw,
+       .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
        .sanity_check   = rproc_elf_sanity_check,
        .get_boot_addr  = rproc_elf_get_boot_addr,
 };
index 913a5d2068e8cef52db9cb381405ce829e4362cc..a9dd58608052c285aa87010b1ead045162970d63 100644 (file)
@@ -1165,6 +1165,22 @@ static const struct adsp_data sm8550_mpss_resource = {
        .region_assign_idx = 2,
 };
 
+static const struct adsp_data sc7280_wpss_resource = {
+       .crash_reason_smem = 626,
+       .firmware_name = "wpss.mdt",
+       .pas_id = 6,
+       .auto_boot = true,
+       .proxy_pd_names = (char*[]){
+               "cx",
+               "mx",
+               NULL
+       },
+       .load_state = "wpss",
+       .ssr_name = "wpss",
+       .sysmon_name = "wpss",
+       .ssctl_id = 0x19,
+};
+
 static const struct of_device_id adsp_of_match[] = {
        { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
        { .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
@@ -1178,7 +1194,10 @@ static const struct of_device_id adsp_of_match[] = {
        { .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
        { .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource},
        { .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
+       { .compatible = "qcom,sc7280-adsp-pas", .data = &sm8350_adsp_resource},
+       { .compatible = "qcom,sc7280-cdsp-pas", .data = &sm6350_cdsp_resource},
        { .compatible = "qcom,sc7280-mpss-pas", .data = &mpss_resource_init},
+       { .compatible = "qcom,sc7280-wpss-pas", .data = &sc7280_wpss_resource},
        { .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
        { .compatible = "qcom,sc8180x-cdsp-pas", .data = &sm8150_cdsp_resource},
        { .compatible = "qcom,sc8180x-mpss-pas", .data = &sc8180x_mpss_resource},
index ef8415a7cd542d065c13bc704ddafe7d03b2bd71..ab882e3b7130bcb50df0840207b112280e3f80b9 100644 (file)
@@ -158,8 +158,8 @@ static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
        /* send the index of the triggered virtqueue in the mailbox payload */
        ret = mbox_send_message(kproc->mbox, (void *)msg);
        if (ret < 0)
-               dev_err(dev, "failed to send mailbox message, status = %d\n",
-                       ret);
+               dev_err(dev, "failed to send mailbox message (%pe)\n",
+                       ERR_PTR(ret));
 }
 
 /* Put the DSP processor into reset */
@@ -170,7 +170,7 @@ static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
 
        ret = reset_control_assert(kproc->reset);
        if (ret) {
-               dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+               dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
                return ret;
        }
 
@@ -180,7 +180,7 @@ static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
        ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
                                                    kproc->ti_sci_id);
        if (ret) {
-               dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+               dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
                if (reset_control_deassert(kproc->reset))
                        dev_warn(dev, "local-reset deassert back failed\n");
        }
@@ -200,14 +200,14 @@ static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
        ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
                                                    kproc->ti_sci_id);
        if (ret) {
-               dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
+               dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
                return ret;
        }
 
 lreset:
        ret = reset_control_deassert(kproc->reset);
        if (ret) {
-               dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+               dev_err(dev, "local-reset deassert failed, (%pe)\n", ERR_PTR(ret));
                if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
                                                          kproc->ti_sci_id))
                        dev_warn(dev, "module-reset assert back failed\n");
@@ -246,7 +246,7 @@ static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
         */
        ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
        if (ret < 0) {
-               dev_err(dev, "mbox_send_message failed: %d\n", ret);
+               dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
                mbox_free_channel(kproc->mbox);
                return ret;
        }
@@ -272,8 +272,8 @@ static int k3_dsp_rproc_prepare(struct rproc *rproc)
        ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
                                                    kproc->ti_sci_id);
        if (ret)
-               dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
-                       ret);
+               dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading (%pe)\n",
+                       ERR_PTR(ret));
 
        return ret;
 }
@@ -296,7 +296,7 @@ static int k3_dsp_rproc_unprepare(struct rproc *rproc)
        ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
                                                    kproc->ti_sci_id);
        if (ret)
-               dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+               dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
 
        return ret;
 }
@@ -561,9 +561,9 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
 
        num_rmems = of_property_count_elems_of_size(np, "memory-region",
                                                    sizeof(phandle));
-       if (num_rmems <= 0) {
-               dev_err(dev, "device does not reserved memory regions, ret = %d\n",
-                       num_rmems);
+       if (num_rmems < 0) {
+               dev_err(dev, "device does not reserved memory regions (%pe)\n",
+                       ERR_PTR(num_rmems));
                return -EINVAL;
        }
        if (num_rmems < 2) {
@@ -575,8 +575,8 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
        /* use reserved memory region 0 for vring DMA allocations */
        ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
        if (ret) {
-               dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
-                       ret);
+               dev_err(dev, "device cannot initialize DMA pool (%pe)\n",
+                       ERR_PTR(ret));
                return ret;
        }
 
@@ -687,11 +687,8 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
                return -ENODEV;
 
        ret = rproc_of_parse_firmware(dev, 0, &fw_name);
-       if (ret) {
-               dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
-                       ret);
-               return ret;
-       }
+       if (ret)
+               return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
 
        rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
                            sizeof(*kproc));
@@ -711,39 +708,35 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
 
        kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
        if (IS_ERR(kproc->ti_sci)) {
-               ret = PTR_ERR(kproc->ti_sci);
-               if (ret != -EPROBE_DEFER) {
-                       dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
-                               ret);
-               }
+               ret = dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
+                                   "failed to get ti-sci handle\n");
                kproc->ti_sci = NULL;
                goto free_rproc;
        }
 
        ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
        if (ret) {
-               dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+               dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");
                goto put_sci;
        }
 
        kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
        if (IS_ERR(kproc->reset)) {
-               ret = PTR_ERR(kproc->reset);
-               dev_err(dev, "failed to get reset, status = %d\n", ret);
+               ret = dev_err_probe(dev, PTR_ERR(kproc->reset),
+                                   "failed to get reset\n");
                goto put_sci;
        }
 
        kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
        if (IS_ERR(kproc->tsp)) {
-               dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
-                       ret);
-               ret = PTR_ERR(kproc->tsp);
+               ret = dev_err_probe(dev, PTR_ERR(kproc->tsp),
+                                   "failed to construct ti-sci proc control\n");
                goto put_sci;
        }
 
        ret = ti_sci_proc_request(kproc->tsp);
        if (ret < 0) {
-               dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+               dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
                goto free_tsp;
        }
 
@@ -753,15 +746,14 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
 
        ret = k3_dsp_reserved_mem_init(kproc);
        if (ret) {
-               dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
+               dev_err_probe(dev, ret, "reserved memory init failed\n");
                goto release_tsp;
        }
 
        ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
                                               NULL, &p_state);
        if (ret) {
-               dev_err(dev, "failed to get initial state, mode cannot be determined, ret = %d\n",
-                       ret);
+               dev_err_probe(dev, ret, "failed to get initial state, mode cannot be determined\n");
                goto release_mem;
        }
 
@@ -787,8 +779,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
                if (data->uses_lreset) {
                        ret = reset_control_status(kproc->reset);
                        if (ret < 0) {
-                               dev_err(dev, "failed to get reset status, status = %d\n",
-                                       ret);
+                               dev_err_probe(dev, ret, "failed to get reset status\n");
                                goto release_mem;
                        } else if (ret == 0) {
                                dev_warn(dev, "local reset is deasserted for device\n");
@@ -799,8 +790,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
 
        ret = rproc_add(rproc);
        if (ret) {
-               dev_err(dev, "failed to add register device with remoteproc core, status = %d\n",
-                       ret);
+               dev_err_probe(dev, ret, "failed to add register device with remoteproc core\n");
                goto release_mem;
        }
 
@@ -813,19 +803,19 @@ release_mem:
 release_tsp:
        ret1 = ti_sci_proc_release(kproc->tsp);
        if (ret1)
-               dev_err(dev, "failed to release proc, ret = %d\n", ret1);
+               dev_err(dev, "failed to release proc (%pe)\n", ERR_PTR(ret1));
 free_tsp:
        kfree(kproc->tsp);
 put_sci:
        ret1 = ti_sci_put_handle(kproc->ti_sci);
        if (ret1)
-               dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
+               dev_err(dev, "failed to put ti_sci handle (%pe)\n", ERR_PTR(ret1));
 free_rproc:
        rproc_free(rproc);
        return ret;
 }
 
-static int k3_dsp_rproc_remove(struct platform_device *pdev)
+static void k3_dsp_rproc_remove(struct platform_device *pdev)
 {
        struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
        struct rproc *rproc = kproc->rproc;
@@ -835,8 +825,9 @@ static int k3_dsp_rproc_remove(struct platform_device *pdev)
        if (rproc->state == RPROC_ATTACHED) {
                ret = rproc_detach(rproc);
                if (ret) {
-                       dev_err(dev, "failed to detach proc, ret = %d\n", ret);
-                       return ret;
+                       /* Note this error path leaks resources */
+                       dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret));
+                       return;
                }
        }
 
@@ -844,18 +835,16 @@ static int k3_dsp_rproc_remove(struct platform_device *pdev)
 
        ret = ti_sci_proc_release(kproc->tsp);
        if (ret)
-               dev_err(dev, "failed to release proc, ret = %d\n", ret);
+               dev_err(dev, "failed to release proc (%pe)\n", ERR_PTR(ret));
 
        kfree(kproc->tsp);
 
        ret = ti_sci_put_handle(kproc->ti_sci);
        if (ret)
-               dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
+               dev_err(dev, "failed to put ti_sci handle (%pe)\n", ERR_PTR(ret));
 
        k3_dsp_reserved_mem_exit(kproc);
        rproc_free(kproc->rproc);
-
-       return 0;
 }
 
 static const struct k3_dsp_mem_data c66_mems[] = {
@@ -906,7 +895,7 @@ MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
 
 static struct platform_driver k3_dsp_rproc_driver = {
        .probe  = k3_dsp_rproc_probe,
-       .remove = k3_dsp_rproc_remove,
+       .remove_new = k3_dsp_rproc_remove,
        .driver = {
                .name = "k3-dsp-rproc",
                .of_match_table = k3_dsp_of_match,
index dc87965f8164167008a686f45f6084cce2c3ab53..1062939c32645f11c83b508fdc35a9b18a335113 100644 (file)
@@ -378,6 +378,7 @@ static void virtio_rpmsg_release_device(struct device *dev)
        struct rpmsg_device *rpdev = to_rpmsg_device(dev);
        struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
 
+       kfree(rpdev->driver_override);
        kfree(vch);
 }
 
index 3814e0845e7729d9d584680e16abb4ec21f0dda7..e37a4341f442d8ca2fcd80e82bd2adf6c2ca9ea5 100644 (file)
@@ -373,6 +373,19 @@ config RTC_DRV_MAX8997
          This driver can also be built as a module. If so, the module
          will be called rtc-max8997.
 
+config RTC_DRV_MAX31335
+       tristate "Analog Devices MAX31335"
+       depends on I2C
+       depends on COMMON_CLK
+       depends on HWMON || HWMON=n
+       select REGMAP_I2C
+       help
+         If you say yes here you get support for the Analog Devices
+         MAX31335.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-max31335.
+
 config RTC_DRV_MAX77686
        tristate "Maxim MAX77686"
        depends on MFD_MAX77686 || MFD_MAX77620 || MFD_MAX77714 || COMPILE_TEST
@@ -578,6 +591,18 @@ config RTC_DRV_TPS6586X
          along with alarm. This driver supports the RTC driver for
          the TPS6586X RTC module.
 
+config RTC_DRV_TPS6594
+       tristate "TI TPS6594 RTC driver"
+       depends on MFD_TPS6594
+       default MFD_TPS6594
+       help
+         TI Power Management IC TPS6594 supports RTC functionality
+         along with alarm. This driver supports the RTC driver for
+         the TPS6594 RTC module.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-tps6594.
+
 config RTC_DRV_TPS65910
        tristate "TI TPS65910 RTC driver"
        depends on MFD_TPS65910
@@ -1705,6 +1730,7 @@ config RTC_DRV_LPC24XX
        tristate "NXP RTC for LPC178x/18xx/408x/43xx"
        depends on ARCH_LPC18XX || COMPILE_TEST
        depends on OF && HAS_IOMEM
+       depends on COMMON_CLK
        help
          This enables support for the NXP RTC found which can be found on
          NXP LPC178x/18xx/408x/43xx devices.
@@ -1930,6 +1956,17 @@ config RTC_DRV_TI_K3
          This driver can also be built as a module, if so, the module
          will be called "rtc-ti-k3".
 
+config RTC_DRV_MA35D1
+       tristate "Nuvoton MA35D1 RTC"
+       depends on ARCH_MA35 || COMPILE_TEST
+       select REGMAP_MMIO
+       help
+          If you say yes here you get support for the Nuvoton MA35D1
+          On-Chip Real Time Clock.
+
+          This driver can also be built as a module, if so, the module
+          will be called "rtc-ma35d1".
+
 comment "HID Sensor RTC drivers"
 
 config RTC_DRV_HID_SENSOR_TIME
index 7b03c3abfd786eb03356566fc839aff542da7442..6efff381c484d5d386180de033c07569621c667f 100644 (file)
@@ -88,6 +88,8 @@ obj-$(CONFIG_RTC_DRV_M41T94)  += rtc-m41t94.o
 obj-$(CONFIG_RTC_DRV_M48T35)   += rtc-m48t35.o
 obj-$(CONFIG_RTC_DRV_M48T59)   += rtc-m48t59.o
 obj-$(CONFIG_RTC_DRV_M48T86)   += rtc-m48t86.o
+obj-$(CONFIG_RTC_DRV_MA35D1)   += rtc-ma35d1.o
+obj-$(CONFIG_RTC_DRV_MAX31335) += rtc-max31335.o
 obj-$(CONFIG_RTC_DRV_MAX6900)  += rtc-max6900.o
 obj-$(CONFIG_RTC_DRV_MAX6902)  += rtc-max6902.o
 obj-$(CONFIG_RTC_DRV_MAX6916)  += rtc-max6916.o
@@ -176,6 +178,7 @@ obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
 obj-$(CONFIG_RTC_DRV_TEST)     += rtc-test.o
 obj-$(CONFIG_RTC_DRV_TI_K3)    += rtc-ti-k3.o
 obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
+obj-$(CONFIG_RTC_DRV_TPS6594)  += rtc-tps6594.o
 obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
 obj-$(CONFIG_RTC_DRV_TWL4030)  += rtc-twl.o
 obj-$(CONFIG_RTC_DRV_VT8500)   += rtc-vt8500.o
index edfd942f8c54942162808d034823f5fd86424b59..921ee182797439b1496239c93b5d9e8ed76a0b45 100644 (file)
@@ -256,7 +256,7 @@ static int rtc_device_get_id(struct device *dev)
                of_id = of_alias_get_id(dev->parent->of_node, "rtc");
 
        if (of_id >= 0) {
-               id = ida_simple_get(&rtc_ida, of_id, of_id + 1, GFP_KERNEL);
+               id = ida_alloc_range(&rtc_ida, of_id, of_id, GFP_KERNEL);
                if (id < 0)
                        dev_warn(dev, "/aliases ID %d not available\n", of_id);
        }
index eaf2c9ab96619c1baad80711112f30ce9a94c22d..fa642bba3cee008c9d4fe4abe2cc37aebe2eb41c 100644 (file)
@@ -99,7 +99,7 @@ struct ac100_rtc_dev {
        struct clk_hw_onecell_data *clk_data;
 };
 
-/**
+/*
  * Clock controls for 3 clock output pins
  */
 
@@ -378,7 +378,7 @@ static void ac100_rtc_unregister_clks(struct ac100_rtc_dev *chip)
        clk_unregister_fixed_rate(chip->rtc_32k_clk->clk);
 }
 
-/**
+/*
  * RTC related bits
  */
 static int ac100_rtc_get_time(struct device *dev, struct rtc_time *rtc_tm)
index 228fb2d11c7091e00d9bb26b2254db2ed923c097..7d99cd2c37a0ba87c06beb2c7dcbe0f560d26bb9 100644 (file)
@@ -231,7 +231,7 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
        if (!pm_trace_rtc_valid())
                return -EIO;
 
-       ret = mc146818_get_time(t);
+       ret = mc146818_get_time(t, 1000);
        if (ret < 0) {
                dev_err_ratelimited(dev, "unable to read current time\n");
                return ret;
@@ -292,7 +292,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
 
        /* This not only a rtc_op, but also called directly */
        if (!is_valid_irq(cmos->irq))
-               return -EIO;
+               return -ETIMEDOUT;
 
        /* Basic alarms only support hour, minute, and seconds fields.
         * Some also support day and month, for alarms up to a year in
@@ -307,7 +307,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
         *
         * Use the mc146818_avoid_UIP() function to avoid this.
         */
-       if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p))
+       if (!mc146818_avoid_UIP(cmos_read_alarm_callback, 10, &p))
                return -EIO;
 
        if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
@@ -556,8 +556,8 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
         *
         * Use mc146818_avoid_UIP() to avoid this.
         */
-       if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p))
-               return -EIO;
+       if (!mc146818_avoid_UIP(cmos_set_alarm_callback, 10, &p))
+               return -ETIMEDOUT;
 
        cmos->alarm_expires = rtc_tm_to_time64(&t->time);
 
@@ -818,18 +818,24 @@ static void rtc_wake_off(struct device *dev)
 }
 
 #ifdef CONFIG_X86
-/* Enable use_acpi_alarm mode for Intel platforms no earlier than 2015 */
 static void use_acpi_alarm_quirks(void)
 {
-       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_INTEL:
+               if (dmi_get_bios_year() < 2015)
+                       return;
+               break;
+       case X86_VENDOR_AMD:
+       case X86_VENDOR_HYGON:
+               if (dmi_get_bios_year() < 2021)
+                       return;
+               break;
+       default:
                return;
-
+       }
        if (!is_hpet_enabled())
                return;
 
-       if (dmi_get_bios_year() < 2015)
-               return;
-
        use_acpi_alarm = true;
 }
 #else
index 2f5d60622564a0d83ec3be91bf89710f34e2b1b7..859397541f2983fb32163b780245554e0e84084a 100644 (file)
@@ -377,7 +377,6 @@ static int da9063_rtc_probe(struct platform_device *pdev)
 {
        struct da9063_compatible_rtc *rtc;
        const struct da9063_compatible_rtc_regmap *config;
-       const struct of_device_id *match;
        int irq_alarm;
        u8 data[RTC_DATA_LEN];
        int ret;
@@ -385,14 +384,11 @@ static int da9063_rtc_probe(struct platform_device *pdev)
        if (!pdev->dev.of_node)
                return -ENXIO;
 
-       match = of_match_node(da9063_compatible_reg_id_table,
-                             pdev->dev.of_node);
-
        rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
        if (!rtc)
                return -ENOMEM;
 
-       rtc->config = match->data;
+       rtc->config = device_get_match_data(&pdev->dev);
        if (of_device_is_compatible(pdev->dev.of_node, "dlg,da9063-rtc")) {
                struct da9063 *chip = dev_get_drvdata(pdev->dev.parent);
 
@@ -411,57 +407,49 @@ static int da9063_rtc_probe(struct platform_device *pdev)
                                 config->rtc_enable_reg,
                                 config->rtc_enable_mask,
                                 config->rtc_enable_mask);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to enable RTC\n");
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret, "Failed to enable RTC\n");
 
        ret = regmap_update_bits(rtc->regmap,
                                 config->rtc_enable_32k_crystal_reg,
                                 config->rtc_crystal_mask,
                                 config->rtc_crystal_mask);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to run 32kHz oscillator\n");
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret,
+                                    "Failed to run 32kHz oscillator\n");
 
        ret = regmap_update_bits(rtc->regmap,
                                 config->rtc_alarm_secs_reg,
                                 config->rtc_alarm_status_mask,
                                 0);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to access RTC alarm register\n");
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret,
+                                    "Failed to access RTC alarm register\n");
 
        ret = regmap_update_bits(rtc->regmap,
                                 config->rtc_alarm_secs_reg,
                                 DA9063_ALARM_STATUS_ALARM,
                                 DA9063_ALARM_STATUS_ALARM);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to access RTC alarm register\n");
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret,
+                                    "Failed to access RTC alarm register\n");
 
        ret = regmap_update_bits(rtc->regmap,
                                 config->rtc_alarm_year_reg,
                                 config->rtc_tick_on_mask,
                                 0);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to disable TICKs\n");
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret,
+                                    "Failed to disable TICKs\n");
 
        data[RTC_SEC] = 0;
        ret = regmap_bulk_read(rtc->regmap,
                               config->rtc_alarm_secs_reg,
                               &data[config->rtc_data_start],
                               config->rtc_alarm_len);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to read initial alarm data: %d\n",
-                       ret);
-               return ret;
-       }
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret,
+                                    "Failed to read initial alarm data\n");
 
        platform_set_drvdata(pdev, rtc);
 
@@ -485,25 +473,29 @@ static int da9063_rtc_probe(struct platform_device *pdev)
                clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtc_dev->features);
        }
 
-       irq_alarm = platform_get_irq_byname(pdev, "ALARM");
-       if (irq_alarm < 0)
+       irq_alarm = platform_get_irq_byname_optional(pdev, "ALARM");
+       if (irq_alarm >= 0) {
+               ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
+                                               da9063_alarm_event,
+                                               IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                               "ALARM", rtc);
+               if (ret)
+                       dev_err(&pdev->dev,
+                               "Failed to request ALARM IRQ %d: %d\n",
+                               irq_alarm, ret);
+
+               ret = dev_pm_set_wake_irq(&pdev->dev, irq_alarm);
+               if (ret)
+                       dev_warn(&pdev->dev,
+                                "Failed to set IRQ %d as a wake IRQ: %d\n",
+                                irq_alarm, ret);
+
+               device_init_wakeup(&pdev->dev, true);
+       }  else if (irq_alarm != -ENXIO) {
                return irq_alarm;
-
-       ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
-                                       da9063_alarm_event,
-                                       IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                       "ALARM", rtc);
-       if (ret)
-               dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
-                       irq_alarm, ret);
-
-       ret = dev_pm_set_wake_irq(&pdev->dev, irq_alarm);
-       if (ret)
-               dev_warn(&pdev->dev,
-                        "Failed to set IRQ %d as a wake IRQ: %d\n",
-                        irq_alarm, ret);
-
-       device_init_wakeup(&pdev->dev, true);
+       } else {
+               clear_bit(RTC_FEATURE_ALARM, rtc->rtc_dev->features);
+       }
 
        return devm_rtc_register_device(rtc->rtc_dev);
 }
index 89d7b085f7219954b1a154ef41245e76251e9336..1485a6ae51e613092c4d08fb44f3d6cc7d385816 100644 (file)
@@ -536,6 +536,8 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_I2C)
+
 #ifdef CONFIG_PM_SLEEP
 static int ds3232_suspend(struct device *dev)
 {
@@ -564,8 +566,6 @@ static const struct dev_pm_ops ds3232_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(ds3232_suspend, ds3232_resume)
 };
 
-#if IS_ENABLED(CONFIG_I2C)
-
 static int ds3232_i2c_probe(struct i2c_client *client)
 {
        struct regmap *regmap;
diff --git a/drivers/rtc/rtc-ma35d1.c b/drivers/rtc/rtc-ma35d1.c
new file mode 100644 (file)
index 0000000..cfcfc28
--- /dev/null
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC driver for Nuvoton MA35D1
+ *
+ * Copyright (C) 2023 Nuvoton Technology Corp.
+ */
+
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+/* MA35D1 RTC Control Registers */
+#define MA35_REG_RTC_INIT      0x00
+#define MA35_REG_RTC_SINFASTS  0x04
+#define MA35_REG_RTC_FREQADJ   0x08
+#define MA35_REG_RTC_TIME      0x0c
+#define MA35_REG_RTC_CAL       0x10
+#define MA35_REG_RTC_CLKFMT    0x14
+#define MA35_REG_RTC_WEEKDAY   0x18
+#define MA35_REG_RTC_TALM      0x1c
+#define MA35_REG_RTC_CALM      0x20
+#define MA35_REG_RTC_LEAPYEAR  0x24
+#define MA35_REG_RTC_INTEN     0x28
+#define MA35_REG_RTC_INTSTS    0x2c
+
+/* register MA35_REG_RTC_INIT */
+#define RTC_INIT_ACTIVE                BIT(0)
+#define RTC_INIT_MAGIC_CODE    0xa5eb1357
+
+/* register MA35_REG_RTC_CLKFMT */
+#define RTC_CLKFMT_24HEN       BIT(0)
+#define RTC_CLKFMT_DCOMPEN     BIT(16)
+
+/* register MA35_REG_RTC_INTEN */
+#define RTC_INTEN_ALMIEN       BIT(0)
+#define RTC_INTEN_UIEN         BIT(1)
+#define RTC_INTEN_CLKFIEN      BIT(24)
+#define RTC_INTEN_CLKSTIEN     BIT(25)
+
+/* register MA35_REG_RTC_INTSTS */
+#define RTC_INTSTS_ALMIF       BIT(0)
+#define RTC_INTSTS_UIF         BIT(1)
+#define RTC_INTSTS_CLKFIF      BIT(24)
+#define RTC_INTSTS_CLKSTIF     BIT(25)
+
+#define RTC_INIT_TIMEOUT       250
+
+struct ma35_rtc {
+       int irq_num;
+       void __iomem *rtc_reg;
+       struct rtc_device *rtcdev;
+};
+
+static u32 rtc_reg_read(struct ma35_rtc *p, u32 offset)
+{
+       return __raw_readl(p->rtc_reg + offset);
+}
+
+static inline void rtc_reg_write(struct ma35_rtc *p, u32 offset, u32 value)
+{
+       __raw_writel(value, p->rtc_reg + offset);
+}
+
+static irqreturn_t ma35d1_rtc_interrupt(int irq, void *data)
+{
+       struct ma35_rtc *rtc = (struct ma35_rtc *)data;
+       unsigned long events = 0, rtc_irq;
+
+       rtc_irq = rtc_reg_read(rtc, MA35_REG_RTC_INTSTS);
+
+       if (rtc_irq & RTC_INTSTS_ALMIF) {
+               rtc_reg_write(rtc, MA35_REG_RTC_INTSTS, RTC_INTSTS_ALMIF);
+               events |= RTC_AF | RTC_IRQF;
+       }
+
+       rtc_update_irq(rtc->rtcdev, 1, events);
+
+       return IRQ_HANDLED;
+}
+
+static int ma35d1_rtc_init(struct ma35_rtc *rtc, u32 ms_timeout)
+{
+       const unsigned long timeout = jiffies + msecs_to_jiffies(ms_timeout);
+
+       do {
+               if (rtc_reg_read(rtc, MA35_REG_RTC_INIT) & RTC_INIT_ACTIVE)
+                       return 0;
+
+               rtc_reg_write(rtc, MA35_REG_RTC_INIT, RTC_INIT_MAGIC_CODE);
+
+               mdelay(1);
+
+       } while (time_before(jiffies, timeout));
+
+       return -ETIMEDOUT;
+}
+
+static int ma35d1_alarm_irq_enable(struct device *dev, u32 enabled)
+{
+       struct ma35_rtc *rtc = dev_get_drvdata(dev);
+       u32 reg_ien;
+
+       reg_ien = rtc_reg_read(rtc, MA35_REG_RTC_INTEN);
+
+       if (enabled)
+               rtc_reg_write(rtc, MA35_REG_RTC_INTEN, reg_ien | RTC_INTEN_ALMIEN);
+       else
+               rtc_reg_write(rtc, MA35_REG_RTC_INTEN, reg_ien & ~RTC_INTEN_ALMIEN);
+
+       return 0;
+}
+
+static int ma35d1_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       struct ma35_rtc *rtc = dev_get_drvdata(dev);
+       u32 time, cal, wday;
+
+       do {
+               time = rtc_reg_read(rtc, MA35_REG_RTC_TIME);
+               cal  = rtc_reg_read(rtc, MA35_REG_RTC_CAL);
+               wday = rtc_reg_read(rtc, MA35_REG_RTC_WEEKDAY);
+       } while (time != rtc_reg_read(rtc, MA35_REG_RTC_TIME) ||
+                cal != rtc_reg_read(rtc, MA35_REG_RTC_CAL));
+
+       tm->tm_mday = bcd2bin(cal >> 0);
+       tm->tm_wday = wday;
+       tm->tm_mon = bcd2bin(cal >> 8);
+       tm->tm_mon = tm->tm_mon - 1;
+       tm->tm_year = bcd2bin(cal >> 16) + 100;
+
+       tm->tm_sec = bcd2bin(time >> 0);
+       tm->tm_min = bcd2bin(time >> 8);
+       tm->tm_hour = bcd2bin(time >> 16);
+
+       return rtc_valid_tm(tm);
+}
+
+static int ma35d1_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct ma35_rtc *rtc = dev_get_drvdata(dev);
+       u32 val;
+
+       val = bin2bcd(tm->tm_mday) << 0 | bin2bcd(tm->tm_mon + 1) << 8 |
+             bin2bcd(tm->tm_year - 100) << 16;
+       rtc_reg_write(rtc, MA35_REG_RTC_CAL, val);
+
+       val = bin2bcd(tm->tm_sec) << 0 | bin2bcd(tm->tm_min) << 8 |
+             bin2bcd(tm->tm_hour) << 16;
+       rtc_reg_write(rtc, MA35_REG_RTC_TIME, val);
+
+       val = tm->tm_wday;
+       rtc_reg_write(rtc, MA35_REG_RTC_WEEKDAY, val);
+
+       return 0;
+}
+
+static int ma35d1_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct ma35_rtc *rtc = dev_get_drvdata(dev);
+       u32 talm, calm;
+
+       talm = rtc_reg_read(rtc, MA35_REG_RTC_TALM);
+       calm = rtc_reg_read(rtc, MA35_REG_RTC_CALM);
+
+       alrm->time.tm_mday = bcd2bin(calm >> 0);
+       alrm->time.tm_mon = bcd2bin(calm >> 8);
+       alrm->time.tm_mon = alrm->time.tm_mon - 1;
+
+       alrm->time.tm_year = bcd2bin(calm >> 16) + 100;
+
+       alrm->time.tm_sec = bcd2bin(talm >> 0);
+       alrm->time.tm_min = bcd2bin(talm >> 8);
+       alrm->time.tm_hour = bcd2bin(talm >> 16);
+
+       return rtc_valid_tm(&alrm->time);
+}
+
+static int ma35d1_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct ma35_rtc *rtc = dev_get_drvdata(dev);
+       u32 val;        /* u32, matching set_time(): registers are 32 bit */
+
+       val = bin2bcd(alrm->time.tm_mday) << 0 | bin2bcd(alrm->time.tm_mon + 1) << 8 |
+             bin2bcd(alrm->time.tm_year - 100) << 16;  /* alarm date, BCD, years since 2000 */
+       rtc_reg_write(rtc, MA35_REG_RTC_CALM, val);
+
+       val = bin2bcd(alrm->time.tm_sec) << 0 | bin2bcd(alrm->time.tm_min) << 8 |
+             bin2bcd(alrm->time.tm_hour) << 16;        /* alarm time, BCD */
+       rtc_reg_write(rtc, MA35_REG_RTC_TALM, val);
+
+       ma35d1_alarm_irq_enable(dev, alrm->enabled);
+
+       return 0;
+}
+
+static const struct rtc_class_ops ma35d1_rtc_ops = {
+       .read_time = ma35d1_rtc_read_time,
+       .set_time = ma35d1_rtc_set_time,
+       .read_alarm = ma35d1_rtc_read_alarm,
+       .set_alarm = ma35d1_rtc_set_alarm,
+       .alarm_irq_enable = ma35d1_alarm_irq_enable,
+};
+
+static int ma35d1_rtc_probe(struct platform_device *pdev)
+{
+       struct ma35_rtc *rtc;
+       struct clk *clk;
+       int ret;
+
+       rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+       if (!rtc)
+               return -ENOMEM;
+
+       rtc->rtc_reg = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(rtc->rtc_reg))
+               return PTR_ERR(rtc->rtc_reg);
+
+       /*
+        * devm-managed clock: previously of_clk_get()+clk_prepare_enable()
+        * leaked the reference and left the clock enabled on every later
+        * error path in this function.
+        */
+       clk = devm_clk_get_enabled(&pdev->dev, NULL);
+       if (IS_ERR(clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(clk), "failed to find rtc clock\n");
+
+       if (!(rtc_reg_read(rtc, MA35_REG_RTC_INIT) & RTC_INIT_ACTIVE)) {
+               ret = ma35d1_rtc_init(rtc, RTC_INIT_TIMEOUT);
+               if (ret)
+                       return dev_err_probe(&pdev->dev, ret, "rtc init failed\n");
+       }
+
+       rtc->irq_num = platform_get_irq(pdev, 0);
+       if (rtc->irq_num < 0)
+               return rtc->irq_num;    /* was passed unchecked to devm_request_irq() */
+
+       ret = devm_request_irq(&pdev->dev, rtc->irq_num, ma35d1_rtc_interrupt,
+                              IRQF_NO_SUSPEND, "ma35d1rtc", rtc);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to request rtc irq\n");
+
+       platform_set_drvdata(pdev, rtc);
+
+       device_init_wakeup(&pdev->dev, true);
+
+       rtc->rtcdev = devm_rtc_allocate_device(&pdev->dev);
+       if (IS_ERR(rtc->rtcdev))
+               return PTR_ERR(rtc->rtcdev);
+
+       rtc->rtcdev->ops = &ma35d1_rtc_ops;
+       rtc->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+       rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
+
+       ret = devm_rtc_register_device(rtc->rtcdev);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to register rtc device\n");
+
+       return 0;
+}
+
+static int ma35d1_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       struct ma35_rtc *rtc = platform_get_drvdata(pdev);
+
+       if (device_may_wakeup(&pdev->dev))
+               enable_irq_wake(rtc->irq_num);
+
+       return 0;
+}
+
+static int ma35d1_rtc_resume(struct platform_device *pdev)
+{
+       struct ma35_rtc *rtc = platform_get_drvdata(pdev);
+
+       if (device_may_wakeup(&pdev->dev))
+               disable_irq_wake(rtc->irq_num);
+
+       return 0;
+}
+
+static const struct of_device_id ma35d1_rtc_of_match[] = {
+       { .compatible = "nuvoton,ma35d1-rtc", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, ma35d1_rtc_of_match);
+
+static struct platform_driver ma35d1_rtc_driver = {
+       .suspend    = ma35d1_rtc_suspend,
+       .resume     = ma35d1_rtc_resume,
+       .probe      = ma35d1_rtc_probe,
+       .driver         = {
+               .name   = "rtc-ma35d1",
+               .of_match_table = ma35d1_rtc_of_match,
+       },
+};
+
+module_platform_driver(ma35d1_rtc_driver);
+
+MODULE_AUTHOR("Ming-Jen Chen <mjchen@nuvoton.com>");
+MODULE_DESCRIPTION("MA35D1 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c
new file mode 100644 (file)
index 0000000..402fda8
--- /dev/null
@@ -0,0 +1,697 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC driver for the MAX31335
+ *
+ * Copyright (C) 2023 Analog Devices
+ *
+ * Antoniu Miclaus <antoniu.miclaus@analog.com>
+ *
+ */
+
+#include <asm-generic/unaligned.h>
+#include <linux/bcd.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/util_macros.h>
+
+/* MAX31335 Register Map */
+#define MAX31335_STATUS1                       0x00
+#define MAX31335_INT_EN1                       0x01
+#define MAX31335_STATUS2                       0x02
+#define MAX31335_INT_EN2                       0x03
+#define MAX31335_RTC_RESET                     0x04
+#define MAX31335_RTC_CONFIG                    0x05
+#define MAX31335_RTC_CONFIG2                   0x06
+#define MAX31335_TIMESTAMP_CONFIG              0x07
+#define MAX31335_TIMER_CONFIG                  0x08
+#define MAX31335_SECONDS_1_128                 0x09
+#define MAX31335_SECONDS                       0x0A
+#define MAX31335_MINUTES                       0x0B
+#define MAX31335_HOURS                         0x0C
+#define MAX31335_DAY                           0x0D
+#define MAX31335_DATE                          0x0E
+#define MAX31335_MONTH                         0x0F
+#define MAX31335_YEAR                          0x10 /* was 0x0F, colliding with MONTH */
+#define MAX31335_ALM1_SEC                      0x11
+#define MAX31335_ALM1_MIN                      0x12
+#define MAX31335_ALM1_HRS                      0x13
+#define MAX31335_ALM1_DAY_DATE                 0x14
+#define MAX31335_ALM1_MON                      0x15
+#define MAX31335_ALM1_YEAR                     0x16
+#define MAX31335_ALM2_MIN                      0x17
+#define MAX31335_ALM2_HRS                      0x18
+#define MAX31335_ALM2_DAY_DATE                 0x19
+#define MAX31335_TIMER_COUNT                   0x1A
+#define MAX31335_TIMER_INIT                    0x1B
+#define MAX31335_PWR_MGMT                      0x1C
+#define MAX31335_TRICKLE_REG                   0x1D
+#define MAX31335_AGING_OFFSET                  0x1E
+#define MAX31335_TS_CONFIG                     0x30
+#define MAX31335_TEMP_ALARM_HIGH_MSB           0x31
+#define MAX31335_TEMP_ALARM_HIGH_LSB           0x32
+#define MAX31335_TEMP_ALARM_LOW_MSB            0x33
+#define MAX31335_TEMP_ALARM_LOW_LSB            0x34
+#define MAX31335_TEMP_DATA_MSB                 0x35
+#define MAX31335_TEMP_DATA_LSB                 0x36
+#define MAX31335_TS0_SEC_1_128                 0x40
+#define MAX31335_TS0_SEC                       0x41
+#define MAX31335_TS0_MIN                       0x42
+#define MAX31335_TS0_HOUR                      0x43
+#define MAX31335_TS0_DATE                      0x44
+#define MAX31335_TS0_MONTH                     0x45
+#define MAX31335_TS0_YEAR                      0x46
+#define MAX31335_TS0_FLAGS                     0x47
+#define MAX31335_TS1_SEC_1_128                 0x48
+#define MAX31335_TS1_SEC                       0x49
+#define MAX31335_TS1_MIN                       0x4A
+#define MAX31335_TS1_HOUR                      0x4B
+#define MAX31335_TS1_DATE                      0x4C
+#define MAX31335_TS1_MONTH                     0x4D
+#define MAX31335_TS1_YEAR                      0x4E
+#define MAX31335_TS1_FLAGS                     0x4F
+#define MAX31335_TS2_SEC_1_128                 0x50
+#define MAX31335_TS2_SEC                       0x51
+#define MAX31335_TS2_MIN                       0x52
+#define MAX31335_TS2_HOUR                      0x53
+#define MAX31335_TS2_DATE                      0x54
+#define MAX31335_TS2_MONTH                     0x55
+#define MAX31335_TS2_YEAR                      0x56
+#define MAX31335_TS2_FLAGS                     0x57
+#define MAX31335_TS3_SEC_1_128                 0x58
+#define MAX31335_TS3_SEC                       0x59
+#define MAX31335_TS3_MIN                       0x5A
+#define MAX31335_TS3_HOUR                      0x5B
+#define MAX31335_TS3_DATE                      0x5C
+#define MAX31335_TS3_MONTH                     0x5D
+#define MAX31335_TS3_YEAR                      0x5E
+#define MAX31335_TS3_FLAGS                     0x5F
+
+/* MAX31335_STATUS1 Bit Definitions */
+#define MAX31335_STATUS1_PSDECT                        BIT(7)
+#define MAX31335_STATUS1_OSF                   BIT(6)
+#define MAX31335_STATUS1_PFAIL                 BIT(5)
+#define MAX31335_STATUS1_VBATLOW               BIT(4)
+#define MAX31335_STATUS1_DIF                   BIT(3)
+#define MAX31335_STATUS1_TIF                   BIT(2)
+#define MAX31335_STATUS1_A2F                   BIT(1)
+#define MAX31335_STATUS1_A1F                   BIT(0)
+
+/* MAX31335_INT_EN1 Bit Definitions */
+#define MAX31335_INT_EN1_DOSF                  BIT(6)
+#define MAX31335_INT_EN1_PFAILE                        BIT(5)
+#define MAX31335_INT_EN1_VBATLOWE              BIT(4)
+#define MAX31335_INT_EN1_DIE                   BIT(3)
+#define MAX31335_INT_EN1_TIE                   BIT(2)
+#define MAX31335_INT_EN1_A2IE                  BIT(1)
+#define MAX31335_INT_EN1_A1IE                  BIT(0)
+
+/* MAX31335_STATUS2 Bit Definitions */
+#define MAX31335_STATUS2_TEMP_RDY              BIT(2)
+#define MAX31335_STATUS2_OTF                   BIT(1)
+#define MAX31335_STATUS2_UTF                   BIT(0)
+
+/* MAX31335_INT_EN2 Bit Definitions */
+#define MAX31335_INT_EN2_TEMP_RDY_EN           BIT(2)
+#define MAX31335_INT_EN2_OTIE                  BIT(1)
+#define MAX31335_INT_EN2_UTIE                  BIT(0)
+
+/* MAX31335_RTC_RESET Bit Definitions */
+#define MAX31335_RTC_RESET_SWRST               BIT(0)
+
+/* MAX31335_RTC_CONFIG1 Bit Definitions */
+#define MAX31335_RTC_CONFIG1_EN_IO             BIT(6)
+#define MAX31335_RTC_CONFIG1_A1AC              GENMASK(5, 4)
+#define MAX31335_RTC_CONFIG1_DIP               BIT(3)
+#define MAX31335_RTC_CONFIG1_I2C_TIMEOUT       BIT(1)
+#define MAX31335_RTC_CONFIG1_EN_OSC            BIT(0)
+
+/* MAX31335_RTC_CONFIG2 Bit Definitions */
+#define MAX31335_RTC_CONFIG2_ENCLKO            BIT(2)
+#define MAX31335_RTC_CONFIG2_CLKO_HZ           GENMASK(1, 0)
+
+/* MAX31335_TIMESTAMP_CONFIG Bit Definitions */
+#define MAX31335_TIMESTAMP_CONFIG_TSVLOW       BIT(5)
+#define MAX31335_TIMESTAMP_CONFIG_TSPWM                BIT(4)
+#define MAX31335_TIMESTAMP_CONFIG_TSDIN                BIT(3)
+#define MAX31335_TIMESTAMP_CONFIG_TSOW         BIT(2)
+#define MAX31335_TIMESTAMP_CONFIG_TSR          BIT(1)
+#define MAX31335_TIMESTAMP_CONFIG_TSE          BIT(0)
+
+/* MAX31335_TIMER_CONFIG Bit Definitions */
+#define MAX31335_TIMER_CONFIG_TE               BIT(4)
+#define MAX31335_TIMER_CONFIG_TPAUSE           BIT(3)
+#define MAX31335_TIMER_CONFIG_TRPT             BIT(2)
+#define MAX31335_TIMER_CONFIG_TFS              GENMASK(1, 0)
+
+/* MAX31335_HOURS Bit Definitions */
+#define MAX31335_HOURS_F_24_12                 BIT(6)
+#define MAX31335_HOURS_HR_20_AM_PM             BIT(5)
+
+/* MAX31335_MONTH Bit Definitions */
+#define MAX31335_MONTH_CENTURY                 BIT(7)
+
+/* MAX31335_PWR_MGMT Bit Definitions */
+#define MAX31335_PWR_MGMT_PFVT                 BIT(0)
+
+/* MAX31335_TRICKLE_REG Bit Definitions */
+#define MAX31335_TRICKLE_REG_TRICKLE           GENMASK(3, 1)
+#define MAX31335_TRICKLE_REG_EN_TRICKLE                BIT(0)
+
+/* MAX31335_TS_CONFIG Bit Definitions */
+#define MAX31335_TS_CONFIG_AUTO                        BIT(4)
+#define MAX31335_TS_CONFIG_CONVERT_T           BIT(3)
+#define MAX31335_TS_CONFIG_TSINT               GENMASK(2, 0)
+
+/* MAX31335_TS_FLAGS Bit Definitions */
+#define MAX31335_TS_FLAGS_VLOWF                        BIT(3)
+#define MAX31335_TS_FLAGS_VBATF                        BIT(2)
+#define MAX31335_TS_FLAGS_VCCF                 BIT(1)
+#define MAX31335_TS_FLAGS_DINF                 BIT(0)
+
+/* MAX31335 Miscellaneous Definitions */
+#define MAX31335_TRICKLE_SCHOTTKY_DIODE                1
+#define MAX31335_TRICKLE_STANDARD_DIODE                4
+#define MAX31335_RAM_SIZE                      32
+#define MAX31335_TIME_SIZE                     0x07
+
+#define clk_hw_to_max31335(_hw) container_of(_hw, struct max31335_data, clkout)
+
+struct max31335_data {
+       struct regmap *regmap;
+       struct rtc_device *rtc;
+       struct clk_hw clkout;
+};
+
+static const int max31335_clkout_freq[] = { 1, 64, 1024, 32768 };
+
+static const u16 max31335_trickle_resistors[] = {3000, 6000, 11000};
+
+static bool max31335_volatile_reg(struct device *dev, unsigned int reg)
+{
+       /* time keeping registers */
+       if (reg >= MAX31335_SECONDS &&
+           reg < MAX31335_SECONDS + MAX31335_TIME_SIZE)
+               return true;
+
+       /* interrupt status register (was compared against bit mask A1IE) */
+       if (reg == MAX31335_STATUS1)
+               return true;
+
+       /* temperature registers */
+       if (reg == MAX31335_TEMP_DATA_MSB || reg == MAX31335_TEMP_DATA_LSB)
+               return true;
+
+       return false;
+}
+
+static const struct regmap_config regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = 0x5F,
+       .volatile_reg = max31335_volatile_reg,
+};
+
+static int max31335_read_time(struct device *dev, struct rtc_time *tm)
+{
+       struct max31335_data *max31335 = dev_get_drvdata(dev);
+       u8 date[7];
+       int ret;
+
+       ret = regmap_bulk_read(max31335->regmap, MAX31335_SECONDS, date,
+                              sizeof(date));
+       if (ret)
+               return ret;
+
+       tm->tm_sec  = bcd2bin(date[0] & 0x7f);
+       tm->tm_min  = bcd2bin(date[1] & 0x7f);
+       tm->tm_hour = bcd2bin(date[2] & 0x3f);
+       tm->tm_wday = bcd2bin(date[3] & 0x7) - 1;
+       tm->tm_mday = bcd2bin(date[4] & 0x3f);
+       tm->tm_mon  = bcd2bin(date[5] & 0x1f) - 1;
+       tm->tm_year = bcd2bin(date[6]) + 100;
+
+       if (FIELD_GET(MAX31335_MONTH_CENTURY, date[5]))
+               tm->tm_year += 100;
+
+       return 0;
+}
+
+static int max31335_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct max31335_data *max31335 = dev_get_drvdata(dev);
+       u8 date[7];
+
+       date[0] = bin2bcd(tm->tm_sec);
+       date[1] = bin2bcd(tm->tm_min);
+       date[2] = bin2bcd(tm->tm_hour);
+       date[3] = bin2bcd(tm->tm_wday + 1);
+       date[4] = bin2bcd(tm->tm_mday);
+       date[5] = bin2bcd(tm->tm_mon + 1);
+       date[6] = bin2bcd(tm->tm_year % 100);
+
+       if (tm->tm_year >= 200)
+               date[5] |= FIELD_PREP(MAX31335_MONTH_CENTURY, 1);
+
+       return regmap_bulk_write(max31335->regmap, MAX31335_SECONDS, date,
+                                sizeof(date));
+}
+
+static int max31335_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct max31335_data *max31335 = dev_get_drvdata(dev);
+       int ret, ctrl, status;
+       struct rtc_time time;
+       u8 regs[6];
+
+       ret = regmap_bulk_read(max31335->regmap, MAX31335_ALM1_SEC, regs,
+                              sizeof(regs));
+       if (ret)
+               return ret;
+
+       alrm->time.tm_sec  = bcd2bin(regs[0] & 0x7f);
+       alrm->time.tm_min  = bcd2bin(regs[1] & 0x7f);
+       alrm->time.tm_hour = bcd2bin(regs[2] & 0x3f);
+       alrm->time.tm_mday = bcd2bin(regs[3] & 0x3f);
+       alrm->time.tm_mon  = bcd2bin(regs[4] & 0x1f) - 1;
+       alrm->time.tm_year = bcd2bin(regs[5]) + 100;
+
+       ret = max31335_read_time(dev, &time);
+       if (ret)
+               return ret;
+
+       if (time.tm_year >= 200)
+               alrm->time.tm_year += 100;
+
+       ret = regmap_read(max31335->regmap, MAX31335_INT_EN1, &ctrl);
+       if (ret)
+               return ret;
+
+       ret = regmap_read(max31335->regmap, MAX31335_STATUS1, &status);
+       if (ret)
+               return ret;
+
+       alrm->enabled = FIELD_GET(MAX31335_INT_EN1_A1IE, ctrl);
+       alrm->pending = FIELD_GET(MAX31335_STATUS1_A1F, status);
+
+       return 0;
+}
+
+static int max31335_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct max31335_data *max31335 = dev_get_drvdata(dev);
+       unsigned int reg;
+       u8 regs[6];
+       int ret;
+
+       regs[0] = bin2bcd(alrm->time.tm_sec);
+       regs[1] = bin2bcd(alrm->time.tm_min);
+       regs[2] = bin2bcd(alrm->time.tm_hour);
+       regs[3] = bin2bcd(alrm->time.tm_mday);
+       regs[4] = bin2bcd(alrm->time.tm_mon + 1);
+       regs[5] = bin2bcd(alrm->time.tm_year % 100);
+
+       ret = regmap_bulk_write(max31335->regmap, MAX31335_ALM1_SEC,
+                               regs, sizeof(regs));
+       if (ret)
+               return ret;
+
+       reg = FIELD_PREP(MAX31335_INT_EN1_A1IE, alrm->enabled);
+       ret = regmap_update_bits(max31335->regmap, MAX31335_INT_EN1,
+                                MAX31335_INT_EN1_A1IE, reg);
+       if (ret)
+               return ret;
+
+       /* clear any stale alarm-1 flag */
+       ret = regmap_update_bits(max31335->regmap, MAX31335_STATUS1,
+                                MAX31335_STATUS1_A1F, 0);
+
+       return ret;     /* was hard-coded 0, swallowing the A1F-clear error */
+}
+
+static int max31335_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct max31335_data *max31335 = dev_get_drvdata(dev);
+
+       return regmap_update_bits(max31335->regmap, MAX31335_INT_EN1,
+                                 MAX31335_INT_EN1_A1IE, enabled);
+}
+
+static irqreturn_t max31335_handle_irq(int irq, void *dev_id)
+{
+       struct max31335_data *max31335 = dev_id;
+       bool status;
+       int ret;
+
+       ret = regmap_update_bits_check(max31335->regmap, MAX31335_STATUS1,
+                                      MAX31335_STATUS1_A1F, 0, &status);
+       if (ret)
+               return IRQ_HANDLED;
+
+       if (status)
+               rtc_update_irq(max31335->rtc, 1, RTC_AF | RTC_IRQF);
+
+       return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops max31335_rtc_ops = {
+       .read_time = max31335_read_time,
+       .set_time = max31335_set_time,
+       .read_alarm = max31335_read_alarm,
+       .set_alarm = max31335_set_alarm,
+       .alarm_irq_enable = max31335_alarm_irq_enable,
+};
+
+static int max31335_trickle_charger_setup(struct device *dev,
+                                         struct max31335_data *max31335)
+{
+       u32 ohms, chargeable;
+       int i, trickle_cfg;
+       const char *diode;
+
+       if (device_property_read_u32(dev, "aux-voltage-chargeable",
+                                    &chargeable))
+               return 0;
+
+       if (device_property_read_u32(dev, "trickle-resistor-ohms", &ohms))
+               return 0;
+
+       if (device_property_read_string(dev, "adi,tc-diode", &diode))
+               return 0;
+
+       if (!strcmp(diode, "schottky"))
+               trickle_cfg = MAX31335_TRICKLE_SCHOTTKY_DIODE;
+       else if (!strcmp(diode, "standard+schottky"))
+               trickle_cfg = MAX31335_TRICKLE_STANDARD_DIODE;
+       else
+               return dev_err_probe(dev, -EINVAL,
+                                    "Invalid tc-diode value: %s\n", diode);
+
+       for (i = 0; i < ARRAY_SIZE(max31335_trickle_resistors); i++)
+               if (ohms == max31335_trickle_resistors[i])
+                       break;
+
+       if (i >= ARRAY_SIZE(max31335_trickle_resistors))
+               return 0;
+
+       i = i + trickle_cfg;
+
+       return regmap_write(max31335->regmap, MAX31335_TRICKLE_REG,
+                           FIELD_PREP(MAX31335_TRICKLE_REG_TRICKLE, i) |
+                           FIELD_PREP(MAX31335_TRICKLE_REG_EN_TRICKLE,
+                                      chargeable));
+}
+
+static unsigned long max31335_clkout_recalc_rate(struct clk_hw *hw,
+                                                unsigned long parent_rate)
+{
+       struct max31335_data *max31335 = clk_hw_to_max31335(hw);
+       unsigned int freq_mask;
+       unsigned int reg;
+       int ret;
+
+       ret = regmap_read(max31335->regmap, MAX31335_RTC_CONFIG2, &reg);
+       if (ret)
+               return 0;
+
+       freq_mask = __roundup_pow_of_two(ARRAY_SIZE(max31335_clkout_freq)) - 1;
+
+       return max31335_clkout_freq[reg & freq_mask];
+}
+
+static long max31335_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long *prate)
+{
+       int index;
+
+       index = find_closest(rate, max31335_clkout_freq,
+                            ARRAY_SIZE(max31335_clkout_freq));
+
+       return max31335_clkout_freq[index];
+}
+
+static int max31335_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long parent_rate)
+{
+       struct max31335_data *max31335 = clk_hw_to_max31335(hw);
+       unsigned int freq_mask;
+       int index;
+
+       index = find_closest(rate, max31335_clkout_freq,
+                            ARRAY_SIZE(max31335_clkout_freq));
+       freq_mask = __roundup_pow_of_two(ARRAY_SIZE(max31335_clkout_freq)) - 1;
+
+       return regmap_update_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
+                                 freq_mask, index);
+}
+
+static int max31335_clkout_enable(struct clk_hw *hw)
+{
+       struct max31335_data *max31335 = clk_hw_to_max31335(hw);
+
+       return regmap_set_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
+                              MAX31335_RTC_CONFIG2_ENCLKO);
+}
+
+static void max31335_clkout_disable(struct clk_hw *hw)
+{
+       struct max31335_data *max31335 = clk_hw_to_max31335(hw);
+
+       regmap_clear_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
+                         MAX31335_RTC_CONFIG2_ENCLKO);
+}
+
+static int max31335_clkout_is_enabled(struct clk_hw *hw)
+{
+       struct max31335_data *max31335 = clk_hw_to_max31335(hw);
+       unsigned int reg;
+       int ret;
+
+       ret = regmap_read(max31335->regmap, MAX31335_RTC_CONFIG2, &reg);
+       if (ret)
+               return ret;
+
+       return !!(reg & MAX31335_RTC_CONFIG2_ENCLKO);
+}
+
+static const struct clk_ops max31335_clkout_ops = {
+       .recalc_rate = max31335_clkout_recalc_rate,
+       .round_rate = max31335_clkout_round_rate,
+       .set_rate = max31335_clkout_set_rate,
+       .enable = max31335_clkout_enable,
+       .disable = max31335_clkout_disable,
+       .is_enabled = max31335_clkout_is_enabled,
+};
+
+static struct clk_init_data max31335_clk_init = {
+       .name = "max31335-clkout",
+       .ops = &max31335_clkout_ops,
+};
+
+static int max31335_nvmem_reg_read(void *priv, unsigned int offset,
+                                  void *val, size_t bytes)
+{
+       struct max31335_data *max31335 = priv;
+       unsigned int reg = MAX31335_TS0_SEC_1_128 + offset;
+
+       return regmap_bulk_read(max31335->regmap, reg, val, bytes);
+}
+
+static int max31335_nvmem_reg_write(void *priv, unsigned int offset,
+                                   void *val, size_t bytes)
+{
+       struct max31335_data *max31335 = priv;
+       unsigned int reg = MAX31335_TS0_SEC_1_128 + offset;
+
+       return regmap_bulk_write(max31335->regmap, reg, val, bytes);
+}
+
+static struct nvmem_config max31335_nvmem_cfg = {
+       .reg_read = max31335_nvmem_reg_read,
+       .reg_write = max31335_nvmem_reg_write,
+       .word_size = 8,
+       .size = MAX31335_RAM_SIZE,
+};
+
+#if IS_REACHABLE(CONFIG_HWMON) /* IS_REACHABLE() needs the CONFIG_ symbol; bare HWMON is never defined */
+static int max31335_read_temp(struct device *dev, enum hwmon_sensor_types type,
+                             u32 attr, int channel, long *val)
+{
+       struct max31335_data *max31335 = dev_get_drvdata(dev);
+       u8 reg[2];
+       s16 temp;
+       int ret;
+
+       if (type != hwmon_temp || attr != hwmon_temp_input)
+               return -EOPNOTSUPP;
+
+       ret = regmap_bulk_read(max31335->regmap, MAX31335_TEMP_DATA_MSB,
+                              reg, 2);
+       if (ret)
+               return ret;
+
+       temp = get_unaligned_be16(reg);
+
+       *val = (temp / 64) * 250;
+
+       return 0;
+}
+
+static umode_t max31335_is_visible(const void *data,
+                                  enum hwmon_sensor_types type,
+                                  u32 attr, int channel)
+{
+       if (type == hwmon_temp && attr == hwmon_temp_input)
+               return 0444;
+
+       return 0;
+}
+
+static const struct hwmon_channel_info *max31335_info[] = {
+       HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+       NULL
+};
+
+static const struct hwmon_ops max31335_hwmon_ops = {
+       .is_visible = max31335_is_visible,
+       .read = max31335_read_temp,
+};
+
+static const struct hwmon_chip_info max31335_chip_info = {
+       .ops = &max31335_hwmon_ops,
+       .info = max31335_info,
+};
+#endif
+
+static int max31335_clkout_register(struct device *dev)
+{
+       struct max31335_data *max31335 = dev_get_drvdata(dev);
+       int ret;
+
+       if (!device_property_present(dev, "#clock-cells"))
+               return regmap_clear_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
+                                        MAX31335_RTC_CONFIG2_ENCLKO);
+
+       max31335->clkout.init = &max31335_clk_init;
+
+       ret = devm_clk_hw_register(dev, &max31335->clkout);
+       if (ret)
+               return dev_err_probe(dev, ret, "cannot register clock\n");
+
+       ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+                                         &max31335->clkout);
+       if (ret)
+               return dev_err_probe(dev, ret, "cannot add hw provider\n");
+
+       max31335->clkout.clk = devm_clk_get_enabled(dev, NULL);
+       if (IS_ERR(max31335->clkout.clk))
+               return dev_err_probe(dev, PTR_ERR(max31335->clkout.clk),
+                                    "cannot enable clkout\n");
+
+       return 0;
+}
+
+static int max31335_probe(struct i2c_client *client)
+{
+       struct max31335_data *max31335;
+#if IS_REACHABLE(CONFIG_HWMON) /* was IS_REACHABLE(HWMON): always false, hwmon silently compiled out */
+       struct device *hwmon;
+#endif
+       int ret;
+
+       max31335 = devm_kzalloc(&client->dev, sizeof(*max31335), GFP_KERNEL);
+       if (!max31335)
+               return -ENOMEM;
+
+       max31335->regmap = devm_regmap_init_i2c(client, &regmap_config);
+       if (IS_ERR(max31335->regmap))
+               return PTR_ERR(max31335->regmap);
+
+       i2c_set_clientdata(client, max31335);
+
+       max31335->rtc = devm_rtc_allocate_device(&client->dev);
+       if (IS_ERR(max31335->rtc))
+               return PTR_ERR(max31335->rtc);
+
+       max31335->rtc->ops = &max31335_rtc_ops;
+       max31335->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+       max31335->rtc->range_max = RTC_TIMESTAMP_END_2199;
+       max31335->rtc->alarm_offset_max = 24 * 60 * 60;
+
+       ret = max31335_clkout_register(&client->dev);
+       if (ret)
+               return ret;
+
+       if (client->irq > 0) {
+               ret = devm_request_threaded_irq(&client->dev, client->irq,
+                                               NULL, max31335_handle_irq,
+                                               IRQF_ONESHOT,
+                                               "max31335", max31335);
+               if (ret) {
+                       dev_warn(&client->dev,
+                                "unable to request IRQ, alarm max31335 disabled\n");
+                       client->irq = 0;
+               }
+       }
+
+       if (!client->irq)
+               clear_bit(RTC_FEATURE_ALARM, max31335->rtc->features);
+
+       max31335_nvmem_cfg.priv = max31335;
+       ret = devm_rtc_nvmem_register(max31335->rtc, &max31335_nvmem_cfg);
+       if (ret)
+               return dev_err_probe(&client->dev, ret,
+                                    "cannot register rtc nvmem\n");
+
+#if IS_REACHABLE(CONFIG_HWMON)
+       hwmon = devm_hwmon_device_register_with_info(&client->dev, client->name,
+                                                    max31335,
+                                                    &max31335_chip_info,
+                                                    NULL);
+       if (IS_ERR(hwmon))
+               return dev_err_probe(&client->dev, PTR_ERR(hwmon),
+                                    "cannot register hwmon device\n");
+#endif
+
+       ret = max31335_trickle_charger_setup(&client->dev, max31335);
+       if (ret)
+               return ret;
+
+       return devm_rtc_register_device(max31335->rtc);
+}
+
+static const struct i2c_device_id max31335_id[] = {
+       { "max31335", 0 },
+       { }
+};
+
+MODULE_DEVICE_TABLE(i2c, max31335_id);
+
+static const struct of_device_id max31335_of_match[] = {
+       { .compatible = "adi,max31335" },
+       { }
+};
+
+MODULE_DEVICE_TABLE(of, max31335_of_match);
+
+static struct i2c_driver max31335_driver = {
+       .driver = {
+               .name = "rtc-max31335",
+               .of_match_table = max31335_of_match,
+       },
+       .probe = max31335_probe,
+       .id_table = max31335_id,
+};
+module_i2c_driver(max31335_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("MAX31335 RTC driver");
+MODULE_LICENSE("GPL");
index f1c09f1db044c8481fca7ddb8801c952954e246e..651bf3c279c7462151096cf8565a56fffab9052d 100644 (file)
@@ -8,26 +8,31 @@
 #include <linux/acpi.h>
 #endif
 
+#define UIP_RECHECK_DELAY              100     /* usec */
+#define UIP_RECHECK_DELAY_MS           (USEC_PER_MSEC / UIP_RECHECK_DELAY)
+#define UIP_RECHECK_LOOPS_MS(x)                ((x) / UIP_RECHECK_DELAY_MS) /* parenthesize macro arg */
+
 /*
  * Execute a function while the UIP (Update-in-progress) bit of the RTC is
- * unset.
+ * unset. The timeout is configurable by the caller in ms.
  *
  * Warning: callback may be executed more then once.
  */
 bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+                       int timeout,
                        void *param)
 {
        int i;
        unsigned long flags;
        unsigned char seconds;
 
-       for (i = 0; i < 100; i++) {
+       for (i = 0; UIP_RECHECK_LOOPS_MS(i) < timeout; i++) {
                spin_lock_irqsave(&rtc_lock, flags);
 
                /*
                 * Check whether there is an update in progress during which the
                 * readout is unspecified. The maximum update time is ~2ms. Poll
-                * every 100 usec for completion.
+                * for completion.
                 *
                 * Store the second value before checking UIP so a long lasting
                 * NMI which happens to hit after the UIP check cannot make
@@ -37,7 +42,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
 
                if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
                        spin_unlock_irqrestore(&rtc_lock, flags);
-                       udelay(100);
+                       udelay(UIP_RECHECK_DELAY);
                        continue;
                }
 
@@ -56,7 +61,7 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
                 */
                if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
                        spin_unlock_irqrestore(&rtc_lock, flags);
-                       udelay(100);
+                       udelay(UIP_RECHECK_DELAY);
                        continue;
                }
 
@@ -72,6 +77,10 @@ bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
                }
                spin_unlock_irqrestore(&rtc_lock, flags);
 
+               if (UIP_RECHECK_LOOPS_MS(i) >= 100)
+                       pr_warn("Reading current time from RTC took around %li ms\n",
+                               UIP_RECHECK_LOOPS_MS(i));
+
                return true;
        }
        return false;
@@ -84,7 +93,7 @@ EXPORT_SYMBOL_GPL(mc146818_avoid_UIP);
  */
 bool mc146818_does_rtc_work(void)
 {
-       return mc146818_avoid_UIP(NULL, NULL);
+       return mc146818_avoid_UIP(NULL, 1000, NULL);
 }
 EXPORT_SYMBOL_GPL(mc146818_does_rtc_work);
 
@@ -130,15 +139,27 @@ static void mc146818_get_time_callback(unsigned char seconds, void *param_in)
        p->ctrl = CMOS_READ(RTC_CONTROL);
 }
 
-int mc146818_get_time(struct rtc_time *time)
+/**
+ * mc146818_get_time - Get the current time from the RTC
+ * @time: pointer to struct rtc_time to store the current time
+ * @timeout: timeout value in ms
+ *
+ * This function reads the current time from the RTC and stores it in the
+ * provided struct rtc_time. The timeout parameter specifies the maximum
+ * time to wait for the RTC to become ready.
+ *
+ * Return: 0 on success, -ETIMEDOUT if the RTC did not become ready within
+ * the specified timeout, or another error code if an error occurred.
+ */
+int mc146818_get_time(struct rtc_time *time, int timeout)
 {
        struct mc146818_get_time_callback_param p = {
                .time = time
        };
 
-       if (!mc146818_avoid_UIP(mc146818_get_time_callback, &p)) {
+       if (!mc146818_avoid_UIP(mc146818_get_time_callback, timeout, &p)) {
                memset(time, 0, sizeof(*time));
-               return -EIO;
+               return -ETIMEDOUT;
        }
 
        if (!(p.ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
index ed4e606be8e58d2779bd6aff148b388435770830..f488a189a4651ffead1bbcc1e4e9d3a5cd01d5df 100644 (file)
@@ -23,6 +23,7 @@
 #define NCT3018Y_REG_CTRL      0x0A /* timer control */
 #define NCT3018Y_REG_ST                0x0B /* status */
 #define NCT3018Y_REG_CLKO      0x0C /* clock out */
+#define NCT3018Y_REG_PART      0x21 /* part info */
 
 #define NCT3018Y_BIT_AF                BIT(7)
 #define NCT3018Y_BIT_ST                BIT(7)
 #define NCT3018Y_REG_BAT_MASK          0x07
 #define NCT3018Y_REG_CLKO_F_MASK       0x03 /* frequenc mask */
 #define NCT3018Y_REG_CLKO_CKE          0x80 /* clock out enabled */
+#define NCT3018Y_REG_PART_NCT3018Y     0x02
 
 struct nct3018y {
        struct rtc_device *rtc;
        struct i2c_client *client;
+       int part_num;
 #ifdef CONFIG_COMMON_CLK
        struct clk_hw clkout_hw;
 #endif
@@ -177,8 +180,27 @@ static int nct3018y_rtc_read_time(struct device *dev, struct rtc_time *tm)
 static int nct3018y_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
        struct i2c_client *client = to_i2c_client(dev);
+       struct nct3018y *nct3018y = dev_get_drvdata(dev);
        unsigned char buf[4] = {0};
-       int err;
+       int err, flags;
+       int restore_flags = 0;
+
+       flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+       if (flags < 0) {
+               dev_dbg(&client->dev, "Failed to read NCT3018Y_REG_CTRL.\n");
+               return flags;
+       }
+
+       /* Check and set TWO bit */
+       if (nct3018y->part_num == NCT3018Y_REG_PART_NCT3018Y && !(flags & NCT3018Y_BIT_TWO)) {
+               restore_flags = 1;
+               flags |= NCT3018Y_BIT_TWO;
+               err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+               if (err < 0) {
+                       dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL.\n");
+                       return err;
+               }
+       }
 
        buf[0] = bin2bcd(tm->tm_sec);
        err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SC, buf[0]);
@@ -212,6 +234,18 @@ static int nct3018y_rtc_set_time(struct device *dev, struct rtc_time *tm)
                return -EIO;
        }
 
+       /* Restore TWO bit */
+       if (restore_flags) {
+               if (nct3018y->part_num == NCT3018Y_REG_PART_NCT3018Y)
+                       flags &= ~NCT3018Y_BIT_TWO;
+
+               err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+               if (err < 0) {
+                       dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL.\n");
+                       return err;
+               }
+       }
+
        return err;
 }
 
@@ -479,11 +513,17 @@ static int nct3018y_probe(struct i2c_client *client)
                dev_dbg(&client->dev, "%s: NCT3018Y_BIT_TWO is set\n", __func__);
        }
 
-       flags = NCT3018Y_BIT_TWO;
-       err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
-       if (err < 0) {
-               dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
-               return err;
+       nct3018y->part_num = i2c_smbus_read_byte_data(client, NCT3018Y_REG_PART);
+       if (nct3018y->part_num < 0) {
+               dev_dbg(&client->dev, "Failed to read NCT3018Y_REG_PART.\n");
+               return nct3018y->part_num;
+       } else if (nct3018y->part_num == NCT3018Y_REG_PART_NCT3018Y) {
+               flags = NCT3018Y_BIT_HF;
+               err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+               if (err < 0) {
+                       dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL.\n");
+                       return err;
+               }
        }
 
        flags = 0;
index 1a3ec1bb5b8148dd945ad127f45e657cd4c5a882..1327251e527c21cae4a247a62791ec3cbf0df067 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/rtc.h>
+#include <linux/pm_wakeirq.h>
 
 #define RV8803_I2C_TRY_COUNT           4
 
@@ -607,6 +608,28 @@ static int rv8803_regs_configure(struct rv8803_data *rv8803)
        return 0;
 }
 
+/*
+ * System resume: balance the enable_irq_wake() done in rv8803_suspend()
+ * so the IRQ wake count stays matched across a suspend/resume cycle.
+ */
+static int rv8803_resume(struct device *dev)
+{
+       struct rv8803_data *rv8803 = dev_get_drvdata(dev);
+
+       if (rv8803->client->irq > 0 && device_may_wakeup(dev))
+               disable_irq_wake(rv8803->client->irq);
+
+       return 0;
+}
+
+/*
+ * System suspend: arm the RTC alarm IRQ as a wakeup source, but only when
+ * an IRQ was actually requested and userspace/firmware allows wakeup.
+ */
+static int rv8803_suspend(struct device *dev)
+{
+       struct rv8803_data *rv8803 = dev_get_drvdata(dev);
+
+       if (rv8803->client->irq > 0 && device_may_wakeup(dev))
+               enable_irq_wake(rv8803->client->irq);
+
+       return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(rv8803_pm_ops, rv8803_suspend, rv8803_resume);
+
 static const struct i2c_device_id rv8803_id[] = {
        { "rv8803", rv_8803 },
        { "rv8804", rx_8804 },
@@ -683,10 +706,18 @@ static int rv8803_probe(struct i2c_client *client)
                if (err) {
                        dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
                        client->irq = 0;
+               } else {
+                       device_init_wakeup(&client->dev, true);
+                       err = dev_pm_set_wake_irq(&client->dev, client->irq);
+                       if (err)
+                               dev_err(&client->dev, "failed to set wake IRQ\n");
                }
+       } else {
+               if (device_property_read_bool(&client->dev, "wakeup-source"))
+                       device_init_wakeup(&client->dev, true);
+               else
+                       clear_bit(RTC_FEATURE_ALARM, rv8803->rtc->features);
        }
-       if (!client->irq)
-               clear_bit(RTC_FEATURE_ALARM, rv8803->rtc->features);
 
        if (of_property_read_bool(client->dev.of_node, "epson,vdet-disable"))
                rv8803->backup |= RX8900_FLAG_VDETOFF;
@@ -737,6 +768,7 @@ static struct i2c_driver rv8803_driver = {
        .driver = {
                .name = "rtc-rv8803",
                .of_match_table = of_match_ptr(rv8803_of_match),
+               .pm = &rv8803_pm_ops,
        },
        .probe          = rv8803_probe,
        .id_table       = rv8803_id,
diff --git a/drivers/rtc/rtc-tps6594.c b/drivers/rtc/rtc-tps6594.c
new file mode 100644 (file)
index 0000000..838ae85
--- /dev/null
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC driver for tps6594 PMIC
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#include <linux/bcd.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/rtc.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <linux/mfd/tps6594.h>
+
+// Total number of RTC registers needed to set time
+#define NUM_TIME_REGS (TPS6594_REG_RTC_WEEKS - TPS6594_REG_RTC_SECONDS + 1)
+
+// Total number of RTC alarm registers
+#define NUM_TIME_ALARM_REGS (NUM_TIME_REGS - 1)
+
+/*
+ * Min and max values supported by 'offset' interface (swapped sign).
+ * After conversion, the values do not exceed the range [-32767, 32767]
+ * which COMP_REG must conform to.
+ */
+#define MIN_OFFSET (-277774)
+#define MAX_OFFSET (277774)
+
+// Number of ticks per hour
+#define TICKS_PER_HOUR (32768 * 3600)
+
+// Multiplier for ppb conversions
+#define PPB_MULT NANO
+
+/*
+ * Enable or disable the RTC alarm interrupt by toggling the IT_ALARM bit
+ * in the interrupt register. Returns 0 or a negative regmap error code.
+ */
+static int tps6594_rtc_alarm_irq_enable(struct device *dev,
+                                       unsigned int enabled)
+{
+       struct tps6594 *tps = dev_get_drvdata(dev->parent);
+       u8 val;
+
+       val = enabled ? TPS6594_BIT_IT_ALARM : 0;
+
+       return regmap_update_bits(tps->regmap, TPS6594_REG_RTC_INTERRUPTS,
+                                 TPS6594_BIT_IT_ALARM, val);
+}
+
+/*
+ * Pulse GET_TIME field of RTC_CTRL_1 to store a timestamp in shadow registers.
+ * Returns 0 on success or a negative regmap error code.
+ */
+static int tps6594_rtc_shadow_timestamp(struct device *dev, struct tps6594 *tps)
+{
+       int ret;
+
+       /*
+        * Set GET_TIME to 0. Next time we set GET_TIME to 1 we will be sure to store
+        * an up-to-date timestamp.
+        */
+       ret = regmap_clear_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+                               TPS6594_BIT_GET_TIME);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Copy content of RTC registers to shadow registers or latches to read
+        * a coherent timestamp.
+        */
+       return regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+                              TPS6594_BIT_GET_TIME);
+}
+
+/*
+ * Read the current time from the RTC.
+ *
+ * Fails with -EINVAL when the RTC is not running (RUN bit clear). Otherwise
+ * a coherent snapshot is latched into the shadow registers and bulk-read.
+ * The hardware registers hold BCD values; tm_mon is 0-based and tm_year
+ * counts from 1900 while the hardware year counts from 2000 (hence +100).
+ */
+static int tps6594_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       unsigned char rtc_data[NUM_TIME_REGS];
+       struct tps6594 *tps = dev_get_drvdata(dev->parent);
+       int ret;
+
+       // Check if RTC is running.
+       ret = regmap_test_bits(tps->regmap, TPS6594_REG_RTC_STATUS,
+                              TPS6594_BIT_RUN);
+       if (ret < 0)
+               return ret;
+       if (ret == 0)
+               return -EINVAL;
+
+       ret = tps6594_rtc_shadow_timestamp(dev, tps);
+       if (ret < 0)
+               return ret;
+
+       // Read shadowed RTC registers.
+       ret = regmap_bulk_read(tps->regmap, TPS6594_REG_RTC_SECONDS, rtc_data,
+                              NUM_TIME_REGS);
+       if (ret < 0)
+               return ret;
+
+       tm->tm_sec = bcd2bin(rtc_data[0]);
+       tm->tm_min = bcd2bin(rtc_data[1]);
+       tm->tm_hour = bcd2bin(rtc_data[2]);
+       tm->tm_mday = bcd2bin(rtc_data[3]);
+       tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
+       tm->tm_year = bcd2bin(rtc_data[5]) + 100;
+       tm->tm_wday = bcd2bin(rtc_data[6]);
+
+       return 0;
+}
+
+/*
+ * Set the RTC time. The RTC is frozen while the BCD time registers are
+ * written in a single bulk transfer, then released again, so a partially
+ * written timestamp can never be counted from.
+ */
+static int tps6594_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       unsigned char rtc_data[NUM_TIME_REGS];
+       struct tps6594 *tps = dev_get_drvdata(dev->parent);
+       int ret;
+
+       rtc_data[0] = bin2bcd(tm->tm_sec);
+       rtc_data[1] = bin2bcd(tm->tm_min);
+       rtc_data[2] = bin2bcd(tm->tm_hour);
+       rtc_data[3] = bin2bcd(tm->tm_mday);
+       rtc_data[4] = bin2bcd(tm->tm_mon + 1);
+       rtc_data[5] = bin2bcd(tm->tm_year - 100);
+       rtc_data[6] = bin2bcd(tm->tm_wday);
+
+       // Stop RTC while updating the RTC time registers.
+       ret = regmap_clear_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+                               TPS6594_BIT_STOP_RTC);
+       if (ret < 0)
+               return ret;
+
+       // Update all the time registers in one shot.
+       ret = regmap_bulk_write(tps->regmap, TPS6594_REG_RTC_SECONDS, rtc_data,
+                               NUM_TIME_REGS);
+       if (ret < 0)
+               return ret;
+
+       // Start back RTC.
+       return regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+                              TPS6594_BIT_STOP_RTC);
+}
+
+/*
+ * Read the programmed alarm time (BCD alarm registers, no weekday field)
+ * and report whether the alarm interrupt is currently enabled.
+ */
+static int tps6594_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+       unsigned char alarm_data[NUM_TIME_ALARM_REGS];
+       u32 int_val;
+       struct tps6594 *tps = dev_get_drvdata(dev->parent);
+       int ret;
+
+       ret = regmap_bulk_read(tps->regmap, TPS6594_REG_ALARM_SECONDS,
+                              alarm_data, NUM_TIME_ALARM_REGS);
+       if (ret < 0)
+               return ret;
+
+       alm->time.tm_sec = bcd2bin(alarm_data[0]);
+       alm->time.tm_min = bcd2bin(alarm_data[1]);
+       alm->time.tm_hour = bcd2bin(alarm_data[2]);
+       alm->time.tm_mday = bcd2bin(alarm_data[3]);
+       alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1;
+       alm->time.tm_year = bcd2bin(alarm_data[5]) + 100;
+
+       ret = regmap_read(tps->regmap, TPS6594_REG_RTC_INTERRUPTS, &int_val);
+       if (ret < 0)
+               return ret;
+
+       alm->enabled = int_val & TPS6594_BIT_IT_ALARM;
+
+       return 0;
+}
+
+/*
+ * Program a new alarm time. The alarm interrupt is masked while the alarm
+ * registers are rewritten so a half-updated alarm cannot fire, and is
+ * re-enabled only when alm->enabled requests it.
+ */
+static int tps6594_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+       unsigned char alarm_data[NUM_TIME_ALARM_REGS];
+       struct tps6594 *tps = dev_get_drvdata(dev->parent);
+       int ret;
+
+       // Disable alarm irq before changing the alarm timestamp.
+       ret = tps6594_rtc_alarm_irq_enable(dev, 0);
+       if (ret)
+               return ret;
+
+       alarm_data[0] = bin2bcd(alm->time.tm_sec);
+       alarm_data[1] = bin2bcd(alm->time.tm_min);
+       alarm_data[2] = bin2bcd(alm->time.tm_hour);
+       alarm_data[3] = bin2bcd(alm->time.tm_mday);
+       alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
+       alarm_data[5] = bin2bcd(alm->time.tm_year - 100);
+
+       // Update all the alarm registers in one shot.
+       ret = regmap_bulk_write(tps->regmap, TPS6594_REG_ALARM_SECONDS,
+                               alarm_data, NUM_TIME_ALARM_REGS);
+       if (ret < 0)
+               return ret;
+
+       if (alm->enabled)
+               ret = tps6594_rtc_alarm_irq_enable(dev, 1);
+
+       return ret;
+}
+
+/*
+ * Write a two's-complement compensation value (16 bit, little-endian across
+ * COMP_LSB/COMP_MSB) and enable automatic compensation.
+ */
+static int tps6594_rtc_set_calibration(struct device *dev, int calibration)
+{
+       struct tps6594 *tps = dev_get_drvdata(dev->parent);
+       __le16 value;
+       int ret;
+
+       /*
+        * TPS6594 uses two's complement 16 bit value for compensation of RTC
+        * crystal inaccuracies. One time every hour when seconds counter
+        * increments from 0 to 1 compensation value will be added to internal
+        * RTC counter value.
+        *
+        * Valid range for compensation value: [-32767 .. 32767].
+        */
+       if (calibration < S16_MIN + 1 || calibration > S16_MAX)
+               return -ERANGE;
+
+       value = cpu_to_le16(calibration);
+
+       // Update all the compensation registers in one shot.
+       ret = regmap_bulk_write(tps->regmap, TPS6594_REG_RTC_COMP_LSB, &value,
+                               sizeof(value));
+       if (ret < 0)
+               return ret;
+
+       // Enable automatic compensation.
+       return regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+                              TPS6594_BIT_AUTO_COMP);
+}
+
+/*
+ * Read back the compensation value from the COMP registers. Reports 0 when
+ * automatic compensation is disabled, since the stored value has no effect
+ * in that case.
+ */
+static int tps6594_rtc_get_calibration(struct device *dev, int *calibration)
+{
+       struct tps6594 *tps = dev_get_drvdata(dev->parent);
+       unsigned int ctrl;
+       __le16 value;
+       int ret;
+
+       ret = regmap_read(tps->regmap, TPS6594_REG_RTC_CTRL_1, &ctrl);
+       if (ret < 0)
+               return ret;
+
+       // If automatic compensation is not enabled report back zero.
+       if (!(ctrl & TPS6594_BIT_AUTO_COMP)) {
+               *calibration = 0;
+               return 0;
+       }
+
+       ret = regmap_bulk_read(tps->regmap, TPS6594_REG_RTC_COMP_LSB, &value,
+                              sizeof(value));
+       if (ret < 0)
+               return ret;
+
+       *calibration = le16_to_cpu(value);
+
+       return 0;
+}
+
+/*
+ * Convert the calibration register value back to a ppb offset, rounding to
+ * nearest and swapping the sign (the hardware offset acts negatively).
+ */
+static int tps6594_rtc_read_offset(struct device *dev, long *offset)
+{
+       int calibration;
+       s64 tmp;
+       int ret;
+
+       ret = tps6594_rtc_get_calibration(dev, &calibration);
+       if (ret < 0)
+               return ret;
+
+       // Convert from RTC calibration register format to ppb format.
+       tmp = calibration * PPB_MULT;
+
+       if (tmp < 0)
+               tmp -= TICKS_PER_HOUR / 2LL;
+       else
+               tmp += TICKS_PER_HOUR / 2LL;
+       tmp = div_s64(tmp, TICKS_PER_HOUR);
+
+       /*
+        * SAFETY:
+        * Computation is the reverse operation of the one done in
+        * `tps6594_rtc_set_offset`. The safety remarks apply here too.
+        */
+
+       /*
+        * Offset value operates in negative way, so swap sign.
+        * See 8.3.10.5, (32768 - COMP_REG).
+        */
+       *offset = (long)-tmp;
+
+       return 0;
+}
+
+/*
+ * Convert a ppb offset to a calibration register value, rounding to
+ * nearest and swapping the sign, then program it via set_calibration().
+ */
+static int tps6594_rtc_set_offset(struct device *dev, long offset)
+{
+       int calibration;
+       s64 tmp;
+
+       // Make sure offset value is within supported range.
+       if (offset < MIN_OFFSET || offset > MAX_OFFSET)
+               return -ERANGE;
+
+       // Convert from ppb format to RTC calibration register format.
+
+       tmp = offset * TICKS_PER_HOUR;
+       if (tmp < 0)
+               tmp -= PPB_MULT / 2LL;
+       else
+               tmp += PPB_MULT / 2LL;
+       tmp = div_s64(tmp, PPB_MULT);
+
+       /*
+        * SAFETY:
+        * - tmp = offset * TICKS_PER_HOUR :
+        *      |offset| <= 277774, so |tmp| <= 277774 * 117964800 (~3.3e13),
+        *      far below the maximum value of an `s64` (2^63-1). No overflow.
+        *
+        * - tmp +/- PPB_MULT / 2LL :
+        *      adds at most 5e8 on top, still far below 2^63-1.
+        */
+
+       // Offset value operates in negative way, so swap sign.
+       calibration = (int)-tmp;
+
+       return tps6594_rtc_set_calibration(dev, calibration);
+}
+
+/*
+ * Threaded alarm interrupt handler: reads the status register (returning
+ * IRQ_NONE if the device does not respond) and notifies the RTC core of an
+ * alarm event.
+ *
+ * NOTE(review): the status bits read into rtc_reg are not inspected before
+ * signalling RTC_AF — presumably only the alarm interrupt is routed to this
+ * handler; verify against the MFD IRQ mapping.
+ */
+static irqreturn_t tps6594_rtc_interrupt(int irq, void *rtc)
+{
+       struct device *dev = rtc;
+       struct tps6594 *tps = dev_get_drvdata(dev->parent);
+       struct rtc_device *rtc_dev = dev_get_drvdata(dev);
+       int ret;
+       u32 rtc_reg;
+
+       ret = regmap_read(tps->regmap, TPS6594_REG_RTC_STATUS, &rtc_reg);
+       if (ret)
+               return IRQ_NONE;
+
+       rtc_update_irq(rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+       return IRQ_HANDLED;
+}
+
+/* RTC class operations implemented by this driver. */
+static const struct rtc_class_ops tps6594_rtc_ops = {
+       .read_time = tps6594_rtc_read_time,
+       .set_time = tps6594_rtc_set_time,
+       .read_alarm = tps6594_rtc_read_alarm,
+       .set_alarm = tps6594_rtc_set_alarm,
+       .alarm_irq_enable = tps6594_rtc_alarm_irq_enable,
+       .read_offset = tps6594_rtc_read_offset,
+       .set_offset = tps6594_rtc_set_offset,
+};
+
+/*
+ * tps6594_rtc_probe - Probe and register the TPS6594 RTC.
+ *
+ * Enables the crystal oscillator, verifies that the RTC can actually run
+ * (starting it once and waiting for the RUN bit if needed), requests the
+ * alarm interrupt, marks the device as a wakeup source and registers the
+ * RTC with the core.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int tps6594_rtc_probe(struct platform_device *pdev)
+{
+       struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
+       struct device *dev = &pdev->dev;
+       struct rtc_device *rtc;
+       int irq;
+       int ret;
+
+       /*
+        * devm_rtc_allocate_device() allocates and initializes the
+        * rtc_device itself; a separate devm_kzalloc() of *rtc would only
+        * be overwritten and leak a devm-managed duplicate.
+        */
+       rtc = devm_rtc_allocate_device(dev);
+       if (IS_ERR(rtc))
+               return PTR_ERR(rtc);
+
+       // Enable crystal oscillator.
+       ret = regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_2,
+                             TPS6594_BIT_XTAL_EN);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_test_bits(tps->regmap, TPS6594_REG_RTC_STATUS,
+                              TPS6594_BIT_RUN);
+       if (ret < 0)
+               return ret;
+       // RTC not running.
+       if (ret == 0) {
+               ret = regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+                                     TPS6594_BIT_STOP_RTC);
+               if (ret < 0)
+                       return ret;
+
+               /*
+                * On some boards, a 40 ms delay is needed before BIT_RUN is set.
+                * 80 ms should provide sufficient margin.
+                */
+               mdelay(80);
+
+               /*
+                * RTC should be running now. Check if this is the case.
+                * If not it might be a missing oscillator.
+                */
+               ret = regmap_test_bits(tps->regmap, TPS6594_REG_RTC_STATUS,
+                                      TPS6594_BIT_RUN);
+               if (ret < 0)
+                       return ret;
+               if (ret == 0)
+                       return -ENODEV;
+
+               // Stop RTC until first call to `tps6594_rtc_set_time`.
+               ret = regmap_clear_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+                                       TPS6594_BIT_STOP_RTC);
+               if (ret < 0)
+                       return ret;
+       }
+
+       platform_set_drvdata(pdev, rtc);
+
+       irq = platform_get_irq_byname(pdev, TPS6594_IRQ_NAME_ALARM);
+       if (irq < 0)
+               return dev_err_probe(dev, irq, "Failed to get irq\n");
+
+       ret = devm_request_threaded_irq(dev, irq, NULL, tps6594_rtc_interrupt,
+                                       IRQF_ONESHOT, TPS6594_IRQ_NAME_ALARM,
+                                       dev);
+       if (ret < 0)
+               return dev_err_probe(dev, ret,
+                                    "Failed to request_threaded_irq\n");
+
+       ret = device_init_wakeup(dev, true);
+       if (ret < 0)
+               return dev_err_probe(dev, ret,
+                                    "Failed to init rtc as wakeup source\n");
+
+       rtc->ops = &tps6594_rtc_ops;
+       rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+       rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+       return devm_rtc_register_device(rtc);
+}
+
+/* Matched against the "tps6594-rtc" platform device, presumably created by
+ * the parent TPS6594 MFD driver — verify against the MFD cell table. */
+static const struct platform_device_id tps6594_rtc_id_table[] = {
+       { "tps6594-rtc", },
+       {}
+};
+MODULE_DEVICE_TABLE(platform, tps6594_rtc_id_table);
+
+static struct platform_driver tps6594_rtc_driver = {
+       .probe          = tps6594_rtc_probe,
+       .driver         = {
+               .name   = "tps6594-rtc",
+       },
+       .id_table = tps6594_rtc_id_table,
+};
+
+module_platform_driver(tps6594_rtc_driver);
+MODULE_AUTHOR("Esteban Blanc <eblanc@baylibre.com>");
+MODULE_DESCRIPTION("TPS6594 RTC driver");
+MODULE_LICENSE("GPL");
index 99361618c31f314c5edba1999953788dd3a69250..0b0324fe4aff302bf8c7c4575df585c5ce60df28 100644 (file)
@@ -79,8 +79,8 @@ struct raw3215_info {
        struct ccw_device *cdev;      /* device for tty driver */
        spinlock_t *lock;             /* pointer to irq lock */
        int flags;                    /* state flags */
-       char *buffer;                 /* pointer to output buffer */
-       char *inbuf;                  /* pointer to input buffer */
+       u8 *buffer;                   /* pointer to output buffer */
+       u8 *inbuf;                    /* pointer to input buffer */
        int head;                     /* first free byte in output buffer */
        int count;                    /* number of bytes in output buffer */
        int written;                  /* number of bytes in write requests */
@@ -89,7 +89,6 @@ struct raw3215_info {
        wait_queue_head_t empty_wait; /* wait queue for flushing */
        struct timer_list timer;      /* timer for delayed output */
        int line_pos;                 /* position on the line (for tabs) */
-       char ubuffer[80];             /* copy_from_user buffer */
 };
 
 /* array of 3215 devices structures */
@@ -523,12 +522,14 @@ static unsigned int raw3215_make_room(struct raw3215_info *raw,
  *     string  without blocking.
  *     Return value is the number of bytes copied.
  */
-static unsigned int raw3215_addtext(const char *str, unsigned int length,
+static unsigned int raw3215_addtext(const u8 *str, size_t length,
                                    struct raw3215_info *raw, int opmode,
                                    unsigned int todrop)
 {
-       unsigned int c, ch, i, blanks, expanded_size = 0;
+       unsigned int i, blanks, expanded_size = 0;
        unsigned int column = raw->line_pos;
+       size_t c;
+       u8 ch;
 
        if (opmode == RAW3215_COUNT)
                todrop = 0;
@@ -559,7 +560,7 @@ static unsigned int raw3215_addtext(const char *str, unsigned int length,
                if (todrop && expanded_size < todrop)   /* Drop head data */
                        continue;
                for (i = 0; i < blanks; i++) {
-                       raw->buffer[raw->head] = (char)_ascebc[(int)ch];
+                       raw->buffer[raw->head] = _ascebc[ch];
                        raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);
                        raw->count++;
                }
@@ -571,8 +572,8 @@ static unsigned int raw3215_addtext(const char *str, unsigned int length,
 /*
  * String write routine for 3215 devices
  */
-static void raw3215_write(struct raw3215_info *raw, const char *str,
-                         unsigned int length)
+static void raw3215_write(struct raw3215_info *raw, const u8 *str,
+                         size_t length)
 {
        unsigned int count, avail;
        unsigned long flags;
@@ -597,7 +598,7 @@ static void raw3215_write(struct raw3215_info *raw, const char *str,
 /*
  * Put character routine for 3215 devices
  */
-static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
+static void raw3215_putchar(struct raw3215_info *raw, u8 ch)
 {
        raw3215_write(raw, &ch, 1);
 }
@@ -824,12 +825,10 @@ static struct ccw_driver raw3215_ccw_driver = {
        .int_class      = IRQIO_C15,
 };
 
-static void handle_write(struct raw3215_info *raw, const char *str, int count)
+static void handle_write(struct raw3215_info *raw, const u8 *str, size_t count)
 {
-       int i;
-
        while (count > 0) {
-               i = min_t(int, count, RAW3215_BUFFER_SIZE - 1);
+               size_t i = min_t(size_t, count, RAW3215_BUFFER_SIZE - 1);
                raw3215_write(raw, str, i);
                count -= i;
                str += i;
index 363315fa1666d69410d63c75c1a1132b47d5eaa9..251d2a1c3eef946bf07e79e8abf9d52b1dc24129 100644 (file)
@@ -54,7 +54,7 @@ struct tty3270_attribute {
 };
 
 struct tty3270_cell {
-       unsigned char character;
+       u8 character;
        struct tty3270_attribute attributes;
 };
 
@@ -123,7 +123,7 @@ struct tty3270 {
 
        /* Character array for put_char/flush_chars. */
        unsigned int char_count;
-       char char_buf[TTY3270_CHAR_BUF_SIZE];
+       u8 char_buf[TTY3270_CHAR_BUF_SIZE];
 };
 
 /* tty3270->update_flags. See tty3270_update for details. */
@@ -1255,7 +1255,7 @@ static unsigned int tty3270_write_room(struct tty_struct *tty)
  * Insert character into the screen at the current position with the
  * current color and highlight. This function does NOT do cursor movement.
  */
-static void tty3270_put_character(struct tty3270 *tp, char ch)
+static void tty3270_put_character(struct tty3270 *tp, u8 ch)
 {
        struct tty3270_line *line;
        struct tty3270_cell *cell;
@@ -1561,7 +1561,7 @@ static void tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
  *  Pn is a numeric parameter, a string of zero or more decimal digits.
  *  Ps is a selective parameter.
  */
-static void tty3270_escape_sequence(struct tty3270 *tp, char ch)
+static void tty3270_escape_sequence(struct tty3270 *tp, u8 ch)
 {
        enum { ES_NORMAL, ES_ESC, ES_SQUARE, ES_PAREN, ES_GETPARS };
 
@@ -1726,7 +1726,7 @@ static void tty3270_escape_sequence(struct tty3270 *tp, char ch)
  * String write routine for 3270 ttys
  */
 static void tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty,
-                            const unsigned char *buf, int count)
+                            const u8 *buf, size_t count)
 {
        int i_msg, i;
 
@@ -2052,7 +2052,7 @@ con3270_write(struct console *co, const char *str, unsigned int count)
 {
        struct tty3270 *tp = co->data;
        unsigned long flags;
-       char c;
+       u8 c;
 
        spin_lock_irqsave(&tp->view.lock, flags);
        while (count--) {
index 144cd2e035909bceac10bd75e00ec9b7b721d6e5..42c9f77f8da0a00aa272e6875e8dbd30d7c3e54e 100644 (file)
@@ -109,6 +109,7 @@ static int uvio_copy_attest_result_to_user(struct uv_cb_attest *uvcb_attest,
                                           struct uvio_attest *uvio_attest)
 {
        struct uvio_attest __user *user_uvio_attest = (void __user *)uv_ioctl->argument_addr;
+       u32 __user *user_buf_add_len = (u32 __user *)&user_uvio_attest->add_data_len;
        void __user *user_buf_add = (void __user *)uvio_attest->add_data_addr;
        void __user *user_buf_meas = (void __user *)uvio_attest->meas_addr;
        void __user *user_buf_uid = &user_uvio_attest->config_uid;
@@ -117,6 +118,8 @@ static int uvio_copy_attest_result_to_user(struct uv_cb_attest *uvcb_attest,
                return -EFAULT;
        if (add_data && copy_to_user(user_buf_add, add_data, uvio_attest->add_data_len))
                return -EFAULT;
+       if (put_user(uvio_attest->add_data_len, user_buf_add_len))
+               return -EFAULT;
        if (copy_to_user(user_buf_uid, uvcb_attest->config_uid, sizeof(uvcb_attest->config_uid)))
                return -EFAULT;
        return 0;
index acb710d3d7bcd52261ea5b36b8a2be82f72f2926..983b3b16196c6b7638e023f121e5c937c83ce150 100644 (file)
@@ -32,7 +32,8 @@
 
 #define AP_RESET_INTERVAL              20      /* Reset sleep interval (20ms)          */
 
-static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
+static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
+static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
 static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
 static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
 static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
@@ -665,17 +666,23 @@ static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
  *                             device driver.
  *
  * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
+ * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
+ *               guest's AP configuration that are still in the host's AP
+ *               configuration.
  *
  * Note: If an APQN referencing a queue device that is not bound to the vfio_ap
  *      driver, its APID will be filtered from the guest's APCB. The matrix
  *      structure precludes filtering an individual APQN, so its APID will be
- *      filtered.
+ *      filtered. Consequently, all queues associated with the adapter that
+ *      are in the host's AP configuration must be reset. If queues are
+ *      subsequently made available again to the guest, they should re-appear
+ *      in a reset state
  *
  * Return: a boolean value indicating whether the KVM guest's APCB was changed
  *        by the filtering or not.
  */
-static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
-                                      struct ap_matrix_mdev *matrix_mdev)
+static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
+                                      unsigned long *apm_filtered)
 {
        unsigned long apid, apqi, apqn;
        DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
@@ -685,6 +692,7 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
        bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
        bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
        vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+       bitmap_clear(apm_filtered, 0, AP_DEVICES);
 
        /*
         * Copy the adapters, domains and control domains to the shadow_apcb
@@ -696,8 +704,9 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
        bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
                   (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
 
-       for_each_set_bit_inv(apid, apm, AP_DEVICES) {
-               for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+       for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
+               for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
+                                    AP_DOMAINS) {
                        /*
                         * If the APQN is not bound to the vfio_ap device
                         * driver, then we can't assign it to the guest's
@@ -709,8 +718,16 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
                        apqn = AP_MKQID(apid, apqi);
                        q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
                        if (!q || q->reset_status.response_code) {
-                               clear_bit_inv(apid,
-                                             matrix_mdev->shadow_apcb.apm);
+                               clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+
+                               /*
+                                * If the adapter was previously plugged into
+                                * the guest, let's let the caller know that
+                                * the APID was filtered.
+                                */
+                               if (test_bit_inv(apid, prev_shadow_apm))
+                                       set_bit_inv(apid, apm_filtered);
+
                                break;
                        }
                }
@@ -812,7 +829,7 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
 
        mutex_lock(&matrix_dev->guests_lock);
        mutex_lock(&matrix_dev->mdevs_lock);
-       vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
+       vfio_ap_mdev_reset_queues(matrix_mdev);
        vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
        list_del(&matrix_mdev->node);
        mutex_unlock(&matrix_dev->mdevs_lock);
@@ -922,6 +939,47 @@ static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
                                       AP_MKQID(apid, apqi));
 }
 
+static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
+                                   unsigned long apid,
+                                   struct list_head *qlist)
+{
+       struct vfio_ap_queue *q;
+       unsigned long  apqi;
+
+       for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
+               q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
+               if (q)
+                       list_add_tail(&q->reset_qnode, qlist);
+       }
+}
+
+static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
+                                 unsigned long apid)
+{
+       struct list_head qlist;
+
+       INIT_LIST_HEAD(&qlist);
+       collect_queues_to_reset(matrix_mdev, apid, &qlist);
+       vfio_ap_mdev_reset_qlist(&qlist);
+}
+
+static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
+                                 unsigned long *apm_reset)
+{
+       struct list_head qlist;
+       unsigned long apid;
+
+       if (bitmap_empty(apm_reset, AP_DEVICES))
+               return 0;
+
+       INIT_LIST_HEAD(&qlist);
+
+       for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
+               collect_queues_to_reset(matrix_mdev, apid, &qlist);
+
+       return vfio_ap_mdev_reset_qlist(&qlist);
+}
+
 /**
  * assign_adapter_store - parses the APID from @buf and sets the
  * corresponding bit in the mediated matrix device's APM
@@ -962,7 +1020,7 @@ static ssize_t assign_adapter_store(struct device *dev,
 {
        int ret;
        unsigned long apid;
-       DECLARE_BITMAP(apm_delta, AP_DEVICES);
+       DECLARE_BITMAP(apm_filtered, AP_DEVICES);
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
 
        mutex_lock(&ap_perms_mutex);
@@ -991,12 +1049,11 @@ static ssize_t assign_adapter_store(struct device *dev,
        }
 
        vfio_ap_mdev_link_adapter(matrix_mdev, apid);
-       memset(apm_delta, 0, sizeof(apm_delta));
-       set_bit_inv(apid, apm_delta);
 
-       if (vfio_ap_mdev_filter_matrix(apm_delta,
-                                      matrix_mdev->matrix.aqm, matrix_mdev))
+       if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+               reset_queues_for_apids(matrix_mdev, apm_filtered);
+       }
 
        ret = count;
 done:
@@ -1027,11 +1084,12 @@ static struct vfio_ap_queue
  *                              adapter was assigned.
  * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
  * @apid: the APID of the unassigned adapter.
- * @qtable: table for storing queues associated with unassigned adapter.
+ * @qlist: list for storing queues associated with unassigned adapter that
+ *        need to be reset.
  */
 static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
                                        unsigned long apid,
-                                       struct ap_queue_table *qtable)
+                                       struct list_head *qlist)
 {
        unsigned long apqi;
        struct vfio_ap_queue *q;
@@ -1039,11 +1097,10 @@ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
        for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
                q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
 
-               if (q && qtable) {
+               if (q && qlist) {
                        if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
                            test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
-                               hash_add(qtable->queues, &q->mdev_qnode,
-                                        q->apqn);
+                               list_add_tail(&q->reset_qnode, qlist);
                }
        }
 }
@@ -1051,26 +1108,23 @@ static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
 static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
                                            unsigned long apid)
 {
-       int loop_cursor;
-       struct vfio_ap_queue *q;
-       struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
+       struct vfio_ap_queue *q, *tmpq;
+       struct list_head qlist;
 
-       hash_init(qtable->queues);
-       vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);
+       INIT_LIST_HEAD(&qlist);
+       vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);
 
        if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
                clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);
        }
 
-       vfio_ap_mdev_reset_queues(qtable);
+       vfio_ap_mdev_reset_qlist(&qlist);
 
-       hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+       list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
                vfio_ap_unlink_mdev_fr_queue(q);
-               hash_del(&q->mdev_qnode);
+               list_del(&q->reset_qnode);
        }
-
-       kfree(qtable);
 }
 
 /**
@@ -1171,7 +1225,7 @@ static ssize_t assign_domain_store(struct device *dev,
 {
        int ret;
        unsigned long apqi;
-       DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
+       DECLARE_BITMAP(apm_filtered, AP_DEVICES);
        struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
 
        mutex_lock(&ap_perms_mutex);
@@ -1200,12 +1254,11 @@ static ssize_t assign_domain_store(struct device *dev,
        }
 
        vfio_ap_mdev_link_domain(matrix_mdev, apqi);
-       memset(aqm_delta, 0, sizeof(aqm_delta));
-       set_bit_inv(apqi, aqm_delta);
 
-       if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
-                                      matrix_mdev))
+       if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+               reset_queues_for_apids(matrix_mdev, apm_filtered);
+       }
 
        ret = count;
 done:
@@ -1218,7 +1271,7 @@ static DEVICE_ATTR_WO(assign_domain);
 
 static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
                                       unsigned long apqi,
-                                      struct ap_queue_table *qtable)
+                                      struct list_head *qlist)
 {
        unsigned long apid;
        struct vfio_ap_queue *q;
@@ -1226,11 +1279,10 @@ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
        for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
                q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
 
-               if (q && qtable) {
+               if (q && qlist) {
                        if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
                            test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
-                               hash_add(qtable->queues, &q->mdev_qnode,
-                                        q->apqn);
+                               list_add_tail(&q->reset_qnode, qlist);
                }
        }
 }
@@ -1238,26 +1290,23 @@ static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
 static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
                                           unsigned long apqi)
 {
-       int loop_cursor;
-       struct vfio_ap_queue *q;
-       struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);
+       struct vfio_ap_queue *q, *tmpq;
+       struct list_head qlist;
 
-       hash_init(qtable->queues);
-       vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);
+       INIT_LIST_HEAD(&qlist);
+       vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);
 
        if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
                clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);
        }
 
-       vfio_ap_mdev_reset_queues(qtable);
+       vfio_ap_mdev_reset_qlist(&qlist);
 
-       hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+       list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
                vfio_ap_unlink_mdev_fr_queue(q);
-               hash_del(&q->mdev_qnode);
+               list_del(&q->reset_qnode);
        }
-
-       kfree(qtable);
 }
 
 /**
@@ -1612,7 +1661,7 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
                get_update_locks_for_kvm(kvm);
 
                kvm_arch_crypto_clear_masks(kvm);
-               vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
+               vfio_ap_mdev_reset_queues(matrix_mdev);
                kvm_put_kvm(kvm);
                matrix_mdev->kvm = NULL;
 
@@ -1748,15 +1797,33 @@ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
        }
 }
 
-static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
+static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
 {
        int ret = 0, loop_cursor;
        struct vfio_ap_queue *q;
 
-       hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode)
+       hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode)
                vfio_ap_mdev_reset_queue(q);
 
-       hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+       hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) {
+               flush_work(&q->reset_work);
+
+               if (q->reset_status.response_code)
+                       ret = -EIO;
+       }
+
+       return ret;
+}
+
+static int vfio_ap_mdev_reset_qlist(struct list_head *qlist)
+{
+       int ret = 0;
+       struct vfio_ap_queue *q;
+
+       list_for_each_entry(q, qlist, reset_qnode)
+               vfio_ap_mdev_reset_queue(q);
+
+       list_for_each_entry(q, qlist, reset_qnode) {
                flush_work(&q->reset_work);
 
                if (q->reset_status.response_code)
@@ -1942,7 +2009,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
                ret = vfio_ap_mdev_get_device_info(arg);
                break;
        case VFIO_DEVICE_RESET:
-               ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
+               ret = vfio_ap_mdev_reset_queues(matrix_mdev);
                break;
        case VFIO_DEVICE_GET_IRQ_INFO:
                        ret = vfio_ap_get_irq_info(arg);
@@ -2088,6 +2155,7 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
 {
        int ret;
        struct vfio_ap_queue *q;
+       DECLARE_BITMAP(apm_filtered, AP_DEVICES);
        struct ap_matrix_mdev *matrix_mdev;
 
        ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
@@ -2109,15 +2177,28 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
        if (matrix_mdev) {
                vfio_ap_mdev_link_queue(matrix_mdev, q);
 
-               if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
-                                              matrix_mdev->matrix.aqm,
-                                              matrix_mdev))
+               /*
+                * If we're in the process of handling the adding of adapters or
+                * domains to the host's AP configuration, then let the
+                * vfio_ap device driver's on_scan_complete callback filter the
+                * matrix and update the guest's AP configuration after all of
+                * the new queue devices are probed.
+                */
+               if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
+                   !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
+                       goto done;
+
+               if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
                        vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+                       reset_queues_for_apids(matrix_mdev, apm_filtered);
+               }
        }
+
+done:
        dev_set_drvdata(&apdev->device, q);
        release_update_locks_for_mdev(matrix_mdev);
 
-       return 0;
+       return ret;
 
 err_remove_group:
        sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
@@ -2134,26 +2215,40 @@ void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
        q = dev_get_drvdata(&apdev->device);
        get_update_locks_for_queue(q);
        matrix_mdev = q->matrix_mdev;
+       apid = AP_QID_CARD(q->apqn);
+       apqi = AP_QID_QUEUE(q->apqn);
 
        if (matrix_mdev) {
-               vfio_ap_unlink_queue_fr_mdev(q);
-
-               apid = AP_QID_CARD(q->apqn);
-               apqi = AP_QID_QUEUE(q->apqn);
-
-               /*
-                * If the queue is assigned to the guest's APCB, then remove
-                * the adapter's APID from the APCB and hot it into the guest.
-                */
+               /* If the queue is assigned to the guest's AP configuration */
                if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
                    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+                       /*
+                        * Since the queues are defined via a matrix of adapters
+                        * and domains, it is not possible to hot unplug a
+                        * single queue; so, let's unplug the adapter.
+                        */
                        clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
                        vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+                       reset_queues_for_apid(matrix_mdev, apid);
+                       goto done;
                }
        }
 
-       vfio_ap_mdev_reset_queue(q);
-       flush_work(&q->reset_work);
+       /*
+        * If the queue is not in the host's AP configuration, then resetting
+        * it will fail with response code 01, (APQN not valid); so, let's make
+        * sure it is in the host's config.
+        */
+       if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) &&
+           test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) {
+               vfio_ap_mdev_reset_queue(q);
+               flush_work(&q->reset_work);
+       }
+
+done:
+       if (matrix_mdev)
+               vfio_ap_unlink_queue_fr_mdev(q);
+
        dev_set_drvdata(&apdev->device, NULL);
        kfree(q);
        release_update_locks_for_mdev(matrix_mdev);
@@ -2461,39 +2556,30 @@ void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
 
 static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
 {
-       bool do_hotplug = false;
-       int filter_domains = 0;
-       int filter_adapters = 0;
-       DECLARE_BITMAP(apm, AP_DEVICES);
-       DECLARE_BITMAP(aqm, AP_DOMAINS);
+       DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+       bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false;
 
        mutex_lock(&matrix_mdev->kvm->lock);
        mutex_lock(&matrix_dev->mdevs_lock);
 
-       filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
-                                    matrix_mdev->apm_add, AP_DEVICES);
-       filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
-                                   matrix_mdev->aqm_add, AP_DOMAINS);
-
-       if (filter_adapters && filter_domains)
-               do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
-       else if (filter_adapters)
-               do_hotplug |=
-                       vfio_ap_mdev_filter_matrix(apm,
-                                                  matrix_mdev->shadow_apcb.aqm,
-                                                  matrix_mdev);
-       else
-               do_hotplug |=
-                       vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
-                                                  aqm, matrix_mdev);
+       filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm,
+                                           matrix_mdev->apm_add, AP_DEVICES);
+       filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm,
+                                          matrix_mdev->aqm_add, AP_DOMAINS);
+       filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm,
+                                        matrix_mdev->adm_add, AP_DOMAINS);
 
-       if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
-                             AP_DOMAINS))
+       if (filter_adapters || filter_domains)
+               do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
+
+       if (filter_cdoms)
                do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
 
        if (do_hotplug)
                vfio_ap_mdev_update_guest_apcb(matrix_mdev);
 
+       reset_queues_for_apids(matrix_mdev, apm_filtered);
+
        mutex_unlock(&matrix_dev->mdevs_lock);
        mutex_unlock(&matrix_mdev->kvm->lock);
 }
index 88aff8b81f2fc664594ea3e2b354d03545e4e19c..98d37aa27044a643825e0632bfe2b445fcd717b1 100644 (file)
@@ -133,6 +133,8 @@ struct ap_matrix_mdev {
  * @apqn: the APQN of the AP queue device
  * @saved_isc: the guest ISC registered with the GIB interface
  * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
+ * @reset_qnode: allows the vfio_ap_queue struct to be added to a list of queues
+ *              that need to be reset
  * @reset_status: the status from the last reset of the queue
  * @reset_work: work to wait for queue reset to complete
  */
@@ -143,6 +145,7 @@ struct vfio_ap_queue {
 #define VFIO_AP_ISC_INVALID 0xff
        unsigned char saved_isc;
        struct hlist_node mdev_qnode;
+       struct list_head reset_qnode;
        struct ap_queue_status reset_status;
        struct work_struct reset_work;
 };
index b92a32b4b1141670cc2f3c2e8b91a8e1d526b26a..04c64ce0a1ca1a2006d31ca5c7ee819598f155c4 100644 (file)
@@ -255,9 +255,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
                if (!recover) {
                        hash_del(&addr->hnode);
                        kfree(addr);
-                       continue;
+               } else {
+                       /* prepare for recovery */
+                       addr->disp_flag = QETH_DISP_ADDR_ADD;
                }
-               addr->disp_flag = QETH_DISP_ADDR_ADD;
        }
 
        mutex_unlock(&card->ip_lock);
@@ -278,9 +279,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
                if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
                        rc = qeth_l3_register_addr_entry(card, addr);
 
-                       if (!rc) {
+                       if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
+                               /* keep it in the records */
                                addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
                        } else {
+                               /* bad address */
                                hash_del(&addr->hnode);
                                kfree(addr);
                        }
index 408a806bf4c2d3c23a07ca0f45dbe71d6d741965..c64a085a7ee2f9f29c0e9fe1e00bed7e8eedf5a1 100644 (file)
@@ -263,6 +263,7 @@ static ssize_t store_ctlr_mode(struct device *dev,
                               const char *buf, size_t count)
 {
        struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+       int res;
 
        if (count > FCOE_MAX_MODENAME_LEN)
                return -EINVAL;
@@ -279,12 +280,13 @@ static ssize_t store_ctlr_mode(struct device *dev,
                        return -ENOTSUPP;
                }
 
-               ctlr->mode = sysfs_match_string(fip_conn_type_names, buf);
-               if (ctlr->mode < 0 || ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+               res = sysfs_match_string(fip_conn_type_names, buf);
+               if (res < 0 || res == FIP_CONN_TYPE_UNKNOWN) {
                        LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
                                          buf);
                        return -EINVAL;
                }
+               ctlr->mode = res;
 
                ctlr->f->set_fcoe_ctlr_mode(ctlr);
                LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);
index 4d6db4509e755dcfc66803ff929e36140d8e8373..8d7fc5284293b5283523b049ba38387857ebb09e 100644 (file)
@@ -546,6 +546,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
        if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) {
                WARN(1, "fnic<%d>: %s: hwq: %d tag 0x%x already exists\n",
                                fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag));
+               spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
index 2a50fda3a628c3fdc9daa79d73437de565bc5891..625fd547ee60a79c3a8bae9e985cb74d06b9073f 100644 (file)
@@ -371,7 +371,6 @@ static u16 initio_se2_rd(unsigned long base, u8 addr)
  */
 static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
 {
-       u8 rb;
        u8 instr;
        int i;
 
@@ -400,7 +399,7 @@ static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
                udelay(30);
                outb(SE2CS, base + TUL_NVRAM);                  /* -CLK */
                udelay(30);
-               if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
+               if (inb(base + TUL_NVRAM) & SE2DI)
                        break;  /* write complete */
        }
        outb(0, base + TUL_NVRAM);                              /* -CS */
index 71f711cb0628a70d40efc99520ef2dc807494e75..355a0bc0828e749a45513309b942cfdae4878a7c 100644 (file)
@@ -3387,7 +3387,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost,
                return SCI_FAILURE;
        }
 
-       return SCI_SUCCESS;
+       return status;
 }
 
 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
index d26941b131fdb81e6bc9fe48ccc57b75a0055af5..bf879d81846b69379f34b91759a45ef8d5af89fb 100644 (file)
@@ -1918,7 +1918,7 @@ out:
  *
  * Returns the number of SGEs added to the SGL.
  **/
-static int
+static uint32_t
 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                struct sli4_sge *sgl, int datasegcnt,
                struct lpfc_io_buf *lpfc_cmd)
@@ -1926,8 +1926,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        struct scatterlist *sgde = NULL; /* s/g data entry */
        struct sli4_sge_diseed *diseed = NULL;
        dma_addr_t physaddr;
-       int i = 0, num_sge = 0, status;
-       uint32_t reftag;
+       int i = 0, status;
+       uint32_t reftag, num_sge = 0;
        uint8_t txop, rxop;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t rc;
@@ -2099,7 +2099,7 @@ out:
  *
  * Returns the number of SGEs added to the SGL.
  **/
-static int
+static uint32_t
 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                struct sli4_sge *sgl, int datacnt, int protcnt,
                struct lpfc_io_buf *lpfc_cmd)
@@ -2123,8 +2123,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        uint32_t rc;
 #endif
        uint32_t checking = 1;
-       uint32_t dma_offset = 0;
-       int num_sge = 0, j = 2;
+       uint32_t dma_offset = 0, num_sge = 0;
+       int j = 2;
        struct sli4_hybrid_sgl *sgl_xtra = NULL;
 
        sgpe = scsi_prot_sglist(sc);
index 46e6f807d1ca7220b400a1772aceda5f87f7b377..706985358c6a02f37d91698c0163f506695e22cc 100644 (file)
@@ -4880,7 +4880,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
        lockdep_assert_held(&phba->hbalock);
 
        pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
-       if (hdrtype != 0x80 ||
+       if (hdrtype != PCI_HEADER_TYPE_MFD ||
            (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
             FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
                return;
index d8c57a0a518f4c42aa0ff60417052dc871a431a7..528f19f782f2156d956a618eddb80e00f1faf728 100644 (file)
@@ -475,7 +475,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
  * @op_reply_q: op_reply_qinfo object
  * @reply_ci: operational reply descriptor's queue consumer index
  *
- * Returns reply descriptor frame address
+ * Returns: reply descriptor frame address
  */
 static inline struct mpi3_default_reply_descriptor *
 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
@@ -1063,7 +1063,6 @@ enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
  * @mrioc: Adapter instance reference
  *
  * Free the DMA memory allocated for IOCTL handling purpose.
-
  *
  * Return: None
  */
@@ -1106,7 +1105,6 @@ static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
 /**
  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
  * @mrioc: Adapter instance reference
-
  *
  * This function allocates dmaable memory required to handle the
  * application issued MPI3 IOCTL requests.
@@ -1241,7 +1239,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
  * during reset/resume
  * @mrioc: Adapter instance reference
  *
- * Return zero if the new IOCFacts parameters value is compatible with
+ * Return: zero if the new IOCFacts parameters value is compatible with
  * older values else return -EPERM
  */
 static int
index 03348f605c2e9a5289082fbd2c1694c90229cf16..dd674378f2f392216334f7adb90129f2753e8c2a 100644 (file)
@@ -2889,7 +2889,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
 static void
 qla_trace_init(void)
 {
-       qla_trc_array = trace_array_get_by_name("qla2xxx");
+       qla_trc_array = trace_array_get_by_name("qla2xxx", NULL);
        if (!qla_trc_array) {
                ql_log(ql_log_fatal, NULL, 0x0001,
                       "Unable to create qla2xxx trace instance, instance logging will be disabled.\n");
index 885a7d5df3b9daa26bd0b42a428d7cb40ceae7d2..612489afe8d2467965759c80562562e26919f704 100644 (file)
@@ -61,11 +61,11 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
 static enum scsi_disposition scsi_try_to_abort_cmd(const struct scsi_host_template *,
                                                   struct scsi_cmnd *);
 
-void scsi_eh_wakeup(struct Scsi_Host *shost)
+void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy)
 {
        lockdep_assert_held(shost->host_lock);
 
-       if (scsi_host_busy(shost) == shost->host_failed) {
+       if (busy == shost->host_failed) {
                trace_scsi_eh_wakeup(shost);
                wake_up_process(shost->ehandler);
                SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
@@ -88,7 +88,7 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
        if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
            scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
                shost->host_eh_scheduled++;
-               scsi_eh_wakeup(shost);
+               scsi_eh_wakeup(shost, scsi_host_busy(shost));
        }
 
        spin_unlock_irqrestore(shost->host_lock, flags);
@@ -282,11 +282,12 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
 {
        struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
        struct Scsi_Host *shost = scmd->device->host;
+       unsigned int busy = scsi_host_busy(shost);
        unsigned long flags;
 
        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_failed++;
-       scsi_eh_wakeup(shost);
+       scsi_eh_wakeup(shost, busy);
        spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
@@ -2197,15 +2198,18 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
        struct scsi_cmnd *scmd, *next;
 
        list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
+               struct scsi_device *sdev = scmd->device;
+
                list_del_init(&scmd->eh_entry);
-               if (scsi_device_online(scmd->device) &&
-                   !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd) &&
-                       scsi_eh_should_retry_cmd(scmd)) {
+               if (scsi_device_online(sdev) && !scsi_noretry_cmd(scmd) &&
+                   scsi_cmd_retry_allowed(scmd) &&
+                   scsi_eh_should_retry_cmd(scmd)) {
                        SCSI_LOG_ERROR_RECOVERY(3,
                                scmd_printk(KERN_INFO, scmd,
                                             "%s: flush retry cmd\n",
                                             current->comm));
                                scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
+                               blk_mq_kick_requeue_list(sdev->request_queue);
                } else {
                        /*
                         * If just we got sense for the device (called
index cf3864f720930988fbadc77b3c91c77fe2d3bb62..df5ac03d5d6c2eb5233ad7fcfdad37a1e487b4e6 100644 (file)
@@ -278,9 +278,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
        rcu_read_lock();
        __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
        if (unlikely(scsi_host_in_recovery(shost))) {
+               unsigned int busy = scsi_host_busy(shost);
+
                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
-                       scsi_eh_wakeup(shost);
+                       scsi_eh_wakeup(shost, busy);
                spin_unlock_irqrestore(shost->host_lock, flags);
        }
        rcu_read_unlock();
index 3f0dfb97db6bd1b88755db1fb50dd6e968e385c6..1fbfe1b52c9f1a906ea6b0da7a6b273e2972a903 100644 (file)
@@ -92,7 +92,7 @@ extern void scmd_eh_abort_handler(struct work_struct *work);
 extern enum blk_eh_timer_return scsi_timeout(struct request *req);
 extern int scsi_error_handler(void *host);
 extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
-extern void scsi_eh_wakeup(struct Scsi_Host *shost);
+extern void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy);
 extern void scsi_eh_scmd_add(struct scsi_cmnd *);
 void scsi_eh_ready_devs(struct Scsi_Host *shost,
                        struct list_head *work_q,
index 041940183516969318839f297583f16ab98b7b71..cdedc271857aae82ef5e25ae2506c4079c6bd6ca 100644 (file)
@@ -1347,7 +1347,6 @@ struct pqi_ctrl_info {
        bool            controller_online;
        bool            block_requests;
        bool            scan_blocked;
-       u8              logical_volume_rescan_needed : 1;
        u8              inbound_spanning_supported : 1;
        u8              outbound_spanning_supported : 1;
        u8              pqi_mode_enabled : 1;
index 9a58df9312fa7e4151ca07537391b030d91a2289..ceff1ec13f9ea9ea056da947d3939c51f4797522 100644 (file)
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION         "2.1.24-046"
+#define DRIVER_VERSION         "2.1.26-030"
 #define DRIVER_MAJOR           2
 #define DRIVER_MINOR           1
-#define DRIVER_RELEASE         24
-#define DRIVER_REVISION                46
+#define DRIVER_RELEASE         26
+#define DRIVER_REVISION                30
 
 #define DRIVER_NAME            "Microchip SmartPQI Driver (v" \
                                DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -2093,8 +2093,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
                if (existing_device->devtype == TYPE_DISK) {
                        existing_device->raid_level = new_device->raid_level;
                        existing_device->volume_status = new_device->volume_status;
-                       if (ctrl_info->logical_volume_rescan_needed)
-                               existing_device->rescan = true;
                        memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
                        if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
                                kfree(existing_device->raid_map);
@@ -2164,6 +2162,20 @@ static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
                INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
 }
 
+static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
+{
+       if (pqi_device_in_remove(device))
+               return false;
+
+       if (device->sdev == NULL)
+               return false;
+
+       if (!scsi_device_online(device->sdev))
+               return false;
+
+       return device->rescan;
+}
+
 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
 {
@@ -2284,9 +2296,13 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
                if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
                        device->advertised_queue_depth = device->queue_depth;
                        scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
-                       if (device->rescan) {
-                               scsi_rescan_device(device->sdev);
+                       spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+                       if (pqi_volume_rescan_needed(device)) {
                                device->rescan = false;
+                               spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+                               scsi_rescan_device(device->sdev);
+                       } else {
+                               spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
                        }
                }
        }
@@ -2308,8 +2324,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
                }
        }
 
-       ctrl_info->logical_volume_rescan_needed = false;
-
 }
 
 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
@@ -3702,6 +3716,21 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
        return ack_event;
 }
 
+static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
+{
+       unsigned long flags;
+       struct pqi_scsi_dev *device;
+
+       spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+       list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+               if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
+                       device->rescan = true;
+       }
+
+       spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
 {
        unsigned long flags;
@@ -3742,7 +3771,7 @@ static void pqi_event_worker(struct work_struct *work)
                                ack_event = true;
                                rescan_needed = true;
                                if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
-                                       ctrl_info->logical_volume_rescan_needed = true;
+                                       pqi_mark_volumes_for_rescan(ctrl_info);
                                else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
                                        pqi_disable_raid_bypass(ctrl_info);
                        }
@@ -10142,6 +10171,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                                0x1014, 0x0718)
        },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1137, 0x02f8)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1137, 0x02f9)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1137, 0x02fa)
+       },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                                0x1e93, 0x1000)
@@ -10198,6 +10239,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                                0x1f51, 0x100a)
        },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1f51, 0x100e)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1f51, 0x100f)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1f51, 0x1010)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1f51, 0x1011)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1f51, 0x1043)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1f51, 0x1044)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1f51, 0x1045)
+       },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               PCI_ANY_ID, PCI_ANY_ID)
index a95936b18f695e3ef796098866ea07101e9e346d..7ceb982040a5dfe5d490f9a4bd306e99e5140a53 100644 (file)
@@ -330,6 +330,7 @@ enum storvsc_request_type {
  */
 
 static int storvsc_ringbuffer_size = (128 * 1024);
+static int aligned_ringbuffer_size;
 static u32 max_outstanding_req_per_channel;
 static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
 
@@ -687,8 +688,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
        new_sc->next_request_id_callback = storvsc_next_request_id;
 
        ret = vmbus_open(new_sc,
-                        storvsc_ringbuffer_size,
-                        storvsc_ringbuffer_size,
+                        aligned_ringbuffer_size,
+                        aligned_ringbuffer_size,
                         (void *)&props,
                         sizeof(struct vmstorage_channel_properties),
                         storvsc_on_channel_callback, new_sc);
@@ -1973,7 +1974,7 @@ static int storvsc_probe(struct hv_device *device,
        dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
 
        stor_device->port_number = host->host_no;
-       ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
+       ret = storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc);
        if (ret)
                goto err_out1;
 
@@ -2164,7 +2165,7 @@ static int storvsc_resume(struct hv_device *hv_dev)
 {
        int ret;
 
-       ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
+       ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size,
                                     hv_dev_is_fc(hv_dev));
        return ret;
 }
@@ -2198,8 +2199,9 @@ static int __init storvsc_drv_init(void)
         * the ring buffer indices) by the max request size (which is
         * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
         */
+       aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
        max_outstanding_req_per_channel =
-               ((storvsc_ringbuffer_size - PAGE_SIZE) /
+               ((aligned_ringbuffer_size - PAGE_SIZE) /
                ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
                sizeof(struct vstor_packet) + sizeof(u64),
                sizeof(u64)));
index 9d1bdcdc13312988b2ee052c3736bb548f21ffba..617eb892f4ad457feb5d4de3d9c1ceb88a010c61 100644 (file)
 #define VIRTIO_SCSI_EVENT_LEN 8
 #define VIRTIO_SCSI_VQ_BASE 2
 
+static unsigned int virtscsi_poll_queues;
+module_param(virtscsi_poll_queues, uint, 0644);
+MODULE_PARM_DESC(virtscsi_poll_queues,
+                "The number of dedicated virtqueues for polling I/O");
+
 /* Command queue element */
 struct virtio_scsi_cmd {
        struct scsi_cmnd *sc;
@@ -76,6 +81,7 @@ struct virtio_scsi {
        struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
 
        u32 num_queues;
+       int io_queues[HCTX_MAX_TYPES];
 
        struct hlist_node node;
 
@@ -182,8 +188,6 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi,
                while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                        fn(vscsi, buf);
 
-               if (unlikely(virtqueue_is_broken(vq)))
-                       break;
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
 }
@@ -722,9 +726,49 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
 static void virtscsi_map_queues(struct Scsi_Host *shost)
 {
        struct virtio_scsi *vscsi = shost_priv(shost);
-       struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+       int i, qoff;
+
+       for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+               struct blk_mq_queue_map *map = &shost->tag_set.map[i];
+
+               map->nr_queues = vscsi->io_queues[i];
+               map->queue_offset = qoff;
+               qoff += map->nr_queues;
+
+               if (map->nr_queues == 0)
+                       continue;
+
+               /*
+                * Regular queues have interrupts and hence CPU affinity is
+                * defined by the core virtio code, but polling queues have
+                * no interrupts so we let the block layer assign CPU affinity.
+                */
+               if (i == HCTX_TYPE_POLL)
+                       blk_mq_map_queues(map);
+               else
+                       blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
+       }
+}
+
+static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+       struct virtio_scsi *vscsi = shost_priv(shost);
+       struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num];
+       unsigned long flags;
+       unsigned int len;
+       int found = 0;
+       void *buf;
+
+       spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
+
+       while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) {
+               virtscsi_complete_cmd(vscsi, buf);
+               found++;
+       }
+
+       spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
 
-       blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+       return found;
 }
 
 static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
@@ -751,6 +795,7 @@ static const struct scsi_host_template virtscsi_host_template = {
        .this_id = -1,
        .cmd_size = sizeof(struct virtio_scsi_cmd),
        .queuecommand = virtscsi_queuecommand,
+       .mq_poll = virtscsi_mq_poll,
        .commit_rqs = virtscsi_commit_rqs,
        .change_queue_depth = virtscsi_change_queue_depth,
        .eh_abort_handler = virtscsi_abort,
@@ -795,13 +840,14 @@ static int virtscsi_init(struct virtio_device *vdev,
 {
        int err;
        u32 i;
-       u32 num_vqs;
+       u32 num_vqs, num_poll_vqs, num_req_vqs;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        struct irq_affinity desc = { .pre_vectors = 2 };
 
-       num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
+       num_req_vqs = vscsi->num_queues;
+       num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
        vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
                                  GFP_KERNEL);
@@ -812,15 +858,31 @@ static int virtscsi_init(struct virtio_device *vdev,
                goto out;
        }
 
+       num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues,
+                            num_req_vqs - 1);
+       vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs;
+       vscsi->io_queues[HCTX_TYPE_READ] = 0;
+       vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
+
+       dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
+                vscsi->io_queues[HCTX_TYPE_DEFAULT],
+                vscsi->io_queues[HCTX_TYPE_READ],
+                vscsi->io_queues[HCTX_TYPE_POLL]);
+
        callbacks[0] = virtscsi_ctrl_done;
        callbacks[1] = virtscsi_event_done;
        names[0] = "control";
        names[1] = "event";
-       for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
+       for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) {
                callbacks[i] = virtscsi_req_done;
                names[i] = "request";
        }
 
+       for (; i < num_vqs; i++) {
+               callbacks[i] = NULL;
+               names[i] = "request_poll";
+       }
+
        /* Discover virtqueues and write information to configuration.  */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
@@ -874,6 +936,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
        shost->sg_tablesize = sg_elems;
+       shost->nr_maps = 1;
        vscsi = shost_priv(shost);
        vscsi->vdev = vdev;
        vscsi->num_queues = num_queues;
@@ -883,6 +946,9 @@ static int virtscsi_probe(struct virtio_device *vdev)
        if (err)
                goto virtscsi_init_failed;
 
+       if (vscsi->io_queues[HCTX_TYPE_POLL])
+               shost->nr_maps = HCTX_TYPE_POLL + 1;
+
        shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);
 
        cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
index e05473c5c267f454437886996993f6c00bcfc301..16018009a5a67aa6c2f641ca407e02f57196d6a2 100644 (file)
@@ -59,6 +59,7 @@ struct maple_device_specify {
 static bool checked[MAPLE_PORTS];
 static bool empty[MAPLE_PORTS];
 static struct maple_device *baseunits[MAPLE_PORTS];
+static const struct bus_type maple_bus_type;
 
 /**
  * maple_driver_register - register a maple driver
@@ -773,11 +774,10 @@ static struct maple_driver maple_unsupported_device = {
 /*
  * maple_bus_type - core maple bus structure
  */
-struct bus_type maple_bus_type = {
+static const struct bus_type maple_bus_type = {
        .name = "maple",
        .match = maple_match_bus_driver,
 };
-EXPORT_SYMBOL_GPL(maple_bus_type);
 
 static struct device maple_bus = {
        .init_name = "maple",
index 780199bf351efbfb24422880ef39510c77def68f..49a0955e82d6cf5eef83e5f63ba8d31194c65324 100644 (file)
@@ -296,14 +296,14 @@ struct apple_mbox *apple_mbox_get(struct device *dev, int index)
        of_node_put(args.np);
 
        if (!pdev)
-               return ERR_PTR(EPROBE_DEFER);
+               return ERR_PTR(-EPROBE_DEFER);
 
        mbox = platform_get_drvdata(pdev);
        if (!mbox)
-               return ERR_PTR(EPROBE_DEFER);
+               return ERR_PTR(-EPROBE_DEFER);
 
        if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER))
-               return ERR_PTR(ENODEV);
+               return ERR_PTR(-ENODEV);
 
        return mbox;
 }
index 042553abe1bf84b3af080661c6a0f0cd826b9e5f..253299e4214d09f9177ef1cd0b1f6d06fc75806c 100644 (file)
@@ -77,11 +77,26 @@ struct registered_event_data {
 
 static bool xlnx_is_error_event(const u32 node_id)
 {
-       if (node_id == EVENT_ERROR_PMC_ERR1 ||
-           node_id == EVENT_ERROR_PMC_ERR2 ||
-           node_id == EVENT_ERROR_PSM_ERR1 ||
-           node_id == EVENT_ERROR_PSM_ERR2)
-               return true;
+       u32 pm_family_code, pm_sub_family_code;
+
+       zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
+
+       if (pm_sub_family_code == VERSAL_SUB_FAMILY_CODE) {
+               if (node_id == VERSAL_EVENT_ERROR_PMC_ERR1 ||
+                   node_id == VERSAL_EVENT_ERROR_PMC_ERR2 ||
+                   node_id == VERSAL_EVENT_ERROR_PSM_ERR1 ||
+                   node_id == VERSAL_EVENT_ERROR_PSM_ERR2)
+                       return true;
+       } else {
+               if (node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR1 ||
+                   node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR2 ||
+                   node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR3 ||
+                   node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR1 ||
+                   node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR2 ||
+                   node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR3 ||
+                   node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR4)
+                       return true;
+       }
 
        return false;
 }
@@ -483,7 +498,7 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
 
 static void xlnx_get_event_callback_data(u32 *buf)
 {
-       zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+       zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);
 }
 
 static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
@@ -656,7 +671,11 @@ static int xlnx_event_manager_probe(struct platform_device *pdev)
 
        ret = zynqmp_pm_register_sgi(sgi_num, 0);
        if (ret) {
-               dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
+               if (ret == -EOPNOTSUPP)
+                       dev_err(&pdev->dev, "SGI registration not supported by TF-A or Xen\n");
+               else
+                       dev_err(&pdev->dev, "SGI %d registration failed, err %d\n", sgi_num, ret);
+
                xlnx_event_cleanup_sgi(pdev);
                return ret;
        }
index 07d735b38b499681280fa17832afb53ba5b9e96f..965b1143936abe3d826bbd15207374099643bba5 100644 (file)
@@ -51,7 +51,7 @@ static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
 
 static void zynqmp_pm_get_callback_data(u32 *buf)
 {
-       zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+       zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);
 }
 
 static void suspend_event_callback(const u32 *payload, void *data)
index 3a99f6dcdfafa24b9115ea7358837a57aa95a423..f54bb4dd2d1016a5c18abd53356ee02bf101a19a 100644 (file)
@@ -927,6 +927,14 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
        amd_manager->bus.clk_stop_timeout = 200;
        amd_manager->bus.link_id = amd_manager->instance;
 
+       /*
+        * Due to BIOS compatibility, the two links are exposed within
+        * the scope of a single controller. If this changes, the
+        * controller_id will have to be updated with drv_data
+        * information.
+        */
+       amd_manager->bus.controller_id = 0;
+
        switch (amd_manager->instance) {
        case ACP_SDW0:
                amd_manager->num_dout_ports = AMD_SDW0_MAX_TX_PORTS;
@@ -942,13 +950,13 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
 
        amd_manager->reg_mask = &sdw_manager_reg_mask_array[amd_manager->instance];
        params = &amd_manager->bus.params;
-       params->max_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
-       params->curr_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
+
        params->col = AMD_SDW_DEFAULT_COLUMNS;
        params->row = AMD_SDW_DEFAULT_ROWS;
        prop = &amd_manager->bus.prop;
        prop->clk_freq = &amd_sdw_freq_tbl[0];
        prop->mclk_freq = AMD_SDW_BUS_BASE_FREQ;
+       prop->max_clk_freq = AMD_SDW_DEFAULT_CLK_FREQ;
 
        ret = sdw_bus_master_add(&amd_manager->bus, dev, dev->fwnode);
        if (ret) {
index 41b0d9adf68ef349053b0fe8317bc9817eba4f31..f3fec15c311229f7a1da48bfb3de416ed52a95d6 100644 (file)
@@ -22,6 +22,10 @@ static int sdw_get_id(struct sdw_bus *bus)
                return rc;
 
        bus->id = rc;
+
+       if (bus->controller_id == -1)
+               bus->controller_id = rc;
+
        return 0;
 }
 
index d1553cb77187471b40e4601f3cd5399e241ad076..67abd7e52f092a988a2e3f20e253dd76c53b178f 100644 (file)
@@ -20,7 +20,7 @@ void sdw_bus_debugfs_init(struct sdw_bus *bus)
                return;
 
        /* create the debugfs master-N */
-       snprintf(name, sizeof(name), "master-%d-%d", bus->id, bus->link_id);
+       snprintf(name, sizeof(name), "master-%d-%d", bus->controller_id, bus->link_id);
        bus->debugfs = debugfs_create_dir(name, sdw_debugfs_root);
 }
 
index 31162f2b563811569dcf1bec7a0fc0757efa72f4..c70a63d009ae4b0ae4184952540f3a48b2b400d3 100644 (file)
@@ -333,7 +333,7 @@ static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
  */
 static int sdw_compute_bus_params(struct sdw_bus *bus)
 {
-       unsigned int max_dr_freq, curr_dr_freq = 0;
+       unsigned int curr_dr_freq = 0;
        struct sdw_master_prop *mstr_prop = &bus->prop;
        int i, clk_values, ret;
        bool is_gear = false;
@@ -351,14 +351,12 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
                clk_buf = NULL;
        }
 
-       max_dr_freq = mstr_prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
-
        for (i = 0; i < clk_values; i++) {
                if (!clk_buf)
-                       curr_dr_freq = max_dr_freq;
+                       curr_dr_freq = bus->params.max_dr_freq;
                else
                        curr_dr_freq = (is_gear) ?
-                               (max_dr_freq >>  clk_buf[i]) :
+                               (bus->params.max_dr_freq >>  clk_buf[i]) :
                                clk_buf[i] * SDW_DOUBLE_RATE_FACTOR;
 
                if (curr_dr_freq <= bus->params.bandwidth)
index 7f15e3549e539d053a82b63b8346a362c6d0145c..93698532deac4098706099dfceaf85055f563b09 100644 (file)
@@ -234,6 +234,9 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
        cdns->instance = sdw->instance;
        cdns->msg_count = 0;
 
+       /* single controller for all SoundWire links */
+       bus->controller_id = 0;
+
        bus->link_id = auxdev->id;
        bus->clk_stop_timeout = 1;
 
index 9b05c9e25ebe48a7d135ff45ad4b3af0fb1cb2aa..51abedbbaa6630e53ab301b85d6dcefe2f8ce9ac 100644 (file)
@@ -145,7 +145,7 @@ int sdw_master_device_add(struct sdw_bus *bus, struct device *parent,
        md->dev.fwnode = fwnode;
        md->dev.dma_mask = parent->dma_mask;
 
-       dev_set_name(&md->dev, "sdw-master-%d", bus->id);
+       dev_set_name(&md->dev, "sdw-master-%d-%d", bus->controller_id, bus->link_id);
 
        ret = device_register(&md->dev);
        if (ret) {
index 8076d40407d4d2647dcc658b760cdeddd02c3a21..3c4d6debab1f36b2648174071b7ad153ca4aac09 100644 (file)
@@ -1157,9 +1157,20 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
        struct sdw_port_runtime *p_rt;
        struct sdw_slave *slave;
        unsigned long *port_mask;
-       int i, maxport, pn, nports = 0, ret = 0;
+       int maxport, pn, nports = 0, ret = 0;
        unsigned int m_port;
 
+       if (direction == SNDRV_PCM_STREAM_CAPTURE)
+               sconfig.direction = SDW_DATA_DIR_TX;
+       else
+               sconfig.direction = SDW_DATA_DIR_RX;
+
+       /* hw parameters will be ignored as we only support PDM */
+       sconfig.ch_count = 1;
+       sconfig.frame_rate = params_rate(params);
+       sconfig.type = stream->type;
+       sconfig.bps = 1;
+
        mutex_lock(&ctrl->port_lock);
        list_for_each_entry(m_rt, &stream->master_list, stream_node) {
                if (m_rt->direction == SDW_DATA_DIR_RX) {
@@ -1183,7 +1194,7 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
                                if (pn > maxport) {
                                        dev_err(ctrl->dev, "All ports busy\n");
                                        ret = -EBUSY;
-                                       goto err;
+                                       goto out;
                                }
                                set_bit(pn, port_mask);
                                pconfig[nports].num = pn;
@@ -1193,24 +1204,9 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
                }
        }
 
-       if (direction == SNDRV_PCM_STREAM_CAPTURE)
-               sconfig.direction = SDW_DATA_DIR_TX;
-       else
-               sconfig.direction = SDW_DATA_DIR_RX;
-
-       /* hw parameters wil be ignored as we only support PDM */
-       sconfig.ch_count = 1;
-       sconfig.frame_rate = params_rate(params);
-       sconfig.type = stream->type;
-       sconfig.bps = 1;
        sdw_stream_add_master(&ctrl->bus, &sconfig, pconfig,
                              nports, stream);
-err:
-       if (ret) {
-               for (i = 0; i < nports; i++)
-                       clear_bit(pconfig[i].num, port_mask);
-       }
-
+out:
        mutex_unlock(&ctrl->port_lock);
 
        return ret;
@@ -1593,6 +1589,13 @@ static int qcom_swrm_probe(struct platform_device *pdev)
                }
        }
 
+       ctrl->bus.controller_id = -1;
+
+       if (ctrl->version > SWRM_VERSION_1_3_0) {
+               ctrl->reg_read(ctrl, SWRM_COMP_MASTER_ID, &val);
+               ctrl->bus.controller_id = val;
+       }
+
        ret = sdw_bus_master_add(&ctrl->bus, dev, dev->fwnode);
        if (ret) {
                dev_err(dev, "Failed to register Soundwire controller (%d)\n",
index c1c1a2ac293af4eed496edf98c89178eee5f9818..060c2982e26b009d561eafcbf81b888ada781fd4 100644 (file)
@@ -39,14 +39,14 @@ int sdw_slave_add(struct sdw_bus *bus,
        slave->dev.fwnode = fwnode;
 
        if (id->unique_id == SDW_IGNORED_UNIQUE_ID) {
-               /* name shall be sdw:link:mfg:part:class */
-               dev_set_name(&slave->dev, "sdw:%01x:%04x:%04x:%02x",
-                            bus->link_id, id->mfg_id, id->part_id,
+               /* name shall be sdw:ctrl:link:mfg:part:class */
+               dev_set_name(&slave->dev, "sdw:%01x:%01x:%04x:%04x:%02x",
+                            bus->controller_id, bus->link_id, id->mfg_id, id->part_id,
                             id->class_id);
        } else {
-               /* name shall be sdw:link:mfg:part:class:unique */
-               dev_set_name(&slave->dev, "sdw:%01x:%04x:%04x:%02x:%01x",
-                            bus->link_id, id->mfg_id, id->part_id,
+               /* name shall be sdw:ctrl:link:mfg:part:class:unique */
+               dev_set_name(&slave->dev, "sdw:%01x:%01x:%04x:%04x:%02x:%01x",
+                            bus->controller_id, bus->link_id, id->mfg_id, id->part_id,
                             id->class_id, id->unique_id);
        }
 
index f048b3d55b2edcb32269369aa7c2b5dda07414ea..f9c0adc0738db27a7578509edbfbcd82c60b5206 100644 (file)
@@ -898,7 +898,7 @@ static struct sdw_port_runtime *sdw_port_alloc(struct list_head *port_list)
 }
 
 static int sdw_port_config(struct sdw_port_runtime *p_rt,
-                          struct sdw_port_config *port_config,
+                          const struct sdw_port_config *port_config,
                           int port_index)
 {
        p_rt->ch_mask = port_config[port_index].ch_mask;
@@ -971,7 +971,7 @@ static int sdw_slave_port_is_valid_range(struct device *dev, int num)
 
 static int sdw_slave_port_config(struct sdw_slave *slave,
                                 struct sdw_slave_runtime *s_rt,
-                                struct sdw_port_config *port_config)
+                                const struct sdw_port_config *port_config)
 {
        struct sdw_port_runtime *p_rt;
        int ret;
@@ -1027,7 +1027,7 @@ static int sdw_master_port_alloc(struct sdw_master_runtime *m_rt,
 }
 
 static int sdw_master_port_config(struct sdw_master_runtime *m_rt,
-                                 struct sdw_port_config *port_config)
+                                 const struct sdw_port_config *port_config)
 {
        struct sdw_port_runtime *p_rt;
        int ret;
@@ -1862,7 +1862,7 @@ EXPORT_SYMBOL(sdw_release_stream);
  */
 int sdw_stream_add_master(struct sdw_bus *bus,
                          struct sdw_stream_config *stream_config,
-                         struct sdw_port_config *port_config,
+                         const struct sdw_port_config *port_config,
                          unsigned int num_ports,
                          struct sdw_stream_runtime *stream)
 {
@@ -1982,7 +1982,7 @@ EXPORT_SYMBOL(sdw_stream_remove_master);
  */
 int sdw_stream_add_slave(struct sdw_slave *slave,
                         struct sdw_stream_config *stream_config,
-                        struct sdw_port_config *port_config,
+                        const struct sdw_port_config *port_config,
                         unsigned int num_ports,
                         struct sdw_stream_runtime *stream)
 {
index d96222e6d7d2d4022753d3120b4c36ea759dad75..cfdaa5eaec76db9b322272b54d4fdfdcff3db697 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/spi-mem.h>
+#include <linux/mtd/spi-nor.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
 #include "spi-bcm-qspi.h"
@@ -1221,7 +1221,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
 
        /* non-aligned and very short transfers are handled by MSPI */
        if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
-           len < 4)
+           len < 4 || op->cmd.opcode == SPINOR_OP_RDSFDP)
                mspi_read = true;
 
        if (!has_bspi(qspi) || mspi_read)
index a50eb4db79de8e93cb61a9ea50bc8913ed3e4f1f..e5140532071d2b647ab77fa561f27630a334971a 100644 (file)
@@ -317,6 +317,15 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
        xspi->rx_bytes -= nrx;
 
        while (ntx || nrx) {
+               if (nrx) {
+                       u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+
+                       if (xspi->rxbuf)
+                               *xspi->rxbuf++ = data;
+
+                       nrx--;
+               }
+
                if (ntx) {
                        if (xspi->txbuf)
                                cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
@@ -326,14 +335,6 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
                        ntx--;
                }
 
-               if (nrx) {
-                       u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
-
-                       if (xspi->rxbuf)
-                               *xspi->rxbuf++ = data;
-
-                       nrx--;
-               }
        }
 }
 
index f0b630fe16c3c8a79480b6f73718dc5f2a98b649..b341b6908df06db192ff5e7f5590f839ac9c3978 100644 (file)
@@ -441,7 +441,6 @@ static void mcfqspi_remove(struct platform_device *pdev)
        mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
 
        mcfqspi_cs_teardown(mcfqspi);
-       clk_disable_unprepare(mcfqspi->clk);
 }
 
 #ifdef CONFIG_PM_SLEEP
index f13073e1259364640b16b57323e5d027c10bdb0f..b24190526ce96420fe885e585b00fb820502bacd 100644 (file)
@@ -244,7 +244,10 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
        priv->ctlr->use_gpio_descriptors = true;
        priv->ctlr->auto_runtime_pm = true;
 
-       devm_pm_runtime_enable(priv->dev);
+       ret = devm_pm_runtime_enable(priv->dev);
+       if (ret)
+               return ret;
+
        pm_runtime_idle(priv->dev);
 
        regmap_write(priv->regmap, CS42L43_TRAN_CONFIG6, CS42L43_FIFO_SIZE - 1);
index 9d22018f7985f11956fae5e06ffb0dbd180914f9..1301d14483d482dcaf05250a563a414db73c9dd4 100644 (file)
@@ -377,6 +377,11 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
 static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
 {
        struct hisi_sfc_v3xx_host *host = data;
+       u32 reg;
+
+       reg = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);
+       if (!reg)
+               return IRQ_NONE;
 
        hisi_sfc_v3xx_disable_int(host);
 
index 272bc871a848b833e6e673740f4be5f8f3a16294..546cdce525fc5b1b49b305b872e81d2b0aed0cb5 100644 (file)
@@ -1344,7 +1344,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
        controller->dma_tx = dma_request_chan(dev, "tx");
        if (IS_ERR(controller->dma_tx)) {
                ret = PTR_ERR(controller->dma_tx);
-               dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
+               dev_err_probe(dev, ret, "can't get the TX DMA channel!\n");
                controller->dma_tx = NULL;
                goto err;
        }
@@ -1353,7 +1353,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
        controller->dma_rx = dma_request_chan(dev, "rx");
        if (IS_ERR(controller->dma_rx)) {
                ret = PTR_ERR(controller->dma_rx);
-               dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
+               dev_err_probe(dev, ret, "can't get the RX DMA channel!\n");
                controller->dma_rx = NULL;
                goto err;
        }
index 57d767a68e7b2766dcea5510809cf2f09e0bef63..07d20ca1164c357813e075b7a1a6763da735ab0a 100644 (file)
@@ -76,6 +76,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
+       { PCI_VDEVICE(INTEL, 0x7f24), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x9d24), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x9da4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&cnl_info },
@@ -84,7 +85,6 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
-       { PCI_VDEVICE(INTEL, 0xae23), (unsigned long)&cnl_info },
        { },
 };
 MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
index cfc3b1ddbd229f04885db1b610298e63b623132f..6f12e4fb2e2e184f1bb4cf9fe12e5437384fc4ac 100644 (file)
@@ -136,14 +136,14 @@ struct sh_msiof_spi_priv {
 
 /* SIFCTR */
 #define SIFCTR_TFWM_MASK       GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define SIFCTR_TFWM_64         (0 << 29)       /*  Transfer Request when 64 empty stages */
-#define SIFCTR_TFWM_32         (1 << 29)       /*  Transfer Request when 32 empty stages */
-#define SIFCTR_TFWM_24         (2 << 29)       /*  Transfer Request when 24 empty stages */
-#define SIFCTR_TFWM_16         (3 << 29)       /*  Transfer Request when 16 empty stages */
-#define SIFCTR_TFWM_12         (4 << 29)       /*  Transfer Request when 12 empty stages */
-#define SIFCTR_TFWM_8          (5 << 29)       /*  Transfer Request when 8 empty stages */
-#define SIFCTR_TFWM_4          (6 << 29)       /*  Transfer Request when 4 empty stages */
-#define SIFCTR_TFWM_1          (7 << 29)       /*  Transfer Request when 1 empty stage */
+#define SIFCTR_TFWM_64         (0UL << 29)     /*  Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32         (1UL << 29)     /*  Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24         (2UL << 29)     /*  Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16         (3UL << 29)     /*  Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12         (4UL << 29)     /*  Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8          (5UL << 29)     /*  Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4          (6UL << 29)     /*  Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1          (7UL << 29)     /*  Transfer Request when 1 empty stage */
 #define SIFCTR_TFUA_MASK       GENMASK(26, 20) /* Transmit FIFO Usable Area */
 #define SIFCTR_TFUA_SHIFT      20
 #define SIFCTR_TFUA(i)         ((i) << SIFCTR_TFUA_SHIFT)
index 7477a11e12be0e2bf47006ce9e579fdd9f1fda30..f2170f4b50775ea175c3d0c1e4a7ef0f809e6a52 100644 (file)
@@ -1717,6 +1717,10 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
                        pm_runtime_put_noidle(ctlr->dev.parent);
                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
                                ret);
+
+                       msg->status = ret;
+                       spi_finalize_current_message(ctlr);
+
                        return ret;
                }
        }
index 9d974424c8c183d837162b1283dc1953561e43c4..7f152167bb05b2c24a0f9669f60278152898eebb 100644 (file)
@@ -2,7 +2,7 @@
 #
 # Makefile for kernel SPMI framework.
 #
-obj-$(CONFIG_SPMI)     += spmi.o
+obj-$(CONFIG_SPMI)     += spmi.o spmi-devres.o
 
 obj-$(CONFIG_SPMI_HISI3670)    += hisi-spmi-controller.o
 obj-$(CONFIG_SPMI_MSM_PMIC_ARB)        += spmi-pmic-arb.o
index 9cbd473487cb0d58ae664894de174bd1c7a13f9e..674a350cc6769a890d13cebb338ea2e5fc94d796 100644 (file)
@@ -267,10 +267,10 @@ static int spmi_controller_probe(struct platform_device *pdev)
        struct resource *iores;
        int ret;
 
-       ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*spmi_controller));
-       if (!ctrl) {
+       ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*spmi_controller));
+       if (IS_ERR(ctrl)) {
                dev_err(&pdev->dev, "can not allocate spmi_controller data\n");
-               return -ENOMEM;
+               return PTR_ERR(ctrl);
        }
        spmi_controller = spmi_controller_get_drvdata(ctrl);
        spmi_controller->controller = ctrl;
@@ -278,24 +278,21 @@ static int spmi_controller_probe(struct platform_device *pdev)
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!iores) {
                dev_err(&pdev->dev, "can not get resource!\n");
-               ret = -EINVAL;
-               goto err_put_controller;
+               return -EINVAL;
        }
 
        spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
                                             resource_size(iores));
        if (!spmi_controller->base) {
                dev_err(&pdev->dev, "can not remap base addr!\n");
-               ret = -EADDRNOTAVAIL;
-               goto err_put_controller;
+               return -EADDRNOTAVAIL;
        }
 
        ret = of_property_read_u32(pdev->dev.of_node, "hisilicon,spmi-channel",
                                   &spmi_controller->channel);
        if (ret) {
                dev_err(&pdev->dev, "can not get channel\n");
-               ret = -ENODEV;
-               goto err_put_controller;
+               return -ENODEV;
        }
 
        platform_set_drvdata(pdev, spmi_controller);
@@ -311,25 +308,13 @@ static int spmi_controller_probe(struct platform_device *pdev)
        ctrl->read_cmd = spmi_read_cmd;
        ctrl->write_cmd = spmi_write_cmd;
 
-       ret = spmi_controller_add(ctrl);
+       ret = devm_spmi_controller_add(&pdev->dev, ctrl);
        if (ret) {
                dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
-               goto err_put_controller;
+               return ret;
        }
 
        return 0;
-
-err_put_controller:
-       spmi_controller_put(ctrl);
-       return ret;
-}
-
-static void spmi_del_controller(struct platform_device *pdev)
-{
-       struct spmi_controller *ctrl = platform_get_drvdata(pdev);
-
-       spmi_controller_remove(ctrl);
-       spmi_controller_put(ctrl);
 }
 
 static const struct of_device_id spmi_controller_match_table[] = {
@@ -342,7 +327,6 @@ MODULE_DEVICE_TABLE(of, spmi_controller_match_table);
 
 static struct platform_driver spmi_controller_driver = {
        .probe          = spmi_controller_probe,
-       .remove_new     = spmi_del_controller,
        .driver         = {
                .name   = "hisi_spmi_controller",
                .of_match_table = spmi_controller_match_table,
diff --git a/drivers/spmi/spmi-devres.c b/drivers/spmi/spmi-devres.c
new file mode 100644 (file)
index 0000000..62c4b3f
--- /dev/null
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2023 Google LLC.
+ */
+
+#include <linux/device.h>
+#include <linux/spmi.h>
+
+static void devm_spmi_controller_release(struct device *parent, void *res)
+{
+       spmi_controller_put(*(struct spmi_controller **)res);
+}
+
+struct spmi_controller *devm_spmi_controller_alloc(struct device *parent, size_t size)
+{
+       struct spmi_controller **ptr, *ctrl;
+
+       ptr = devres_alloc(devm_spmi_controller_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return ERR_PTR(-ENOMEM);
+
+       ctrl = spmi_controller_alloc(parent, size);
+       if (IS_ERR(ctrl)) {
+               devres_free(ptr);
+               return ctrl;
+       }
+
+       *ptr = ctrl;
+       devres_add(parent, ptr);
+
+       return ctrl;
+}
+EXPORT_SYMBOL_GPL(devm_spmi_controller_alloc);
+
+static void devm_spmi_controller_remove(struct device *parent, void *res)
+{
+       spmi_controller_remove(*(struct spmi_controller **)res);
+}
+
+int devm_spmi_controller_add(struct device *parent, struct spmi_controller *ctrl)
+{
+       struct spmi_controller **ptr;
+       int ret;
+
+       ptr = devres_alloc(devm_spmi_controller_remove, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       ret = spmi_controller_add(ctrl);
+       if (ret) {
+               devres_free(ptr);
+               return ret;
+       }
+
+       *ptr = ctrl;
+       devres_add(parent, ptr);
+
+       return 0;
+
+}
+EXPORT_SYMBOL_GPL(devm_spmi_controller_add);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SPMI devres helpers");
index b3c991e1ea40da36832733acb13f679ddd8de0f8..5079442f8ea1664e467aac50441be11fffffe6bc 100644 (file)
@@ -50,6 +50,7 @@ struct pmif {
        struct clk_bulk_data clks[PMIF_MAX_CLKS];
        size_t nclks;
        const struct pmif_data *data;
+       raw_spinlock_t lock;
 };
 
 static const char * const pmif_clock_names[] = {
@@ -314,6 +315,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        struct ch_reg *inf_reg;
        int ret;
        u32 data, cmd;
+       unsigned long flags;
 
        /* Check for argument validation. */
        if (sid & ~0xf) {
@@ -334,6 +336,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        else
                return -EINVAL;
 
+       raw_spin_lock_irqsave(&arb->lock, flags);
        /* Wait for Software Interface FSM state to be IDLE. */
        inf_reg = &arb->chan;
        ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
@@ -343,6 +346,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
                /* set channel ready if the data has transferred */
                if (pmif_is_fsm_vldclr(arb))
                        pmif_writel(arb, 1, inf_reg->ch_rdy);
+               raw_spin_unlock_irqrestore(&arb->lock, flags);
                dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
                return ret;
        }
@@ -350,6 +354,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        /* Send the command. */
        cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr;
        pmif_writel(arb, cmd, inf_reg->ch_send);
+       raw_spin_unlock_irqrestore(&arb->lock, flags);
 
        /*
         * Wait for Software Interface FSM state to be WFVLDCLR,
@@ -376,7 +381,14 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        struct pmif *arb = spmi_controller_get_drvdata(ctrl);
        struct ch_reg *inf_reg;
        int ret;
-       u32 data, cmd;
+       u32 data, wdata, cmd;
+       unsigned long flags;
+
+       /* Check for argument validation. */
+       if (unlikely(sid & ~0xf)) {
+               dev_err(&ctrl->dev, "exceed the max slv id\n");
+               return -EINVAL;
+       }
 
        if (len > 4) {
                dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len);
@@ -394,6 +406,10 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        else
                return -EINVAL;
 
+       /* Set the write data. */
+       memcpy(&wdata, buf, len);
+
+       raw_spin_lock_irqsave(&arb->lock, flags);
        /* Wait for Software Interface FSM state to be IDLE. */
        inf_reg = &arb->chan;
        ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
@@ -403,17 +419,17 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
                /* set channel ready if the data has transferred */
                if (pmif_is_fsm_vldclr(arb))
                        pmif_writel(arb, 1, inf_reg->ch_rdy);
+               raw_spin_unlock_irqrestore(&arb->lock, flags);
                dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
                return ret;
        }
 
-       /* Set the write data. */
-       memcpy(&data, buf, len);
-       pmif_writel(arb, data, inf_reg->wdata);
+       pmif_writel(arb, wdata, inf_reg->wdata);
 
        /* Send the command. */
        cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr;
        pmif_writel(arb, cmd, inf_reg->ch_send);
+       raw_spin_unlock_irqrestore(&arb->lock, flags);
 
        return 0;
 }
@@ -437,44 +453,39 @@ static int mtk_spmi_probe(struct platform_device *pdev)
        int err, i;
        u32 chan_offset;
 
-       ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*arb));
-       if (!ctrl)
-               return -ENOMEM;
+       ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*arb));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
 
        arb = spmi_controller_get_drvdata(ctrl);
        arb->data = device_get_match_data(&pdev->dev);
        if (!arb->data) {
-               err = -EINVAL;
                dev_err(&pdev->dev, "Cannot get drv_data\n");
-               goto err_put_ctrl;
+               return -EINVAL;
        }
 
        arb->base = devm_platform_ioremap_resource_byname(pdev, "pmif");
-       if (IS_ERR(arb->base)) {
-               err = PTR_ERR(arb->base);
-               goto err_put_ctrl;
-       }
+       if (IS_ERR(arb->base))
+               return PTR_ERR(arb->base);
 
        arb->spmimst_base = devm_platform_ioremap_resource_byname(pdev, "spmimst");
-       if (IS_ERR(arb->spmimst_base)) {
-               err = PTR_ERR(arb->spmimst_base);
-               goto err_put_ctrl;
-       }
+       if (IS_ERR(arb->spmimst_base))
+               return PTR_ERR(arb->spmimst_base);
 
        arb->nclks = ARRAY_SIZE(pmif_clock_names);
        for (i = 0; i < arb->nclks; i++)
                arb->clks[i].id = pmif_clock_names[i];
 
-       err = devm_clk_bulk_get(&pdev->dev, arb->nclks, arb->clks);
+       err = clk_bulk_get(&pdev->dev, arb->nclks, arb->clks);
        if (err) {
                dev_err(&pdev->dev, "Failed to get clocks: %d\n", err);
-               goto err_put_ctrl;
+               return err;
        }
 
        err = clk_bulk_prepare_enable(arb->nclks, arb->clks);
        if (err) {
                dev_err(&pdev->dev, "Failed to enable clocks: %d\n", err);
-               goto err_put_ctrl;
+               goto err_put_clks;
        }
 
        ctrl->cmd = pmif_arb_cmd;
@@ -488,6 +499,8 @@ static int mtk_spmi_probe(struct platform_device *pdev)
        arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset;
        arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset;
 
+       raw_spin_lock_init(&arb->lock);
+
        platform_set_drvdata(pdev, ctrl);
 
        err = spmi_controller_add(ctrl);
@@ -498,8 +511,8 @@ static int mtk_spmi_probe(struct platform_device *pdev)
 
 err_domain_remove:
        clk_bulk_disable_unprepare(arb->nclks, arb->clks);
-err_put_ctrl:
-       spmi_controller_put(ctrl);
+err_put_clks:
+       clk_bulk_put(arb->nclks, arb->clks);
        return err;
 }
 
@@ -508,9 +521,9 @@ static void mtk_spmi_remove(struct platform_device *pdev)
        struct spmi_controller *ctrl = platform_get_drvdata(pdev);
        struct pmif *arb = spmi_controller_get_drvdata(ctrl);
 
-       clk_bulk_disable_unprepare(arb->nclks, arb->clks);
        spmi_controller_remove(ctrl);
-       spmi_controller_put(ctrl);
+       clk_bulk_disable_unprepare(arb->nclks, arb->clks);
+       clk_bulk_put(arb->nclks, arb->clks);
 }
 
 static const struct of_device_id mtk_spmi_match_table[] = {
index dcb675d980d478b7d0457b7c60daafb2ead06ff3..9ed1180fe31f14799dc6e358882ad286f32b2300 100644 (file)
@@ -1443,9 +1443,9 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
        u32 channel, ee, hw_ver;
        int err;
 
-       ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb));
-       if (!ctrl)
-               return -ENOMEM;
+       ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
 
        pmic_arb = spmi_controller_get_drvdata(ctrl);
        pmic_arb->spmic = ctrl;
@@ -1462,20 +1462,16 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
         */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
        core = devm_ioremap(&ctrl->dev, res->start, resource_size(res));
-       if (IS_ERR(core)) {
-               err = PTR_ERR(core);
-               goto err_put_ctrl;
-       }
+       if (IS_ERR(core))
+               return PTR_ERR(core);
 
        pmic_arb->core_size = resource_size(res);
 
        pmic_arb->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
                                              sizeof(*pmic_arb->ppid_to_apid),
                                              GFP_KERNEL);
-       if (!pmic_arb->ppid_to_apid) {
-               err = -ENOMEM;
-               goto err_put_ctrl;
-       }
+       if (!pmic_arb->ppid_to_apid)
+               return -ENOMEM;
 
        hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
 
@@ -1499,19 +1495,15 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                                                   "obsrvr");
                pmic_arb->rd_base = devm_ioremap(&ctrl->dev, res->start,
                                                 resource_size(res));
-               if (IS_ERR(pmic_arb->rd_base)) {
-                       err = PTR_ERR(pmic_arb->rd_base);
-                       goto err_put_ctrl;
-               }
+               if (IS_ERR(pmic_arb->rd_base))
+                       return PTR_ERR(pmic_arb->rd_base);
 
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   "chnls");
                pmic_arb->wr_base = devm_ioremap(&ctrl->dev, res->start,
                                                 resource_size(res));
-               if (IS_ERR(pmic_arb->wr_base)) {
-                       err = PTR_ERR(pmic_arb->wr_base);
-                       goto err_put_ctrl;
-               }
+               if (IS_ERR(pmic_arb->wr_base))
+                       return PTR_ERR(pmic_arb->wr_base);
        }
 
        pmic_arb->max_periphs = PMIC_ARB_MAX_PERIPHS;
@@ -1522,10 +1514,9 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                of_property_read_u32(pdev->dev.of_node, "qcom,bus-id",
                                        &pmic_arb->bus_instance);
                if (pmic_arb->bus_instance > 1) {
-                       err = -EINVAL;
                        dev_err(&pdev->dev, "invalid bus instance (%u) specified\n",
                                pmic_arb->bus_instance);
-                       goto err_put_ctrl;
+                       return -EINVAL;
                }
 
                if (pmic_arb->bus_instance == 0) {
@@ -1543,10 +1534,9 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                }
 
                if (pmic_arb->base_apid + pmic_arb->apid_count > pmic_arb->max_periphs) {
-                       err = -EINVAL;
                        dev_err(&pdev->dev, "Unsupported APID count %d detected\n",
                                pmic_arb->base_apid + pmic_arb->apid_count);
-                       goto err_put_ctrl;
+                       return -EINVAL;
                }
        } else if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) {
                pmic_arb->base_apid = 0;
@@ -1554,55 +1544,45 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                                        PMIC_ARB_FEATURES_PERIPH_MASK;
 
                if (pmic_arb->apid_count > pmic_arb->max_periphs) {
-                       err = -EINVAL;
                        dev_err(&pdev->dev, "Unsupported APID count %d detected\n",
                                pmic_arb->apid_count);
-                       goto err_put_ctrl;
+                       return -EINVAL;
                }
        }
 
        pmic_arb->apid_data = devm_kcalloc(&ctrl->dev, pmic_arb->max_periphs,
                                           sizeof(*pmic_arb->apid_data),
                                           GFP_KERNEL);
-       if (!pmic_arb->apid_data) {
-               err = -ENOMEM;
-               goto err_put_ctrl;
-       }
+       if (!pmic_arb->apid_data)
+               return -ENOMEM;
 
        dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
                 pmic_arb->ver_ops->ver_str, hw_ver);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
        pmic_arb->intr = devm_ioremap_resource(&ctrl->dev, res);
-       if (IS_ERR(pmic_arb->intr)) {
-               err = PTR_ERR(pmic_arb->intr);
-               goto err_put_ctrl;
-       }
+       if (IS_ERR(pmic_arb->intr))
+               return PTR_ERR(pmic_arb->intr);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
        pmic_arb->cnfg = devm_ioremap_resource(&ctrl->dev, res);
-       if (IS_ERR(pmic_arb->cnfg)) {
-               err = PTR_ERR(pmic_arb->cnfg);
-               goto err_put_ctrl;
-       }
+       if (IS_ERR(pmic_arb->cnfg))
+               return PTR_ERR(pmic_arb->cnfg);
 
        pmic_arb->irq = platform_get_irq_byname(pdev, "periph_irq");
-       if (pmic_arb->irq < 0) {
-               err = pmic_arb->irq;
-               goto err_put_ctrl;
-       }
+       if (pmic_arb->irq < 0)
+               return pmic_arb->irq;
 
        err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
        if (err) {
                dev_err(&pdev->dev, "channel unspecified.\n");
-               goto err_put_ctrl;
+               return err;
        }
 
        if (channel > 5) {
                dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
                        channel);
-               err = -EINVAL;
-               goto err_put_ctrl;
+               return -EINVAL;
        }
 
        pmic_arb->channel = channel;
@@ -1610,22 +1590,19 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
        err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee);
        if (err) {
                dev_err(&pdev->dev, "EE unspecified.\n");
-               goto err_put_ctrl;
+               return err;
        }
 
        if (ee > 5) {
                dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee);
-               err = -EINVAL;
-               goto err_put_ctrl;
+               return -EINVAL;
        }
 
        pmic_arb->ee = ee;
        mapping_table = devm_kcalloc(&ctrl->dev, pmic_arb->max_periphs,
                                        sizeof(*mapping_table), GFP_KERNEL);
-       if (!mapping_table) {
-               err = -ENOMEM;
-               goto err_put_ctrl;
-       }
+       if (!mapping_table)
+               return -ENOMEM;
 
        pmic_arb->mapping_table = mapping_table;
        /* Initialize max_apid/min_apid to the opposite bounds, during
@@ -1645,7 +1622,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                if (err) {
                        dev_err(&pdev->dev, "could not read APID->PPID mapping table, rc= %d\n",
                                err);
-                       goto err_put_ctrl;
+                       return err;
                }
        }
 
@@ -1654,8 +1631,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                                         &pmic_arb_irq_domain_ops, pmic_arb);
        if (!pmic_arb->domain) {
                dev_err(&pdev->dev, "unable to create irq_domain\n");
-               err = -ENOMEM;
-               goto err_put_ctrl;
+               return -ENOMEM;
        }
 
        irq_set_chained_handler_and_data(pmic_arb->irq, pmic_arb_chained_irq,
@@ -1669,8 +1645,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 err_domain_remove:
        irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL);
        irq_domain_remove(pmic_arb->domain);
-err_put_ctrl:
-       spmi_controller_put(ctrl);
        return err;
 }
 
@@ -1681,7 +1655,6 @@ static void spmi_pmic_arb_remove(struct platform_device *pdev)
        spmi_controller_remove(ctrl);
        irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL);
        irq_domain_remove(pmic_arb->domain);
-       spmi_controller_put(ctrl);
 }
 
 static const struct of_device_id spmi_pmic_arb_match_table[] = {
index 93cd4a34debc7808c6a0cd336b47d38204db8f53..3a60fd2e09e1f978c436ec925a06d554fd04e747 100644 (file)
@@ -448,11 +448,11 @@ struct spmi_controller *spmi_controller_alloc(struct device *parent,
        int id;
 
        if (WARN_ON(!parent))
-               return NULL;
+               return ERR_PTR(-EINVAL);
 
        ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL);
        if (!ctrl)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        device_initialize(&ctrl->dev);
        ctrl->dev.type = &spmi_ctrl_type;
@@ -466,7 +466,7 @@ struct spmi_controller *spmi_controller_alloc(struct device *parent,
                dev_err(parent,
                        "unable to allocate SPMI controller identifier.\n");
                spmi_controller_put(ctrl);
-               return NULL;
+               return ERR_PTR(id);
        }
 
        ctrl->nr = id;
index de2f6516da09592807b6657884d05fec6dcca5d7..22325ab9d6521d686f839be3945ba9e37e8dd268 100644 (file)
@@ -264,7 +264,7 @@ static int gb_i2c_probe(struct gbphy_device *gbphy_dev,
        /* Looks good; up our i2c adapter */
        adapter = &gb_i2c_dev->adapter;
        adapter->owner = THIS_MODULE;
-       adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       adapter->class = I2C_CLASS_HWMON;
        adapter->algo = &gb_i2c_algorithm;
 
        adapter->dev.parent = &gbphy_dev->dev;
index 6af519938868ed0273827b22809fa62d54db2c76..a1492215dab10895ff3ec42366df36d13d18844e 100644 (file)
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 rtllib-objs :=                 \
-       dot11d.o                \
        rtllib_module.o         \
        rtllib_rx.o             \
        rtllib_tx.o             \
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
deleted file mode 100644 (file)
index d0b7332..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- ******************************************************************************/
-#include "dot11d.h"
-
-struct channel_list {
-       u8      channel[32];
-       u8      len;
-};
-
-static struct channel_list channel_array[] = {
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64,
-         149, 153, 157, 161, 165}, 24},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56,
-         60, 64}, 21},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
-         56, 60, 64}, 22},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
-         56, 60, 64}, 22},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
-         56, 60, 64}, 22},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
-        56, 60, 64}, 22},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
-       {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52,
-         56, 60, 64}, 21}
-};
-
-void dot11d_init(struct rtllib_device *ieee)
-{
-       struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);
-
-       dot11d_info->enabled = false;
-
-       dot11d_info->state = DOT11D_STATE_NONE;
-       dot11d_info->country_len = 0;
-       memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
-       memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
-       RESET_CIE_WATCHDOG(ieee);
-}
-EXPORT_SYMBOL(dot11d_init);
-
-void dot11d_channel_map(u8 channel_plan, struct rtllib_device *ieee)
-{
-       int i, max_chan = 14, min_chan = 1;
-
-       ieee->global_domain = false;
-
-       if (channel_array[channel_plan].len != 0) {
-               memset(GET_DOT11D_INFO(ieee)->channel_map, 0,
-                      sizeof(GET_DOT11D_INFO(ieee)->channel_map));
-               for (i = 0; i < channel_array[channel_plan].len; i++) {
-                       if (channel_array[channel_plan].channel[i] < min_chan ||
-                           channel_array[channel_plan].channel[i] > max_chan)
-                               break;
-                       GET_DOT11D_INFO(ieee)->channel_map[channel_array
-                                       [channel_plan].channel[i]] = 1;
-               }
-       }
-
-       switch (channel_plan) {
-       case COUNTRY_CODE_GLOBAL_DOMAIN:
-               ieee->global_domain = true;
-               for (i = 12; i <= 14; i++)
-                       GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
-               ieee->bss_start_channel = 10;
-               break;
-
-       case COUNTRY_CODE_WORLD_WIDE_13:
-               for (i = 12; i <= 13; i++)
-                       GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
-               ieee->bss_start_channel = 10;
-               break;
-
-       default:
-               ieee->bss_start_channel = 1;
-               break;
-       }
-}
-EXPORT_SYMBOL(dot11d_channel_map);
-
-void dot11d_reset(struct rtllib_device *ieee)
-{
-       struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);
-       u32 i;
-
-       memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
-       memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
-       for (i = 1; i <= 11; i++)
-               (dot11d_info->channel_map)[i] = 1;
-       for (i = 12; i <= 14; i++)
-               (dot11d_info->channel_map)[i] = 2;
-       dot11d_info->state = DOT11D_STATE_NONE;
-       dot11d_info->country_len = 0;
-       RESET_CIE_WATCHDOG(ieee);
-}
-
-void dot11d_update_country(struct rtllib_device *dev, u8 *address,
-                          u16 country_len, u8 *country)
-{
-       struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
-       u8 i, j, number_of_triples, max_channel_number;
-       struct chnl_txpow_triple *triple;
-
-       memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
-       memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
-       max_channel_number = 0;
-       number_of_triples = (country_len - 3) / 3;
-       triple = (struct chnl_txpow_triple *)(country + 3);
-       for (i = 0; i < number_of_triples; i++) {
-               if (max_channel_number >= triple->first_channel) {
-                       netdev_info(dev->dev,
-                                   "%s: Invalid country IE, skip it......1\n",
-                                   __func__);
-                       return;
-               }
-               if (MAX_CHANNEL_NUMBER < (triple->first_channel +
-                   triple->num_channels)) {
-                       netdev_info(dev->dev,
-                                   "%s: Invalid country IE, skip it......2\n",
-                                   __func__);
-                       return;
-               }
-
-               for (j = 0; j < triple->num_channels; j++) {
-                       dot11d_info->channel_map[triple->first_channel + j] = 1;
-                       dot11d_info->max_tx_power_list[triple->first_channel + j] =
-                                                triple->max_tx_power;
-                       max_channel_number = triple->first_channel + j;
-               }
-
-               triple = (struct chnl_txpow_triple *)((u8 *)triple + 3);
-       }
-
-       UPDATE_CIE_SRC(dev, address);
-
-       dot11d_info->country_len = country_len;
-       memcpy(dot11d_info->country_buffer, country, country_len);
-       dot11d_info->state = DOT11D_STATE_LEARNED;
-}
-
-void dot11d_scan_complete(struct rtllib_device *dev)
-{
-       struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
-
-       switch (dot11d_info->state) {
-       case DOT11D_STATE_LEARNED:
-               dot11d_info->state = DOT11D_STATE_DONE;
-               break;
-       case DOT11D_STATE_DONE:
-               dot11d_reset(dev);
-               break;
-       case DOT11D_STATE_NONE:
-               break;
-       }
-}
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
deleted file mode 100644 (file)
index 6d2b93a..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- ******************************************************************************/
-#ifndef __INC_DOT11D_H
-#define __INC_DOT11D_H
-
-#include "rtllib.h"
-
-struct chnl_txpow_triple {
-       u8 first_channel;
-       u8  num_channels;
-       u8  max_tx_power;
-};
-
-enum dot11d_state {
-       DOT11D_STATE_NONE = 0,
-       DOT11D_STATE_LEARNED,
-       DOT11D_STATE_DONE,
-};
-
-/**
- * struct rt_dot11d_info * @country_len: value greater than 0 if
- *               @country_buffer contains valid country information element.
- * @channel_map: holds channel values
- *             0 - invalid,
- *             1 - valid (active scan),
- *             2 - valid (passive scan)
- * @country_src_addr - Source AP of the country IE
- */
-
-struct rt_dot11d_info {
-       bool enabled;
-
-       u16 country_len;
-       u8  country_buffer[MAX_IE_LEN];
-       u8  country_src_addr[6];
-       u8  country_watchdog;
-
-       u8  channel_map[MAX_CHANNEL_NUMBER + 1];
-       u8  max_tx_power_list[MAX_CHANNEL_NUMBER + 1];
-
-       enum dot11d_state state;
-};
-
-static inline void copy_mac_addr(unsigned char *des, unsigned char *src)
-{
-       memcpy(des, src, 6);
-}
-
-#define GET_DOT11D_INFO(__ieee_dev)                    \
-        ((struct rt_dot11d_info *)((__ieee_dev)->dot11d_info))
-
-#define IS_DOT11D_ENABLE(__ieee_dev)                   \
-        (GET_DOT11D_INFO(__ieee_dev)->enabled)
-#define IS_COUNTRY_IE_VALID(__ieee_dev)                        \
-       (GET_DOT11D_INFO(__ieee_dev)->country_len > 0)
-
-#define IS_EQUAL_CIE_SRC(__ieee_dev, __address)                \
-        ether_addr_equal_unaligned( \
-               GET_DOT11D_INFO(__ieee_dev)->country_src_addr, __address)
-#define UPDATE_CIE_SRC(__ieee_dev, __address)          \
-       copy_mac_addr(GET_DOT11D_INFO(__ieee_dev)->country_src_addr, __address)
-
-#define GET_CIE_WATCHDOG(__ieee_dev)                           \
-        (GET_DOT11D_INFO(__ieee_dev)->country_watchdog)
-static inline void RESET_CIE_WATCHDOG(struct rtllib_device *__ieee_dev)
-{
-       GET_CIE_WATCHDOG(__ieee_dev) = 0;
-}
-
-#define UPDATE_CIE_WATCHDOG(__ieee_dev) (++GET_CIE_WATCHDOG(__ieee_dev))
-
-void dot11d_init(struct rtllib_device *dev);
-void dot11d_channel_map(u8 channel_plan, struct rtllib_device *ieee);
-void dot11d_reset(struct rtllib_device *dev);
-void dot11d_update_country(struct rtllib_device *dev, u8 *address,
-                          u16 country_len, u8 *country);
-void dot11d_scan_complete(struct rtllib_device *dev);
-
-#endif
index eba8364d0ff21cf575684e7bb313305d4efa3731..7f0c160bc7419f3b38ac9b77cca27bb0b998ea06 100644 (file)
@@ -63,9 +63,9 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
 
                if (type == DESC_PACKET_TYPE_INIT &&
                    (!priv->rtllib->check_nic_enough_desc(dev, TXCMD_QUEUE) ||
-                    (!skb_queue_empty(&priv->rtllib->skb_waitQ[TXCMD_QUEUE])) ||
+                    (!skb_queue_empty(&priv->rtllib->skb_waitq[TXCMD_QUEUE])) ||
                     (priv->rtllib->queue_stop))) {
-                       skb_queue_tail(&priv->rtllib->skb_waitQ[TXCMD_QUEUE],
+                       skb_queue_tail(&priv->rtllib->skb_waitq[TXCMD_QUEUE],
                                       skb);
                } else {
                        priv->rtllib->softmac_hard_start_xmit(skb, dev);
index e93394c51264beebfc549dfcaf52b52475cb11c0..c7a2eae2fdb90c46912d23f904d1eab5df388cb9 100644 (file)
@@ -51,12 +51,12 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
 
        case HW_VAR_MEDIA_STATUS:
        {
-               enum rt_op_mode OpMode = *((enum rt_op_mode *)(val));
+               enum rt_op_mode op_mode = *((enum rt_op_mode *)(val));
                u8 btMsr = rtl92e_readb(dev, MSR);
 
                btMsr &= 0xfc;
 
-               switch (OpMode) {
+               switch (op_mode) {
                case RT_OP_MODE_INFRASTRUCTURE:
                        btMsr |= MSR_INFRA;
                        break;
@@ -261,7 +261,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
                priv->eeprom_customer_id = usValue & 0xff;
                usValue = rtl92e_eeprom_read(dev,
                                             EEPROM_ICVersion_ChannelPlan >> 1);
-               priv->eeprom_chnl_plan = usValue & 0xff;
                IC_Version = (usValue & 0xff00) >> 8;
 
                ICVer8192 = IC_Version & 0xf;
@@ -283,7 +282,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
                priv->eeprom_vid = 0;
                priv->eeprom_did = 0;
                priv->eeprom_customer_id = 0;
-               priv->eeprom_chnl_plan = 0;
        }
 
        if (!priv->autoload_fail_flag) {
@@ -387,25 +385,15 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
 
        rtl92e_init_adaptive_rate(dev);
 
-       priv->chnl_plan = priv->eeprom_chnl_plan;
-
        switch (priv->eeprom_customer_id) {
        case EEPROM_CID_NetCore:
                priv->customer_id = RT_CID_819X_NETCORE;
                break;
        case EEPROM_CID_TOSHIBA:
                priv->customer_id = RT_CID_TOSHIBA;
-               if (priv->eeprom_chnl_plan & 0x80)
-                       priv->chnl_plan = priv->eeprom_chnl_plan & 0x7f;
-               else
-                       priv->chnl_plan = 0x0;
                break;
        }
 
-       if (priv->chnl_plan > CHANNEL_PLAN_LEN - 1)
-               priv->chnl_plan = 0;
-       priv->chnl_plan = COUNTRY_CODE_WORLD_WIDE_13;
-
        if (priv->eeprom_vid == 0x1186 &&  priv->eeprom_did == 0x3304)
                priv->rtllib->bSupportRemoteWakeUp = true;
        else
@@ -891,11 +879,11 @@ void  rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
        memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
        pTxFwInfo->TxHT = (cb_desc->data_rate & 0x80) ? 1 : 0;
        pTxFwInfo->TxRate = _rtl92e_rate_mgn_to_hw(cb_desc->data_rate);
-       pTxFwInfo->EnableCPUDur = cb_desc->bTxEnableFwCalcDur;
+       pTxFwInfo->EnableCPUDur = cb_desc->tx_enable_fw_calc_dur;
        pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
                                                  pTxFwInfo->TxRate, cb_desc);
 
-       if (cb_desc->bAMPDUEnable) {
+       if (cb_desc->ampdu_enable) {
                pTxFwInfo->AllowAggregation = 1;
                pTxFwInfo->RxMF = cb_desc->ampdu_factor;
                pTxFwInfo->RxAMD = cb_desc->ampdu_density;
@@ -1685,8 +1673,6 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
        stats->TimeStampLow = pDrvInfo->TSFL;
        stats->TimeStampHigh = rtl92e_readl(dev, TSFR + 4);
 
-       rtl92e_update_rx_pkt_timestamp(dev, stats);
-
        if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0)
                stats->bShift = 1;
 
@@ -1707,12 +1693,12 @@ void rtl92e_stop_adapter(struct net_device *dev, bool reset)
 {
        struct r8192_priv *priv = rtllib_priv(dev);
        int i;
-       u8      OpMode;
+       u8      op_mode;
        u8      u1bTmp;
        u32     ulRegRead;
 
-       OpMode = RT_OP_MODE_NO_LINK;
-       priv->rtllib->SetHwRegHandler(dev, HW_VAR_MEDIA_STATUS, &OpMode);
+       op_mode = RT_OP_MODE_NO_LINK;
+       priv->rtllib->SetHwRegHandler(dev, HW_VAR_MEDIA_STATUS, &op_mode);
 
        if (!priv->rtllib->bSupportRemoteWakeUp) {
                u1bTmp = 0x0;
@@ -1742,7 +1728,7 @@ void rtl92e_stop_adapter(struct net_device *dev, bool reset)
        }
 
        for (i = 0; i < MAX_QUEUE_SIZE; i++)
-               skb_queue_purge(&priv->rtllib->skb_waitQ[i]);
+               skb_queue_purge(&priv->rtllib->skb_waitq[i]);
 
        skb_queue_purge(&priv->skb_queue);
 }
@@ -1767,20 +1753,17 @@ void rtl92e_update_ratr_table(struct net_device *dev)
                ratr_value &= 0x00000FF7;
                break;
        case WIRELESS_MODE_N_24G:
-               if (ieee->ht_info->peer_mimo_ps == 0)
-                       ratr_value &= 0x0007F007;
-               else
-                       ratr_value &= 0x000FF007;
+               ratr_value &= 0x000FF007;
                break;
        default:
                break;
        }
        ratr_value &= 0x0FFFFFFF;
        if (ieee->ht_info->cur_tx_bw40mhz &&
-           ieee->ht_info->bCurShortGI40MHz)
+           ieee->ht_info->cur_short_gi_40mhz)
                ratr_value |= 0x80000000;
        else if (!ieee->ht_info->cur_tx_bw40mhz &&
-                 ieee->ht_info->bCurShortGI20MHz)
+                 ieee->ht_info->cur_short_gi_20mhz)
                ratr_value |= 0x80000000;
        rtl92e_writel(dev, RATR0 + rate_index * 4, ratr_value);
        rtl92e_writeb(dev, UFWP, 1);
@@ -1814,7 +1797,7 @@ rtl92e_init_variables(struct net_device  *dev)
                            IMR_MGNTDOK | IMR_COMDOK | IMR_HIGHDOK |
                            IMR_BDOK | IMR_RXCMDOK | IMR_TIMEOUT0 |
                            IMR_RDU | IMR_RXFOVW | IMR_TXFOVW |
-                           IMR_BcnInt | IMR_TBDOK | IMR_TBDER);
+                           IMR_TBDOK | IMR_TBDER);
 
        priv->bfirst_after_down = false;
 }
index 4d12d73850415b9f685f821e4d876b8233226ba3..e1bd4d67e862b6056fe4e0dae2dde55d6e4b319d 100644 (file)
@@ -671,16 +671,16 @@ static void _rtl92e_phy_switch_channel_work_item(struct net_device *dev)
        _rtl92e_phy_switch_channel(dev, priv->chan);
 }
 
-u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
+void rtl92e_set_channel(struct net_device *dev, u8 channel)
 {
        struct r8192_priv *priv = rtllib_priv(dev);
 
        if (!priv->up) {
                netdev_err(dev, "%s(): Driver is not initialized\n", __func__);
-               return false;
+               return;
        }
        if (priv->sw_chnl_in_progress)
-               return false;
+               return;
 
        switch (priv->rtllib->mode) {
        case WIRELESS_MODE_B:
@@ -688,7 +688,7 @@ u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
                        netdev_warn(dev,
                                    "Channel %d not available in 802.11b.\n",
                                    channel);
-                       return false;
+                       return;
                }
                break;
        case WIRELESS_MODE_G:
@@ -697,7 +697,7 @@ u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
                        netdev_warn(dev,
                                    "Channel %d not available in 802.11g.\n",
                                    channel);
-                       return false;
+                       return;
                }
                break;
        }
@@ -714,7 +714,7 @@ u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
        if (priv->up)
                _rtl92e_phy_switch_channel_work_item(dev);
        priv->sw_chnl_in_progress = false;
-       return true;
+       return;
 }
 
 static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
index 6c4c33ded6a94cc2e9ac1feb581ddc62b2143fe3..ff4b4004b0d0051b3fe6efaded18837ef9d7f76d 100644 (file)
@@ -41,7 +41,7 @@ void rtl92e_get_tx_power(struct net_device *dev);
 void rtl92e_set_tx_power(struct net_device *dev, u8 channel);
 u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath);
 
-u8 rtl92e_set_channel(struct net_device *dev, u8 channel);
+void rtl92e_set_channel(struct net_device *dev, u8 channel);
 void rtl92e_set_bw_mode(struct net_device *dev,
                        enum ht_channel_width bandwidth,
                        enum ht_extchnl_offset Offset);
index 995daab906c93186d820cac5108621de01bac312..6815d18a7919e4c8aae9c9ae9826621c75ef0401 100644 (file)
@@ -226,16 +226,6 @@ static void _rtl92e_tx_timeout(struct net_device *dev, unsigned int txqueue)
        netdev_info(dev, "TXTIMEOUT");
 }
 
-static void _rtl92e_set_chan(struct net_device *dev, short ch)
-{
-       struct r8192_priv *priv = rtllib_priv(dev);
-
-       priv->chan = ch;
-
-       if (priv->rf_set_chan)
-               priv->rf_set_chan(dev, priv->chan);
-}
-
 static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
 {
        struct r8192_priv *priv = rtllib_priv(dev);
@@ -297,7 +287,6 @@ static void _rtl92e_update_beacon(void *data)
        if (ieee->ht_info->current_ht_support)
                HT_update_self_and_peer_setting(ieee, net);
        ieee->ht_info->current_rt2rt_long_slot_time = net->bssht.bd_rt2rt_long_slot_time;
-       ieee->ht_info->RT2RT_HT_Mode = net->bssht.rt2rt_ht_mode;
        _rtl92e_update_cap(dev, net->capability);
 }
 
@@ -426,38 +415,6 @@ static int _rtl92e_handle_assoc_response(struct net_device *dev,
        return 0;
 }
 
-static void _rtl92e_prepare_beacon(struct tasklet_struct *t)
-{
-       struct r8192_priv *priv = from_tasklet(priv, t,
-                                              irq_prepare_beacon_tasklet);
-       struct net_device *dev = priv->rtllib->dev;
-       struct sk_buff *pskb = NULL, *pnewskb = NULL;
-       struct cb_desc *tcb_desc = NULL;
-       struct rtl8192_tx_ring *ring = NULL;
-       struct tx_desc *pdesc = NULL;
-
-       ring = &priv->tx_ring[BEACON_QUEUE];
-       pskb = __skb_dequeue(&ring->queue);
-       kfree_skb(pskb);
-
-       pnewskb = rtllib_get_beacon(priv->rtllib);
-       if (!pnewskb)
-               return;
-
-       tcb_desc = (struct cb_desc *)(pnewskb->cb + 8);
-       tcb_desc->queue_index = BEACON_QUEUE;
-       tcb_desc->data_rate = 2;
-       tcb_desc->ratr_index = 7;
-       tcb_desc->tx_dis_rate_fallback = 1;
-       tcb_desc->tx_use_drv_assinged_rate = 1;
-       skb_push(pnewskb, priv->rtllib->tx_headroom);
-
-       pdesc = &ring->desc[0];
-       rtl92e_fill_tx_desc(dev, pdesc, tcb_desc, pnewskb);
-       __skb_queue_tail(&ring->queue, pnewskb);
-       pdesc->OWN = 1;
-}
-
 void rtl92e_config_rate(struct net_device *dev, u16 *rate_config)
 {
        struct r8192_priv *priv = rtllib_priv(dev);
@@ -685,7 +642,7 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
        struct r8192_priv *priv = rtllib_priv(dev);
 
        priv->rtllib->softmac_hard_start_xmit   = _rtl92e_hard_start_xmit;
-       priv->rtllib->set_chan                  = _rtl92e_set_chan;
+       priv->rtllib->set_chan                  = rtl92e_set_channel;
        priv->rtllib->link_change               = rtl92e_link_change;
        priv->rtllib->softmac_data_hard_start_xmit = _rtl92e_hard_data_xmit;
        priv->rtllib->check_nic_enough_desc     = _rtl92e_check_nic_enough_desc;
@@ -694,7 +651,6 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
        priv->rtllib->set_wireless_mode         = rtl92e_set_wireless_mode;
        priv->rtllib->leisure_ps_leave          = rtl92e_leisure_ps_leave;
        priv->rtllib->set_bw_mode_handler       = rtl92e_set_bw_mode;
-       priv->rf_set_chan                       = rtl92e_set_channel;
 
        priv->rtllib->sta_wake_up = rtl92e_hw_wakeup;
        priv->rtllib->enter_sleep_state = rtl92e_enter_sleep;
@@ -767,7 +723,7 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
        skb_queue_head_init(&priv->skb_queue);
 
        for (i = 0; i < MAX_QUEUE_SIZE; i++)
-               skb_queue_head_init(&priv->rtllib->skb_waitQ[i]);
+               skb_queue_head_init(&priv->rtllib->skb_waitq[i]);
 }
 
 static void _rtl92e_init_priv_lock(struct r8192_priv *priv)
@@ -796,8 +752,6 @@ static void _rtl92e_init_priv_task(struct net_device *dev)
        INIT_DELAYED_WORK(&priv->rtllib->hw_sleep_wq, (void *)rtl92e_hw_sleep_wq);
        tasklet_setup(&priv->irq_rx_tasklet, _rtl92e_irq_rx_tasklet);
        tasklet_setup(&priv->irq_tx_tasklet, _rtl92e_irq_tx_tasklet);
-       tasklet_setup(&priv->irq_prepare_beacon_tasklet,
-                     _rtl92e_prepare_beacon);
 }
 
 static short _rtl92e_get_channel_map(struct net_device *dev)
@@ -806,13 +760,6 @@ static short _rtl92e_get_channel_map(struct net_device *dev)
 
        struct r8192_priv *priv = rtllib_priv(dev);
 
-       if (priv->chnl_plan >= COUNTRY_CODE_MAX) {
-               netdev_info(dev,
-                           "rtl819x_init:Error channel plan! Set to default.\n");
-               priv->chnl_plan = COUNTRY_CODE_FCC;
-       }
-       dot11d_init(priv->rtllib);
-       dot11d_channel_map(priv->chnl_plan, priv->rtllib);
        for (i = 1; i <= 11; i++)
                (priv->rtllib->active_channel_map)[i] = 1;
        (priv->rtllib->active_channel_map)[12] = 2;
@@ -1024,21 +971,21 @@ static void _rtl92e_watchdog_wq_cb(void *data)
                }
        }
        if ((ieee->link_state == MAC80211_LINKED) && (ieee->iw_mode == IW_MODE_INFRA)) {
-               if (ieee->link_detect_info.NumRxOkInPeriod > 100 ||
-               ieee->link_detect_info.NumTxOkInPeriod > 100)
+               if (ieee->link_detect_info.num_rx_ok_in_period > 100 ||
+               ieee->link_detect_info.num_tx_ok_in_period > 100)
                        bBusyTraffic = true;
 
-               if (ieee->link_detect_info.NumRxOkInPeriod > 4000 ||
-                   ieee->link_detect_info.NumTxOkInPeriod > 4000) {
+               if (ieee->link_detect_info.num_rx_ok_in_period > 4000 ||
+                   ieee->link_detect_info.num_tx_ok_in_period > 4000) {
                        bHigherBusyTraffic = true;
-                       if (ieee->link_detect_info.NumRxOkInPeriod > 5000)
+                       if (ieee->link_detect_info.num_rx_ok_in_period > 5000)
                                bHigherBusyRxTraffic = true;
                        else
                                bHigherBusyRxTraffic = false;
                }
 
                if (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
-                   ieee->link_detect_info.NumTxOkInPeriod) > 8) ||
+                   ieee->link_detect_info.num_tx_ok_in_period) > 8) ||
                    (ieee->link_detect_info.NumRxUnicastOkInPeriod > 2))
                        bEnterPS = false;
                else
@@ -1056,8 +1003,8 @@ static void _rtl92e_watchdog_wq_cb(void *data)
                rtl92e_leisure_ps_leave(dev);
        }
 
-       ieee->link_detect_info.NumRxOkInPeriod = 0;
-       ieee->link_detect_info.NumTxOkInPeriod = 0;
+       ieee->link_detect_info.num_rx_ok_in_period = 0;
+       ieee->link_detect_info.num_tx_ok_in_period = 0;
        ieee->link_detect_info.NumRxUnicastOkInPeriod = 0;
        ieee->link_detect_info.bBusyTraffic = bBusyTraffic;
 
@@ -1240,7 +1187,7 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tcb_desc->ratr_index = 7;
        tcb_desc->tx_dis_rate_fallback = 1;
        tcb_desc->tx_use_drv_assinged_rate = 1;
-       tcb_desc->bTxEnableFwCalcDur = 1;
+       tcb_desc->tx_enable_fw_calc_dur = 1;
        skb_push(skb, priv->rtllib->tx_headroom);
        ret = _rtl92e_tx(dev, skb);
        if (ret != 0)
@@ -1484,17 +1431,6 @@ void rtl92e_reset_desc_ring(struct net_device *dev)
        spin_unlock_irqrestore(&priv->irq_th_lock, flags);
 }
 
-void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
-                                   struct rtllib_rx_stats *stats)
-{
-       struct r8192_priv *priv = rtllib_priv(dev);
-
-       if (stats->bIsAMPDU && !stats->bFirstMPDU)
-               stats->mac_time = priv->last_rx_desc_tsf;
-       else
-               priv->last_rx_desc_tsf = stats->mac_time;
-}
-
 long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index)
 {
        long    signal_power;
@@ -1638,9 +1574,9 @@ static void _rtl92e_tx_resume(struct net_device *dev)
 
        for (queue_index = BK_QUEUE;
             queue_index < MAX_QUEUE_SIZE; queue_index++) {
-               while ((!skb_queue_empty(&ieee->skb_waitQ[queue_index])) &&
+               while ((!skb_queue_empty(&ieee->skb_waitq[queue_index])) &&
                (priv->rtllib->check_nic_enough_desc(dev, queue_index) > 0)) {
-                       skb = skb_dequeue(&ieee->skb_waitQ[queue_index]);
+                       skb = skb_dequeue(&ieee->skb_waitq[queue_index]);
                        ieee->softmac_data_hard_start_xmit(skb, dev, 0);
                }
        }
@@ -1827,9 +1763,6 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
        if (inta & IMR_ROK)
                tasklet_schedule(&priv->irq_rx_tasklet);
 
-       if (inta & IMR_BcnInt)
-               tasklet_schedule(&priv->irq_prepare_beacon_tasklet);
-
        if (inta & IMR_RDU) {
                rtl92e_writel(dev, INTA_MASK,
                              rtl92e_readl(dev, INTA_MASK) & ~IMR_RDU);
@@ -1840,22 +1773,22 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
                tasklet_schedule(&priv->irq_rx_tasklet);
 
        if (inta & IMR_BKDOK) {
-               priv->rtllib->link_detect_info.NumTxOkInPeriod++;
+               priv->rtllib->link_detect_info.num_tx_ok_in_period++;
                _rtl92e_tx_isr(dev, BK_QUEUE);
        }
 
        if (inta & IMR_BEDOK) {
-               priv->rtllib->link_detect_info.NumTxOkInPeriod++;
+               priv->rtllib->link_detect_info.num_tx_ok_in_period++;
                _rtl92e_tx_isr(dev, BE_QUEUE);
        }
 
        if (inta & IMR_VIDOK) {
-               priv->rtllib->link_detect_info.NumTxOkInPeriod++;
+               priv->rtllib->link_detect_info.num_tx_ok_in_period++;
                _rtl92e_tx_isr(dev, VI_QUEUE);
        }
 
        if (inta & IMR_VODOK) {
-               priv->rtllib->link_detect_info.NumTxOkInPeriod++;
+               priv->rtllib->link_detect_info.num_tx_ok_in_period++;
                _rtl92e_tx_isr(dev, VO_QUEUE);
        }
 
index a4afbf3e934d5ae6162dbfd4b66e3aa6711d78d6..1d6d31292f4116ccf91bca461ad91b9987913952 100644 (file)
@@ -34,8 +34,6 @@
 
 #include "../rtllib.h"
 
-#include "../dot11d.h"
-
 #include "r8192E_firmware.h"
 #include "r8192E_hw.h"
 
@@ -219,7 +217,6 @@ struct r8192_priv {
 
        struct tasklet_struct           irq_rx_tasklet;
        struct tasklet_struct           irq_tx_tasklet;
-       struct tasklet_struct           irq_prepare_beacon_tasklet;
 
        struct mutex                            wx_mutex;
        struct mutex                            rf_mutex;
@@ -228,8 +225,6 @@ struct r8192_priv {
        struct rt_stats stats;
        struct iw_statistics                    wstats;
 
-       u8 (*rf_set_chan)(struct net_device *dev, u8 ch);
-
        struct rx_desc *rx_ring;
        struct sk_buff  *rx_buf[MAX_RX_COUNT];
        dma_addr_t      rx_ring_dma;
@@ -237,8 +232,6 @@ struct r8192_priv {
        int             rxringcount;
        u16             rxbuffersize;
 
-       u64 last_rx_desc_tsf;
-
        u32 receive_config;
        u8              retry_data;
        u8              retry_rts;
@@ -286,7 +279,6 @@ struct r8192_priv {
        u16 eeprom_vid;
        u16 eeprom_did;
        u8 eeprom_customer_id;
-       u16 eeprom_chnl_plan;
 
        u8 eeprom_tx_pwr_level_cck[14];
        u8 eeprom_tx_pwr_level_ofdm24g[14];
@@ -312,7 +304,6 @@ struct r8192_priv {
 
        bool tx_pwr_data_read_from_eeprom;
 
-       u16 chnl_plan;
        u8 hw_rf_off_action;
 
        bool rf_change_in_progress;
@@ -396,8 +387,6 @@ void rtl92e_irq_enable(struct net_device *dev);
 void rtl92e_config_rate(struct net_device *dev, u16 *rate_config);
 void rtl92e_irq_disable(struct net_device *dev);
 
-void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
-                                   struct rtllib_rx_stats *stats);
 long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index);
 void rtl92e_update_rx_statistics(struct r8192_priv *priv,
                                 struct rtllib_rx_stats *pprevious_stats);
index 330dafd62656a02173b783b286ceee33e3cdf1de..92143c50c149a7e3e77945e7712a06f3f2791351 100644 (file)
@@ -287,9 +287,9 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
 
        if (priv->rtllib->link_state == MAC80211_LINKED) {
                bshort_gi_enabled = (ht_info->cur_tx_bw40mhz &&
-                                    ht_info->bCurShortGI40MHz) ||
+                                    ht_info->cur_short_gi_40mhz) ||
                                    (!ht_info->cur_tx_bw40mhz &&
-                                    ht_info->bCurShortGI20MHz);
+                                    ht_info->cur_short_gi_20mhz);
 
                pra->upper_rssi_threshold_ratr =
                                (pra->upper_rssi_threshold_ratr & (~BIT(31))) |
@@ -1142,7 +1142,7 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
                                if (priv->bis_cur_rdlstate ||
                                    !priv->bcurrent_turbo_EDCA) {
                                        rtl92e_writel(dev, EDCAPARA_BE,
-                                                     edca_setting_UL[ht_info->IOTPeer]);
+                                                     edca_setting_UL[ht_info->iot_peer]);
                                        priv->bis_cur_rdlstate = false;
                                }
                        } else {
@@ -1150,10 +1150,10 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
                                    !priv->bcurrent_turbo_EDCA) {
                                        if (priv->rtllib->mode == WIRELESS_MODE_G)
                                                rtl92e_writel(dev, EDCAPARA_BE,
-                                                             edca_setting_DL_GMode[ht_info->IOTPeer]);
+                                                             edca_setting_DL_GMode[ht_info->iot_peer]);
                                        else
                                                rtl92e_writel(dev, EDCAPARA_BE,
-                                                             edca_setting_DL[ht_info->IOTPeer]);
+                                                             edca_setting_DL[ht_info->iot_peer]);
                                        priv->bis_cur_rdlstate = true;
                                }
                        }
@@ -1164,17 +1164,17 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
                                    !priv->bcurrent_turbo_EDCA) {
                                        if (priv->rtllib->mode == WIRELESS_MODE_G)
                                                rtl92e_writel(dev, EDCAPARA_BE,
-                                                             edca_setting_DL_GMode[ht_info->IOTPeer]);
+                                                             edca_setting_DL_GMode[ht_info->iot_peer]);
                                        else
                                                rtl92e_writel(dev, EDCAPARA_BE,
-                                                             edca_setting_DL[ht_info->IOTPeer]);
+                                                             edca_setting_DL[ht_info->iot_peer]);
                                        priv->bis_cur_rdlstate = true;
                                }
                        } else {
                                if (priv->bis_cur_rdlstate ||
                                    !priv->bcurrent_turbo_EDCA) {
                                        rtl92e_writel(dev, EDCAPARA_BE,
-                                                     edca_setting_UL[ht_info->IOTPeer]);
+                                                     edca_setting_UL[ht_info->iot_peer]);
                                        priv->bis_cur_rdlstate = false;
                                }
                        }
@@ -1217,7 +1217,7 @@ static void _rtl92e_dm_cts_to_self(struct net_device *dev)
                ht_info->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF;
                return;
        }
-       if (ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) {
+       if (ht_info->iot_peer == HT_IOT_PEER_BROADCOM) {
                curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
                curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
                if (curRxOkCnt > 4 * curTxOkCnt)
@@ -1713,7 +1713,7 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
        static u8 reg_c38_State = RegC38_Default;
 
        if (priv->rtllib->link_state == MAC80211_LINKED &&
-           priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) {
+           priv->rtllib->ht_info->iot_peer == HT_IOT_PEER_BROADCOM) {
                if (priv->rtllib->bfsync_enable == 0) {
                        switch (priv->rtllib->fsync_state) {
                        case Default_Fsync:
@@ -1819,7 +1819,7 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
                priv->dynamic_tx_low_pwr = false;
                return;
        }
-       if ((priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_ATHEROS) &&
+       if ((priv->rtllib->ht_info->iot_peer == HT_IOT_PEER_ATHEROS) &&
            (priv->rtllib->mode == WIRELESS_MODE_G)) {
                txhipower_threshold = TX_POWER_ATHEROAP_THRESH_HIGH;
                txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
index 4371ab1239ee2b582e2c9e6b8cf4d9f0dde3602d..4c884c5277f94be222aef10faa2e0305d15bf2ad 100644 (file)
@@ -158,28 +158,6 @@ static int _rtl92e_wx_set_mode(struct net_device *dev,
        return ret;
 }
 
-struct  iw_range_with_scan_capa {
-       /* Informative stuff (to choose between different interface) */
-       __u32      throughput;     /* To give an idea... */
-       /* In theory this value should be the maximum benchmarked
-        * TCP/IP throughput, because with most of these devices the
-        * bit rate is meaningless (overhead an co) to estimate how
-        * fast the connection will go and pick the fastest one.
-        * I suggest people to play with Netperf or any benchmark...
-        */
-
-       /* NWID (or domain id) */
-       __u32      min_nwid;    /* Minimal NWID we are able to set */
-       __u32      max_nwid;    /* Maximal NWID we are able to set */
-
-       /* Old Frequency (backward compat - moved lower ) */
-       __u16      old_num_channels;
-       __u8        old_num_frequency;
-
-       /* Scan capabilities */
-       __u8        scan_capa;
-};
-
 static int _rtl92e_wx_get_range(struct net_device *dev,
                                struct iw_request_info *info,
                                union iwreq_data *wrqu, char *extra)
index 4af8055d24895b7642c28f23b744dff7815a50f8..ee9ce392155c310cc4c1a7c5e461768e3a7979b9 100644 (file)
@@ -23,35 +23,35 @@ static void deactivate_ba_entry(struct rtllib_device *ieee, struct ba_record *ba
        del_timer_sync(&ba->timer);
 }
 
-static u8 tx_ts_delete_ba(struct rtllib_device *ieee, struct tx_ts_record *pTxTs)
+static u8 tx_ts_delete_ba(struct rtllib_device *ieee, struct tx_ts_record *ts)
 {
-       struct ba_record *admitted_ba = &pTxTs->TxAdmittedBARecord;
-       struct ba_record *pending_ba = &pTxTs->TxPendingBARecord;
-       u8 bSendDELBA = false;
+       struct ba_record *admitted_ba = &ts->tx_admitted_ba_record;
+       struct ba_record *pending_ba = &ts->tx_pending_ba_record;
+       u8 send_del_ba = false;
 
        if (pending_ba->b_valid) {
                deactivate_ba_entry(ieee, pending_ba);
-               bSendDELBA = true;
+               send_del_ba = true;
        }
 
        if (admitted_ba->b_valid) {
                deactivate_ba_entry(ieee, admitted_ba);
-               bSendDELBA = true;
+               send_del_ba = true;
        }
-       return bSendDELBA;
+       return send_del_ba;
 }
 
 static u8 rx_ts_delete_ba(struct rtllib_device *ieee, struct rx_ts_record *ts)
 {
        struct ba_record *ba = &ts->rx_admitted_ba_record;
-       u8                      bSendDELBA = false;
+       u8                      send_del_ba = false;
 
        if (ba->b_valid) {
                deactivate_ba_entry(ieee, ba);
-               bSendDELBA = true;
+               send_del_ba = true;
        }
 
-       return bSendDELBA;
+       return send_del_ba;
 }
 
 void rtllib_reset_ba_entry(struct ba_record *ba)
@@ -68,7 +68,7 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *dst,
                                    u16 status_code, u8 type)
 {
        struct sk_buff *skb = NULL;
-       struct ieee80211_hdr_3addr *BAReq = NULL;
+       struct ieee80211_hdr_3addr *ba_req = NULL;
        u8 *tag = NULL;
        u16 len = ieee->tx_headroom + 9;
 
@@ -87,13 +87,13 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *dst,
 
        skb_reserve(skb, ieee->tx_headroom);
 
-       BAReq = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
+       ba_req = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
 
-       ether_addr_copy(BAReq->addr1, dst);
-       ether_addr_copy(BAReq->addr2, ieee->dev->dev_addr);
+       ether_addr_copy(ba_req->addr1, dst);
+       ether_addr_copy(ba_req->addr2, ieee->dev->dev_addr);
 
-       ether_addr_copy(BAReq->addr3, ieee->current_network.bssid);
-       BAReq->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
+       ether_addr_copy(ba_req->addr3, ieee->current_network.bssid);
+       ba_req->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
 
        tag = skb_put(skb, 9);
        *tag++ = ACT_CAT_BA;
@@ -127,9 +127,9 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
                                    struct ba_record *ba,
                                    enum tr_select TxRxSelect, u16 reason_code)
 {
-       union delba_param_set DelbaParamSet;
+       union delba_param_set del_ba_param_set;
        struct sk_buff *skb = NULL;
-       struct ieee80211_hdr_3addr *Delba = NULL;
+       struct ieee80211_hdr_3addr *del_ba = NULL;
        u8 *tag = NULL;
        u16 len = 6 + ieee->tx_headroom;
 
@@ -137,10 +137,10 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
                netdev_dbg(ieee->dev, "%s(): reason_code(%d) sentd to: %pM\n",
                           __func__, reason_code, dst);
 
-       memset(&DelbaParamSet, 0, 2);
+       memset(&del_ba_param_set, 0, 2);
 
-       DelbaParamSet.field.initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
-       DelbaParamSet.field.tid = ba->ba_param_set.field.tid;
+       del_ba_param_set.field.initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
+       del_ba_param_set.field.tid      = ba->ba_param_set.field.tid;
 
        skb = dev_alloc_skb(len + sizeof(struct ieee80211_hdr_3addr));
        if (!skb)
@@ -148,19 +148,19 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
 
        skb_reserve(skb, ieee->tx_headroom);
 
-       Delba = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
+       del_ba = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
 
-       ether_addr_copy(Delba->addr1, dst);
-       ether_addr_copy(Delba->addr2, ieee->dev->dev_addr);
-       ether_addr_copy(Delba->addr3, ieee->current_network.bssid);
-       Delba->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
+       ether_addr_copy(del_ba->addr1, dst);
+       ether_addr_copy(del_ba->addr2, ieee->dev->dev_addr);
+       ether_addr_copy(del_ba->addr3, ieee->current_network.bssid);
+       del_ba->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
 
        tag = skb_put(skb, 6);
 
        *tag++ = ACT_CAT_BA;
        *tag++ = ACT_DELBA;
 
-       put_unaligned_le16(DelbaParamSet.short_data, tag);
+       put_unaligned_le16(del_ba_param_set.short_data, tag);
        tag += 2;
 
        put_unaligned_le16(reason_code, tag);
@@ -215,11 +215,11 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
 {
        struct ieee80211_hdr_3addr *req = NULL;
        u16 rc = 0;
-       u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
+       u8 *dst = NULL, *dialog_token = NULL, *tag = NULL;
        struct ba_record *ba = NULL;
-       union ba_param_set *pBaParamSet = NULL;
-       u16 *pBaTimeoutVal = NULL;
-       union sequence_control *pBaStartSeqCtrl = NULL;
+       union ba_param_set *ba_param_set = NULL;
+       u16 *ba_timeout_value = NULL;
+       union sequence_control *ba_start_seq_ctrl = NULL;
        struct rx_ts_record *ts = NULL;
 
        if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 9) {
@@ -238,10 +238,10 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
        tag = (u8 *)req;
        dst = (u8 *)(&req->addr2[0]);
        tag += sizeof(struct ieee80211_hdr_3addr);
-       pDialogToken = tag + 2;
-       pBaParamSet = (union ba_param_set *)(tag + 3);
-       pBaTimeoutVal = (u16 *)(tag + 5);
-       pBaStartSeqCtrl = (union sequence_control *)(req + 7);
+       dialog_token = tag + 2;
+       ba_param_set = (union ba_param_set *)(tag + 3);
+       ba_timeout_value = (u16 *)(tag + 5);
+       ba_start_seq_ctrl = (union sequence_control *)(req + 7);
 
        if (!ieee->current_network.qos_data.active ||
            !ieee->ht_info->current_ht_support ||
@@ -254,14 +254,14 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
                goto OnADDBAReq_Fail;
        }
        if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
-                  (u8)(pBaParamSet->field.tid), RX_DIR, true)) {
+                  (u8)(ba_param_set->field.tid), RX_DIR, true)) {
                rc = ADDBA_STATUS_REFUSED;
                netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
                goto OnADDBAReq_Fail;
        }
        ba = &ts->rx_admitted_ba_record;
 
-       if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
+       if (ba_param_set->field.ba_policy == BA_POLICY_DELAYED) {
                rc = ADDBA_STATUS_INVALID_PARAM;
                netdev_warn(ieee->dev, "%s(): BA Policy is not correct\n",
                            __func__);
@@ -271,10 +271,10 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
        rtllib_FlushRxTsPendingPkts(ieee, ts);
 
        deactivate_ba_entry(ieee, ba);
-       ba->dialog_token = *pDialogToken;
-       ba->ba_param_set = *pBaParamSet;
-       ba->ba_timeout_value = *pBaTimeoutVal;
-       ba->ba_start_seq_ctrl = *pBaStartSeqCtrl;
+       ba->dialog_token = *dialog_token;
+       ba->ba_param_set = *ba_param_set;
+       ba->ba_timeout_value = *ba_timeout_value;
+       ba->ba_start_seq_ctrl = *ba_start_seq_ctrl;
 
        if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev) ||
           (ieee->ht_info->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
@@ -291,9 +291,9 @@ OnADDBAReq_Fail:
        {
                struct ba_record BA;
 
-               BA.ba_param_set = *pBaParamSet;
-               BA.ba_timeout_value = *pBaTimeoutVal;
-               BA.dialog_token = *pDialogToken;
+               BA.ba_param_set = *ba_param_set;
+               BA.ba_timeout_value = *ba_timeout_value;
+               BA.dialog_token = *dialog_token;
                BA.ba_param_set.field.ba_policy = BA_POLICY_IMMEDIATE;
                rtllib_send_ADDBARsp(ieee, dst, &BA, rc);
                return 0;
@@ -303,11 +303,11 @@ OnADDBAReq_Fail:
 int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
 {
        struct ieee80211_hdr_3addr *rsp = NULL;
-       struct ba_record *pending_ba, *pAdmittedBA;
+       struct ba_record *pending_ba, *admitted_ba;
        struct tx_ts_record *ts = NULL;
-       u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
-       u16 *status_code = NULL, *pBaTimeoutVal = NULL;
-       union ba_param_set *pBaParamSet = NULL;
+       u8 *dst = NULL, *dialog_token = NULL, *tag = NULL;
+       u16 *status_code = NULL, *ba_timeout_value = NULL;
+       union ba_param_set *ba_param_set = NULL;
        u16                     reason_code;
 
        if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 9) {
@@ -320,40 +320,40 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
        tag = (u8 *)rsp;
        dst = (u8 *)(&rsp->addr2[0]);
        tag += sizeof(struct ieee80211_hdr_3addr);
-       pDialogToken = tag + 2;
+       dialog_token = tag + 2;
        status_code = (u16 *)(tag + 3);
-       pBaParamSet = (union ba_param_set *)(tag + 5);
-       pBaTimeoutVal = (u16 *)(tag + 7);
+       ba_param_set = (union ba_param_set *)(tag + 5);
+       ba_timeout_value = (u16 *)(tag + 7);
 
        if (!ieee->current_network.qos_data.active ||
            !ieee->ht_info->current_ht_support ||
-           !ieee->ht_info->bCurrentAMPDUEnable) {
+           !ieee->ht_info->current_ampdu_enable) {
                netdev_warn(ieee->dev,
                            "reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",
                            ieee->current_network.qos_data.active,
                            ieee->ht_info->current_ht_support,
-                           ieee->ht_info->bCurrentAMPDUEnable);
+                           ieee->ht_info->current_ampdu_enable);
                reason_code = DELBA_REASON_UNKNOWN_BA;
                goto OnADDBARsp_Reject;
        }
 
        if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
-                  (u8)(pBaParamSet->field.tid), TX_DIR, false)) {
+                  (u8)(ba_param_set->field.tid), TX_DIR, false)) {
                netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
                reason_code = DELBA_REASON_UNKNOWN_BA;
                goto OnADDBARsp_Reject;
        }
 
-       ts->bAddBaReqInProgress = false;
-       pending_ba = &ts->TxPendingBARecord;
-       pAdmittedBA = &ts->TxAdmittedBARecord;
+       ts->add_ba_req_in_progress = false;
+       pending_ba = &ts->tx_pending_ba_record;
+       admitted_ba = &ts->tx_admitted_ba_record;
 
-       if (pAdmittedBA->b_valid) {
+       if (admitted_ba->b_valid) {
                netdev_dbg(ieee->dev, "%s(): ADDBA response already admitted\n",
                           __func__);
                return -1;
        } else if (!pending_ba->b_valid ||
-                  (*pDialogToken != pending_ba->dialog_token)) {
+                  (*dialog_token != pending_ba->dialog_token)) {
                netdev_warn(ieee->dev,
                            "%s(): ADDBA Rsp. BA invalid, DELBA!\n",
                            __func__);
@@ -367,22 +367,22 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
        }
 
        if (*status_code == ADDBA_STATUS_SUCCESS) {
-               if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
-                       ts->bAddBaReqDelayed = true;
-                       deactivate_ba_entry(ieee, pAdmittedBA);
+               if (ba_param_set->field.ba_policy == BA_POLICY_DELAYED) {
+                       ts->add_ba_req_delayed = true;
+                       deactivate_ba_entry(ieee, admitted_ba);
                        reason_code = DELBA_REASON_END_BA;
                        goto OnADDBARsp_Reject;
                }
 
-               pAdmittedBA->dialog_token = *pDialogToken;
-               pAdmittedBA->ba_timeout_value = *pBaTimeoutVal;
-               pAdmittedBA->ba_start_seq_ctrl = pending_ba->ba_start_seq_ctrl;
-               pAdmittedBA->ba_param_set = *pBaParamSet;
-               deactivate_ba_entry(ieee, pAdmittedBA);
-               activate_ba_entry(pAdmittedBA, *pBaTimeoutVal);
+               admitted_ba->dialog_token = *dialog_token;
+               admitted_ba->ba_timeout_value = *ba_timeout_value;
+               admitted_ba->ba_start_seq_ctrl = pending_ba->ba_start_seq_ctrl;
+               admitted_ba->ba_param_set = *ba_param_set;
+               deactivate_ba_entry(ieee, admitted_ba);
+               activate_ba_entry(admitted_ba, *ba_timeout_value);
        } else {
-               ts->bAddBaReqDelayed = true;
-               ts->bDisable_AddBa = true;
+               ts->add_ba_req_delayed = true;
+               ts->disable_add_ba = true;
                reason_code = DELBA_REASON_END_BA;
                goto OnADDBARsp_Reject;
        }
@@ -393,7 +393,7 @@ OnADDBARsp_Reject:
        {
                struct ba_record BA;
 
-               BA.ba_param_set = *pBaParamSet;
+               BA.ba_param_set = *ba_param_set;
                rtllib_send_DELBA(ieee, dst, &BA, TX_DIR, reason_code);
                return 0;
        }
@@ -402,7 +402,7 @@ OnADDBARsp_Reject:
 int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
 {
        struct ieee80211_hdr_3addr *delba = NULL;
-       union delba_param_set *pDelBaParamSet = NULL;
+       union delba_param_set *del_ba_param_set = NULL;
        u8 *dst = NULL;
 
        if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 6) {
@@ -427,46 +427,46 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
 #endif
        delba = (struct ieee80211_hdr_3addr *)skb->data;
        dst = (u8 *)(&delba->addr2[0]);
-       pDelBaParamSet = (union delba_param_set *)&delba->seq_ctrl + 2;
+       del_ba_param_set = (union delba_param_set *)&delba->seq_ctrl + 2;
 
-       if (pDelBaParamSet->field.initiator == 1) {
+       if (del_ba_param_set->field.initiator == 1) {
                struct rx_ts_record *ts;
 
                if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
-                          (u8)pDelBaParamSet->field.tid, RX_DIR, false)) {
+                          (u8)del_ba_param_set->field.tid, RX_DIR, false)) {
                        netdev_warn(ieee->dev,
                                    "%s(): can't get TS for RXTS. dst:%pM TID:%d\n",
                                    __func__, dst,
-                                   (u8)pDelBaParamSet->field.tid);
+                                   (u8)del_ba_param_set->field.tid);
                        return -1;
                }
 
                rx_ts_delete_ba(ieee, ts);
        } else {
-               struct tx_ts_record *pTxTs;
+               struct tx_ts_record *ts;
 
-               if (!rtllib_get_ts(ieee, (struct ts_common_info **)&pTxTs, dst,
-                          (u8)pDelBaParamSet->field.tid, TX_DIR, false)) {
+               if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
+                          (u8)del_ba_param_set->field.tid, TX_DIR, false)) {
                        netdev_warn(ieee->dev, "%s(): can't get TS for TXTS\n",
                                    __func__);
                        return -1;
                }
 
-               pTxTs->bUsingBa = false;
-               pTxTs->bAddBaReqInProgress = false;
-               pTxTs->bAddBaReqDelayed = false;
-               del_timer_sync(&pTxTs->TsAddBaTimer);
-               tx_ts_delete_ba(ieee, pTxTs);
+               ts->using_ba = false;
+               ts->add_ba_req_in_progress = false;
+               ts->add_ba_req_delayed = false;
+               del_timer_sync(&ts->ts_add_ba_timer);
+               tx_ts_delete_ba(ieee, ts);
        }
        return 0;
 }
 
 void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
-                          u8 policy, u8        bOverwritePending)
+                          u8 policy, u8        overwrite_pending)
 {
-       struct ba_record *ba = &ts->TxPendingBARecord;
+       struct ba_record *ba = &ts->tx_pending_ba_record;
 
-       if (ba->b_valid && !bOverwritePending)
+       if (ba->b_valid && !overwrite_pending)
                return;
 
        deactivate_ba_entry(ieee, ba);
@@ -474,35 +474,35 @@ void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
        ba->dialog_token++;
        ba->ba_param_set.field.amsdu_support = 0;
        ba->ba_param_set.field.ba_policy = policy;
-       ba->ba_param_set.field.tid = ts->TsCommonInfo.TSpec.ucTSID;
+       ba->ba_param_set.field.tid = ts->ts_common_info.tspec.ts_id;
        ba->ba_param_set.field.buffer_size = 32;
        ba->ba_timeout_value = 0;
-       ba->ba_start_seq_ctrl.field.seq_num = (ts->TxCurSeq + 3) % 4096;
+       ba->ba_start_seq_ctrl.field.seq_num = (ts->tx_cur_seq + 3) % 4096;
 
        activate_ba_entry(ba, BA_SETUP_TIMEOUT);
 
-       rtllib_send_ADDBAReq(ieee, ts->TsCommonInfo.addr, ba);
+       rtllib_send_ADDBAReq(ieee, ts->ts_common_info.addr, ba);
 }
 
 void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
-                          struct ts_common_info *pTsCommonInfo,
+                          struct ts_common_info *ts_common_info,
                           enum tr_select TxRxSelect)
 {
        if (TxRxSelect == TX_DIR) {
-               struct tx_ts_record *pTxTs =
-                        (struct tx_ts_record *)pTsCommonInfo;
-
-               if (tx_ts_delete_ba(ieee, pTxTs))
-                       rtllib_send_DELBA(ieee, pTsCommonInfo->addr,
-                                         (pTxTs->TxAdmittedBARecord.b_valid) ?
-                                        (&pTxTs->TxAdmittedBARecord) :
-                                       (&pTxTs->TxPendingBARecord),
+               struct tx_ts_record *ts =
+                        (struct tx_ts_record *)ts_common_info;
+
+               if (tx_ts_delete_ba(ieee, ts))
+                       rtllib_send_DELBA(ieee, ts_common_info->addr,
+                                         (ts->tx_admitted_ba_record.b_valid) ?
+                                        (&ts->tx_admitted_ba_record) :
+                                       (&ts->tx_pending_ba_record),
                                         TxRxSelect, DELBA_REASON_END_BA);
        } else if (TxRxSelect == RX_DIR) {
                struct rx_ts_record *ts =
-                                (struct rx_ts_record *)pTsCommonInfo;
+                                (struct rx_ts_record *)ts_common_info;
                if (rx_ts_delete_ba(ieee, ts))
-                       rtllib_send_DELBA(ieee, pTsCommonInfo->addr,
+                       rtllib_send_DELBA(ieee, ts_common_info->addr,
                                          &ts->rx_admitted_ba_record,
                                          TxRxSelect, DELBA_REASON_END_BA);
        }
@@ -510,23 +510,23 @@ void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
 
 void rtllib_ba_setup_timeout(struct timer_list *t)
 {
-       struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
-                                             TxPendingBARecord.timer);
+       struct tx_ts_record *ts = from_timer(ts, t,
+                                             tx_pending_ba_record.timer);
 
-       pTxTs->bAddBaReqInProgress = false;
-       pTxTs->bAddBaReqDelayed = true;
-       pTxTs->TxPendingBARecord.b_valid = false;
+       ts->add_ba_req_in_progress = false;
+       ts->add_ba_req_delayed = true;
+       ts->tx_pending_ba_record.b_valid = false;
 }
 
 void rtllib_tx_ba_inact_timeout(struct timer_list *t)
 {
-       struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
-                                             TxAdmittedBARecord.timer);
-       struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
-                                    TxTsRecord[pTxTs->num]);
-       tx_ts_delete_ba(ieee, pTxTs);
-       rtllib_send_DELBA(ieee, pTxTs->TsCommonInfo.addr,
-                         &pTxTs->TxAdmittedBARecord, TX_DIR,
+       struct tx_ts_record *ts = from_timer(ts, t,
+                                             tx_admitted_ba_record.timer);
+       struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
+                                    tx_ts_records[ts->num]);
+       tx_ts_delete_ba(ieee, ts);
+       rtllib_send_DELBA(ieee, ts->ts_common_info.addr,
+                         &ts->tx_admitted_ba_record, TX_DIR,
                          DELBA_REASON_TIMEOUT);
 }
 
@@ -535,7 +535,7 @@ void rtllib_rx_ba_inact_timeout(struct timer_list *t)
        struct rx_ts_record *ts = from_timer(ts, t,
                                              rx_admitted_ba_record.timer);
        struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
-                                    RxTsRecord[ts->num]);
+                                    rx_ts_records[ts->num]);
 
        rx_ts_delete_ba(ieee, ts);
        rtllib_send_DELBA(ieee, ts->ts_common_info.addr,
index f8eb4d553fe02f1a4261f7e589f20e144d2ab9df..68577bffb936d079279e95ac4ee8e372da6c34d4 100644 (file)
@@ -94,51 +94,32 @@ enum ht_aggre_mode {
 struct rt_hi_throughput {
        u8 enable_ht;
        u8 current_ht_support;
-       u8 bRegBW40MHz;
-       u8 bCurBW40MHz;
-       u8 bRegShortGI40MHz;
-       u8 bCurShortGI40MHz;
-       u8 bRegShortGI20MHz;
-       u8 bCurShortGI20MHz;
-       u8 bRegSuppCCK;
-       u8 bCurSuppCCK;
-       enum ht_spec_ver ePeerHTSpecVer;
+       u8 cur_bw_40mhz;
+       u8 cur_short_gi_40mhz;
+       u8 cur_short_gi_20mhz;
+       enum ht_spec_ver peer_ht_spec_ver;
        struct ht_capab_ele SelfHTCap;
-       struct ht_info_ele SelfHTInfo;
        u8 PeerHTCapBuf[32];
        u8 PeerHTInfoBuf[32];
-       u8 bAMSDU_Support;
-       u16 nAMSDU_MaxSize;
-       u8 bCurrent_AMSDU_Support;
-       u16 nCurrent_AMSDU_MaxSize;
-       u8 bAMPDUEnable;
-       u8 bCurrentAMPDUEnable;
-       u8 AMPDU_Factor;
+       u8 ampdu_enable;
+       u8 current_ampdu_enable;
+       u8 ampdu_factor;
        u8 CurrentAMPDUFactor;
-       u8 MPDU_Density;
        u8 current_mpdu_density;
-       enum ht_aggre_mode ForcedAMPDUMode;
        u8 forced_ampdu_factor;
        u8 forced_mpdu_density;
-       enum ht_aggre_mode ForcedAMSDUMode;
-       u8 forced_short_gi;
        u8 current_op_mode;
-       u8 self_mimo_ps;
-       u8 peer_mimo_ps;
        enum ht_extchnl_offset CurSTAExtChnlOffset;
        u8 cur_tx_bw40mhz;
        u8 sw_bw_in_progress;
-       u8 reg_rt2rt_aggregation;
-       u8 RT2RT_HT_Mode;
        u8 current_rt2rt_aggregation;
        u8 current_rt2rt_long_slot_time;
        u8 sz_rt2rt_agg_buf[10];
-       u8 reg_rx_reorder_enable;
        u8 cur_rx_reorder_enable;
        u8 rx_reorder_win_size;
        u8 rx_reorder_pending_time;
        u16 rx_reorder_drop_counter;
-       u8 IOTPeer;
+       u8 iot_peer;
        u32 iot_action;
        u8 iot_ra_func;
 } __packed;
index e607bccc079a52f54e0f8459c577cea6fb1405d7..6d0912f90198f463685e9788f480f1efe6ae187f 100644 (file)
@@ -71,75 +71,55 @@ void ht_update_default_setting(struct rtllib_device *ieee)
 {
        struct rt_hi_throughput *ht_info = ieee->ht_info;
 
-       ht_info->bRegShortGI20MHz = 1;
-       ht_info->bRegShortGI40MHz = 1;
+       ht_info->ampdu_enable = 1;
+       ht_info->ampdu_factor = 2;
 
-       ht_info->bRegBW40MHz = 1;
-
-       if (ht_info->bRegBW40MHz)
-               ht_info->bRegSuppCCK = 1;
-       else
-               ht_info->bRegSuppCCK = true;
-
-       ht_info->nAMSDU_MaxSize = 7935UL;
-       ht_info->bAMSDU_Support = 0;
-
-       ht_info->bAMPDUEnable = 1;
-       ht_info->AMPDU_Factor = 2;
-       ht_info->MPDU_Density = 0;
-
-       ht_info->self_mimo_ps = 3;
-       if (ht_info->self_mimo_ps == 2)
-               ht_info->self_mimo_ps = 3;
        ieee->tx_dis_rate_fallback = 0;
        ieee->tx_use_drv_assinged_rate = 0;
 
-       ieee->bTxEnableFwCalcDur = 1;
-
-       ht_info->reg_rt2rt_aggregation = 1;
+       ieee->tx_enable_fw_calc_dur = 1;
 
-       ht_info->reg_rx_reorder_enable = 1;
        ht_info->rx_reorder_win_size = 64;
        ht_info->rx_reorder_pending_time = 30;
 }
 
-static u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
+static u16 ht_mcs_to_data_rate(struct rtllib_device *ieee, u8 mcs_rate)
 {
        struct rt_hi_throughput *ht_info = ieee->ht_info;
 
-       u8      is40MHz = (ht_info->bCurBW40MHz) ? 1 : 0;
-       u8      isShortGI = (ht_info->bCurBW40MHz) ?
-                           ((ht_info->bCurShortGI40MHz) ? 1 : 0) :
-                           ((ht_info->bCurShortGI20MHz) ? 1 : 0);
-       return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)];
+       u8      is40MHz = (ht_info->cur_bw_40mhz) ? 1 : 0;
+       u8      isShortGI = (ht_info->cur_bw_40mhz) ?
+                           ((ht_info->cur_short_gi_40mhz) ? 1 : 0) :
+                           ((ht_info->cur_short_gi_20mhz) ? 1 : 0);
+       return MCS_DATA_RATE[is40MHz][isShortGI][(mcs_rate & 0x7f)];
 }
 
-u16  TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate)
+u16  tx_count_to_data_rate(struct rtllib_device *ieee, u8 data_rate)
 {
-       u16     CCKOFDMRate[12] = {0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18,
+       u16     cck_of_dm_rate[12] = {0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18,
                                   0x24, 0x30, 0x48, 0x60, 0x6c};
        u8      is40MHz = 0;
        u8      isShortGI = 0;
 
-       if (nDataRate < 12)
-               return CCKOFDMRate[nDataRate];
-       if (nDataRate >= 0x10 && nDataRate <= 0x1f) {
+       if (data_rate < 12)
+               return cck_of_dm_rate[data_rate];
+       if (data_rate >= 0x10 && data_rate <= 0x1f) {
                is40MHz = 0;
                isShortGI = 0;
-       } else if (nDataRate >= 0x20  && nDataRate <= 0x2f) {
+       } else if (data_rate >= 0x20  && data_rate <= 0x2f) {
                is40MHz = 1;
                isShortGI = 0;
-       } else if (nDataRate >= 0x30  && nDataRate <= 0x3f) {
+       } else if (data_rate >= 0x30  && data_rate <= 0x3f) {
                is40MHz = 0;
                isShortGI = 1;
-       } else if (nDataRate >= 0x40  && nDataRate <= 0x4f) {
+       } else if (data_rate >= 0x40  && data_rate <= 0x4f) {
                is40MHz = 1;
                isShortGI = 1;
        }
-       return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate & 0xf];
+       return MCS_DATA_RATE[is40MHz][isShortGI][data_rate & 0xf];
 }
 
-bool IsHTHalfNmodeAPs(struct rtllib_device *ieee)
+bool is_ht_half_nmode_aps(struct rtllib_device *ieee)
 {
        bool                    retValue = false;
        struct rtllib_network *net = &ieee->current_network;
@@ -164,187 +144,171 @@ bool IsHTHalfNmodeAPs(struct rtllib_device *ieee)
        return retValue;
 }
 
-static void HTIOTPeerDetermine(struct rtllib_device *ieee)
+static void ht_iot_peer_determine(struct rtllib_device *ieee)
 {
        struct rt_hi_throughput *ht_info = ieee->ht_info;
        struct rtllib_network *net = &ieee->current_network;
 
        if (net->bssht.bd_rt2rt_aggregation) {
-               ht_info->IOTPeer = HT_IOT_PEER_REALTEK;
+               ht_info->iot_peer = HT_IOT_PEER_REALTEK;
                if (net->bssht.rt2rt_ht_mode & RT_HT_CAP_USE_92SE)
-                       ht_info->IOTPeer = HT_IOT_PEER_REALTEK_92SE;
+                       ht_info->iot_peer = HT_IOT_PEER_REALTEK_92SE;
                if (net->bssht.rt2rt_ht_mode & RT_HT_CAP_USE_SOFTAP)
-                       ht_info->IOTPeer = HT_IOT_PEER_92U_SOFTAP;
+                       ht_info->iot_peer = HT_IOT_PEER_92U_SOFTAP;
        } else if (net->broadcom_cap_exist) {
-               ht_info->IOTPeer = HT_IOT_PEER_BROADCOM;
+               ht_info->iot_peer = HT_IOT_PEER_BROADCOM;
        } else if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) ||
                 !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) ||
                 !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)) {
-               ht_info->IOTPeer = HT_IOT_PEER_BROADCOM;
+               ht_info->iot_peer = HT_IOT_PEER_BROADCOM;
        } else if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
                 (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
                 (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
                 (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
                 (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
                  net->ralink_cap_exist) {
-               ht_info->IOTPeer = HT_IOT_PEER_RALINK;
+               ht_info->iot_peer = HT_IOT_PEER_RALINK;
        } else if ((net->atheros_cap_exist) ||
                (memcmp(net->bssid, DLINK_ATHEROS_1, 3) == 0) ||
                (memcmp(net->bssid, DLINK_ATHEROS_2, 3) == 0)) {
-               ht_info->IOTPeer = HT_IOT_PEER_ATHEROS;
+               ht_info->iot_peer = HT_IOT_PEER_ATHEROS;
        } else if ((memcmp(net->bssid, CISCO_BROADCOM, 3) == 0) ||
                  net->cisco_cap_exist) {
-               ht_info->IOTPeer = HT_IOT_PEER_CISCO;
+               ht_info->iot_peer = HT_IOT_PEER_CISCO;
        } else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) ||
                  net->marvell_cap_exist) {
-               ht_info->IOTPeer = HT_IOT_PEER_MARVELL;
+               ht_info->iot_peer = HT_IOT_PEER_MARVELL;
        } else if (net->airgo_cap_exist) {
-               ht_info->IOTPeer = HT_IOT_PEER_AIRGO;
+               ht_info->iot_peer = HT_IOT_PEER_AIRGO;
        } else {
-               ht_info->IOTPeer = HT_IOT_PEER_UNKNOWN;
+               ht_info->iot_peer = HT_IOT_PEER_UNKNOWN;
        }
 
-       netdev_dbg(ieee->dev, "IOTPEER: %x\n", ht_info->IOTPeer);
+       netdev_dbg(ieee->dev, "IOTPEER: %x\n", ht_info->iot_peer);
 }
 
-static u8 HTIOTActIsMgntUseCCK6M(struct rtllib_device *ieee,
+static u8 ht_iot_act_is_mgnt_use_cck_6m(struct rtllib_device *ieee,
                                 struct rtllib_network *network)
 {
        u8      retValue = 0;
 
-       if (ieee->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM)
+       if (ieee->ht_info->iot_peer == HT_IOT_PEER_BROADCOM)
                retValue = 1;
 
        return retValue;
 }
 
-static u8 HTIOTActIsCCDFsync(struct rtllib_device *ieee)
+static u8 ht_iot_act_is_ccd_fsync(struct rtllib_device *ieee)
 {
        u8      retValue = 0;
 
-       if (ieee->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM)
+       if (ieee->ht_info->iot_peer == HT_IOT_PEER_BROADCOM)
                retValue = 1;
        return retValue;
 }
 
-static void HTIOTActDetermineRaFunc(struct rtllib_device *ieee, bool bPeerRx2ss)
+static void ht_iot_act_determine_ra_func(struct rtllib_device *ieee, bool bPeerRx2ss)
 {
        struct rt_hi_throughput *ht_info = ieee->ht_info;
 
        ht_info->iot_ra_func &= HT_IOT_RAFUNC_DISABLE_ALL;
 
-       if (ht_info->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss)
+       if (ht_info->iot_peer == HT_IOT_PEER_RALINK && !bPeerRx2ss)
                ht_info->iot_ra_func |= HT_IOT_RAFUNC_PEER_1R;
 
        if (ht_info->iot_action & HT_IOT_ACT_AMSDU_ENABLE)
                ht_info->iot_ra_func |= HT_IOT_RAFUNC_TX_AMSDU;
 }
 
-void HTResetIOTSetting(struct rt_hi_throughput *ht_info)
+void ht_reset_iot_setting(struct rt_hi_throughput *ht_info)
 {
        ht_info->iot_action = 0;
-       ht_info->IOTPeer = HT_IOT_PEER_UNKNOWN;
+       ht_info->iot_peer = HT_IOT_PEER_UNKNOWN;
        ht_info->iot_ra_func = 0;
 }
 
-void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
-                                 u8 *len, u8 IsEncrypt, bool bAssoc)
+void ht_construct_capability_element(struct rtllib_device *ieee, u8 *pos_ht_cap,
+                                 u8 *len, u8 is_encrypt, bool assoc)
 {
-       struct rt_hi_throughput *pHT = ieee->ht_info;
-       struct ht_capab_ele *pCapELE = NULL;
+       struct rt_hi_throughput *ht = ieee->ht_info;
+       struct ht_capab_ele *cap_ele = NULL;
 
-       if (!posHTCap || !pHT) {
+       if (!pos_ht_cap || !ht) {
                netdev_warn(ieee->dev,
                            "%s(): posHTCap and ht_info are null\n", __func__);
                return;
        }
-       memset(posHTCap, 0, *len);
+       memset(pos_ht_cap, 0, *len);
 
-       if ((bAssoc) && (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)) {
+       if ((assoc) && (ht->peer_ht_spec_ver == HT_SPEC_VER_EWC)) {
                static const u8 EWC11NHTCap[] = { 0x00, 0x90, 0x4c, 0x33 };
 
-               memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
-               pCapELE = (struct ht_capab_ele *)&posHTCap[4];
+               memcpy(pos_ht_cap, EWC11NHTCap, sizeof(EWC11NHTCap));
+               cap_ele = (struct ht_capab_ele *)&pos_ht_cap[4];
                *len = 30 + 2;
        } else {
-               pCapELE = (struct ht_capab_ele *)posHTCap;
+               cap_ele = (struct ht_capab_ele *)pos_ht_cap;
                *len = 26 + 2;
        }
 
-       pCapELE->AdvCoding              = 0;
+       cap_ele->AdvCoding              = 0;
        if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
-               pCapELE->ChlWidth = 0;
+               cap_ele->ChlWidth = 0;
        else
-               pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0);
+               cap_ele->ChlWidth = 1;
 
-       pCapELE->MimoPwrSave            = pHT->self_mimo_ps;
-       pCapELE->GreenField             = 0;
-       pCapELE->ShortGI20Mhz           = 1;
-       pCapELE->ShortGI40Mhz           = 1;
+       cap_ele->MimoPwrSave            = 3;
+       cap_ele->GreenField             = 0;
+       cap_ele->ShortGI20Mhz           = 1;
+       cap_ele->ShortGI40Mhz           = 1;
 
-       pCapELE->TxSTBC                 = 1;
-       pCapELE->RxSTBC                 = 0;
-       pCapELE->DelayBA                = 0;
-       pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE >= 7935) ? 1 : 0;
-       pCapELE->DssCCk = ((pHT->bRegBW40MHz) ? (pHT->bRegSuppCCK ? 1 : 0) : 0);
-       pCapELE->PSMP = 0;
-       pCapELE->LSigTxopProtect = 0;
+       cap_ele->TxSTBC                 = 1;
+       cap_ele->RxSTBC                 = 0;
+       cap_ele->DelayBA                = 0;
+       cap_ele->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE >= 7935) ? 1 : 0;
+       cap_ele->DssCCk = 1;
+       cap_ele->PSMP = 0;
+       cap_ele->LSigTxopProtect = 0;
 
        netdev_dbg(ieee->dev,
                   "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n",
-                  pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);
+                  cap_ele->ChlWidth, cap_ele->MaxAMSDUSize, cap_ele->DssCCk);
 
-       if (IsEncrypt) {
-               pCapELE->MPDUDensity    = 7;
-               pCapELE->MaxRxAMPDUFactor       = 2;
+       if (is_encrypt) {
+               cap_ele->MPDUDensity    = 7;
+               cap_ele->MaxRxAMPDUFactor       = 2;
        } else {
-               pCapELE->MaxRxAMPDUFactor       = 3;
-               pCapELE->MPDUDensity    = 0;
+               cap_ele->MaxRxAMPDUFactor       = 3;
+               cap_ele->MPDUDensity    = 0;
        }
 
-       memcpy(pCapELE->MCS, ieee->reg_dot11ht_oper_rate_set, 16);
-       memset(&pCapELE->ExtHTCapInfo, 0, 2);
-       memset(pCapELE->TxBFCap, 0, 4);
+       memcpy(cap_ele->MCS, ieee->reg_dot11ht_oper_rate_set, 16);
+       memset(&cap_ele->ExtHTCapInfo, 0, 2);
+       memset(cap_ele->TxBFCap, 0, 4);
 
-       pCapELE->ASCap = 0;
+       cap_ele->ASCap = 0;
 
-       if (bAssoc) {
-               if (pHT->iot_action & HT_IOT_ACT_DISABLE_MCS15)
-                       pCapELE->MCS[1] &= 0x7f;
+       if (assoc) {
+               if (ht->iot_action & HT_IOT_ACT_DISABLE_MCS15)
+                       cap_ele->MCS[1] &= 0x7f;
 
-               if (pHT->iot_action & HT_IOT_ACT_DISABLE_MCS14)
-                       pCapELE->MCS[1] &= 0xbf;
+               if (ht->iot_action & HT_IOT_ACT_DISABLE_MCS14)
+                       cap_ele->MCS[1] &= 0xbf;
 
-               if (pHT->iot_action & HT_IOT_ACT_DISABLE_ALL_2SS)
-                       pCapELE->MCS[1] &= 0x00;
+               if (ht->iot_action & HT_IOT_ACT_DISABLE_ALL_2SS)
+                       cap_ele->MCS[1] &= 0x00;
 
-               if (pHT->iot_action & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
-                       pCapELE->ShortGI40Mhz           = 0;
+               if (ht->iot_action & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
+                       cap_ele->ShortGI40Mhz           = 0;
 
                if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
-                       pCapELE->ChlWidth = 0;
-                       pCapELE->MCS[1] = 0;
+                       cap_ele->ChlWidth = 0;
+                       cap_ele->MCS[1] = 0;
                }
        }
 }
 
-void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
-                           u8 *len, u8 IsEncrypt)
-{
-       struct ht_info_ele *pHTInfoEle = (struct ht_info_ele *)posHTInfo;
-
-       if (!posHTInfo || !pHTInfoEle) {
-               netdev_warn(ieee->dev,
-                           "%s(): posHTInfo and pHTInfoEle are null\n",
-                           __func__);
-               return;
-       }
-
-       memset(posHTInfo, 0, *len);
-       *len = 0;
-}
-
-void HTConstructRT2RTAggElement(struct rtllib_device *ieee, u8 *posRT2RTAgg,
+void ht_construct_rt2rt_agg_element(struct rtllib_device *ieee, u8 *posRT2RTAgg,
                                u8 *len)
 {
        if (!posRT2RTAgg) {
@@ -366,7 +330,7 @@ void HTConstructRT2RTAggElement(struct rtllib_device *ieee, u8 *posRT2RTAgg,
        *len = 6 + 2;
 }
 
-static u8 HT_PickMCSRate(struct rtllib_device *ieee, u8 *pOperateMCS)
+static u8 ht_pick_mcs_rate(struct rtllib_device *ieee, u8 *pOperateMCS)
 {
        u8 i;
 
@@ -393,7 +357,7 @@ static u8 HT_PickMCSRate(struct rtllib_device *ieee, u8 *pOperateMCS)
        return true;
 }
 
-u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
+u8 ht_get_highest_mcs_rate(struct rtllib_device *ieee, u8 *pMCSRateSet,
                       u8 *pMCSFilter)
 {
        u8              i, j;
@@ -422,8 +386,8 @@ u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
                        bitMap = availableMcsRate[i];
                        for (j = 0; j < 8; j++) {
                                if ((bitMap % 2) != 0) {
-                                       if (HTMcsToDataRate(ieee, (8 * i + j)) >
-                                           HTMcsToDataRate(ieee, mcsRate))
+                                       if (ht_mcs_to_data_rate(ieee, (8 * i + j)) >
+                                           ht_mcs_to_data_rate(ieee, mcsRate))
                                                mcsRate = 8 * i + j;
                                }
                                bitMap >>= 1;
@@ -433,7 +397,7 @@ u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
        return mcsRate | 0x80;
 }
 
-static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
+static u8 ht_filter_mcs_rate(struct rtllib_device *ieee, u8 *pSupportMCS,
                          u8 *pOperateMCS)
 {
        u8 i;
@@ -442,7 +406,7 @@ static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
                pOperateMCS[i] = ieee->reg_dot11tx_ht_oper_rate_set[i] &
                                 pSupportMCS[i];
 
-       HT_PickMCSRate(ieee, pOperateMCS);
+       ht_pick_mcs_rate(ieee, pOperateMCS);
 
        if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
                pOperateMCS[1] = 0;
@@ -453,16 +417,15 @@ static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
        return true;
 }
 
-void HTSetConnectBwMode(struct rtllib_device *ieee,
+void ht_set_connect_bw_mode(struct rtllib_device *ieee,
                        enum ht_channel_width bandwidth,
                        enum ht_extchnl_offset Offset);
 
-void HTOnAssocRsp(struct rtllib_device *ieee)
+void ht_on_assoc_rsp(struct rtllib_device *ieee)
 {
        struct rt_hi_throughput *ht_info = ieee->ht_info;
        struct ht_capab_ele *pPeerHTCap = NULL;
        struct ht_info_ele *pPeerHTInfo = NULL;
-       u16 nMaxAMSDUSize = 0;
        u8 *pMcsFilter = NULL;
 
        static const u8 EWC11NHTCap[] = { 0x00, 0x90, 0x4c, 0x33 };
@@ -489,79 +452,48 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
        print_hex_dump_bytes("%s: ", __func__, DUMP_PREFIX_NONE,
                             pPeerHTCap, sizeof(struct ht_capab_ele));
 #endif
-       HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth),
+       ht_set_connect_bw_mode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth),
                           (enum ht_extchnl_offset)(pPeerHTInfo->ExtChlOffset));
        ht_info->cur_tx_bw40mhz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ?
                                 true : false);
 
-       ht_info->bCurShortGI20MHz = ((ht_info->bRegShortGI20MHz) ?
-                                   ((pPeerHTCap->ShortGI20Mhz == 1) ?
-                                   true : false) : false);
-       ht_info->bCurShortGI40MHz = ((ht_info->bRegShortGI40MHz) ?
-                                    ((pPeerHTCap->ShortGI40Mhz == 1) ?
-                                    true : false) : false);
+       ht_info->cur_short_gi_20mhz = ((pPeerHTCap->ShortGI20Mhz == 1) ? true : false);
+       ht_info->cur_short_gi_40mhz = ((pPeerHTCap->ShortGI40Mhz == 1) ? true : false);
 
-       ht_info->bCurSuppCCK = ((ht_info->bRegSuppCCK) ?
-                              ((pPeerHTCap->DssCCk == 1) ? true :
-                              false) : false);
-
-       ht_info->bCurrent_AMSDU_Support = ht_info->bAMSDU_Support;
-
-       nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize == 0) ? 3839 : 7935;
-
-       if (ht_info->nAMSDU_MaxSize > nMaxAMSDUSize)
-               ht_info->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
-       else
-               ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize;
-
-       ht_info->bCurrentAMPDUEnable = ht_info->bAMPDUEnable;
+       ht_info->current_ampdu_enable = ht_info->ampdu_enable;
        if (ieee->rtllib_ap_sec_type &&
            (ieee->rtllib_ap_sec_type(ieee) & (SEC_ALG_WEP | SEC_ALG_TKIP))) {
-               if ((ht_info->IOTPeer == HT_IOT_PEER_ATHEROS) ||
-                   (ht_info->IOTPeer == HT_IOT_PEER_UNKNOWN))
-                       ht_info->bCurrentAMPDUEnable = false;
+               if ((ht_info->iot_peer == HT_IOT_PEER_ATHEROS) ||
+                   (ht_info->iot_peer == HT_IOT_PEER_UNKNOWN))
+                       ht_info->current_ampdu_enable = false;
        }
 
-       if (!ht_info->reg_rt2rt_aggregation) {
-               if (ht_info->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
+       if (ieee->current_network.bssht.bd_rt2rt_aggregation) {
+               if (ieee->pairwise_key_type != KEY_TYPE_NA)
                        ht_info->CurrentAMPDUFactor =
-                                                pPeerHTCap->MaxRxAMPDUFactor;
+                                        pPeerHTCap->MaxRxAMPDUFactor;
                else
-                       ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor;
-
+                       ht_info->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
        } else {
-               if (ieee->current_network.bssht.bd_rt2rt_aggregation) {
-                       if (ieee->pairwise_key_type != KEY_TYPE_NA)
-                               ht_info->CurrentAMPDUFactor =
-                                                pPeerHTCap->MaxRxAMPDUFactor;
-                       else
-                               ht_info->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
-               } else {
-                       ht_info->CurrentAMPDUFactor = min_t(u32, pPeerHTCap->MaxRxAMPDUFactor,
-                                                           HT_AGG_SIZE_32K);
-               }
+               ht_info->CurrentAMPDUFactor = min_t(u32, pPeerHTCap->MaxRxAMPDUFactor,
+                                                   HT_AGG_SIZE_32K);
        }
-       ht_info->current_mpdu_density = max_t(u8, ht_info->MPDU_Density,
-                                             pPeerHTCap->MPDUDensity);
+
+       ht_info->current_mpdu_density = pPeerHTCap->MPDUDensity;
        if (ht_info->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K) {
-               ht_info->bCurrentAMPDUEnable = false;
-               ht_info->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
+               ht_info->current_ampdu_enable = false;
        }
-       ht_info->cur_rx_reorder_enable = ht_info->reg_rx_reorder_enable;
+       ht_info->cur_rx_reorder_enable = 1;
 
        if (pPeerHTCap->MCS[0] == 0)
                pPeerHTCap->MCS[0] = 0xff;
 
-       HTIOTActDetermineRaFunc(ieee, ((pPeerHTCap->MCS[1]) != 0));
+       ht_iot_act_determine_ra_func(ieee, ((pPeerHTCap->MCS[1]) != 0));
 
-       HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11ht_oper_rate_set);
+       ht_filter_mcs_rate(ieee, pPeerHTCap->MCS, ieee->dot11ht_oper_rate_set);
 
-       ht_info->peer_mimo_ps = pPeerHTCap->MimoPwrSave;
-       if (ht_info->peer_mimo_ps == MIMO_PS_STATIC)
-               pMcsFilter = MCS_FILTER_1SS;
-       else
-               pMcsFilter = MCS_FILTER_ALL;
-       ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee,
+       pMcsFilter = MCS_FILTER_ALL;
+       ieee->HTHighestOperaRate = ht_get_highest_mcs_rate(ieee,
                                                       ieee->dot11ht_oper_rate_set,
                                                       pMcsFilter);
        ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
@@ -569,30 +501,23 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
        ht_info->current_op_mode = pPeerHTInfo->OptMode;
 }
 
-void HTInitializeHTInfo(struct rtllib_device *ieee)
+void ht_initialize_ht_info(struct rtllib_device *ieee)
 {
        struct rt_hi_throughput *ht_info = ieee->ht_info;
 
        ht_info->current_ht_support = false;
 
-       ht_info->bCurBW40MHz = false;
+       ht_info->cur_bw_40mhz = false;
        ht_info->cur_tx_bw40mhz = false;
 
-       ht_info->bCurShortGI20MHz = false;
-       ht_info->bCurShortGI40MHz = false;
-       ht_info->forced_short_gi = false;
+       ht_info->cur_short_gi_20mhz = false;
+       ht_info->cur_short_gi_40mhz = false;
 
-       ht_info->bCurSuppCCK = true;
-
-       ht_info->bCurrent_AMSDU_Support = false;
-       ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize;
-       ht_info->current_mpdu_density = ht_info->MPDU_Density;
-       ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor;
+       ht_info->current_mpdu_density = 0;
+       ht_info->CurrentAMPDUFactor = ht_info->ampdu_factor;
 
        memset((void *)(&ht_info->SelfHTCap), 0,
               sizeof(ht_info->SelfHTCap));
-       memset((void *)(&ht_info->SelfHTInfo), 0,
-              sizeof(ht_info->SelfHTInfo));
        memset((void *)(&ht_info->PeerHTCapBuf), 0,
               sizeof(ht_info->PeerHTCapBuf));
        memset((void *)(&ht_info->PeerHTInfoBuf), 0,
@@ -600,13 +525,12 @@ void HTInitializeHTInfo(struct rtllib_device *ieee)
 
        ht_info->sw_bw_in_progress = false;
 
-       ht_info->ePeerHTSpecVer = HT_SPEC_VER_IEEE;
+       ht_info->peer_ht_spec_ver = HT_SPEC_VER_IEEE;
 
        ht_info->current_rt2rt_aggregation = false;
        ht_info->current_rt2rt_long_slot_time = false;
-       ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0;
 
-       ht_info->IOTPeer = 0;
+       ht_info->iot_peer = 0;
        ht_info->iot_action = 0;
        ht_info->iot_ra_func = 0;
 
@@ -619,7 +543,7 @@ void HTInitializeHTInfo(struct rtllib_device *ieee)
        }
 }
 
-void HTInitializeBssDesc(struct bss_ht *pBssHT)
+void ht_initialize_bss_desc(struct bss_ht *pBssHT)
 {
        pBssHT->bd_support_ht = false;
        memset(pBssHT->bd_ht_cap_buf, 0, sizeof(pBssHT->bd_ht_cap_buf));
@@ -634,7 +558,7 @@ void HTInitializeBssDesc(struct bss_ht *pBssHT)
        pBssHT->rt2rt_ht_mode = (enum rt_ht_capability)0;
 }
 
-void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
+void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
                                   struct rtllib_network *pNetwork)
 {
        struct rt_hi_throughput *ht_info = ieee->ht_info;
@@ -645,7 +569,7 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
         */
        if (pNetwork->bssht.bd_support_ht) {
                ht_info->current_ht_support = true;
-               ht_info->ePeerHTSpecVer = pNetwork->bssht.bd_ht_spec_ver;
+               ht_info->peer_ht_spec_ver = pNetwork->bssht.bd_ht_spec_ver;
 
                if (pNetwork->bssht.bd_ht_cap_len > 0 &&
                    pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->PeerHTCapBuf))
@@ -660,32 +584,24 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
                               pNetwork->bssht.bd_ht_info_buf,
                               pNetwork->bssht.bd_ht_info_len);
 
-               if (ht_info->reg_rt2rt_aggregation) {
-                       ht_info->current_rt2rt_aggregation =
-                                pNetwork->bssht.bd_rt2rt_aggregation;
-                       ht_info->current_rt2rt_long_slot_time =
-                                pNetwork->bssht.bd_rt2rt_long_slot_time;
-                       ht_info->RT2RT_HT_Mode = pNetwork->bssht.rt2rt_ht_mode;
-               } else {
-                       ht_info->current_rt2rt_aggregation = false;
-                       ht_info->current_rt2rt_long_slot_time = false;
-                       ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0;
-               }
+               ht_info->current_rt2rt_aggregation =
+                        pNetwork->bssht.bd_rt2rt_aggregation;
+               ht_info->current_rt2rt_long_slot_time =
+                        pNetwork->bssht.bd_rt2rt_long_slot_time;
 
-               HTIOTPeerDetermine(ieee);
+               ht_iot_peer_determine(ieee);
 
                ht_info->iot_action = 0;
-               bIOTAction = HTIOTActIsMgntUseCCK6M(ieee, pNetwork);
+               bIOTAction = ht_iot_act_is_mgnt_use_cck_6m(ieee, pNetwork);
                if (bIOTAction)
                        ht_info->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M;
-               bIOTAction = HTIOTActIsCCDFsync(ieee);
+               bIOTAction = ht_iot_act_is_ccd_fsync(ieee);
                if (bIOTAction)
                        ht_info->iot_action |= HT_IOT_ACT_CDD_FSYNC;
        } else {
                ht_info->current_ht_support = false;
                ht_info->current_rt2rt_aggregation = false;
                ht_info->current_rt2rt_long_slot_time = false;
-               ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0;
 
                ht_info->iot_action = 0;
                ht_info->iot_ra_func = 0;
@@ -706,7 +622,7 @@ void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
 }
 EXPORT_SYMBOL(HT_update_self_and_peer_setting);
 
-u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame)
+u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame)
 {
        if (ieee->ht_info->current_ht_support) {
                if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) {
@@ -717,11 +633,11 @@ u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame)
        return false;
 }
 
-static void HTSetConnectBwModeCallback(struct rtllib_device *ieee)
+static void ht_set_connect_bw_mode_callback(struct rtllib_device *ieee)
 {
        struct rt_hi_throughput *ht_info = ieee->ht_info;
 
-       if (ht_info->bCurBW40MHz) {
+       if (ht_info->cur_bw_40mhz) {
                if (ht_info->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER)
                        ieee->set_chan(ieee->dev,
                                       ieee->current_network.channel + 2);
@@ -744,15 +660,12 @@ static void HTSetConnectBwModeCallback(struct rtllib_device *ieee)
        ht_info->sw_bw_in_progress = false;
 }
 
-void HTSetConnectBwMode(struct rtllib_device *ieee,
+void ht_set_connect_bw_mode(struct rtllib_device *ieee,
                        enum ht_channel_width bandwidth,
                        enum ht_extchnl_offset Offset)
 {
        struct rt_hi_throughput *ht_info = ieee->ht_info;
 
-       if (!ht_info->bRegBW40MHz)
-               return;
-
        if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
                bandwidth = HT_CHANNEL_WIDTH_20;
 
@@ -766,21 +679,21 @@ void HTSetConnectBwMode(struct rtllib_device *ieee,
                        Offset = HT_EXTCHNL_OFFSET_NO_EXT;
                if (Offset == HT_EXTCHNL_OFFSET_UPPER ||
                    Offset == HT_EXTCHNL_OFFSET_LOWER) {
-                       ht_info->bCurBW40MHz = true;
+                       ht_info->cur_bw_40mhz = true;
                        ht_info->CurSTAExtChnlOffset = Offset;
                } else {
-                       ht_info->bCurBW40MHz = false;
+                       ht_info->cur_bw_40mhz = false;
                        ht_info->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
                }
        } else {
-               ht_info->bCurBW40MHz = false;
+               ht_info->cur_bw_40mhz = false;
                ht_info->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
        }
 
        netdev_dbg(ieee->dev, "%s():ht_info->bCurBW40MHz:%x\n", __func__,
-                  ht_info->bCurBW40MHz);
+                  ht_info->cur_bw_40mhz);
 
        ht_info->sw_bw_in_progress = true;
 
-       HTSetConnectBwModeCallback(ieee);
+       ht_set_connect_bw_mode_callback(ieee);
 }
index 1c00092ea3a59ad161d9c7582299fe07b0981eea..50e01ca49a4ce5bf17004f3ac74e33ec60082689 100644 (file)
@@ -8,7 +8,7 @@
 #define __INC_QOS_TYPE_H
 
 struct qos_tsinfo {
-       u8              ucTSID:4;
+       u8              ts_id:4;
        u8              ucDirection:2;
 };
 
index fff36315f1744fd20d79b355e1edcecd8634bd06..5b0e4cb572d26f373c0bec55bb9fb1b7d32a5b33 100644 (file)
@@ -17,21 +17,21 @@ enum tr_select {
 };
 
 struct ts_common_info {
-       struct list_head                List;
+       struct list_head                list;
        u8                              addr[ETH_ALEN];
-       struct qos_tsinfo TSpec;
+       struct qos_tsinfo tspec;
 };
 
 struct tx_ts_record {
-       struct ts_common_info TsCommonInfo;
-       u16                             TxCurSeq;
-       struct ba_record TxPendingBARecord;
-       struct ba_record TxAdmittedBARecord;
-       u8                              bAddBaReqInProgress;
-       u8                              bAddBaReqDelayed;
-       u8                              bUsingBa;
-       u8                              bDisable_AddBa;
-       struct timer_list               TsAddBaTimer;
+       struct ts_common_info ts_common_info;
+       u16                             tx_cur_seq;
+       struct ba_record tx_pending_ba_record;
+       struct ba_record tx_admitted_ba_record;
+       u8                              add_ba_req_in_progress;
+       u8                              add_ba_req_delayed;
+       u8                              using_ba;
+       u8                              disable_add_ba;
+       struct timer_list               ts_add_ba_timer;
        u8                              num;
 };
 
index 3206fdb3e65ab5252ed939adfda9425161abf957..7e73d31dcccfcf1f5cc5d20bc24bdb4ecb45d02d 100644 (file)
@@ -12,7 +12,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
 {
        struct rx_ts_record *ts = from_timer(ts, t, rx_pkt_pending_timer);
        struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
-                                                 RxTsRecord[ts->num]);
+                                                 rx_ts_records[ts->num]);
 
        struct rx_reorder_entry *pReorderEntry = NULL;
 
@@ -25,7 +25,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
                while (!list_empty(&ts->rx_pending_pkt_list)) {
                        pReorderEntry = (struct rx_reorder_entry *)
                                        list_entry(ts->rx_pending_pkt_list.prev,
-                                       struct rx_reorder_entry, List);
+                                       struct rx_reorder_entry, list);
                        if (index == 0)
                                ts->rx_indicate_seq = pReorderEntry->SeqNum;
 
@@ -33,7 +33,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
                                    ts->rx_indicate_seq) ||
                            SN_EQUAL(pReorderEntry->SeqNum,
                                     ts->rx_indicate_seq)) {
-                               list_del_init(&pReorderEntry->List);
+                               list_del_init(&pReorderEntry->list);
 
                                if (SN_EQUAL(pReorderEntry->SeqNum,
                                    ts->rx_indicate_seq))
@@ -47,7 +47,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
                                                         pReorderEntry->prxb;
                                index++;
 
-                               list_add_tail(&pReorderEntry->List,
+                               list_add_tail(&pReorderEntry->list,
                                              &ieee->RxReorder_Unused_List);
                        } else {
                                bPktInBuf = true;
@@ -82,31 +82,31 @@ static void RxPktPendingTimeout(struct timer_list *t)
 
 static void TsAddBaProcess(struct timer_list *t)
 {
-       struct tx_ts_record *pTxTs = from_timer(pTxTs, t, TsAddBaTimer);
-       u8 num = pTxTs->num;
-       struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
-                                    TxTsRecord[num]);
+       struct tx_ts_record *ts = from_timer(ts, t, ts_add_ba_timer);
+       u8 num = ts->num;
+       struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
+                                    tx_ts_records[num]);
 
-       rtllib_ts_init_add_ba(ieee, pTxTs, BA_POLICY_IMMEDIATE, false);
+       rtllib_ts_init_add_ba(ieee, ts, BA_POLICY_IMMEDIATE, false);
        netdev_dbg(ieee->dev, "%s(): ADDBA Req is started\n", __func__);
 }
 
-static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
+static void ResetTsCommonInfo(struct ts_common_info *ts_common_info)
 {
-       eth_zero_addr(pTsCommonInfo->addr);
-       memset(&pTsCommonInfo->TSpec, 0, sizeof(struct qos_tsinfo));
+       eth_zero_addr(ts_common_info->addr);
+       memset(&ts_common_info->tspec, 0, sizeof(struct qos_tsinfo));
 }
 
 static void ResetTxTsEntry(struct tx_ts_record *ts)
 {
-       ResetTsCommonInfo(&ts->TsCommonInfo);
-       ts->TxCurSeq = 0;
-       ts->bAddBaReqInProgress = false;
-       ts->bAddBaReqDelayed = false;
-       ts->bUsingBa = false;
-       ts->bDisable_AddBa = false;
-       rtllib_reset_ba_entry(&ts->TxAdmittedBARecord);
-       rtllib_reset_ba_entry(&ts->TxPendingBARecord);
+       ResetTsCommonInfo(&ts->ts_common_info);
+       ts->tx_cur_seq = 0;
+       ts->add_ba_req_in_progress = false;
+       ts->add_ba_req_delayed = false;
+       ts->using_ba = false;
+       ts->disable_add_ba = false;
+       rtllib_reset_ba_entry(&ts->tx_admitted_ba_record);
+       rtllib_reset_ba_entry(&ts->tx_pending_ba_record);
 }
 
 static void ResetRxTsEntry(struct rx_ts_record *ts)
@@ -119,8 +119,8 @@ static void ResetRxTsEntry(struct rx_ts_record *ts)
 
 void rtllib_ts_init(struct rtllib_device *ieee)
 {
-       struct tx_ts_record *pTxTS  = ieee->TxTsRecord;
-       struct rx_ts_record *rxts  = ieee->RxTsRecord;
+       struct tx_ts_record *pTxTS  = ieee->tx_ts_records;
+       struct rx_ts_record *rxts  = ieee->rx_ts_records;
        struct rx_reorder_entry *pRxReorderEntry = ieee->RxReorderEntry;
        u8                              count = 0;
 
@@ -130,15 +130,15 @@ void rtllib_ts_init(struct rtllib_device *ieee)
 
        for (count = 0; count < TOTAL_TS_NUM; count++) {
                pTxTS->num = count;
-               timer_setup(&pTxTS->TsAddBaTimer, TsAddBaProcess, 0);
+               timer_setup(&pTxTS->ts_add_ba_timer, TsAddBaProcess, 0);
 
-               timer_setup(&pTxTS->TxPendingBARecord.timer, rtllib_ba_setup_timeout,
+               timer_setup(&pTxTS->tx_pending_ba_record.timer, rtllib_ba_setup_timeout,
                            0);
-               timer_setup(&pTxTS->TxAdmittedBARecord.timer,
+               timer_setup(&pTxTS->tx_admitted_ba_record.timer,
                            rtllib_tx_ba_inact_timeout, 0);
 
                ResetTxTsEntry(pTxTS);
-               list_add_tail(&pTxTS->TsCommonInfo.List,
+               list_add_tail(&pTxTS->ts_common_info.list,
                                &ieee->Tx_TS_Unused_List);
                pTxTS++;
        }
@@ -155,13 +155,13 @@ void rtllib_ts_init(struct rtllib_device *ieee)
                timer_setup(&rxts->rx_pkt_pending_timer, RxPktPendingTimeout, 0);
 
                ResetRxTsEntry(rxts);
-               list_add_tail(&rxts->ts_common_info.List,
+               list_add_tail(&rxts->ts_common_info.list,
                              &ieee->Rx_TS_Unused_List);
                rxts++;
        }
        INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
        for (count = 0; count < REORDER_ENTRY_NUM; count++) {
-               list_add_tail(&pRxReorderEntry->List,
+               list_add_tail(&pRxReorderEntry->list,
                              &ieee->RxReorder_Unused_List);
                if (count == (REORDER_ENTRY_NUM - 1))
                        break;
@@ -196,31 +196,31 @@ static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
        for (dir = 0; dir <= DIR_BI_DIR; dir++) {
                if (!search_dir[dir])
                        continue;
-               list_for_each_entry(pRet, psearch_list, List) {
+               list_for_each_entry(pRet, psearch_list, list) {
                        if (memcmp(pRet->addr, addr, 6) == 0 &&
-                           pRet->TSpec.ucTSID == TID &&
-                           pRet->TSpec.ucDirection == dir)
+                           pRet->tspec.ts_id == TID &&
+                           pRet->tspec.ucDirection == dir)
                                break;
                }
-               if (&pRet->List  != psearch_list)
+               if (&pRet->list  != psearch_list)
                        break;
        }
 
-       if (pRet && &pRet->List  != psearch_list)
+       if (pRet && &pRet->list  != psearch_list)
                return pRet;
        return NULL;
 }
 
-static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *addr,
+static void MakeTSEntry(struct ts_common_info *ts_common_info, u8 *addr,
                        struct qos_tsinfo *pTSPEC)
 {
-       if (!pTsCommonInfo)
+       if (!ts_common_info)
                return;
 
-       memcpy(pTsCommonInfo->addr, addr, 6);
+       memcpy(ts_common_info->addr, addr, 6);
 
        if (pTSPEC)
-               memcpy((u8 *)(&(pTsCommonInfo->TSpec)), (u8 *)pTSPEC,
+               memcpy((u8 *)(&(ts_common_info->tspec)), (u8 *)pTSPEC,
                        sizeof(struct qos_tsinfo));
 }
 
@@ -228,8 +228,8 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
           u8 *addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
 {
        u8      UP = 0;
-       struct qos_tsinfo TSpec;
-       struct qos_tsinfo *ts_info = &TSpec;
+       struct qos_tsinfo tspec;
+       struct qos_tsinfo *ts_info = &tspec;
        struct list_head *pUnusedList;
        struct list_head *pAddmitList;
        enum direction_value Dir;
@@ -286,13 +286,13 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
 
        if (!list_empty(pUnusedList)) {
                (*ppTS) = list_entry(pUnusedList->next,
-                         struct ts_common_info, List);
-               list_del_init(&(*ppTS)->List);
+                         struct ts_common_info, list);
+               list_del_init(&(*ppTS)->list);
                if (TxRxSelect == TX_DIR) {
                        struct tx_ts_record *tmp =
                                container_of(*ppTS,
                                struct tx_ts_record,
-                               TsCommonInfo);
+                               ts_common_info);
                        ResetTxTsEntry(tmp);
                } else {
                        struct rx_ts_record *ts =
@@ -305,11 +305,11 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
                netdev_dbg(ieee->dev,
                           "to init current TS, UP:%d, Dir:%d, addr: %pM ppTs=%p\n",
                           UP, Dir, addr, *ppTS);
-               ts_info->ucTSID = UP;
+               ts_info->ts_id = UP;
                ts_info->ucDirection = Dir;
 
-               MakeTSEntry(*ppTS, addr, &TSpec);
-               list_add_tail(&((*ppTS)->List), pAddmitList);
+               MakeTSEntry(*ppTS, addr, &tspec);
+               list_add_tail(&((*ppTS)->list), pAddmitList);
 
                return true;
        }
@@ -335,10 +335,10 @@ static void RemoveTsEntry(struct rtllib_device *ieee,
                while (!list_empty(&ts->rx_pending_pkt_list)) {
                        pRxReorderEntry = (struct rx_reorder_entry *)
                                        list_entry(ts->rx_pending_pkt_list.prev,
-                                       struct rx_reorder_entry, List);
+                                       struct rx_reorder_entry, list);
                        netdev_dbg(ieee->dev,  "%s(): Delete SeqNum %d!\n",
                                   __func__, pRxReorderEntry->SeqNum);
-                       list_del_init(&pRxReorderEntry->List);
+                       list_del_init(&pRxReorderEntry->list);
                        {
                                int i = 0;
                                struct rtllib_rxb *prxb = pRxReorderEntry->prxb;
@@ -350,13 +350,13 @@ static void RemoveTsEntry(struct rtllib_device *ieee,
                                kfree(prxb);
                                prxb = NULL;
                        }
-                       list_add_tail(&pRxReorderEntry->List,
+                       list_add_tail(&pRxReorderEntry->list,
                                      &ieee->RxReorder_Unused_List);
                }
        } else {
                struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;
 
-               del_timer_sync(&pTxTS->TsAddBaTimer);
+               del_timer_sync(&pTxTS->ts_add_ba_timer);
        }
 }
 
@@ -366,37 +366,37 @@ void RemovePeerTS(struct rtllib_device *ieee, u8 *addr)
 
        netdev_info(ieee->dev, "===========>%s, %pM\n", __func__, addr);
 
-       list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
+       list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Pending_List, list) {
                if (memcmp(ts->addr, addr, 6) == 0) {
                        RemoveTsEntry(ieee, ts, TX_DIR);
-                       list_del_init(&ts->List);
-                       list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
+                       list_del_init(&ts->list);
+                       list_add_tail(&ts->list, &ieee->Tx_TS_Unused_List);
                }
        }
 
-       list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
+       list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Admit_List, list) {
                if (memcmp(ts->addr, addr, 6) == 0) {
                        netdev_info(ieee->dev,
                                    "====>remove Tx_TS_admin_list\n");
                        RemoveTsEntry(ieee, ts, TX_DIR);
-                       list_del_init(&ts->List);
-                       list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
+                       list_del_init(&ts->list);
+                       list_add_tail(&ts->list, &ieee->Tx_TS_Unused_List);
                }
        }
 
-       list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
+       list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Pending_List, list) {
                if (memcmp(ts->addr, addr, 6) == 0) {
                        RemoveTsEntry(ieee, ts, RX_DIR);
-                       list_del_init(&ts->List);
-                       list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
+                       list_del_init(&ts->list);
+                       list_add_tail(&ts->list, &ieee->Rx_TS_Unused_List);
                }
        }
 
-       list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
+       list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Admit_List, list) {
                if (memcmp(ts->addr, addr, 6) == 0) {
                        RemoveTsEntry(ieee, ts, RX_DIR);
-                       list_del_init(&ts->List);
-                       list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
+                       list_del_init(&ts->list);
+                       list_add_tail(&ts->list, &ieee->Rx_TS_Unused_List);
                }
        }
 }
@@ -406,43 +406,43 @@ void RemoveAllTS(struct rtllib_device *ieee)
 {
        struct ts_common_info *ts, *pTmpTS;
 
-       list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
+       list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Pending_List, list) {
                RemoveTsEntry(ieee, ts, TX_DIR);
-               list_del_init(&ts->List);
-               list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
+               list_del_init(&ts->list);
+               list_add_tail(&ts->list, &ieee->Tx_TS_Unused_List);
        }
 
-       list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
+       list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Admit_List, list) {
                RemoveTsEntry(ieee, ts, TX_DIR);
-               list_del_init(&ts->List);
-               list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
+               list_del_init(&ts->list);
+               list_add_tail(&ts->list, &ieee->Tx_TS_Unused_List);
        }
 
-       list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
+       list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Pending_List, list) {
                RemoveTsEntry(ieee, ts, RX_DIR);
-               list_del_init(&ts->List);
-               list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
+               list_del_init(&ts->list);
+               list_add_tail(&ts->list, &ieee->Rx_TS_Unused_List);
        }
 
-       list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
+       list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Admit_List, list) {
                RemoveTsEntry(ieee, ts, RX_DIR);
-               list_del_init(&ts->List);
-               list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
+               list_del_init(&ts->list);
+               list_add_tail(&ts->list, &ieee->Rx_TS_Unused_List);
        }
 }
 
 void TsStartAddBaProcess(struct rtllib_device *ieee, struct tx_ts_record *pTxTS)
 {
-       if (pTxTS->bAddBaReqInProgress == false) {
-               pTxTS->bAddBaReqInProgress = true;
+       if (pTxTS->add_ba_req_in_progress == false) {
+               pTxTS->add_ba_req_in_progress = true;
 
-               if (pTxTS->bAddBaReqDelayed) {
+               if (pTxTS->add_ba_req_delayed) {
                        netdev_dbg(ieee->dev, "Start ADDBA after 60 sec!!\n");
-                       mod_timer(&pTxTS->TsAddBaTimer, jiffies +
+                       mod_timer(&pTxTS->ts_add_ba_timer, jiffies +
                                  msecs_to_jiffies(TS_ADDBA_DELAY));
                } else {
                        netdev_dbg(ieee->dev, "Immediately Start ADDBA\n");
-                       mod_timer(&pTxTS->TsAddBaTimer, jiffies + 10);
+                       mod_timer(&pTxTS->ts_add_ba_timer, jiffies + 10);
                }
        } else {
                netdev_dbg(ieee->dev, "BA timer is already added\n");
index d2cf3cfaaaba31f641dfa3a05c3ecb90699e8f26..7b39a1987fdd69183ee45476d33b1cd6e8568271 100644 (file)
@@ -103,9 +103,6 @@ struct cb_desc {
        /* Tx Desc Related flags (8-9) */
        u8 bLastIniPkt:1;
        u8 bCmdOrInit:1;
-       u8 bFirstSeg:1;
-       u8 bLastSeg:1;
-       u8 bEncrypt:1;
        u8 tx_dis_rate_fallback:1;
        u8 tx_use_drv_assinged_rate:1;
        u8 bHwSec:1;
@@ -117,8 +114,8 @@ struct cb_desc {
        u8 bRTSEnable:1;
        u8 bUseShortGI:1;
        u8 bUseShortPreamble:1;
-       u8 bTxEnableFwCalcDur:1;
-       u8 bAMPDUEnable:1;
+       u8 tx_enable_fw_calc_dur:1;
+       u8 ampdu_enable:1;
        u8 bRTSSTBC:1;
        u8 RTSSC:1;
 
@@ -139,7 +136,6 @@ struct cb_desc {
        u8 bAMSDU:1;
        u8 bFromAggrQ:1;
        u8 reserved6:6;
-       u8 macId;
        u8 priority;
 
        /* Tx firmware related element(20-27) */
@@ -471,7 +467,6 @@ enum _REG_PREAMBLE_MODE {
  *       any adverse affects.
  */
 struct rtllib_rx_stats {
-       u64 mac_time;
        s8  rssi;
        u8  signal;
        u8  noise;
@@ -1047,7 +1042,7 @@ struct bandwidth_autoswitch {
 #define REORDER_WIN_SIZE       128
 #define REORDER_ENTRY_NUM      128
 struct rx_reorder_entry {
-       struct list_head        List;
+       struct list_head        list;
        u16                     SeqNum;
        struct rtllib_rxb *prxb;
 };
@@ -1123,8 +1118,8 @@ struct rt_link_detect {
        u16                             SlotNum;
        u16                             SlotIndex;
 
-       u32                             NumTxOkInPeriod;
-       u32                             NumRxOkInPeriod;
+       u32                             num_tx_ok_in_period;
+       u32                             num_rx_ok_in_period;
        u32                             NumRxUnicastOkInPeriod;
        bool                            bBusyTraffic;
        bool                            bHigherBusyTraffic;
@@ -1169,7 +1164,7 @@ struct rt_pmkid_list {
        u8 Bssid[ETH_ALEN];
        u8 PMKID[16];
        u8 SsidBuf[33];
-       u8 bUsed;
+       u8 used;
 };
 
 /*************** DRIVER STATUS   *****/
@@ -1192,7 +1187,7 @@ struct rtllib_device {
        unsigned long status;
        u8      CntAfterLink;
 
-       enum rt_op_mode OpMode;
+       enum rt_op_mode op_mode;
 
        /* The last AssocReq/Resp IEs */
        u8 *assocreq_ies, *assocresp_ies;
@@ -1224,17 +1219,17 @@ struct rtllib_device {
        u8      HTHighestOperaRate;
        u8      tx_dis_rate_fallback;
        u8      tx_use_drv_assinged_rate;
-       u8      bTxEnableFwCalcDur;
+       u8      tx_enable_fw_calc_dur;
        atomic_t        atm_swbw;
 
        struct list_head                Tx_TS_Admit_List;
        struct list_head                Tx_TS_Pending_List;
        struct list_head                Tx_TS_Unused_List;
-       struct tx_ts_record TxTsRecord[TOTAL_TS_NUM];
+       struct tx_ts_record tx_ts_records[TOTAL_TS_NUM];
        struct list_head                Rx_TS_Admit_List;
        struct list_head                Rx_TS_Pending_List;
        struct list_head                Rx_TS_Unused_List;
-       struct rx_ts_record RxTsRecord[TOTAL_TS_NUM];
+       struct rx_ts_record rx_ts_records[TOTAL_TS_NUM];
        struct rx_reorder_entry RxReorderEntry[128];
        struct list_head                RxReorder_Unused_List;
 
@@ -1321,12 +1316,8 @@ struct rtllib_device {
        u16 scan_watch_dog;
 
        /* map of allowed channels. 0 is dummy */
-       void *dot11d_info;
-       bool global_domain;
        u8 active_channel_map[MAX_CHANNEL_NUMBER+1];
 
-       u8   bss_start_channel;
-
        int rate;       /* current rate */
        int basic_rate;
 
@@ -1391,7 +1382,7 @@ struct rtllib_device {
        int mgmt_queue_head;
        int mgmt_queue_tail;
        u8 AsocRetryCount;
-       struct sk_buff_head skb_waitQ[MAX_QUEUE_SIZE];
+       struct sk_buff_head skb_waitq[MAX_QUEUE_SIZE];
 
        bool    bdynamic_txpower_enable;
 
@@ -1411,7 +1402,7 @@ struct rtllib_device {
        bool FwRWRF;
 
        struct rt_link_detect link_detect_info;
-       bool bIsAggregateFrame;
+       bool is_aggregate_frame;
        struct rt_pwr_save_ctrl pwr_save_ctrl;
 
        /* used if IEEE_SOFTMAC_TX_QUEUE is set */
@@ -1421,7 +1412,6 @@ struct rtllib_device {
        struct timer_list associate_timer;
 
        /* used if IEEE_SOFTMAC_BEACONS is set */
-       struct timer_list beacon_timer;
        u8 need_sw_enc;
        struct work_struct associate_complete_wq;
        struct work_struct ips_leave_wq;
@@ -1469,7 +1459,7 @@ struct rtllib_device {
         * This function can sleep. the driver should ensure
         * the radio has been switched before return.
         */
-       void (*set_chan)(struct net_device *dev, short ch);
+       void (*set_chan)(struct net_device *dev, u8 ch);
 
        /* indicate the driver that the link state is changed
         * for example it may indicate the card is associated now.
@@ -1687,8 +1677,8 @@ void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee);
 void rtllib_start_protocol(struct rtllib_device *ieee);
 void rtllib_stop_protocol(struct rtllib_device *ieee);
 
-void rtllib_EnableNetMonitorMode(struct net_device *dev, bool bInitState);
-void rtllib_DisableNetMonitorMode(struct net_device *dev, bool bInitState);
+void rtllib_enable_net_monitor_mode(struct net_device *dev, bool init_state);
+void rtllib_disable_net_monitor_mode(struct net_device *dev, bool init_state);
 
 void rtllib_softmac_stop_protocol(struct rtllib_device *ieee);
 void rtllib_softmac_start_protocol(struct rtllib_device *ieee);
@@ -1696,7 +1686,6 @@ void rtllib_softmac_start_protocol(struct rtllib_device *ieee);
 void rtllib_reset_queue(struct rtllib_device *ieee);
 void rtllib_wake_all_queues(struct rtllib_device *ieee);
 void rtllib_stop_all_queues(struct rtllib_device *ieee);
-struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee);
 
 void notify_wx_assoc_event(struct rtllib_device *ieee);
 void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success);
@@ -1758,39 +1747,37 @@ int rtllib_wx_get_rts(struct rtllib_device *ieee, struct iw_request_info *info,
                      union iwreq_data *wrqu, char *extra);
 #define MAX_RECEIVE_BUFFER_SIZE 9100
 
-void HTSetConnectBwMode(struct rtllib_device *ieee,
+void ht_set_connect_bw_mode(struct rtllib_device *ieee,
                        enum ht_channel_width bandwidth,
                        enum ht_extchnl_offset Offset);
 void ht_update_default_setting(struct rtllib_device *ieee);
-void HTConstructCapabilityElement(struct rtllib_device *ieee,
+void ht_construct_capability_element(struct rtllib_device *ieee,
                                  u8 *posHTCap, u8 *len,
                                  u8 isEncrypt, bool bAssoc);
-void HTConstructInfoElement(struct rtllib_device *ieee,
-                           u8 *posHTInfo, u8 *len, u8 isEncrypt);
-void HTConstructRT2RTAggElement(struct rtllib_device *ieee,
+void ht_construct_rt2rt_agg_element(struct rtllib_device *ieee,
                                u8 *posRT2RTAgg, u8 *len);
-void HTOnAssocRsp(struct rtllib_device *ieee);
-void HTInitializeHTInfo(struct rtllib_device *ieee);
-void HTInitializeBssDesc(struct bss_ht *pBssHT);
-void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
+void ht_on_assoc_rsp(struct rtllib_device *ieee);
+void ht_initialize_ht_info(struct rtllib_device *ieee);
+void ht_initialize_bss_desc(struct bss_ht *pBssHT);
+void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
                                   struct rtllib_network *pNetwork);
 void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
                                     struct rtllib_network *pNetwork);
-u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
+u8 ht_get_highest_mcs_rate(struct rtllib_device *ieee, u8 *pMCSRateSet,
                       u8 *pMCSFilter);
 extern u8 MCS_FILTER_ALL[];
 extern u16 MCS_DATA_RATE[2][2][77];
-u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame);
-void HTResetIOTSetting(struct rt_hi_throughput *ht_info);
-bool IsHTHalfNmodeAPs(struct rtllib_device *ieee);
-u16  TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate);
+u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame);
+void ht_reset_iot_setting(struct rt_hi_throughput *ht_info);
+bool is_ht_half_nmode_aps(struct rtllib_device *ieee);
+u16  tx_count_to_data_rate(struct rtllib_device *ieee, u8 nDataRate);
 int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
 int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
 int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
 void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
-                          u8 policy, u8 bOverwritePending);
+                          u8 policy, u8 overwrite_pending);
 void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
-                          struct ts_common_info *pTsCommonInfo,
+                          struct ts_common_info *ts_common_info,
                           enum tr_select TxRxSelect);
 void rtllib_ba_setup_timeout(struct timer_list *t);
 void rtllib_tx_ba_inact_timeout(struct timer_list *t);
index 95b6d6b9429c10af54902a3d5e9c7c062f8c14fb..e7af4a25b0beef03c7181556df610561b77d83bf 100644 (file)
@@ -123,7 +123,7 @@ struct net_device *alloc_rtllib(int sizeof_priv)
                goto free_softmac;
 
        ht_update_default_setting(ieee);
-       HTInitializeHTInfo(ieee);
+       ht_initialize_ht_info(ieee);
        rtllib_ts_init(ieee);
        for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
                INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
index ecaa4dec3f944b1dc4ea53cd6eebb7719d7f9fc5..4df20f4d6bf919140c95946b8cd5cf65ea8b361e 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/ctype.h>
 
 #include "rtllib.h"
-#include "dot11d.h"
 
 static void rtllib_rx_mgt(struct rtllib_device *ieee, struct sk_buff *skb,
                          struct rtllib_rx_stats *stats);
@@ -412,19 +411,19 @@ static bool AddReorderEntry(struct rx_ts_record *ts,
        while (pList->next != &ts->rx_pending_pkt_list) {
                if (SN_LESS(pReorderEntry->SeqNum, ((struct rx_reorder_entry *)
                    list_entry(pList->next, struct rx_reorder_entry,
-                   List))->SeqNum))
+                   list))->SeqNum))
                        pList = pList->next;
                else if (SN_EQUAL(pReorderEntry->SeqNum,
                        ((struct rx_reorder_entry *)list_entry(pList->next,
-                       struct rx_reorder_entry, List))->SeqNum))
+                       struct rx_reorder_entry, list))->SeqNum))
                        return false;
                else
                        break;
        }
-       pReorderEntry->List.next = pList->next;
-       pReorderEntry->List.next->prev = &pReorderEntry->List;
-       pReorderEntry->List.prev = pList;
-       pList->next = &pReorderEntry->List;
+       pReorderEntry->list.next = pList->next;
+       pReorderEntry->list.next->prev = &pReorderEntry->list;
+       pReorderEntry->list.prev = pList;
+       pList->next = &pReorderEntry->list;
 
        return true;
 }
@@ -505,15 +504,15 @@ void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
 
                pRxReorderEntry = (struct rx_reorder_entry *)
                                  list_entry(ts->rx_pending_pkt_list.prev,
-                                            struct rx_reorder_entry, List);
+                                            struct rx_reorder_entry, list);
                netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n", __func__,
                           pRxReorderEntry->SeqNum);
-               list_del_init(&pRxReorderEntry->List);
+               list_del_init(&pRxReorderEntry->list);
 
                ieee->RfdArray[RfdCnt] = pRxReorderEntry->prxb;
 
                RfdCnt = RfdCnt + 1;
-               list_add_tail(&pRxReorderEntry->List,
+               list_add_tail(&pRxReorderEntry->list,
                              &ieee->RxReorder_Unused_List);
        }
        rtllib_indicate_packets(ieee, ieee->RfdArray, RfdCnt);
@@ -602,8 +601,8 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
                if (!list_empty(&ieee->RxReorder_Unused_List)) {
                        pReorderEntry = (struct rx_reorder_entry *)
                                        list_entry(ieee->RxReorder_Unused_List.next,
-                                       struct rx_reorder_entry, List);
-                       list_del_init(&pReorderEntry->List);
+                                       struct rx_reorder_entry, list);
+                       list_del_init(&pReorderEntry->list);
 
                        /* Make a reorder entry and insert
                         * into a the packet list.
@@ -618,7 +617,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
                                           "%s(): Duplicate packet is dropped. IndicateSeq: %d, NewSeq: %d\n",
                                           __func__, ts->rx_indicate_seq,
                                           SeqNum);
-                               list_add_tail(&pReorderEntry->List,
+                               list_add_tail(&pReorderEntry->list,
                                              &ieee->RxReorder_Unused_List);
 
                                for (i = 0; i < prxb->nr_subframes; i++)
@@ -658,7 +657,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
                pReorderEntry = (struct rx_reorder_entry *)
                                        list_entry(ts->rx_pending_pkt_list.prev,
                                                   struct rx_reorder_entry,
-                                                  List);
+                                                  list);
                if (SN_LESS(pReorderEntry->SeqNum, ts->rx_indicate_seq) ||
                    SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq)) {
                        /* This protect struct buffer from overflow. */
@@ -670,7 +669,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
                                break;
                        }
 
-                       list_del_init(&pReorderEntry->List);
+                       list_del_init(&pReorderEntry->list);
 
                        if (SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq))
                                ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) %
@@ -681,7 +680,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
                                   __func__, pReorderEntry->SeqNum);
                        index++;
 
-                       list_add_tail(&pReorderEntry->List,
+                       list_add_tail(&pReorderEntry->list,
                                      &ieee->RxReorder_Unused_List);
                } else {
                        bPktInBuf = true;
@@ -731,7 +730,7 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
 
        u16             LLCOffset = sizeof(struct ieee80211_hdr_3addr);
        u16             ChkLength;
-       bool            bIsAggregateFrame = false;
+       bool            is_aggregate_frame = false;
        u16             nSubframe_Length;
        u8              nPadding_Length = 0;
        u16             SeqNum = 0;
@@ -740,7 +739,7 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
        SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctrl));
        if ((RTLLIB_QOS_HAS_SEQ(fc)) &&
           (((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved))
-               bIsAggregateFrame = true;
+               is_aggregate_frame = true;
 
        if (RTLLIB_QOS_HAS_SEQ(fc))
                LLCOffset += 2;
@@ -753,8 +752,8 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
                return 0;
 
        skb_pull(skb, LLCOffset);
-       ieee->bIsAggregateFrame = bIsAggregateFrame;
-       if (!bIsAggregateFrame) {
+       ieee->is_aggregate_frame = is_aggregate_frame;
+       if (!is_aggregate_frame) {
                rxb->nr_subframes = 1;
 
                /* altered by clark 3/30/2010
@@ -858,7 +857,7 @@ static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
        size_t hdrlen;
 
        hdrlen = rtllib_get_hdrlen(fc);
-       if (HTCCheck(ieee, skb->data)) {
+       if (ht_c_check(ieee, skb->data)) {
                if (net_ratelimit())
                        netdev_info(ieee->dev, "%s: find HTCControl!\n",
                                    __func__);
@@ -1151,7 +1150,7 @@ static void rtllib_rx_check_leave_lps(struct rtllib_device *ieee, u8 unicast,
        if (unicast) {
                if (ieee->link_state == MAC80211_LINKED) {
                        if (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
-                           ieee->link_detect_info.NumTxOkInPeriod) > 8) ||
+                           ieee->link_detect_info.num_tx_ok_in_period) > 8) ||
                            (ieee->link_detect_info.NumRxUnicastOkInPeriod > 2)) {
                                ieee->leisure_ps_leave(ieee->dev);
                        }
@@ -1286,7 +1285,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
 
        /* Update statstics for AP roaming */
        ieee->link_detect_info.NumRecvDataInPeriod++;
-       ieee->link_detect_info.NumRxOkInPeriod++;
+       ieee->link_detect_info.num_rx_ok_in_period++;
 
        /* Data frame - extract src/dst addresses */
        rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid);
@@ -1359,7 +1358,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
        /* Update WAPI PN */
 
        /* Check if leave LPS */
-       if (ieee->bIsAggregateFrame)
+       if (ieee->is_aggregate_frame)
                nr_subframes = rxb->nr_subframes;
        else
                nr_subframes = 1;
@@ -1402,7 +1401,7 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
                return 0;
        }
 
-       if (HTCCheck(ieee, skb->data)) {
+       if (ht_c_check(ieee, skb->data)) {
                if (net_ratelimit())
                        netdev_info(ieee->dev, "%s: Find HTCControl!\n",
                                    __func__);
@@ -1663,35 +1662,6 @@ static const char *get_info_element_string(u16 id)
        }
 }
 
-static inline void rtllib_extract_country_ie(
-       struct rtllib_device *ieee,
-       struct rtllib_info_element *info_element,
-       struct rtllib_network *network,
-       u8 *addr2)
-{
-       if (IS_DOT11D_ENABLE(ieee)) {
-               if (info_element->len != 0) {
-                       memcpy(network->CountryIeBuf, info_element->data,
-                              info_element->len);
-                       network->CountryIeLen = info_element->len;
-
-                       if (!IS_COUNTRY_IE_VALID(ieee)) {
-                               if (rtllib_act_scanning(ieee, false) &&
-                                   ieee->FirstIe_InScan)
-                                       netdev_info(ieee->dev,
-                                                   "Received beacon CountryIE, SSID: <%s>\n",
-                                                   network->ssid);
-                               dot11d_update_country(ieee, addr2,
-                                                      info_element->len,
-                                                      info_element->data);
-                       }
-               }
-
-               if (IS_EQUAL_CIE_SRC(ieee, addr2))
-                       UPDATE_CIE_WATCHDOG(ieee);
-       }
-}
-
 static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
                                      struct rtllib_info_element *info_element,
                                      struct rtllib_network *network,
@@ -2146,8 +2116,6 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
                case MFIE_TYPE_COUNTRY:
                        netdev_dbg(ieee->dev, "MFIE_TYPE_COUNTRY: %d bytes\n",
                                   info_element->len);
-                       rtllib_extract_country_ie(ieee, info_element, network,
-                                                 network->bssid);
                        break;
 /* TODO */
                default:
@@ -2221,7 +2189,7 @@ static inline int rtllib_network_init(
        network->RSSI = stats->SignalStrength;
        network->CountryIeLen = 0;
        memset(network->CountryIeBuf, 0, MAX_IE_LEN);
-       HTInitializeBssDesc(&network->bssht);
+       ht_initialize_bss_desc(&network->bssht);
        network->flags |= NETWORK_HAS_CCK;
 
        network->wpa_ie_len = 0;
index 42d652fe86013a16cd24b4a0591406ba633115e1..b9278b26accd854fc1a790ab87997ba42b4c3137 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/uaccess.h>
 #include <linux/etherdevice.h>
 #include <linux/ieee80211.h>
-#include "dot11d.h"
 
 static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
 
@@ -45,7 +44,7 @@ static unsigned int rtllib_MFIE_rate_len(struct rtllib_device *ieee)
  * Then it updates the pointer so that
  * it points after the new MFIE tag added.
  */
-static void rtllib_MFIE_Brate(struct rtllib_device *ieee, u8 **tag_p)
+static void rtllib_mfie_brate(struct rtllib_device *ieee, u8 **tag_p)
 {
        u8 *tag = *tag_p;
 
@@ -62,7 +61,7 @@ static void rtllib_MFIE_Brate(struct rtllib_device *ieee, u8 **tag_p)
        *tag_p = tag;
 }
 
-static void rtllib_MFIE_Grate(struct rtllib_device *ieee, u8 **tag_p)
+static void rtllib_mfie_grate(struct rtllib_device *ieee, u8 **tag_p)
 {
        u8 *tag = *tag_p;
 
@@ -83,7 +82,7 @@ static void rtllib_MFIE_Grate(struct rtllib_device *ieee, u8 **tag_p)
        *tag_p = tag;
 }
 
-static void rtllib_WMM_Info(struct rtllib_device *ieee, u8 **tag_p)
+static void rtllib_wmm_info(struct rtllib_device *ieee, u8 **tag_p)
 {
        u8 *tag = *tag_p;
 
@@ -99,7 +98,7 @@ static void rtllib_WMM_Info(struct rtllib_device *ieee, u8 **tag_p)
        *tag_p = tag;
 }
 
-static void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p)
+static void rtllib_turbo_info(struct rtllib_device *ieee, u8 **tag_p)
 {
        u8 *tag = *tag_p;
 
@@ -135,32 +134,33 @@ static void enqueue_mgmt(struct rtllib_device *ieee, struct sk_buff *skb)
 
 static void init_mgmt_queue(struct rtllib_device *ieee)
 {
-       ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
+       ieee->mgmt_queue_tail = 0;
+       ieee->mgmt_queue_head = 0;
 }
 
 u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
 {
        u16     i;
-       u8      QueryRate = 0;
-       u8      BasicRate;
+       u8      query_rate = 0;
+       u8      basic_rate;
 
        for (i = 0; i < ieee->current_network.rates_len; i++) {
-               BasicRate = ieee->current_network.rates[i] & 0x7F;
-               if (!rtllib_is_cck_rate(BasicRate)) {
-                       if (QueryRate == 0) {
-                               QueryRate = BasicRate;
+               basic_rate = ieee->current_network.rates[i] & 0x7F;
+               if (!rtllib_is_cck_rate(basic_rate)) {
+                       if (query_rate == 0) {
+                               query_rate = basic_rate;
                        } else {
-                               if (BasicRate < QueryRate)
-                                       QueryRate = BasicRate;
+                               if (basic_rate < query_rate)
+                                       query_rate = basic_rate;
                        }
                }
        }
 
-       if (QueryRate == 0) {
-               QueryRate = 12;
-               netdev_info(ieee->dev, "No BasicRate found!!\n");
+       if (query_rate == 0) {
+               query_rate = 12;
+               netdev_info(ieee->dev, "No basic_rate found!!\n");
        }
-       return QueryRate;
+       return query_rate;
 }
 
 static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
@@ -173,12 +173,8 @@ static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
        else
                rate = ieee->basic_rate & 0x7f;
 
-       if (rate == 0) {
-               if (ieee->mode == WIRELESS_MODE_N_24G && !ht_info->bCurSuppCCK)
-                       rate = 0x0c;
-               else
-                       rate = 0x02;
-       }
+       if (rate == 0)
+               rate = 0x02;
 
        return rate;
 }
@@ -240,7 +236,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
                /* check whether the managed packet queued greater than 5 */
                if (!ieee->check_nic_enough_desc(ieee->dev,
                                                 tcb_desc->queue_index) ||
-                   skb_queue_len(&ieee->skb_waitQ[tcb_desc->queue_index]) ||
+                   skb_queue_len(&ieee->skb_waitq[tcb_desc->queue_index]) ||
                    ieee->queue_stop) {
                        /* insert the skb packet to the management queue
                         *
@@ -250,7 +246,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
                        netdev_info(ieee->dev,
                               "%s():insert to waitqueue, queue_index:%d!\n",
                               __func__, tcb_desc->queue_index);
-                       skb_queue_tail(&ieee->skb_waitQ[tcb_desc->queue_index],
+                       skb_queue_tail(&ieee->skb_waitq[tcb_desc->queue_index],
                                       skb);
                } else {
                        ieee->softmac_hard_start_xmit(skb, ieee->dev);
@@ -345,65 +341,34 @@ static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
        memcpy(tag, ieee->current_network.ssid, len);
        tag += len;
 
-       rtllib_MFIE_Brate(ieee, &tag);
-       rtllib_MFIE_Grate(ieee, &tag);
+       rtllib_mfie_brate(ieee, &tag);
+       rtllib_mfie_grate(ieee, &tag);
 
        return skb;
 }
 
-static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee);
-
-static void rtllib_send_beacon(struct rtllib_device *ieee)
-{
-       struct sk_buff *skb;
-
-       if (!ieee->ieee_up)
-               return;
-       skb = rtllib_get_beacon_(ieee);
-
-       if (skb) {
-               softmac_mgmt_xmit(skb, ieee);
-               ieee->softmac_stats.tx_beacons++;
-       }
-
-       if (ieee->beacon_txing && ieee->ieee_up)
-               mod_timer(&ieee->beacon_timer, jiffies +
-                         (msecs_to_jiffies(ieee->current_network.beacon_interval - 5)));
-}
-
-static void rtllib_send_beacon_cb(struct timer_list *t)
-{
-       struct rtllib_device *ieee =
-               from_timer(ieee, t, beacon_timer);
-       unsigned long flags;
-
-       spin_lock_irqsave(&ieee->beacon_lock, flags);
-       rtllib_send_beacon(ieee);
-       spin_unlock_irqrestore(&ieee->beacon_lock, flags);
-}
-
 /* Enables network monitor mode, all rx packets will be received. */
-void rtllib_EnableNetMonitorMode(struct net_device *dev,
-               bool bInitState)
+void rtllib_enable_net_monitor_mode(struct net_device *dev,
+               bool init_state)
 {
        struct rtllib_device *ieee = netdev_priv_rsl(dev);
 
        netdev_info(dev, "========>Enter Monitor Mode\n");
 
-       ieee->AllowAllDestAddrHandler(dev, true, !bInitState);
+       ieee->AllowAllDestAddrHandler(dev, true, !init_state);
 }
 
 /* Disables network monitor mode. Only packets destinated to
  * us will be received.
  */
-void rtllib_DisableNetMonitorMode(struct net_device *dev,
-               bool bInitState)
+void rtllib_disable_net_monitor_mode(struct net_device *dev,
+               bool init_state)
 {
        struct rtllib_device *ieee = netdev_priv_rsl(dev);
 
        netdev_info(dev, "========>Exit Monitor Mode\n");
 
-       ieee->AllowAllDestAddrHandler(dev, false, !bInitState);
+       ieee->AllowAllDestAddrHandler(dev, false, !init_state);
 }
 
 static void rtllib_send_probe(struct rtllib_device *ieee)
@@ -425,12 +390,6 @@ static void rtllib_send_probe_requests(struct rtllib_device *ieee)
        }
 }
 
-static void rtllib_update_active_chan_map(struct rtllib_device *ieee)
-{
-       memcpy(ieee->active_channel_map, GET_DOT11D_INFO(ieee)->channel_map,
-              MAX_CHANNEL_NUMBER + 1);
-}
-
 /* this performs syncro scan blocking the caller until all channels
  * in the allowed channel map has been checked.
  */
@@ -439,8 +398,6 @@ static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee)
        union iwreq_data wrqu;
        short ch = 0;
 
-       rtllib_update_active_chan_map(ieee);
-
        ieee->be_scan_inprogress = true;
 
        mutex_lock(&ieee->scan_mutex);
@@ -492,10 +449,6 @@ out:
        ieee->actscanning = false;
        ieee->sync_scan_hurryup = 0;
 
-       if (ieee->link_state >= MAC80211_LINKED) {
-               if (IS_DOT11D_ENABLE(ieee))
-                       dot11d_scan_complete(ieee);
-       }
        mutex_unlock(&ieee->scan_mutex);
 
        ieee->be_scan_inprogress = false;
@@ -510,8 +463,6 @@ static void rtllib_softmac_scan_wq(void *data)
                                     struct rtllib_device, softmac_scan_wq);
        u8 last_channel = ieee->current_network.channel;
 
-       rtllib_update_active_chan_map(ieee);
-
        if (!ieee->ieee_up)
                return;
        if (rtllib_act_scanning(ieee, true))
@@ -552,8 +503,6 @@ static void rtllib_softmac_scan_wq(void *data)
        return;
 
 out:
-       if (IS_DOT11D_ENABLE(ieee))
-               dot11d_scan_complete(ieee);
        ieee->current_network.channel = last_channel;
 
 out1:
@@ -609,10 +558,6 @@ static void rtllib_start_scan(struct rtllib_device *ieee)
 {
        ieee->rtllib_ips_leave_wq(ieee->dev);
 
-       if (IS_DOT11D_ENABLE(ieee)) {
-               if (IS_COUNTRY_IE_VALID(ieee))
-                       RESET_CIE_WATCHDOG(ieee);
-       }
        if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
                if (ieee->scanning_continue == 0) {
                        ieee->actscanning = true;
@@ -625,10 +570,6 @@ static void rtllib_start_scan(struct rtllib_device *ieee)
 /* called with wx_mutex held */
 void rtllib_start_scan_syncro(struct rtllib_device *ieee)
 {
-       if (IS_DOT11D_ENABLE(ieee)) {
-               if (IS_COUNTRY_IE_VALID(ieee))
-                       RESET_CIE_WATCHDOG(ieee);
-       }
        ieee->sync_scan_hurryup = 0;
        if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
                rtllib_softmac_scan_syncro(ieee);
@@ -677,152 +618,6 @@ rtllib_authentication_req(struct rtllib_network *beacon,
        return skb;
 }
 
-static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
-                                        const u8 *dest)
-{
-       u8 *tag;
-       int beacon_size;
-       struct rtllib_probe_response *beacon_buf;
-       struct sk_buff *skb = NULL;
-       int encrypt;
-       int atim_len, erp_len;
-       struct lib80211_crypt_data *crypt;
-
-       char *ssid = ieee->current_network.ssid;
-       int ssid_len = ieee->current_network.ssid_len;
-       int rate_len = ieee->current_network.rates_len + 2;
-       int rate_ex_len = ieee->current_network.rates_ex_len;
-       int wpa_ie_len = ieee->wpa_ie_len;
-       u8 erpinfo_content = 0;
-
-       u8 *tmp_ht_cap_buf = NULL;
-       u8 tmp_ht_cap_len = 0;
-       u8 *tmp_ht_info_buf = NULL;
-       u8 tmp_ht_info_len = 0;
-       struct rt_hi_throughput *ht_info = ieee->ht_info;
-       u8 *tmp_generic_ie_buf = NULL;
-       u8 tmp_generic_ie_len = 0;
-
-       if (rate_ex_len > 0)
-               rate_ex_len += 2;
-
-       if (ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
-               atim_len = 4;
-       else
-               atim_len = 0;
-
-       if ((ieee->current_network.mode == WIRELESS_MODE_G) ||
-          (ieee->current_network.mode == WIRELESS_MODE_N_24G &&
-          ieee->ht_info->bCurSuppCCK)) {
-               erp_len = 3;
-               erpinfo_content = 0;
-               if (ieee->current_network.buseprotection)
-                       erpinfo_content |= ERP_UseProtection;
-       } else {
-               erp_len = 0;
-       }
-
-       crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
-       encrypt = crypt && crypt->ops &&
-               ((strcmp(crypt->ops->name, "R-WEP") == 0 || wpa_ie_len));
-       if (ieee->ht_info->current_ht_support) {
-               tmp_ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap);
-               tmp_ht_cap_len = sizeof(ieee->ht_info->SelfHTCap);
-               tmp_ht_info_buf = (u8 *)&(ieee->ht_info->SelfHTInfo);
-               tmp_ht_info_len = sizeof(ieee->ht_info->SelfHTInfo);
-               HTConstructCapabilityElement(ieee, tmp_ht_cap_buf,
-                                            &tmp_ht_cap_len, encrypt, false);
-               HTConstructInfoElement(ieee, tmp_ht_info_buf, &tmp_ht_info_len,
-                                      encrypt);
-
-               if (ht_info->reg_rt2rt_aggregation) {
-                       tmp_generic_ie_buf = ieee->ht_info->sz_rt2rt_agg_buf;
-                       tmp_generic_ie_len =
-                                sizeof(ieee->ht_info->sz_rt2rt_agg_buf);
-                       HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf,
-                                                  &tmp_generic_ie_len);
-               }
-       }
-
-       beacon_size = sizeof(struct rtllib_probe_response) + 2 +
-               ssid_len + 3 + rate_len + rate_ex_len + atim_len + erp_len
-               + wpa_ie_len + ieee->tx_headroom;
-       skb = dev_alloc_skb(beacon_size);
-       if (!skb)
-               return NULL;
-
-       skb_reserve(skb, ieee->tx_headroom);
-
-       beacon_buf = skb_put(skb, (beacon_size - ieee->tx_headroom));
-       ether_addr_copy(beacon_buf->header.addr1, dest);
-       ether_addr_copy(beacon_buf->header.addr2, ieee->dev->dev_addr);
-       ether_addr_copy(beacon_buf->header.addr3, ieee->current_network.bssid);
-
-       beacon_buf->header.duration_id = 0;
-       beacon_buf->beacon_interval =
-               cpu_to_le16(ieee->current_network.beacon_interval);
-       beacon_buf->capability =
-               cpu_to_le16(ieee->current_network.capability &
-               WLAN_CAPABILITY_IBSS);
-       beacon_buf->capability |=
-               cpu_to_le16(ieee->current_network.capability &
-               WLAN_CAPABILITY_SHORT_PREAMBLE);
-
-       if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
-               beacon_buf->capability |=
-                       cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
-
-       crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
-       if (encrypt)
-               beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
-       beacon_buf->header.frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP);
-       beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
-       beacon_buf->info_element[0].len = ssid_len;
-
-       tag = (u8 *)beacon_buf->info_element[0].data;
-
-       memcpy(tag, ssid, ssid_len);
-
-       tag += ssid_len;
-
-       *(tag++) = MFIE_TYPE_RATES;
-       *(tag++) = rate_len - 2;
-       memcpy(tag, ieee->current_network.rates, rate_len - 2);
-       tag += rate_len - 2;
-
-       *(tag++) = MFIE_TYPE_DS_SET;
-       *(tag++) = 1;
-       *(tag++) = ieee->current_network.channel;
-
-       if (atim_len) {
-               u16 val16;
-               *(tag++) = MFIE_TYPE_IBSS_SET;
-               *(tag++) = 2;
-               val16 = ieee->current_network.atim_window;
-               memcpy((u8 *)tag, (u8 *)&val16, 2);
-               tag += 2;
-       }
-
-       if (erp_len) {
-               *(tag++) = MFIE_TYPE_ERP;
-               *(tag++) = 1;
-               *(tag++) = erpinfo_content;
-       }
-       if (rate_ex_len) {
-               *(tag++) = MFIE_TYPE_RATES_EX;
-               *(tag++) = rate_ex_len - 2;
-               memcpy(tag, ieee->current_network.rates_ex, rate_ex_len - 2);
-               tag += rate_ex_len - 2;
-       }
-
-       if (wpa_ie_len) {
-               memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
-               tag += ieee->wpa_ie_len;
-       }
-       return skb;
-}
-
 static struct sk_buff *rtllib_null_func(struct rtllib_device *ieee, short pwr)
 {
        struct sk_buff *skb;
@@ -875,7 +670,7 @@ static inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
        int i = 0;
 
        do {
-               if ((ieee->PMKIDList[i].bUsed) &&
+               if ((ieee->PMKIDList[i].used) &&
                   (memcmp(ieee->PMKIDList[i].Bssid, bssid, ETH_ALEN) == 0))
                        break;
                i++;
@@ -933,15 +728,15 @@ rtllib_association_req(struct rtllib_network *beacon,
        }
 
        if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
-               ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap);
+               ht_cap_buf = (u8 *)&ieee->ht_info->SelfHTCap;
                ht_cap_len = sizeof(ieee->ht_info->SelfHTCap);
-               HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len,
+               ht_construct_capability_element(ieee, ht_cap_buf, &ht_cap_len,
                                             encrypt, true);
                if (ieee->ht_info->current_rt2rt_aggregation) {
                        realtek_ie_buf = ieee->ht_info->sz_rt2rt_agg_buf;
                        realtek_ie_len =
                                 sizeof(ieee->ht_info->sz_rt2rt_agg_buf);
-                       HTConstructRT2RTAggElement(ieee, realtek_ie_buf,
+                       ht_construct_rt2rt_agg_element(ieee, realtek_ie_buf,
                                                   &realtek_ie_len);
                }
        }
@@ -1071,7 +866,7 @@ rtllib_association_req(struct rtllib_network *beacon,
                tag += osCcxVerNum.Length;
        }
        if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
-               if (ieee->ht_info->ePeerHTSpecVer != HT_SPEC_VER_EWC) {
+               if (ieee->ht_info->peer_ht_spec_ver != HT_SPEC_VER_EWC) {
                        tag = skb_put(skb, ht_cap_len);
                        *tag++ = MFIE_TYPE_HT_CAP;
                        *tag++ = ht_cap_len - 2;
@@ -1093,7 +888,7 @@ rtllib_association_req(struct rtllib_network *beacon,
        }
        if (wmm_info_len) {
                tag = skb_put(skb, wmm_info_len);
-               rtllib_WMM_Info(ieee, &tag);
+               rtllib_wmm_info(ieee, &tag);
        }
 
        if (wps_ie_len && ieee->wps_ie)
@@ -1101,11 +896,11 @@ rtllib_association_req(struct rtllib_network *beacon,
 
        if (turbo_info_len) {
                tag = skb_put(skb, turbo_info_len);
-               rtllib_TURBO_Info(ieee, &tag);
+               rtllib_turbo_info(ieee, &tag);
        }
 
        if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
-               if (ieee->ht_info->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
+               if (ieee->ht_info->peer_ht_spec_ver == HT_SPEC_VER_EWC) {
                        tag = skb_put(skb, ht_cap_len);
                        *tag++ = MFIE_TYPE_GENERIC;
                        *tag++ = ht_cap_len - 2;
@@ -1123,7 +918,7 @@ rtllib_association_req(struct rtllib_network *beacon,
 
        kfree(ieee->assocreq_ies);
        ieee->assocreq_ies = NULL;
-       ies = &(hdr->info_element[0].id);
+       ies = &hdr->info_element[0].id;
        ieee->assocreq_ies_len = (skb->data + skb->len) - ies;
        ieee->assocreq_ies = kmemdup(ies, ieee->assocreq_ies_len, GFP_ATOMIC);
        if (!ieee->assocreq_ies)
@@ -1269,7 +1064,7 @@ static void rtllib_associate_complete_wq(void *data)
        }
        if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
                netdev_info(ieee->dev, "Successfully associated, ht enabled\n");
-               HTOnAssocRsp(ieee);
+               ht_on_assoc_rsp(ieee);
        } else {
                netdev_info(ieee->dev,
                            "Successfully associated, ht not enabled(%d, %d)\n",
@@ -1314,7 +1109,7 @@ static void rtllib_associate_procedure_wq(void *data)
        mutex_lock(&ieee->wx_mutex);
 
        rtllib_stop_scan(ieee);
-       HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
+       ht_set_connect_bw_mode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
        if (ieee->rf_power_state == rf_off) {
                ieee->rtllib_ips_leave_wq(ieee->dev);
                mutex_unlock(&ieee->wx_mutex);
@@ -1417,21 +1212,20 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
                           !(ieee->softmac_features & IEEE_SOFTMAC_SCAN))
                                rtllib_stop_scan_syncro(ieee);
 
-                       HTResetIOTSetting(ieee->ht_info);
+                       ht_reset_iot_setting(ieee->ht_info);
                        ieee->wmm_acm = 0;
                        if (ieee->iw_mode == IW_MODE_INFRA) {
                                /* Join the network for the first time */
                                ieee->AsocRetryCount = 0;
                                if ((ieee->current_network.qos_data.supported == 1) &&
                                    ieee->current_network.bssht.bd_support_ht)
-                                       HTResetSelfAndSavePeerSetting(ieee,
+                                       ht_reset_self_and_save_peer_setting(ieee,
                                                 &(ieee->current_network));
                                else
                                        ieee->ht_info->current_ht_support = false;
 
                                ieee->link_state = RTLLIB_ASSOCIATING;
-                               schedule_delayed_work(
-                                          &ieee->associate_procedure_wq, 0);
+                               schedule_delayed_work(&ieee->associate_procedure_wq, 0);
                        } else {
                                if (rtllib_is_54g(&ieee->current_network)) {
                                        ieee->rate = 108;
@@ -1825,7 +1619,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
 
                        kfree(ieee->assocresp_ies);
                        ieee->assocresp_ies = NULL;
-                       ies = &(assoc_resp->info_element[0].id);
+                       ies = &assoc_resp->info_element[0].id;
                        ieee->assocresp_ies_len = (skb->data + skb->len) - ies;
                        ieee->assocresp_ies = kmemdup(ies,
                                                      ieee->assocresp_ies_len,
@@ -1841,8 +1635,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
                                    "Association response status code 0x%x\n",
                                    errcode);
                        if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT)
-                               schedule_delayed_work(
-                                        &ieee->associate_procedure_wq, 0);
+                               schedule_delayed_work(&ieee->associate_procedure_wq, 0);
                        else
                                rtllib_associate_abort(ieee);
                }
@@ -1872,7 +1665,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
                ieee->softmac_stats.rx_auth_rs_ok++;
                if (!(ieee->ht_info->iot_action & HT_IOT_ACT_PURE_N_MODE)) {
                        if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
-                               if (IsHTHalfNmodeAPs(ieee)) {
+                               if (is_ht_half_nmode_aps(ieee)) {
                                        bSupportNmode = true;
                                        bHalfSupportNmode = true;
                                } else {
@@ -2030,7 +1823,7 @@ void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee)
         * the wait queue
         */
        for (i = 0; i < txb->nr_frags; i++) {
-               queue_len = skb_queue_len(&ieee->skb_waitQ[queue_index]);
+               queue_len = skb_queue_len(&ieee->skb_waitq[queue_index]);
                if ((queue_len  != 0) ||
                    (!ieee->check_nic_enough_desc(ieee->dev, queue_index)) ||
                    (ieee->queue_stop)) {
@@ -2039,13 +1832,12 @@ void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee)
                         * to check it any more.
                         */
                        if (queue_len < 200)
-                               skb_queue_tail(&ieee->skb_waitQ[queue_index],
+                               skb_queue_tail(&ieee->skb_waitq[queue_index],
                                               txb->fragments[i]);
                        else
                                kfree_skb(txb->fragments[i]);
                } else {
-                       ieee->softmac_data_hard_start_xmit(
-                                       txb->fragments[i],
+                       ieee->softmac_data_hard_start_xmit(txb->fragments[i],
                                        ieee->dev, ieee->rate);
                }
        }
@@ -2090,10 +1882,6 @@ static void rtllib_start_bss(struct rtllib_device *ieee)
 {
        unsigned long flags;
 
-       if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) {
-               if (!ieee->global_domain)
-                       return;
-       }
        /* check if we have already found the net we
         * are interested in (if any).
         * if not (we are disassociated and we are not
@@ -2121,6 +1909,7 @@ static void rtllib_link_change_wq(void *data)
                                     struct rtllib_device, link_change_wq);
        ieee->link_change(ieee->dev);
 }
+
 /* called only in userspace context */
 void rtllib_disassociate(struct rtllib_device *ieee)
 {
@@ -2128,8 +1917,6 @@ void rtllib_disassociate(struct rtllib_device *ieee)
        if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)
                rtllib_reset_queue(ieee);
 
-       if (IS_DOT11D_ENABLE(ieee))
-               dot11d_reset(ieee);
        ieee->link_state = MAC80211_NOLINK;
        ieee->is_set_key = false;
        ieee->wap_set = 0;
@@ -2181,46 +1968,6 @@ exit:
        mutex_unlock(&ieee->wx_mutex);
 }
 
-static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
-{
-       static const u8 broadcast_addr[] = {
-               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-       };
-       struct sk_buff *skb;
-       struct rtllib_probe_response *b;
-
-       skb = rtllib_probe_resp(ieee, broadcast_addr);
-
-       if (!skb)
-               return NULL;
-
-       b = (struct rtllib_probe_response *)skb->data;
-       b->header.frame_control = cpu_to_le16(IEEE80211_STYPE_BEACON);
-
-       return skb;
-}
-
-struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
-{
-       struct sk_buff *skb;
-       struct rtllib_probe_response *b;
-
-       skb = rtllib_get_beacon_(ieee);
-       if (!skb)
-               return NULL;
-
-       b = (struct rtllib_probe_response *)skb->data;
-       b->header.seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
-       if (ieee->seq_ctrl[0] == 0xFFF)
-               ieee->seq_ctrl[0] = 0;
-       else
-               ieee->seq_ctrl[0]++;
-
-       return skb;
-}
-EXPORT_SYMBOL(rtllib_get_beacon);
-
 void rtllib_softmac_stop_protocol(struct rtllib_device *ieee)
 {
        rtllib_stop_scan_syncro(ieee);
@@ -2279,8 +2026,6 @@ void rtllib_start_protocol(struct rtllib_device *ieee)
        short ch = 0;
        int i = 0;
 
-       rtllib_update_active_chan_map(ieee);
-
        if (ieee->proto_started)
                return;
 
@@ -2326,18 +2071,15 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
        ieee->link_state = MAC80211_NOLINK;
        for (i = 0; i < 5; i++)
                ieee->seq_ctrl[i] = 0;
-       ieee->dot11d_info = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
-       if (!ieee->dot11d_info)
-               return -ENOMEM;
 
        ieee->link_detect_info.SlotIndex = 0;
        ieee->link_detect_info.SlotNum = 2;
        ieee->link_detect_info.NumRecvBcnInPeriod = 0;
        ieee->link_detect_info.NumRecvDataInPeriod = 0;
-       ieee->link_detect_info.NumTxOkInPeriod = 0;
-       ieee->link_detect_info.NumRxOkInPeriod = 0;
+       ieee->link_detect_info.num_tx_ok_in_period = 0;
+       ieee->link_detect_info.num_rx_ok_in_period = 0;
        ieee->link_detect_info.NumRxUnicastOkInPeriod = 0;
-       ieee->bIsAggregateFrame = false;
+       ieee->is_aggregate_frame = false;
        ieee->assoc_id = 0;
        ieee->queue_stop = 0;
        ieee->scanning_continue = 0;
@@ -2369,8 +2111,6 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
 
        timer_setup(&ieee->associate_timer, rtllib_associate_abort_cb, 0);
 
-       timer_setup(&ieee->beacon_timer, rtllib_send_beacon_cb, 0);
-
        INIT_DELAYED_WORK(&ieee->link_change_wq, (void *)rtllib_link_change_wq);
        INIT_WORK(&ieee->associate_complete_wq, (void *)rtllib_associate_complete_wq);
        INIT_DELAYED_WORK(&ieee->associate_procedure_wq, (void *)rtllib_associate_procedure_wq);
@@ -2404,9 +2144,6 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
        cancel_work_sync(&ieee->ips_leave_wq);
        cancel_work_sync(&ieee->wx_sync_scan_wq);
        cancel_work_sync(&ieee->ps_task);
-
-       kfree(ieee->dot11d_info);
-       ieee->dot11d_info = NULL;
 }
 
 static inline struct sk_buff *
@@ -2493,7 +2230,7 @@ u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
                return SEC_ALG_WEP;
        } else if ((wpa_ie_len != 0)) {
                if (((ieee->wpa_ie[0] == 0xdd) &&
-                   (!memcmp(&(ieee->wpa_ie[14]), ccmp_ie, 4))) ||
+                   (!memcmp(&ieee->wpa_ie[14], ccmp_ie, 4))) ||
                    ((ieee->wpa_ie[0] == 0x30) &&
                    (!memcmp(&ieee->wpa_ie[10], ccmp_rsn_ie, 4))))
                        return SEC_ALG_CCMP;
@@ -2508,7 +2245,7 @@ static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
                                           u8 *asSta, u8 asRsn)
 {
        u8 i;
-       u8      OpMode;
+       u8      op_mode;
 
        RemovePeerTS(rtllib, asSta);
 
@@ -2517,10 +2254,10 @@ static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
 
                for (i = 0; i < 6; i++)
                        rtllib->current_network.bssid[i] = 0x22;
-               OpMode = RT_OP_MODE_NO_LINK;
-               rtllib->OpMode = RT_OP_MODE_NO_LINK;
+               op_mode = RT_OP_MODE_NO_LINK;
+               rtllib->op_mode = RT_OP_MODE_NO_LINK;
                rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_MEDIA_STATUS,
-                                       (u8 *)(&OpMode));
+                                       (u8 *)(&op_mode));
                rtllib_disassociate(rtllib);
 
                rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_BSSID,
@@ -2528,11 +2265,7 @@ static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
        }
 }
 
-static void
-rtllib_MgntDisconnectAP(
-       struct rtllib_device *rtllib,
-       u8 asRsn
-)
+static void rtllib_MgntDisconnectAP(struct rtllib_device *rtllib, u8 asRsn)
 {
        bool bFilterOutNonAssociatedBSSID = false;
 
index f32584291704e00ab10a8dc695a4aa3dbb8f39fb..2afa701e5445be7100d7623c86086c79f2dfbabb 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/etherdevice.h>
 
 #include "rtllib.h"
-#include "dot11d.h"
 
 int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
                             union iwreq_data *wrqu, char *b)
@@ -208,7 +207,7 @@ int rtllib_wx_get_rate(struct rtllib_device *ieee,
 {
        u32 tmp_rate;
 
-       tmp_rate = TxCountToDataRate(ieee,
+       tmp_rate = tx_count_to_data_rate(ieee,
                                     ieee->softmac_stats.CurrentShowTxate);
        wrqu->bitrate.value = tmp_rate * 500000;
 
@@ -267,11 +266,11 @@ int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
 
        if (wrqu->mode == IW_MODE_MONITOR) {
                ieee->dev->type = ARPHRD_IEEE80211;
-               rtllib_EnableNetMonitorMode(ieee->dev, false);
+               rtllib_enable_net_monitor_mode(ieee->dev, false);
        } else {
                ieee->dev->type = ARPHRD_ETHER;
                if (ieee->iw_mode == IW_MODE_MONITOR)
-                       rtllib_DisableNetMonitorMode(ieee->dev, false);
+                       rtllib_disable_net_monitor_mode(ieee->dev, false);
        }
 
        if (!ieee->proto_started) {
@@ -318,10 +317,10 @@ void rtllib_wx_sync_scan_wq(void *data)
        ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);
 
        if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht &&
-           ieee->ht_info->bCurBW40MHz) {
+           ieee->ht_info->cur_bw_40mhz) {
                b40M = 1;
                chan_offset = ieee->ht_info->CurSTAExtChnlOffset;
-               bandwidth = (enum ht_channel_width)ieee->ht_info->bCurBW40MHz;
+               bandwidth = (enum ht_channel_width)ieee->ht_info->cur_bw_40mhz;
                ieee->set_bw_mode_handler(ieee->dev, HT_CHANNEL_WIDTH_20,
                                       HT_EXTCHNL_OFFSET_NO_EXT);
        }
index 9bf679438ad11a297d5e0e96acad85361f098c60..f7098a2ba8b0b332f6c45eda6b7434348b117477 100644 (file)
@@ -267,7 +267,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
                                    struct cb_desc *tcb_desc)
 {
        struct rt_hi_throughput *ht_info = ieee->ht_info;
-       struct tx_ts_record *pTxTs = NULL;
+       struct tx_ts_record *ts = NULL;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
        if (rtllib_act_scanning(ieee, false))
@@ -288,52 +288,35 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
 
        if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
                return;
-       if (ht_info->bCurrentAMPDUEnable) {
-               if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
+       if (ht_info->current_ampdu_enable) {
+               if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&ts), hdr->addr1,
                           skb->priority, TX_DIR, true)) {
                        netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
                        return;
                }
-               if (!pTxTs->TxAdmittedBARecord.b_valid) {
+               if (!ts->tx_admitted_ba_record.b_valid) {
                        if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
                            KEY_TYPE_NA)) {
                                ;
                        } else if (tcb_desc->bdhcp == 1) {
                                ;
-                       } else if (!pTxTs->bDisable_AddBa) {
-                               TsStartAddBaProcess(ieee, pTxTs);
+                       } else if (!ts->disable_add_ba) {
+                               TsStartAddBaProcess(ieee, ts);
                        }
-                       goto FORCED_AGG_SETTING;
-               } else if (!pTxTs->bUsingBa) {
-                       if (SN_LESS(pTxTs->TxAdmittedBARecord.ba_start_seq_ctrl.field.seq_num,
-                                   (pTxTs->TxCurSeq + 1) % 4096))
-                               pTxTs->bUsingBa = true;
+                       return;
+               } else if (!ts->using_ba) {
+                       if (SN_LESS(ts->tx_admitted_ba_record.ba_start_seq_ctrl.field.seq_num,
+                                   (ts->tx_cur_seq + 1) % 4096))
+                               ts->using_ba = true;
                        else
-                               goto FORCED_AGG_SETTING;
+                               return;
                }
                if (ieee->iw_mode == IW_MODE_INFRA) {
-                       tcb_desc->bAMPDUEnable = true;
+                       tcb_desc->ampdu_enable = true;
                        tcb_desc->ampdu_factor = ht_info->CurrentAMPDUFactor;
                        tcb_desc->ampdu_density = ht_info->current_mpdu_density;
                }
        }
-FORCED_AGG_SETTING:
-       switch (ht_info->ForcedAMPDUMode) {
-       case HT_AGG_AUTO:
-               break;
-
-       case HT_AGG_FORCE_ENABLE:
-               tcb_desc->bAMPDUEnable = true;
-               tcb_desc->ampdu_density = ht_info->forced_mpdu_density;
-               tcb_desc->ampdu_factor = ht_info->forced_ampdu_factor;
-               break;
-
-       case HT_AGG_FORCE_DISABLE:
-               tcb_desc->bAMPDUEnable = false;
-               tcb_desc->ampdu_density = 0;
-               tcb_desc->ampdu_factor = 0;
-               break;
-       }
 }
 
 static void rtllib_query_ShortPreambleMode(struct rtllib_device *ieee,
@@ -357,14 +340,9 @@ static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
        if (!ht_info->current_ht_support || !ht_info->enable_ht)
                return;
 
-       if (ht_info->forced_short_gi) {
-               tcb_desc->bUseShortGI = true;
-               return;
-       }
-
-       if (ht_info->bCurBW40MHz && ht_info->bCurShortGI40MHz)
+       if (ht_info->cur_bw_40mhz && ht_info->cur_short_gi_40mhz)
                tcb_desc->bUseShortGI = true;
-       else if (!ht_info->bCurBW40MHz && ht_info->bCurShortGI20MHz)
+       else if (!ht_info->cur_bw_40mhz && ht_info->cur_short_gi_20mhz)
                tcb_desc->bUseShortGI = true;
 }
 
@@ -383,7 +361,7 @@ static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
 
        if ((tcb_desc->data_rate & 0x80) == 0)
                return;
-       if (ht_info->bCurBW40MHz && ht_info->cur_tx_bw40mhz &&
+       if (ht_info->cur_bw_40mhz && ht_info->cur_tx_bw40mhz &&
            !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
                tcb_desc->bPacketBW = true;
 }
@@ -441,9 +419,9 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
                if (ht_info->current_ht_support && ht_info->enable_ht) {
                        u8 HTOpMode = ht_info->current_op_mode;
 
-                       if ((ht_info->bCurBW40MHz && (HTOpMode == 2 ||
+                       if ((ht_info->cur_bw_40mhz && (HTOpMode == 2 ||
                                                      HTOpMode == 3)) ||
-                            (!ht_info->bCurBW40MHz && HTOpMode == 3)) {
+                            (!ht_info->cur_bw_40mhz && HTOpMode == 3)) {
                                tcb_desc->rts_rate = MGN_24M;
                                tcb_desc->bRTSEnable = true;
                                break;
@@ -454,7 +432,7 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
                        tcb_desc->bRTSEnable = true;
                        break;
                }
-               if (tcb_desc->bAMPDUEnable) {
+               if (tcb_desc->ampdu_enable) {
                        tcb_desc->rts_rate = MGN_24M;
                        tcb_desc->bRTSEnable = false;
                        break;
@@ -500,8 +478,8 @@ static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
                if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&ts), dst,
                           skb->priority, TX_DIR, true))
                        return 0;
-               seqnum = ts->TxCurSeq;
-               ts->TxCurSeq = (ts->TxCurSeq + 1) % 4096;
+               seqnum = ts->tx_cur_seq;
+               ts->tx_cur_seq = (ts->tx_cur_seq + 1) % 4096;
                return seqnum;
        }
        return 0;
@@ -847,7 +825,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
        if (txb) {
                tcb_desc = (struct cb_desc *)
                                (txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
-               tcb_desc->bTxEnableFwCalcDur = 1;
+               tcb_desc->tx_enable_fw_calc_dur = 1;
                tcb_desc->priority = skb->priority;
 
                if (ether_type == ETH_P_PAE) {
index a37250de7ba37cde14cf53a1a27142d96fcbb685..f92ec0faf4d5647ff486f4b82d65d5d7a2f4a779 100644 (file)
@@ -134,7 +134,7 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
                                ((ht_cap->ShortGI40Mhz) ? 1 : 0) :
                                ((ht_cap->ShortGI20Mhz) ? 1 : 0);
 
-               max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS,
+               max_mcs = ht_get_highest_mcs_rate(ieee, ht_cap->MCS,
                                              MCS_FILTER_ALL);
                rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs & 0x7f];
                if (rate > max_rate)
index b18e6d9c832b8ad068d8e4eb16828ae5d2f31e5d..7554613fe7e1eb751675dbb61b0b781b33828996 100644 (file)
@@ -221,8 +221,7 @@ struct net_device *r8712_init_netdev(void)
 
 static u32 start_drv_threads(struct _adapter *padapter)
 {
-       padapter->cmd_thread = kthread_run(r8712_cmd_thread, padapter, "%s",
-                                         padapter->pnetdev->name);
+       padapter->cmd_thread = kthread_run(r8712_cmd_thread, padapter, "%s", padapter->pnetdev->name);
        if (IS_ERR(padapter->cmd_thread))
                return _FAIL;
        return _SUCCESS;
index c9400e40a1d639ddbe7ed592c2da5ad82f010280..a39d6c06648f5d7dc4a286ce71cf120c4b649baf 100644 (file)
@@ -213,8 +213,8 @@ u16 r8712_efuse_get_current_size(struct _adapter *adapter)
        u8 hworden = 0;
        u8 efuse_data, word_cnts = 0;
 
-       while (bContinual && efuse_one_byte_read(adapter, efuse_addr,
-              &efuse_data) && (efuse_addr < efuse_available_max_size)) {
+       while (bContinual && efuse_one_byte_read(adapter, efuse_addr, &efuse_data) &&
+              (efuse_addr < efuse_available_max_size)) {
                if (efuse_data != 0xFF) {
                        hworden =  efuse_data & 0x0F;
                        word_cnts = calculate_word_cnts(hworden);
@@ -252,9 +252,8 @@ u8 r8712_efuse_pg_packet_read(struct _adapter *adapter, u8 offset, u8 *data)
                                memset(tmpdata, 0xFF, PGPKT_DATA_SIZE);
                                for (tmpidx = 0; tmpidx < word_cnts * 2;
                                     tmpidx++) {
-                                       if (efuse_one_byte_read(adapter,
-                                           efuse_addr + 1 + tmpidx,
-                                           &efuse_data)) {
+                                       if (efuse_one_byte_read(adapter, efuse_addr + 1 + tmpidx,
+                                                               &efuse_data)) {
                                                tmpdata[tmpidx] = efuse_data;
                                        } else {
                                                ret = false;
index 7da014ab0723e0b6578364033ce277cca15e603b..a3c4713c59b3a6030e80bf23fe9e873a25dfb6a2 100644 (file)
@@ -267,8 +267,7 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
                                /*the first fragment*/
                                if (!list_empty(&pdefrag_q->queue)) {
                                        /*free current defrag_q */
-                                       r8712_free_recvframe_queue(pdefrag_q,
-                                                            pfree_recv_queue);
+                                       r8712_free_recvframe_queue(pdefrag_q, pfree_recv_queue);
                                }
                        }
                        /* Then enqueue the 0~(n-1) fragment to the defrag_q */
index 4cb01f590673e31a2c8540dd10c5fda2f51d34cb..d7d678b04ca80fe97f62bad5ec619fd8f4d79101 100644 (file)
@@ -147,9 +147,8 @@ static u32 get_ff_hwaddr(struct xmit_frame *pxmitframe)
 }
 
 static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv,
-                                        struct hw_xmit *phwxmit,
-                                        struct tx_servq *ptxservq,
-                                        struct  __queue *pframe_queue)
+                                               struct hw_xmit *phwxmit, struct tx_servq *ptxservq,
+                                               struct  __queue *pframe_queue)
 {
        struct list_head *xmitframe_plist, *xmitframe_phead;
        struct  xmit_frame *pxmitframe = NULL;
@@ -167,7 +166,7 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv,
 }
 
 static struct xmit_frame *dequeue_xframe_ex(struct xmit_priv *pxmitpriv,
-                                    struct hw_xmit *phwxmit_i, sint entry)
+                                           struct hw_xmit *phwxmit_i, sint entry)
 {
        unsigned long irqL0;
        struct list_head *sta_plist, *sta_phead;
@@ -197,11 +196,10 @@ static struct xmit_frame *dequeue_xframe_ex(struct xmit_priv *pxmitpriv,
                sta_phead = &phwxmit->sta_queue->queue;
                sta_plist = sta_phead->next;
                while (!end_of_queue_search(sta_phead, sta_plist)) {
-                       ptxservq = container_of(sta_plist, struct tx_servq,
-                                               tx_pending);
+                       ptxservq = container_of(sta_plist, struct tx_servq, tx_pending);
                        pframe_queue = &ptxservq->sta_pending;
-                       pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit,
-                                    ptxservq, pframe_queue);
+                       pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq,
+                                                          pframe_queue);
                        if (pxmitframe) {
                                phwxmit->accnt--;
                                goto exit_dequeue_xframe_ex;
@@ -221,8 +219,7 @@ exit_dequeue_xframe_ex:
        return pxmitframe;
 }
 
-void r8712_do_queue_select(struct _adapter *padapter,
-                          struct pkt_attrib *pattrib)
+void r8712_do_queue_select(struct _adapter *padapter, struct pkt_attrib *pattrib)
 {
        unsigned int qsel = 0;
        struct dvobj_priv *pdvobj = &padapter->dvobjpriv;
@@ -292,14 +289,12 @@ void r8712_append_mpdu_unit(struct xmit_buf *pxmitbuf,
        r8712_xmit_complete(padapter, pxmitframe);
        if (pxmitframe->attrib.ether_type != 0x0806) {
                if ((pxmitframe->attrib.ether_type != 0x888e) &&
-                       (pxmitframe->attrib.dhcp_pkt != 1)) {
-                       r8712_issue_addbareq_cmd(padapter,
-                                       pxmitframe->attrib.priority);
+                   (pxmitframe->attrib.dhcp_pkt != 1)) {
+                       r8712_issue_addbareq_cmd(padapter, pxmitframe->attrib.priority);
                }
        }
        pxmitframe->last[0] = 1;
-       update_txdesc(pxmitframe, (uint *)(pxmitframe->buf_addr),
-               pxmitframe->attrib.last_txcmdsz);
+       update_txdesc(pxmitframe, (uint *)(pxmitframe->buf_addr), pxmitframe->attrib.last_txcmdsz);
        /*padding zero */
        last_txcmdsz = pxmitframe->attrib.last_txcmdsz;
        padding_sz = (8 - (last_txcmdsz % 8));
@@ -333,8 +328,7 @@ void r8712_xmitframe_aggr_1st(struct xmit_buf *pxmitbuf,
        pxmitbuf->aggr_nr = 1;
 }
 
-u16 r8712_xmitframe_aggr_next(struct xmit_buf *pxmitbuf,
-                       struct xmit_frame *pxmitframe)
+u16 r8712_xmitframe_aggr_next(struct xmit_buf *pxmitbuf, struct xmit_frame *pxmitframe)
 {
        pxmitframe->pxmitbuf = pxmitbuf;
        pxmitbuf->priv_data = pxmitframe;
@@ -374,9 +368,9 @@ void r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
        pxmitframe->bpending[0] = false;
        pxmitframe->mem_addr = pxmitbuf->pbuf;
 
-       if ((pdvobj->ishighspeed && ((total_length + TXDESC_SIZE) % 0x200) ==
-            0) || ((!pdvobj->ishighspeed && ((total_length + TXDESC_SIZE) %
-                                             0x40) == 0))) {
+       if ((pdvobj->ishighspeed && ((total_length + TXDESC_SIZE) % 0x200) == 0) ||
+           ((!pdvobj->ishighspeed && ((total_length + TXDESC_SIZE) %
+           0x40) == 0))) {
                ptxdesc->txdw0 |= cpu_to_le32
                        (((TXDESC_SIZE + OFFSET_SZ + 8) << OFFSET_SHT) &
                         0x00ff0000);
@@ -387,8 +381,8 @@ void r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
                         0x00ff0000);
                /*default = 32 bytes for TX Desc*/
        }
-       r8712_write_port(pxmitframe->padapter, RTL8712_DMA_H2CCMD,
-                       total_length + TXDESC_SIZE, (u8 *)pxmitframe);
+       r8712_write_port(pxmitframe->padapter, RTL8712_DMA_H2CCMD, total_length + TXDESC_SIZE,
+                        (u8 *)pxmitframe);
 }
 
 #endif
@@ -618,14 +612,12 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
        pxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry);
        /* need to remember the 1st frame */
        if (pxmitframe) {
-
 #ifdef CONFIG_R8712_TX_AGGR
                /* 1. dequeue 2nd frame
                 * 2. aggr if 2nd xframe is dequeued, else dump directly
                 */
                if (AGGR_NR_HIGH_BOUND > 1)
-                       p2ndxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits,
-                                                       hwentry);
+                       p2ndxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry);
                if (pxmitframe->frame_tag != DATA_FRAMETAG) {
                        r8712_free_xmitbuf(pxmitpriv, pxmitbuf);
                        return false;
@@ -639,16 +631,12 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
                if (p2ndxmitframe) {
                        u16 total_length;
 
-                       total_length = r8712_xmitframe_aggr_next(
-                               pxmitbuf, p2ndxmitframe);
+                       total_length = r8712_xmitframe_aggr_next(pxmitbuf, p2ndxmitframe);
                        do {
-                               p2ndxmitframe = dequeue_xframe_ex(
-                                       pxmitpriv, phwxmits, hwentry);
+                               p2ndxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry);
                                if (p2ndxmitframe)
                                        total_length =
-                                               r8712_xmitframe_aggr_next(
-                                                       pxmitbuf,
-                                                       p2ndxmitframe);
+                                               r8712_xmitframe_aggr_next(pxmitbuf, p2ndxmitframe);
                                else
                                        break;
                        } while (total_length <= 0x1800 &&
@@ -662,8 +650,8 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
                xmitframe_xmitbuf_attach(pxmitframe, pxmitbuf);
                if (pxmitframe->frame_tag == DATA_FRAMETAG) {
                        if (pxmitframe->attrib.priority <= 15)
-                               res = r8712_xmitframe_coalesce(padapter,
-                                       pxmitframe->pkt, pxmitframe);
+                               res = r8712_xmitframe_coalesce(padapter, pxmitframe->pkt,
+                                                              pxmitframe);
                        /* always return ndis_packet after
                         * r8712_xmitframe_coalesce
                         */
@@ -714,10 +702,10 @@ static void dump_xframe(struct _adapter *padapter,
                ff_hwaddr = get_ff_hwaddr(pxmitframe);
 #ifdef CONFIG_R8712_TX_AGGR
                r8712_write_port(padapter, RTL8712_DMA_H2CCMD, w_sz,
-                               (unsigned char *)pxmitframe);
+                                (unsigned char *)pxmitframe);
 #else
                r8712_write_port(padapter, ff_hwaddr, w_sz,
-                          (unsigned char *)pxmitframe);
+                                (unsigned char *)pxmitframe);
 #endif
                mem_addr += w_sz;
                mem_addr = (u8 *)RND4(((addr_t)(mem_addr)));
index 4be96df5a3296b40ba9c6b1a19af0fe236918bb3..bbd4a13c7bb9d189d82b83f29493c126a7b0ef1b 100644 (file)
@@ -242,8 +242,7 @@ void r8712_set_chplan_cmd(struct _adapter *padapter, int chplan)
                kfree(ph2c);
                return;
        }
-       init_h2fwcmd_w_parm_no_rsp(ph2c, psetchplanpara,
-                               GEN_CMD_CODE(_SetChannelPlan));
+       init_h2fwcmd_w_parm_no_rsp(ph2c, psetchplanpara, GEN_CMD_CODE(_SetChannelPlan));
        psetchplanpara->ChannelPlan = chplan;
        r8712_enqueue_cmd(pcmdpriv, ph2c);
 }
@@ -302,8 +301,7 @@ void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter,
        padapter->mppriv.workparam.bcompleted = true;
 }
 
-void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter,
-                               struct cmd_obj *pcmd)
+void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd)
 {
        kfree(pcmd->parmbuf);
        kfree(pcmd);
@@ -374,11 +372,10 @@ int r8712_joinbss_cmd(struct _adapter  *padapter, struct wlan_network *pnetwork)
        psecuritypriv->authenticator_ie[0] = (unsigned char)
                                             psecnetwork->IELength;
        if ((psecnetwork->IELength - 12) < (256 - 1))
-               memcpy(&psecuritypriv->authenticator_ie[1],
-                       &psecnetwork->IEs[12], psecnetwork->IELength - 12);
+               memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12],
+                      psecnetwork->IELength - 12);
        else
-               memcpy(&psecuritypriv->authenticator_ie[1],
-                       &psecnetwork->IEs[12], (256 - 1));
+               memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], (256 - 1));
        psecnetwork->IELength = 0;
        /*
         * If the driver wants to use the bssid to create the connection.
@@ -388,19 +385,15 @@ int r8712_joinbss_cmd(struct _adapter  *padapter, struct wlan_network *pnetwork)
        if (!pmlmepriv->assoc_by_bssid)
                ether_addr_copy(&pmlmepriv->assoc_bssid[0],
                                &pnetwork->network.MacAddress[0]);
-       psecnetwork->IELength = r8712_restruct_sec_ie(padapter,
-                                               &pnetwork->network.IEs[0],
-                                               &psecnetwork->IEs[0],
-                                               pnetwork->network.IELength);
+       psecnetwork->IELength = r8712_restruct_sec_ie(padapter, &pnetwork->network.IEs[0],
+                                                     &psecnetwork->IEs[0], pnetwork->network.IELength);
        pqospriv->qos_option = 0;
        if (pregistrypriv->wmm_enable) {
                u32 tmp_len;
 
-               tmp_len = r8712_restruct_wmm_ie(padapter,
-                                         &pnetwork->network.IEs[0],
-                                         &psecnetwork->IEs[0],
-                                         pnetwork->network.IELength,
-                                         psecnetwork->IELength);
+               tmp_len = r8712_restruct_wmm_ie(padapter, &pnetwork->network.IEs[0],
+                                               &psecnetwork->IEs[0], pnetwork->network.IELength,
+                                               psecnetwork->IELength);
                if (psecnetwork->IELength != tmp_len) {
                        psecnetwork->IELength = tmp_len;
                        pqospriv->qos_option = 1; /* WMM IE in beacon */
@@ -427,39 +420,28 @@ int r8712_joinbss_cmd(struct _adapter  *padapter, struct wlan_network *pnetwork)
        psecuritypriv->supplicant_ie[0] = (u8)psecnetwork->IELength;
        if (psecnetwork->IELength < 255)
                memcpy(&psecuritypriv->supplicant_ie[1], &psecnetwork->IEs[0],
-                       psecnetwork->IELength);
+                      psecnetwork->IELength);
        else
                memcpy(&psecuritypriv->supplicant_ie[1], &psecnetwork->IEs[0],
-                       255);
+                      255);
        /* get cmdsz before endian conversion */
        pcmd->cmdsz = r8712_get_wlan_bssid_ex_sz(psecnetwork);
 #ifdef __BIG_ENDIAN
        /* wlan_network endian conversion */
        psecnetwork->Length = cpu_to_le32(psecnetwork->Length);
-       psecnetwork->Ssid.SsidLength = cpu_to_le32(
-                                      psecnetwork->Ssid.SsidLength);
+       psecnetwork->Ssid.SsidLength = cpu_to_le32(psecnetwork->Ssid.SsidLength);
        psecnetwork->Privacy = cpu_to_le32(psecnetwork->Privacy);
        psecnetwork->Rssi = cpu_to_le32(psecnetwork->Rssi);
-       psecnetwork->NetworkTypeInUse = cpu_to_le32(
-                                       psecnetwork->NetworkTypeInUse);
-       psecnetwork->Configuration.ATIMWindow = cpu_to_le32(
-                               psecnetwork->Configuration.ATIMWindow);
-       psecnetwork->Configuration.BeaconPeriod = cpu_to_le32(
-                                psecnetwork->Configuration.BeaconPeriod);
-       psecnetwork->Configuration.DSConfig = cpu_to_le32(
-                               psecnetwork->Configuration.DSConfig);
-       psecnetwork->Configuration.FHConfig.DwellTime = cpu_to_le32(
-                               psecnetwork->Configuration.FHConfig.DwellTime);
-       psecnetwork->Configuration.FHConfig.HopPattern = cpu_to_le32(
-                               psecnetwork->Configuration.FHConfig.HopPattern);
-       psecnetwork->Configuration.FHConfig.HopSet = cpu_to_le32(
-                               psecnetwork->Configuration.FHConfig.HopSet);
-       psecnetwork->Configuration.FHConfig.Length = cpu_to_le32(
-                               psecnetwork->Configuration.FHConfig.Length);
-       psecnetwork->Configuration.Length = cpu_to_le32(
-                               psecnetwork->Configuration.Length);
-       psecnetwork->InfrastructureMode = cpu_to_le32(
-                               psecnetwork->InfrastructureMode);
+       psecnetwork->NetworkTypeInUse = cpu_to_le32(psecnetwork->NetworkTypeInUse);
+       psecnetwork->Configuration.ATIMWindow = cpu_to_le32(psecnetwork->Configuration.ATIMWindow);
+       psecnetwork->Configuration.BeaconPeriod = cpu_to_le32(psecnetwork->Configuration.BeaconPeriod);
+       psecnetwork->Configuration.DSConfig = cpu_to_le32(psecnetwork->Configuration.DSConfig);
+       psecnetwork->Configuration.FHConfig.DwellTime = cpu_to_le32(psecnetwork->Configuration.FHConfig.DwellTime);
+       psecnetwork->Configuration.FHConfig.HopPattern = cpu_to_le32(psecnetwork->Configuration.FHConfig.HopPattern);
+       psecnetwork->Configuration.FHConfig.HopSet = cpu_to_le32(psecnetwork->Configuration.FHConfig.HopSet);
+       psecnetwork->Configuration.FHConfig.Length = cpu_to_le32(psecnetwork->Configuration.FHConfig.Length);
+       psecnetwork->Configuration.Length = cpu_to_le32(psecnetwork->Configuration.Length);
+       psecnetwork->InfrastructureMode = cpu_to_le32(psecnetwork->InfrastructureMode);
        psecnetwork->IELength = cpu_to_le32(psecnetwork->IELength);
 #endif
        INIT_LIST_HEAD(&pcmd->list);
@@ -485,13 +467,12 @@ void r8712_disassoc_cmd(struct _adapter *padapter) /* for sta_mode */
                kfree(pdisconnect_cmd);
                return;
        }
-       init_h2fwcmd_w_parm_no_rsp(pdisconnect_cmd, pdisconnect,
-                                  _DisConnect_CMD_);
+       init_h2fwcmd_w_parm_no_rsp(pdisconnect_cmd, pdisconnect, _DisConnect_CMD_);
        r8712_enqueue_cmd(pcmdpriv, pdisconnect_cmd);
 }
 
 void r8712_setopmode_cmd(struct _adapter *padapter,
-                enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype)
+                        enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype)
 {
        struct cmd_obj *ph2c;
        struct setopmode_parm *psetop;
@@ -543,14 +524,12 @@ void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
                psetstakey_para->algorithm = (unsigned char)
                                            psecuritypriv->PrivacyAlgrthm;
        else
-               GET_ENCRY_ALGO(psecuritypriv, sta,
-                              psetstakey_para->algorithm, false);
+               GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);
        if (unicast_key)
                memcpy(&psetstakey_para->key, &sta->x_UncstKey, 16);
        else
-               memcpy(&psetstakey_para->key,
-                       &psecuritypriv->XGrpKey[
-                       psecuritypriv->XGrpKeyid - 1]. skey, 16);
+               memcpy(&psetstakey_para->key, &psecuritypriv->XGrpKey[psecuritypriv->XGrpKeyid - 1].
+                      skey, 16);
        r8712_enqueue_cmd(pcmdpriv, ph2c);
 }
 
@@ -568,8 +547,7 @@ void r8712_setMacAddr_cmd(struct _adapter *padapter, const u8 *mac_addr)
                kfree(ph2c);
                return;
        }
-       init_h2fwcmd_w_parm_no_rsp(ph2c, psetMacAddr_para,
-                                  _SetMacAddress_CMD_);
+       init_h2fwcmd_w_parm_no_rsp(ph2c, psetMacAddr_para, _SetMacAddress_CMD_);
        ether_addr_copy(psetMacAddr_para->MacAddr, mac_addr);
        r8712_enqueue_cmd(pcmdpriv, ph2c);
 }
@@ -589,8 +567,7 @@ void r8712_addbareq_cmd(struct _adapter *padapter, u8 tid)
                return;
        }
        paddbareq_parm->tid = tid;
-       init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm,
-                                  GEN_CMD_CODE(_AddBAReq));
+       init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq));
        r8712_enqueue_cmd_ex(pcmdpriv, ph2c);
 }
 
@@ -644,13 +621,11 @@ void r8712_joinbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd)
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
        if (pcmd->res != H2C_SUCCESS)
-               mod_timer(&pmlmepriv->assoc_timer,
-                         jiffies + msecs_to_jiffies(1));
+               mod_timer(&pmlmepriv->assoc_timer, jiffies + msecs_to_jiffies(1));
        r8712_free_cmd_obj(pcmd);
 }
 
-void r8712_createbss_cmd_callback(struct _adapter *padapter,
-                                 struct cmd_obj *pcmd)
+void r8712_createbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd)
 {
        unsigned long irqL;
        struct sta_info *psta = NULL;
@@ -660,8 +635,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
        struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
 
        if (pcmd->res != H2C_SUCCESS)
-               mod_timer(&pmlmepriv->assoc_timer,
-                         jiffies + msecs_to_jiffies(1));
+               mod_timer(&pmlmepriv->assoc_timer, jiffies + msecs_to_jiffies(1));
        del_timer(&pmlmepriv->assoc_timer);
 #ifdef __BIG_ENDIAN
        /* endian_convert */
@@ -670,31 +644,21 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
        pnetwork->Privacy = le32_to_cpu(pnetwork->Privacy);
        pnetwork->Rssi = le32_to_cpu(pnetwork->Rssi);
        pnetwork->NetworkTypeInUse = le32_to_cpu(pnetwork->NetworkTypeInUse);
-       pnetwork->Configuration.ATIMWindow =
-               le32_to_cpu(pnetwork->Configuration.ATIMWindow);
-       pnetwork->Configuration.DSConfig =
-               le32_to_cpu(pnetwork->Configuration.DSConfig);
-       pnetwork->Configuration.FHConfig.DwellTime =
-               le32_to_cpu(pnetwork->Configuration.FHConfig.DwellTime);
-       pnetwork->Configuration.FHConfig.HopPattern =
-               le32_to_cpu(pnetwork->Configuration.FHConfig.HopPattern);
-       pnetwork->Configuration.FHConfig.HopSet =
-               le32_to_cpu(pnetwork->Configuration.FHConfig.HopSet);
-       pnetwork->Configuration.FHConfig.Length =
-               le32_to_cpu(pnetwork->Configuration.FHConfig.Length);
-       pnetwork->Configuration.Length =
-               le32_to_cpu(pnetwork->Configuration.Length);
-       pnetwork->InfrastructureMode =
-               le32_to_cpu(pnetwork->InfrastructureMode);
+       pnetwork->Configuration.ATIMWindow = le32_to_cpu(pnetwork->Configuration.ATIMWindow);
+       pnetwork->Configuration.DSConfig = le32_to_cpu(pnetwork->Configuration.DSConfig);
+       pnetwork->Configuration.FHConfig.DwellTime = le32_to_cpu(pnetwork->Configuration.FHConfig.DwellTime);
+       pnetwork->Configuration.FHConfig.HopPattern = le32_to_cpu(pnetwork->Configuration.FHConfig.HopPattern);
+       pnetwork->Configuration.FHConfig.HopSet = le32_to_cpu(pnetwork->Configuration.FHConfig.HopSet);
+       pnetwork->Configuration.FHConfig.Length = le32_to_cpu(pnetwork->Configuration.FHConfig.Length);
+       pnetwork->Configuration.Length = le32_to_cpu(pnetwork->Configuration.Length);
+       pnetwork->InfrastructureMode = le32_to_cpu(pnetwork->InfrastructureMode);
        pnetwork->IELength = le32_to_cpu(pnetwork->IELength);
 #endif
        spin_lock_irqsave(&pmlmepriv->lock, irqL);
        if ((pmlmepriv->fw_state) & WIFI_AP_STATE) {
-               psta = r8712_get_stainfo(&padapter->stapriv,
-                                        pnetwork->MacAddress);
+               psta = r8712_get_stainfo(&padapter->stapriv, pnetwork->MacAddress);
                if (!psta) {
-                       psta = r8712_alloc_stainfo(&padapter->stapriv,
-                                                  pnetwork->MacAddress);
+                       psta = r8712_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
                        if (!psta)
                                goto createbss_cmd_fail;
                }
@@ -702,20 +666,17 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
        } else {
                pwlan = _r8712_alloc_network(pmlmepriv);
                if (!pwlan) {
-                       pwlan = r8712_get_oldest_wlan_network(
-                               &pmlmepriv->scanned_queue);
+                       pwlan = r8712_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
                        if (!pwlan)
                                goto createbss_cmd_fail;
                        pwlan->last_scanned = jiffies;
                } else {
-                       list_add_tail(&(pwlan->list),
-                                        &pmlmepriv->scanned_queue.queue);
+                       list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue);
                }
                pnetwork->Length = r8712_get_wlan_bssid_ex_sz(pnetwork);
                memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
                pwlan->fixed = true;
-               memcpy(&tgt_network->network, pnetwork,
-                       (r8712_get_wlan_bssid_ex_sz(pnetwork)));
+               memcpy(&tgt_network->network, pnetwork, (r8712_get_wlan_bssid_ex_sz(pnetwork)));
                if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
                        pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
                /*
@@ -728,14 +689,11 @@ createbss_cmd_fail:
        r8712_free_cmd_obj(pcmd);
 }
 
-void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter,
-                                    struct cmd_obj *pcmd)
+void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd)
 {
        struct sta_priv *pstapriv = &padapter->stapriv;
-       struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)
-                                               (pcmd->rsp);
-       struct sta_info *psta = r8712_get_stainfo(pstapriv,
-                                                 psetstakey_rsp->addr);
+       struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *) (pcmd->rsp);
+       struct sta_info *psta = r8712_get_stainfo(pstapriv, psetstakey_rsp->addr);
 
        if (!psta)
                goto exit;
@@ -750,27 +708,23 @@ void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter,
        unsigned long   irqL;
        struct sta_priv *pstapriv = &padapter->stapriv;
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-       struct set_assocsta_parm *passocsta_parm =
-                               (struct set_assocsta_parm *)(pcmd->parmbuf);
-       struct set_assocsta_rsp *passocsta_rsp =
-                               (struct set_assocsta_rsp *) (pcmd->rsp);
-       struct sta_info *psta = r8712_get_stainfo(pstapriv,
-                                                 passocsta_parm->addr);
+       struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
+       struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *) (pcmd->rsp);
+       struct sta_info *psta = r8712_get_stainfo(pstapriv, passocsta_parm->addr);
 
        if (!psta)
                return;
        psta->aid = psta->mac_id = passocsta_rsp->cam_id;
        spin_lock_irqsave(&pmlmepriv->lock, irqL);
-       if ((check_fwstate(pmlmepriv, WIFI_MP_STATE)) &&
-           (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)))
+       if ((check_fwstate(pmlmepriv, WIFI_MP_STATE)) && (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)))
                pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
        set_fwstate(pmlmepriv, _FW_LINKED);
        spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
        r8712_free_cmd_obj(pcmd);
 }
 
-void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
-                       u32 tryPktCnt, u32 tryPktInterval, u32 firstStageTO)
+void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt,
+                               u32 tryPktInterval, u32 firstStageTO)
 {
        struct cmd_obj *ph2c;
        struct DisconnectCtrlEx_param *param;
@@ -790,7 +744,6 @@ void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
        param->TryPktInterval = (unsigned char)tryPktInterval;
        param->FirstStageTO = (unsigned int)firstStageTO;
 
-       init_h2fwcmd_w_parm_no_rsp(ph2c, param,
-                               GEN_CMD_CODE(_DisconnectCtrlEx));
+       init_h2fwcmd_w_parm_no_rsp(ph2c, param, GEN_CMD_CODE(_DisconnectCtrlEx));
        r8712_enqueue_cmd(pcmdpriv, ph2c);
 }
index 8453d8de82484197464c33e803c9263b1cd20c42..2613b3c2acfcbe5a92f670d4acb111c79efdddc6 100644 (file)
@@ -716,39 +716,28 @@ struct DisconnectCtrlEx_param {
 #define H2C_RESERVED                   0x07
 
 void r8712_setMacAddr_cmd(struct _adapter *padapter, const u8 *mac_addr);
-u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
-                       struct ndis_802_11_ssid *pssid);
+u8 r8712_sitesurvey_cmd(struct _adapter *padapter, struct ndis_802_11_ssid *pssid);
 int r8712_createbss_cmd(struct _adapter *padapter);
 void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key);
-int r8712_joinbss_cmd(struct _adapter *padapter,
-                     struct wlan_network *pnetwork);
+int r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork);
 void r8712_disassoc_cmd(struct _adapter *padapter);
-void r8712_setopmode_cmd(struct _adapter *padapter,
-                enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype);
+void r8712_setopmode_cmd(struct _adapter *padapter, enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype);
 int r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset);
 void r8712_set_chplan_cmd(struct _adapter  *padapter, int chplan);
 int r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval);
 int r8712_setrfreg_cmd(struct _adapter  *padapter, u8 offset, u32 val);
 void r8712_addbareq_cmd(struct _adapter *padapter, u8 tid);
 void r8712_wdg_wk_cmd(struct _adapter *padapter);
-void r8712_survey_cmd_callback(struct _adapter  *padapter,
-                              struct cmd_obj *pcmd);
-void r8712_disassoc_cmd_callback(struct _adapter  *padapter,
-                                struct cmd_obj *pcmd);
-void r8712_joinbss_cmd_callback(struct _adapter  *padapter,
-                               struct cmd_obj *pcmd);
-void r8712_createbss_cmd_callback(struct _adapter *padapter,
-                                 struct cmd_obj *pcmd);
-void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter,
-                                     struct cmd_obj *pcmd);
-void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter,
-                               struct cmd_obj *pcmd);
-void r8712_setstaKey_cmdrsp_callback(struct _adapter  *padapter,
-                                    struct cmd_obj *pcmd);
-void r8712_setassocsta_cmdrsp_callback(struct _adapter  *padapter,
-                                      struct cmd_obj *pcmd);
-void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
-                       u32 tryPktCnt, u32 tryPktInterval, u32 firstStageTO);
+void r8712_survey_cmd_callback(struct _adapter  *padapter, struct cmd_obj *pcmd);
+void r8712_disassoc_cmd_callback(struct _adapter  *padapter, struct cmd_obj *pcmd);
+void r8712_joinbss_cmd_callback(struct _adapter  *padapter, struct cmd_obj *pcmd);
+void r8712_createbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_setstaKey_cmdrsp_callback(struct _adapter  *padapter, struct cmd_obj *pcmd);
+void r8712_setassocsta_cmdrsp_callback(struct _adapter  *padapter, struct cmd_obj *pcmd);
+void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt,
+                               u32 tryPktInterval, u32 firstStageTO);
 
 struct _cmd_callback {
        u32     cmd_code;
index 36f6904d25abc33b71325a976f3a5c1713a7d6b5..0653aa27b1fa29ca4f3ecfe34da3321d7eabc510 100644 (file)
@@ -59,8 +59,7 @@ void r8712_indicate_wx_assoc_event(struct _adapter *padapter)
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
        wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-       memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress,
-               ETH_ALEN);
+       memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN);
        wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
 }
 
@@ -111,11 +110,9 @@ static inline void handle_group_key(struct ieee_param *param,
                memcpy(grk[param->u.crypt.idx - 1].skey,
                       &param->u.crypt.key[24], 8);
                padapter->securitypriv.binstallGrpkey = true;
-               r8712_set_key(padapter, &padapter->securitypriv,
-                       param->u.crypt.idx);
+               r8712_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx);
                if (padapter->registrypriv.power_mgnt > PS_MODE_ACTIVE) {
-                       if (padapter->registrypriv.power_mgnt !=
-                           padapter->pwrctrlpriv.pwr_mode)
+                       if (padapter->registrypriv.power_mgnt != padapter->pwrctrlpriv.pwr_mode)
                                mod_timer(&padapter->mlmepriv.dhcp_timer,
                                          jiffies + msecs_to_jiffies(60000));
                }
@@ -148,13 +145,11 @@ static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
                memset(iwe, 0, sizeof(*iwe));
                iwe->cmd = IWEVCUSTOM;
                iwe->u.data.length = (u16)strlen(buf);
-               start = iwe_stream_add_point(info, start, stop,
-                       iwe, buf);
+               start = iwe_stream_add_point(info, start, stop, iwe, buf);
                memset(iwe, 0, sizeof(*iwe));
                iwe->cmd = IWEVGENIE;
                iwe->u.data.length = (u16)wpa_len;
-               start = iwe_stream_add_point(info, start, stop,
-                       iwe, wpa_ie);
+               start = iwe_stream_add_point(info, start, stop, iwe, wpa_ie);
        }
        if (rsn_len > 0) {
                memset(buf, 0, MAX_WPA_IE_LEN);
@@ -168,13 +163,11 @@ static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
                memset(iwe, 0, sizeof(*iwe));
                iwe->cmd = IWEVCUSTOM;
                iwe->u.data.length = strlen(buf);
-               start = iwe_stream_add_point(info, start, stop,
-                       iwe, buf);
+               start = iwe_stream_add_point(info, start, stop, iwe, buf);
                memset(iwe, 0, sizeof(*iwe));
                iwe->cmd = IWEVGENIE;
                iwe->u.data.length = rsn_len;
-               start = iwe_stream_add_point(info, start, stop, iwe,
-                       rsn_ie);
+               start = iwe_stream_add_point(info, start, stop, iwe, rsn_ie);
        }
 
        return start;
@@ -189,14 +182,11 @@ static noinline_for_stack char *translate_scan_wps(struct iw_request_info *info,
        u8 wps_ie[512];
        uint wps_ielen;
 
-       if (r8712_get_wps_ie(pnetwork->network.IEs,
-           pnetwork->network.IELength,
-           wps_ie, &wps_ielen)) {
+       if (r8712_get_wps_ie(pnetwork->network.IEs, pnetwork->network.IELength, wps_ie, &wps_ielen)) {
                if (wps_ielen > 2) {
                        iwe->cmd = IWEVGENIE;
                        iwe->u.data.length = (u16)wps_ielen;
-                       start = iwe_stream_add_point(info, start, stop,
-                               iwe, wps_ie);
+                       start = iwe_stream_add_point(info, start, stop, iwe, wps_ie);
                }
        }
 
@@ -259,16 +249,14 @@ static char *translate_scan(struct _adapter *padapter,
        start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
        /* Add mode */
        iwe.cmd = SIOCGIWMODE;
-       memcpy((u8 *)&cap, r8712_get_capability_from_ie(pnetwork->network.IEs),
-               2);
+       memcpy((u8 *)&cap, r8712_get_capability_from_ie(pnetwork->network.IEs), 2);
        le16_to_cpus(&cap);
        if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_ESS)) {
                if (cap & WLAN_CAPABILITY_ESS)
                        iwe.u.mode = (u32)IW_MODE_MASTER;
                else
                        iwe.u.mode = (u32)IW_MODE_ADHOC;
-               start = iwe_stream_add_event(info, start, stop, &iwe,
-                       IW_EV_UINT_LEN);
+               start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
        }
        /* Add frequency/channel */
        iwe.cmd = SIOCGIWFREQ;
@@ -276,28 +264,23 @@ static char *translate_scan(struct _adapter *padapter,
                /*  check legal index */
                u8 dsconfig = pnetwork->network.Configuration.DSConfig;
 
-               if (dsconfig >= 1 && dsconfig <= sizeof(
-                   ieee80211_wlan_frequencies) / sizeof(long))
-                       iwe.u.freq.m =
-                               (s32)(ieee80211_wlan_frequencies
-                                     [dsconfig - 1] * 100000);
+               if (dsconfig >= 1 && dsconfig <= sizeof(ieee80211_wlan_frequencies) / sizeof(long))
+                       iwe.u.freq.m = (s32)(ieee80211_wlan_frequencies[dsconfig - 1] * 100000);
                else
                        iwe.u.freq.m = 0;
        }
        iwe.u.freq.e = (s16)1;
        iwe.u.freq.i = (u8)pnetwork->network.Configuration.DSConfig;
        start = iwe_stream_add_event(info, start, stop, &iwe,
-               IW_EV_FREQ_LEN);
+                                    IW_EV_FREQ_LEN);
        /* Add encryption capability */
        iwe.cmd = SIOCGIWENCODE;
        if (cap & WLAN_CAPABILITY_PRIVACY)
-               iwe.u.data.flags = (u16)(IW_ENCODE_ENABLED |
-                                   IW_ENCODE_NOKEY);
+               iwe.u.data.flags = (u16)(IW_ENCODE_ENABLED | IW_ENCODE_NOKEY);
        else
                iwe.u.data.flags = (u16)(IW_ENCODE_DISABLED);
        iwe.u.data.length = (u16)0;
-       start = iwe_stream_add_point(info, start, stop, &iwe,
-               pnetwork->network.Ssid.Ssid);
+       start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
        /*Add basic and extended rates */
        current_val = start + iwe_stream_lcp_len(info);
        iwe.cmd = SIOCGIWRATE;
@@ -307,10 +290,9 @@ static char *translate_scan(struct _adapter *padapter,
        i = 0;
        while (pnetwork->network.rates[i] != 0) {
                /* Bit rate given in 500 kb/s units */
-               iwe.u.bitrate.value = (pnetwork->network.rates[i++] &
-                                     0x7F) * 500000;
-               current_val = iwe_stream_add_value(info, start, current_val,
-                             stop, &iwe, IW_EV_PARAM_LEN);
+               iwe.u.bitrate.value = (pnetwork->network.rates[i++] & 0x7F) * 500000;
+               current_val = iwe_stream_add_value(info, start, current_val, stop, &iwe,
+                                                  IW_EV_PARAM_LEN);
        }
        /* Check if we added any event */
        if ((current_val - start) > iwe_stream_lcp_len(info))
@@ -324,8 +306,7 @@ static char *translate_scan(struct _adapter *padapter,
        iwe.cmd = IWEVQUAL;
        rssi = r8712_signal_scale_mapping(pnetwork->network.Rssi);
        /* we only update signal_level (signal strength) that is rssi. */
-       iwe.u.qual.updated = (u8)(IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_UPDATED |
-                                 IW_QUAL_NOISE_INVALID);
+       iwe.u.qual.updated = (u8)(IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID);
        iwe.u.qual.level = rssi;  /* signal strength */
        iwe.u.qual.qual = 0; /* signal quality */
        iwe.u.qual.noise = 0; /* noise level */
@@ -490,71 +471,59 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
                        goto exit;
                }
                if (r8712_parse_wpa_ie(buf, ielen, &group_cipher,
-                   &pairwise_cipher) == 0) {
+                                      &pairwise_cipher) == 0) {
                        padapter->securitypriv.AuthAlgrthm = 2;
                        padapter->securitypriv.ndisauthtype =
                                  Ndis802_11AuthModeWPAPSK;
                }
                if (r8712_parse_wpa2_ie(buf, ielen, &group_cipher,
-                   &pairwise_cipher) == 0) {
+                                       &pairwise_cipher) == 0) {
                        padapter->securitypriv.AuthAlgrthm = 2;
                        padapter->securitypriv.ndisauthtype =
                                  Ndis802_11AuthModeWPA2PSK;
                }
                switch (group_cipher) {
                case WPA_CIPHER_NONE:
-                       padapter->securitypriv.XGrpPrivacy =
-                                _NO_PRIVACY_;
-                       padapter->securitypriv.ndisencryptstatus =
-                                Ndis802_11EncryptionDisabled;
+                       padapter->securitypriv.XGrpPrivacy = _NO_PRIVACY_;
+                       padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
                        break;
                case WPA_CIPHER_WEP40:
                        padapter->securitypriv.XGrpPrivacy = _WEP40_;
-                       padapter->securitypriv.ndisencryptstatus =
-                                Ndis802_11Encryption1Enabled;
+                       padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
                        break;
                case WPA_CIPHER_TKIP:
                        padapter->securitypriv.XGrpPrivacy = _TKIP_;
-                       padapter->securitypriv.ndisencryptstatus =
-                                Ndis802_11Encryption2Enabled;
+                       padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
                        break;
                case WPA_CIPHER_CCMP:
                        padapter->securitypriv.XGrpPrivacy = _AES_;
-                       padapter->securitypriv.ndisencryptstatus =
-                                Ndis802_11Encryption3Enabled;
+                       padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
                        break;
                case WPA_CIPHER_WEP104:
                        padapter->securitypriv.XGrpPrivacy = _WEP104_;
-                       padapter->securitypriv.ndisencryptstatus =
-                                Ndis802_11Encryption1Enabled;
+                       padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
                        break;
                }
                switch (pairwise_cipher) {
                case WPA_CIPHER_NONE:
-                       padapter->securitypriv.PrivacyAlgrthm =
-                                _NO_PRIVACY_;
-                       padapter->securitypriv.ndisencryptstatus =
-                                Ndis802_11EncryptionDisabled;
+                       padapter->securitypriv.PrivacyAlgrthm = _NO_PRIVACY_;
+                       padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
                        break;
                case WPA_CIPHER_WEP40:
                        padapter->securitypriv.PrivacyAlgrthm = _WEP40_;
-                       padapter->securitypriv.ndisencryptstatus =
-                                Ndis802_11Encryption1Enabled;
+                       padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
                        break;
                case WPA_CIPHER_TKIP:
                        padapter->securitypriv.PrivacyAlgrthm = _TKIP_;
-                       padapter->securitypriv.ndisencryptstatus =
-                                Ndis802_11Encryption2Enabled;
+                       padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
                        break;
                case WPA_CIPHER_CCMP:
                        padapter->securitypriv.PrivacyAlgrthm = _AES_;
-                       padapter->securitypriv.ndisencryptstatus =
-                                Ndis802_11Encryption3Enabled;
+                       padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
                        break;
                case WPA_CIPHER_WEP104:
                        padapter->securitypriv.PrivacyAlgrthm = _WEP104_;
-                       padapter->securitypriv.ndisencryptstatus =
-                                Ndis802_11Encryption1Enabled;
+                       padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
                        break;
                }
                padapter->securitypriv.wps_phase = false;
@@ -574,8 +543,8 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
                                            (buf[cnt + 1] + 2) :
                                            (MAX_WPA_IE_LEN << 2);
                                        memcpy(padapter->securitypriv.wps_ie,
-                                           &buf[cnt],
-                                           padapter->securitypriv.wps_ie_len);
+                                              &buf[cnt],
+                                              padapter->securitypriv.wps_ie_len);
                                        padapter->securitypriv.wps_phase =
                                                                 true;
                                        netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE, wps_phase==true\n");
@@ -592,8 +561,7 @@ exit:
        return ret;
 }
 
-static int r8711_wx_get_name(struct net_device *dev,
-                            struct iw_request_info *info,
+static int r8711_wx_get_name(struct net_device *dev, struct iw_request_info *info,
                             union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
@@ -604,8 +572,7 @@ static int r8711_wx_get_name(struct net_device *dev,
        struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
        u8 *prates;
 
-       if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE) ==
-           true) {
+       if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE) == true) {
                /* parsing HT_CAP_IE */
                p = r8712_get_ie(&pcur_bss->IEs[12], WLAN_EID_HT_CAPABILITY,
                                 &ht_ielen, pcur_bss->IELength - 12);
@@ -658,9 +625,7 @@ static int r8711_wx_set_freq(struct net_device *dev,
        int rc = 0;
 
 /* If setting by frequency, convert to a channel */
-       if ((fwrq->e == 1) &&
-         (fwrq->m >= 241200000) &&
-         (fwrq->m <= 248700000)) {
+       if ((fwrq->e == 1) && (fwrq->m >= 241200000) && (fwrq->m <= 248700000)) {
                int f = fwrq->m / 100000;
                int c = 0;
 
@@ -685,8 +650,7 @@ static int r8711_wx_set_freq(struct net_device *dev,
        return rc;
 }
 
-static int r8711_wx_get_freq(struct net_device *dev,
-                            struct iw_request_info *info,
+static int r8711_wx_get_freq(struct net_device *dev, struct iw_request_info *info,
                             union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
@@ -745,7 +709,7 @@ static int r8711_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
        if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
                wrqu->mode = IW_MODE_INFRA;
        else if (check_fwstate(pmlmepriv,
-                WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE))
+                              WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE))
                wrqu->mode = IW_MODE_ADHOC;
        else if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
                wrqu->mode = IW_MODE_MASTER;
@@ -754,9 +718,8 @@ static int r8711_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
        return 0;
 }
 
-static int r871x_wx_set_pmkid(struct net_device *dev,
-                            struct iw_request_info *a,
-                            union iwreq_data *wrqu, char *extra)
+static int r871x_wx_set_pmkid(struct net_device *dev, struct iw_request_info *a,
+                             union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
        struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -828,7 +791,7 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
                break;
        case IW_PMKSA_FLUSH:
                memset(psecuritypriv->PMKIDList, 0,
-                       sizeof(struct RT_PMKID_LIST) * NUM_PMKID_CACHE);
+                      sizeof(struct RT_PMKID_LIST) * NUM_PMKID_CACHE);
                psecuritypriv->PMKIDIndex = 0;
                intReturn = true;
                break;
@@ -850,9 +813,8 @@ static int r8711_wx_get_sens(struct net_device *dev,
        return 0;
 }
 
-static int r8711_wx_get_range(struct net_device *dev,
-                               struct iw_request_info *info,
-                               union iwreq_data *wrqu, char *extra)
+static int r8711_wx_get_range(struct net_device *dev, struct iw_request_info *info,
+                             union iwreq_data *wrqu, char *extra)
 {
        struct iw_range *range = (struct iw_range *)extra;
        u16 val;
@@ -912,9 +874,9 @@ static int r8711_wx_get_rate(struct net_device *dev,
                             union iwreq_data *wrqu, char *extra);
 
 static int r871x_wx_set_priv(struct net_device *dev,
-                               struct iw_request_info *info,
-                               union iwreq_data *awrq,
-                               char *extra)
+                            struct iw_request_info *info,
+                            union iwreq_data *awrq,
+                            char *extra)
 {
        int ret = 0, len = 0;
        char *ext;
@@ -995,12 +957,10 @@ static int r871x_wx_set_priv(struct net_device *dev,
                );
                sprintf(ext, "OK");
        } else {
-               netdev_info(dev, "r8712u: %s: unknown Command %s.\n",
-                           __func__, ext);
+               netdev_info(dev, "r8712u: %s: unknown Command %s.\n", __func__, ext);
                goto FREE_EXT;
        }
-       if (copy_to_user(dwrq->pointer, ext,
-                               min(dwrq->length, (__u16)(strlen(ext) + 1))))
+       if (copy_to_user(dwrq->pointer, ext, min(dwrq->length, (__u16)(strlen(ext) + 1))))
                ret = -EFAULT;
 
 FREE_EXT:
@@ -1021,10 +981,8 @@ FREE_EXT:
  * For this operation to succeed, there is no need for the interface to be up.
  *
  */
-static int r8711_wx_set_wap(struct net_device *dev,
-                        struct iw_request_info *info,
-                        union iwreq_data *awrq,
-                        char *extra)
+static int r8711_wx_set_wap(struct net_device *dev, struct iw_request_info *info,
+                           union iwreq_data *awrq, char *extra)
 {
        int ret = -EINPROGRESS;
        struct _adapter *padapter = netdev_priv(dev);
@@ -1072,17 +1030,15 @@ static int r8711_wx_set_wap(struct net_device *dev,
        return ret;
 }
 
-static int r8711_wx_get_wap(struct net_device *dev,
-                               struct iw_request_info *info,
-                               union iwreq_data *wrqu, char *extra)
+static int r8711_wx_get_wap(struct net_device *dev, struct iw_request_info *info,
+                           union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
        struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
 
        wrqu->ap_addr.sa_family = ARPHRD_ETHER;
-       if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE |
-                                    WIFI_AP_STATE))
+       if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE))
                ether_addr_copy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress);
        else
                eth_zero_addr(wrqu->ap_addr.sa_data);
@@ -1122,9 +1078,8 @@ static int r871x_wx_set_mlme(struct net_device *dev,
  * For this operation to succeed, the interface is brought Up beforehand.
  *
  */
-static int r8711_wx_set_scan(struct net_device *dev,
-                       struct iw_request_info *a,
-                       union iwreq_data *wrqu, char *extra)
+static int r8711_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
+                            union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1150,8 +1105,7 @@ static int r8711_wx_set_scan(struct net_device *dev,
                        unsigned long irqL;
                        u32 len = min_t(u8, req->essid_len, IW_ESSID_MAX_SIZE);
 
-                       memset((unsigned char *)&ssid, 0,
-                                sizeof(struct ndis_802_11_ssid));
+                       memset((unsigned char *)&ssid, 0, sizeof(struct ndis_802_11_ssid));
                        memcpy(ssid.Ssid, req->essid, len);
                        ssid.SsidLength = len;
                        spin_lock_irqsave(&pmlmepriv->lock, irqL);
@@ -1173,9 +1127,8 @@ static int r8711_wx_set_scan(struct net_device *dev,
        return 0;
 }
 
-static int r8711_wx_get_scan(struct net_device *dev,
-                               struct iw_request_info *a,
-                               union iwreq_data *wrqu, char *extra)
+static int r8711_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
+                            union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1189,8 +1142,7 @@ static int r8711_wx_get_scan(struct net_device *dev,
 
        if (padapter->driver_stopped)
                return -EINVAL;
-       while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY |
-                            _FW_UNDER_LINKING)) {
+       while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING)) {
                msleep(30);
                cnt++;
                if (cnt > 100)
@@ -1228,9 +1180,8 @@ static int r8711_wx_get_scan(struct net_device *dev,
  * For this operation to succeed, there is no need for the interface to be Up.
  *
  */
-static int r8711_wx_set_essid(struct net_device *dev,
-                               struct iw_request_info *a,
-                               union iwreq_data *wrqu, char *extra)
+static int r8711_wx_set_essid(struct net_device *dev, struct iw_request_info *a,
+                             union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1268,8 +1219,7 @@ static int r8711_wx_set_essid(struct net_device *dev,
                        if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength))
                            && (pnetwork->network.Ssid.SsidLength ==
                             ndis_ssid.SsidLength)) {
-                               if (check_fwstate(pmlmepriv,
-                                                       WIFI_ADHOC_STATE)) {
+                               if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
                                        if (pnetwork->network.
                                                InfrastructureMode
                                                !=
@@ -1291,9 +1241,8 @@ static int r8711_wx_set_essid(struct net_device *dev,
        return -EINPROGRESS;
 }
 
-static int r8711_wx_get_essid(struct net_device *dev,
-                               struct iw_request_info *a,
-                               union iwreq_data *wrqu, char *extra)
+static int r8711_wx_get_essid(struct net_device *dev, struct iw_request_info *a,
+                             union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1311,9 +1260,8 @@ static int r8711_wx_get_essid(struct net_device *dev,
        return ret;
 }
 
-static int r8711_wx_set_rate(struct net_device *dev,
-                               struct iw_request_info *a,
-                               union iwreq_data *wrqu, char *extra)
+static int r8711_wx_set_rate(struct net_device *dev, struct iw_request_info *a,
+                            union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
        u32 target_rate = wrqu->bitrate.value;
@@ -1382,8 +1330,7 @@ set_rate:
        return r8712_setdatarate_cmd(padapter, datarates);
 }
 
-static int r8711_wx_get_rate(struct net_device *dev,
-                            struct iw_request_info *info,
+static int r8711_wx_get_rate(struct net_device *dev, struct iw_request_info *info,
                             union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
@@ -1437,9 +1384,8 @@ static int r8711_wx_get_rate(struct net_device *dev,
        return 0;
 }
 
-static int r8711_wx_get_rts(struct net_device *dev,
-                               struct iw_request_info *info,
-                               union iwreq_data *wrqu, char *extra)
+static int r8711_wx_get_rts(struct net_device *dev, struct iw_request_info *info,
+                           union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
 
@@ -1448,9 +1394,8 @@ static int r8711_wx_get_rts(struct net_device *dev,
        return 0;
 }
 
-static int r8711_wx_set_frag(struct net_device *dev,
-                               struct iw_request_info *info,
-                               union iwreq_data *wrqu, char *extra)
+static int r8711_wx_set_frag(struct net_device *dev, struct iw_request_info *info,
+                            union iwreq_data *wrqu, char *extra)
 {
        struct _adapter *padapter = netdev_priv(dev);
 
index a08c5d2f59e38bd1edf861799ee0edff608ede82..bb9f83d58225dc3727121c690224dfe9308cd0f5 100644 (file)
 #define        rFPGA0_AnalogParameter3         0x888   /* Useless now */
 #define        rFPGA0_AnalogParameter4         0x88c
 
-#define        rFPGA0_XA_LSSIReadBack          0x8a0   /* Tranceiver LSSI Readback */
+#define        rFPGA0_XA_LSSIReadBack          0x8a0   /* Transceiver LSSI Readback */
 #define        rFPGA0_XB_LSSIReadBack          0x8a4
 #define        rFPGA0_XC_LSSIReadBack          0x8a8
 #define        rFPGA0_XD_LSSIReadBack          0x8ac
index 6d9d4a800aa73c19d1a01f061b0d3bdbbb62001d..05eb5140d0966d106e8c927da96c32ecbc599a9d 100644 (file)
@@ -23,11 +23,6 @@ should properly handle a module unload. This also includes that all
 resources must be freed (kthreads, debugfs entries, ...) and global
 variables avoided.
 
-* Cleanup logging mechanism
-
-The driver should probably be using the standard kernel logging mechanisms
-such as dev_info, dev_dbg, and friends.
-
 * Documentation
 
 A short top-down description of this driver's architecture (function of
index 9fb8f657cc781ad41454dc67ef2608d6505db9f7..1579bd4e5263cb9c5f238184f18bbd304f9efcb0 100644 (file)
@@ -255,8 +255,7 @@ create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
        pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
                                      GFP_KERNEL);
 
-       vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
-                       "%s - %pK", __func__, pagelist);
+       dev_dbg(instance->state->dev, "arm: %pK\n", pagelist);
 
        if (!pagelist)
                return NULL;
@@ -311,9 +310,8 @@ create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
                                                   type == PAGELIST_READ, pages);
 
                if (actual_pages != num_pages) {
-                       vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
-                                       "%s - only %d/%d pages locked",
-                                       __func__, actual_pages, num_pages);
+                       dev_dbg(instance->state->dev, "arm: Only %d/%d pages locked\n",
+                               actual_pages, num_pages);
 
                        /* This is probably due to the process being killed */
                        if (actual_pages > 0)
@@ -407,8 +405,7 @@ free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagel
        struct page **pages = pagelistinfo->pages;
        unsigned int num_pages = pagelistinfo->num_pages;
 
-       vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
-                       "%s - %pK, %d", __func__, pagelistinfo->pagelist, actual);
+       dev_dbg(instance->state->dev, "arm: %pK, %d\n", pagelistinfo->pagelist, actual);
 
        /*
         * NOTE: dma_unmap_sg must be called before the
@@ -556,8 +553,8 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
                return -ENXIO;
        }
 
-       vchiq_log_debug(&pdev->dev, VCHIQ_ARM, "vchiq_init - done (slots %pK, phys %pad)",
-                       vchiq_slot_zero, &slot_phys);
+       dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %pK, phys %pad)\n",
+               vchiq_slot_zero, &slot_phys);
 
        vchiq_call_connected_callbacks();
 
@@ -659,13 +656,9 @@ vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
                              bulk->actual);
 }
 
-int vchiq_dump_platform_state(void *dump_context)
+void vchiq_dump_platform_state(struct seq_file *f)
 {
-       char buf[80];
-       int len;
-
-       len = snprintf(buf, sizeof(buf), "  Platform: 2835 (VC master)");
-       return vchiq_dump(dump_context, buf, len + 1);
+       seq_puts(f, "  Platform: 2835 (VC master)\n");
 }
 
 #define VCHIQ_INIT_RETRIES 10
@@ -687,19 +680,17 @@ int vchiq_initialise(struct vchiq_instance **instance_out)
                usleep_range(500, 600);
        }
        if (i == VCHIQ_INIT_RETRIES) {
-               vchiq_log_error(state->dev, VCHIQ_CORE, "%s: videocore not initialized\n",
-                               __func__);
+               dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__);
                ret = -ENOTCONN;
                goto failed;
        } else if (i > 0) {
-               vchiq_log_warning(state->dev, VCHIQ_CORE,
-                                 "%s: videocore initialized after %d retries\n", __func__, i);
+               dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n",
+                        __func__, i);
        }
 
        instance = kzalloc(sizeof(*instance), GFP_KERNEL);
        if (!instance) {
-               vchiq_log_error(state->dev, VCHIQ_CORE,
-                               "%s: error allocating vchiq instance\n", __func__);
+               dev_err(state->dev, "core: %s: Cannot allocate vchiq instance\n", __func__);
                ret = -ENOMEM;
                goto failed;
        }
@@ -714,8 +705,7 @@ int vchiq_initialise(struct vchiq_instance **instance_out)
        ret = 0;
 
 failed:
-       vchiq_log_trace(state->dev, VCHIQ_CORE,
-                       "%s(%p): returning %d", __func__, instance, ret);
+       dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
 
        return ret;
 }
@@ -728,9 +718,9 @@ void free_bulk_waiter(struct vchiq_instance *instance)
        list_for_each_entry_safe(waiter, next,
                                 &instance->bulk_waiter_list, list) {
                list_del(&waiter->list);
-               vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
-                               "bulk_waiter - cleaned up %pK for pid %d",
-                               waiter, waiter->pid);
+               dev_dbg(instance->state->dev,
+                       "arm: bulk_waiter - cleaned up %pK for pid %d\n",
+                       waiter, waiter->pid);
                kfree(waiter);
        }
 }
@@ -748,8 +738,7 @@ int vchiq_shutdown(struct vchiq_instance *instance)
 
        mutex_unlock(&state->mutex);
 
-       vchiq_log_trace(state->dev, VCHIQ_CORE,
-                       "%s(%p): returning %d", __func__, instance, status);
+       dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
 
        free_bulk_waiter(instance);
        kfree(instance);
@@ -769,8 +758,8 @@ int vchiq_connect(struct vchiq_instance *instance)
        struct vchiq_state *state = instance->state;
 
        if (mutex_lock_killable(&state->mutex)) {
-               vchiq_log_trace(state->dev, VCHIQ_CORE,
-                               "%s: call to mutex_lock failed", __func__);
+               dev_dbg(state->dev,
+                       "core: call to mutex_lock failed\n");
                status = -EAGAIN;
                goto failed;
        }
@@ -782,8 +771,7 @@ int vchiq_connect(struct vchiq_instance *instance)
        mutex_unlock(&state->mutex);
 
 failed:
-       vchiq_log_trace(state->dev, VCHIQ_CORE,
-                       "%s(%p): returning %d", __func__, instance, status);
+       dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
 
        return status;
 }
@@ -814,8 +802,7 @@ vchiq_add_service(struct vchiq_instance *instance,
                status = -EINVAL;
        }
 
-       vchiq_log_trace(state->dev, VCHIQ_CORE,
-                       "%s(%p): returning %d", __func__, instance, status);
+       dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
 
        return status;
 }
@@ -846,8 +833,7 @@ vchiq_open_service(struct vchiq_instance *instance,
        }
 
 failed:
-       vchiq_log_trace(state->dev, VCHIQ_CORE,
-                       "%s(%p): returning %d", __func__, instance, status);
+       dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
 
        return status;
 }
@@ -971,8 +957,7 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
        } else {
                waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
                if (!waiter) {
-                       vchiq_log_error(service->state->dev, VCHIQ_CORE,
-                                       "%s - out of memory", __func__);
+                       dev_err(service->state->dev, "core: %s: - Out of memory\n", __func__);
                        return -ENOMEM;
                }
        }
@@ -995,9 +980,8 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
                mutex_lock(&instance->bulk_waiter_list_mutex);
                list_add(&waiter->list, &instance->bulk_waiter_list);
                mutex_unlock(&instance->bulk_waiter_list_mutex);
-               vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
-                               "saved bulk_waiter %pK for pid %d", waiter,
-                               current->pid);
+               dev_dbg(instance->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
+                       waiter, current->pid);
        }
 
        return status;
@@ -1017,16 +1001,13 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
        while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
                /* Out of space - wait for the client */
                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
-               vchiq_log_trace(instance->state->dev, VCHIQ_CORE,
-                               "%s - completion queue full", __func__);
+               dev_dbg(instance->state->dev, "core: completion queue full\n");
                DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
                if (wait_for_completion_interruptible(&instance->remove_event)) {
-                       vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
-                                       "service_callback interrupted");
+                       dev_dbg(instance->state->dev, "arm: service_callback interrupted\n");
                        return -EAGAIN;
                } else if (instance->closing) {
-                       vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
-                                       "service_callback closing");
+                       dev_dbg(instance->state->dev, "arm: service_callback closing\n");
                        return 0;
                }
                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -1106,11 +1087,10 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
        vchiq_service_get(service);
        rcu_read_unlock();
 
-       vchiq_log_trace(service->state->dev, VCHIQ_ARM,
-                       "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
-                       __func__, (unsigned long)user_service, service->localport,
-                       user_service->userdata, reason, (unsigned long)header,
-                       (unsigned long)instance, (unsigned long)bulk_userdata);
+       dev_dbg(service->state->dev,
+               "arm: service %p(%d,%p), reason %d, header %p, instance %p, bulk_userdata %p\n",
+               user_service, service->localport, user_service->userdata,
+               reason, header, instance, bulk_userdata);
 
        if (header && user_service->is_vchi) {
                spin_lock(&msg_queue_spinlock);
@@ -1119,8 +1099,7 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
                        spin_unlock(&msg_queue_spinlock);
                        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                        DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
-                       vchiq_log_trace(service->state->dev, VCHIQ_ARM,
-                                       "%s - msg queue full", __func__);
+                       dev_dbg(service->state->dev, "arm: msg queue full\n");
                        /*
                         * If there is no MESSAGE_AVAILABLE in the completion
                         * queue, add one
@@ -1129,8 +1108,8 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
                                instance->completion_remove) < 0) {
                                int status;
 
-                               vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
-                                               "Inserting extra MESSAGE_AVAILABLE");
+                               dev_dbg(instance->state->dev,
+                                       "arm: Inserting extra MESSAGE_AVAILABLE\n");
                                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                                status = add_completion(instance, reason, NULL, user_service,
                                                        bulk_userdata);
@@ -1143,14 +1122,12 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
 
                        DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                        if (wait_for_completion_interruptible(&user_service->remove_event)) {
-                               vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
-                                               "%s interrupted", __func__);
+                               dev_dbg(instance->state->dev, "arm: interrupted\n");
                                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                                vchiq_service_put(service);
                                return -EAGAIN;
                        } else if (instance->closing) {
-                               vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
-                                               "%s closing", __func__);
+                               dev_dbg(instance->state->dev, "arm: closing\n");
                                DEBUG_TRACE(SERVICE_CALLBACK_LINE);
                                vchiq_service_put(service);
                                return -EINVAL;
@@ -1190,56 +1167,13 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
                bulk_userdata);
 }
 
-int vchiq_dump(void *dump_context, const char *str, int len)
-{
-       struct dump_context *context = (struct dump_context *)dump_context;
-       int copy_bytes;
-
-       if (context->actual >= context->space)
-               return 0;
-
-       if (context->offset > 0) {
-               int skip_bytes = min_t(int, len, context->offset);
-
-               str += skip_bytes;
-               len -= skip_bytes;
-               context->offset -= skip_bytes;
-               if (context->offset > 0)
-                       return 0;
-       }
-       copy_bytes = min_t(int, len, context->space - context->actual);
-       if (copy_bytes == 0)
-               return 0;
-       if (copy_to_user(context->buf + context->actual, str,
-                        copy_bytes))
-               return -EFAULT;
-       context->actual += copy_bytes;
-       len -= copy_bytes;
-
-       /*
-        * If the terminating NUL is included in the length, then it
-        * marks the end of a line and should be replaced with a
-        * carriage return.
-        */
-       if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
-               char cr = '\n';
-
-               if (copy_to_user(context->buf + context->actual - 1,
-                                &cr, 1))
-                       return -EFAULT;
-       }
-       return 0;
-}
-
-int vchiq_dump_platform_instances(void *dump_context)
+void vchiq_dump_platform_instances(struct seq_file *f)
 {
        struct vchiq_state *state = vchiq_get_state();
-       char buf[80];
-       int len;
        int i;
 
        if (!state)
-               return -ENOTCONN;
+               return;
 
        /*
         * There is no list of instances, so instead scan all services,
@@ -1264,7 +1198,6 @@ int vchiq_dump_platform_instances(void *dump_context)
        for (i = 0; i < state->unused_service; i++) {
                struct vchiq_service *service;
                struct vchiq_instance *instance;
-               int err;
 
                rcu_read_lock();
                service = rcu_dereference(state->services[i]);
@@ -1280,43 +1213,35 @@ int vchiq_dump_platform_instances(void *dump_context)
                }
                rcu_read_unlock();
 
-               len = snprintf(buf, sizeof(buf),
-                              "Instance %pK: pid %d,%s completions %d/%d",
-                              instance, instance->pid,
-                              instance->connected ? " connected, " :
-                              "",
-                              instance->completion_insert -
-                              instance->completion_remove,
-                              MAX_COMPLETIONS);
-               err = vchiq_dump(dump_context, buf, len + 1);
-               if (err)
-                       return err;
+               seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n",
+                          instance, instance->pid,
+                          instance->connected ? " connected, " :
+                          "",
+                          instance->completion_insert -
+                          instance->completion_remove,
+                          MAX_COMPLETIONS);
                instance->mark = 1;
        }
-       return 0;
 }
 
-int vchiq_dump_platform_service_state(void *dump_context,
-                                     struct vchiq_service *service)
+void vchiq_dump_platform_service_state(struct seq_file *f,
+                                      struct vchiq_service *service)
 {
        struct user_service *user_service =
                        (struct user_service *)service->base.userdata;
-       char buf[80];
-       int len;
 
-       len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
+       seq_printf(f, "  instance %pK", service->instance);
 
        if ((service->base.callback == service_callback) && user_service->is_vchi) {
-               len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
-                                user_service->msg_insert - user_service->msg_remove,
-                                MSG_QUEUE_SIZE);
+               seq_printf(f, ", %d/%d messages",
+                          user_service->msg_insert - user_service->msg_remove,
+                          MSG_QUEUE_SIZE);
 
                if (user_service->dequeue_pending)
-                       len += scnprintf(buf + len, sizeof(buf) - len,
-                               " (dequeue pending)");
+                       seq_puts(f, " (dequeue pending)");
        }
 
-       return vchiq_dump(dump_context, buf, len + 1);
+       seq_puts(f, "\n");
 }
 
 struct vchiq_state *
@@ -1346,8 +1271,8 @@ vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
                               struct vchiq_header *header,
                               unsigned int service_user, void *bulk_user)
 {
-       vchiq_log_error(instance->state->dev, VCHIQ_SUSPEND,
-                       "%s callback reason %d", __func__, reason);
+       dev_err(instance->state->dev, "suspend: %s: callback reason %d\n",
+               __func__, reason);
        return 0;
 }
 
@@ -1371,22 +1296,20 @@ vchiq_keepalive_thread_func(void *v)
 
        ret = vchiq_initialise(&instance);
        if (ret) {
-               vchiq_log_error(state->dev, VCHIQ_SUSPEND,
-                               "%s vchiq_initialise failed %d", __func__, ret);
+               dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret);
                goto exit;
        }
 
        status = vchiq_connect(instance);
        if (status) {
-               vchiq_log_error(state->dev, VCHIQ_SUSPEND,
-                               "%s vchiq_connect failed %d", __func__, status);
+               dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, status);
                goto shutdown;
        }
 
        status = vchiq_add_service(instance, &params, &ka_handle);
        if (status) {
-               vchiq_log_error(state->dev, VCHIQ_SUSPEND,
-                               "%s vchiq_open_service failed %d", __func__, status);
+               dev_err(state->dev, "suspend: %s: vchiq_open_service failed %d\n",
+                       __func__, status);
                goto shutdown;
        }
 
@@ -1394,8 +1317,7 @@ vchiq_keepalive_thread_func(void *v)
                long rc = 0, uc = 0;
 
                if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
-                       vchiq_log_error(state->dev, VCHIQ_SUSPEND,
-                                       "%s interrupted", __func__);
+                       dev_err(state->dev, "suspend: %s: interrupted\n", __func__);
                        flush_signals(current);
                        continue;
                }
@@ -1415,16 +1337,15 @@ vchiq_keepalive_thread_func(void *v)
                        atomic_inc(&arm_state->ka_use_ack_count);
                        status = vchiq_use_service(instance, ka_handle);
                        if (status) {
-                               vchiq_log_error(state->dev, VCHIQ_SUSPEND,
-                                               "%s vchiq_use_service error %d", __func__, status);
+                               dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n",
+                                       __func__, status);
                        }
                }
                while (rc--) {
                        status = vchiq_release_service(instance, ka_handle);
                        if (status) {
-                               vchiq_log_error(state->dev, VCHIQ_SUSPEND,
-                                               "%s vchiq_release_service error %d", __func__,
-                                               status);
+                               dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n",
+                                       __func__, status);
                        }
                }
        }
@@ -1459,7 +1380,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
                         service->client_id);
                entity_uc = &service->service_use_count;
        } else {
-               vchiq_log_error(state->dev, VCHIQ_SUSPEND, "%s null service ptr", __func__);
+               dev_err(state->dev, "suspend: %s: null service ptr\n", __func__);
                ret = -EINVAL;
                goto out;
        }
@@ -1468,8 +1389,8 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
        local_uc = ++arm_state->videocore_use_count;
        ++(*entity_uc);
 
-       vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s %s count %d, state count %d",
-                       __func__, entity, *entity_uc, local_uc);
+       dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
+               entity, *entity_uc, local_uc);
 
        write_unlock_bh(&arm_state->susp_res_lock);
 
@@ -1488,7 +1409,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
        }
 
 out:
-       vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s exit %d", __func__, ret);
+       dev_dbg(state->dev, "suspend: exit %d\n", ret);
        return ret;
 }
 
@@ -1526,14 +1447,14 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
        --arm_state->videocore_use_count;
        --(*entity_uc);
 
-       vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s %s count %d, state count %d",
-                       __func__, entity, *entity_uc, arm_state->videocore_use_count);
+       dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
+               entity, *entity_uc, arm_state->videocore_use_count);
 
 unlock:
        write_unlock_bh(&arm_state->susp_res_lock);
 
 out:
-       vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s exit %d", __func__, ret);
+       dev_dbg(state->dev, "suspend: exit %d\n", ret);
        return ret;
 }
 
@@ -1707,20 +1628,19 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
        read_unlock_bh(&arm_state->susp_res_lock);
 
        if (only_nonzero)
-               vchiq_log_warning(state->dev, VCHIQ_SUSPEND,
-                                 "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
-                                 active_services, found);
+               dev_warn(state->dev,
+                        "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n",
+                        active_services, found);
 
        for (i = 0; i < found; i++) {
-               vchiq_log_warning(state->dev, VCHIQ_SUSPEND,
-                                 "%p4cc:%d service count %d %s",
-                                 &service_data[i].fourcc,
-                                 service_data[i].clientid, service_data[i].use_count,
-                                 service_data[i].use_count ? nz : "");
+               dev_warn(state->dev,
+                        "suspend: %p4cc:%d service count %d %s\n",
+                        &service_data[i].fourcc,
+                        service_data[i].clientid, service_data[i].use_count,
+                        service_data[i].use_count ? nz : "");
        }
-       vchiq_log_warning(state->dev, VCHIQ_SUSPEND, "VCHIQ use count %d", peer_count);
-       vchiq_log_warning(state->dev, VCHIQ_SUSPEND, "Overall vchiq instance use count %d",
-                         vc_use_count);
+       dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count);
+       dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count);
 
        kfree(service_data);
 }
@@ -1742,10 +1662,10 @@ vchiq_check_service(struct vchiq_service *service)
        read_unlock_bh(&arm_state->susp_res_lock);
 
        if (ret) {
-               vchiq_log_error(service->state->dev, VCHIQ_SUSPEND,
-                               "%s ERROR - %p4cc:%d service count %d, state count %d", __func__,
-                               &service->base.fourcc, service->client_id,
-                               service->service_use_count, arm_state->videocore_use_count);
+               dev_err(service->state->dev,
+                       "suspend: %s:  %p4cc:%d service count %d, state count %d\n",
+                       __func__, &service->base.fourcc, service->client_id,
+                       service->service_use_count, arm_state->videocore_use_count);
                vchiq_dump_service_use_state(service->state);
        }
 out:
@@ -1759,8 +1679,8 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
        struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
        char threadname[16];
 
-       vchiq_log_debug(state->dev, VCHIQ_SUSPEND, "%d: %s->%s", state->id,
-                       get_conn_state_name(oldstate), get_conn_state_name(newstate));
+       dev_dbg(state->dev, "suspend: %d: %s->%s\n",
+               state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
        if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
                return;
 
@@ -1778,9 +1698,8 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
                                              (void *)state,
                                              threadname);
        if (IS_ERR(arm_state->ka_thread)) {
-               vchiq_log_error(state->dev, VCHIQ_SUSPEND,
-                               "vchiq: FATAL: couldn't create thread %s",
-                               threadname);
+               dev_err(state->dev, "suspend: Couldn't create thread %s\n",
+                       threadname);
        } else {
                wake_up_process(arm_state->ka_thread);
        }
@@ -1825,9 +1744,8 @@ static int vchiq_probe(struct platform_device *pdev)
 
        vchiq_debugfs_init();
 
-       vchiq_log_debug(&pdev->dev, VCHIQ_ARM,
-                       "vchiq: platform initialised - version %d (min %d)",
-                       VCHIQ_VERSION, VCHIQ_VERSION_MIN);
+       dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
+               VCHIQ_VERSION, VCHIQ_VERSION_MIN);
 
        /*
         * Simply exit on error since the function handles cleanup in
@@ -1835,8 +1753,7 @@ static int vchiq_probe(struct platform_device *pdev)
         */
        err = vchiq_register_chrdev(&pdev->dev);
        if (err) {
-               vchiq_log_warning(&pdev->dev, VCHIQ_ARM,
-                                 "Failed to initialize vchiq cdev");
+               dev_warn(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
                goto error_exit;
        }
 
@@ -1846,7 +1763,7 @@ static int vchiq_probe(struct platform_device *pdev)
        return 0;
 
 failed_platform_init:
-       vchiq_log_warning(&pdev->dev, VCHIQ_ARM, "could not initialize vchiq platform");
+       dev_warn(&pdev->dev, "arm: Could not initialize vchiq platform\n");
 error_exit:
        return err;
 }
index 7cdc3d70bd2c121078f82bcf047b1dd1656ce591..7844ef765a00e4628814d8c9c88f2f4b9ae8a0f8 100644 (file)
@@ -69,13 +69,6 @@ struct vchiq_instance {
        struct vchiq_debugfs_node debugfs_node;
 };
 
-struct dump_context {
-       char __user *buf;
-       size_t actual;
-       size_t space;
-       loff_t offset;
-};
-
 extern spinlock_t msg_queue_spinlock;
 extern struct vchiq_state g_state;
 
index b3928bd8c9c6056fae3104335c2af86ca5bc62a8..3cad13f09e37667007ae3ce8d7887b08333133e0 100644 (file)
@@ -27,7 +27,7 @@ static void connected_init(void)
  * be made immediately, otherwise it will be deferred until
  * vchiq_call_connected_callbacks is called.
  */
-void vchiq_add_connected_callback(void (*callback)(void))
+void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(void))
 {
        connected_init();
 
@@ -39,9 +39,9 @@ void vchiq_add_connected_callback(void (*callback)(void))
                callback();
        } else {
                if (g_num_deferred_callbacks >= MAX_CALLBACKS) {
-                       vchiq_log_error(NULL, VCHIQ_CORE,
-                                       "There already %d callback registered - please increase MAX_CALLBACKS",
-                                       g_num_deferred_callbacks);
+                       dev_err(&device->dev,
+                               "core: There already %d callback registered - please increase MAX_CALLBACKS\n",
+                               g_num_deferred_callbacks);
                } else {
                        g_deferred_callback[g_num_deferred_callbacks] =
                                callback;
index 4caf5e30099dc3db7254823534be0cbd1ee0710a..e4ed56446f8ad3eaf0c7d3787887d9c286064015 100644 (file)
@@ -1,10 +1,12 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
 
+#include "vchiq_bus.h"
+
 #ifndef VCHIQ_CONNECTED_H
 #define VCHIQ_CONNECTED_H
 
-void vchiq_add_connected_callback(void (*callback)(void));
+void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(void));
 void vchiq_call_connected_callbacks(void);
 
 #endif /* VCHIQ_CONNECTED_H */
index 39b857da2d4294f0fd8a82270a6fd0fb1b35ce8b..76c27778154aaf7372b4f6a53bbaf5237bb8ce40 100644 (file)
@@ -217,10 +217,10 @@ static const char *msg_type_str(unsigned int msg_type)
 static inline void
 set_service_state(struct vchiq_service *service, int newstate)
 {
-       vchiq_log_debug(service->state->dev, VCHIQ_CORE, "%d: srv:%d %s->%s",
-                       service->state->id, service->localport,
-                       srvstate_names[service->srvstate],
-                       srvstate_names[newstate]);
+       dev_dbg(service->state->dev, "core: %d: srv:%d %s->%s\n",
+               service->state->id, service->localport,
+               srvstate_names[service->srvstate],
+               srvstate_names[newstate]);
        service->srvstate = newstate;
 }
 
@@ -245,8 +245,7 @@ find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
                return service;
        }
        rcu_read_unlock();
-       vchiq_log_debug(service->state->dev, VCHIQ_CORE,
-                       "Invalid service handle 0x%x", handle);
+       dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
        return NULL;
 }
 
@@ -266,8 +265,7 @@ find_service_by_port(struct vchiq_state *state, unsigned int localport)
                }
                rcu_read_unlock();
        }
-       vchiq_log_debug(state->dev, VCHIQ_CORE,
-                       "Invalid port %u", localport);
+       dev_dbg(state->dev, "core: Invalid port %u\n", localport);
        return NULL;
 }
 
@@ -287,8 +285,7 @@ find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
                return service;
        }
        rcu_read_unlock();
-       vchiq_log_debug(service->state->dev, VCHIQ_CORE,
-                       "Invalid service handle 0x%x", handle);
+       dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
        return NULL;
 }
 
@@ -310,8 +307,7 @@ find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int h
                return service;
        }
        rcu_read_unlock();
-       vchiq_log_debug(service->state->dev, VCHIQ_CORE,
-                       "Invalid service handle 0x%x", handle);
+       dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
        return service;
 }
 
@@ -459,15 +455,15 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
 {
        int status;
 
-       vchiq_log_trace(service->state->dev, VCHIQ_CORE, "%d: callback:%d (%s, %pK, %pK)",
-                       service->state->id, service->localport, reason_names[reason],
-                       header, bulk_userdata);
+       dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %pK, %pK)\n",
+               service->state->id, service->localport, reason_names[reason],
+               header, bulk_userdata);
        status = service->base.callback(service->instance, reason, header, service->handle,
                                        bulk_userdata);
        if (status && (status != -EAGAIN)) {
-               vchiq_log_warning(service->state->dev, VCHIQ_CORE,
-                                 "%d: ignoring ERROR from callback to service %x",
-                                 service->state->id, service->handle);
+               dev_warn(service->state->dev,
+                        "core: %d: ignoring ERROR from callback to service %x\n",
+                        service->state->id, service->handle);
                status = 0;
        }
 
@@ -482,8 +478,8 @@ vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
 {
        enum vchiq_connstate oldstate = state->conn_state;
 
-       vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: %s->%s", state->id, conn_state_names[oldstate],
-                       conn_state_names[newstate]);
+       dev_dbg(state->dev, "core: %d: %s->%s\n",
+               state->id, conn_state_names[oldstate], conn_state_names[newstate]);
        state->conn_state = newstate;
        vchiq_platform_conn_state_changed(state, oldstate, newstate);
 }
@@ -741,10 +737,10 @@ process_free_data_message(struct vchiq_state *state, u32 *service_found,
                 */
                complete(&quota->quota_event);
        } else if (count == 0) {
-               vchiq_log_error(state->dev, VCHIQ_CORE,
-                               "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
-                               port, quota->message_use_count, header, msgid, header->msgid,
-                               header->size);
+               dev_err(state->dev,
+                       "core: service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
+                       port, quota->message_use_count, header, msgid,
+                       header->msgid, header->size);
                WARN(1, "invalid message use count\n");
        }
        if (!BITSET_IS_SET(service_found, port)) {
@@ -763,12 +759,12 @@ process_free_data_message(struct vchiq_state *state, u32 *service_found,
                         * it has dropped below its quota
                         */
                        complete(&quota->quota_event);
-                       vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: pfq:%d %x@%pK - slot_use->%d",
-                                       state->id, port, header->size, header, count - 1);
+                       dev_dbg(state->dev, "core: %d: pfq:%d %x@%pK - slot_use->%d\n",
+                               state->id, port, header->size, header, count - 1);
                } else {
-                       vchiq_log_error(state->dev, VCHIQ_CORE,
-                                       "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
-                                       port, count, header, msgid, header->msgid, header->size);
+                       dev_err(state->dev,
+                               "core: service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
+                               port, count, header, msgid, header->msgid, header->size);
                        WARN(1, "bad slot use count\n");
                }
        }
@@ -809,9 +805,9 @@ process_free_queue(struct vchiq_state *state, u32 *service_found,
                 */
                rmb();
 
-               vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: pfq %d=%pK %x %x",
-                               state->id, slot_index, data, local->slot_queue_recycle,
-                               slot_queue_available);
+               dev_dbg(state->dev, "core: %d: pfq %d=%pK %x %x\n",
+                       state->id, slot_index, data, local->slot_queue_recycle,
+                       slot_queue_available);
 
                /* Initialise the bitmask for services which have used this slot */
                memset(service_found, 0, length);
@@ -831,9 +827,9 @@ process_free_queue(struct vchiq_state *state, u32 *service_found,
 
                        pos += calc_stride(header->size);
                        if (pos > VCHIQ_SLOT_SIZE) {
-                               vchiq_log_error(state->dev, VCHIQ_CORE,
-                                               "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
-                                               pos, header, msgid, header->msgid, header->size);
+                               dev_err(state->dev,
+                                       "core: pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
+                                       pos, header, msgid, header->msgid, header->size);
                                WARN(1, "invalid slot position\n");
                        }
                }
@@ -980,10 +976,10 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
                       ((tx_end_index != quota->previous_tx_index) &&
                        (quota->slot_use_count == quota->slot_quota))) {
                        spin_unlock(&quota_spinlock);
-                       vchiq_log_trace(state->dev, VCHIQ_CORE,
-                                       "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
-                                       state->id, service->localport, msg_type_str(type), size,
-                                       quota->message_use_count, quota->slot_use_count);
+                       dev_dbg(state->dev,
+                               "core: %d: qm:%d %s,%zx - quota stall (msg %d, slot %d)\n",
+                               state->id, service->localport, msg_type_str(type), size,
+                               quota->message_use_count, quota->slot_use_count);
                        VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
                        mutex_unlock(&state->slot_mutex);
                        if (wait_for_completion_interruptible(&quota->quota_event))
@@ -1023,9 +1019,9 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
                int tx_end_index;
                int slot_use_count;
 
-               vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
-                               msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
-                               VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
+               dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
+                       state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+                       VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
 
                WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
                                 QMFLAGS_NO_MUTEX_UNLOCK));
@@ -1073,17 +1069,16 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
                spin_unlock(&quota_spinlock);
 
                if (slot_use_count)
-                       vchiq_log_trace(state->dev, VCHIQ_CORE,
-                                       "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", state->id,
-                                       service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
-                                       size, slot_use_count, header);
+                       dev_dbg(state->dev, "core: %d: qm:%d %s,%zx - slot_use->%d (hdr %p)\n",
+                               state->id, service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+                               size, slot_use_count, header);
 
                VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
                VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
        } else {
-               vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
-                               msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
-                               VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
+               dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
+                       state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+                       VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
                if (size != 0) {
                        /*
                         * It is assumed for now that this code path
@@ -1111,11 +1106,9 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
                        ? service->base.fourcc
                        : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
 
-               vchiq_log_debug(state->dev, VCHIQ_CORE_MSG,
-                               "Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu",
-                               msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
-                               &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
-                               VCHIQ_MSG_DSTPORT(msgid), size);
+               dev_dbg(state->dev, "core_msg: Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu\n",
+                       msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
+                       &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid), size);
        }
 
        /* Make sure the new header is visible to the peer. */
@@ -1167,15 +1160,13 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
                int oldmsgid = header->msgid;
 
                if (oldmsgid != VCHIQ_MSGID_PADDING)
-                       vchiq_log_error(state->dev, VCHIQ_CORE, "%d: qms - msgid %x, not PADDING",
-                                       state->id, oldmsgid);
+                       dev_err(state->dev, "core: %d: qms - msgid %x, not PADDING\n",
+                               state->id, oldmsgid);
        }
 
-       vchiq_log_debug(state->dev, VCHIQ_SYNC,
-                       "%d: qms %s@%pK,%x (%d->%d)", state->id,
-                       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
-                       header, size, VCHIQ_MSG_SRCPORT(msgid),
-                       VCHIQ_MSG_DSTPORT(msgid));
+       dev_dbg(state->dev, "sync: %d: qms %s@%pK,%x (%d->%d)\n",
+               state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+               VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
 
        callback_result =
                copy_message_data(copy_callback, context,
@@ -1205,11 +1196,11 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
        svc_fourcc = service ? service->base.fourcc
                             : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
 
-       vchiq_log_trace(state->dev, VCHIQ_SYNC,
-                       "Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d",
-                       msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
-                       &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
-                       VCHIQ_MSG_DSTPORT(msgid), size);
+       dev_dbg(state->dev,
+               "sync: Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d\n",
+               msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
+               &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
+               VCHIQ_MSG_DSTPORT(msgid), size);
 
        remote_event_signal(&state->remote->sync_trigger);
 
@@ -1261,9 +1252,9 @@ release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
                        VCHIQ_SLOT_QUEUE_MASK] =
                        SLOT_INDEX_FROM_INFO(state, slot_info);
                state->remote->slot_queue_recycle = slot_queue_recycle + 1;
-               vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: %s %d - recycle->%x",
-                               state->id, __func__, SLOT_INDEX_FROM_INFO(state, slot_info),
-                               state->remote->slot_queue_recycle);
+               dev_dbg(state->dev, "core: %d: %d - recycle->%x\n",
+                       state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
+                       state->remote->slot_queue_recycle);
 
                /*
                 * A write barrier is necessary, but remote_event_signal
@@ -1298,11 +1289,11 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
 {
        int status = 0;
 
-       vchiq_log_trace(service->state->dev, VCHIQ_CORE,
-                       "%d: nb:%d %cx - p=%x rn=%x r=%x",
-                       service->state->id, service->localport,
-                       (queue == &service->bulk_tx) ? 't' : 'r',
-                       queue->process, queue->remote_notify, queue->remove);
+       dev_dbg(service->state->dev,
+               "core: %d: nb:%d %cx - p=%x rn=%x r=%x\n",
+               service->state->id, service->localport,
+               (queue == &service->bulk_tx) ? 't' : 'r',
+               queue->process, queue->remote_notify, queue->remove);
 
        queue->remote_notify = queue->process;
 
@@ -1382,8 +1373,8 @@ poll_services_of_group(struct vchiq_state *state, int group)
 
                service_flags = atomic_xchg(&service->poll_flags, 0);
                if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
-                       vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: ps - remove %d<->%d",
-                                       state->id, service->localport, service->remoteport);
+                       dev_dbg(state->dev, "core: %d: ps - remove %d<->%d\n",
+                               state->id, service->localport, service->remoteport);
 
                        /*
                         * Make it look like a client, because
@@ -1395,8 +1386,8 @@ poll_services_of_group(struct vchiq_state *state, int group)
                        if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
                                request_poll(state, service, VCHIQ_POLL_REMOVE);
                } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
-                       vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: ps - terminate %d<->%d",
-                                       state->id, service->localport, service->remoteport);
+                       dev_dbg(state->dev, "core: %d: ps - terminate %d<->%d\n",
+                               state->id, service->localport, service->remoteport);
                        if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
                                request_poll(state, service, VCHIQ_POLL_TERMINATE);
                }
@@ -1425,11 +1416,11 @@ abort_outstanding_bulks(struct vchiq_service *service,
 {
        int is_tx = (queue == &service->bulk_tx);
 
-       vchiq_log_trace(service->state->dev, VCHIQ_CORE,
-                       "%d: aob:%d %cx - li=%x ri=%x p=%x",
-                       service->state->id, service->localport,
-                       is_tx ? 't' : 'r', queue->local_insert,
-                       queue->remote_insert, queue->process);
+       dev_dbg(service->state->dev,
+               "core: %d: aob:%d %cx - li=%x ri=%x p=%x\n",
+               service->state->id, service->localport,
+               is_tx ? 't' : 'r', queue->local_insert,
+               queue->remote_insert, queue->process);
 
        WARN_ON((int)(queue->local_insert - queue->process) < 0);
        WARN_ON((int)(queue->remote_insert - queue->process) < 0);
@@ -1448,11 +1439,11 @@ abort_outstanding_bulks(struct vchiq_service *service,
                if (queue->process != queue->local_insert) {
                        vchiq_complete_bulk(service->instance, bulk);
 
-                       vchiq_log_debug(service->state->dev, VCHIQ_CORE_MSG,
-                                       "%s %p4cc d:%d ABORTED - tx len:%d, rx len:%d",
-                                       is_tx ? "Send Bulk to" : "Recv Bulk from",
-                                       &service->base.fourcc,
-                                       service->remoteport, bulk->size, bulk->remote_size);
+                       dev_dbg(service->state->dev,
+                               "core_msg: %s %p4cc d:%d ABORTED - tx len:%d, rx len:%d\n",
+                               is_tx ? "Send Bulk to" : "Recv Bulk from",
+                               &service->base.fourcc,
+                               service->remoteport, bulk->size, bulk->remote_size);
                } else {
                        /* fabricate a matching dummy bulk */
                        bulk->data = 0;
@@ -1485,8 +1476,8 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
 
        payload = (struct vchiq_open_payload *)header->data;
        fourcc = payload->fourcc;
-       vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs OPEN@%pK (%d->'%p4cc')",
-                       state->id, header, localport, &fourcc);
+       dev_dbg(state->dev, "core: %d: prs OPEN@%pK (%d->'%p4cc')\n",
+               state->id, header, localport, &fourcc);
 
        service = get_listening_service(state, fourcc);
        if (!service)
@@ -1609,17 +1600,17 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
                                vchiq_service_put(service);
                        service = get_connected_service(state, remoteport);
                        if (service)
-                               vchiq_log_warning(state->dev, VCHIQ_CORE,
-                                                 "%d: prs %s@%pK (%d->%d) - found connected service %d",
-                                                 state->id, msg_type_str(type), header,
-                                                 remoteport, localport, service->localport);
+                               dev_warn(state->dev,
+                                        "core: %d: prs %s@%pK (%d->%d) - found connected service %d\n",
+                                        state->id, msg_type_str(type), header,
+                                        remoteport, localport, service->localport);
                }
 
                if (!service) {
-                       vchiq_log_error(state->dev, VCHIQ_CORE,
-                                       "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
-                                       state->id, msg_type_str(type), header, remoteport,
-                                       localport, localport);
+                       dev_err(state->dev,
+                               "core: %d: prs %s@%pK (%d->%d) - invalid/closed service %d\n",
+                               state->id, msg_type_str(type), header, remoteport,
+                               localport, localport);
                        goto skip_message;
                }
                break;
@@ -1631,18 +1622,15 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
        svc_fourcc = service ? service->base.fourcc
                             : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
 
-       vchiq_log_debug(state->dev, VCHIQ_CORE_MSG,
-                       "Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d",
-                       msg_type_str(type), type, &svc_fourcc,
-                       remoteport, localport, size);
+       dev_dbg(state->dev, "core_msg: Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d\n",
+               msg_type_str(type), type, &svc_fourcc, remoteport, localport, size);
        if (size > 0)
                vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
 
        if (((unsigned long)header & VCHIQ_SLOT_MASK) +
            calc_stride(size) > VCHIQ_SLOT_SIZE) {
-               vchiq_log_error(state->dev, VCHIQ_CORE,
-                               "header %pK (msgid %x) - size %x too big for slot",
-                               header, (unsigned int)msgid, (unsigned int)size);
+               dev_err(state->dev, "core: header %pK (msgid %x) - size %x too big for slot\n",
+                       header, (unsigned int)msgid, (unsigned int)size);
                WARN(1, "oversized for slot\n");
        }
 
@@ -1659,37 +1647,36 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
                                header->data;
                        service->peer_version = payload->version;
                }
-               vchiq_log_debug(state->dev, VCHIQ_CORE,
-                               "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
-                               state->id, header, size, remoteport, localport,
-                               service->peer_version);
+               dev_dbg(state->dev,
+                       "core: %d: prs OPENACK@%pK,%x (%d->%d) v:%d\n",
+                       state->id, header, size, remoteport, localport,
+                       service->peer_version);
                if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
                        service->remoteport = remoteport;
                        set_service_state(service, VCHIQ_SRVSTATE_OPEN);
                        complete(&service->remove_event);
                } else {
-                       vchiq_log_error(state->dev, VCHIQ_CORE, "OPENACK received in state %s",
-                                       srvstate_names[service->srvstate]);
+                       dev_err(state->dev, "core: OPENACK received in state %s\n",
+                               srvstate_names[service->srvstate]);
                }
                break;
        case VCHIQ_MSG_CLOSE:
                WARN_ON(size); /* There should be no data */
 
-               vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs CLOSE@%pK (%d->%d)",
-                               state->id, header, remoteport, localport);
+               dev_dbg(state->dev, "core: %d: prs CLOSE@%pK (%d->%d)\n",
+                       state->id, header, remoteport, localport);
 
                mark_service_closing_internal(service, 1);
 
                if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN)
                        goto bail_not_ready;
 
-               vchiq_log_debug(state->dev, VCHIQ_CORE, "Close Service %p4cc s:%u d:%d",
-                               &service->base.fourcc,
-                               service->localport, service->remoteport);
+               dev_dbg(state->dev, "core: Close Service %p4cc s:%u d:%d\n",
+                       &service->base.fourcc, service->localport, service->remoteport);
                break;
        case VCHIQ_MSG_DATA:
-               vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs DATA@%pK,%x (%d->%d)",
-                               state->id, header, size, remoteport, localport);
+               dev_dbg(state->dev, "core: %d: prs DATA@%pK,%x (%d->%d)\n",
+                       state->id, header, size, remoteport, localport);
 
                if ((service->remoteport == remoteport) &&
                    (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
@@ -1708,8 +1695,8 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
                }
                break;
        case VCHIQ_MSG_CONNECT:
-               vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs CONNECT@%pK",
-                               state->id, header);
+               dev_dbg(state->dev, "core: %d: prs CONNECT@%pK\n",
+                       state->id, header);
                state->version_common = ((struct vchiq_slot_zero *)
                                         state->slot_data)->version;
                complete(&state->connect);
@@ -1740,11 +1727,10 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
                        }
                        if ((int)(queue->remote_insert -
                                queue->local_insert) >= 0) {
-                               vchiq_log_error(state->dev, VCHIQ_CORE,
-                                               "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
-                                               state->id, msg_type_str(type), header, remoteport,
-                                               localport, queue->remote_insert,
-                                               queue->local_insert);
+                               dev_err(state->dev,
+                                       "core: %d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)\n",
+                                       state->id, msg_type_str(type), header, remoteport,
+                                       localport, queue->remote_insert, queue->local_insert);
                                mutex_unlock(&service->bulk_mutex);
                                break;
                        }
@@ -1761,15 +1747,14 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
                        bulk->actual = *(int *)header->data;
                        queue->remote_insert++;
 
-                       vchiq_log_debug(state->dev, VCHIQ_CORE,
-                                       "%d: prs %s@%pK (%d->%d) %x@%pad",
-                                       state->id, msg_type_str(type), header, remoteport,
-                                       localport, bulk->actual, &bulk->data);
+                       dev_dbg(state->dev, "core: %d: prs %s@%pK (%d->%d) %x@%pad\n",
+                               state->id, msg_type_str(type), header, remoteport,
+                               localport, bulk->actual, &bulk->data);
 
-                       vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs:%d %cx li=%x ri=%x p=%x",
-                                       state->id, localport,
-                                       (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
-                                       queue->local_insert, queue->remote_insert, queue->process);
+                       dev_dbg(state->dev, "core: %d: prs:%d %cx li=%x ri=%x p=%x\n",
+                               state->id, localport,
+                               (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
+                               queue->local_insert, queue->remote_insert, queue->process);
 
                        DEBUG_TRACE(PARSE_LINE);
                        WARN_ON(queue->process == queue->local_insert);
@@ -1782,16 +1767,16 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
                }
                break;
        case VCHIQ_MSG_PADDING:
-               vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs PADDING@%pK,%x",
-                               state->id, header, size);
+               dev_dbg(state->dev, "core: %d: prs PADDING@%pK,%x\n",
+                       state->id, header, size);
                break;
        case VCHIQ_MSG_PAUSE:
                /* If initiated, signal the application thread */
-               vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs PAUSE@%pK,%x",
-                               state->id, header, size);
+               dev_dbg(state->dev, "core: %d: prs PAUSE@%pK,%x\n",
+                       state->id, header, size);
                if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
-                       vchiq_log_error(state->dev, VCHIQ_CORE,
-                                       "%d: PAUSE received in state PAUSED", state->id);
+                       dev_err(state->dev, "core: %d: PAUSE received in state PAUSED\n",
+                               state->id);
                        break;
                }
                if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
@@ -1804,8 +1789,8 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
                vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
                break;
        case VCHIQ_MSG_RESUME:
-               vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs RESUME@%pK,%x",
-                               state->id, header, size);
+               dev_dbg(state->dev, "core: %d: prs RESUME@%pK,%x\n",
+                       state->id, header, size);
                /* Release the slot mutex */
                mutex_unlock(&state->slot_mutex);
                vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
@@ -1821,8 +1806,8 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
                break;
 
        default:
-               vchiq_log_error(state->dev, VCHIQ_CORE, "%d: prs invalid msgid %x@%pK,%x",
-                               state->id, msgid, header, size);
+               dev_err(state->dev, "core: %d: prs invalid msgid %x@%pK,%x\n",
+                       state->id, msgid, header, size);
                WARN(1, "invalid message\n");
                break;
        }
@@ -1932,7 +1917,7 @@ handle_poll(struct vchiq_state *state)
                         * since the PAUSE should have flushed
                         * through outstanding messages.
                         */
-                       vchiq_log_error(state->dev, VCHIQ_CORE, "Failed to send RESUME message");
+                       dev_err(state->dev, "core: Failed to send RESUME message\n");
                }
                break;
        default:
@@ -2032,21 +2017,18 @@ sync_func(void *v)
                service = find_service_by_port(state, localport);
 
                if (!service) {
-                       vchiq_log_error(state->dev, VCHIQ_SYNC,
-                                       "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
-                                       state->id, msg_type_str(type), header,
-                                       remoteport, localport, localport);
+                       dev_err(state->dev,
+                               "sync: %d: sf %s@%pK (%d->%d) - invalid/closed service %d\n",
+                               state->id, msg_type_str(type), header, remoteport,
+                               localport, localport);
                        release_message_sync(state, header);
                        continue;
                }
 
-               svc_fourcc = service ? service->base.fourcc
-                                    : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+               svc_fourcc = service->base.fourcc;
 
-               vchiq_log_trace(state->dev, VCHIQ_SYNC,
-                               "Rcvd Msg %s from %p4cc s:%d d:%d len:%d",
-                               msg_type_str(type), &svc_fourcc,
-                               remoteport, localport, size);
+               dev_dbg(state->dev, "sync: Rcvd Msg %s from %p4cc s:%d d:%d len:%d\n",
+                       msg_type_str(type), &svc_fourcc, remoteport, localport, size);
                if (size > 0)
                        vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
 
@@ -2058,9 +2040,9 @@ sync_func(void *v)
                                        header->data;
                                service->peer_version = payload->version;
                        }
-                       vchiq_log_debug(state->dev, VCHIQ_SYNC, "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
-                                       state->id, header, size, remoteport, localport,
-                                       service->peer_version);
+                       dev_err(state->dev, "sync: %d: sf OPENACK@%pK,%x (%d->%d) v:%d\n",
+                               state->id, header, size, remoteport, localport,
+                               service->peer_version);
                        if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
                                service->remoteport = remoteport;
                                set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
@@ -2071,22 +2053,22 @@ sync_func(void *v)
                        break;
 
                case VCHIQ_MSG_DATA:
-                       vchiq_log_trace(state->dev, VCHIQ_SYNC, "%d: sf DATA@%pK,%x (%d->%d)",
-                                       state->id, header, size, remoteport, localport);
+                       dev_dbg(state->dev, "sync: %d: sf DATA@%pK,%x (%d->%d)\n",
+                               state->id, header, size, remoteport, localport);
 
                        if ((service->remoteport == remoteport) &&
                            (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
                                if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
                                                          NULL) == -EAGAIN)
-                                       vchiq_log_error(state->dev, VCHIQ_SYNC,
-                                                       "synchronous callback to service %d returns -EAGAIN",
-                                                       localport);
+                                       dev_err(state->dev,
+                                               "sync: error: synchronous callback to service %d returns -EAGAIN\n",
+                                               localport);
                        }
                        break;
 
                default:
-                       vchiq_log_error(state->dev, VCHIQ_SYNC, "%d: sf unexpected msgid %x@%pK,%x",
-                                       state->id, msgid, header, size);
+                       dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%pK,%x\n",
+                               state->id, msgid, header, size);
                        release_message_sync(state, header);
                        break;
                }
@@ -2119,8 +2101,8 @@ vchiq_init_slots(struct device *dev, void *mem_base, int mem_size)
        num_slots -= first_data_slot;
 
        if (num_slots < 4) {
-               vchiq_log_error(dev, VCHIQ_CORE, "%s - insufficient memory %x bytes",
-                               __func__, mem_size);
+               dev_err(dev, "core: %s: Insufficient memory %x bytes\n",
+                       __func__, mem_size);
                return NULL;
        }
 
@@ -2462,9 +2444,9 @@ vchiq_add_service_internal(struct vchiq_state *state,
        /* Bring this service online */
        set_service_state(service, srvstate);
 
-       vchiq_log_debug(state->dev, VCHIQ_CORE_MSG, "%s Service %p4cc SrcPort:%d",
-                       (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
-                       &params->fourcc, service->localport);
+       dev_dbg(state->dev, "core_msg: %s Service %p4cc SrcPort:%d\n",
+               (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
+               &params->fourcc, service->localport);
 
        /* Don't unlock the service - leave it with a ref_count of 1. */
 
@@ -2501,11 +2483,10 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
        } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
                   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
                if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
-                       vchiq_log_error(service->state->dev, VCHIQ_CORE,
-                                       "%d: osi - srvstate = %s (ref %u)",
-                                       service->state->id,
-                                       srvstate_names[service->srvstate],
-                                       kref_read(&service->ref_count));
+                       dev_err(service->state->dev,
+                               "core: %d: osi - srvstate = %s (ref %u)\n",
+                               service->state->id, srvstate_names[service->srvstate],
+                               kref_read(&service->ref_count));
                status = -EINVAL;
                VCHIQ_SERVICE_STATS_INC(service, error_count);
                vchiq_release_service_internal(service);
@@ -2560,15 +2541,14 @@ release_service_messages(struct vchiq_service *service)
                        int port = VCHIQ_MSG_DSTPORT(msgid);
 
                        if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
-                               vchiq_log_debug(state->dev, VCHIQ_CORE,
-                                               "  fsi - hdr %pK", header);
+                               dev_dbg(state->dev, "core:  fsi - hdr %pK\n", header);
                                release_slot(state, slot_info, header, NULL);
                        }
                        pos += calc_stride(header->size);
                        if (pos > VCHIQ_SLOT_SIZE) {
-                               vchiq_log_error(state->dev, VCHIQ_CORE,
-                                               "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
-                                               pos, header, msgid, header->msgid, header->size);
+                               dev_err(state->dev,
+                                       "core: fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
+                                       pos, header, msgid, header->msgid, header->size);
                                WARN(1, "invalid slot position\n");
                        }
                }
@@ -2622,8 +2602,8 @@ close_service_complete(struct vchiq_service *service, int failstate)
        case VCHIQ_SRVSTATE_LISTENING:
                break;
        default:
-               vchiq_log_error(service->state->dev, VCHIQ_CORE, "%s(%x) called in state %s",
-                               __func__, service->handle, srvstate_names[service->srvstate]);
+               dev_err(service->state->dev, "core: (%x) called in state %s\n",
+                       service->handle, srvstate_names[service->srvstate]);
                WARN(1, "%s in unexpected state\n", __func__);
                return -EINVAL;
        }
@@ -2669,8 +2649,9 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
        int close_id = MAKE_CLOSE(service->localport,
                                  VCHIQ_MSG_DSTPORT(service->remoteport));
 
-       vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: csi:%d,%d (%s)", service->state->id,
-                       service->localport, close_recvd, srvstate_names[service->srvstate]);
+       dev_dbg(state->dev, "core: %d: csi:%d,%d (%s)\n",
+               service->state->id, service->localport, close_recvd,
+               srvstate_names[service->srvstate]);
 
        switch (service->srvstate) {
        case VCHIQ_SRVSTATE_CLOSED:
@@ -2678,8 +2659,8 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
        case VCHIQ_SRVSTATE_LISTENING:
        case VCHIQ_SRVSTATE_CLOSEWAIT:
                if (close_recvd) {
-                       vchiq_log_error(state->dev, VCHIQ_CORE, "%s(1) called in state %s",
-                                       __func__, srvstate_names[service->srvstate]);
+                       dev_err(state->dev, "core: (1) called in state %s\n",
+                               srvstate_names[service->srvstate]);
                } else if (is_server) {
                        if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
                                status = -EINVAL;
@@ -2766,8 +2747,8 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
                break;
 
        default:
-               vchiq_log_error(state->dev, VCHIQ_CORE, "%s(%d) called in state %s", __func__,
-                               close_recvd, srvstate_names[service->srvstate]);
+               dev_err(state->dev, "core: (%d) called in state %s\n",
+                       close_recvd, srvstate_names[service->srvstate]);
                break;
        }
 
@@ -2780,8 +2761,8 @@ vchiq_terminate_service_internal(struct vchiq_service *service)
 {
        struct vchiq_state *state = service->state;
 
-       vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: tsi - (%d<->%d)", state->id,
-                       service->localport, service->remoteport);
+       dev_dbg(state->dev, "core: %d: tsi - (%d<->%d)\n",
+               state->id, service->localport, service->remoteport);
 
        mark_service_closing(service);
 
@@ -2795,8 +2776,7 @@ vchiq_free_service_internal(struct vchiq_service *service)
 {
        struct vchiq_state *state = service->state;
 
-       vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: fsi - (%d)",
-                       state->id, service->localport);
+       dev_dbg(state->dev, "core: %d: fsi - (%d)\n", state->id, service->localport);
 
        switch (service->srvstate) {
        case VCHIQ_SRVSTATE_OPENING:
@@ -2806,8 +2786,8 @@ vchiq_free_service_internal(struct vchiq_service *service)
        case VCHIQ_SRVSTATE_CLOSEWAIT:
                break;
        default:
-               vchiq_log_error(state->dev, VCHIQ_CORE, "%d: fsi - (%d) in state %s", state->id,
-                               service->localport, srvstate_names[service->srvstate]);
+               dev_err(state->dev, "core: %d: fsi - (%d) in state %s\n",
+                       state->id, service->localport, srvstate_names[service->srvstate]);
                return;
        }
 
@@ -2876,8 +2856,8 @@ vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
        if (!service)
                return -EINVAL;
 
-       vchiq_log_debug(service->state->dev, VCHIQ_CORE, "%d: close_service:%d",
-                       service->state->id, service->localport);
+       dev_dbg(service->state->dev, "core: %d: close_service:%d\n",
+               service->state->id, service->localport);
 
        if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
            (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
@@ -2907,10 +2887,10 @@ vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
                    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
                        break;
 
-               vchiq_log_warning(service->state->dev, VCHIQ_CORE,
-                                 "%d: close_service:%d - waiting in state %s",
-                                 service->state->id, service->localport,
-                                 srvstate_names[service->srvstate]);
+               dev_warn(service->state->dev,
+                        "core: %d: close_service:%d - waiting in state %s\n",
+                        service->state->id, service->localport,
+                        srvstate_names[service->srvstate]);
        }
 
        if (!status &&
@@ -2934,8 +2914,8 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
        if (!service)
                return -EINVAL;
 
-       vchiq_log_debug(service->state->dev, VCHIQ_CORE, "%d: remove_service:%d",
-                       service->state->id, service->localport);
+       dev_dbg(service->state->dev, "core: %d: remove_service:%d\n",
+               service->state->id, service->localport);
 
        if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
                vchiq_service_put(service);
@@ -2968,10 +2948,10 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
                    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
                        break;
 
-               vchiq_log_warning(service->state->dev, VCHIQ_CORE,
-                                 "%d: remove_service:%d - waiting in state %s",
-                                 service->state->id, service->localport,
-                                 srvstate_names[service->srvstate]);
+               dev_warn(service->state->dev,
+                        "core: %d: remove_service:%d - waiting in state %s\n",
+                        service->state->id, service->localport,
+                        srvstate_names[service->srvstate]);
        }
 
        if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE))
@@ -3078,9 +3058,9 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
         */
        wmb();
 
-       vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: bt (%d->%d) %cx %x@%pad %pK",
-                       state->id, service->localport, service->remoteport,
-                       dir_char, size, &bulk->data, userdata);
+       dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
+               state->id, service->localport, service->remoteport,
+               dir_char, size, &bulk->data, userdata);
 
        /*
         * The slot mutex must be held when the service is being closed, so
@@ -3115,9 +3095,9 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
        mutex_unlock(&state->slot_mutex);
        mutex_unlock(&service->bulk_mutex);
 
-       vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: bt:%d %cx li=%x ri=%x p=%x",
-                       state->id, service->localport, dir_char, queue->local_insert,
-                       queue->remote_insert, queue->process);
+       dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
+               state->id, service->localport, dir_char, queue->local_insert,
+               queue->remote_insert, queue->process);
 
 waiting:
        vchiq_service_put(service);
@@ -3372,8 +3352,8 @@ vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
        return ret;
 }
 
-static int
-vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
+static void
+vchiq_dump_shared_state(struct seq_file *f, struct vchiq_state *state,
                        struct vchiq_shared_state *shared, const char *label)
 {
        static const char *const debug_names[] = {
@@ -3390,146 +3370,44 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
                "COMPLETION_QUEUE_FULL_COUNT"
        };
        int i;
-       char buf[80];
-       int len;
-       int err;
-
-       len = scnprintf(buf, sizeof(buf), "  %s: slots %d-%d tx_pos=%x recycle=%x",
-                       label, shared->slot_first, shared->slot_last,
-                       shared->tx_pos, shared->slot_queue_recycle);
-       err = vchiq_dump(dump_context, buf, len + 1);
-       if (err)
-               return err;
-
-       len = scnprintf(buf, sizeof(buf), "    Slots claimed:");
-       err = vchiq_dump(dump_context, buf, len + 1);
-       if (err)
-               return err;
+
+       seq_printf(f, "  %s: slots %d-%d tx_pos=%x recycle=%x\n",
+                  label, shared->slot_first, shared->slot_last,
+                  shared->tx_pos, shared->slot_queue_recycle);
+
+       seq_puts(f, "    Slots claimed:\n");
 
        for (i = shared->slot_first; i <= shared->slot_last; i++) {
                struct vchiq_slot_info slot_info =
                                                *SLOT_INFO_FROM_INDEX(state, i);
                if (slot_info.use_count != slot_info.release_count) {
-                       len = scnprintf(buf, sizeof(buf), "      %d: %d/%d", i, slot_info.use_count,
-                                       slot_info.release_count);
-                       err = vchiq_dump(dump_context, buf, len + 1);
-                       if (err)
-                               return err;
+                       seq_printf(f, "      %d: %d/%d\n", i, slot_info.use_count,
+                                  slot_info.release_count);
                }
        }
 
        for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
-               len = scnprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
-                               debug_names[i], shared->debug[i], shared->debug[i]);
-               err = vchiq_dump(dump_context, buf, len + 1);
-               if (err)
-                       return err;
-       }
-       return 0;
-}
-
-int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
-{
-       char buf[80];
-       int len;
-       int i;
-       int err;
-
-       len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
-                       conn_state_names[state->conn_state]);
-       err = vchiq_dump(dump_context, buf, len + 1);
-       if (err)
-               return err;
-
-       len = scnprintf(buf, sizeof(buf), "  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
-                       state->local->tx_pos,
-                       state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
-                       state->rx_pos,
-                       state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
-       err = vchiq_dump(dump_context, buf, len + 1);
-       if (err)
-               return err;
-
-       len = scnprintf(buf, sizeof(buf), "  Version: %d (min %d)",
-                       VCHIQ_VERSION, VCHIQ_VERSION_MIN);
-       err = vchiq_dump(dump_context, buf, len + 1);
-       if (err)
-               return err;
-
-       if (VCHIQ_ENABLE_STATS) {
-               len = scnprintf(buf, sizeof(buf),
-                               "  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
-                               state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
-                               state->stats.error_count);
-               err = vchiq_dump(dump_context, buf, len + 1);
-               if (err)
-                       return err;
-       }
-
-       len = scnprintf(buf, sizeof(buf),
-                       "  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
-                       ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
-                       state->local_tx_pos) / VCHIQ_SLOT_SIZE,
-                       state->data_quota - state->data_use_count,
-                       state->local->slot_queue_recycle - state->slot_queue_available,
-                       state->stats.slot_stalls, state->stats.data_stalls);
-       err = vchiq_dump(dump_context, buf, len + 1);
-       if (err)
-               return err;
-
-       err = vchiq_dump_platform_state(dump_context);
-       if (err)
-               return err;
-
-       err = vchiq_dump_shared_state(dump_context,
-                                     state,
-                                     state->local,
-                                     "Local");
-       if (err)
-               return err;
-       err = vchiq_dump_shared_state(dump_context,
-                                     state,
-                                     state->remote,
-                                     "Remote");
-       if (err)
-               return err;
-
-       err = vchiq_dump_platform_instances(dump_context);
-       if (err)
-               return err;
-
-       for (i = 0; i < state->unused_service; i++) {
-               struct vchiq_service *service = find_service_by_port(state, i);
-
-               if (service) {
-                       err = vchiq_dump_service_state(dump_context, service);
-                       vchiq_service_put(service);
-                       if (err)
-                               return err;
-               }
+               seq_printf(f, "    DEBUG: %s = %d(%x)\n",
+                          debug_names[i], shared->debug[i], shared->debug[i]);
        }
-       return 0;
 }
 
-int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
+static void
+vchiq_dump_service_state(struct seq_file *f, struct vchiq_service *service)
 {
-       char buf[80];
-       int len;
-       int err;
        unsigned int ref_count;
 
        /*Don't include the lock just taken*/
        ref_count = kref_read(&service->ref_count) - 1;
-       len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
-                       service->localport, srvstate_names[service->srvstate],
-                       ref_count);
+       seq_printf(f, "Service %u: %s (ref %u)", service->localport,
+                  srvstate_names[service->srvstate], ref_count);
 
        if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
                char remoteport[30];
                struct vchiq_service_quota *quota =
                        &service->state->service_quotas[service->localport];
                int fourcc = service->base.fourcc;
-               int tx_pending, rx_pending;
+               int tx_pending, rx_pending, tx_size = 0, rx_size = 0;
 
                if (service->remoteport != VCHIQ_PORT_FREE) {
                        int len2 = scnprintf(remoteport, sizeof(remoteport),
@@ -3542,68 +3420,100 @@ int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
                        strscpy(remoteport, "n/a", sizeof(remoteport));
                }
 
-               len += scnprintf(buf + len, sizeof(buf) - len,
-                                " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)",
-                                &fourcc, remoteport,
-                                quota->message_use_count, quota->message_quota,
-                                quota->slot_use_count, quota->slot_quota);
-
-               err = vchiq_dump(dump_context, buf, len + 1);
-               if (err)
-                       return err;
+               seq_printf(f, " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)\n",
+                          &fourcc, remoteport,
+                          quota->message_use_count, quota->message_quota,
+                          quota->slot_use_count, quota->slot_quota);
 
                tx_pending = service->bulk_tx.local_insert -
                        service->bulk_tx.remote_insert;
+               if (tx_pending) {
+                       unsigned int i = BULK_INDEX(service->bulk_tx.remove);
+
+                       tx_size = service->bulk_tx.bulks[i].size;
+               }
 
                rx_pending = service->bulk_rx.local_insert -
                        service->bulk_rx.remote_insert;
+               if (rx_pending) {
+                       unsigned int i = BULK_INDEX(service->bulk_rx.remove);
 
-               len = scnprintf(buf, sizeof(buf),
-                               "  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
-                               tx_pending,
-                               tx_pending ?
-                               service->bulk_tx.bulks[BULK_INDEX(service->bulk_tx.remove)].size :
-                               0, rx_pending, rx_pending ?
-                               service->bulk_rx.bulks[BULK_INDEX(service->bulk_rx.remove)].size :
-                               0);
+                       rx_size = service->bulk_rx.bulks[i].size;
+               }
+
+               seq_printf(f, "  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)\n",
+                          tx_pending, tx_size, rx_pending, rx_size);
 
                if (VCHIQ_ENABLE_STATS) {
-                       err = vchiq_dump(dump_context, buf, len + 1);
-                       if (err)
-                               return err;
+                       seq_printf(f, "  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
+                                  service->stats.ctrl_tx_count,
+                                  service->stats.ctrl_tx_bytes,
+                                  service->stats.ctrl_rx_count,
+                                  service->stats.ctrl_rx_bytes);
+
+                       seq_printf(f, "  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
+                                  service->stats.bulk_tx_count,
+                                  service->stats.bulk_tx_bytes,
+                                  service->stats.bulk_rx_count,
+                                  service->stats.bulk_rx_bytes);
+
+                       seq_printf(f, "  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors\n",
+                                  service->stats.quota_stalls,
+                                  service->stats.slot_stalls,
+                                  service->stats.bulk_stalls,
+                                  service->stats.bulk_aborted_count,
+                                  service->stats.error_count);
+               }
+       }
 
-                       len = scnprintf(buf, sizeof(buf),
-                                       "  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
-                                       service->stats.ctrl_tx_count, service->stats.ctrl_tx_bytes,
-                                       service->stats.ctrl_rx_count, service->stats.ctrl_rx_bytes);
-                       err = vchiq_dump(dump_context, buf, len + 1);
-                       if (err)
-                               return err;
+       vchiq_dump_platform_service_state(f, service);
+}
 
-                       len = scnprintf(buf, sizeof(buf),
-                                       "  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
-                                       service->stats.bulk_tx_count, service->stats.bulk_tx_bytes,
-                                       service->stats.bulk_rx_count, service->stats.bulk_rx_bytes);
-                       err = vchiq_dump(dump_context, buf, len + 1);
-                       if (err)
-                               return err;
+void vchiq_dump_state(struct seq_file *f, struct vchiq_state *state)
+{
+       int i;
 
-                       len = scnprintf(buf, sizeof(buf),
-                                       "  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
-                                       service->stats.quota_stalls, service->stats.slot_stalls,
-                                       service->stats.bulk_stalls,
-                                       service->stats.bulk_aborted_count,
-                                       service->stats.error_count);
-               }
+       seq_printf(f, "State %d: %s\n", state->id,
+                  conn_state_names[state->conn_state]);
+
+       seq_printf(f, "  tx_pos=%x(@%pK), rx_pos=%x(@%pK)\n",
+                  state->local->tx_pos,
+                  state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
+                  state->rx_pos,
+                  state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
+
+       seq_printf(f, "  Version: %d (min %d)\n", VCHIQ_VERSION,
+                  VCHIQ_VERSION_MIN);
+
+       if (VCHIQ_ENABLE_STATS) {
+               seq_printf(f, "  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d\n",
+                          state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
+                          state->stats.error_count);
        }
 
-       err = vchiq_dump(dump_context, buf, len + 1);
-       if (err)
-               return err;
+       seq_printf(f, "  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)\n",
+                  ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
+                  state->local_tx_pos) / VCHIQ_SLOT_SIZE,
+                  state->data_quota - state->data_use_count,
+                  state->local->slot_queue_recycle - state->slot_queue_available,
+                  state->stats.slot_stalls, state->stats.data_stalls);
+
+       vchiq_dump_platform_state(f);
+
+       vchiq_dump_shared_state(f, state, state->local, "Local");
+
+       vchiq_dump_shared_state(f, state, state->remote, "Remote");
+
+       vchiq_dump_platform_instances(f);
 
-       if (service->srvstate != VCHIQ_SRVSTATE_FREE)
-               err = vchiq_dump_platform_service_state(dump_context, service);
-       return err;
+       for (i = 0; i < state->unused_service; i++) {
+               struct vchiq_service *service = find_service_by_port(state, i);
+
+               if (service) {
+                       vchiq_dump_service_state(f, service);
+                       vchiq_service_put(service);
+               }
+       }
 }
 
 int vchiq_send_remote_use(struct vchiq_state *state)
@@ -3653,9 +3563,9 @@ void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
                *s++ = '\0';
 
                if (label && (*label != '\0'))
-                       vchiq_log_trace(dev, VCHIQ_CORE, "%s: %08x: %s", label, addr, line_buf);
+                       dev_dbg(dev, "core: %s: %08x: %s\n", label, addr, line_buf);
                else
-                       vchiq_log_trace(dev, VCHIQ_CORE, "%s: %08x: %s", label, addr, line_buf);
+                       dev_dbg(dev, "core: %s: %08x: %s\n", label, addr, line_buf);
 
                addr += 16;
                mem += 16;
index 161358db457cd79f29dbfef814e3ab3a4bbf5e5e..c8527551b58cdb7108581debd5dce229a5ecb4be 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/mutex.h>
 #include <linux/completion.h>
+#include <linux/debugfs.h>
 #include <linux/dev_printk.h>
 #include <linux/kthread.h>
 #include <linux/kref.h>
 #define VCHIQ_SLOT_SIZE     4096
 #define VCHIQ_MAX_MSG_SIZE  (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header))
 
-enum vchiq_log_category {
-       VCHIQ_ARM,
-       VCHIQ_CORE,
-       VCHIQ_CORE_MSG,
-       VCHIQ_SYNC,
-       VCHIQ_SUSPEND,
-};
-
-static inline const char *log_category_str(enum vchiq_log_category c)
-{
-       static const char * const strings[] = {
-               "vchiq_arm",
-               "vchiq_core",
-               "vchiq_core_msg",
-               "vchiq_sync",
-               "vchiq_suspend",
-       };
-
-       return strings[c];
-};
-
-#ifndef vchiq_log_error
-#define vchiq_log_error(dev, cat, fmt, ...) \
-       do { dev_dbg(dev, "%s error: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
-#endif
-#ifndef vchiq_log_warning
-#define vchiq_log_warning(dev, cat, fmt, ...) \
-       do { dev_dbg(dev, "%s warning: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
-#endif
-#ifndef vchiq_log_debug
-#define vchiq_log_debug(dev, cat, fmt, ...) \
-       do { dev_dbg(dev, "%s debug: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
-#endif
-#ifndef vchiq_log_trace
-#define vchiq_log_trace(dev, cat, fmt, ...) \
-       do { dev_dbg(dev, "%s trace: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
-#endif
-
 #define VCHIQ_SLOT_MASK        (VCHIQ_SLOT_SIZE - 1)
 #define VCHIQ_SLOT_QUEUE_MASK  (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
 #define VCHIQ_SLOT_ZERO_SLOTS  DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
@@ -504,11 +467,8 @@ vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *
                    void __user *uoffset, int size, void *userdata, enum vchiq_bulk_mode mode,
                    enum vchiq_bulk_dir dir);
 
-extern int
-vchiq_dump_state(void *dump_context, struct vchiq_state *state);
-
-extern int
-vchiq_dump_service_state(void *dump_context, struct vchiq_service *service);
+extern void
+vchiq_dump_state(struct seq_file *f, struct vchiq_state *state);
 
 extern void
 vchiq_loud_error_header(void);
@@ -564,13 +524,11 @@ void vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bul
 
 void remote_event_signal(struct remote_event *event);
 
-int vchiq_dump(void *dump_context, const char *str, int len);
-
-int vchiq_dump_platform_state(void *dump_context);
+void vchiq_dump_platform_state(struct seq_file *f);
 
-int vchiq_dump_platform_instances(void *dump_context);
+void vchiq_dump_platform_instances(struct seq_file *f);
 
-int vchiq_dump_platform_service_state(void *dump_context, struct vchiq_service *service);
+void vchiq_dump_platform_service_state(struct seq_file *f, struct vchiq_service *service);
 
 int vchiq_use_service_internal(struct vchiq_service *service);
 
index 58db78a9c8d4c979d5abbae28e2c55ec321a783f..d833e4e2973a7f0b1c276136ddbcaf9dd7f98a85 100644 (file)
@@ -40,6 +40,13 @@ static int debugfs_trace_show(struct seq_file *f, void *offset)
        return 0;
 }
 
+static int vchiq_dump_show(struct seq_file *f, void *offset)
+{
+       vchiq_dump_state(f, &g_state);
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(vchiq_dump);
+
 static int debugfs_trace_open(struct inode *inode, struct file *file)
 {
        return single_open(file, debugfs_trace_show, inode->i_private);
@@ -115,6 +122,9 @@ void vchiq_debugfs_init(void)
 {
        vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL);
        vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir);
+
+       debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, NULL,
+                           &vchiq_dump_fops);
 }
 
 /* remove all the debugfs entries */
index 0bc93f48c14cba8ab6a8c7322bb05339cf678fc2..4d9deeeb637aacede3d72c920a93ae10efb3cf11 100644 (file)
@@ -47,9 +47,8 @@ user_service_free(void *userdata)
 
 static void close_delivered(struct user_service *user_service)
 {
-       vchiq_log_debug(user_service->service->state->dev, VCHIQ_ARM,
-                       "%s(handle=%x)",
-                       __func__, user_service->service->handle);
+       dev_dbg(user_service->service->state->dev,
+               "arm: (handle=%x)\n", user_service->service->handle);
 
        if (user_service->close_pending) {
                /* Allow the underlying service to be culled */
@@ -235,8 +234,7 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
                        spin_unlock(&msg_queue_spinlock);
                        DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
                        if (wait_for_completion_interruptible(&user_service->insert_event)) {
-                               vchiq_log_debug(service->state->dev, VCHIQ_ARM,
-                                               "DEQUEUE_MESSAGE interrupted");
+                               dev_dbg(service->state->dev, "arm: DEQUEUE_MESSAGE interrupted\n");
                                ret = -EINTR;
                                break;
                        }
@@ -271,9 +269,9 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
                        ret = -EFAULT;
                }
        } else {
-               vchiq_log_error(service->state->dev, VCHIQ_ARM,
-                               "header %pK: bufsize %x < size %x",
-                               header, args->bufsize, header->size);
+               dev_err(service->state->dev,
+                       "arm: header %pK: bufsize %x < size %x\n",
+                       header, args->bufsize, header->size);
                WARN(1, "invalid size\n");
                ret = -EMSGSIZE;
        }
@@ -318,13 +316,13 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
                }
                mutex_unlock(&instance->bulk_waiter_list_mutex);
                if (!waiter) {
-                       vchiq_log_error(service->state->dev, VCHIQ_ARM,
-                                       "no bulk_waiter found for pid %d", current->pid);
+                       dev_err(service->state->dev,
+                               "arm: no bulk_waiter found for pid %d\n", current->pid);
                        ret = -ESRCH;
                        goto out;
                }
-               vchiq_log_debug(service->state->dev, VCHIQ_ARM,
-                               "found bulk_waiter %pK for pid %d", waiter, current->pid);
+               dev_dbg(service->state->dev, "arm: found bulk_waiter %pK for pid %d\n",
+                       waiter, current->pid);
                userdata = &waiter->bulk_waiter;
        } else {
                userdata = args->userdata;
@@ -355,8 +353,8 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
                mutex_lock(&instance->bulk_waiter_list_mutex);
                list_add(&waiter->list, &instance->bulk_waiter_list);
                mutex_unlock(&instance->bulk_waiter_list_mutex);
-               vchiq_log_debug(service->state->dev, VCHIQ_ARM,
-                               "saved bulk_waiter %pK for pid %d", waiter, current->pid);
+               dev_dbg(service->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
+                       waiter, current->pid);
 
                ret = put_user(mode_waiting, mode);
        }
@@ -455,8 +453,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
                mutex_lock(&instance->completion_mutex);
                if (rc) {
                        DEBUG_TRACE(AWAIT_COMPLETION_LINE);
-                       vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
-                                       "AWAIT_COMPLETION interrupted");
+                       dev_dbg(instance->state->dev, "arm: AWAIT_COMPLETION interrupted\n");
                        ret = -EINTR;
                        goto out;
                }
@@ -501,10 +498,10 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
                        msglen = header->size + sizeof(struct vchiq_header);
                        /* This must be a VCHIQ-style service */
                        if (args->msgbufsize < msglen) {
-                               vchiq_log_error(service->state->dev, VCHIQ_ARM,
-                                               "header %pK: msgbufsize %x < msglen %x",
-                                               header, args->msgbufsize, msglen);
-                                               WARN(1, "invalid message size\n");
+                               dev_err(service->state->dev,
+                                       "arm: header %pK: msgbufsize %x < msglen %x\n",
+                                       header, args->msgbufsize, msglen);
+                               WARN(1, "invalid message size\n");
                                if (ret == 0)
                                        ret = -EMSGSIZE;
                                break;
@@ -582,10 +579,9 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        long ret = 0;
        int i, rc;
 
-       vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
-                       "%s - instance %pK, cmd %s, arg %lx", __func__, instance,
-                       ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
-                       ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
+       dev_dbg(instance->state->dev, "arm: instance %pK, cmd %s, arg %lx\n", instance,
+               ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
+               ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
 
        switch (cmd) {
        case VCHIQ_IOC_SHUTDOWN:
@@ -618,9 +614,9 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                }
                rc = mutex_lock_killable(&instance->state->mutex);
                if (rc) {
-                       vchiq_log_error(instance->state->dev, VCHIQ_ARM,
-                                       "vchiq: connect: could not lock mutex for state %d: %d",
-                                       instance->state->id, rc);
+                       dev_err(instance->state->dev,
+                               "arm: vchiq: connect: could not lock mutex for state %d: %d\n",
+                               instance->state->id, rc);
                        ret = -EINTR;
                        break;
                }
@@ -630,8 +626,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                if (!status)
                        instance->connected = 1;
                else
-                       vchiq_log_error(instance->state->dev, VCHIQ_ARM,
-                                       "vchiq: could not connect: %d", status);
+                       dev_err(instance->state->dev,
+                               "arm: vchiq: could not connect: %d\n", status);
                break;
 
        case VCHIQ_IOC_CREATE_SERVICE: {
@@ -700,13 +696,13 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                                vchiq_use_service_internal(service) :
                                vchiq_release_service_internal(service);
                        if (ret) {
-                               vchiq_log_error(instance->state->dev, VCHIQ_SUSPEND,
-                                               "%s: cmd %s returned error %ld for service %p4cc:%03d",
-                                               __func__, (cmd == VCHIQ_IOC_USE_SERVICE) ?
-                                               "VCHIQ_IOC_USE_SERVICE" :
-                                               "VCHIQ_IOC_RELEASE_SERVICE",
-                                               ret, &service->base.fourcc,
-                                               service->client_id);
+                               dev_err(instance->state->dev,
+                                       "suspend: cmd %s returned error %ld for service %p4cc:%03d\n",
+                                       (cmd == VCHIQ_IOC_USE_SERVICE) ?
+                                       "VCHIQ_IOC_USE_SERVICE" :
+                                       "VCHIQ_IOC_RELEASE_SERVICE",
+                                       ret, &service->base.fourcc,
+                                       service->client_id);
                        }
                } else {
                        ret = -EINVAL;
@@ -868,15 +864,15 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        }
 
        if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK)) {
-               vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
-                               "  ioctl instance %pK, cmd %s -> status %d, %ld",
-                               instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
-                               ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
+               dev_dbg(instance->state->dev,
+                       "arm: ioctl instance %pK, cmd %s -> status %d, %ld\n",
+                       instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+                       ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
        } else {
-               vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
-                               "  ioctl instance %pK, cmd %s -> status %d, %ld",
-                               instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
-                               ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
+               dev_dbg(instance->state->dev,
+                       "arm: ioctl instance %pK, cmd %s -> status %d\n, %ld\n",
+                       instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+                       ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
        }
 
        return ret;
@@ -1170,11 +1166,10 @@ static int vchiq_open(struct inode *inode, struct file *file)
        struct vchiq_state *state = vchiq_get_state();
        struct vchiq_instance *instance;
 
-       vchiq_log_debug(state->dev, VCHIQ_ARM, "vchiq_open");
+       dev_dbg(state->dev, "arm: vchiq open\n");
 
        if (!state) {
-               vchiq_log_error(state->dev, VCHIQ_ARM,
-                               "vchiq has no connection to VideoCore");
+               dev_err(state->dev, "arm: vchiq has no connection to VideoCore\n");
                return -ENOTCONN;
        }
 
@@ -1206,8 +1201,7 @@ static int vchiq_release(struct inode *inode, struct file *file)
        int ret = 0;
        int i;
 
-       vchiq_log_debug(state->dev, VCHIQ_ARM, "%s: instance=%lx", __func__,
-                       (unsigned long)instance);
+       dev_dbg(state->dev, "arm: instance=%p\n", instance);
 
        if (!state) {
                ret = -EPERM;
@@ -1306,26 +1300,6 @@ out:
        return ret;
 }
 
-static ssize_t
-vchiq_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
-{
-       struct dump_context context;
-       int err;
-
-       context.buf = buf;
-       context.actual = 0;
-       context.space = count;
-       context.offset = *ppos;
-
-       err = vchiq_dump_state(&context, &g_state);
-       if (err)
-               return err;
-
-       *ppos += context.actual;
-
-       return context.actual;
-}
-
 static const struct file_operations
 vchiq_fops = {
        .owner = THIS_MODULE,
@@ -1335,7 +1309,6 @@ vchiq_fops = {
 #endif
        .open = vchiq_open,
        .release = vchiq_release,
-       .read = vchiq_read
 };
 
 static struct miscdevice vchiq_miscdev = {
index d65cc5510649e96354ca307fdac22f854a7650d9..8e5df6ce36e8f9182627d16cf1e9671aac061524 100644 (file)
@@ -3,18 +3,32 @@ menuconfig VME_BUS
        bool "VME bridge support"
        depends on PCI
        help
-         If you say Y here you get support for the VME bridge Framework.
+         Enable support for VME (VersaModular Eurocard bus) bridge modules.
+         The bridge allows connecting VME devices to systems with existing
+         interfaces (like USB or PCI) by means of translating VME protocol
+         operations.
+
+         Note that this only enables the bridge framework. You'll also
+         likely want to enable a driver for the specific bridge device
+         you have in order to actually use it. If unsure, say N.
 
 if VME_BUS
 
 comment "VME Bridge Drivers"
 
 config VME_TSI148
-       tristate "Tempe"
+       tristate "Tundra TSI148 VME bridge support"
        depends on HAS_DMA
        help
-        If you say Y here you get support for the Tundra TSI148 VME bridge
-        chip.
+        If you say Y here you get support for the Tundra TSI148 VME-to-PCI/X
+        bridge chip (and pin-compatible clones).
+
+        TSI148 is a high-performance, 2eSST and VME64-compliant VME-to-PCI/X
+        interconnect bridge with support for PCI and PCI-X bus interface.
+        It is primarily used in industrial and embedded systems.
+
+        To compile this driver as a module, say M - the module will be
+        called vme_tsi148. If unsure, say N.
 
 config VME_FAKE
        tristate "Fake"
index 5c416c31ec5766fc5d836364446e9671f380b7a8..e9461a7a7ab8ba40ed8978134928a5d669f44f5c 100644 (file)
@@ -340,8 +340,8 @@ int vme_slave_set(struct vme_resource *resource, int enabled,
        image = list_entry(resource->entry, struct vme_slave_resource, list);
 
        if (!bridge->slave_set) {
-               dev_err(bridge->parent, "Function not supported\n");
-               return -ENOSYS;
+               dev_err(bridge->parent, "%s not supported\n", __func__);
+               return -EINVAL;
        }
 
        if (!(((image->address_attr & aspace) == aspace) &&
index 350ab8f3778a4699708b2b2925ec7f5da2e584a9..36183f2a64c11af1470df0b376bd4b3d4443b3d6 100644 (file)
  *      CARDvUpdateBasicTopRate - Update BasicTopRate
  *      CARDbAddBasicRate - Add to BasicRateSet
  *      CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
- *      CARDqGetTSFOffset - Calculate TSFOffset
+ *      card_get_tsf_offset - Calculate TSFOffset
  *      vt6655_get_current_tsf - Read Current NIC TSF counter
- *      CARDqGetNextTBTT - Calculate Next Beacon TSF counter
+ *      card_get_next_tbtt - Calculate Next Beacon TSF counter
  *      CARDvSetFirstNextTBTT - Set NIC Beacon time
  *      CARDvUpdateNextTBTT - Sync. NIC Beacon time
- *      CARDbRadioPowerOff - Turn Off NIC Radio Power
+ *      card_radio_power_off - Turn Off NIC Radio Power
  *
  * Revision History:
  *      06-10-2003 Bryan YC Fan:  Re-write codes to support VT3253 spec.
@@ -280,7 +280,7 @@ bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type)
  *  In:
  *      priv            - The adapter to be sync.
  *      rx_rate         - data rate of receive beacon
- *      qwBSSTimestamp  - Rx BCN's TSF
+ *      bss_timestamp   - Rx BCN's TSF
  *      qwLocalTSF      - Local TSF
  *  Out:
  *      none
@@ -288,20 +288,20 @@ bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type)
  * Return Value: none
  */
 bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
-                   u64 qwBSSTimestamp)
+                   u64 bss_timestamp)
 {
        u64 local_tsf;
-       u64 qwTSFOffset = 0;
+       u64 tsf_offset = 0;
 
        local_tsf = vt6655_get_current_tsf(priv);
 
-       if (qwBSSTimestamp != local_tsf) {
-               qwTSFOffset = CARDqGetTSFOffset(rx_rate, qwBSSTimestamp,
+       if (bss_timestamp != local_tsf) {
+               tsf_offset = card_get_tsf_offset(rx_rate, bss_timestamp,
                                                local_tsf);
                /* adjust TSF, HW's TSF add TSF Offset reg */
-               qwTSFOffset =  le64_to_cpu(qwTSFOffset);
-               iowrite32((u32)qwTSFOffset, priv->port_offset + MAC_REG_TSFOFST);
-               iowrite32((u32)(qwTSFOffset >> 32), priv->port_offset + MAC_REG_TSFOFST + 4);
+               tsf_offset =  le64_to_cpu(tsf_offset);
+               iowrite32((u32)tsf_offset, priv->port_offset + MAC_REG_TSFOFST);
+               iowrite32((u32)(tsf_offset >> 32), priv->port_offset + MAC_REG_TSFOFST + 4);
                vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN);
        }
        return true;
@@ -314,28 +314,28 @@ bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
  * Parameters:
  *  In:
  *      priv         - The adapter to be set.
- *      wBeaconInterval - Beacon Interval
+ *      beacon_interval - Beacon Interval
  *  Out:
  *      none
  *
  * Return Value: true if succeed; otherwise false
  */
-bool CARDbSetBeaconPeriod(struct vnt_private *priv,
-                         unsigned short wBeaconInterval)
+bool card_set_beacon_period(struct vnt_private *priv,
+                         unsigned short beacon_interval)
 {
-       u64 qwNextTBTT;
+       u64 next_tbtt;
 
-       qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
+       next_tbtt = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
 
-       qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
+       next_tbtt = card_get_next_tbtt(next_tbtt, beacon_interval);
 
        /* set HW beacon interval */
-       iowrite16(wBeaconInterval, priv->port_offset + MAC_REG_BI);
-       priv->wBeaconInterval = wBeaconInterval;
+       iowrite16(beacon_interval, priv->port_offset + MAC_REG_BI);
+       priv->beacon_interval = beacon_interval;
        /* Set NextTBTT */
-       qwNextTBTT =  le64_to_cpu(qwNextTBTT);
-       iowrite32((u32)qwNextTBTT, priv->port_offset + MAC_REG_NEXTTBTT);
-       iowrite32((u32)(qwNextTBTT >> 32), priv->port_offset + MAC_REG_NEXTTBTT + 4);
+       next_tbtt =  le64_to_cpu(next_tbtt);
+       iowrite32((u32)next_tbtt, priv->port_offset + MAC_REG_NEXTTBTT);
+       iowrite32((u32)(next_tbtt >> 32), priv->port_offset + MAC_REG_NEXTTBTT + 4);
        vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
 
        return true;
@@ -351,7 +351,7 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv,
  *      none
  *
  */
-void CARDbRadioPowerOff(struct vnt_private *priv)
+void card_radio_power_off(struct vnt_private *priv)
 {
        if (priv->radio_off)
                return;
@@ -382,29 +382,29 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
        vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_GPIOCTL0, LED_ACTSET);  /* LED issue */
 }
 
-void CARDvSafeResetTx(struct vnt_private *priv)
+void card_safe_reset_tx(struct vnt_private *priv)
 {
        unsigned int uu;
-       struct vnt_tx_desc *pCurrTD;
+       struct vnt_tx_desc *curr_td;
 
        /* initialize TD index */
-       priv->apTailTD[0] = &priv->apTD0Rings[0];
+       priv->tail_td[0] = &priv->apTD0Rings[0];
        priv->apCurrTD[0] = &priv->apTD0Rings[0];
 
-       priv->apTailTD[1] = &priv->apTD1Rings[0];
+       priv->tail_td[1] = &priv->apTD1Rings[0];
        priv->apCurrTD[1] = &priv->apTD1Rings[0];
 
        for (uu = 0; uu < TYPE_MAXTD; uu++)
                priv->iTDUsed[uu] = 0;
 
        for (uu = 0; uu < priv->opts.tx_descs[0]; uu++) {
-               pCurrTD = &priv->apTD0Rings[uu];
-               pCurrTD->td0.owner = OWNED_BY_HOST;
+               curr_td = &priv->apTD0Rings[uu];
+               curr_td->td0.owner = OWNED_BY_HOST;
                /* init all Tx Packet pointer to NULL */
        }
        for (uu = 0; uu < priv->opts.tx_descs[1]; uu++) {
-               pCurrTD = &priv->apTD1Rings[uu];
-               pCurrTD->td0.owner = OWNED_BY_HOST;
+               curr_td = &priv->apTD1Rings[uu];
+               curr_td->td0.owner = OWNED_BY_HOST;
                /* init all Tx Packet pointer to NULL */
        }
 
@@ -708,7 +708,7 @@ unsigned char card_get_pkt_type(struct vnt_private *priv)
  *
  * Return Value: TSF Offset value
  */
-u64 CARDqGetTSFOffset(unsigned char rx_rate, u64 qwTSF1, u64 qwTSF2)
+u64 card_get_tsf_offset(unsigned char rx_rate, u64 qwTSF1, u64 qwTSF2)
 {
        unsigned short wRxBcnTSFOffst;
 
@@ -764,11 +764,11 @@ u64 vt6655_get_current_tsf(struct vnt_private *priv)
  *
  * Return Value: TSF value of next Beacon
  */
-u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
+u64 card_get_next_tbtt(u64 qwTSF, unsigned short beacon_interval)
 {
        u32 beacon_int;
 
-       beacon_int = wBeaconInterval * 1024;
+       beacon_int = beacon_interval * 1024;
        if (beacon_int) {
                do_div(qwTSF, beacon_int);
                qwTSF += 1;
@@ -785,25 +785,25 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
  * Parameters:
  *  In:
  *      iobase          - IO Base
- *      wBeaconInterval - Beacon Interval
+ *      beacon_interval - Beacon Interval
  *  Out:
  *      none
  *
  * Return Value: none
  */
 void CARDvSetFirstNextTBTT(struct vnt_private *priv,
-                          unsigned short wBeaconInterval)
+                          unsigned short beacon_interval)
 {
        void __iomem *iobase = priv->port_offset;
-       u64 qwNextTBTT;
+       u64 next_tbtt;
 
-       qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
+       next_tbtt = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
 
-       qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
+       next_tbtt = card_get_next_tbtt(next_tbtt, beacon_interval);
        /* Set NextTBTT */
-       qwNextTBTT =  le64_to_cpu(qwNextTBTT);
-       iowrite32((u32)qwNextTBTT, iobase + MAC_REG_NEXTTBTT);
-       iowrite32((u32)(qwNextTBTT >> 32), iobase + MAC_REG_NEXTTBTT + 4);
+       next_tbtt =  le64_to_cpu(next_tbtt);
+       iowrite32((u32)next_tbtt, iobase + MAC_REG_NEXTTBTT);
+       iowrite32((u32)(next_tbtt >> 32), iobase + MAC_REG_NEXTTBTT + 4);
        vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
 }
 
@@ -815,18 +815,18 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
  *  In:
  *      priv         - The adapter to be set
  *      qwTSF           - Current TSF counter
- *      wBeaconInterval - Beacon Interval
+ *      beacon_interval - Beacon Interval
  *  Out:
  *      none
  *
  * Return Value: none
  */
 void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
-                        unsigned short wBeaconInterval)
+                        unsigned short beacon_interval)
 {
        void __iomem *iobase = priv->port_offset;
 
-       qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
+       qwTSF = card_get_next_tbtt(qwTSF, beacon_interval);
        /* Set NextTBTT */
        qwTSF =  le64_to_cpu(qwTSF);
        iowrite32((u32)qwTSF, iobase + MAC_REG_NEXTTBTT);
index 19689a291f5b45b430183110d5c85f4f373e758f..f52e42564e81afd236e53534fd557250bd439b10 100644 (file)
@@ -43,20 +43,20 @@ void card_set_rspinf(struct vnt_private *priv, u8 bb_type);
 void CARDvUpdateBasicTopRate(struct vnt_private *priv);
 bool CARDbIsOFDMinBasicRate(struct vnt_private *priv);
 void CARDvSetFirstNextTBTT(struct vnt_private *priv,
-                          unsigned short wBeaconInterval);
+                          unsigned short beacon_interval);
 void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
-                        unsigned short wBeaconInterval);
+                        unsigned short beacon_interval);
 u64 vt6655_get_current_tsf(struct vnt_private *priv);
-u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
-u64 CARDqGetTSFOffset(unsigned char rx_rate, u64 qwTSF1, u64 qwTSF2);
+u64 card_get_next_tbtt(u64 qwTSF, unsigned short beacon_interval);
+u64 card_get_tsf_offset(unsigned char rx_rate, u64 qwTSF1, u64 qwTSF2);
 unsigned char card_get_pkt_type(struct vnt_private *priv);
-void CARDvSafeResetTx(struct vnt_private *priv);
+void card_safe_reset_tx(struct vnt_private *priv);
 void CARDvSafeResetRx(struct vnt_private *priv);
-void CARDbRadioPowerOff(struct vnt_private *priv);
+void card_radio_power_off(struct vnt_private *priv);
 bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type);
 bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
-                   u64 qwBSSTimestamp);
-bool CARDbSetBeaconPeriod(struct vnt_private *priv,
-                         unsigned short wBeaconInterval);
+                   u64 bss_timestamp);
+bool card_set_beacon_period(struct vnt_private *priv,
+                         unsigned short beacon_interval);
 
 #endif /* __CARD_H__ */
index d9ee0b7401fe063eacccda6b8c2c68503c027525..0212240ba23f9102421a9a6d16c1807e9781277f 100644 (file)
@@ -133,7 +133,7 @@ struct vnt_private {
        volatile int                iTDUsed[TYPE_MAXTD];
 
        struct vnt_tx_desc *apCurrTD[TYPE_MAXTD];
-       struct vnt_tx_desc *apTailTD[TYPE_MAXTD];
+       struct vnt_tx_desc *tail_td[TYPE_MAXTD];
 
        struct vnt_tx_desc *apTD0Rings;
        struct vnt_tx_desc *apTD1Rings;
@@ -281,7 +281,7 @@ struct vnt_private {
 
        unsigned char abyEEPROM[EEP_MAX_CONTEXT_SIZE]; /* unsigned long alignment */
 
-       unsigned short wBeaconInterval;
+       unsigned short beacon_interval;
        u16 wake_up_count;
 
        struct work_struct interrupt_work;
index 7d297526e653967a659b854cc73b110b185e44d5..b0b262de648014759e6425ab48d2c5b510229ef2 100644 (file)
@@ -454,7 +454,7 @@ static void device_init_registers(struct vnt_private *priv)
        }
 
        if (priv->hw_radio_off || priv->bRadioControlOff)
-               CARDbRadioPowerOff(priv);
+               card_radio_power_off(priv);
 
        /* get Permanent network address */
        SROMvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr);
@@ -463,7 +463,7 @@ static void device_init_registers(struct vnt_private *priv)
        /* reset Tx pointer */
        CARDvSafeResetRx(priv);
        /* reset Rx pointer */
-       CARDvSafeResetTx(priv);
+       card_safe_reset_tx(priv);
 
        if (priv->local_id <= REV_ID_VT3253_A1)
                vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);
@@ -737,7 +737,7 @@ static int device_init_td0_ring(struct vnt_private *priv)
 
        if (i > 0)
                priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
-       priv->apTailTD[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
+       priv->tail_td[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
 
        return 0;
 
@@ -777,7 +777,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
 
        if (i > 0)
                priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
-       priv->apTailTD[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
+       priv->tail_td[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
 
        return 0;
 
@@ -969,7 +969,7 @@ static int device_tx_srv(struct vnt_private *priv, unsigned int idx)
        unsigned char byTsr0;
        unsigned char byTsr1;
 
-       for (desc = priv->apTailTD[idx]; priv->iTDUsed[idx] > 0; desc = desc->next) {
+       for (desc = priv->tail_td[idx]; priv->iTDUsed[idx] > 0; desc = desc->next) {
                if (desc->td0.owner == OWNED_BY_NIC)
                        break;
                if (works++ > 15)
@@ -1007,7 +1007,7 @@ static int device_tx_srv(struct vnt_private *priv, unsigned int idx)
                }
        }
 
-       priv->apTailTD[idx] = desc;
+       priv->tail_td[idx] = desc;
 
        return works;
 }
@@ -1349,7 +1349,7 @@ static void vnt_stop(struct ieee80211_hw *hw)
 
        MACbShutdown(priv);
        MACbSoftwareReset(priv);
-       CARDbRadioPowerOff(priv);
+       card_radio_power_off(priv);
 
        device_free_td0_ring(priv);
        device_free_td1_ring(priv);
@@ -1537,7 +1537,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
                        card_update_tsf(priv, conf->beacon_rate->hw_value,
                                       conf->sync_tsf);
 
-                       CARDbSetBeaconPeriod(priv, conf->beacon_int);
+                       card_set_beacon_period(priv, conf->beacon_int);
 
                        CARDvSetFirstNextTBTT(priv, conf->beacon_int);
                } else {
@@ -1712,7 +1712,7 @@ static int vnt_init(struct vnt_private *priv)
 
        priv->mac_hw = true;
 
-       CARDbRadioPowerOff(priv);
+       card_radio_power_off(priv);
 
        return 0;
 }
index 522d34ca9b0ff54921521ace43cfd0d76c280410..5e5ed582c35e3918d3e066ff7676e7424cf63416 100644 (file)
@@ -1456,7 +1456,7 @@ int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
 
        CARDvSetFirstNextTBTT(priv, conf->beacon_int);
 
-       CARDbSetBeaconPeriod(priv, conf->beacon_int);
+       card_set_beacon_period(priv, conf->beacon_int);
 
        return vnt_beacon_make(priv, vif);
 }
index 506193e870c49159b2a8ba8c5b07ec98e6084407..7a85e6477e4655e3cae5bab15aed90116dc31a94 100644 (file)
@@ -147,7 +147,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_node_acl *nacl = se_sess->se_node_acl;
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-       unsigned long flags;
 
        rcu_read_lock();
        deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
@@ -178,10 +177,6 @@ out_unlock:
        se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
        se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 
-       spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
-       list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
-       spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
-
        return 0;
 }
 EXPORT_SYMBOL(transport_lookup_tmr_lun);
index 670cfb7bd426ac677d15e0a9ae2b63f5c716c27a..73d0d6133ac8f2860323a98662db7a21acfe2d6c 100644 (file)
@@ -3629,6 +3629,10 @@ int transport_generic_handle_tmr(
        unsigned long flags;
        bool aborted = false;
 
+       spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
+       list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
+       spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);
+
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->transport_state & CMD_T_ABORTED) {
                aborted = true;
index 59883502eff48f03ec72e7b840f279f3f314d18f..17a8ae5e991d0521a240cca01bea491cb5368115 100644 (file)
@@ -33,6 +33,13 @@ config THERMAL_STATISTICS
 
          If in doubt, say N.
 
+config THERMAL_DEBUGFS
+       bool "Thermal subsystem debug support"
+       depends on DEBUG_FS
+       help
+         Say Y to allow the thermal subsystem to collect diagnostic
+         information that can be accessed via debugfs.
+
 config THERMAL_EMERGENCY_POWEROFF_DELAY_MS
        int "Emergency poweroff delay in milli-seconds"
        default 0
index a8318d6710367c789a802d50d8b533608dec7177..d77d7fe99a84aa3fb1f8059cd8c58f643741b1db 100644 (file)
@@ -10,6 +10,8 @@ thermal_sys-y                 += thermal_trip.o thermal_helpers.o
 # netlink interface to manage the thermal framework
 thermal_sys-$(CONFIG_THERMAL_NETLINK)          += thermal_netlink.o
 
+thermal_sys-$(CONFIG_THERMAL_DEBUGFS)  += thermal_debugfs.o
+
 # interface to/from other layers providing sensors
 thermal_sys-$(CONFIG_THERMAL_HWMON)            += thermal_hwmon.o
 thermal_sys-$(CONFIG_THERMAL_OF)               += thermal_of.o
index 7b6aa265ff6a0ace1fa1eab47ebaee9e3527feed..81e061f183ad128f0bb8d516a8339c5131bb729e 100644 (file)
@@ -762,7 +762,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz,
 
        trip = params->trip_switch_on;
        if (trip && tz->temperature < trip->temperature) {
-               update = tz->last_temperature >= trip->temperature;
+               update = tz->passive;
                tz->passive = 0;
                reset_pid_controller(params);
                allow_maximum_power(tz, update);
index 22445403b52006d727fe74993809d7587aab7fe0..3b04c6ec4fca0762c9508e7450fad4af989e71d6 100644 (file)
@@ -35,7 +35,9 @@
 #include <linux/processor.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/suspend.h>
 #include <linux/string.h>
+#include <linux/syscore_ops.h>
 #include <linux/topology.h>
 #include <linux/workqueue.h>
 
@@ -571,6 +573,30 @@ static __init int hfi_parse_features(void)
        return 0;
 }
 
+static void hfi_do_enable(void)
+{
+       /* This code runs only on the boot CPU. */
+       struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);
+       struct hfi_instance *hfi_instance = info->hfi_instance;
+
+       /* No locking needed. There is no concurrency with CPU online. */
+       hfi_set_hw_table(hfi_instance);
+       hfi_enable();
+}
+
+static int hfi_do_disable(void)
+{
+       /* No locking needed. There is no concurrency with CPU offline. */
+       hfi_disable();
+
+       return 0;
+}
+
+static struct syscore_ops hfi_pm_ops = {
+       .resume = hfi_do_enable,
+       .suspend = hfi_do_disable,
+};
+
 void __init intel_hfi_init(void)
 {
        struct hfi_instance *hfi_instance;
@@ -602,6 +628,8 @@ void __init intel_hfi_init(void)
        if (!hfi_updates_wq)
                goto err_nomem;
 
+       register_syscore_ops(&hfi_pm_ops);
+
        return;
 
 err_nomem:
index 5ac5cb60bae67b8caa54d47e0ebb740d6a4505ab..bc6eb0dd66a495f04ae5d0c398611895351c9b2f 100644 (file)
@@ -49,7 +49,6 @@
  */
 #define DEFAULT_DURATION_JIFFIES (6)
 
-static unsigned int target_mwait;
 static struct dentry *debug_dir;
 static bool poll_pkg_cstate_enable;
 
@@ -312,34 +311,6 @@ MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
        "\twindow size results in slower response time but more smooth\n"
        "\tclamping results. default to 2.");
 
-static void find_target_mwait(void)
-{
-       unsigned int eax, ebx, ecx, edx;
-       unsigned int highest_cstate = 0;
-       unsigned int highest_subcstate = 0;
-       int i;
-
-       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
-               return;
-
-       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
-
-       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
-           !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
-               return;
-
-       edx >>= MWAIT_SUBSTATE_SIZE;
-       for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
-               if (edx & MWAIT_SUBSTATE_MASK) {
-                       highest_cstate = i;
-                       highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
-               }
-       }
-       target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
-               (highest_subcstate - 1);
-
-}
-
 struct pkg_cstate_info {
        bool skip;
        int msr_index;
@@ -759,9 +730,6 @@ static int __init powerclamp_probe(void)
                return -ENODEV;
        }
 
-       /* find the deepest mwait value */
-       find_target_mwait();
-
        return 0;
 }
 
index 99ca0c7bc41c790a50cd9b5e7f10344189f0eae6..0f475fe46bc9dc4db106abb4b2a52fe5286b78e9 100644 (file)
@@ -8,9 +8,10 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/thermal.h>
 #include <linux/units.h>
 #include "thermal_hwmon.h"
index fa88d870724166b9203fcb8c42ce92811423e1f4..dfaa6341694a089d2ff1a1c61ee7e12efdfcdbdd 100644 (file)
@@ -211,7 +211,7 @@ exit:
        mutex_unlock(&tz->lock);
        mutex_unlock(&thermal_governor_lock);
 
-       thermal_notify_tz_gov_change(tz->id, policy);
+       thermal_notify_tz_gov_change(tz, policy);
 
        return ret;
 }
@@ -381,9 +381,8 @@ static void handle_thermal_trip(struct thermal_zone_device *tz,
                 * the threshold and the trip temperature will be equal.
                 */
                if (tz->temperature >= trip->temperature) {
-                       thermal_notify_tz_trip_up(tz->id,
-                                                 thermal_zone_trip_id(tz, trip),
-                                                 tz->temperature);
+                       thermal_notify_tz_trip_up(tz, trip);
+                       thermal_debug_tz_trip_up(tz, trip);
                        trip->threshold = trip->temperature - trip->hysteresis;
                } else {
                        trip->threshold = trip->temperature;
@@ -400,9 +399,8 @@ static void handle_thermal_trip(struct thermal_zone_device *tz,
                 * the trip.
                 */
                if (tz->temperature < trip->temperature - trip->hysteresis) {
-                       thermal_notify_tz_trip_down(tz->id,
-                                                   thermal_zone_trip_id(tz, trip),
-                                                   tz->temperature);
+                       thermal_notify_tz_trip_down(tz, trip);
+                       thermal_debug_tz_trip_down(tz, trip);
                        trip->threshold = trip->temperature;
                } else {
                        trip->threshold = trip->temperature - trip->hysteresis;
@@ -434,6 +432,7 @@ static void update_temperature(struct thermal_zone_device *tz)
        trace_thermal_temperature(tz);
 
        thermal_genl_sampling_temp(tz->id, temp);
+       thermal_debug_update_temp(tz);
 }
 
 static void thermal_zone_device_check(struct work_struct *work)
@@ -505,9 +504,9 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
        mutex_unlock(&tz->lock);
 
        if (mode == THERMAL_DEVICE_ENABLED)
-               thermal_notify_tz_enable(tz->id);
+               thermal_notify_tz_enable(tz);
        else
-               thermal_notify_tz_disable(tz->id);
+               thermal_notify_tz_disable(tz);
 
        return ret;
 }
@@ -846,7 +845,7 @@ static void thermal_release(struct device *dev)
                            sizeof("cooling_device") - 1)) {
                cdev = to_cooling_device(dev);
                thermal_cooling_device_destroy_sysfs(cdev);
-               kfree(cdev->type);
+               kfree_const(cdev->type);
                ida_free(&thermal_cdev_ida, cdev->id);
                kfree(cdev);
        }
@@ -918,7 +917,7 @@ __thermal_cooling_device_register(struct device_node *np,
        cdev->id = ret;
        id = ret;
 
-       cdev->type = kstrdup(type ? type : "", GFP_KERNEL);
+       cdev->type = kstrdup_const(type ? type : "", GFP_KERNEL);
        if (!cdev->type) {
                ret = -ENOMEM;
                goto out_ida_remove;
@@ -964,12 +963,14 @@ __thermal_cooling_device_register(struct device_node *np,
 
        mutex_unlock(&thermal_list_lock);
 
+       thermal_debug_cdev_add(cdev);
+
        return cdev;
 
 out_cooling_dev:
        thermal_cooling_device_destroy_sysfs(cdev);
 out_cdev_type:
-       kfree(cdev->type);
+       kfree_const(cdev->type);
 out_ida_remove:
        ida_free(&thermal_cdev_ida, id);
 out_kfree_cdev:
@@ -1170,6 +1171,8 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
        if (!cdev)
                return;
 
+       thermal_debug_cdev_remove(cdev);
+
        mutex_lock(&thermal_list_lock);
 
        if (!thermal_cooling_device_present(cdev)) {
@@ -1411,7 +1414,9 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
        if (atomic_cmpxchg(&tz->need_update, 1, 0))
                thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
 
-       thermal_notify_tz_create(tz->id, tz->type);
+       thermal_notify_tz_create(tz);
+
+       thermal_debug_tz_add(tz);
 
        return tz;
 
@@ -1470,14 +1475,13 @@ EXPORT_SYMBOL_GPL(thermal_zone_device);
  */
 void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 {
-       int tz_id;
        struct thermal_cooling_device *cdev;
        struct thermal_zone_device *pos = NULL;
 
        if (!tz)
                return;
 
-       tz_id = tz->id;
+       thermal_debug_tz_remove(tz);
 
        mutex_lock(&thermal_list_lock);
        list_for_each_entry(pos, &thermal_tz_list, node)
@@ -1514,7 +1518,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
        put_device(&tz->device);
 
-       thermal_notify_tz_delete(tz_id);
+       thermal_notify_tz_delete(tz);
 
        wait_for_completion(&tz->removal);
        kfree(tz);
@@ -1636,6 +1640,8 @@ static int __init thermal_init(void)
 {
        int result;
 
+       thermal_debug_init();
+
        result = thermal_netlink_init();
        if (result)
                goto error;
index 4e023d54fd27daaae93f869fefbb656609eef78f..e9c099ecdd0fb9af23e0cf2840cecaaf25c88138 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/thermal.h>
 
 #include "thermal_netlink.h"
+#include "thermal_debugfs.h"
 
 /* Default Thermal Governor */
 #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
new file mode 100644 (file)
index 0000000..c617e8b
--- /dev/null
@@ -0,0 +1,839 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2023 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * Thermal subsystem debug support
+ */
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/mutex.h>
+#include <linux/thermal.h>
+
+#include "thermal_core.h"
+
+static struct dentry *d_root;
+static struct dentry *d_cdev;
+static struct dentry *d_tz;
+
+/*
+ * Length of the string containing the thermal zone id or the cooling
+ * device id, including the ending nul character. We can reasonably
+ * assume there won't be more than 256 thermal zones as the maximum
+ * observed today is around 32.
+ */
+#define IDSLENGTH 4
+
+/*
+ * The cooling device transition list is stored in a hash table where
+ * the size is CDEVSTATS_HASH_SIZE. The majority of cooling devices
+ * have dozens of states but some can have many more, so a hash table
+ * is more adequate in this case, because the cost of browsing the entire
+ * list when storing the transitions may not be negligible.
+ */
+#define CDEVSTATS_HASH_SIZE 16
+
+/**
+ * struct cdev_debugfs - per cooling device statistics structure
+ * A cooling device can have a high number of states. Showing the
+ * transitions on a matrix based representation can be overkill given
+ * most of the transitions won't happen and we end up with a matrix
+ * filled with zero. Instead, we show the transitions which actually
+ * happened.
+ *
+ * Every transition updates the current_state and the timestamp. The
+ * transitions and the durations are stored in lists.
+ *
+ * @total: the number of transitions for this cooling device
+ * @current_state: the current cooling device state
+ * @timestamp: the state change timestamp
+ * @transitions: an array of lists containing the state transitions
+ * @durations: an array of lists containing the residencies of each state
+ */
+struct cdev_debugfs {
+       u32 total;
+       int current_state;
+       ktime_t timestamp;
+       struct list_head transitions[CDEVSTATS_HASH_SIZE];
+       struct list_head durations[CDEVSTATS_HASH_SIZE];
+};
+
+/**
+ * struct cdev_record - Common structure for cooling device entry
+ *
+ * The following common structure allows storing the information
+ * related to the transitions and to the state residencies. Entries
+ * are identified with an id which is associated with a value. It is
+ * used as a node for the "transitions" and "durations" lists above.
+ *
+ * @node: node to insert the structure in a list
+ * @id: identifier of the value which can be a state or a transition
+ * @residency: a ktime_t representing a state residency duration
+ * @count: a number of occurrences
+ */
+struct cdev_record {
+       struct list_head node;
+       int id;
+       union {
+                ktime_t residency;
+                u64 count;
+        };
+};
+
+/**
+ * struct trip_stats - Thermal trip statistics
+ *
+ * The trip_stats structure has the relevant information to show the
+ * statistics related to temperature going above a trip point.
+ *
+ * @timestamp: the trip crossing timestamp
+ * @duration: total time when the zone temperature was above the trip point
+ * @count: the number of times the zone temperature was above the trip point
+ * @max: maximum recorded temperature above the trip point
+ * @min: minimum recorded temperature above the trip point
+ * @avg: average temperature above the trip point
+ */
+struct trip_stats {
+       ktime_t timestamp;
+       ktime_t duration;
+       int count;
+       int max;
+       int min;
+       int avg;
+};
+
+/**
+ * struct tz_episode - A mitigation episode information
+ *
+ * The tz_episode structure describes a mitigation episode. A
+ * mitigation episode begins when the trip point with the lowest
+ * temperature is crossed on the way up and ends when it is crossed
+ * on the way down. During this episode multiple trip points can be
+ * crossed on the way up and down if there are multiple trips
+ * described in the firmware after the lowest temperature trip point.
+ *
+ * @timestamp: first trip point crossed the way up
+ * @duration: total duration of the mitigation episode
+ * @node: a list element to be added to the list of tz events
+ * @trip_stats: per trip point statistics, flexible array
+ */
+struct tz_episode {
+       ktime_t timestamp;
+       ktime_t duration;
+       struct list_head node;
+       struct trip_stats trip_stats[];
+};
+
+/**
+ * struct tz_debugfs - Store all mitigation episodes for a thermal zone
+ *
+ * The tz_debugfs structure contains the list of the mitigation
+ * episodes and has to track which trip point has been crossed in
+ * order to handle correctly nested trip point mitigation episodes.
+ *
+ * We keep the history of the trip point crossed in an array and as we
+ * can go back and forth inside this history, eg. trip 0,1,2,1,2,1,0,
+ * we keep track of the current position in the history array.
+ *
+ * @tz_episodes: a list of thermal mitigation episodes
+ * @trips_crossed: an array of trip points crossed by id
+ * @nr_trips: the number of trip points currently being crossed
+ */
+struct tz_debugfs {
+       struct list_head tz_episodes;
+       int *trips_crossed;
+       int nr_trips;
+};
+
+/**
+ * struct thermal_debugfs - High level structure for a thermal object in debugfs
+ *
+ * The thermal_debugfs structure is the common structure used by the
+ * cooling device or the thermal zone to store the statistics.
+ *
+ * @d_top: top directory of the thermal object directory
+ * @lock: per object lock to protect the internals
+ *
+ * @cdev_dbg: a cooling device debug structure
+ * @tz_dbg: a thermal zone debug structure
+ */
+struct thermal_debugfs {
+       struct dentry *d_top;
+       struct mutex lock;
+       union {
+               struct cdev_debugfs cdev_dbg;
+               struct tz_debugfs tz_dbg;
+       };
+};
+
+void thermal_debug_init(void)
+{
+       d_root = debugfs_create_dir("thermal", NULL);
+       if (!d_root)
+               return;
+
+       d_cdev = debugfs_create_dir("cooling_devices", d_root);
+       if (!d_cdev)
+               return;
+
+       d_tz = debugfs_create_dir("thermal_zones", d_root);
+}
+
+static struct thermal_debugfs *thermal_debugfs_add_id(struct dentry *d, int id)
+{
+       struct thermal_debugfs *thermal_dbg;
+       char ids[IDSLENGTH];
+
+       thermal_dbg = kzalloc(sizeof(*thermal_dbg), GFP_KERNEL);
+       if (!thermal_dbg)
+               return NULL;
+
+       mutex_init(&thermal_dbg->lock);
+
+       snprintf(ids, IDSLENGTH, "%d", id);
+
+       thermal_dbg->d_top = debugfs_create_dir(ids, d);
+       if (!thermal_dbg->d_top) {
+               kfree(thermal_dbg);
+               return NULL;
+       }
+
+       return thermal_dbg;
+}
+
+static void thermal_debugfs_remove_id(struct thermal_debugfs *thermal_dbg)
+{
+       if (!thermal_dbg)
+               return;
+
+       debugfs_remove(thermal_dbg->d_top);
+
+       kfree(thermal_dbg);
+}
+
+static struct cdev_record *
+thermal_debugfs_cdev_record_alloc(struct thermal_debugfs *thermal_dbg,
+                                 struct list_head *lists, int id)
+{
+       struct cdev_record *cdev_record;
+
+       cdev_record = kzalloc(sizeof(*cdev_record), GFP_KERNEL);
+       if (!cdev_record)
+               return NULL;
+
+       cdev_record->id = id;
+       INIT_LIST_HEAD(&cdev_record->node);
+       list_add_tail(&cdev_record->node,
+                     &lists[cdev_record->id % CDEVSTATS_HASH_SIZE]);
+
+       return cdev_record;
+}
+
+static struct cdev_record *
+thermal_debugfs_cdev_record_find(struct thermal_debugfs *thermal_dbg,
+                                struct list_head *lists, int id)
+{
+       struct cdev_record *entry;
+
+       list_for_each_entry(entry, &lists[id % CDEVSTATS_HASH_SIZE], node)
+               if (entry->id == id)
+                       return entry;
+
+       return NULL;
+}
+
+static struct cdev_record *
+thermal_debugfs_cdev_record_get(struct thermal_debugfs *thermal_dbg,
+                               struct list_head *lists, int id)
+{
+       struct cdev_record *cdev_record;
+
+       cdev_record = thermal_debugfs_cdev_record_find(thermal_dbg, lists, id);
+       if (cdev_record)
+               return cdev_record;
+
+       return thermal_debugfs_cdev_record_alloc(thermal_dbg, lists, id);
+}
+
+static void thermal_debugfs_cdev_clear(struct cdev_debugfs *cdev_dbg)
+{
+       int i;
+       struct cdev_record *entry, *tmp;
+
+       for (i = 0; i < CDEVSTATS_HASH_SIZE; i++) {
+
+               list_for_each_entry_safe(entry, tmp,
+                                        &cdev_dbg->transitions[i], node) {
+                       list_del(&entry->node);
+                       kfree(entry);
+               }
+
+               list_for_each_entry_safe(entry, tmp,
+                                        &cdev_dbg->durations[i], node) {
+                       list_del(&entry->node);
+                       kfree(entry);
+               }
+       }
+
+       cdev_dbg->total = 0;
+}
+
+static void *cdev_seq_start(struct seq_file *s, loff_t *pos)
+{
+       struct thermal_debugfs *thermal_dbg = s->private;
+
+       mutex_lock(&thermal_dbg->lock);
+
+       return (*pos < CDEVSTATS_HASH_SIZE) ? pos : NULL;
+}
+
+static void *cdev_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       (*pos)++;
+
+       return (*pos < CDEVSTATS_HASH_SIZE) ? pos : NULL;
+}
+
+static void cdev_seq_stop(struct seq_file *s, void *v)
+{
+       struct thermal_debugfs *thermal_dbg = s->private;
+
+       mutex_unlock(&thermal_dbg->lock);
+}
+
+static int cdev_tt_seq_show(struct seq_file *s, void *v)
+{
+       struct thermal_debugfs *thermal_dbg = s->private;
+       struct cdev_debugfs *cdev_dbg = &thermal_dbg->cdev_dbg;
+       struct list_head *transitions = cdev_dbg->transitions;
+       struct cdev_record *entry;
+       int i = *(loff_t *)v;
+
+       if (!i)
+               seq_puts(s, "Transition\tOccurences\n");
+
+       list_for_each_entry(entry, &transitions[i], node) {
+               /*
+                * Assuming maximum cdev states is 1024, the longest
+                * string for a transition would be "1024->1024\0"
+                */
+               char buffer[11];
+
+               snprintf(buffer, ARRAY_SIZE(buffer), "%d->%d",
+                        entry->id >> 16, entry->id & 0xFFFF);
+
+               seq_printf(s, "%-10s\t%-10llu\n", buffer, entry->count);
+       }
+
+       return 0;
+}
+
+static const struct seq_operations tt_sops = {
+       .start = cdev_seq_start,
+       .next = cdev_seq_next,
+       .stop = cdev_seq_stop,
+       .show = cdev_tt_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(tt);
+
+static int cdev_dt_seq_show(struct seq_file *s, void *v)
+{
+       struct thermal_debugfs *thermal_dbg = s->private;
+       struct cdev_debugfs *cdev_dbg = &thermal_dbg->cdev_dbg;
+       struct list_head *durations = cdev_dbg->durations;
+       struct cdev_record *entry;
+       int i = *(loff_t *)v;
+
+       if (!i)
+               seq_puts(s, "State\tResidency\n");
+
+       list_for_each_entry(entry, &durations[i], node) {
+               s64 duration = ktime_to_ms(entry->residency);
+
+               if (entry->id == cdev_dbg->current_state)
+                       duration += ktime_ms_delta(ktime_get(),
+                                                  cdev_dbg->timestamp);
+
+               seq_printf(s, "%-5d\t%-10llu\n", entry->id, duration);
+       }
+
+       return 0;
+}
+
+static const struct seq_operations dt_sops = {
+       .start = cdev_seq_start,
+       .next = cdev_seq_next,
+       .stop = cdev_seq_stop,
+       .show = cdev_dt_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(dt);
+
+static int cdev_clear_set(void *data, u64 val)
+{
+       struct thermal_debugfs *thermal_dbg = data;
+
+       if (!val)
+               return -EINVAL;
+
+       mutex_lock(&thermal_dbg->lock);
+
+       thermal_debugfs_cdev_clear(&thermal_dbg->cdev_dbg);
+
+       mutex_unlock(&thermal_dbg->lock);
+
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cdev_clear_fops, NULL, cdev_clear_set, "%llu\n");
+
+/**
+ * thermal_debug_cdev_state_update - Update a cooling device state change
+ *
+ * Computes a transition and the duration of the previous state residency.
+ *
+ * @cdev : a pointer to a cooling device
+ * @new_state: an integer corresponding to the new cooling device state
+ */
+void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
+                                    int new_state)
+{
+       struct thermal_debugfs *thermal_dbg = cdev->debugfs;
+       struct cdev_debugfs *cdev_dbg;
+       struct cdev_record *cdev_record;
+       int transition, old_state;
+
+       if (!thermal_dbg || (thermal_dbg->cdev_dbg.current_state == new_state))
+               return;
+
+       mutex_lock(&thermal_dbg->lock);
+
+       cdev_dbg = &thermal_dbg->cdev_dbg;
+
+       old_state = cdev_dbg->current_state;
+
+       /*
+        * Get the old state information in the durations list. If
+        * this one does not exist, a new allocated one will be
+        * returned. Recompute the total duration in the old state and
+        * get a new timestamp for the new state.
+        */
+       cdev_record = thermal_debugfs_cdev_record_get(thermal_dbg,
+                                                     cdev_dbg->durations,
+                                                     old_state);
+       if (cdev_record) {
+               ktime_t now = ktime_get();
+               ktime_t delta = ktime_sub(now, cdev_dbg->timestamp);
+               cdev_record->residency = ktime_add(cdev_record->residency, delta);
+               cdev_dbg->timestamp = now;
+       }
+
+       cdev_dbg->current_state = new_state;
+       transition = (old_state << 16) | new_state;
+
+       /*
+        * Get the transition in the transitions list. If this one
+        * does not exist, a new allocated one will be returned.
+        * Increment the occurrence of this transition which is stored
+        * in the value field.
+        */
+       cdev_record = thermal_debugfs_cdev_record_get(thermal_dbg,
+                                                     cdev_dbg->transitions,
+                                                     transition);
+       if (cdev_record)
+               cdev_record->count++;
+
+       cdev_dbg->total++;
+
+       mutex_unlock(&thermal_dbg->lock);
+}
+
+/**
+ * thermal_debug_cdev_add - Add a cooling device debugfs entry
+ *
+ * Allocates a cooling device object for debug, initializes the
+ * statistics and creates the entries in debugfs.
+ * @cdev: a pointer to a cooling device
+ */
+void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
+{
+       struct thermal_debugfs *thermal_dbg;
+       struct cdev_debugfs *cdev_dbg;
+       int i;
+
+       thermal_dbg = thermal_debugfs_add_id(d_cdev, cdev->id);
+       if (!thermal_dbg)
+               return;
+
+       cdev_dbg = &thermal_dbg->cdev_dbg;
+
+       for (i = 0; i < CDEVSTATS_HASH_SIZE; i++) {
+               INIT_LIST_HEAD(&cdev_dbg->transitions[i]);
+               INIT_LIST_HEAD(&cdev_dbg->durations[i]);
+       }
+
+       cdev_dbg->current_state = 0;
+       cdev_dbg->timestamp = ktime_get();
+
+       debugfs_create_file("trans_table", 0400, thermal_dbg->d_top,
+                           thermal_dbg, &tt_fops);
+
+       debugfs_create_file("time_in_state_ms", 0400, thermal_dbg->d_top,
+                           thermal_dbg, &dt_fops);
+
+       debugfs_create_file("clear", 0200, thermal_dbg->d_top,
+                           thermal_dbg, &cdev_clear_fops);
+
+       debugfs_create_u32("total_trans", 0400, thermal_dbg->d_top,
+                          &cdev_dbg->total);
+
+       cdev->debugfs = thermal_dbg;
+}
+
+/**
+ * thermal_debug_cdev_remove - Remove a cooling device debugfs entry
+ *
+ * Frees the statistics memory data and removes the debugfs entry.
+ *
+ * @cdev: a pointer to a cooling device
+ */
+void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev)
+{
+       struct thermal_debugfs *thermal_dbg = cdev->debugfs;
+
+       if (!thermal_dbg)
+               return;
+
+       mutex_lock(&thermal_dbg->lock);
+
+       thermal_debugfs_cdev_clear(&thermal_dbg->cdev_dbg);
+       cdev->debugfs = NULL;
+
+       mutex_unlock(&thermal_dbg->lock);
+
+       thermal_debugfs_remove_id(thermal_dbg);
+}
+
+static struct tz_episode *thermal_debugfs_tz_event_alloc(struct thermal_zone_device *tz,
+                                                       ktime_t now)
+{
+       struct tz_episode *tze;
+       int i;
+
+       tze = kzalloc(struct_size(tze, trip_stats, tz->num_trips), GFP_KERNEL);
+       if (!tze)
+               return NULL;
+
+       INIT_LIST_HEAD(&tze->node);
+       tze->timestamp = now;
+
+       for (i = 0; i < tz->num_trips; i++) {
+               tze->trip_stats[i].min = INT_MAX;
+               tze->trip_stats[i].max = INT_MIN;
+       }
+
+       return tze;
+}
+
+void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
+                             const struct thermal_trip *trip)
+{
+       struct tz_episode *tze;
+       struct tz_debugfs *tz_dbg;
+       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+       int temperature = tz->temperature;
+       int trip_id = thermal_zone_trip_id(tz, trip);
+       ktime_t now = ktime_get();
+
+       if (!thermal_dbg)
+               return;
+
+       mutex_lock(&thermal_dbg->lock);
+
+       tz_dbg = &thermal_dbg->tz_dbg;
+
+       /*
+        * The mitigation is starting. A mitigation can contain
+        * several episodes where each of them is related to a
+        * temperature crossing a trip point. The episodes are
+        * nested. That means when the temperature is crossing the
+        * first trip point, the duration begins to be measured. If
+        * the temperature continues to increase and reaches the
+        * second trip point, the duration of the first trip must be
+        * also accumulated.
+        *
+        * eg.
+        *
+        * temp
+        *   ^
+        *   |             --------
+        * trip 2         /        \         ------
+        *   |           /|        |\      /|      |\
+        * trip 1       / |        | `----  |      | \
+        *   |         /| |        |        |      | |\
+        * trip 0     / | |        |        |      | | \
+        *   |       /| | |        |        |      | | |\
+        *   |      / | | |        |        |      | | | `--
+        *   |     /  | | |        |        |      | | |
+        *   |-----   | | |        |        |      | | |
+        *   |        | | |        |        |      | | |
+        *    --------|-|-|--------|--------|------|-|-|------------------> time
+        *            | | |<--t2-->|        |<-t2'>| | |
+        *            | |                            | |
+        *            | |<------------t1------------>| |
+        *            |                                |
+        *            |<-------------t0--------------->|
+        *
+        */
+       if (!tz_dbg->nr_trips) {
+               tze = thermal_debugfs_tz_event_alloc(tz, now);
+               if (!tze)
+                       goto unlock;
+
+               list_add(&tze->node, &tz_dbg->tz_episodes);
+       }
+
+       /*
+        * Each time a trip point is crossed the way up, the trip_id
+        * is stored in the trip_crossed array and the nr_trips is
+        * incremented. A nr_trips equal to zero means we are entering
+        * a mitigation episode.
+        *
+        * The trip ids may not be in ascending order but the
+        * result in the array trips_crossed will be in ascending
+        * temperature order. The function detecting when a trip point
+        * is crossed the way down will handle the very rare case when
+        * the trip points may have been reordered during this
+        * mitigation episode.
+        */
+       tz_dbg->trips_crossed[tz_dbg->nr_trips++] = trip_id;
+
+       tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
+       tze->trip_stats[trip_id].timestamp = now;
+       tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
+       tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
+       tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
+               (temperature - tze->trip_stats[trip_id].avg) /
+               tze->trip_stats[trip_id].count;
+
+unlock:
+       mutex_unlock(&thermal_dbg->lock);
+}
+
+void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
+                               const struct thermal_trip *trip)
+{
+       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+       struct tz_episode *tze;
+       struct tz_debugfs *tz_dbg;
+       ktime_t delta, now = ktime_get();
+       int trip_id = thermal_zone_trip_id(tz, trip);
+       int i;
+
+       if (!thermal_dbg)
+               return;
+
+       mutex_lock(&thermal_dbg->lock);
+
+       tz_dbg = &thermal_dbg->tz_dbg;
+
+       /*
+        * The temperature crosses the way down but there was no
+        * mitigation detected before. That may happen when the
+        * temperature is greater than a trip point when registering a
+        * thermal zone, which is a common use case as the kernel has
+        * no mitigation mechanism yet at boot time.
+        */
+       if (!tz_dbg->nr_trips)
+               goto out;
+
+       for (i = tz_dbg->nr_trips - 1; i >= 0; i--) {
+               if (tz_dbg->trips_crossed[i] == trip_id)
+                       break;
+       }
+
+       if (i < 0)
+               goto out;
+
+       tz_dbg->nr_trips--;
+
+       if (i < tz_dbg->nr_trips)
+               tz_dbg->trips_crossed[i] = tz_dbg->trips_crossed[tz_dbg->nr_trips];
+
+       tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
+
+       delta = ktime_sub(now, tze->trip_stats[trip_id].timestamp);
+
+       tze->trip_stats[trip_id].duration =
+               ktime_add(delta, tze->trip_stats[trip_id].duration);
+
+       /*
+        * This event closes the mitigation as we are crossing the
+        * last trip point the way down.
+        */
+       if (!tz_dbg->nr_trips)
+               tze->duration = ktime_sub(now, tze->timestamp);
+
+out:
+       mutex_unlock(&thermal_dbg->lock);
+}
+
+void thermal_debug_update_temp(struct thermal_zone_device *tz)
+{
+       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+       struct tz_episode *tze;
+       struct tz_debugfs *tz_dbg;
+       int trip_id, i;
+
+       if (!thermal_dbg)
+               return;
+
+       mutex_lock(&thermal_dbg->lock);
+
+       tz_dbg = &thermal_dbg->tz_dbg;
+
+       if (!tz_dbg->nr_trips)
+               goto out;
+
+       for (i = 0; i < tz_dbg->nr_trips; i++) {
+               trip_id = tz_dbg->trips_crossed[i];
+               tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
+               tze->trip_stats[trip_id].count++;
+               tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, tz->temperature);
+               tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, tz->temperature);
+               tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
+                       (tz->temperature - tze->trip_stats[trip_id].avg) /
+                       tze->trip_stats[trip_id].count;
+       }
+out:
+       mutex_unlock(&thermal_dbg->lock);
+}
+
+static void *tze_seq_start(struct seq_file *s, loff_t *pos)
+{
+       struct thermal_zone_device *tz = s->private;
+       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+       struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg;
+
+       mutex_lock(&thermal_dbg->lock);
+
+       return seq_list_start(&tz_dbg->tz_episodes, *pos);
+}
+
+static void *tze_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       struct thermal_zone_device *tz = s->private;
+       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+       struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg;
+
+       return seq_list_next(v, &tz_dbg->tz_episodes, pos);
+}
+
+static void tze_seq_stop(struct seq_file *s, void *v)
+{
+       struct thermal_zone_device *tz = s->private;
+       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+
+       mutex_unlock(&thermal_dbg->lock);
+}
+
+static int tze_seq_show(struct seq_file *s, void *v)
+{
+       struct thermal_zone_device *tz = s->private;
+       struct thermal_trip *trip;
+       struct tz_episode *tze;
+       const char *type;
+       int trip_id;
+
+       tze = list_entry((struct list_head *)v, struct tz_episode, node);
+
+       seq_printf(s, ",-Mitigation at %lluus, duration=%llums\n",
+                  ktime_to_us(tze->timestamp),
+                  ktime_to_ms(tze->duration));
+
+       seq_printf(s, "| trip |     type | temp(°mC) | hyst(°mC) |  duration  |  avg(°mC) |  min(°mC) |  max(°mC) |\n");
+
+       for_each_trip(tz, trip) {
+               /*
+                * There is no possible mitigation happening at the
+                * critical trip point, so the stats will always be
+                * zero, skip this trip point
+                */
+               if (trip->type == THERMAL_TRIP_CRITICAL)
+                       continue;
+
+               if (trip->type == THERMAL_TRIP_PASSIVE)
+                       type = "passive";
+               else if (trip->type == THERMAL_TRIP_ACTIVE)
+                       type = "active";
+               else
+                       type = "hot";
+
+               trip_id = thermal_zone_trip_id(tz, trip);
+
+               seq_printf(s, "| %*d | %*s | %*d | %*d | %*lld | %*d | %*d | %*d |\n",
+                          4 , trip_id,
+                          8, type,
+                          9, trip->temperature,
+                          9, trip->hysteresis,
+                          10, ktime_to_ms(tze->trip_stats[trip_id].duration),
+                          9, tze->trip_stats[trip_id].avg,
+                          9, tze->trip_stats[trip_id].min,
+                          9, tze->trip_stats[trip_id].max);
+       }
+
+       return 0;
+}
+
+static const struct seq_operations tze_sops = {
+       .start = tze_seq_start,
+       .next = tze_seq_next,
+       .stop = tze_seq_stop,
+       .show = tze_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(tze);
+
+void thermal_debug_tz_add(struct thermal_zone_device *tz)
+{
+       struct thermal_debugfs *thermal_dbg;
+       struct tz_debugfs *tz_dbg;
+
+       thermal_dbg = thermal_debugfs_add_id(d_tz, tz->id);
+       if (!thermal_dbg)
+               return;
+
+       tz_dbg = &thermal_dbg->tz_dbg;
+
+       tz_dbg->trips_crossed = kzalloc(sizeof(int) * tz->num_trips, GFP_KERNEL);
+       if (!tz_dbg->trips_crossed) {
+               thermal_debugfs_remove_id(thermal_dbg);
+               return;
+       }
+
+       INIT_LIST_HEAD(&tz_dbg->tz_episodes);
+
+       debugfs_create_file("mitigations", 0400, thermal_dbg->d_top, tz, &tze_fops);
+
+       tz->debugfs = thermal_dbg;
+}
+
+void thermal_debug_tz_remove(struct thermal_zone_device *tz)
+{
+       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+
+       if (!thermal_dbg)
+               return;
+
+       mutex_lock(&thermal_dbg->lock);
+
+       tz->debugfs = NULL;
+
+       mutex_unlock(&thermal_dbg->lock);
+
+       thermal_debugfs_remove_id(thermal_dbg);
+}
diff --git a/drivers/thermal/thermal_debugfs.h b/drivers/thermal/thermal_debugfs.h
new file mode 100644 (file)
index 0000000..155b9af
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CONFIG_THERMAL_DEBUGFS
+void thermal_debug_init(void);
+void thermal_debug_cdev_add(struct thermal_cooling_device *cdev);
+void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev);
+void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, int state);
+void thermal_debug_tz_add(struct thermal_zone_device *tz);
+void thermal_debug_tz_remove(struct thermal_zone_device *tz);
+void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
+                             const struct thermal_trip *trip);
+void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
+                               const struct thermal_trip *trip);
+void thermal_debug_update_temp(struct thermal_zone_device *tz);
+#else
+static inline void thermal_debug_init(void) {}
+static inline void thermal_debug_cdev_add(struct thermal_cooling_device *cdev) {}
+static inline void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev) {}
+static inline void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
+                                                  int state) {}
+static inline void thermal_debug_tz_add(struct thermal_zone_device *tz) {}
+static inline void thermal_debug_tz_remove(struct thermal_zone_device *tz) {}
+static inline void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
+                                           const struct thermal_trip *trip) {};
+static inline void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
+                                             const struct thermal_trip *trip) {}
+static inline void thermal_debug_update_temp(struct thermal_zone_device *tz) {}
+#endif /* CONFIG_THERMAL_DEBUGFS */
index c3982e0f0075030acc8ef3b598278bbd264f23cb..0329f4a71b020501e1a3bf6038d2c5a097d0f2f0 100644 (file)
@@ -146,14 +146,23 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
 
-static void thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev,
-                                      int target)
+static int thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev, int state)
 {
-       if (cdev->ops->set_cur_state(cdev, target))
-               return;
+       int ret;
+
+       /*
+        * No check is needed for the ops->set_cur_state as the
+        * registering function checked the ops are correctly set
+        */
+       ret = cdev->ops->set_cur_state(cdev, state);
+       if (ret)
+               return ret;
 
-       thermal_notify_cdev_state_update(cdev->id, target);
-       thermal_cooling_device_stats_update(cdev, target);
+       thermal_notify_cdev_state_update(cdev, state);
+       thermal_cooling_device_stats_update(cdev, state);
+       thermal_debug_cdev_state_update(cdev, state);
+
+       return 0;
 }
 
 void __thermal_cdev_update(struct thermal_cooling_device *cdev)
index 332052e24a86c07a7b3af1f9a5c280d08b6374a8..76a231a29654512fd7eba589b827182daa0d63a9 100644 (file)
@@ -148,7 +148,7 @@ static int thermal_genl_event_tz_trip_up(struct param *p)
        return 0;
 }
 
-static int thermal_genl_event_tz_trip_add(struct param *p)
+static int thermal_genl_event_tz_trip_change(struct param *p)
 {
        if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
            nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id) ||
@@ -160,15 +160,6 @@ static int thermal_genl_event_tz_trip_add(struct param *p)
        return 0;
 }
 
-static int thermal_genl_event_tz_trip_delete(struct param *p)
-{
-       if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
-           nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id))
-               return -EMSGSIZE;
-
-       return 0;
-}
-
 static int thermal_genl_event_cdev_add(struct param *p)
 {
        if (nla_put_string(p->msg, THERMAL_GENL_ATTR_CDEV_NAME,
@@ -258,9 +249,6 @@ int thermal_genl_event_tz_disable(struct param *p)
 int thermal_genl_event_tz_trip_down(struct param *p)
        __attribute__((alias("thermal_genl_event_tz_trip_up")));
 
-int thermal_genl_event_tz_trip_change(struct param *p)
-       __attribute__((alias("thermal_genl_event_tz_trip_add")));
-
 static cb_t event_cb[] = {
        [THERMAL_GENL_EVENT_TZ_CREATE]          = thermal_genl_event_tz_create,
        [THERMAL_GENL_EVENT_TZ_DELETE]          = thermal_genl_event_tz_delete,
@@ -269,8 +257,6 @@ static cb_t event_cb[] = {
        [THERMAL_GENL_EVENT_TZ_TRIP_UP]         = thermal_genl_event_tz_trip_up,
        [THERMAL_GENL_EVENT_TZ_TRIP_DOWN]       = thermal_genl_event_tz_trip_down,
        [THERMAL_GENL_EVENT_TZ_TRIP_CHANGE]     = thermal_genl_event_tz_trip_change,
-       [THERMAL_GENL_EVENT_TZ_TRIP_ADD]        = thermal_genl_event_tz_trip_add,
-       [THERMAL_GENL_EVENT_TZ_TRIP_DELETE]     = thermal_genl_event_tz_trip_delete,
        [THERMAL_GENL_EVENT_CDEV_ADD]           = thermal_genl_event_cdev_add,
        [THERMAL_GENL_EVENT_CDEV_DELETE]        = thermal_genl_event_cdev_delete,
        [THERMAL_GENL_EVENT_CDEV_STATE_UPDATE]  = thermal_genl_event_cdev_state_update,
@@ -318,100 +304,93 @@ out_free_msg:
        return ret;
 }
 
-int thermal_notify_tz_create(int tz_id, const char *name)
+int thermal_notify_tz_create(const struct thermal_zone_device *tz)
 {
-       struct param p = { .tz_id = tz_id, .name = name };
+       struct param p = { .tz_id = tz->id, .name = tz->type };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_CREATE, &p);
 }
 
-int thermal_notify_tz_delete(int tz_id)
+int thermal_notify_tz_delete(const struct thermal_zone_device *tz)
 {
-       struct param p = { .tz_id = tz_id };
+       struct param p = { .tz_id = tz->id };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_DELETE, &p);
 }
 
-int thermal_notify_tz_enable(int tz_id)
+int thermal_notify_tz_enable(const struct thermal_zone_device *tz)
 {
-       struct param p = { .tz_id = tz_id };
+       struct param p = { .tz_id = tz->id };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_ENABLE, &p);
 }
 
-int thermal_notify_tz_disable(int tz_id)
+int thermal_notify_tz_disable(const struct thermal_zone_device *tz)
 {
-       struct param p = { .tz_id = tz_id };
+       struct param p = { .tz_id = tz->id };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_DISABLE, &p);
 }
 
-int thermal_notify_tz_trip_down(int tz_id, int trip_id, int temp)
+int thermal_notify_tz_trip_down(const struct thermal_zone_device *tz,
+                               const struct thermal_trip *trip)
 {
-       struct param p = { .tz_id = tz_id, .trip_id = trip_id, .temp = temp };
+       struct param p = { .tz_id = tz->id,
+                          .trip_id = thermal_zone_trip_id(tz, trip),
+                          .temp = tz->temperature };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_DOWN, &p);
 }
 
-int thermal_notify_tz_trip_up(int tz_id, int trip_id, int temp)
+int thermal_notify_tz_trip_up(const struct thermal_zone_device *tz,
+                             const struct thermal_trip *trip)
 {
-       struct param p = { .tz_id = tz_id, .trip_id = trip_id, .temp = temp };
+       struct param p = { .tz_id = tz->id,
+                          .trip_id = thermal_zone_trip_id(tz, trip),
+                          .temp = tz->temperature };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_UP, &p);
 }
 
-int thermal_notify_tz_trip_add(int tz_id, int trip_id, int trip_type,
-                              int trip_temp, int trip_hyst)
-{
-       struct param p = { .tz_id = tz_id, .trip_id = trip_id,
-                          .trip_type = trip_type, .trip_temp = trip_temp,
-                          .trip_hyst = trip_hyst };
-
-       return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_ADD, &p);
-}
-
-int thermal_notify_tz_trip_delete(int tz_id, int trip_id)
-{
-       struct param p = { .tz_id = tz_id, .trip_id = trip_id };
-
-       return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_DELETE, &p);
-}
-
-int thermal_notify_tz_trip_change(int tz_id, int trip_id, int trip_type,
-                                 int trip_temp, int trip_hyst)
+int thermal_notify_tz_trip_change(const struct thermal_zone_device *tz,
+                                 const struct thermal_trip *trip)
 {
-       struct param p = { .tz_id = tz_id, .trip_id = trip_id,
-                          .trip_type = trip_type, .trip_temp = trip_temp,
-                          .trip_hyst = trip_hyst };
+       struct param p = { .tz_id = tz->id,
+                          .trip_id = thermal_zone_trip_id(tz, trip),
+                          .trip_type = trip->type,
+                          .trip_temp = trip->temperature,
+                          .trip_hyst = trip->hysteresis };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_CHANGE, &p);
 }
 
-int thermal_notify_cdev_state_update(int cdev_id, int cdev_state)
+int thermal_notify_cdev_state_update(const struct thermal_cooling_device *cdev,
+                                    int state)
 {
-       struct param p = { .cdev_id = cdev_id, .cdev_state = cdev_state };
+       struct param p = { .cdev_id = cdev->id, .cdev_state = state };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, &p);
 }
 
-int thermal_notify_cdev_add(int cdev_id, const char *name, int cdev_max_state)
+int thermal_notify_cdev_add(const struct thermal_cooling_device *cdev)
 {
-       struct param p = { .cdev_id = cdev_id, .name = name,
-                          .cdev_max_state = cdev_max_state };
+       struct param p = { .cdev_id = cdev->id, .name = cdev->type,
+                          .cdev_max_state = cdev->max_state };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_ADD, &p);
 }
 
-int thermal_notify_cdev_delete(int cdev_id)
+int thermal_notify_cdev_delete(const struct thermal_cooling_device *cdev)
 {
-       struct param p = { .cdev_id = cdev_id };
+       struct param p = { .cdev_id = cdev->id };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_DELETE, &p);
 }
 
-int thermal_notify_tz_gov_change(int tz_id, const char *name)
+int thermal_notify_tz_gov_change(const struct thermal_zone_device *tz,
+                                const char *name)
 {
-       struct param p = { .tz_id = tz_id, .name = name };
+       struct param p = { .tz_id = tz->id, .name = name };
 
        return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_GOV_CHANGE, &p);
 }
index 0a9987c3bc5787643f79b4e73e96ed39b4dbe21e..93a927e144d55fc51ec68c14d55bf495def0a6fb 100644 (file)
@@ -10,25 +10,30 @@ struct thermal_genl_cpu_caps {
        int efficiency;
 };
 
+struct thermal_zone_device;
+struct thermal_trip;
+struct thermal_cooling_device;
+
 /* Netlink notification function */
 #ifdef CONFIG_THERMAL_NETLINK
 int __init thermal_netlink_init(void);
 void __init thermal_netlink_exit(void);
-int thermal_notify_tz_create(int tz_id, const char *name);
-int thermal_notify_tz_delete(int tz_id);
-int thermal_notify_tz_enable(int tz_id);
-int thermal_notify_tz_disable(int tz_id);
-int thermal_notify_tz_trip_down(int tz_id, int id, int temp);
-int thermal_notify_tz_trip_up(int tz_id, int id, int temp);
-int thermal_notify_tz_trip_delete(int tz_id, int id);
-int thermal_notify_tz_trip_add(int tz_id, int id, int type,
-                              int temp, int hyst);
-int thermal_notify_tz_trip_change(int tz_id, int id, int type,
-                                 int temp, int hyst);
-int thermal_notify_cdev_state_update(int cdev_id, int state);
-int thermal_notify_cdev_add(int cdev_id, const char *name, int max_state);
-int thermal_notify_cdev_delete(int cdev_id);
-int thermal_notify_tz_gov_change(int tz_id, const char *name);
+int thermal_notify_tz_create(const struct thermal_zone_device *tz);
+int thermal_notify_tz_delete(const struct thermal_zone_device *tz);
+int thermal_notify_tz_enable(const struct thermal_zone_device *tz);
+int thermal_notify_tz_disable(const struct thermal_zone_device *tz);
+int thermal_notify_tz_trip_down(const struct thermal_zone_device *tz,
+                               const struct thermal_trip *trip);
+int thermal_notify_tz_trip_up(const struct thermal_zone_device *tz,
+                             const struct thermal_trip *trip);
+int thermal_notify_tz_trip_change(const struct thermal_zone_device *tz,
+                                 const struct thermal_trip *trip);
+int thermal_notify_cdev_state_update(const struct thermal_cooling_device *cdev,
+                                    int state);
+int thermal_notify_cdev_add(const struct thermal_cooling_device *cdev);
+int thermal_notify_cdev_delete(const struct thermal_cooling_device *cdev);
+int thermal_notify_tz_gov_change(const struct thermal_zone_device *tz,
+                                const char *name);
 int thermal_genl_sampling_temp(int id, int temp);
 int thermal_genl_cpu_capability_event(int count,
                                      struct thermal_genl_cpu_caps *caps);
@@ -38,70 +43,62 @@ static inline int thermal_netlink_init(void)
        return 0;
 }
 
-static inline int thermal_notify_tz_create(int tz_id, const char *name)
-{
-       return 0;
-}
-
-static inline int thermal_notify_tz_delete(int tz_id)
-{
-       return 0;
-}
-
-static inline int thermal_notify_tz_enable(int tz_id)
+static inline int thermal_notify_tz_create(const struct thermal_zone_device *tz)
 {
        return 0;
 }
 
-static inline int thermal_notify_tz_disable(int tz_id)
+static inline int thermal_notify_tz_delete(const struct thermal_zone_device *tz)
 {
        return 0;
 }
 
-static inline int thermal_notify_tz_trip_down(int tz_id, int id, int temp)
+static inline int thermal_notify_tz_enable(const struct thermal_zone_device *tz)
 {
        return 0;
 }
 
-static inline int thermal_notify_tz_trip_up(int tz_id, int id, int temp)
+static inline int thermal_notify_tz_disable(const struct thermal_zone_device *tz)
 {
        return 0;
 }
 
-static inline int thermal_notify_tz_trip_delete(int tz_id, int id)
+static inline int thermal_notify_tz_trip_down(const struct thermal_zone_device *tz,
+                                             const struct thermal_trip *trip)
 {
        return 0;
 }
 
-static inline int thermal_notify_tz_trip_add(int tz_id, int id, int type,
-                                            int temp, int hyst)
+static inline int thermal_notify_tz_trip_up(const struct thermal_zone_device *tz,
+                                           const struct thermal_trip *trip)
 {
        return 0;
 }
 
-static inline int thermal_notify_tz_trip_change(int tz_id, int id, int type,
-                                               int temp, int hyst)
+static inline int thermal_notify_tz_trip_change(const struct thermal_zone_device *tz,
+                                               const struct thermal_trip *trip)
 {
        return 0;
 }
 
-static inline int thermal_notify_cdev_state_update(int cdev_id, int state)
+static inline int thermal_notify_cdev_state_update(const struct thermal_cooling_device *cdev,
+                                                  int state)
 {
        return 0;
 }
 
-static inline int thermal_notify_cdev_add(int cdev_id, const char *name,
-                                         int max_state)
+static inline int thermal_notify_cdev_add(const struct thermal_cooling_device *cdev)
 {
        return 0;
 }
 
-static inline int thermal_notify_cdev_delete(int cdev_id)
+static inline int thermal_notify_cdev_delete(const struct thermal_cooling_device *cdev)
 {
        return 0;
 }
 
-static inline int thermal_notify_tz_gov_change(int tz_id, const char *name)
+static inline int thermal_notify_tz_gov_change(const struct thermal_zone_device *tz,
+                                              const char *name)
 {
        return 0;
 }
index 8bffa1e5e2063fe96d2666124f1ebb4a2c2f8c53..c875a26d5adf41816a1798fdf14d1fd490141e75 100644 (file)
@@ -155,9 +155,7 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz,
 void thermal_zone_trip_updated(struct thermal_zone_device *tz,
                               const struct thermal_trip *trip)
 {
-       thermal_notify_tz_trip_change(tz->id, thermal_zone_trip_id(tz, trip),
-                                     trip->type, trip->temperature,
-                                     trip->hysteresis);
+       thermal_notify_tz_trip_change(tz, trip);
        __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
 }
 
@@ -168,8 +166,6 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
                return;
 
        trip->temperature = temp;
-       thermal_notify_tz_trip_change(tz->id, thermal_zone_trip_id(tz, trip),
-                                     trip->type, trip->temperature,
-                                     trip->hysteresis);
+       thermal_notify_tz_trip_change(tz, trip);
 }
 EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp);
index ec7b5f65804e49e82533d6493c4cb90df514f2e2..9fb1a64f3300b8fcf7ebc1f7dafa6a68778da122 100644 (file)
@@ -307,7 +307,7 @@ static const struct attribute_group *domain_attr_groups[] = {
        NULL,
 };
 
-struct bus_type tb_bus_type = {
+const struct bus_type tb_bus_type = {
        .name = "thunderbolt",
        .match = tb_service_match,
        .probe = tb_service_probe,
index d8b9c734abd36345990da8401ebbcddbce39ff50..56790d50f9e3296bd3607162f6b2030fe28dc77a 100644 (file)
@@ -1020,7 +1020,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, 10, 2000);
+                         1, 10, 250);
        if (ret)
                return ret;
 
index 4b7bec74e89fbcf02b17ae44b54087dac01518b2..fb4f46e51753ab8f3952c7093489571d6a40a1b7 100644 (file)
@@ -1517,6 +1517,10 @@ static struct pci_device_id nhi_ids[] = {
          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
          .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI0),
+         .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
+         .driver_data = (kernel_ulong_t)&icl_nhi_ops },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
 
index 0f029ce758825e0a05c16def1b7255f9742e338b..7a07c7c1a9c2c6782e238e8a5785a0fec1783dff 100644 (file)
@@ -90,6 +90,8 @@ extern const struct tb_nhi_ops icl_nhi_ops;
 #define PCI_DEVICE_ID_INTEL_TGL_H_NHI1                 0x9a21
 #define PCI_DEVICE_ID_INTEL_RPL_NHI0                   0xa73e
 #define PCI_DEVICE_ID_INTEL_RPL_NHI1                   0xa76d
+#define PCI_DEVICE_ID_INTEL_LNL_NHI0                   0xa833
+#define PCI_DEVICE_ID_INTEL_LNL_NHI1                   0xa834
 
 #define PCI_CLASS_SERIAL_USB_USB4                      0x0c0340
 
index 44e9b09de47a5a01a0a22d48c9d637bdd7c7bf68..900114ba4371b10fd941e0add6ade8210c74268f 100644 (file)
@@ -941,22 +941,6 @@ int tb_port_get_link_generation(struct tb_port *port)
        }
 }
 
-static const char *width_name(enum tb_link_width width)
-{
-       switch (width) {
-       case TB_LINK_WIDTH_SINGLE:
-               return "symmetric, single lane";
-       case TB_LINK_WIDTH_DUAL:
-               return "symmetric, dual lanes";
-       case TB_LINK_WIDTH_ASYM_TX:
-               return "asymmetric, 3 transmitters, 1 receiver";
-       case TB_LINK_WIDTH_ASYM_RX:
-               return "asymmetric, 3 receivers, 1 transmitter";
-       default:
-               return "unknown";
-       }
-}
-
 /**
  * tb_port_get_link_width() - Get current link width
  * @port: Port to check (USB4 or CIO)
@@ -2769,7 +2753,7 @@ static void tb_switch_link_init(struct tb_switch *sw)
                return;
 
        tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
-       tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width));
+       tb_sw_dbg(sw, "current link width %s\n", tb_width_name(sw->link_width));
 
        bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;
 
@@ -2789,6 +2773,19 @@ static void tb_switch_link_init(struct tb_switch *sw)
        if (down->dual_link_port)
                down->dual_link_port->bonded = bonded;
        tb_port_update_credits(down);
+
+       if (tb_port_get_link_generation(up) < 4)
+               return;
+
+       /*
+        * Set the Gen 4 preferred link width. This is what the router
+        * prefers when the link is brought up. If the router does not
+        * support asymmetric link configuration, this also will be set
+        * to TB_LINK_WIDTH_DUAL.
+        */
+       sw->preferred_link_width = sw->link_width;
+       tb_sw_dbg(sw, "preferred link width %s\n",
+                 tb_width_name(sw->preferred_link_width));
 }
 
 /**
@@ -3029,7 +3026,7 @@ int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
 
        tb_switch_update_link_attributes(sw);
 
-       tb_sw_dbg(sw, "link width set to %s\n", width_name(width));
+       tb_sw_dbg(sw, "link width set to %s\n", tb_width_name(width));
        return ret;
 }
 
index fd49f86e03532c140557af4bb822eb2d7665ef95..846d2813bb1a51db8744948298d57d5e5fcdb36e 100644 (file)
@@ -513,8 +513,6 @@ static void tb_port_unconfigure_xdomain(struct tb_port *port)
                usb4_port_unconfigure_xdomain(port);
        else
                tb_lc_unconfigure_xdomain(port);
-
-       tb_port_enable(port->dual_link_port);
 }
 
 static void tb_scan_xdomain(struct tb_port *port)
@@ -1087,15 +1085,14 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
                             struct tb_port *dst_port, int requested_up,
                             int requested_down)
 {
+       bool clx = false, clx_disabled = false, downstream;
        struct tb_switch *sw;
-       bool clx, downstream;
        struct tb_port *up;
        int ret = 0;
 
        if (!asym_threshold)
                return 0;
 
-       /* Disable CL states before doing any transitions */
        downstream = tb_port_path_direction_downstream(src_port, dst_port);
        /* Pick up router deepest in the hierarchy */
        if (downstream)
@@ -1103,11 +1100,10 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
        else
                sw = src_port->sw;
 
-       clx = tb_disable_clx(sw);
-
        tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
+               struct tb_port *down = tb_switch_downstream_port(up->sw);
+               enum tb_link_width width_up, width_down;
                int consumed_up, consumed_down;
-               enum tb_link_width width;
 
                ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
                                               &consumed_up, &consumed_down);
@@ -1128,7 +1124,8 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
                        if (consumed_down + requested_down < asym_threshold)
                                continue;
 
-                       width = TB_LINK_WIDTH_ASYM_RX;
+                       width_up = TB_LINK_WIDTH_ASYM_RX;
+                       width_down = TB_LINK_WIDTH_ASYM_TX;
                } else {
                        /* Upstream, the opposite of above */
                        if (consumed_down + requested_down >= TB_ASYM_MIN) {
@@ -1138,22 +1135,34 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
                        if (consumed_up + requested_up < asym_threshold)
                                continue;
 
-                       width = TB_LINK_WIDTH_ASYM_TX;
+                       width_up = TB_LINK_WIDTH_ASYM_TX;
+                       width_down = TB_LINK_WIDTH_ASYM_RX;
                }
 
-               if (up->sw->link_width == width)
+               if (up->sw->link_width == width_up)
                        continue;
 
-               if (!tb_port_width_supported(up, width))
+               if (!tb_port_width_supported(up, width_up) ||
+                   !tb_port_width_supported(down, width_down))
                        continue;
 
+               /*
+                * Disable CL states before doing any transitions. We
+                * delayed it until now that we know there is a real
+                * transition taking place.
+                */
+               if (!clx_disabled) {
+                       clx = tb_disable_clx(sw);
+                       clx_disabled = true;
+               }
+
                tb_sw_dbg(up->sw, "configuring asymmetric link\n");
 
                /*
                 * Here requested + consumed > threshold so we need to
                 * transtion the link into asymmetric now.
                 */
-               ret = tb_switch_set_link_width(up->sw, width);
+               ret = tb_switch_set_link_width(up->sw, width_up);
                if (ret) {
                        tb_sw_warn(up->sw, "failed to set link width\n");
                        break;
@@ -1174,24 +1183,24 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
  * @dst_port: Destination adapter
  * @requested_up: New lower bandwidth request upstream (Mb/s)
  * @requested_down: New lower bandwidth request downstream (Mb/s)
+ * @keep_asym: Keep asymmetric link if preferred
  *
  * Goes over each link from @src_port to @dst_port and tries to
  * transition the link to symmetric if the currently consumed bandwidth
- * allows.
+ * allows and link asymmetric preference is ignored (if @keep_asym is %false).
  */
 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
                            struct tb_port *dst_port, int requested_up,
-                           int requested_down)
+                           int requested_down, bool keep_asym)
 {
+       bool clx = false, clx_disabled = false, downstream;
        struct tb_switch *sw;
-       bool clx, downstream;
        struct tb_port *up;
        int ret = 0;
 
        if (!asym_threshold)
                return 0;
 
-       /* Disable CL states before doing any transitions */
        downstream = tb_port_path_direction_downstream(src_port, dst_port);
        /* Pick up router deepest in the hierarchy */
        if (downstream)
@@ -1199,8 +1208,6 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
        else
                sw = src_port->sw;
 
-       clx = tb_disable_clx(sw);
-
        tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
                int consumed_up, consumed_down;
 
@@ -1233,6 +1240,25 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
                if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
                        continue;
 
+               /*
+                * Here consumed < threshold so we can transition the
+                * link to symmetric.
+                *
+                * However, if the router prefers asymmetric link we
+                * honor that (unless @keep_asym is %false).
+                */
+               if (keep_asym &&
+                   up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
+                       tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
+                       continue;
+               }
+
+               /* Disable CL states before doing any transitions */
+               if (!clx_disabled) {
+                       clx = tb_disable_clx(sw);
+                       clx_disabled = true;
+               }
+
                tb_sw_dbg(up->sw, "configuring symmetric link\n");
 
                ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
@@ -1280,7 +1306,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
                struct tb_port *host_port;
 
                host_port = tb_port_at(tb_route(sw), tb->root_switch);
-               tb_configure_sym(tb, host_port, up, 0, 0);
+               tb_configure_sym(tb, host_port, up, 0, 0, false);
        }
 
        /* Set the link configured */
@@ -1465,7 +1491,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
                 * If bandwidth on a link is < asym_threshold
                 * transition the link to symmetric.
                 */
-               tb_configure_sym(tb, src_port, dst_port, 0, 0);
+               tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
                /* Now we can allow the domain to runtime suspend again */
                pm_runtime_mark_last_busy(&dst_port->sw->dev);
                pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1901,7 +1927,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
                        return;
        }
 
-       tb_port_dbg(port, "DP %s resource available\n",
+       tb_port_dbg(port, "DP %s resource available after hotplug\n",
                    tb_port_is_dpin(port) ? "IN" : "OUT");
        list_add_tail(&port->list, &tcm->dp_resources);
 
@@ -2287,7 +2313,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
                 * If bandwidth on a link is < asym_threshold transition
                 * the link to symmetric.
                 */
-               tb_configure_sym(tb, in, out, *requested_up, *requested_down);
+               tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
                /*
                 * If requested bandwidth is less or equal than what is
                 * currently allocated to that tunnel we simply change
@@ -2330,7 +2356,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
                ret = tb_configure_asym(tb, in, out, *requested_up,
                                        *requested_down);
                if (ret) {
-                       tb_configure_sym(tb, in, out, 0, 0);
+                       tb_configure_sym(tb, in, out, 0, 0, true);
                        return ret;
                }
 
@@ -2338,7 +2364,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
                                                requested_down);
                if (ret) {
                        tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
-                       tb_configure_sym(tb, in, out, 0, 0);
+                       tb_configure_sym(tb, in, out, 0, 0, true);
                }
        } else {
                ret = -ENOBUFS;
index e299e53473ae28627216dacf3d9dbdfcf2ec8239..997c5a53690524d80445d3106c8836971bd82eff 100644 (file)
@@ -125,6 +125,7 @@ struct tb_switch_tmu {
  * @device_name: Name of the device (or %NULL if not known)
  * @link_speed: Speed of the link in Gb/s
  * @link_width: Width of the upstream facing link
+ * @preferred_link_width: Router preferred link width (only set for Gen 4 links)
  * @link_usb4: Upstream link is USB4
  * @generation: Switch Thunderbolt generation
  * @cap_plug_events: Offset to the plug events capability (%0 if not found)
@@ -178,6 +179,7 @@ struct tb_switch {
        const char *device_name;
        unsigned int link_speed;
        enum tb_link_width link_width;
+       enum tb_link_width preferred_link_width;
        bool link_usb4;
        unsigned int generation;
        int cap_plug_events;
@@ -568,6 +570,22 @@ static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
        return &sw->ports[port];
 }
 
+static inline const char *tb_width_name(enum tb_link_width width)
+{
+       switch (width) {
+       case TB_LINK_WIDTH_SINGLE:
+               return "symmetric, single lane";
+       case TB_LINK_WIDTH_DUAL:
+               return "symmetric, dual lanes";
+       case TB_LINK_WIDTH_ASYM_TX:
+               return "asymmetric, 3 transmitters, 1 receiver";
+       case TB_LINK_WIDTH_ASYM_RX:
+               return "asymmetric, 3 receivers, 1 transmitter";
+       default:
+               return "unknown";
+       }
+}
+
 /**
  * tb_port_has_remote() - Does the port have switch connected downstream
  * @port: Port to check
index 11f2aec2a5d374ede2c87215a184284009dc8e9e..9a259c72e5a74691f6d3789ce551210a4dfd807f 100644 (file)
@@ -894,7 +894,7 @@ static int tb_switch_tmu_change_mode(struct tb_switch *sw)
 
        ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
        if (ret)
-               return ret;
+               goto out;
 
        /* Program the new mode and the downstream router lane adapter */
        switch (sw->tmu.mode_request) {
index 7534cd3a81f451aa090343be0c25f995aef9fe6d..6fffb2c82d3d1332a9de5005b7a67099ef5d8582 100644 (file)
@@ -173,16 +173,28 @@ static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
        int ret;
 
        /* Only supported of both routers are at least USB4 v2 */
-       if (tb_port_get_link_generation(port) < 4)
+       if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
+          (usb4_switch_version(tunnel->dst_port->sw) < 2))
+               return 0;
+
+       if (enable && tb_port_get_link_generation(port) < 4)
                return 0;
 
        ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
        if (ret)
                return ret;
 
+       /*
+        * Downstream router could be unplugged so disable of encapsulation
+        * in upstream router is still possible.
+        */
        ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
-       if (ret)
-               return ret;
+       if (ret) {
+               if (enable)
+                       return ret;
+               if (ret != -ENODEV)
+                       return ret;
+       }
 
        tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
                      str_enabled_disabled(enable));
@@ -199,14 +211,21 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
                        return res;
        }
 
-       res = tb_pci_port_enable(tunnel->src_port, activate);
+       if (activate)
+               res = tb_pci_port_enable(tunnel->dst_port, activate);
+       else
+               res = tb_pci_port_enable(tunnel->src_port, activate);
        if (res)
                return res;
 
-       if (tb_port_is_pcie_up(tunnel->dst_port)) {
-               res = tb_pci_port_enable(tunnel->dst_port, activate);
+
+       if (activate) {
+               res = tb_pci_port_enable(tunnel->src_port, activate);
                if (res)
                        return res;
+       } else {
+               /* Downstream router could be unplugged */
+               tb_pci_port_enable(tunnel->dst_port, activate);
        }
 
        return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
@@ -1067,8 +1086,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
        return 0;
 }
 
-static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
-                          int timeout_msec)
+static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
 {
        ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
        struct tb_port *in = tunnel->src_port;
@@ -1087,15 +1105,13 @@ static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
                        return ret;
 
                if (val & DP_COMMON_CAP_DPRX_DONE) {
-                       *rate = tb_dp_cap_get_rate(val);
-                       *lanes = tb_dp_cap_get_lanes(val);
-
                        tb_tunnel_dbg(tunnel, "DPRX read done\n");
                        return 0;
                }
                usleep_range(100, 150);
        } while (ktime_before(ktime_get(), timeout));
 
+       tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
        return -ETIMEDOUT;
 }
 
@@ -1110,6 +1126,7 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
        switch (cap) {
        case DP_LOCAL_CAP:
        case DP_REMOTE_CAP:
+       case DP_COMMON_CAP:
                break;
 
        default:
@@ -1182,7 +1199,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                 * reduced one). Otherwise return the remote (possibly
                 * reduced) caps.
                 */
-               ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
+               ret = tb_dp_wait_dprx(tunnel, 150);
                if (ret) {
                        if (ret == -ETIMEDOUT)
                                ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
@@ -1190,6 +1207,9 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
                        if (ret)
                                return ret;
                }
+               ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
+               if (ret)
+                       return ret;
        } else if (sw->generation >= 2) {
                ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
                if (ret)
@@ -1313,8 +1333,6 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
                      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
                      rate, lanes, tb_dp_bandwidth(rate, lanes));
 
-       out = tunnel->dst_port;
-
        if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
                         out->cap_adap + DP_LOCAL_CAP, 1))
                return;
index 9803f0bbf20d14fe07efc408b99a9e7a5e6cfef8..9495742913d5c63363cedfffe6ca509ec8b2f3e1 100644 (file)
@@ -1462,6 +1462,11 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
                                tb_port_disable(port->dual_link_port);
                }
 
+               dev_dbg(&xd->dev, "current link speed %u.0 Gb/s\n",
+                       xd->link_speed);
+               dev_dbg(&xd->dev, "current link width %s\n",
+                       tb_width_name(xd->link_width));
+
                if (device_add(&xd->dev)) {
                        dev_err(&xd->dev, "failed to add XDomain device\n");
                        return -ENODEV;
@@ -1895,6 +1900,50 @@ struct device_type tb_xdomain_type = {
 };
 EXPORT_SYMBOL_GPL(tb_xdomain_type);
 
+static void tb_xdomain_link_init(struct tb_xdomain *xd, struct tb_port *down)
+{
+       if (!down->dual_link_port)
+               return;
+
+       /*
+        * Gen 4 links come up already as bonded so only update the port
+        * structures here.
+        */
+       if (tb_port_get_link_generation(down) >= 4) {
+               down->bonded = true;
+               down->dual_link_port->bonded = true;
+       } else {
+               xd->bonding_possible = true;
+       }
+}
+
+static void tb_xdomain_link_exit(struct tb_xdomain *xd)
+{
+       struct tb_port *down = tb_xdomain_downstream_port(xd);
+
+       if (!down->dual_link_port)
+               return;
+
+       if (tb_port_get_link_generation(down) >= 4) {
+               down->bonded = false;
+               down->dual_link_port->bonded = false;
+       } else if (xd->link_width > TB_LINK_WIDTH_SINGLE) {
+               /*
+                * Just return port structures back to way they were and
+                * update credits. No need to update userspace because
+                * the XDomain is removed soon anyway.
+                */
+               tb_port_lane_bonding_disable(down);
+               tb_port_update_credits(down);
+       } else if (down->dual_link_port) {
+               /*
+                * Re-enable the lane 1 adapter we disabled at the end
+                * of tb_xdomain_get_properties().
+                */
+               tb_port_enable(down->dual_link_port);
+       }
+}
+
 /**
  * tb_xdomain_alloc() - Allocate new XDomain object
  * @tb: Domain where the XDomain belongs
@@ -1945,7 +1994,8 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
                        goto err_free_local_uuid;
        } else {
                xd->needs_uuid = true;
-               xd->bonding_possible = !!down->dual_link_port;
+
+               tb_xdomain_link_init(xd, down);
        }
 
        device_initialize(&xd->dev);
@@ -2014,6 +2064,8 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
 
        device_for_each_child_reverse(&xd->dev, xd, unregister_service);
 
+       tb_xdomain_link_exit(xd);
+
        /*
         * Undo runtime PM here explicitly because it is possible that
         * the XDomain was never added to the bus and thus device_del()
index 785558c65ae8052f9e76d8b64d2486da381f5ebf..7716ce0d35bc219608bd67b762aac0d4544bd9b9 100644 (file)
@@ -81,7 +81,7 @@ struct serial_state {
        int                     quot;
        int                     IER;    /* Interrupt Enable Register */
        int                     MCR;    /* Modem control register */
-       int                     x_char; /* xon/xoff character */
+       u8                      x_char; /* xon/xoff character */
 };
 
 static struct tty_driver *serial_driver;
@@ -178,9 +178,9 @@ static void receive_chars(struct serial_state *info)
 {
         int status;
        int serdatr;
-       unsigned char ch, flag;
+       u8 ch, flag;
        struct  async_icount *icount;
-       int oe = 0;
+       bool overrun = false;
 
        icount = &info->icount;
 
@@ -230,7 +230,7 @@ static void receive_chars(struct serial_state *info)
           * should be ignored.
           */
          if (status & info->ignore_status_mask)
-           goto out;
+                 return;
 
          status &= info->read_status_mask;
 
@@ -251,15 +251,13 @@ static void receive_chars(struct serial_state *info)
             * reported immediately, and doesn't
             * affect the current character
             */
-            oe = 1;
+            overrun = true;
          }
        }
        tty_insert_flip_char(&info->tport, ch, flag);
-       if (oe == 1)
+       if (overrun)
                tty_insert_flip_char(&info->tport, 0, TTY_OVERRUN);
        tty_flip_buffer_push(&info->tport);
-out:
-       return;
 }
 
 static void transmit_chars(struct serial_state *info)
@@ -813,7 +811,7 @@ static void rs_flush_buffer(struct tty_struct *tty)
  * This function is used to send a high-priority XON/XOFF character to
  * the device
  */
-static void rs_send_xchar(struct tty_struct *tty, char ch)
+static void rs_send_xchar(struct tty_struct *tty, u8 ch)
 {
        struct serial_state *info = tty->driver_data;
         unsigned long flags;
index a067628e01c81843b6b68fbe03c169da6397f774..69508d7a4135fd32bd311fd6b9d7aae60de6a243 100644 (file)
@@ -49,7 +49,7 @@ struct ehv_bc_data {
        unsigned int tx_irq;
 
        spinlock_t lock;        /* lock for transmit buffer */
-       unsigned char buf[BUF_SIZE];    /* transmit circular buffer */
+       u8 buf[BUF_SIZE];       /* transmit circular buffer */
        unsigned int head;      /* circular buffer head */
        unsigned int tail;      /* circular buffer tail */
 
@@ -138,14 +138,17 @@ static int find_console_handle(void)
 
 static unsigned int local_ev_byte_channel_send(unsigned int handle,
                                               unsigned int *count,
-                                              const char *p)
+                                              const u8 *p)
 {
-       char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
+       u8 buffer[EV_BYTE_CHANNEL_MAX_BYTES];
        unsigned int c = *count;
 
+       /*
+        * ev_byte_channel_send() expects at least EV_BYTE_CHANNEL_MAX_BYTES
+        * (16 B) in the buffer. Fake it using a local buffer if needed.
+        */
        if (c < sizeof(buffer)) {
-               memcpy(buffer, p, c);
-               memset(&buffer[c], 0, sizeof(buffer) - c);
+               memcpy_and_pad(buffer, sizeof(buffer), p, c, 0);
                p = buffer;
        }
        return ev_byte_channel_send(handle, count, p);
@@ -163,7 +166,7 @@ static unsigned int local_ev_byte_channel_send(unsigned int handle,
  * has been sent, or if some error has occurred.
  *
  */
-static void byte_channel_spin_send(const char data)
+static void byte_channel_spin_send(const u8 data)
 {
        int ret, count;
 
@@ -471,8 +474,7 @@ static ssize_t ehv_bc_tty_write(struct tty_struct *ttys, const u8 *s,
 {
        struct ehv_bc_data *bc = ttys->driver_data;
        unsigned long flags;
-       unsigned int len;
-       unsigned int written = 0;
+       size_t len, written = 0;
 
        while (1) {
                spin_lock_irqsave(&bc->lock, flags);
index 4591f940b7a1511a807dc31141d8d4d179bd0d03..d27979eabfdfbee1397140db3e30f0ee8ab7467c 100644 (file)
@@ -50,10 +50,8 @@ static u32 goldfish_tty_line_count = 8;
 static u32 goldfish_tty_current_line_count;
 static struct goldfish_tty *goldfish_ttys;
 
-static void do_rw_io(struct goldfish_tty *qtty,
-                    unsigned long address,
-                    unsigned int count,
-                    int is_write)
+static void do_rw_io(struct goldfish_tty *qtty, unsigned long address,
+                    size_t count, bool is_write)
 {
        unsigned long irq_flags;
        void __iomem *base = qtty->base;
@@ -73,10 +71,8 @@ static void do_rw_io(struct goldfish_tty *qtty,
        spin_unlock_irqrestore(&qtty->lock, irq_flags);
 }
 
-static void goldfish_tty_rw(struct goldfish_tty *qtty,
-                           unsigned long addr,
-                           unsigned int count,
-                           int is_write)
+static void goldfish_tty_rw(struct goldfish_tty *qtty, unsigned long addr,
+                           size_t count, bool is_write)
 {
        dma_addr_t dma_handle;
        enum dma_data_direction dma_dir;
@@ -125,20 +121,18 @@ static void goldfish_tty_rw(struct goldfish_tty *qtty,
        }
 }
 
-static void goldfish_tty_do_write(int line, const u8 *buf, unsigned int count)
+static void goldfish_tty_do_write(int line, const u8 *buf, size_t count)
 {
        struct goldfish_tty *qtty = &goldfish_ttys[line];
-       unsigned long address = (unsigned long)(void *)buf;
 
-       goldfish_tty_rw(qtty, address, count, 1);
+       goldfish_tty_rw(qtty, (unsigned long)buf, count, true);
 }
 
 static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
 {
        struct goldfish_tty *qtty = dev_id;
        void __iomem *base = qtty->base;
-       unsigned long address;
-       unsigned char *buf;
+       u8 *buf;
        u32 count;
 
        count = gf_ioread32(base + GOLDFISH_TTY_REG_BYTES_READY);
@@ -147,8 +141,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
 
        count = tty_prepare_flip_string(&qtty->port, &buf, count);
 
-       address = (unsigned long)(void *)buf;
-       goldfish_tty_rw(qtty, address, count, 0);
+       goldfish_tty_rw(qtty, (unsigned long)buf, count, false);
 
        tty_flip_buffer_push(&qtty->port);
        return IRQ_HANDLED;
index 4f9264d005c06d55e5853bf9b12e329907648ca3..6e05c5c7bca1ad258502eaf158b94534c1cdd23d 100644 (file)
@@ -108,7 +108,7 @@ config HVC_DCC_SERIALIZE_SMP
 
 config HVC_RISCV_SBI
        bool "RISC-V SBI console support"
-       depends on RISCV_SBI_V01
+       depends on RISCV_SBI
        select HVC_DRIVER
        help
          This enables support for console output via RISC-V SBI calls, which
index 959fae54ca394bfc511efaa6d55476a7e0a9c11c..cd1f657f782df218b12e315dd3fdab79474d2bec 100644 (file)
@@ -922,8 +922,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
                        return ERR_PTR(err);
        }
 
-       hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
-                       GFP_KERNEL);
+       hp = kzalloc(struct_size(hp, outbuf, outbuf_size), GFP_KERNEL);
        if (!hp)
                return ERR_PTR(-ENOMEM);
 
@@ -931,7 +930,6 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
        hp->data = data;
        hp->ops = ops;
        hp->outbuf_size = outbuf_size;
-       hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
 
        tty_port_init(&hp->port);
        hp->port.ops = &hvc_port_ops;
@@ -976,7 +974,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
 }
 EXPORT_SYMBOL_GPL(hvc_alloc);
 
-int hvc_remove(struct hvc_struct *hp)
+void hvc_remove(struct hvc_struct *hp)
 {
        unsigned long flags;
        struct tty_struct *tty;
@@ -1010,7 +1008,6 @@ int hvc_remove(struct hvc_struct *hp)
                tty_vhangup(tty);
                tty_kref_put(tty);
        }
-       return 0;
 }
 EXPORT_SYMBOL_GPL(hvc_remove);
 
index 9668f821db01d852e6d937193242ca4104729fa2..cf4c1af08a7c926687881b2021b10d5f88a1f703 100644 (file)
@@ -37,7 +37,6 @@ struct hvc_struct {
        spinlock_t lock;
        int index;
        int do_wakeup;
-       char *outbuf;
        int outbuf_size;
        int n_outbuf;
        uint32_t vtermno;
@@ -48,12 +47,13 @@ struct hvc_struct {
        struct work_struct tty_resize;
        struct list_head next;
        unsigned long flags;
+       u8 outbuf[] __aligned(sizeof(long));
 };
 
 /* implemented by a low level driver */
 struct hv_ops {
-       int (*get_chars)(uint32_t vtermno, char *buf, int count);
-       int (*put_chars)(uint32_t vtermno, const char *buf, int count);
+       ssize_t (*get_chars)(uint32_t vtermno, u8 *buf, size_t count);
+       ssize_t (*put_chars)(uint32_t vtermno, const u8 *buf, size_t count);
        int (*flush)(uint32_t vtermno, bool wait);
 
        /* Callbacks for notification. Called in open, close and hangup */
@@ -77,7 +77,7 @@ extern int hvc_instantiate(uint32_t vtermno, int index,
 extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data,
                                     const struct hv_ops *ops, int outbuf_size);
 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
-extern int hvc_remove(struct hvc_struct *hp);
+extern void hvc_remove(struct hvc_struct *hp);
 
 /* data available */
 int hvc_poll(struct hvc_struct *hp);
index 1751108cf763ebdab3266f51006e9aa11e04b620..dfc5c9c38f07460c03122967318156ad6a720819 100644 (file)
 /* Lock to serialize access to DCC fifo */
 static DEFINE_SPINLOCK(dcc_lock);
 
-static DEFINE_KFIFO(inbuf, unsigned char, DCC_INBUF_SIZE);
-static DEFINE_KFIFO(outbuf, unsigned char, DCC_OUTBUF_SIZE);
+static DEFINE_KFIFO(inbuf, u8, DCC_INBUF_SIZE);
+static DEFINE_KFIFO(outbuf, u8, DCC_OUTBUF_SIZE);
 
-static void dcc_uart_console_putchar(struct uart_port *port, unsigned char ch)
+static void dcc_uart_console_putchar(struct uart_port *port, u8 ch)
 {
        while (__dcc_getstatus() & DCC_STATUS_TX)
                cpu_relax();
@@ -47,6 +47,14 @@ static void dcc_early_write(struct console *con, const char *s, unsigned n)
 static int __init dcc_early_console_setup(struct earlycon_device *device,
                                          const char *opt)
 {
+       unsigned int count = 0x4000000;
+
+       while (--count && (__dcc_getstatus() & DCC_STATUS_TX))
+               cpu_relax();
+
+       if (__dcc_getstatus() & DCC_STATUS_TX)
+               return -ENODEV;
+
        device->con->write = dcc_early_write;
 
        return 0;
@@ -54,9 +62,9 @@ static int __init dcc_early_console_setup(struct earlycon_device *device,
 
 EARLYCON_DECLARE(dcc, dcc_early_console_setup);
 
-static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
+static ssize_t hvc_dcc_put_chars(uint32_t vt, const u8 *buf, size_t count)
 {
-       int i;
+       size_t i;
 
        for (i = 0; i < count; i++) {
                while (__dcc_getstatus() & DCC_STATUS_TX)
@@ -68,9 +76,9 @@ static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
        return count;
 }
 
-static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
+static ssize_t hvc_dcc_get_chars(uint32_t vt, u8 *buf, size_t count)
 {
-       int i;
+       size_t i;
 
        for (i = 0; i < count; ++i)
                if (__dcc_getstatus() & DCC_STATUS_RX)
@@ -149,8 +157,8 @@ static DECLARE_WORK(dcc_pwork, dcc_put_work);
  */
 static void dcc_get_work(struct work_struct *work)
 {
-       unsigned char ch;
        unsigned long irqflags;
+       u8 ch;
 
        /*
         * Read characters from DCC and put them into the input FIFO, as
@@ -172,10 +180,10 @@ static DECLARE_WORK(dcc_gwork, dcc_get_work);
  * Write characters directly to the DCC if we're on core 0 and the FIFO
  * is empty, or write them to the FIFO if we're not.
  */
-static int hvc_dcc0_put_chars(u32 vt, const char *buf, int count)
+static ssize_t hvc_dcc0_put_chars(u32 vt, const u8 *buf, size_t count)
 {
-       int len;
        unsigned long irqflags;
+       ssize_t len;
 
        if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
                return hvc_dcc_put_chars(vt, buf, count);
@@ -211,10 +219,10 @@ static int hvc_dcc0_put_chars(u32 vt, const char *buf, int count)
  * Read characters directly from the DCC if we're on core 0 and the FIFO
  * is empty, or read them from the FIFO if we're not.
  */
-static int hvc_dcc0_get_chars(u32 vt, char *buf, int count)
+static ssize_t hvc_dcc0_get_chars(u32 vt, u8 *buf, size_t count)
 {
-       int len;
        unsigned long irqflags;
+       ssize_t len;
 
        if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
                return hvc_dcc_get_chars(vt, buf, count);
index 543f35ddf523c4b99a6396f51d8c8e51207c2e53..fdecc0d63731aa268a08ef00bd12cded3b918eca 100644 (file)
@@ -215,11 +215,11 @@ static void destroy_tty_buffer_list(struct list_head *list)
  * If the IUCV path has been severed, then -EPIPE is returned to cause a
  * hang up (that is issued by the HVC layer).
  */
-static int hvc_iucv_write(struct hvc_iucv_private *priv,
-                         char *buf, int count, int *has_more_data)
+static ssize_t hvc_iucv_write(struct hvc_iucv_private *priv,
+                             u8 *buf, size_t count, int *has_more_data)
 {
        struct iucv_tty_buffer *rb;
-       int written;
+       ssize_t written;
        int rc;
 
        /* immediately return if there is no IUCV connection */
@@ -312,10 +312,10 @@ out_written:
  *             the routine locks the struct hvc_iucv_private->lock to call
  *             helper functions.
  */
-static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
+static ssize_t hvc_iucv_get_chars(uint32_t vtermno, u8 *buf, size_t count)
 {
        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
-       int written;
+       ssize_t written;
        int has_more_data;
 
        if (count <= 0)
@@ -352,8 +352,8 @@ static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
  * If an existing IUCV communicaton path has been severed, -EPIPE is returned
  * (that can be passed to HVC layer to cause a tty hangup).
  */
-static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
-                         int count)
+static ssize_t hvc_iucv_queue(struct hvc_iucv_private *priv, const u8 *buf,
+                             size_t count)
 {
        size_t len;
 
@@ -455,12 +455,12 @@ static void hvc_iucv_sndbuf_work(struct work_struct *work)
  * Locking:    The method gets called under an irqsave() spinlock; and
  *             locks struct hvc_iucv_private->lock.
  */
-static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvc_iucv_put_chars(uint32_t vtermno, const u8 *buf, size_t count)
 {
        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
        int queued;
 
-       if (count <= 0)
+       if (!count)
                return 0;
 
        if (!priv)
index 992e199e0ea8057b97e84fd371ce0aa50af20bf3..095c33ad10f83cb14c87aeeeef33503674d5682e 100644 (file)
@@ -58,7 +58,7 @@ static const struct hv_ops hvc_opal_raw_ops = {
        .notifier_hangup = notifier_hangup_irq,
 };
 
-static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
+static ssize_t hvc_opal_hvsi_get_chars(uint32_t vtermno, u8 *buf, size_t count)
 {
        struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
 
@@ -68,7 +68,8 @@ static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
        return hvsilib_get_chars(&pv->hvsi, buf, count);
 }
 
-static int hvc_opal_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvc_opal_hvsi_put_chars(uint32_t vtermno, const u8 *buf,
+                                      size_t count)
 {
        struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
 
@@ -232,24 +233,21 @@ static int hvc_opal_probe(struct platform_device *dev)
        return 0;
 }
 
-static int hvc_opal_remove(struct platform_device *dev)
+static void hvc_opal_remove(struct platform_device *dev)
 {
        struct hvc_struct *hp = dev_get_drvdata(&dev->dev);
-       int rc, termno;
+       int termno;
 
        termno = hp->vtermno;
-       rc = hvc_remove(hp);
-       if (rc == 0) {
-               if (hvc_opal_privs[termno] != &hvc_opal_boot_priv)
-                       kfree(hvc_opal_privs[termno]);
-               hvc_opal_privs[termno] = NULL;
-       }
-       return rc;
+       hvc_remove(hp);
+       if (hvc_opal_privs[termno] != &hvc_opal_boot_priv)
+               kfree(hvc_opal_privs[termno]);
+       hvc_opal_privs[termno] = NULL;
 }
 
 static struct platform_driver hvc_opal_driver = {
        .probe          = hvc_opal_probe,
-       .remove         = hvc_opal_remove,
+       .remove_new     = hvc_opal_remove,
        .driver         = {
                .name   = hvc_opal_name,
                .of_match_table = hvc_opal_match,
index 31f53fa77e4af5f1c4cd1ecd4677210d1d515cf0..cede8a57259492bfc15fa8ff36286371c8c6139d 100644 (file)
@@ -15,9 +15,9 @@
 
 #include "hvc_console.h"
 
-static int hvc_sbi_tty_put(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvc_sbi_tty_put(uint32_t vtermno, const u8 *buf, size_t count)
 {
-       int i;
+       size_t i;
 
        for (i = 0; i < count; i++)
                sbi_console_putchar(buf[i]);
@@ -25,9 +25,10 @@ static int hvc_sbi_tty_put(uint32_t vtermno, const char *buf, int count)
        return i;
 }
 
-static int hvc_sbi_tty_get(uint32_t vtermno, char *buf, int count)
+static ssize_t hvc_sbi_tty_get(uint32_t vtermno, u8 *buf, size_t count)
 {
-       int i, c;
+       size_t i;
+       int c;
 
        for (i = 0; i < count; i++) {
                c = sbi_console_getchar();
@@ -39,21 +40,44 @@ static int hvc_sbi_tty_get(uint32_t vtermno, char *buf, int count)
        return i;
 }
 
-static const struct hv_ops hvc_sbi_ops = {
+static const struct hv_ops hvc_sbi_v01_ops = {
        .get_chars = hvc_sbi_tty_get,
        .put_chars = hvc_sbi_tty_put,
 };
 
-static int __init hvc_sbi_init(void)
+static ssize_t hvc_sbi_dbcn_tty_put(uint32_t vtermno, const u8 *buf, size_t count)
 {
-       return PTR_ERR_OR_ZERO(hvc_alloc(0, 0, &hvc_sbi_ops, 16));
+       return sbi_debug_console_write(buf, count);
 }
-device_initcall(hvc_sbi_init);
 
-static int __init hvc_sbi_console_init(void)
+static ssize_t hvc_sbi_dbcn_tty_get(uint32_t vtermno, u8 *buf, size_t count)
 {
-       hvc_instantiate(0, 0, &hvc_sbi_ops);
+       return sbi_debug_console_read(buf, count);
+}
+
+static const struct hv_ops hvc_sbi_dbcn_ops = {
+       .put_chars = hvc_sbi_dbcn_tty_put,
+       .get_chars = hvc_sbi_dbcn_tty_get,
+};
+
+static int __init hvc_sbi_init(void)
+{
+       int err;
+
+       if (sbi_debug_console_available) {
+               err = PTR_ERR_OR_ZERO(hvc_alloc(0, 0, &hvc_sbi_dbcn_ops, 256));
+               if (err)
+                       return err;
+               hvc_instantiate(0, 0, &hvc_sbi_dbcn_ops);
+       } else if (IS_ENABLED(CONFIG_RISCV_SBI_V01)) {
+               err = PTR_ERR_OR_ZERO(hvc_alloc(0, 0, &hvc_sbi_v01_ops, 256));
+               if (err)
+                       return err;
+               hvc_instantiate(0, 0, &hvc_sbi_v01_ops);
+       } else {
+               return -ENODEV;
+       }
 
        return 0;
 }
-console_initcall(hvc_sbi_console_init);
+device_initcall(hvc_sbi_init);
index 184d325abeeda4bc4ef8939565d1ea0e4094ebc9..a0b90275b37f60ad1aa4b29e10ec5d6bc9a634a8 100644 (file)
@@ -31,10 +31,10 @@ static struct hvc_struct *hvc_rtas_dev;
 static int rtascons_put_char_token = RTAS_UNKNOWN_SERVICE;
 static int rtascons_get_char_token = RTAS_UNKNOWN_SERVICE;
 
-static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf,
-               int count)
+static ssize_t hvc_rtas_write_console(uint32_t vtermno, const u8 *buf,
+                                     size_t count)
 {
-       int i;
+       size_t i;
 
        for (i = 0; i < count; i++) {
                if (rtas_call(rtascons_put_char_token, 1, 1, NULL, buf[i]))
@@ -44,9 +44,10 @@ static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf,
        return i;
 }
 
-static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
+static ssize_t hvc_rtas_read_console(uint32_t vtermno, u8 *buf, size_t count)
 {
-       int i, c;
+       size_t i;
+       int c;
 
        for (i = 0; i < count; i++) {
                if (rtas_call(rtascons_get_char_token, 0, 2, &c))
index ff0dcc56413c4c98b339ac99306b4adca7a480fa..fdc2699b78dc4894d109e30ae59332204634d4a9 100644 (file)
@@ -19,9 +19,9 @@
 
 static struct hvc_struct *hvc_udbg_dev;
 
-static int hvc_udbg_put(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvc_udbg_put(uint32_t vtermno, const u8 *buf, size_t count)
 {
-       int i;
+       size_t i;
 
        for (i = 0; i < count && udbg_putc; i++)
                udbg_putc(buf[i]);
@@ -29,9 +29,10 @@ static int hvc_udbg_put(uint32_t vtermno, const char *buf, int count)
        return i;
 }
 
-static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
+static ssize_t hvc_udbg_get(uint32_t vtermno, u8 *buf, size_t count)
 {
-       int i, c;
+       size_t i;
+       int c;
 
        if (!udbg_getc_poll)
                return 0;
index 736b230f5ec04337807d89b6a81669c83db38d0e..47930601a26a8f455c4754d5a8b21380a2b682d7 100644 (file)
@@ -58,20 +58,20 @@ struct hvterm_priv {
        hv_protocol_t           proto;  /* Raw data or HVSI packets */
        struct hvsi_priv        hvsi;   /* HVSI specific data */
        spinlock_t              buf_lock;
-       char                    buf[SIZE_VIO_GET_CHARS];
-       int                     left;
-       int                     offset;
+       u8                      buf[SIZE_VIO_GET_CHARS];
+       size_t                  left;
+       size_t                  offset;
 };
 static struct hvterm_priv *hvterm_privs[MAX_NR_HVC_CONSOLES];
 /* For early boot console */
 static struct hvterm_priv hvterm_priv0;
 
-static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
+static ssize_t hvterm_raw_get_chars(uint32_t vtermno, u8 *buf, size_t count)
 {
        struct hvterm_priv *pv = hvterm_privs[vtermno];
        unsigned long i;
        unsigned long flags;
-       int got;
+       size_t got;
 
        if (WARN_ON(!pv))
                return 0;
@@ -115,7 +115,8 @@ static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
  *       you are sending fewer chars.
  * @count: number of chars to send.
  */
-static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvterm_raw_put_chars(uint32_t vtermno, const u8 *buf,
+                                   size_t count)
 {
        struct hvterm_priv *pv = hvterm_privs[vtermno];
 
@@ -133,7 +134,7 @@ static const struct hv_ops hvterm_raw_ops = {
        .notifier_hangup = notifier_hangup_irq,
 };
 
-static int hvterm_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
+static ssize_t hvterm_hvsi_get_chars(uint32_t vtermno, u8 *buf, size_t count)
 {
        struct hvterm_priv *pv = hvterm_privs[vtermno];
 
@@ -143,7 +144,8 @@ static int hvterm_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
        return hvsilib_get_chars(&pv->hvsi, buf, count);
 }
 
-static int hvterm_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvterm_hvsi_put_chars(uint32_t vtermno, const u8 *buf,
+                                    size_t count)
 {
        struct hvterm_priv *pv = hvterm_privs[vtermno];
 
index 34c01874f45beb23d8470a83acbd38fa9221df72..0e497501f8e31bc2522fe8fcba85b3ec3e832d47 100644 (file)
@@ -84,13 +84,13 @@ static inline void notify_daemon(struct xencons_info *cons)
        notify_remote_via_evtchn(cons->evtchn);
 }
 
-static int __write_console(struct xencons_info *xencons,
-               const char *data, int len)
+static ssize_t __write_console(struct xencons_info *xencons,
+                              const u8 *data, size_t len)
 {
        XENCONS_RING_IDX cons, prod;
        struct xencons_interface *intf = xencons->intf;
-       int sent = 0;
        unsigned long flags;
+       size_t sent = 0;
 
        spin_lock_irqsave(&xencons->ring_lock, flags);
        cons = intf->out_cons;
@@ -115,10 +115,11 @@ static int __write_console(struct xencons_info *xencons,
        return sent;
 }
 
-static int domU_write_console(uint32_t vtermno, const char *data, int len)
+static ssize_t domU_write_console(uint32_t vtermno, const u8 *data, size_t len)
 {
-       int ret = len;
        struct xencons_info *cons = vtermno_to_xencons(vtermno);
+       size_t ret = len;
+
        if (cons == NULL)
                return -EINVAL;
 
@@ -129,7 +130,7 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
         * kernel is crippled.
         */
        while (len) {
-               int sent = __write_console(cons, data, len);
+               ssize_t sent = __write_console(cons, data, len);
 
                if (sent < 0)
                        return sent;
@@ -144,14 +145,14 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
        return ret;
 }
 
-static int domU_read_console(uint32_t vtermno, char *buf, int len)
+static ssize_t domU_read_console(uint32_t vtermno, u8 *buf, size_t len)
 {
        struct xencons_interface *intf;
        XENCONS_RING_IDX cons, prod;
-       int recv = 0;
        struct xencons_info *xencons = vtermno_to_xencons(vtermno);
        unsigned int eoiflag = 0;
        unsigned long flags;
+       size_t recv = 0;
 
        if (xencons == NULL)
                return -EINVAL;
@@ -209,7 +210,7 @@ static const struct hv_ops domU_hvc_ops = {
        .notifier_hangup = notifier_hangup_irq,
 };
 
-static int dom0_read_console(uint32_t vtermno, char *buf, int len)
+static ssize_t dom0_read_console(uint32_t vtermno, u8 *buf, size_t len)
 {
        return HYPERVISOR_console_io(CONSOLEIO_read, len, buf);
 }
@@ -218,9 +219,9 @@ static int dom0_read_console(uint32_t vtermno, char *buf, int len)
  * Either for a dom0 to write to the system console, or a domU with a
  * debug version of Xen
  */
-static int dom0_write_console(uint32_t vtermno, const char *str, int len)
+static ssize_t dom0_write_console(uint32_t vtermno, const u8 *str, size_t len)
 {
-       int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
+       int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (u8 *)str);
        if (rc < 0)
                return rc;
 
index 09289c8154aec41cb19b5bf6bf297ccdbb734547..22e1bc4d8a66835dc642e0f5bb67fc3360de0caa 100644 (file)
@@ -12,7 +12,7 @@ static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
        packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
 
        /* Assumes that always succeeds, works in practice */
-       return pv->put_chars(pv->termno, (char *)packet, packet->len);
+       return pv->put_chars(pv->termno, (u8 *)packet, packet->len);
 }
 
 static void hvsi_start_handshake(struct hvsi_priv *pv)
@@ -178,9 +178,10 @@ static int hvsi_get_packet(struct hvsi_priv *pv)
        return 0;
 }
 
-int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count)
+ssize_t hvsilib_get_chars(struct hvsi_priv *pv, u8 *buf, size_t count)
 {
-       unsigned int tries, read = 0;
+       unsigned int tries;
+       size_t read = 0;
 
        if (WARN_ON(!pv))
                return -ENXIO;
@@ -199,7 +200,7 @@ int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count)
        for (tries = 1; count && tries < 2; tries++) {
                /* Consume existing data packet */
                if (pv->inbuf_pktlen) {
-                       unsigned int l = min(count, (int)pv->inbuf_pktlen);
+                       size_t l = min(count, pv->inbuf_pktlen);
                        memcpy(&buf[read], &pv->inbuf[pv->inbuf_cur], l);
                        pv->inbuf_cur += l;
                        pv->inbuf_pktlen -= l;
@@ -228,10 +229,11 @@ int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count)
        return read;
 }
 
-int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count)
+ssize_t hvsilib_put_chars(struct hvsi_priv *pv, const u8 *buf, size_t count)
 {
        struct hvsi_data dp;
-       int rc, adjcount = min(count, HVSI_MAX_OUTGOING_DATA);
+       size_t adjcount = min_t(size_t, count, HVSI_MAX_OUTGOING_DATA);
+       int rc;
 
        if (WARN_ON(!pv))
                return -ENODEV;
@@ -411,9 +413,9 @@ void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp)
 }
 
 void hvsilib_init(struct hvsi_priv *pv,
-                 int (*get_chars)(uint32_t termno, char *buf, int count),
-                 int (*put_chars)(uint32_t termno, const char *buf,
-                                  int count),
+                 ssize_t (*get_chars)(uint32_t termno, u8 *buf, size_t count),
+                 ssize_t (*put_chars)(uint32_t termno, const u8 *buf,
+                                      size_t count),
                  int termno, int is_console)
 {
        memset(pv, 0, sizeof(*pv));
index 73818bb64416759ed32cbca23037be1bd315540e..a5728a5b3f83e61fa5e3221c2db1ffaf95549e8e 100644 (file)
@@ -49,9 +49,6 @@ struct ipw_dev {
 
        void __iomem *common_memory;
 
-       /* Reference to attribute memory, containing CIS data */
-       void *attribute_memory;
-
        /* Hardware context */
        struct ipw_hardware *hardware;
        /* Network layer context */
index 369ec71c24ef056acb03b3e0a4897eff9ccd185d..aac80b69a069c38b12f85c8bcb4475e36729d83e 100644 (file)
@@ -213,16 +213,16 @@ struct fdc_word {
  */
 
 /* ranges >= 1 && sizes[0] >= 1 */
-static struct fdc_word mips_ejtag_fdc_encode(const char **ptrs,
+static struct fdc_word mips_ejtag_fdc_encode(const u8 **ptrs,
                                             unsigned int *sizes,
                                             unsigned int ranges)
 {
        struct fdc_word word = { 0, 0 };
-       const char **ptrs_end = ptrs + ranges;
+       const u8 **ptrs_end = ptrs + ranges;
 
        for (; ptrs < ptrs_end; ++ptrs) {
-               const char *ptr = *(ptrs++);
-               const char *end = ptr + *(sizes++);
+               const u8 *ptr = *(ptrs++);
+               const u8 *end = ptr + *(sizes++);
 
                for (; ptr < end; ++ptr) {
                        word.word |= (u8)*ptr << (8*word.bytes);
@@ -417,7 +417,7 @@ static unsigned int mips_ejtag_fdc_put_chan(struct mips_ejtag_fdc_tty *priv,
 {
        struct mips_ejtag_fdc_tty_port *dport;
        struct tty_struct *tty;
-       const char *ptrs[2];
+       const u8 *ptrs[2];
        unsigned int sizes[2] = { 0 };
        struct fdc_word word = { .bytes = 0 };
        unsigned long flags;
index bf3f87ba3a92ce6fe5f238a3cb5920647ee882cb..ebaada8db92944c0fc8f44259cd3e2048a0b14cc 100644 (file)
@@ -514,7 +514,7 @@ static void MoxaPortLineCtrl(struct moxa_port *, bool, bool);
 static void MoxaPortFlowCtrl(struct moxa_port *, int, int, int, int, int);
 static int MoxaPortLineStatus(struct moxa_port *);
 static void MoxaPortFlushData(struct moxa_port *, int);
-static int MoxaPortWriteData(struct tty_struct *, const unsigned char *, int);
+static ssize_t MoxaPortWriteData(struct tty_struct *, const u8 *, size_t);
 static int MoxaPortReadData(struct moxa_port *);
 static unsigned int MoxaPortTxQueue(struct moxa_port *);
 static int MoxaPortRxQueue(struct moxa_port *);
@@ -1933,10 +1933,10 @@ static void MoxaPortFlushData(struct moxa_port *port, int mode)
  *
  *      Function 20:    Write data.
  *      Syntax:
- *      int  MoxaPortWriteData(int port, unsigned char * buffer, int length);
+ *      ssize_t  MoxaPortWriteData(int port, u8 *buffer, size_t length);
  *           int port           : port number (0 - 127)
- *           unsigned char * buffer     : pointer to write data buffer.
- *           int length         : write data length
+ *           u8 *buffer         : pointer to write data buffer.
+ *           size_t length      : write data length
  *
  *           return:    0 - length      : real write data length
  *
@@ -2163,11 +2163,12 @@ static int MoxaPortLineStatus(struct moxa_port *port)
        return val;
 }
 
-static int MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer, int len)
+static ssize_t MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer,
+                                size_t len)
 {
        struct moxa_port *port = tty->driver_data;
        void __iomem *baseAddr, *ofsAddr, *ofs;
-       unsigned int c, total;
+       size_t c, total;
        u16 head, tail, tx_mask, spage, epage;
        u16 pageno, pageofs, bufhead;
 
@@ -2224,8 +2225,8 @@ static int MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer, int len)
 static int MoxaPortReadData(struct moxa_port *port)
 {
        struct tty_struct *tty = port->port.tty;
-       unsigned char *dst;
        void __iomem *baseAddr, *ofsAddr, *ofs;
+       u8 *dst;
        unsigned int count, len, total;
        u16 tail, rx_mask, spage, epage;
        u16 pageno, pageofs, bufhead, head;
index 6ce7f259968fa40fe835c2fa6996bfd8016d335b..458bb1280ebf9d4d04e7fc291898b84e01cdd7e5 100644 (file)
@@ -264,7 +264,7 @@ struct mxser_port {
        u8 rx_low_water;
        int type;               /* UART type */
 
-       unsigned char x_char;   /* xon/xoff character */
+       u8 x_char;              /* xon/xoff character */
        u8 IER;                 /* Interrupt Enable Register */
        u8 MCR;                 /* Modem control register */
        u8 FCR;                 /* FIFO control register */
@@ -905,7 +905,7 @@ static ssize_t mxser_write(struct tty_struct *tty, const u8 *buf, size_t count)
 {
        struct mxser_port *info = tty->driver_data;
        unsigned long flags;
-       int written;
+       size_t written;
        bool is_empty;
 
        spin_lock_irqsave(&info->slock, flags);
@@ -1521,7 +1521,7 @@ static u8 mxser_receive_chars_old(struct tty_struct *tty,
                        if (++ignored > 100)
                                break;
                } else {
-                       char flag = 0;
+                       u8 flag = 0;
                        if (status & UART_LSR_BRK_ERROR_BITS) {
                                if (status & UART_LSR_BI) {
                                        flag = TTY_BREAK;
@@ -1585,7 +1585,7 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
 
        count = port->xmit_fifo_size;
        do {
-               unsigned char c;
+               u8 c;
 
                if (!kfifo_get(&port->port.xmit_fifo, &c))
                        break;
index a3ab3946e4add17a08f0afbda3964400865fcb3b..4036566febcba50fd8070249ae515f4456df8d60 100644 (file)
@@ -124,8 +124,8 @@ struct gsm_msg {
        u8 addr;                /* DLCI address + flags */
        u8 ctrl;                /* Control byte + flags */
        unsigned int len;       /* Length of data block (can be zero) */
-       unsigned char *data;    /* Points into buffer but not at the start */
-       unsigned char buffer[];
+       u8 *data;       /* Points into buffer but not at the start */
+       u8 buffer[];
 };
 
 enum gsm_dlci_state {
@@ -283,7 +283,7 @@ struct gsm_mux {
        /* Bits for GSM mode decoding */
 
        /* Framing Layer */
-       unsigned char *buf;
+       u8 *buf;
        enum gsm_mux_state state;
        unsigned int len;
        unsigned int address;
@@ -2856,7 +2856,7 @@ invalid:
  *     Receive bytes in gsm mode 0
  */
 
-static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+static void gsm0_receive(struct gsm_mux *gsm, u8 c)
 {
        unsigned int len;
 
@@ -2947,7 +2947,7 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
  *     Receive bytes in mode 1 (Advanced option)
  */
 
-static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+static void gsm1_receive(struct gsm_mux *gsm, u8 c)
 {
        /* handle XON/XOFF */
        if ((c & ISO_IEC_646_MASK) == XON) {
@@ -3541,7 +3541,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const u8 *cp,
                              const u8 *fp, size_t count)
 {
        struct gsm_mux *gsm = tty->disc_data;
-       char flags = TTY_NORMAL;
+       u8 flags = TTY_NORMAL;
 
        if (debug & DBG_DATA)
                gsm_hex_dump_bytes(__func__, cp, count);
@@ -3711,7 +3711,7 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
 {
        struct gsm_mux *gsm = tty->disc_data;
        unsigned long flags;
-       int space;
+       size_t space;
        int ret;
 
        if (!gsm)
@@ -3909,8 +3909,7 @@ static void gsm_mux_net_tx_timeout(struct net_device *net, unsigned int txqueue)
        net->stats.tx_errors++;
 }
 
-static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
-                               const unsigned char *in_buf, int size)
+static void gsm_mux_rx_netchar(struct gsm_dlci *dlci, const u8 *in_buf, int size)
 {
        struct net_device *net = dlci->net;
        struct sk_buff *skb;
index a670419efe7963c8e8630d05696c8046c10fe8b3..1615f074ab86c507b7152d60749c6e43d4a3194f 100644 (file)
 
 struct n_hdlc_buf {
        struct list_head  list_item;
-       int               count;
-       char              buf[];
+       size_t            count;
+       u8                buf[];
 };
 
 struct n_hdlc_buf_list {
@@ -263,9 +263,9 @@ static int n_hdlc_tty_open(struct tty_struct *tty)
  */
 static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
 {
-       register int actual;
        unsigned long flags;
        struct n_hdlc_buf *tbuf;
+       ssize_t actual;
 
 check_again:
 
@@ -281,7 +281,7 @@ check_again:
 
        tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
        while (tbuf) {
-               pr_debug("sending frame %p, count=%d\n", tbuf, tbuf->count);
+               pr_debug("sending frame %p, count=%zu\n", tbuf, tbuf->count);
 
                /* Send the next block of data to device */
                set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
@@ -521,9 +521,9 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
                                const u8 *data, size_t count)
 {
        struct n_hdlc *n_hdlc = tty->disc_data;
-       int error = 0;
        DECLARE_WAITQUEUE(wait, current);
        struct n_hdlc_buf *tbuf;
+       ssize_t error = 0;
 
        pr_debug("%s() called count=%zd\n", __func__, count);
 
index 02cd40147b3a80c2a2d0c919f0b8fd2e5fd82ca7..e28a921c16374f7552e6dec6d324e7453173b6bb 100644 (file)
@@ -65,24 +65,8 @@ do {                                                 \
 #define DBG3(args...) DBG_(0x04, ##args)
 #define DBG4(args...) DBG_(0x08, ##args)
 
-/* TODO: rewrite to optimize macros... */
-
 #define TMP_BUF_MAX 256
 
-#define DUMP(buf__, len__)                                             \
-       do {                                                            \
-               char tbuf[TMP_BUF_MAX] = {0};                           \
-               if (len__ > 1) {                                        \
-                       u32 data_len = min_t(u32, len__, TMP_BUF_MAX);  \
-                       strscpy(tbuf, buf__, data_len);                 \
-                       if (tbuf[data_len - 2] == '\r')                 \
-                               tbuf[data_len - 2] = 'r';               \
-                       DBG1("SENDING: '%s' (%d+n)", tbuf, len__);      \
-               } else {                                                \
-                       DBG1("SENDING: '%s' (%d)", tbuf, len__);        \
-               }                                                       \
-       } while (0)
-
 /*    Defines */
 #define NOZOMI_NAME            "nozomi"
 #define NOZOMI_NAME_TTY                "nozomi_tty"
@@ -754,8 +738,6 @@ static int send_data(enum port_type index, struct nozomi *dc)
                return 0;
        }
 
-       /* DUMP(buf, size); */
-
        /* Write length + data */
        write_mem32(addr, (u32 *) &size, 4);
        write_mem32(addr + 4, (u32 *) dc->send_buf, size);
@@ -801,11 +783,10 @@ static int receive_data(enum port_type index, struct nozomi *dc)
                        tty_insert_flip_char(&port->port, buf[0], TTY_NORMAL);
                        size = 0;
                } else if (size < RECEIVE_BUF_MAX) {
-                       size -= tty_insert_flip_string(&port->port,
-                                       (char *)buf, size);
+                       size -= tty_insert_flip_string(&port->port, buf, size);
                } else {
-                       i = tty_insert_flip_string(&port->port,
-                                       (char *)buf, RECEIVE_BUF_MAX);
+                       i = tty_insert_flip_string(&port->port, buf,
+                                                  RECEIVE_BUF_MAX);
                        size -= i;
                        offset += i;
                }
@@ -1602,10 +1583,10 @@ static void ntty_hangup(struct tty_struct *tty)
 static ssize_t ntty_write(struct tty_struct *tty, const u8 *buffer,
                          size_t count)
 {
-       int rval = -EINVAL;
        struct nozomi *dc = get_dc_by_tty(tty);
        struct port *port = tty->driver_data;
        unsigned long flags;
+       size_t rval;
 
        if (!dc || !port)
                return -ENODEV;
index a5fdaf5e148efa666f60e2bc8e7f1998ac122e4b..822a5cd05566647d2bea82e2e978d05d214f79b4 100644 (file)
@@ -77,7 +77,7 @@ static bool is_serdev_device(const struct device *dev)
 static void serdev_ctrl_release(struct device *dev)
 {
        struct serdev_controller *ctrl = to_serdev_controller(dev);
-       ida_simple_remove(&ctrl_ida, ctrl->nr);
+       ida_free(&ctrl_ida, ctrl->nr);
        kfree(ctrl);
 }
 
@@ -225,8 +225,7 @@ EXPORT_SYMBOL_GPL(serdev_device_write_wakeup);
  * Return: The number of bytes written (less than count if not enough room in
  * the write buffer), or a negative errno on errors.
  */
-int serdev_device_write_buf(struct serdev_device *serdev,
-                           const unsigned char *buf, size_t count)
+int serdev_device_write_buf(struct serdev_device *serdev, const u8 *buf, size_t count)
 {
        struct serdev_controller *ctrl = serdev->ctrl;
 
@@ -259,13 +258,12 @@ EXPORT_SYMBOL_GPL(serdev_device_write_buf);
  * -ETIMEDOUT or -ERESTARTSYS if interrupted before any bytes were written, or
  * a negative errno on errors.
  */
-int serdev_device_write(struct serdev_device *serdev,
-                       const unsigned char *buf, size_t count,
-                       long timeout)
+ssize_t serdev_device_write(struct serdev_device *serdev, const u8 *buf,
+                           size_t count, long timeout)
 {
        struct serdev_controller *ctrl = serdev->ctrl;
-       int written = 0;
-       int ret;
+       size_t written = 0;
+       ssize_t ret;
 
        if (!ctrl || !ctrl->ops->write_buf || !serdev->ops->write_wakeup)
                return -EINVAL;
@@ -468,6 +466,7 @@ EXPORT_SYMBOL_GPL(serdev_device_alloc);
 
 /**
  * serdev_controller_alloc() - Allocate a new serdev controller
+ * @host:      serial port hardware controller device
  * @parent:    parent device
  * @size:      size of private data
  *
@@ -476,8 +475,9 @@ EXPORT_SYMBOL_GPL(serdev_device_alloc);
  * The allocated private data region may be accessed via
  * serdev_controller_get_drvdata()
  */
-struct serdev_controller *serdev_controller_alloc(struct device *parent,
-                                             size_t size)
+struct serdev_controller *serdev_controller_alloc(struct device *host,
+                                                 struct device *parent,
+                                                 size_t size)
 {
        struct serdev_controller *ctrl;
        int id;
@@ -489,7 +489,7 @@ struct serdev_controller *serdev_controller_alloc(struct device *parent,
        if (!ctrl)
                return NULL;
 
-       id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+       id = ida_alloc(&ctrl_ida, GFP_KERNEL);
        if (id < 0) {
                dev_err(parent,
                        "unable to allocate serdev controller identifier.\n");
@@ -502,7 +502,8 @@ struct serdev_controller *serdev_controller_alloc(struct device *parent,
        ctrl->dev.type = &serdev_ctrl_type;
        ctrl->dev.bus = &serdev_bus_type;
        ctrl->dev.parent = parent;
-       device_set_node(&ctrl->dev, dev_fwnode(parent));
+       ctrl->host = host;
+       device_set_node(&ctrl->dev, dev_fwnode(host));
        serdev_controller_set_drvdata(ctrl, &ctrl[1]);
 
        dev_set_name(&ctrl->dev, "serial%d", id);
@@ -665,7 +666,7 @@ static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
                acpi_get_parent(adev->handle, &lookup.controller_handle);
 
        /* Make sure controller and ResourceSource handle match */
-       if (!device_match_acpi_handle(ctrl->dev.parent, lookup.controller_handle))
+       if (!device_match_acpi_handle(ctrl->host, lookup.controller_handle))
                return -ENODEV;
 
        return 0;
@@ -730,7 +731,7 @@ static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
        bool skip;
        int ret;
 
-       if (!has_acpi_companion(ctrl->dev.parent))
+       if (!has_acpi_companion(ctrl->host))
                return -ENODEV;
 
        /*
@@ -739,7 +740,7 @@ static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
         * succeed in this case, so that the proper serdev devices can be
         * added "manually" later.
         */
-       ret = acpi_quirk_skip_serdev_enumeration(ctrl->dev.parent, &skip);
+       ret = acpi_quirk_skip_serdev_enumeration(ctrl->host, &skip);
        if (ret)
                return ret;
        if (skip)
index e3856814ce7758de5bd1214e03a1be7ad570b1bc..e94e090cf0a1a5aa31575b21de671c1fe8fb71f5 100644 (file)
@@ -74,7 +74,7 @@ static const struct tty_port_client_operations client_ops = {
  * Callback functions from the serdev core.
  */
 
-static int ttyport_write_buf(struct serdev_controller *ctrl, const unsigned char *data, size_t len)
+static ssize_t ttyport_write_buf(struct serdev_controller *ctrl, const u8 *data, size_t len)
 {
        struct serport *serport = serdev_controller_get_drvdata(ctrl);
        struct tty_struct *tty = serport->tty;
@@ -274,6 +274,7 @@ static const struct serdev_controller_ops ctrl_ops = {
 };
 
 struct device *serdev_tty_port_register(struct tty_port *port,
+                                       struct device *host,
                                        struct device *parent,
                                        struct tty_driver *drv, int idx)
 {
@@ -284,7 +285,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
        if (!port || !drv || !parent)
                return ERR_PTR(-ENODEV);
 
-       ctrl = serdev_controller_alloc(parent, sizeof(struct serport));
+       ctrl = serdev_controller_alloc(host, parent, sizeof(struct serport));
        if (!ctrl)
                return ERR_PTR(-ENOMEM);
        serport = serdev_controller_get_drvdata(ctrl);
index d7482ae33a1caea283ab6b568173a0782f446db3..8c2aaf7af7b75c23587f847f8bc9be9330ddadc2 100644 (file)
@@ -566,7 +566,7 @@ err_sysfs_remove:
        return rc;
 }
 
-static int aspeed_vuart_remove(struct platform_device *pdev)
+static void aspeed_vuart_remove(struct platform_device *pdev)
 {
        struct aspeed_vuart *vuart = platform_get_drvdata(pdev);
 
@@ -574,8 +574,6 @@ static int aspeed_vuart_remove(struct platform_device *pdev)
        aspeed_vuart_set_enabled(vuart, false);
        serial8250_unregister_port(vuart->line);
        sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
-
-       return 0;
 }
 
 static const struct of_device_id aspeed_vuart_table[] = {
@@ -590,7 +588,7 @@ static struct platform_driver aspeed_vuart_driver = {
                .of_match_table = aspeed_vuart_table,
        },
        .probe = aspeed_vuart_probe,
-       .remove = aspeed_vuart_remove,
+       .remove_new = aspeed_vuart_remove,
 };
 
 module_platform_driver(aspeed_vuart_driver);
index 15a2387a5b258f60420d86b4ac170630cf0c7446..beac6b340acef7cac394456e10b6244be7568deb 100644 (file)
@@ -119,6 +119,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
 
        /* get the clock - this also enables the HW */
        data->clk = devm_clk_get_optional(&pdev->dev, NULL);
+       if (IS_ERR(data->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(data->clk), "could not get clk\n");
 
        /* get the interrupt */
        ret = platform_get_irq(pdev, 0);
@@ -195,14 +197,12 @@ dis_clk:
        return ret;
 }
 
-static int bcm2835aux_serial_remove(struct platform_device *pdev)
+static void bcm2835aux_serial_remove(struct platform_device *pdev)
 {
        struct bcm2835aux_data *data = platform_get_drvdata(pdev);
 
        serial8250_unregister_port(data->line);
        clk_disable_unprepare(data->clk);
-
-       return 0;
 }
 
 static const struct bcm2835_aux_serial_driver_data bcm2835_acpi_data = {
@@ -228,7 +228,7 @@ static struct platform_driver bcm2835aux_serial_driver = {
                .acpi_match_table = bcm2835aux_serial_acpi_match,
        },
        .probe  = bcm2835aux_serial_probe,
-       .remove = bcm2835aux_serial_remove,
+       .remove_new = bcm2835aux_serial_remove,
 };
 module_platform_driver(bcm2835aux_serial_driver);
 
index 55dea2539c47956878f0336d02d9697cf6e43a35..504c4c02085776c610e8a6d9010f6982046c674a 100644 (file)
@@ -1121,7 +1121,7 @@ release_dma:
        return ret;
 }
 
-static int brcmuart_remove(struct platform_device *pdev)
+static void brcmuart_remove(struct platform_device *pdev)
 {
        struct brcmuart_priv *priv = platform_get_drvdata(pdev);
 
@@ -1131,7 +1131,6 @@ static int brcmuart_remove(struct platform_device *pdev)
        brcmuart_free_bufs(&pdev->dev, priv);
        if (priv->dma_enabled)
                brcmuart_arbitration(priv, 0);
-       return 0;
 }
 
 static int __maybe_unused brcmuart_suspend(struct device *dev)
@@ -1207,7 +1206,7 @@ static struct platform_driver brcmuart_platform_driver = {
                .of_match_table = brcmuart_dt_ids,
        },
        .probe          = brcmuart_probe,
-       .remove         = brcmuart_remove,
+       .remove_new     = brcmuart_remove,
 };
 
 static int __init brcmuart_init(void)
index 912733151858760bdab550be2bd261db296f68d3..b62ad9006780ce4b7266ffb0df6066ebecd60ecd 100644 (file)
@@ -883,7 +883,7 @@ static int serial8250_probe(struct platform_device *dev)
 /*
  * Remove serial ports registered against a platform device.
  */
-static int serial8250_remove(struct platform_device *dev)
+static void serial8250_remove(struct platform_device *dev)
 {
        int i;
 
@@ -893,7 +893,6 @@ static int serial8250_remove(struct platform_device *dev)
                if (up->port.dev == &dev->dev)
                        serial8250_unregister_port(i);
        }
-       return 0;
 }
 
 static int serial8250_suspend(struct platform_device *dev, pm_message_t state)
@@ -926,7 +925,7 @@ static int serial8250_resume(struct platform_device *dev)
 
 static struct platform_driver serial8250_isa_driver = {
        .probe          = serial8250_probe,
-       .remove         = serial8250_remove,
+       .remove_new     = serial8250_remove,
        .suspend        = serial8250_suspend,
        .resume         = serial8250_resume,
        .driver         = {
index e6218766d0c804a6b7230ecbc5604b4f7296b374..2d1f350a4bea2a86103d707cc322ded0f5941abb 100644 (file)
@@ -663,7 +663,7 @@ static int dw8250_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int dw8250_remove(struct platform_device *pdev)
+static void dw8250_remove(struct platform_device *pdev)
 {
        struct dw8250_data *data = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
@@ -680,8 +680,6 @@ static int dw8250_remove(struct platform_device *pdev)
 
        pm_runtime_disable(dev);
        pm_runtime_put_noidle(dev);
-
-       return 0;
 }
 
 static int dw8250_suspend(struct device *dev)
@@ -790,7 +788,7 @@ static struct platform_driver dw8250_platform_driver = {
                .acpi_match_table = dw8250_acpi_match,
        },
        .probe                  = dw8250_probe,
-       .remove                 = dw8250_remove,
+       .remove_new             = dw8250_remove,
 };
 
 module_platform_driver(dw8250_platform_driver);
index 84843e204a5e80e872ab840e9069cc73a018d38d..3e33ddf7bc8007af3fc7a27ae6a8eda4f5499883 100644 (file)
@@ -259,17 +259,6 @@ void dw8250_setup_port(struct uart_port *p)
        }
        up->capabilities |= UART_CAP_NOTEMT;
 
-       /*
-        * If the Component Version Register returns zero, we know that
-        * ADDITIONAL_FEATURES are not enabled. No need to go any further.
-        */
-       reg = dw8250_readl_ext(p, DW_UART_UCV);
-       if (!reg)
-               return;
-
-       dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
-               (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
-
        /* Preserve value written by firmware or bootloader  */
        old_dlf = dw8250_readl_ext(p, DW_UART_DLF);
        dw8250_writel_ext(p, DW_UART_DLF, ~0U);
@@ -282,6 +271,11 @@ void dw8250_setup_port(struct uart_port *p)
                p->set_divisor = dw8250_set_divisor;
        }
 
+       reg = dw8250_readl_ext(p, DW_UART_UCV);
+       if (reg)
+               dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
+                       (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
+
        reg = dw8250_readl_ext(p, DW_UART_CPR);
        if (!reg) {
                reg = data->pdata->cpr_val;
index ef5019e944eaa82cbaf85b98ef7a6de3f0d5c4c3..a754755100ffe1f11ca7ebfdb90d915e6c3a2187 100644 (file)
@@ -200,12 +200,11 @@ static int serial8250_em_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int serial8250_em_remove(struct platform_device *pdev)
+static void serial8250_em_remove(struct platform_device *pdev)
 {
        struct serial8250_em_priv *priv = platform_get_drvdata(pdev);
 
        serial8250_unregister_port(priv->line);
-       return 0;
 }
 
 static const struct of_device_id serial8250_em_dt_ids[] = {
@@ -220,7 +219,7 @@ static struct platform_driver serial8250_em_platform_driver = {
                .of_match_table = serial8250_em_dt_ids,
        },
        .probe                  = serial8250_em_probe,
-       .remove                 = serial8250_em_remove,
+       .remove_new             = serial8250_em_remove,
 };
 
 module_platform_driver(serial8250_em_platform_driver);
index 6085d356ad86d20b2e71d0f058c05ceedfce7f3f..23366f868ae3a8391f631e9952eb70baf9959076 100644 (file)
@@ -480,7 +480,7 @@ static int sealevel_rs485_config(struct uart_port *port, struct ktermios *termio
 }
 
 static const struct serial_rs485 generic_rs485_supported = {
-       .flags = SER_RS485_ENABLED,
+       .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
 };
 
 static const struct exar8250_platform exar8250_default_platform = {
@@ -524,7 +524,8 @@ static int iot2040_rs485_config(struct uart_port *port, struct ktermios *termios
 }
 
 static const struct serial_rs485 iot2040_rs485_supported = {
-       .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
+       .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
+                SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS,
 };
 
 static const struct property_entry iot2040_gpio_properties[] = {
index f522eb5026c979f03fabfa08cc97bc2be9fed99c..b4ed442082a8521b61ec6d87cc01864a3e9a475e 100644 (file)
@@ -51,7 +51,8 @@ int fsl8250_handle_irq(struct uart_port *port)
         * immediately and interrupt the CPU again. The hardware clears LSR.BI
         * when the next valid char is read.)
         */
-       if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
+       if (unlikely((iir & UART_IIR_ID) == UART_IIR_RLSI &&
+                    (up->lsr_saved_flags & UART_LSR_BI))) {
                up->lsr_saved_flags &= ~UART_LSR_BI;
                port->serial_in(port, UART_RX);
                uart_port_unlock_irqrestore(&up->port, flags);
@@ -159,12 +160,11 @@ static int fsl8250_acpi_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int fsl8250_acpi_remove(struct platform_device *pdev)
+static void fsl8250_acpi_remove(struct platform_device *pdev)
 {
        struct fsl8250_data *data = platform_get_drvdata(pdev);
 
        serial8250_unregister_port(data->line);
-       return 0;
 }
 
 static const struct acpi_device_id fsl_8250_acpi_id[] = {
@@ -179,7 +179,7 @@ static struct platform_driver fsl8250_platform_driver = {
                .acpi_match_table       = ACPI_PTR(fsl_8250_acpi_id),
        },
        .probe                  = fsl8250_acpi_probe,
-       .remove                 = fsl8250_acpi_remove,
+       .remove_new             = fsl8250_acpi_remove,
 };
 
 module_platform_driver(fsl8250_platform_driver);
index 4c4c4da73ad09c3c2bd03114e1f66b7949f5e266..a12f737924c0b79c5d85d13e413f412bb7ed6dd6 100644 (file)
@@ -320,14 +320,13 @@ out:
        return err;
 }
 
-static int ingenic_uart_remove(struct platform_device *pdev)
+static void ingenic_uart_remove(struct platform_device *pdev)
 {
        struct ingenic_uart_data *data = platform_get_drvdata(pdev);
 
        serial8250_unregister_port(data->line);
        clk_disable_unprepare(data->clk_module);
        clk_disable_unprepare(data->clk_baud);
-       return 0;
 }
 
 static const struct ingenic_uart_config jz4740_uart_config = {
@@ -368,7 +367,7 @@ static struct platform_driver ingenic_uart_platform_driver = {
                .of_match_table = of_match,
        },
        .probe                  = ingenic_uart_probe,
-       .remove                 = ingenic_uart_remove,
+       .remove_new             = ingenic_uart_remove,
 };
 
 module_platform_driver(ingenic_uart_platform_driver);
index d5a39e105a76f5bfbd9e3718c91d52ca5a79fbff..50c77c3dacf2c809dd1785e54a363c5381d605b9 100644 (file)
@@ -75,17 +75,16 @@ static int serial8250_ioc3_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int serial8250_ioc3_remove(struct platform_device *pdev)
+static void serial8250_ioc3_remove(struct platform_device *pdev)
 {
        struct ioc3_8250_data *data = platform_get_drvdata(pdev);
 
        serial8250_unregister_port(data->line);
-       return 0;
 }
 
 static struct platform_driver serial8250_ioc3_driver = {
        .probe  = serial8250_ioc3_probe,
-       .remove = serial8250_ioc3_remove,
+       .remove_new = serial8250_ioc3_remove,
        .driver = {
                .name = "ioc3-serial8250",
        }
index 6dc85aaba5d0b9c27df3048e13ce7d783f0b289d..8d728a6a5991412b23298368aa914971ea4b4261 100644 (file)
@@ -182,15 +182,13 @@ dis_clk_reg:
        return ret;
 }
 
-static int lpc18xx_serial_remove(struct platform_device *pdev)
+static void lpc18xx_serial_remove(struct platform_device *pdev)
 {
        struct lpc18xx_uart_data *data = platform_get_drvdata(pdev);
 
        serial8250_unregister_port(data->line);
        clk_disable_unprepare(data->clk_uart);
        clk_disable_unprepare(data->clk_reg);
-
-       return 0;
 }
 
 static const struct of_device_id lpc18xx_serial_match[] = {
@@ -201,7 +199,7 @@ MODULE_DEVICE_TABLE(of, lpc18xx_serial_match);
 
 static struct platform_driver lpc18xx_serial_driver = {
        .probe  = lpc18xx_serial_probe,
-       .remove = lpc18xx_serial_remove,
+       .remove_new = lpc18xx_serial_remove,
        .driver = {
                .name = "lpc18xx-uart",
                .of_match_table = lpc18xx_serial_match,
index 0e43bdfb745989c72a3d015b29b7cf8505777d42..776ec1ef29d62b9f9a3c57a43d571f7808b6096b 100644 (file)
@@ -287,17 +287,14 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
                return 0;
        }
 
-       rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
+       rx_param = devm_kmemdup(dev, &lpss->dma_param, sizeof(*rx_param), GFP_KERNEL);
        if (!rx_param)
                return -ENOMEM;
 
-       tx_param = devm_kzalloc(dev, sizeof(*tx_param), GFP_KERNEL);
+       tx_param = devm_kmemdup(dev, &lpss->dma_param, sizeof(*tx_param), GFP_KERNEL);
        if (!tx_param)
                return -ENOMEM;
 
-       *rx_param = lpss->dma_param;
-       *tx_param = lpss->dma_param;
-
        dma->fn = lpss8250_dma_filter;
        dma->rx_param = rx_param;
        dma->tx_param = tx_param;
index 23457daae8a148c6237e0262079cd401a72a7ea2..9ff6bbe9c0863e4c5891f862ea5425b08d813c6b 100644 (file)
@@ -581,7 +581,7 @@ static int mtk8250_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int mtk8250_remove(struct platform_device *pdev)
+static void mtk8250_remove(struct platform_device *pdev)
 {
        struct mtk8250_data *data = platform_get_drvdata(pdev);
 
@@ -591,8 +591,6 @@ static int mtk8250_remove(struct platform_device *pdev)
 
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
-
-       return 0;
 }
 
 static int __maybe_unused mtk8250_suspend(struct device *dev)
@@ -652,7 +650,7 @@ static struct platform_driver mtk8250_platform_driver = {
                .of_match_table = mtk8250_of_match,
        },
        .probe                  = mtk8250_probe,
-       .remove                 = mtk8250_remove,
+       .remove_new             = mtk8250_remove,
 };
 module_platform_driver(mtk8250_platform_driver);
 
index ef3e745bd09cbf17b6d3c22fa123212375f8cc6c..34f17a9785e79e1d26b6fbbbea3441a89f523e44 100644 (file)
@@ -251,7 +251,7 @@ err_free:
 /*
  * Release a line
  */
-static int of_platform_serial_remove(struct platform_device *ofdev)
+static void of_platform_serial_remove(struct platform_device *ofdev)
 {
        struct of_serial_info *info = platform_get_drvdata(ofdev);
 
@@ -261,7 +261,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
        pm_runtime_put_sync(&ofdev->dev);
        pm_runtime_disable(&ofdev->dev);
        kfree(info);
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -337,7 +336,7 @@ static struct platform_driver of_platform_serial_driver = {
                .pm = &of_serial_pm_ops,
        },
        .probe = of_platform_serial_probe,
-       .remove = of_platform_serial_remove,
+       .remove_new = of_platform_serial_remove,
 };
 
 module_platform_driver(of_platform_serial_driver);
index 578f35895b273fdc4e43956db562cd4d7a607e98..6942990a333c8b6a76520f9d5868e604565034bf 100644 (file)
@@ -1586,7 +1586,7 @@ err:
        return ret;
 }
 
-static int omap8250_remove(struct platform_device *pdev)
+static void omap8250_remove(struct platform_device *pdev)
 {
        struct omap8250_priv *priv = platform_get_drvdata(pdev);
        struct uart_8250_port *up;
@@ -1594,7 +1594,7 @@ static int omap8250_remove(struct platform_device *pdev)
 
        err = pm_runtime_resume_and_get(&pdev->dev);
        if (err)
-               return err;
+               dev_err(&pdev->dev, "Failed to resume hardware\n");
 
        up = serial8250_get_port(priv->line);
        omap_8250_shutdown(&up->port);
@@ -1606,7 +1606,6 @@ static int omap8250_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
        cpu_latency_qos_remove_request(&priv->pm_qos_request);
        device_init_wakeup(&pdev->dev, false);
-       return 0;
 }
 
 static int omap8250_prepare(struct device *dev)
@@ -1865,7 +1864,7 @@ static struct platform_driver omap8250_platform_driver = {
                .of_match_table = omap8250_dt_ids,
        },
        .probe                  = omap8250_probe,
-       .remove                 = omap8250_remove,
+       .remove_new             = omap8250_remove,
 };
 module_platform_driver(omap8250_platform_driver);
 
index 614be0f13a31348642619bb0350b3dbf96785a69..0d35c77fad9eb1a700baeff700d4c83d38b8a2fd 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/serial_core.h>
 #include <linux/8250_pci.h>
 #include <linux/bitops.h>
+#include <linux/bitfield.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
@@ -1970,6 +1971,20 @@ pci_sunix_setup(struct serial_private *priv,
 
 #define MOXA_GPIO_PIN2 BIT(2)
 
+#define MOXA_RS232     0x00
+#define MOXA_RS422     0x01
+#define MOXA_RS485_4W  0x0B
+#define MOXA_RS485_2W  0x0F
+#define MOXA_UIR_OFFSET        0x04
+#define MOXA_EVEN_RS_MASK      GENMASK(3, 0)
+#define MOXA_ODD_RS_MASK       GENMASK(7, 4)
+
+enum {
+       MOXA_SUPP_RS232 = BIT(0),
+       MOXA_SUPP_RS422 = BIT(1),
+       MOXA_SUPP_RS485 = BIT(2),
+};
+
 static bool pci_moxa_is_mini_pcie(unsigned short device)
 {
        if (device == PCI_DEVICE_ID_MOXA_CP102N ||
@@ -1983,12 +1998,54 @@ static bool pci_moxa_is_mini_pcie(unsigned short device)
        return false;
 }
 
+static unsigned int pci_moxa_supported_rs(struct pci_dev *dev)
+{
+       switch (dev->device & 0x0F00) {
+       case 0x0000:
+       case 0x0600:
+               return MOXA_SUPP_RS232;
+       case 0x0100:
+               return MOXA_SUPP_RS232 | MOXA_SUPP_RS422 | MOXA_SUPP_RS485;
+       case 0x0300:
+               return MOXA_SUPP_RS422 | MOXA_SUPP_RS485;
+       }
+       return 0;
+}
+
+static int pci_moxa_set_interface(const struct pci_dev *dev,
+                                 unsigned int port_idx,
+                                 u8 mode)
+{
+       resource_size_t iobar_addr = pci_resource_start(dev, 2);
+       resource_size_t UIR_addr = iobar_addr + MOXA_UIR_OFFSET + port_idx / 2;
+       u8 val;
+
+       val = inb(UIR_addr);
+
+       if (port_idx % 2) {
+               val &= ~MOXA_ODD_RS_MASK;
+               val |= FIELD_PREP(MOXA_ODD_RS_MASK, mode);
+       } else {
+               val &= ~MOXA_EVEN_RS_MASK;
+               val |= FIELD_PREP(MOXA_EVEN_RS_MASK, mode);
+       }
+       outb(val, UIR_addr);
+
+       return 0;
+}
+
 static int pci_moxa_init(struct pci_dev *dev)
 {
        unsigned short device = dev->device;
        resource_size_t iobar_addr = pci_resource_start(dev, 2);
-       unsigned int num_ports = (device & 0x00F0) >> 4;
-       u8 val;
+       unsigned int num_ports = (device & 0x00F0) >> 4, i;
+       u8 val, init_mode = MOXA_RS232;
+
+       if (!(pci_moxa_supported_rs(dev) & MOXA_SUPP_RS232)) {
+               init_mode = MOXA_RS422;
+       }
+       for (i = 0; i < num_ports; ++i)
+               pci_moxa_set_interface(dev, i, init_mode);
 
        /*
         * Enable hardware buffer to prevent break signal output when system boots up.
index 9f9e21981929fcf0597d844d5b8ab54f1a14c7f7..cd258922bd780019609fee227f55d84ed3a04246 100644 (file)
@@ -9,15 +9,21 @@
 
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
+#include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/units.h>
 #include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/8250_pci.h>
 
 #include <asm/byteorder.h>
 
 #define PCI_SUBDEVICE_ID_EFAR_PCI11400         PCI_DEVICE_ID_EFAR_PCI11400
 #define PCI_SUBDEVICE_ID_EFAR_PCI11414         PCI_DEVICE_ID_EFAR_PCI11414
 
+#define UART_SYSTEM_ADDR_BASE                  0x1000
+#define UART_DEV_REV_REG                       (UART_SYSTEM_ADDR_BASE + 0x00)
+#define UART_DEV_REV_MASK                      GENMASK(7, 0)
+#define UART_SYSLOCK_REG                       (UART_SYSTEM_ADDR_BASE + 0xA0)
+#define UART_SYSLOCK                           BIT(2)
+#define SYSLOCK_SLEEP_TIMEOUT                  100
+#define SYSLOCK_RETRY_CNT                      1000
+
+#define UART_RX_BYTE_FIFO                      0x00
+#define UART_FIFO_CTL                          0x02
+
 #define UART_ACTV_REG                          0x11
 #define UART_BLOCK_SET_ACTIVE                  BIT(0)
 
 #define UART_RESET_REG                         0x94
 #define UART_RESET_D3_RESET_DISABLE            BIT(16)
 
+#define UART_BURST_STATUS_REG                  0x9C
+#define UART_RX_BURST_FIFO                     0xA4
+
 #define MAX_PORTS                              4
 #define PORT_OFFSET                            0x100
+#define RX_BUF_SIZE                            512
+#define UART_BYTE_SIZE                          1
+#define UART_BURST_SIZE                                4
+
+#define UART_BST_STAT_RX_COUNT_MASK            0x00FF
+#define UART_BST_STAT_IIR_INT_PEND             0x100000
+#define UART_LSR_OVERRUN_ERR_CLR               0x43
+#define UART_BST_STAT_LSR_RX_MASK              0x9F000000
+#define UART_BST_STAT_LSR_RX_ERR_MASK          0x9E000000
+#define UART_BST_STAT_LSR_OVERRUN_ERR          0x2000000
+#define UART_BST_STAT_LSR_PARITY_ERR           0x4000000
+#define UART_BST_STAT_LSR_FRAME_ERR            0x8000000
+
+struct pci1xxxx_8250 {
+       unsigned int nr;
+       u8 dev_rev;
+       u8 pad[3];
+       void __iomem *membase;
+       int line[] __counted_by(nr);
+};
+
+static const struct serial_rs485 pci1xxxx_rs485_supported = {
+       .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
+                SER_RS485_RTS_AFTER_SEND,
+       .delay_rts_after_send = 1,
+       /* Delay RTS before send is not supported */
+};
+
+static int pci1xxxx_set_sys_lock(struct pci1xxxx_8250 *port)
+{
+       writel(UART_SYSLOCK, port->membase + UART_SYSLOCK_REG);
+       return readl(port->membase + UART_SYSLOCK_REG);
+}
+
+static int pci1xxxx_acquire_sys_lock(struct pci1xxxx_8250 *port)
+{
+       u32 regval;
+
+       return readx_poll_timeout(pci1xxxx_set_sys_lock, port, regval,
+                                 (regval & UART_SYSLOCK),
+                                 SYSLOCK_SLEEP_TIMEOUT,
+                                 SYSLOCK_RETRY_CNT * SYSLOCK_SLEEP_TIMEOUT);
+}
+
+static void pci1xxxx_release_sys_lock(struct pci1xxxx_8250 *port)
+{
+       writel(0x0, port->membase + UART_SYSLOCK_REG);
+}
 
 static const int logical_to_physical_port_idx[][MAX_PORTS] = {
        {0,  1,  2,  3}, /* PCI12000, PCI11010, PCI11101, PCI11400, PCI11414 */
@@ -104,12 +172,6 @@ static const int logical_to_physical_port_idx[][MAX_PORTS] = {
        {3, -1, -1, -1}, /* PCI1p3 */
 };
 
-struct pci1xxxx_8250 {
-       unsigned int nr;
-       void __iomem *membase;
-       int line[] __counted_by(nr);
-};
-
 static int pci1xxxx_get_num_ports(struct pci_dev *dev)
 {
        switch (dev->subsystem_device) {
@@ -205,12 +267,102 @@ static int pci1xxxx_rs485_config(struct uart_port *port,
        return 0;
 }
 
-static const struct serial_rs485 pci1xxxx_rs485_supported = {
-       .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
-                SER_RS485_RTS_AFTER_SEND,
-       .delay_rts_after_send = 1,
-       /* Delay RTS before send is not supported */
-};
+static u32 pci1xxxx_read_burst_status(struct uart_port *port)
+{
+       u32 status;
+
+       status = readl(port->membase + UART_BURST_STATUS_REG);
+       if (status & UART_BST_STAT_LSR_RX_ERR_MASK) {
+               if (status & UART_BST_STAT_LSR_OVERRUN_ERR) {
+                       writeb(UART_LSR_OVERRUN_ERR_CLR,
+                              port->membase + UART_FIFO_CTL);
+                       port->icount.overrun++;
+               }
+
+               if (status & UART_BST_STAT_LSR_FRAME_ERR)
+                       port->icount.frame++;
+
+               if (status & UART_BST_STAT_LSR_PARITY_ERR)
+                       port->icount.parity++;
+       }
+       return status;
+}
+
+static void pci1xxxx_process_read_data(struct uart_port *port,
+                                      unsigned char *rx_buff, u32 *buff_index,
+                                      u32 *valid_byte_count)
+{
+       u32 valid_burst_count = *valid_byte_count / UART_BURST_SIZE;
+       u32 *burst_buf;
+
+       /*
+        * Depending on the RX Trigger Level the number of bytes that can be
+        * stored in RX FIFO at a time varies. Each transaction reads data
+        * in DWORDs. If there are less than four remaining valid_byte_count
+        * to read, the data is received one byte at a time.
+        */
+       while (valid_burst_count--) {
+               if (*buff_index >= (RX_BUF_SIZE - UART_BURST_SIZE))
+                       break;
+               burst_buf = (u32 *)&rx_buff[*buff_index];
+               *burst_buf = readl(port->membase + UART_RX_BURST_FIFO);
+               *buff_index += UART_BURST_SIZE;
+               *valid_byte_count -= UART_BURST_SIZE;
+       }
+
+       while (*valid_byte_count) {
+               if (*buff_index >= RX_BUF_SIZE)
+                       break;
+               rx_buff[*buff_index] = readb(port->membase +
+                                            UART_RX_BYTE_FIFO);
+               *buff_index += UART_BYTE_SIZE;
+               *valid_byte_count -= UART_BYTE_SIZE;
+       }
+}
+
+static void pci1xxxx_rx_burst(struct uart_port *port, u32 uart_status)
+{
+       u32 valid_byte_count = uart_status & UART_BST_STAT_RX_COUNT_MASK;
+       struct tty_port *tty_port = &port->state->port;
+       unsigned char rx_buff[RX_BUF_SIZE];
+       u32 buff_index = 0;
+       u32 copied_len;
+
+       if (valid_byte_count != 0 &&
+           valid_byte_count < RX_BUF_SIZE) {
+               pci1xxxx_process_read_data(port, rx_buff, &buff_index,
+                                          &valid_byte_count);
+
+               copied_len = (u32)tty_insert_flip_string(tty_port, rx_buff,
+                                                        buff_index);
+
+               if (copied_len != buff_index)
+                       port->icount.overrun += buff_index - copied_len;
+
+               port->icount.rx += buff_index;
+               tty_flip_buffer_push(tty_port);
+       }
+}
+
+static int pci1xxxx_handle_irq(struct uart_port *port)
+{
+       unsigned long flags;
+       u32 status;
+
+       status = pci1xxxx_read_burst_status(port);
+
+       if (status & UART_BST_STAT_IIR_INT_PEND)
+               return 0;
+
+       spin_lock_irqsave(&port->lock, flags);
+
+       if (status & UART_BST_STAT_LSR_RX_MASK)
+               pci1xxxx_rx_burst(port, status);
+
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       return 1;
+}
 
 static bool pci1xxxx_port_suspend(int line)
 {
@@ -323,7 +475,7 @@ static int pci1xxxx_resume(struct device *dev)
 }
 
 static int pci1xxxx_setup(struct pci_dev *pdev,
-                         struct uart_8250_port *port, int port_idx)
+                         struct uart_8250_port *port, int port_idx, int rev)
 {
        int ret;
 
@@ -335,6 +487,10 @@ static int pci1xxxx_setup(struct pci_dev *pdev,
        port->port.rs485_config = pci1xxxx_rs485_config;
        port->port.rs485_supported = pci1xxxx_rs485_supported;
 
+       /* From C0 rev Burst operation is supported */
+       if (rev >= 0xC0)
+               port->port.handle_irq = pci1xxxx_handle_irq;
+
        ret = serial8250_pci_setup_port(pdev, port, 0, PORT_OFFSET * port_idx, 0);
        if (ret < 0)
                return ret;
@@ -370,6 +526,27 @@ static int pci1xxxx_logical_to_physical_port_translate(int subsys_dev, int port)
        return logical_to_physical_port_idx[0][port];
 }
 
+static int pci1xxxx_get_device_revision(struct pci1xxxx_8250 *priv)
+{
+       u32 regval;
+       int ret;
+
+       /*
+        * DEV REV is a system register, HW Syslock bit
+        * should be acquired before accessing the register
+        */
+       ret = pci1xxxx_acquire_sys_lock(priv);
+       if (ret)
+               return ret;
+
+       regval = readl(priv->membase + UART_DEV_REV_REG);
+       priv->dev_rev = regval & UART_DEV_REV_MASK;
+
+       pci1xxxx_release_sys_lock(priv);
+
+       return 0;
+}
+
 static int pci1xxxx_serial_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *id)
 {
@@ -381,6 +558,7 @@ static int pci1xxxx_serial_probe(struct pci_dev *pdev,
        int num_vectors;
        int subsys_dev;
        int port_idx;
+       int ret;
        int rc;
 
        rc = pcim_enable_device(pdev);
@@ -397,6 +575,10 @@ static int pci1xxxx_serial_probe(struct pci_dev *pdev,
        if (!priv->membase)
                return -ENOMEM;
 
+       ret = pci1xxxx_get_device_revision(priv);
+       if (ret)
+               return ret;
+
        pci_set_master(pdev);
 
        priv->nr = nr_ports;
@@ -428,7 +610,7 @@ static int pci1xxxx_serial_probe(struct pci_dev *pdev,
                else
                        uart.port.irq = pci_irq_vector(pdev, 0);
 
-               rc = pci1xxxx_setup(pdev, &uart, port_idx);
+               rc = pci1xxxx_setup(pdev, &uart, port_idx, priv->dev_rev);
                if (rc) {
                        dev_warn(dev, "Failed to setup port %u\n", i);
                        continue;
index a5b3ea27fc90207650884231c5b76a6d624761a2..77686da42ce8e93ca42d9c00ffde2d4e8bb9c234 100644 (file)
@@ -146,20 +146,18 @@ static int serial_pxa_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int serial_pxa_remove(struct platform_device *pdev)
+static void serial_pxa_remove(struct platform_device *pdev)
 {
        struct pxa8250_data *data = platform_get_drvdata(pdev);
 
        serial8250_unregister_port(data->line);
 
        clk_unprepare(data->clk);
-
-       return 0;
 }
 
 static struct platform_driver serial_pxa_driver = {
        .probe          = serial_pxa_probe,
-       .remove         = serial_pxa_remove,
+       .remove_new     = serial_pxa_remove,
 
        .driver         = {
                .name   = "pxa2xx-uart",
index 89956bbf34d9e4d4ad6b921db7a13b039629741a..ba352262df75a7b32c9fa6401abd012e5ccf0ec8 100644 (file)
@@ -128,15 +128,13 @@ err_clkdisable:
        return ret;
 }
 
-static int tegra_uart_remove(struct platform_device *pdev)
+static void tegra_uart_remove(struct platform_device *pdev)
 {
        struct tegra_uart *uart = platform_get_drvdata(pdev);
 
        serial8250_unregister_port(uart->line);
        reset_control_assert(uart->rst);
        clk_disable_unprepare(uart->clk);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -192,7 +190,7 @@ static struct platform_driver tegra_uart_driver = {
                .acpi_match_table = ACPI_PTR(tegra_uart_acpi_match),
        },
        .probe = tegra_uart_probe,
-       .remove = tegra_uart_remove,
+       .remove_new = tegra_uart_remove,
 };
 
 module_platform_driver(tegra_uart_driver);
index a405155264b1721a2f731ec051a93942d6f90441..6399a38ecce2ada62168872b33b13ae7c7273b49 100644 (file)
@@ -241,14 +241,12 @@ static int uniphier_uart_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int uniphier_uart_remove(struct platform_device *pdev)
+static void uniphier_uart_remove(struct platform_device *pdev)
 {
        struct uniphier8250_priv *priv = platform_get_drvdata(pdev);
 
        serial8250_unregister_port(priv->line);
        clk_disable_unprepare(priv->clk);
-
-       return 0;
 }
 
 static int __maybe_unused uniphier_uart_suspend(struct device *dev)
@@ -293,7 +291,7 @@ MODULE_DEVICE_TABLE(of, uniphier_uart_match);
 
 static struct platform_driver uniphier_uart_platform_driver = {
        .probe          = uniphier_uart_probe,
-       .remove         = uniphier_uart_remove,
+       .remove_new     = uniphier_uart_remove,
        .driver = {
                .name   = "uniphier-uart",
                .of_match_table = uniphier_uart_match,
index dc2ef05a10ebea95511de60814c852f9b29f1c00..2056aed4668834b4d62541026d7187c81bc42187 100644 (file)
@@ -90,12 +90,6 @@ struct serial_info {
        const struct serial_quirk *quirk;
 };
 
-struct serial_cfg_mem {
-       tuple_t tuple;
-       cisparse_t parse;
-       u_char buf[256];
-};
-
 /*
  * vers_1 5.0, "Brain Boxes", "2-Port RS232 card", "r6"
  * manfid 0x0160, 0x0104
index 732c893c8d161815b476bf815ee689c92e3f25a9..ffcf4882b25f9dce85e12be1a1191caace38880d 100644 (file)
@@ -87,7 +87,7 @@ config SERIAL_EARLYCON_SEMIHOST
 
 config SERIAL_EARLYCON_RISCV_SBI
        bool "Early console using RISC-V SBI"
-       depends on RISCV_SBI_V01
+       depends on RISCV_SBI
        select SERIAL_CORE
        select SERIAL_CORE_CONSOLE
        select SERIAL_EARLYCON
@@ -532,6 +532,9 @@ config SERIAL_UARTLITE_NR_UARTS
        help
          Set this to the number of uartlites in your system, or the number
          you think you might implement.
+         If maximum number of uartlite serial ports is more than 4, then the
+         driver uses dynamic allocation instead of static allocation for major
+         number.
 
 config SERIAL_SUNCORE
        bool
index 7090b251dd4df98c504d0ebe0af29ad7e9b64d20..effcba71ea775ab0d5f0f6d48576b0110844a7db 100644 (file)
@@ -425,7 +425,7 @@ static int altera_jtaguart_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int altera_jtaguart_remove(struct platform_device *pdev)
+static void altera_jtaguart_remove(struct platform_device *pdev)
 {
        struct uart_port *port;
        int i = pdev->id;
@@ -436,8 +436,6 @@ static int altera_jtaguart_remove(struct platform_device *pdev)
        port = &altera_jtaguart_ports[i];
        uart_remove_one_port(&altera_jtaguart_driver, port);
        iounmap(port->membase);
-
-       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -451,7 +449,7 @@ MODULE_DEVICE_TABLE(of, altera_jtaguart_match);
 
 static struct platform_driver altera_jtaguart_platform_driver = {
        .probe  = altera_jtaguart_probe,
-       .remove = altera_jtaguart_remove,
+       .remove_new = altera_jtaguart_remove,
        .driver = {
                .name           = DRV_NAME,
                .of_match_table = of_match_ptr(altera_jtaguart_match),
index 77835ac68df2608ce9d208caa12d60925caeaa05..897f0995b2fe77b0827be4a2221c7abec6f57f06 100644 (file)
@@ -305,7 +305,7 @@ static int altera_uart_startup(struct uart_port *port)
                int ret;
 
                ret = request_irq(port->irq, altera_uart_interrupt, 0,
-                               DRV_NAME, port);
+                               dev_name(port->dev), port);
                if (ret) {
                        pr_err(DRV_NAME ": unable to attach Altera UART %d "
                               "interrupt vector=%d\n", port->line, port->irq);
@@ -595,7 +595,7 @@ static int altera_uart_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int altera_uart_remove(struct platform_device *pdev)
+static void altera_uart_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
 
@@ -604,8 +604,6 @@ static int altera_uart_remove(struct platform_device *pdev)
                port->mapbase = 0;
                iounmap(port->membase);
        }
-
-       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -619,7 +617,7 @@ MODULE_DEVICE_TABLE(of, altera_uart_match);
 
 static struct platform_driver altera_uart_platform_driver = {
        .probe  = altera_uart_probe,
-       .remove = altera_uart_remove,
+       .remove_new = altera_uart_remove,
        .driver = {
                .name           = DRV_NAME,
                .of_match_table = of_match_ptr(altera_uart_match),
index b7635363373e201fe0e59885d1d6cfe7e5ebd017..fccec1698a54104c1487ea65536dce7729123c61 100644 (file)
@@ -50,8 +50,8 @@
 
 #define AMBA_ISR_PASS_LIMIT    256
 
-#define UART_DR_ERROR          (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
-#define UART_DUMMY_DR_RX       (1 << 16)
+#define UART_DR_ERROR          (UART011_DR_OE | UART011_DR_BE | UART011_DR_PE | UART011_DR_FE)
+#define UART_DUMMY_DR_RX       BIT(16)
 
 enum {
        REG_DR,
@@ -125,7 +125,7 @@ static unsigned int get_fifosize_arm(struct amba_device *dev)
 
 static struct vendor_data vendor_arm = {
        .reg_offset             = pl011_std_offsets,
-       .ifls                   = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
+       .ifls                   = UART011_IFLS_RX4_8 | UART011_IFLS_TX4_8,
        .fr_busy                = UART01x_FR_BUSY,
        .fr_dsr                 = UART01x_FR_DSR,
        .fr_cts                 = UART01x_FR_CTS,
@@ -203,7 +203,7 @@ static unsigned int get_fifosize_st(struct amba_device *dev)
 
 static struct vendor_data vendor_st = {
        .reg_offset             = pl011_st_offsets,
-       .ifls                   = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
+       .ifls                   = UART011_IFLS_RX_HALF | UART011_IFLS_TX_HALF,
        .fr_busy                = UART01x_FR_BUSY,
        .fr_dsr                 = UART01x_FR_DSR,
        .fr_cts                 = UART01x_FR_CTS,
@@ -277,13 +277,13 @@ struct uart_amba_port {
 static unsigned int pl011_tx_empty(struct uart_port *port);
 
 static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
-       unsigned int reg)
+                                       unsigned int reg)
 {
        return uap->reg_offset[reg];
 }
 
 static unsigned int pl011_read(const struct uart_amba_port *uap,
-       unsigned int reg)
+                              unsigned int reg)
 {
        void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
 
@@ -292,7 +292,7 @@ static unsigned int pl011_read(const struct uart_amba_port *uap,
 }
 
 static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
-       unsigned int reg)
+                       unsigned int reg)
 {
        void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
 
@@ -330,10 +330,11 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
                                uap->port.icount.brk++;
                                if (uart_handle_break(&uap->port))
                                        continue;
-                       } else if (ch & UART011_DR_PE)
+                       } else if (ch & UART011_DR_PE) {
                                uap->port.icount.parity++;
-                       else if (ch & UART011_DR_FE)
+                       } else if (ch & UART011_DR_FE) {
                                uap->port.icount.frame++;
+                       }
                        if (ch & UART011_DR_OE)
                                uap->port.icount.overrun++;
 
@@ -358,7 +359,6 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
        return fifotaken;
 }
 
-
 /*
  * All the DMA operation mode stuff goes inside this ifdef.
  * This assumes that you have a generic DMA device interface,
@@ -369,7 +369,7 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
 
 static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
-       enum dma_data_direction dir)
+                            enum dma_data_direction dir)
 {
        db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
                                     &db->dma, GFP_KERNEL);
@@ -381,7 +381,7 @@ static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
 }
 
 static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
-       enum dma_data_direction dir)
+                             enum dma_data_direction dir)
 {
        if (db->buf) {
                dma_free_coherent(chan->device->dev,
@@ -424,7 +424,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
                dma_cap_set(DMA_SLAVE, mask);
 
                chan = dma_request_channel(mask, plat->dma_filter,
-                                               plat->dma_tx_param);
+                                          plat->dma_tx_param);
                if (!chan) {
                        dev_err(uap->port.dev, "no TX DMA channel!\n");
                        return;
@@ -438,9 +438,9 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
                 dma_chan_name(uap->dmatx.chan));
 
        /* Optionally make use of an RX channel as well */
-       chan = dma_request_slave_channel(dev, "rx");
+       chan = dma_request_chan(dev, "rx");
 
-       if (!chan && plat && plat->dma_rx_param) {
+       if (IS_ERR(chan) && plat && plat->dma_rx_param) {
                chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
 
                if (!chan) {
@@ -449,7 +449,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
                }
        }
 
-       if (chan) {
+       if (!IS_ERR(chan)) {
                struct dma_slave_config rx_conf = {
                        .src_addr = uap->port.mapbase +
                                pl011_reg_to_offset(uap, REG_DR),
@@ -465,12 +465,12 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
                 * If the controller does, check for suitable residue processing
                 * otherwise assime all is well.
                 */
-               if (0 == dma_get_slave_caps(chan, &caps)) {
+               if (dma_get_slave_caps(chan, &caps) == 0) {
                        if (caps.residue_granularity ==
                                        DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
                                dma_release_channel(chan);
                                dev_info(uap->port.dev,
-                                       "RX DMA disabled - no residue processing\n");
+                                        "RX DMA disabled - no residue processing\n");
                                return;
                        }
                }
@@ -499,18 +499,16 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
                        else
                                uap->dmarx.poll_timeout = 3000;
                } else if (!plat && dev->of_node) {
-                       uap->dmarx.auto_poll_rate = of_property_read_bool(
-                                               dev->of_node, "auto-poll");
+                       uap->dmarx.auto_poll_rate =
+                                       of_property_read_bool(dev->of_node, "auto-poll");
                        if (uap->dmarx.auto_poll_rate) {
                                u32 x;
 
-                               if (0 == of_property_read_u32(dev->of_node,
-                                               "poll-rate-ms", &x))
+                               if (of_property_read_u32(dev->of_node, "poll-rate-ms", &x) == 0)
                                        uap->dmarx.poll_rate = x;
                                else
                                        uap->dmarx.poll_rate = 100;
-                               if (0 == of_property_read_u32(dev->of_node,
-                                               "poll-timeout-ms", &x))
+                               if (of_property_read_u32(dev->of_node, "poll-timeout-ms", &x) == 0)
                                        uap->dmarx.poll_timeout = x;
                                else
                                        uap->dmarx.poll_timeout = 3000;
@@ -547,7 +545,7 @@ static void pl011_dma_tx_callback(void *data)
        uart_port_lock_irqsave(&uap->port, &flags);
        if (uap->dmatx.queued)
                dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
-                               dmatx->len, DMA_TO_DEVICE);
+                                dmatx->len, DMA_TO_DEVICE);
 
        dmacr = uap->dmacr;
        uap->dmacr = dmacr & ~UART011_TXDMAE;
@@ -618,9 +616,9 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
        if (count > PL011_DMA_BUFFER_SIZE)
                count = PL011_DMA_BUFFER_SIZE;
 
-       if (xmit->tail < xmit->head)
+       if (xmit->tail < xmit->head) {
                memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
-       else {
+       } else {
                size_t first = UART_XMIT_SIZE - xmit->tail;
                size_t second;
 
@@ -643,7 +641,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
        }
 
        desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
-                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+                                          DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
                uap->dmatx.queued = false;
@@ -754,8 +752,9 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
                        if (pl011_dma_tx_refill(uap) > 0) {
                                uap->im &= ~UART011_TXIM;
                                pl011_write(uap->im, uap, REG_IMSC);
-                       } else
+                       } else {
                                ret = false;
+                       }
                } else if (!(uap->dmacr & UART011_TXDMAE)) {
                        uap->dmacr |= UART011_TXDMAE;
                        pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -832,8 +831,8 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
        dbuf = uap->dmarx.use_buf_b ?
                &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
        desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
-                                       DMA_DEV_TO_MEM,
-                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+                                          DMA_DEV_TO_MEM,
+                                          DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        /*
         * If the DMA engine is busy and cannot prepare a
         * channel, no big deal, the driver will fall back
@@ -889,14 +888,12 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
 
        /* Pick the remain data from the DMA */
        if (pending) {
-
                /*
                 * First take all chars in the DMA pipe, then look in the FIFO.
                 * Note that tty_insert_flip_buf() tries to take as many chars
                 * as it can.
                 */
-               dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
-                               pending);
+               dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, pending);
 
                uap->port.icount.rx += dma_count;
                if (dma_count < pending)
@@ -978,8 +975,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
        /* Switch buffer & re-trigger DMA job */
        dmarx->use_buf_b = !dmarx->use_buf_b;
        if (pl011_dma_rx_trigger_dma(uap)) {
-               dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
-                       "fall back to interrupt mode\n");
+               dev_dbg(uap->port.dev,
+                       "could not retrigger RX DMA job fall back to interrupt mode\n");
                uap->im |= UART011_RXIM;
                pl011_write(uap->im, uap, REG_IMSC);
        }
@@ -1026,8 +1023,8 @@ static void pl011_dma_rx_callback(void *data)
         * get some IRQ immediately from RX.
         */
        if (ret) {
-               dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
-                       "fall back to interrupt mode\n");
+               dev_dbg(uap->port.dev,
+                       "could not retrigger RX DMA job fall back to interrupt mode\n");
                uap->im |= UART011_RXIM;
                pl011_write(uap->im, uap, REG_IMSC);
        }
@@ -1072,7 +1069,7 @@ static void pl011_dma_rx_poll(struct timer_list *t)
                dmataken = dbuf->len - dmarx->last_residue;
                size = dmarx->last_residue - state.residue;
                dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
-                               size);
+                                                  size);
                if (dma_count == size)
                        dmarx->last_residue =  state.residue;
                dmarx->last_jiffies = jiffies;
@@ -1085,7 +1082,6 @@ static void pl011_dma_rx_poll(struct timer_list *t)
         */
        if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
                        > uap->dmarx.poll_timeout) {
-
                uart_port_lock_irqsave(&uap->port, &flags);
                pl011_dma_rx_stop(uap);
                uap->im |= UART011_RXIM;
@@ -1097,7 +1093,7 @@ static void pl011_dma_rx_poll(struct timer_list *t)
                del_timer(&uap->dmarx.timer);
        } else {
                mod_timer(&uap->dmarx.timer,
-                       jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
+                         jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
        }
 }
 
@@ -1113,7 +1109,6 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 
        uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
        if (!uap->dmatx.buf) {
-               dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
                uap->port.fifosize = uap->fifosize;
                return;
        }
@@ -1129,7 +1124,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 
        /* Allocate and map DMA RX buffers */
        ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
-                              DMA_FROM_DEVICE);
+                               DMA_FROM_DEVICE);
        if (ret) {
                dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
                        "RX buffer A", ret);
@@ -1137,12 +1132,12 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
        }
 
        ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
-                              DMA_FROM_DEVICE);
+                               DMA_FROM_DEVICE);
        if (ret) {
                dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
                        "RX buffer B", ret);
                pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
-                                DMA_FROM_DEVICE);
+                                 DMA_FROM_DEVICE);
                goto skip_rx;
        }
 
@@ -1164,13 +1159,12 @@ skip_rx:
 
        if (uap->using_rx_dma) {
                if (pl011_dma_rx_trigger_dma(uap))
-                       dev_dbg(uap->port.dev, "could not trigger initial "
-                               "RX DMA job, fall back to interrupt mode\n");
+                       dev_dbg(uap->port.dev,
+                               "could not trigger initial RX DMA job, fall back to interrupt mode\n");
                if (uap->dmarx.poll_rate) {
                        timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
                        mod_timer(&uap->dmarx.timer,
-                               jiffies +
-                               msecs_to_jiffies(uap->dmarx.poll_rate));
+                                 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
                        uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
                        uap->dmarx.last_jiffies = jiffies;
                }
@@ -1359,8 +1353,8 @@ static void pl011_stop_rx(struct uart_port *port)
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
 
-       uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
-                    UART011_PEIM|UART011_BEIM|UART011_OEIM);
+       uap->im &= ~(UART011_RXIM | UART011_RTIM | UART011_FEIM |
+                    UART011_PEIM | UART011_BEIM | UART011_OEIM);
        pl011_write(uap->im, uap, REG_IMSC);
 
        pl011_dma_rx_stop(uap);
@@ -1380,7 +1374,7 @@ static void pl011_enable_ms(struct uart_port *port)
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
 
-       uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
+       uap->im |= UART011_RIMIM | UART011_CTSMIM | UART011_DCDMIM | UART011_DSRMIM;
        pl011_write(uap->im, uap, REG_IMSC);
 }
 
@@ -1398,8 +1392,8 @@ __acquires(&uap->port.lock)
         */
        if (pl011_dma_rx_available(uap)) {
                if (pl011_dma_rx_trigger_dma(uap)) {
-                       dev_dbg(uap->port.dev, "could not trigger RX DMA job "
-                               "fall back to interrupt mode again\n");
+                       dev_dbg(uap->port.dev,
+                               "could not trigger RX DMA job fall back to interrupt mode again\n");
                        uap->im |= UART011_RXIM;
                        pl011_write(uap->im, uap, REG_IMSC);
                } else {
@@ -1409,8 +1403,7 @@ __acquires(&uap->port.lock)
                                uap->dmarx.last_jiffies = jiffies;
                                uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
                                mod_timer(&uap->dmarx.timer,
-                                       jiffies +
-                                       msecs_to_jiffies(uap->dmarx.poll_rate));
+                                         jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
                        }
 #endif
                }
@@ -1557,18 +1550,17 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
                do {
                        check_apply_cts_event_workaround(uap);
 
-                       pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
-                                              UART011_RXIS),
+                       pl011_write(status & ~(UART011_TXIS | UART011_RTIS | UART011_RXIS),
                                    uap, REG_ICR);
 
-                       if (status & (UART011_RTIS|UART011_RXIS)) {
+                       if (status & (UART011_RTIS | UART011_RXIS)) {
                                if (pl011_dma_rx_running(uap))
                                        pl011_dma_rx_irq(uap);
                                else
                                        pl011_rx_chars(uap);
                        }
-                       if (status & (UART011_DSRMIS|UART011_DCDMIS|
-                                     UART011_CTSMIS|UART011_RIMIS))
+                       if (status & (UART011_DSRMIS | UART011_DCDMIS |
+                                     UART011_CTSMIS | UART011_RIMIS))
                                pl011_modem_status(uap);
                        if (status & UART011_TXIS)
                                pl011_tx_chars(uap, true);
@@ -1598,6 +1590,12 @@ static unsigned int pl011_tx_empty(struct uart_port *port)
                                                        0 : TIOCSER_TEMT;
 }
 
+static void pl011_maybe_set_bit(bool cond, unsigned int *ptr, unsigned int mask)
+{
+       if (cond)
+               *ptr |= mask;
+}
+
 static unsigned int pl011_get_mctrl(struct uart_port *port)
 {
        struct uart_amba_port *uap =
@@ -1605,18 +1603,22 @@ static unsigned int pl011_get_mctrl(struct uart_port *port)
        unsigned int result = 0;
        unsigned int status = pl011_read(uap, REG_FR);
 
-#define TIOCMBIT(uartbit, tiocmbit)    \
-       if (status & uartbit)           \
-               result |= tiocmbit
+       pl011_maybe_set_bit(status & UART01x_FR_DCD, &result, TIOCM_CAR);
+       pl011_maybe_set_bit(status & uap->vendor->fr_dsr, &result, TIOCM_DSR);
+       pl011_maybe_set_bit(status & uap->vendor->fr_cts, &result, TIOCM_CTS);
+       pl011_maybe_set_bit(status & uap->vendor->fr_ri, &result, TIOCM_RNG);
 
-       TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
-       TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
-       TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
-       TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
-#undef TIOCMBIT
        return result;
 }
 
+static void pl011_assign_bit(bool cond, unsigned int *ptr, unsigned int mask)
+{
+       if (cond)
+               *ptr |= mask;
+       else
+               *ptr &= ~mask;
+}
+
 static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
        struct uart_amba_port *uap =
@@ -1625,23 +1627,16 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
 
        cr = pl011_read(uap, REG_CR);
 
-#define        TIOCMBIT(tiocmbit, uartbit)             \
-       if (mctrl & tiocmbit)           \
-               cr |= uartbit;          \
-       else                            \
-               cr &= ~uartbit
-
-       TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
-       TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
-       TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
-       TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
-       TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
+       pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTS);
+       pl011_assign_bit(mctrl & TIOCM_DTR, &cr, UART011_CR_DTR);
+       pl011_assign_bit(mctrl & TIOCM_OUT1, &cr, UART011_CR_OUT1);
+       pl011_assign_bit(mctrl & TIOCM_OUT2, &cr, UART011_CR_OUT2);
+       pl011_assign_bit(mctrl & TIOCM_LOOP, &cr, UART011_CR_LBE);
 
        if (port->status & UPSTAT_AUTORTS) {
                /* We need to disable auto-RTS if we want to turn RTS off */
-               TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
+               pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTSEN);
        }
-#undef TIOCMBIT
 
        pl011_write(cr, uap, REG_CR);
 }
@@ -1707,8 +1702,7 @@ static int pl011_get_poll_char(struct uart_port *port)
        return pl011_read(uap, REG_DR);
 }
 
-static void pl011_put_poll_char(struct uart_port *port,
-                        unsigned char ch)
+static void pl011_put_poll_char(struct uart_port *port, unsigned char ch)
 {
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
@@ -1909,14 +1903,13 @@ static int sbsa_uart_startup(struct uart_port *port)
        return 0;
 }
 
-static void pl011_shutdown_channel(struct uart_amba_port *uap,
-                                       unsigned int lcrh)
+static void pl011_shutdown_channel(struct uart_amba_port *uap, unsigned int lcrh)
 {
-      unsigned long val;
+       unsigned long val;
 
-      val = pl011_read(uap, lcrh);
-      val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
-      pl011_write(val, uap, lcrh);
+       val = pl011_read(uap, lcrh);
+       val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
+       pl011_write(val, uap, lcrh);
 }
 
 /*
@@ -2065,7 +2058,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
                uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
 #endif
 
-       if (baud > port->uartclk/16)
+       if (baud > port->uartclk / 16)
                quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
        else
                quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
@@ -2147,9 +2140,9 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
         * else we see data corruption.
         */
        if (uap->vendor->oversampling) {
-               if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
+               if (baud >= 3000000 && baud < 3250000 && quot > 1)
                        quot -= 1;
-               else if ((baud > 3250000) && (quot > 2))
+               else if (baud > 3250000 && quot > 2)
                        quot -= 2;
        }
        /* Set baud rate */
@@ -2218,13 +2211,14 @@ static void pl011_config_port(struct uart_port *port, int flags)
 static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
 {
        int ret = 0;
+
        if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
                ret = -EINVAL;
        if (ser->irq < 0 || ser->irq >= nr_irqs)
                ret = -EINVAL;
        if (ser->baud_base < 9600)
                ret = -EINVAL;
-       if (port->mapbase != (unsigned long) ser->iomem_base)
+       if (port->mapbase != (unsigned long)ser->iomem_base)
                ret = -EINVAL;
        return ret;
 }
@@ -2369,35 +2363,34 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
 static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
                                      int *parity, int *bits)
 {
-       if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
-               unsigned int lcr_h, ibrd, fbrd;
+       unsigned int lcr_h, ibrd, fbrd;
 
-               lcr_h = pl011_read(uap, REG_LCRH_TX);
+       if (!(pl011_read(uap, REG_CR) & UART01x_CR_UARTEN))
+               return;
 
-               *parity = 'n';
-               if (lcr_h & UART01x_LCRH_PEN) {
-                       if (lcr_h & UART01x_LCRH_EPS)
-                               *parity = 'e';
-                       else
-                               *parity = 'o';
-               }
+       lcr_h = pl011_read(uap, REG_LCRH_TX);
 
-               if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
-                       *bits = 7;
+       *parity = 'n';
+       if (lcr_h & UART01x_LCRH_PEN) {
+               if (lcr_h & UART01x_LCRH_EPS)
+                       *parity = 'e';
                else
-                       *bits = 8;
+                       *parity = 'o';
+       }
+
+       if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
+               *bits = 7;
+       else
+               *bits = 8;
 
-               ibrd = pl011_read(uap, REG_IBRD);
-               fbrd = pl011_read(uap, REG_FBRD);
+       ibrd = pl011_read(uap, REG_IBRD);
+       fbrd = pl011_read(uap, REG_FBRD);
 
-               *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
+       *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
 
-               if (uap->vendor->oversampling) {
-                       if (pl011_read(uap, REG_CR)
-                                 & ST_UART011_CR_OVSFACT)
-                               *baud *= 2;
-               }
-       }
+       if (uap->vendor->oversampling &&
+           (pl011_read(uap, REG_CR) & ST_UART011_CR_OVSFACT))
+               *baud *= 2;
 }
 
 static int pl011_console_setup(struct console *co, char *options)
@@ -2533,7 +2526,7 @@ static void qdf2400_e44_putc(struct uart_port *port, unsigned char c)
                cpu_relax();
 }
 
-static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
+static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned int n)
 {
        struct earlycon_device *dev = con->data;
 
@@ -2552,7 +2545,7 @@ static void pl011_putc(struct uart_port *port, unsigned char c)
                cpu_relax();
 }
 
-static void pl011_early_write(struct console *con, const char *s, unsigned n)
+static void pl011_early_write(struct console *con, const char *s, unsigned int n)
 {
        struct earlycon_device *dev = con->data;
 
@@ -2613,7 +2606,9 @@ static int __init pl011_early_console_setup(struct earlycon_device *device,
 
        return 0;
 }
+
 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
+
 OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
 
 /*
@@ -2636,6 +2631,7 @@ qdf2400_e44_early_console_setup(struct earlycon_device *device,
        device->con->write = qdf2400_e44_early_write;
        return 0;
 }
+
 EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);
 
 #else
@@ -2655,8 +2651,8 @@ static struct uart_driver amba_reg = {
 static int pl011_probe_dt_alias(int index, struct device *dev)
 {
        struct device_node *np;
-       static bool seen_dev_with_alias = false;
-       static bool seen_dev_without_alias = false;
+       static bool seen_dev_with_alias;
+       static bool seen_dev_without_alias;
        int ret = index;
 
        if (!IS_ENABLED(CONFIG_OF))
@@ -2672,7 +2668,7 @@ static int pl011_probe_dt_alias(int index, struct device *dev)
                ret = index;
        } else {
                seen_dev_with_alias = true;
-               if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
+               if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret]) {
                        dev_warn(dev, "requested serial port %d  not available.\n", ret);
                        ret = index;
                }
@@ -2706,7 +2702,7 @@ static int pl011_find_free_port(void)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
-               if (amba_ports[i] == NULL)
+               if (!amba_ports[i])
                        return i;
 
        return -EBUSY;
@@ -2873,6 +2869,22 @@ static int pl011_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
 
+#ifdef CONFIG_ACPI_SPCR_TABLE
+static void qpdf2400_erratum44_workaround(struct device *dev,
+                                         struct uart_amba_port *uap)
+{
+       if (!qdf2400_e44_present)
+               return;
+
+       dev_info(dev, "working around QDF2400 SoC erratum 44\n");
+       uap->vendor = &vendor_qdt_qdf2400_e44;
+}
+#else
+static void qpdf2400_erratum44_workaround(struct device *dev,
+                                         struct uart_amba_port *uap)
+{ /* empty */ }
+#endif
+
 static int sbsa_uart_probe(struct platform_device *pdev)
 {
        struct uart_amba_port *uap;
@@ -2908,13 +2920,8 @@ static int sbsa_uart_probe(struct platform_device *pdev)
                return ret;
        uap->port.irq   = ret;
 
-#ifdef CONFIG_ACPI_SPCR_TABLE
-       if (qdf2400_e44_present) {
-               dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
-               uap->vendor = &vendor_qdt_qdf2400_e44;
-       } else
-#endif
-               uap->vendor = &vendor_sbsa;
+       uap->vendor = &vendor_sbsa;
+       qpdf2400_erratum44_workaround(&pdev->dev, uap);
 
        uap->reg_offset = uap->vendor->reg_offset;
        uap->fifosize   = 32;
@@ -2935,13 +2942,12 @@ static int sbsa_uart_probe(struct platform_device *pdev)
        return pl011_register_port(uap);
 }
 
-static int sbsa_uart_remove(struct platform_device *pdev)
+static void sbsa_uart_remove(struct platform_device *pdev)
 {
        struct uart_amba_port *uap = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&amba_reg, &uap->port);
        pl011_unregister_port(uap);
-       return 0;
 }
 
 static const struct of_device_id sbsa_uart_of_match[] = {
@@ -2959,7 +2965,7 @@ MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
 
 static struct platform_driver arm_sbsa_uart_platform_driver = {
        .probe          = sbsa_uart_probe,
-       .remove         = sbsa_uart_remove,
+       .remove_new     = sbsa_uart_remove,
        .driver = {
                .name   = "sbsa-uart",
                .pm     = &pl011_dev_pm_ops,
@@ -2998,7 +3004,7 @@ static struct amba_driver pl011_driver = {
 
 static int __init pl011_init(void)
 {
-       printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
+       pr_info("Serial: AMBA PL011 UART driver\n");
 
        if (platform_driver_register(&arm_sbsa_uart_platform_driver))
                pr_warn("could not register SBSA UART platform driver\n");
index 716cb014c028e04e5b19fe4781ff17e073dce6d4..364599f256db8c7d843d3115183937a900be81c8 100644 (file)
@@ -122,7 +122,7 @@ static void apbuart_tx_chars(struct uart_port *port)
 {
        u8 ch;
 
-       uart_port_tx_limited(port, ch, port->fifosize >> 1,
+       uart_port_tx_limited(port, ch, port->fifosize,
                true,
                UART_PUT_CHAR(port, ch),
                ({}));
index ffd234673177fdb286926e9c0f3a2c2e3b68f8a6..8d09ace062e5966660e9a077f5b968ff4c8f7b96 100644 (file)
@@ -818,7 +818,7 @@ err_disable_clk:
        return ret;
 }
 
-static int ar933x_uart_remove(struct platform_device *pdev)
+static void ar933x_uart_remove(struct platform_device *pdev)
 {
        struct ar933x_uart_port *up;
 
@@ -828,8 +828,6 @@ static int ar933x_uart_remove(struct platform_device *pdev)
                uart_remove_one_port(&ar933x_uart_driver, &up->port);
                clk_disable_unprepare(up->clk);
        }
-
-       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -842,7 +840,7 @@ MODULE_DEVICE_TABLE(of, ar933x_uart_of_ids);
 
 static struct platform_driver ar933x_uart_platform_driver = {
        .probe          = ar933x_uart_probe,
-       .remove         = ar933x_uart_remove,
+       .remove_new     = ar933x_uart_remove,
        .driver         = {
                .name           = DRIVER_NAME,
                .of_match_table = of_match_ptr(ar933x_uart_of_ids),
index 1946fafc3f3ecf218b2b7a9af689f5a40a422c81..85667f7095154eff29a7ba402f99894518d78c60 100644 (file)
@@ -1013,14 +1013,18 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
        struct device *mfd_dev = port->dev->parent;
        dma_cap_mask_t          mask;
        struct dma_slave_config config;
+       struct dma_chan *chan;
        int ret, nent;
 
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
 
-       atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
-       if (atmel_port->chan_tx == NULL)
+       chan = dma_request_chan(mfd_dev, "tx");
+       if (IS_ERR(chan)) {
+               atmel_port->chan_tx = NULL;
                goto chan_err;
+       }
+       atmel_port->chan_tx = chan;
        dev_info(port->dev, "using %s for tx DMA transfers\n",
                dma_chan_name(atmel_port->chan_tx));
 
@@ -1188,6 +1192,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
        dma_cap_mask_t          mask;
        struct dma_slave_config config;
        struct circ_buf         *ring;
+       struct dma_chan *chan;
        int ret, nent;
 
        ring = &atmel_port->rx_ring;
@@ -1195,9 +1200,12 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
        dma_cap_zero(mask);
        dma_cap_set(DMA_CYCLIC, mask);
 
-       atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
-       if (atmel_port->chan_rx == NULL)
+       chan = dma_request_chan(mfd_dev, "rx");
+       if (IS_ERR(chan)) {
+               atmel_port->chan_rx = NULL;
                goto chan_err;
+       }
+       atmel_port->chan_rx = chan;
        dev_info(port->dev, "using %s for rx DMA transfers\n",
                dma_chan_name(atmel_port->chan_rx));
 
@@ -3001,7 +3009,7 @@ err:
  * protocol that needs bitbanging on IO lines, but use the regular serial
  * port in the normal case.
  */
-static int atmel_serial_remove(struct platform_device *pdev)
+static void atmel_serial_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -3020,8 +3028,6 @@ static int atmel_serial_remove(struct platform_device *pdev)
        clear_bit(port->line, atmel_ports_in_use);
 
        pdev->dev.of_node = NULL;
-
-       return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
@@ -3029,7 +3035,7 @@ static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
 
 static struct platform_driver atmel_serial_driver = {
        .probe          = atmel_serial_probe,
-       .remove         = atmel_serial_remove,
+       .remove_new     = atmel_serial_remove,
        .driver         = {
                .name                   = "atmel_usart_serial",
                .of_match_table         = of_match_ptr(atmel_serial_dt_ids),
index 4a08fd5ee61be4f9e0607b840169e9fecb9903cb..a3cefa153456df4a4d486b3784a03b99e1a4fc6e 100644 (file)
@@ -868,7 +868,7 @@ static int bcm_uart_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int bcm_uart_remove(struct platform_device *pdev)
+static void bcm_uart_remove(struct platform_device *pdev)
 {
        struct uart_port *port;
 
@@ -876,7 +876,6 @@ static int bcm_uart_remove(struct platform_device *pdev)
        uart_remove_one_port(&bcm_uart_driver, port);
        /* mark port as free */
        ports[pdev->id].membase = NULL;
-       return 0;
 }
 
 static const struct of_device_id bcm63xx_of_match[] = {
@@ -890,7 +889,7 @@ MODULE_DEVICE_TABLE(of, bcm63xx_of_match);
  */
 static struct platform_driver bcm_uart_platform_driver = {
        .probe  = bcm_uart_probe,
-       .remove = bcm_uart_remove,
+       .remove_new = bcm_uart_remove,
        .driver = {
                .name  = "bcm63xx_uart",
                .of_match_table = bcm63xx_of_match,
index 55d19937efbd3f60782f74e06ff5dc408813049f..7927725b8957809d4bfea0f1446d9faba9762bdd 100644 (file)
@@ -510,13 +510,11 @@ static int uart_clps711x_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int uart_clps711x_remove(struct platform_device *pdev)
+static void uart_clps711x_remove(struct platform_device *pdev)
 {
        struct clps711x_port *s = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&clps711x_uart, &s->port);
-
-       return 0;
 }
 
 static const struct of_device_id __maybe_unused clps711x_uart_dt_ids[] = {
@@ -531,7 +529,7 @@ static struct platform_driver clps711x_uart_platform = {
                .of_match_table = of_match_ptr(clps711x_uart_dt_ids),
        },
        .probe  = uart_clps711x_probe,
-       .remove = uart_clps711x_remove,
+       .remove_new = uart_clps711x_remove,
 };
 
 static int __init uart_clps711x_init(void)
index be4af6eda4c21ec32cf766179892a530c814f7dd..df56c6c5afd0e50bbc595f56cf3c24313654e98e 100644 (file)
@@ -1549,13 +1549,11 @@ static int cpm_uart_probe(struct platform_device *ofdev)
        return ret;
 }
 
-static int cpm_uart_remove(struct platform_device *ofdev)
+static void cpm_uart_remove(struct platform_device *ofdev)
 {
        struct uart_cpm_port *pinfo = platform_get_drvdata(ofdev);
 
        uart_remove_one_port(&cpm_reg, &pinfo->port);
-
-       return 0;
 }
 
 static const struct of_device_id cpm_uart_match[] = {
@@ -1581,7 +1579,7 @@ static struct platform_driver cpm_uart_driver = {
                .of_match_table = cpm_uart_match,
        },
        .probe = cpm_uart_probe,
-       .remove = cpm_uart_remove,
+       .remove_new = cpm_uart_remove,
  };
 
 static int __init cpm_uart_init(void)
index 5004125f3045cabf7037d8382fbcd7811ddcd1af..e419c4bde8b787d7f6ca399c9067da2482d39347 100644 (file)
@@ -503,13 +503,11 @@ static int digicolor_uart_probe(struct platform_device *pdev)
        return uart_add_one_port(&digicolor_uart, &dp->port);
 }
 
-static int digicolor_uart_remove(struct platform_device *pdev)
+static void digicolor_uart_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&digicolor_uart, port);
-
-       return 0;
 }
 
 static const struct of_device_id digicolor_uart_dt_ids[] = {
@@ -524,7 +522,7 @@ static struct platform_driver digicolor_uart_platform = {
                .of_match_table = of_match_ptr(digicolor_uart_dt_ids),
        },
        .probe  = digicolor_uart_probe,
-       .remove = digicolor_uart_remove,
+       .remove_new = digicolor_uart_remove,
 };
 
 static int __init digicolor_uart_init(void)
index 27afb0b74ea705bebbc3895b577ddc39b4eda6b6..0162155f0c83976e5aa9b014974fdf413f3b4a45 100644 (file)
@@ -15,17 +15,38 @@ static void sbi_putc(struct uart_port *port, unsigned char c)
        sbi_console_putchar(c);
 }
 
-static void sbi_console_write(struct console *con,
-                             const char *s, unsigned n)
+static void sbi_0_1_console_write(struct console *con,
+                                 const char *s, unsigned int n)
 {
        struct earlycon_device *dev = con->data;
        uart_console_write(&dev->port, s, n, sbi_putc);
 }
 
+static void sbi_dbcn_console_write(struct console *con,
+                                  const char *s, unsigned int n)
+{
+       int ret;
+
+       while (n) {
+               ret = sbi_debug_console_write(s, n);
+               if (ret < 0)
+                       break;
+
+               s += ret;
+               n -= ret;
+       }
+}
+
 static int __init early_sbi_setup(struct earlycon_device *device,
                                  const char *opt)
 {
-       device->con->write = sbi_console_write;
+       if (sbi_debug_console_available)
+               device->con->write = sbi_dbcn_console_write;
+       else if (IS_ENABLED(CONFIG_RISCV_SBI_V01))
+               device->con->write = sbi_0_1_console_write;
+       else
+               return -ENODEV;
+
        return 0;
 }
 EARLYCON_DECLARE(sbi, early_sbi_setup);
index cb28a87736aa1b817c3efb06337bc9a3ae4d1db6..d4e8bdb1cdef192b39c4af73f4073f3191844aa8 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/serial_core.h>
 #include <linux/slab.h>
 #include <linux/tty_flip.h>
@@ -413,18 +413,17 @@ static int esp32s3_acm_probe(struct platform_device *pdev)
        return uart_add_one_port(&esp32s3_acm_reg, port);
 }
 
-static int esp32s3_acm_remove(struct platform_device *pdev)
+static void esp32s3_acm_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&esp32s3_acm_reg, port);
-       return 0;
 }
 
 
 static struct platform_driver esp32s3_acm_driver = {
        .probe          = esp32s3_acm_probe,
-       .remove         = esp32s3_acm_remove,
+       .remove_new     = esp32s3_acm_remove,
        .driver         = {
                .name   = DRIVER_NAME,
                .of_match_table = esp32s3_acm_dt_ids,
index 85c9c5ad7cc5cba40b5728c43f77565a6a8b0490..6fc61f32335590f582593d173774229c835a18e0 100644 (file)
@@ -9,7 +9,8 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/serial_core.h>
 #include <linux/slab.h>
 #include <linux/tty_flip.h>
@@ -678,16 +679,11 @@ static struct uart_driver esp32_uart_reg = {
 static int esp32_uart_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
-       static const struct of_device_id *match;
        struct uart_port *port;
        struct esp32_port *sport;
        struct resource *res;
        int ret;
 
-       match = of_match_device(esp32_uart_dt_ids, &pdev->dev);
-       if (!match)
-               return -ENODEV;
-
        sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
        if (!sport)
                return -ENOMEM;
@@ -728,7 +724,7 @@ static int esp32_uart_probe(struct platform_device *pdev)
        port->flags = UPF_BOOT_AUTOCONF;
        port->has_sysrq = 1;
        port->fifosize = ESP32_UART_TX_FIFO_SIZE;
-       port->private_data = (void *)match->data;
+       port->private_data = (void *)device_get_match_data(&pdev->dev);
 
        esp32_uart_ports[port->line] = sport;
 
@@ -737,19 +733,17 @@ static int esp32_uart_probe(struct platform_device *pdev)
        return uart_add_one_port(&esp32_uart_reg, port);
 }
 
-static int esp32_uart_remove(struct platform_device *pdev)
+static void esp32_uart_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&esp32_uart_reg, port);
-
-       return 0;
 }
 
 
 static struct platform_driver esp32_uart_driver = {
        .probe          = esp32_uart_probe,
-       .remove         = esp32_uart_remove,
+       .remove_new     = esp32_uart_remove,
        .driver         = {
                .name   = DRIVER_NAME,
                .of_match_table = esp32_uart_dt_ids,
index 3bdaf1ddc3094eb7efb6caa4f1f3f516eb8163fd..52c87876a88de6b7e43969029947744a91f0e3cf 100644 (file)
@@ -851,13 +851,11 @@ static int linflex_probe(struct platform_device *pdev)
        return uart_add_one_port(&linflex_reg, sport);
 }
 
-static int linflex_remove(struct platform_device *pdev)
+static void linflex_remove(struct platform_device *pdev)
 {
        struct uart_port *sport = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&linflex_reg, sport);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -884,7 +882,7 @@ static SIMPLE_DEV_PM_OPS(linflex_pm_ops, linflex_suspend, linflex_resume);
 
 static struct platform_driver linflex_driver = {
        .probe          = linflex_probe,
-       .remove         = linflex_remove,
+       .remove_new     = linflex_remove,
        .driver         = {
                .name   = DRIVER_NAME,
                .of_match_table = linflex_dt_ids,
index 6d0cfb2e86b45a91010c01639ef4c0c21680ae8c..5ddf110aedbe513b522d10e691cada5563fec4df 100644 (file)
@@ -2959,7 +2959,7 @@ failed_reset:
        return ret;
 }
 
-static int lpuart_remove(struct platform_device *pdev)
+static void lpuart_remove(struct platform_device *pdev)
 {
        struct lpuart_port *sport = platform_get_drvdata(pdev);
 
@@ -2976,7 +2976,6 @@ static int lpuart_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);
-       return 0;
 }
 
 static int lpuart_runtime_suspend(struct device *dev)
@@ -3210,7 +3209,7 @@ static const struct dev_pm_ops lpuart_pm_ops = {
 
 static struct platform_driver lpuart_driver = {
        .probe          = lpuart_probe,
-       .remove         = lpuart_remove,
+       .remove_new     = lpuart_remove,
        .driver         = {
                .name   = "fsl-lpuart",
                .of_match_table = lpuart_dt_ids,
index 708b9852a575dc1a503d94c7ec71e451c448a20b..4aa72d5aeafbf081ac37241853c49cdb18e46ae5 100644 (file)
@@ -415,13 +415,13 @@ static void imx_uart_stop_tx(struct uart_port *port)
        ucr1 = imx_uart_readl(sport, UCR1);
        imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1);
 
+       ucr4 = imx_uart_readl(sport, UCR4);
        usr2 = imx_uart_readl(sport, USR2);
-       if (!(usr2 & USR2_TXDC)) {
+       if ((!(usr2 & USR2_TXDC)) && (ucr4 & UCR4_TCEN)) {
                /* The shifter is still busy, so retry once TC triggers */
                return;
        }
 
-       ucr4 = imx_uart_readl(sport, UCR4);
        ucr4 &= ~UCR4_TCEN;
        imx_uart_writel(sport, ucr4, UCR4);
 
@@ -1336,15 +1336,18 @@ static int imx_uart_dma_init(struct imx_port *sport)
 {
        struct dma_slave_config slave_config = {};
        struct device *dev = sport->port.dev;
+       struct dma_chan *chan;
        int ret;
 
        /* Prepare for RX : */
-       sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
-       if (!sport->dma_chan_rx) {
+       chan = dma_request_chan(dev, "rx");
+       if (IS_ERR(chan)) {
                dev_dbg(dev, "cannot get the DMA channel.\n");
-               ret = -EINVAL;
+               sport->dma_chan_rx = NULL;
+               ret = PTR_ERR(chan);
                goto err;
        }
+       sport->dma_chan_rx = chan;
 
        slave_config.direction = DMA_DEV_TO_MEM;
        slave_config.src_addr = sport->port.mapbase + URXD0;
@@ -1366,12 +1369,14 @@ static int imx_uart_dma_init(struct imx_port *sport)
        sport->rx_ring.buf = sport->rx_buf;
 
        /* Prepare for TX : */
-       sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
-       if (!sport->dma_chan_tx) {
+       chan = dma_request_chan(dev, "tx");
+       if (IS_ERR(chan)) {
                dev_err(dev, "cannot get the TX DMA channel!\n");
-               ret = -EINVAL;
+               sport->dma_chan_tx = NULL;
+               ret = PTR_ERR(chan);
                goto err;
        }
+       sport->dma_chan_tx = chan;
 
        slave_config.direction = DMA_MEM_TO_DEV;
        slave_config.dst_addr = sport->port.mapbase + URTX0;
@@ -1943,10 +1948,6 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
            rs485conf->flags & SER_RS485_RX_DURING_TX)
                imx_uart_start_rx(port);
 
-       if (port->rs485_rx_during_tx_gpio)
-               gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
-                                        !!(rs485conf->flags & SER_RS485_RX_DURING_TX));
-
        return 0;
 }
 
@@ -2210,7 +2211,6 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
        return HRTIMER_NORESTART;
 }
 
-static const struct serial_rs485 imx_no_rs485 = {};    /* No RS485 if no RTS */
 static const struct serial_rs485 imx_rs485_supported = {
        .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
                 SER_RS485_RX_DURING_TX,
@@ -2294,8 +2294,6 @@ static int imx_uart_probe(struct platform_device *pdev)
        /* RTS is required to control the RS485 transmitter */
        if (sport->have_rtscts || sport->have_rtsgpio)
                sport->port.rs485_supported = imx_rs485_supported;
-       else
-               sport->port.rs485_supported = imx_no_rs485;
        sport->port.flags = UPF_BOOT_AUTOCONF;
        timer_setup(&sport->timer, imx_uart_timeout, 0);
 
@@ -2322,19 +2320,13 @@ static int imx_uart_probe(struct platform_device *pdev)
        /* For register access, we only need to enable the ipg clock. */
        ret = clk_prepare_enable(sport->clk_ipg);
        if (ret) {
-               dev_err(&pdev->dev, "failed to enable per clk: %d\n", ret);
+               dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
                return ret;
        }
 
        ret = uart_get_rs485_mode(&sport->port);
-       if (ret) {
-               clk_disable_unprepare(sport->clk_ipg);
-               return ret;
-       }
-
-       if (sport->port.rs485.flags & SER_RS485_ENABLED &&
-           (!sport->have_rtscts && !sport->have_rtsgpio))
-               dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
+       if (ret)
+               goto err_clk;
 
        /*
         * If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
@@ -2414,8 +2406,6 @@ static int imx_uart_probe(struct platform_device *pdev)
                imx_uart_writel(sport, ucr3, UCR3);
        }
 
-       clk_disable_unprepare(sport->clk_ipg);
-
        hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sport->trigger_start_tx.function = imx_trigger_start_tx;
@@ -2431,7 +2421,7 @@ static int imx_uart_probe(struct platform_device *pdev)
                if (ret) {
                        dev_err(&pdev->dev, "failed to request rx irq: %d\n",
                                ret);
-                       return ret;
+                       goto err_clk;
                }
 
                ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0,
@@ -2439,7 +2429,7 @@ static int imx_uart_probe(struct platform_device *pdev)
                if (ret) {
                        dev_err(&pdev->dev, "failed to request tx irq: %d\n",
                                ret);
-                       return ret;
+                       goto err_clk;
                }
 
                ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
@@ -2447,14 +2437,14 @@ static int imx_uart_probe(struct platform_device *pdev)
                if (ret) {
                        dev_err(&pdev->dev, "failed to request rts irq: %d\n",
                                ret);
-                       return ret;
+                       goto err_clk;
                }
        } else {
                ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
                                       dev_name(&pdev->dev), sport);
                if (ret) {
                        dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
-                       return ret;
+                       goto err_clk;
                }
        }
 
@@ -2462,16 +2452,19 @@ static int imx_uart_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, sport);
 
-       return uart_add_one_port(&imx_uart_uart_driver, &sport->port);
+       ret = uart_add_one_port(&imx_uart_uart_driver, &sport->port);
+
+err_clk:
+       clk_disable_unprepare(sport->clk_ipg);
+
+       return ret;
 }
 
-static int imx_uart_remove(struct platform_device *pdev)
+static void imx_uart_remove(struct platform_device *pdev)
 {
        struct imx_port *sport = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&imx_uart_uart_driver, &sport->port);
-
-       return 0;
 }
 
 static void imx_uart_restore_context(struct imx_port *sport)
@@ -2640,7 +2633,7 @@ static const struct dev_pm_ops imx_uart_pm_ops = {
 
 static struct platform_driver imx_uart_platform_driver = {
        .probe = imx_uart_probe,
-       .remove = imx_uart_remove,
+       .remove_new = imx_uart_remove,
 
        .driver = {
                .name = "imx-uart",
index 8489c07f4cd5eadcb749e3a2f288ff74de77df06..df55e5dc5afc28fcf6f4591f324d456c783bdd4d 100644 (file)
@@ -115,8 +115,6 @@ struct board_ops {
        void (*send_start_character)(struct jsm_channel *ch);
        void (*send_stop_character)(struct jsm_channel *ch);
        void (*copy_data_from_queue_to_uart)(struct jsm_channel *ch);
-       u32 (*get_uart_bytes_left)(struct jsm_channel *ch);
-       void (*send_immediate_char)(struct jsm_channel *ch, unsigned char);
 };
 
 
@@ -127,7 +125,6 @@ struct jsm_board
 {
        int             boardnum;       /* Board number: 0-32 */
 
-       int             type;           /* Type of board */
        u8              rev;            /* PCI revision ID */
        struct pci_dev  *pci_dev;
        u32             maxports;       /* MAX ports this board can handle */
@@ -155,8 +152,6 @@ struct jsm_board
        u32             bd_dividend;    /* Board/UARTs specific dividend */
 
        struct board_ops *bd_ops;
-
-       struct list_head jsm_board_entry;
 };
 
 /************************************************************************
index 3fd57ac3ad81c465c5a74bab262f1e5fbfcd556c..1eda48964c0b6539f0e6281d121d7b17344549b7 100644 (file)
@@ -877,28 +877,6 @@ static void cls_uart_off(struct jsm_channel *ch)
        writeb(0, &ch->ch_cls_uart->ier);
 }
 
-/*
- * cls_get_uarts_bytes_left.
- * Returns 0 is nothing left in the FIFO, returns 1 otherwise.
- *
- * The channel lock MUST be held by the calling function.
- */
-static u32 cls_get_uart_bytes_left(struct jsm_channel *ch)
-{
-       u8 left = 0;
-       u8 lsr = readb(&ch->ch_cls_uart->lsr);
-
-       /* Determine whether the Transmitter is empty or not */
-       if (!(lsr & UART_LSR_TEMT))
-               left = 1;
-       else {
-               ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-               left = 0;
-       }
-
-       return left;
-}
-
 /*
  * cls_send_break.
  * Starts sending a break thru the UART.
@@ -916,18 +894,6 @@ static void cls_send_break(struct jsm_channel *ch)
        }
 }
 
-/*
- * cls_send_immediate_char.
- * Sends a specific character as soon as possible to the UART,
- * jumping over any bytes that might be in the write queue.
- *
- * The channel lock MUST be held by the calling function.
- */
-static void cls_send_immediate_char(struct jsm_channel *ch, unsigned char c)
-{
-       writeb(c, &ch->ch_cls_uart->txrx);
-}
-
 struct board_ops jsm_cls_ops = {
        .intr =                         cls_intr,
        .uart_init =                    cls_uart_init,
@@ -943,7 +909,5 @@ struct board_ops jsm_cls_ops = {
        .send_start_character =         cls_send_start_character,
        .send_stop_character =          cls_send_stop_character,
        .copy_data_from_queue_to_uart = cls_copy_data_from_queue_to_uart,
-       .get_uart_bytes_left =          cls_get_uart_bytes_left,
-       .send_immediate_char =          cls_send_immediate_char
 };
 
index 2bd6404289708b07a29ba96a79f18af8ed1aa2ea..1fa10f19368f57edb7b6b097bfc3236541890952 100644 (file)
@@ -1309,25 +1309,6 @@ static void neo_uart_off(struct jsm_channel *ch)
        writeb(0, &ch->ch_neo_uart->ier);
 }
 
-static u32 neo_get_uart_bytes_left(struct jsm_channel *ch)
-{
-       u8 left = 0;
-       u8 lsr = readb(&ch->ch_neo_uart->lsr);
-
-       /* We must cache the LSR as some of the bits get reset once read... */
-       ch->ch_cached_lsr |= lsr;
-
-       /* Determine whether the Transmitter is empty or not */
-       if (!(lsr & UART_LSR_TEMT))
-               left = 1;
-       else {
-               ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
-               left = 0;
-       }
-
-       return left;
-}
-
 /* Channel lock MUST be held by the calling function! */
 static void neo_send_break(struct jsm_channel *ch)
 {
@@ -1348,25 +1329,6 @@ static void neo_send_break(struct jsm_channel *ch)
        }
 }
 
-/*
- * neo_send_immediate_char.
- *
- * Sends a specific character as soon as possible to the UART,
- * jumping over any bytes that might be in the write queue.
- *
- * The channel lock MUST be held by the calling function.
- */
-static void neo_send_immediate_char(struct jsm_channel *ch, unsigned char c)
-{
-       if (!ch)
-               return;
-
-       writeb(c, &ch->ch_neo_uart->txrx);
-
-       /* flush write operation */
-       neo_pci_posting_flush(ch->ch_bd);
-}
-
 struct board_ops jsm_neo_ops = {
        .intr                           = neo_intr,
        .uart_init                      = neo_uart_init,
@@ -1382,6 +1344,4 @@ struct board_ops jsm_neo_ops = {
        .send_start_character           = neo_send_start_character,
        .send_stop_character            = neo_send_stop_character,
        .copy_data_from_queue_to_uart   = neo_copy_data_from_queue_to_uart,
-       .get_uart_bytes_left            = neo_get_uart_bytes_left,
-       .send_immediate_char            = neo_send_immediate_char
 };
index 3adb60c683f7049de37337ac6c5ae89cd1d9d4ae..a0731773ce75c5affaafd7e8dd69a173583fb035 100644 (file)
@@ -887,13 +887,11 @@ static int lqasc_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int lqasc_remove(struct platform_device *pdev)
+static void lqasc_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&lqasc_reg, port);
-
-       return 0;
 }
 
 static const struct ltq_soc_data soc_data_lantiq = {
@@ -917,7 +915,7 @@ MODULE_DEVICE_TABLE(of, ltq_asc_match);
 
 static struct platform_driver lqasc_driver = {
        .probe          = lqasc_probe,
-       .remove         = lqasc_remove,
+       .remove_new     = lqasc_remove,
        .driver         = {
                .name   = DRVNAME,
                .of_match_table = ltq_asc_match,
index a25ab1efe38ffdebd68cb31bac549941dc64dacf..3ce369f76349835a1c9bd0ac90dbcabf4d049105 100644 (file)
@@ -336,15 +336,13 @@ err_erase_id:
        return ret;
 }
 
-static int liteuart_remove(struct platform_device *pdev)
+static void liteuart_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
        unsigned int line = port->line;
 
        uart_remove_one_port(&liteuart_driver, port);
        xa_erase(&liteuart_array, line);
-
-       return 0;
 }
 
 static const struct of_device_id liteuart_of_match[] = {
@@ -355,7 +353,7 @@ MODULE_DEVICE_TABLE(of, liteuart_of_match);
 
 static struct platform_driver liteuart_platform_driver = {
        .probe = liteuart_probe,
-       .remove = liteuart_remove,
+       .remove_new = liteuart_remove,
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = liteuart_of_match,
index 5149a947b7febb0c47c9c4e85e680085e62715b3..ec20329f06036d066c00056a39588dc502e9977d 100644 (file)
@@ -659,13 +659,11 @@ static int serial_hs_lpc32xx_probe(struct platform_device *pdev)
 /*
  * Remove serial ports registered against a platform device.
  */
-static int serial_hs_lpc32xx_remove(struct platform_device *pdev)
+static void serial_hs_lpc32xx_remove(struct platform_device *pdev)
 {
        struct lpc32xx_hsuart_port *p = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&lpc32xx_hs_reg, &p->port);
-
-       return 0;
 }
 
 
@@ -702,7 +700,7 @@ MODULE_DEVICE_TABLE(of, serial_hs_lpc32xx_dt_ids);
 
 static struct platform_driver serial_hs_lpc32xx_driver = {
        .probe          = serial_hs_lpc32xx_probe,
-       .remove         = serial_hs_lpc32xx_remove,
+       .remove_new     = serial_hs_lpc32xx_remove,
        .suspend        = serial_hs_lpc32xx_suspend,
        .resume         = serial_hs_lpc32xx_resume,
        .driver         = {
index 21b574f78b8615bfa3fdc1cb00790b13900349f5..19f0a305cc43023f092369d96bec49feec3fa875 100644 (file)
@@ -558,7 +558,7 @@ static void ma35d1serial_console_write(struct console *co, const char *s, u32 co
        u32 ier;
 
        if ((co->index < 0) || (co->index >= MA35_UART_NR)) {
-               pr_warn("Failed to write on ononsole port %x, out of range\n",
+               pr_warn("Failed to write on console port %x, out of range\n",
                        co->index);
                return;
        }
@@ -754,14 +754,13 @@ err_iounmap:
 /*
  * Remove serial ports registered against a platform device.
  */
-static int ma35d1serial_remove(struct platform_device *dev)
+static void ma35d1serial_remove(struct platform_device *dev)
 {
        struct uart_port *port = platform_get_drvdata(dev);
        struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
 
        uart_remove_one_port(&ma35d1serial_reg, port);
        clk_disable_unprepare(up->clk);
-       return 0;
 }
 
 static int ma35d1serial_suspend(struct platform_device *dev, pm_message_t state)
@@ -794,7 +793,7 @@ static int ma35d1serial_resume(struct platform_device *dev)
 
 static struct platform_driver ma35d1serial_driver = {
        .probe      = ma35d1serial_probe,
-       .remove     = ma35d1serial_remove,
+       .remove_new = ma35d1serial_remove,
        .suspend    = ma35d1serial_suspend,
        .resume     = ma35d1serial_resume,
        .driver     = {
index 97e4965b73d405f821ddfb98df47936f79eae7cf..10bf6d75bf9ee7f9ee13a36796a8af5439ac5eb9 100644 (file)
 #define MAX310x_REV_MASK               (0xf8)
 #define MAX310X_WRITE_BIT              0x80
 
+/* Port startup definitions */
+#define MAX310X_PORT_STARTUP_WAIT_RETRIES      20 /* Number of retries */
+#define MAX310X_PORT_STARTUP_WAIT_DELAY_MS     10 /* Delay between retries */
+
+/* Crystal-related definitions */
+#define MAX310X_XTAL_WAIT_RETRIES      20 /* Number of retries */
+#define MAX310X_XTAL_WAIT_DELAY_MS     10 /* Delay between retries */
+
 /* MAX3107 specific */
 #define MAX3107_REV_ID                 (0xa0)
 
@@ -583,7 +591,7 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
        return 1;
 }
 
-static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
+static s32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
                               unsigned long freq, bool xtal)
 {
        unsigned int div, clksrc, pllcfg = 0;
@@ -641,12 +649,20 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
 
        /* Wait for crystal */
        if (xtal) {
-               unsigned int val;
-               msleep(10);
-               regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
-               if (!(val & MAX310X_STS_CLKREADY_BIT)) {
-                       dev_warn(dev, "clock is not stable yet\n");
-               }
+               bool stable = false;
+               unsigned int try = 0, val = 0;
+
+               do {
+                       msleep(MAX310X_XTAL_WAIT_DELAY_MS);
+                       regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
+
+                       if (val & MAX310X_STS_CLKREADY_BIT)
+                               stable = true;
+               } while (!stable && (++try < MAX310X_XTAL_WAIT_RETRIES));
+
+               if (!stable)
+                       return dev_err_probe(dev, -EAGAIN,
+                                            "clock is not stable\n");
        }
 
        return bestfreq;
@@ -780,7 +796,7 @@ static void max310x_handle_tx(struct uart_port *port)
        to_send = uart_circ_chars_pending(xmit);
        until_end = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
        if (likely(to_send)) {
-               /* Limit to size of TX FIFO */
+               /* Limit to space available in TX FIFO */
                txlen = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
                txlen = port->fifosize - txlen;
                to_send = (to_send > txlen) ? txlen : to_send;
@@ -1271,7 +1287,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
 {
        int i, ret, fmin, fmax, freq;
        struct max310x_port *s;
-       u32 uartclk = 0;
+       s32 uartclk = 0;
        bool xtal;
 
        for (i = 0; i < devtype->nr; i++)
@@ -1334,6 +1350,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
                goto out_clk;
 
        for (i = 0; i < devtype->nr; i++) {
+               bool started = false;
+               unsigned int try = 0, val = 0;
+
                /* Reset port */
                regmap_write(regmaps[i], MAX310X_MODE2_REG,
                             MAX310X_MODE2_RST_BIT);
@@ -1342,13 +1361,27 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
 
                /* Wait for port startup */
                do {
-                       regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret);
-               } while (ret != 0x01);
+                       msleep(MAX310X_PORT_STARTUP_WAIT_DELAY_MS);
+                       regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &val);
+
+                       if (val == 0x01)
+                               started = true;
+               } while (!started && (++try < MAX310X_PORT_STARTUP_WAIT_RETRIES));
+
+               if (!started) {
+                       ret = dev_err_probe(dev, -EAGAIN, "port reset failed\n");
+                       goto out_uart;
+               }
 
                regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
        }
 
        uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
+       if (uartclk < 0) {
+               ret = uartclk;
+               goto out_uart;
+       }
+
        dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
 
        for (i = 0; i < devtype->nr; i++) {
index 91b15243f6c65b17895ed485e227465ba36c176d..8690a45239e096a14bfa4d4cc6a5cea16c0a9cdd 100644 (file)
@@ -627,7 +627,7 @@ static int mcf_probe(struct platform_device *pdev)
 
 /****************************************************************************/
 
-static int mcf_remove(struct platform_device *pdev)
+static void mcf_remove(struct platform_device *pdev)
 {
        struct uart_port *port;
        int i;
@@ -637,15 +637,13 @@ static int mcf_remove(struct platform_device *pdev)
                if (port)
                        uart_remove_one_port(&mcf_driver, port);
        }
-
-       return 0;
 }
 
 /****************************************************************************/
 
 static struct platform_driver mcf_platform_driver = {
        .probe          = mcf_probe,
-       .remove         = mcf_remove,
+       .remove_new     = mcf_remove,
        .driver         = {
                .name   = "mcfuart",
        },
index 8dd84617e7158e6651cf17726bb27d3d0c0f81fc..8395688f5ee926f92a2345ab1735c2b9058d7a06 100644 (file)
@@ -795,7 +795,7 @@ static int meson_uart_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int meson_uart_remove(struct platform_device *pdev)
+static void meson_uart_remove(struct platform_device *pdev)
 {
        struct uart_driver *uart_driver;
        struct uart_port *port;
@@ -807,12 +807,10 @@ static int meson_uart_remove(struct platform_device *pdev)
 
        for (int id = 0; id < AML_UART_PORT_NUM; id++)
                if (meson_ports[id])
-                       return 0;
+                       return;
 
        /* No more available uart ports, unregister uart driver */
        uart_unregister_driver(uart_driver);
-
-       return 0;
 }
 
 static struct meson_uart_data meson_g12a_uart_data = {
@@ -852,7 +850,7 @@ MODULE_DEVICE_TABLE(of, meson_uart_dt_match);
 
 static  struct platform_driver meson_uart_platform_driver = {
        .probe          = meson_uart_probe,
-       .remove         = meson_uart_remove,
+       .remove_new     = meson_uart_remove,
        .driver         = {
                .name           = "meson_uart",
                .of_match_table = meson_uart_dt_match,
index db3b81f2aa570cb4ce843df6e856c9b11fc30a7c..da4c6f7e2a308c9c4c546f23cae569bd613a59a6 100644 (file)
@@ -552,15 +552,13 @@ failed:
        return ret;
 }
 
-static int mlb_usio_remove(struct platform_device *pdev)
+static void mlb_usio_remove(struct platform_device *pdev)
 {
        struct uart_port *port = &mlb_usio_ports[pdev->id];
        struct clk *clk = port->private_data;
 
        uart_remove_one_port(&mlb_usio_uart_driver, port);
        clk_disable_unprepare(clk);
-
-       return 0;
 }
 
 static const struct of_device_id mlb_usio_dt_ids[] = {
@@ -571,7 +569,7 @@ MODULE_DEVICE_TABLE(of, mlb_usio_dt_ids);
 
 static struct platform_driver mlb_usio_driver = {
        .probe          = mlb_usio_probe,
-       .remove         = mlb_usio_remove,
+       .remove_new     = mlb_usio_remove,
        .driver         = {
                .name   = USIO_NAME,
                .of_match_table = mlb_usio_dt_ids,
index a252465e745f754082237e5248a33159189b2420..95dae5e27b28685e744ccf137988adce998de515 100644 (file)
@@ -1765,15 +1765,12 @@ static int mpc52xx_uart_of_probe(struct platform_device *op)
        return 0;
 }
 
-static int
-mpc52xx_uart_of_remove(struct platform_device *op)
+static void mpc52xx_uart_of_remove(struct platform_device *op)
 {
        struct uart_port *port = platform_get_drvdata(op);
 
        if (port)
                uart_remove_one_port(&mpc52xx_uart_driver, port);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM
@@ -1846,7 +1843,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
 
 static struct platform_driver mpc52xx_uart_of_driver = {
        .probe          = mpc52xx_uart_of_probe,
-       .remove         = mpc52xx_uart_of_remove,
+       .remove_new     = mpc52xx_uart_of_remove,
 #ifdef CONFIG_PM
        .suspend        = mpc52xx_uart_of_suspend,
        .resume         = mpc52xx_uart_of_resume,
index 597264b546fd766098280e311901635c2e3f614b..e24204ad35deff4b2911f4e247071aae90c615d9 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
+#include <linux/pm_opp.h>
 #include <linux/delay.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -1131,7 +1132,7 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
        uart_port_unlock_irqrestore(port, flags);
 
        entry = msm_find_best_baud(port, baud, &rate);
-       clk_set_rate(msm_port->clk, rate);
+       dev_pm_opp_set_rate(port->dev, rate);
        baud = rate / 16 / entry->divisor;
 
        uart_port_lock_irqsave(port, &flags);
@@ -1186,6 +1187,7 @@ static void msm_init_clock(struct uart_port *port)
 {
        struct msm_port *msm_port = to_msm_port(port);
 
+       dev_pm_opp_set_rate(port->dev, port->uartclk);
        clk_prepare_enable(msm_port->clk);
        clk_prepare_enable(msm_port->pclk);
        msm_serial_set_mnd_regs(port);
@@ -1239,6 +1241,7 @@ err_irq:
 
        clk_disable_unprepare(msm_port->pclk);
        clk_disable_unprepare(msm_port->clk);
+       dev_pm_opp_set_rate(port->dev, 0);
 
        return ret;
 }
@@ -1254,6 +1257,7 @@ static void msm_shutdown(struct uart_port *port)
                msm_release_dma(msm_port);
 
        clk_disable_unprepare(msm_port->clk);
+       dev_pm_opp_set_rate(port->dev, 0);
 
        free_irq(port->irq, port);
 }
@@ -1419,11 +1423,13 @@ static void msm_power(struct uart_port *port, unsigned int state,
 
        switch (state) {
        case 0:
+               dev_pm_opp_set_rate(port->dev, port->uartclk);
                clk_prepare_enable(msm_port->clk);
                clk_prepare_enable(msm_port->pclk);
                break;
        case 3:
                clk_disable_unprepare(msm_port->clk);
+               dev_pm_opp_set_rate(port->dev, 0);
                clk_disable_unprepare(msm_port->pclk);
                break;
        default:
@@ -1789,7 +1795,7 @@ static int msm_serial_probe(struct platform_device *pdev)
        struct resource *resource;
        struct uart_port *port;
        const struct of_device_id *id;
-       int irq, line;
+       int irq, line, ret;
 
        if (pdev->dev.of_node)
                line = of_alias_get_id(pdev->dev.of_node, "serial");
@@ -1824,6 +1830,15 @@ static int msm_serial_probe(struct platform_device *pdev)
                        return PTR_ERR(msm_port->pclk);
        }
 
+       ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
+       if (ret)
+               return ret;
+
+       /* OPP table is optional */
+       ret = devm_pm_opp_of_add_table(&pdev->dev);
+       if (ret && ret != -ENODEV)
+               return dev_err_probe(&pdev->dev, ret, "invalid OPP table\n");
+
        port->uartclk = clk_get_rate(msm_port->clk);
        dev_info(&pdev->dev, "uartclk = %d\n", port->uartclk);
 
@@ -1843,13 +1858,11 @@ static int msm_serial_probe(struct platform_device *pdev)
        return uart_add_one_port(&msm_uart_driver, port);
 }
 
-static int msm_serial_remove(struct platform_device *pdev)
+static void msm_serial_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&msm_uart_driver, port);
-
-       return 0;
 }
 
 static const struct of_device_id msm_match_table[] = {
@@ -1882,7 +1895,7 @@ static const struct dev_pm_ops msm_serial_dev_pm_ops = {
 };
 
 static struct platform_driver msm_platform_driver = {
-       .remove = msm_serial_remove,
+       .remove_new = msm_serial_remove,
        .probe = msm_serial_probe,
        .driver = {
                .name = "msm_serial",
index 8eeecf8ad3596b9463940ef78bc330600fe58d3b..3ec725555bcc1e6eb2843a186a5c49cba1ab0b42 100644 (file)
@@ -904,21 +904,27 @@ static void mxs_auart_dma_exit(struct mxs_auart_port *s)
 
 static int mxs_auart_dma_init(struct mxs_auart_port *s)
 {
+       struct dma_chan *chan;
+
        if (auart_dma_enabled(s))
                return 0;
 
        /* init for RX */
-       s->rx_dma_chan = dma_request_slave_channel(s->dev, "rx");
-       if (!s->rx_dma_chan)
+       chan = dma_request_chan(s->dev, "rx");
+       if (IS_ERR(chan))
                goto err_out;
+       s->rx_dma_chan = chan;
+
        s->rx_dma_buf = kzalloc(UART_XMIT_SIZE, GFP_KERNEL | GFP_DMA);
        if (!s->rx_dma_buf)
                goto err_out;
 
        /* init for TX */
-       s->tx_dma_chan = dma_request_slave_channel(s->dev, "tx");
-       if (!s->tx_dma_chan)
+       chan = dma_request_chan(s->dev, "tx");
+       if (IS_ERR(chan))
                goto err_out;
+       s->tx_dma_chan = chan;
+
        s->tx_dma_buf = kzalloc(UART_XMIT_SIZE, GFP_KERNEL | GFP_DMA);
        if (!s->tx_dma_buf)
                goto err_out;
@@ -1686,7 +1692,7 @@ out_disable_clks:
        return ret;
 }
 
-static int mxs_auart_remove(struct platform_device *pdev)
+static void mxs_auart_remove(struct platform_device *pdev)
 {
        struct mxs_auart_port *s = platform_get_drvdata(pdev);
 
@@ -1698,13 +1704,11 @@ static int mxs_auart_remove(struct platform_device *pdev)
                clk_disable_unprepare(s->clk);
                clk_disable_unprepare(s->clk_ahb);
        }
-
-       return 0;
 }
 
 static struct platform_driver mxs_auart_driver = {
        .probe = mxs_auart_probe,
-       .remove = mxs_auart_remove,
+       .remove_new = mxs_auart_remove,
        .driver = {
                .name = "mxs-auart",
                .of_match_table = mxs_auart_dt_ids,
index ad4c1c5d0a7f0bf30127813879576d4377b6d743..f5a0b401af63b15c5c8a8d49ec2742a6ce58e263 100644 (file)
@@ -1483,6 +1483,13 @@ static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
        return omap_up_info;
 }
 
+static const struct serial_rs485 serial_omap_rs485_supported = {
+       .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
+                SER_RS485_RX_DURING_TX,
+       .delay_rts_before_send = 1,
+       .delay_rts_after_send = 1,
+};
+
 static int serial_omap_probe_rs485(struct uart_omap_port *up,
                                   struct device *dev)
 {
@@ -1497,6 +1504,9 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
        if (!np)
                return 0;
 
+       up->port.rs485_config = serial_omap_config_rs485;
+       up->port.rs485_supported = serial_omap_rs485_supported;
+
        ret = uart_get_rs485_mode(&up->port);
        if (ret)
                return ret;
@@ -1531,13 +1541,6 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
        return 0;
 }
 
-static const struct serial_rs485 serial_omap_rs485_supported = {
-       .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
-                SER_RS485_RX_DURING_TX,
-       .delay_rts_before_send = 1,
-       .delay_rts_after_send = 1,
-};
-
 static int serial_omap_probe(struct platform_device *pdev)
 {
        struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
@@ -1604,17 +1607,11 @@ static int serial_omap_probe(struct platform_device *pdev)
                dev_info(up->port.dev, "no wakeirq for uart%d\n",
                         up->port.line);
 
-       ret = serial_omap_probe_rs485(up, &pdev->dev);
-       if (ret < 0)
-               goto err_rs485;
-
        sprintf(up->name, "OMAP UART%d", up->port.line);
        up->port.mapbase = mem->start;
        up->port.membase = base;
        up->port.flags = omap_up_info->flags;
        up->port.uartclk = omap_up_info->uartclk;
-       up->port.rs485_config = serial_omap_config_rs485;
-       up->port.rs485_supported = serial_omap_rs485_supported;
        if (!up->port.uartclk) {
                up->port.uartclk = DEFAULT_CLK_SPEED;
                dev_warn(&pdev->dev,
@@ -1622,6 +1619,10 @@ static int serial_omap_probe(struct platform_device *pdev)
                         DEFAULT_CLK_SPEED);
        }
 
+       ret = serial_omap_probe_rs485(up, &pdev->dev);
+       if (ret < 0)
+               goto err_rs485;
+
        up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
        up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
        cpu_latency_qos_add_request(&up->pm_qos_request, up->latency);
@@ -1658,7 +1659,7 @@ err_port_line:
        return ret;
 }
 
-static int serial_omap_remove(struct platform_device *dev)
+static void serial_omap_remove(struct platform_device *dev)
 {
        struct uart_omap_port *up = platform_get_drvdata(dev);
 
@@ -1670,8 +1671,6 @@ static int serial_omap_remove(struct platform_device *dev)
        pm_runtime_disable(up->dev);
        cpu_latency_qos_remove_request(&up->pm_qos_request);
        device_init_wakeup(&dev->dev, false);
-
-       return 0;
 }
 
 /*
@@ -1808,7 +1807,7 @@ MODULE_DEVICE_TABLE(of, omap_serial_of_match);
 
 static struct platform_driver serial_omap_driver = {
        .probe          = serial_omap_probe,
-       .remove         = serial_omap_remove,
+       .remove_new     = serial_omap_remove,
        .driver         = {
                .name   = OMAP_SERIAL_DRIVER_NAME,
                .pm     = &serial_omap_dev_pm_ops,
index 919f5e5aa0f1c5594dfdf407a5ef01ed42d9b3f2..d9fe85397741dd7813571133ad85d928566f0783 100644 (file)
@@ -725,20 +725,18 @@ static int owl_uart_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int owl_uart_remove(struct platform_device *pdev)
+static void owl_uart_remove(struct platform_device *pdev)
 {
        struct owl_uart_port *owl_port = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&owl_uart_driver, &owl_port->port);
        owl_uart_ports[pdev->id] = NULL;
        clk_disable_unprepare(owl_port->clk);
-
-       return 0;
 }
 
 static struct platform_driver owl_uart_platform_driver = {
        .probe = owl_uart_probe,
-       .remove = owl_uart_remove,
+       .remove_new = owl_uart_remove,
        .driver = {
                .name = "owl-uart",
                .of_match_table = owl_uart_dt_matches,
index 3a95bf5d55d367f3362761e6e6162defbd06d425..bbb46e6e98a24fc24b172e8bc9642b497a3e70a5 100644 (file)
@@ -938,7 +938,7 @@ err:
        return ret;
 }
 
-static int pic32_uart_remove(struct platform_device *pdev)
+static void pic32_uart_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
        struct pic32_sport *sport = to_pic32_sport(port);
@@ -947,9 +947,6 @@ static int pic32_uart_remove(struct platform_device *pdev)
        clk_disable_unprepare(sport->clk);
        platform_set_drvdata(pdev, NULL);
        pic32_sports[sport->idx] = NULL;
-
-       /* automatic unroll of sport and gpios */
-       return 0;
 }
 
 static const struct of_device_id pic32_serial_dt_ids[] = {
@@ -960,7 +957,7 @@ MODULE_DEVICE_TABLE(of, pic32_serial_dt_ids);
 
 static struct platform_driver pic32_uart_platform_driver = {
        .probe          = pic32_uart_probe,
-       .remove         = pic32_uart_remove,
+       .remove_new     = pic32_uart_remove,
        .driver         = {
                .name   = PIC32_DEV_NAME,
                .of_match_table = of_match_ptr(pic32_serial_dt_ids),
index 7e78f97e8f4359470d9dab35206cce8b4d236d4c..e63a8fbe63bdb22b70fd9362fae5d557c4c59b75 100644 (file)
@@ -1696,7 +1696,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int qcom_geni_serial_remove(struct platform_device *pdev)
+static void qcom_geni_serial_remove(struct platform_device *pdev)
 {
        struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
        struct uart_driver *drv = port->private_data.drv;
@@ -1704,8 +1704,6 @@ static int qcom_geni_serial_remove(struct platform_device *pdev)
        dev_pm_clear_wake_irq(&pdev->dev);
        device_init_wakeup(&pdev->dev, false);
        uart_remove_one_port(drv, &port->uport);
-
-       return 0;
 }
 
 static int qcom_geni_serial_sys_suspend(struct device *dev)
@@ -1805,7 +1803,7 @@ static const struct of_device_id qcom_geni_serial_match_table[] = {
 MODULE_DEVICE_TABLE(of, qcom_geni_serial_match_table);
 
 static struct platform_driver qcom_geni_serial_platform_driver = {
-       .remove = qcom_geni_serial_remove,
+       .remove_new = qcom_geni_serial_remove,
        .probe = qcom_geni_serial_probe,
        .driver = {
                .name = "qcom_geni_serial",
index d824c8318f3307a94735b76a71d04e9216ff816a..13deb355cf1bc36509a3c8676bb3aa8eaa11f437 100644 (file)
@@ -780,19 +780,17 @@ static int rda_uart_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int rda_uart_remove(struct platform_device *pdev)
+static void rda_uart_remove(struct platform_device *pdev)
 {
        struct rda_uart_port *rda_port = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&rda_uart_driver, &rda_port->port);
        rda_uart_ports[pdev->id] = NULL;
-
-       return 0;
 }
 
 static struct platform_driver rda_uart_platform_driver = {
        .probe = rda_uart_probe,
-       .remove = rda_uart_remove,
+       .remove_new = rda_uart_remove,
        .driver = {
                .name = "rda-uart",
                .of_match_table = rda_uart_dt_matches,
index d46a81cddfcd7403220536a5d8d4d57fdf28a4da..4132fcff7d4e293df8a47631031aa6f6c0d46322 100644 (file)
@@ -178,7 +178,6 @@ struct rp2_card;
 struct rp2_uart_port {
        struct uart_port                port;
        int                             idx;
-       int                             ignore_rx;
        struct rp2_card                 *card;
        void __iomem                    *asic_base;
        void __iomem                    *base;
index be7bcd75d9f4973c7f28ca553c6f611fb765e132..79c794fa654512ae6a912c900cbbd87ef9671fe2 100644 (file)
@@ -870,19 +870,17 @@ static int sa1100_serial_probe(struct platform_device *dev)
        return 0;
 }
 
-static int sa1100_serial_remove(struct platform_device *pdev)
+static void sa1100_serial_remove(struct platform_device *pdev)
 {
        struct sa1100_port *sport = platform_get_drvdata(pdev);
 
        if (sport)
                uart_remove_one_port(&sa1100_reg, &sport->port);
-
-       return 0;
 }
 
 static struct platform_driver sa11x0_serial_driver = {
        .probe          = sa1100_serial_probe,
-       .remove         = sa1100_serial_remove,
+       .remove_new     = sa1100_serial_remove,
        .suspend        = sa1100_serial_suspend,
        .resume         = sa1100_serial_resume,
        .driver         = {
index 66bd6c090aceb7e194b5711f6784368337b4712c..71d17d804fdabb85779a28d866100bbb0325b29e 100644 (file)
@@ -2054,7 +2054,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int s3c24xx_serial_remove(struct platform_device *dev)
+static void s3c24xx_serial_remove(struct platform_device *dev)
 {
        struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
 
@@ -2063,8 +2063,6 @@ static int s3c24xx_serial_remove(struct platform_device *dev)
        }
 
        uart_unregister_driver(&s3c24xx_uart_drv);
-
-       return 0;
 }
 
 /* UART power management code */
@@ -2627,7 +2625,7 @@ MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
 
 static struct platform_driver samsung_serial_driver = {
        .probe          = s3c24xx_serial_probe,
-       .remove         = s3c24xx_serial_remove,
+       .remove_new     = s3c24xx_serial_remove,
        .id_table       = s3c24xx_serial_driver_ids,
        .driver         = {
                .name   = "samsung-uart",
index cf0c6120d30edee40728934c1e8f447ae990b2f2..929206a9a6e1150f9ec9803240840cfe6c11a37a 100644 (file)
 #include <linux/tty_flip.h>
 #include <linux/spi/spi.h>
 #include <linux/uaccess.h>
+#include <linux/units.h>
 #include <uapi/linux/sched/types.h>
 
 #define SC16IS7XX_NAME                 "sc16is7xx"
 #define SC16IS7XX_MAX_DEVS             8
+#define SC16IS7XX_MAX_PORTS            2 /* Maximum number of UART ports per IC. */
 
 /* SC16IS7XX register definitions */
 #define SC16IS7XX_RHR_REG              (0x00) /* RX FIFO */
 
 
 /* Misc definitions */
+#define SC16IS7XX_SPI_READ_BIT         BIT(7)
 #define SC16IS7XX_FIFO_SIZE            (64)
-#define SC16IS7XX_REG_SHIFT            2
 #define SC16IS7XX_GPIOS_PER_BANK       4
 
 struct sc16is7xx_devtype {
@@ -322,18 +324,19 @@ struct sc16is7xx_one_config {
 
 struct sc16is7xx_one {
        struct uart_port                port;
-       u8                              line;
+       struct regmap                   *regmap;
+       struct mutex                    efr_lock; /* EFR registers access */
        struct kthread_work             tx_work;
        struct kthread_work             reg_work;
        struct kthread_delayed_work     ms_work;
        struct sc16is7xx_one_config     config;
-       bool                            irda_mode;
        unsigned int                    old_mctrl;
+       u8                              old_lcr; /* Value before EFR access. */
+       bool                            irda_mode;
 };
 
 struct sc16is7xx_port {
        const struct sc16is7xx_devtype  *devtype;
-       struct regmap                   *regmap;
        struct clk                      *clk;
 #ifdef CONFIG_GPIOLIB
        struct gpio_chip                gpio;
@@ -343,65 +346,47 @@ struct sc16is7xx_port {
        unsigned char                   buf[SC16IS7XX_FIFO_SIZE];
        struct kthread_worker           kworker;
        struct task_struct              *kworker_task;
-       struct mutex                    efr_lock;
        struct sc16is7xx_one            p[];
 };
 
-static unsigned long sc16is7xx_lines;
+static DECLARE_BITMAP(sc16is7xx_lines, SC16IS7XX_MAX_DEVS);
 
 static struct uart_driver sc16is7xx_uart = {
        .owner          = THIS_MODULE,
+       .driver_name    = SC16IS7XX_NAME,
        .dev_name       = "ttySC",
        .nr             = SC16IS7XX_MAX_DEVS,
 };
 
-static void sc16is7xx_ier_set(struct uart_port *port, u8 bit);
-static void sc16is7xx_stop_tx(struct uart_port *port);
-
 #define to_sc16is7xx_one(p,e)  ((container_of((p), struct sc16is7xx_one, e)))
 
-static int sc16is7xx_line(struct uart_port *port)
-{
-       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
-
-       return one->line;
-}
-
 static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
 {
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
        unsigned int val = 0;
-       const u8 line = sc16is7xx_line(port);
 
-       regmap_read(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, &val);
+       regmap_read(one->regmap, reg, &val);
 
        return val;
 }
 
 static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
 {
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
-       const u8 line = sc16is7xx_line(port);
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
-       regmap_write(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, val);
+       regmap_write(one->regmap, reg, val);
 }
 
-static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
+static void sc16is7xx_fifo_read(struct uart_port *port, u8 *rxbuf, unsigned int rxlen)
 {
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
-       const u8 line = sc16is7xx_line(port);
-       u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | line;
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
-       regcache_cache_bypass(s->regmap, true);
-       regmap_raw_read(s->regmap, addr, s->buf, rxlen);
-       regcache_cache_bypass(s->regmap, false);
+       regmap_noinc_read(one->regmap, SC16IS7XX_RHR_REG, rxbuf, rxlen);
 }
 
-static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+static void sc16is7xx_fifo_write(struct uart_port *port, u8 *txbuf, u8 to_send)
 {
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
-       const u8 line = sc16is7xx_line(port);
-       u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | line;
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
        /*
         * Don't send zero-length data, at least on SPI it confuses the chip
@@ -410,39 +395,101 @@ static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
        if (unlikely(!to_send))
                return;
 
-       regcache_cache_bypass(s->regmap, true);
-       regmap_raw_write(s->regmap, addr, s->buf, to_send);
-       regcache_cache_bypass(s->regmap, false);
+       regmap_noinc_write(one->regmap, SC16IS7XX_THR_REG, txbuf, to_send);
 }
 
 static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
                                  u8 mask, u8 val)
+{
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+       regmap_update_bits(one->regmap, reg, mask, val);
+}
+
+static void sc16is7xx_power(struct uart_port *port, int on)
+{
+       sc16is7xx_port_update(port, SC16IS7XX_IER_REG,
+                             SC16IS7XX_IER_SLEEP_BIT,
+                             on ? 0 : SC16IS7XX_IER_SLEEP_BIT);
+}
+
+/*
+ * In an amazing feat of design, the Enhanced Features Register (EFR)
+ * shares the address of the Interrupt Identification Register (IIR).
+ * Access to EFR is switched on by writing a magic value (0xbf) to the
+ * Line Control Register (LCR). Any interrupt firing during this time will
+ * see the EFR where it expects the IIR to be, leading to
+ * "Unexpected interrupt" messages.
+ *
+ * Prevent this possibility by claiming a mutex while accessing the EFR,
+ * and claiming the same mutex from within the interrupt handler. This is
+ * similar to disabling the interrupt, but that doesn't work because the
+ * bulk of the interrupt processing is run as a workqueue job in thread
+ * context.
+ */
+static void sc16is7xx_efr_lock(struct uart_port *port)
+{
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+       mutex_lock(&one->efr_lock);
+
+       /* Backup content of LCR. */
+       one->old_lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
+
+       /* Enable access to Enhanced register set */
+       sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_CONF_MODE_B);
+
+       /* Disable cache updates when writing to EFR registers */
+       regcache_cache_bypass(one->regmap, true);
+}
+
+static void sc16is7xx_efr_unlock(struct uart_port *port)
+{
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+       /* Re-enable cache updates when writing to normal registers */
+       regcache_cache_bypass(one->regmap, false);
+
+       /* Restore original content of LCR */
+       sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, one->old_lcr);
+
+       mutex_unlock(&one->efr_lock);
+}
+
+static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
 {
        struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
-       const u8 line = sc16is7xx_line(port);
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+       lockdep_assert_held_once(&port->lock);
 
-       regmap_update_bits(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line,
-                          mask, val);
+       one->config.flags |= SC16IS7XX_RECONF_IER;
+       one->config.ier_mask |= bit;
+       one->config.ier_val &= ~bit;
+       kthread_queue_work(&s->kworker, &one->reg_work);
 }
 
-static int sc16is7xx_alloc_line(void)
+static void sc16is7xx_ier_set(struct uart_port *port, u8 bit)
 {
-       int i;
+       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
-       BUILD_BUG_ON(SC16IS7XX_MAX_DEVS > BITS_PER_LONG);
+       lockdep_assert_held_once(&port->lock);
 
-       for (i = 0; i < SC16IS7XX_MAX_DEVS; i++)
-               if (!test_and_set_bit(i, &sc16is7xx_lines))
-                       break;
+       one->config.flags |= SC16IS7XX_RECONF_IER;
+       one->config.ier_mask |= bit;
+       one->config.ier_val |= bit;
+       kthread_queue_work(&s->kworker, &one->reg_work);
+}
 
-       return i;
+static void sc16is7xx_stop_tx(struct uart_port *port)
+{
+       sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT);
 }
 
-static void sc16is7xx_power(struct uart_port *port, int on)
+static void sc16is7xx_stop_rx(struct uart_port *port)
 {
-       sc16is7xx_port_update(port, SC16IS7XX_IER_REG,
-                             SC16IS7XX_IER_SLEEP_BIT,
-                             on ? 0 : SC16IS7XX_IER_SLEEP_BIT);
+       sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
 }
 
 static const struct sc16is7xx_devtype sc16is74x_devtype = {
@@ -477,7 +524,7 @@ static const struct sc16is7xx_devtype sc16is762_devtype = {
 
 static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
 {
-       switch (reg >> SC16IS7XX_REG_SHIFT) {
+       switch (reg) {
        case SC16IS7XX_RHR_REG:
        case SC16IS7XX_IIR_REG:
        case SC16IS7XX_LSR_REG:
@@ -488,85 +535,60 @@ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
        case SC16IS7XX_IOCONTROL_REG:
                return true;
        default:
-               break;
+               return false;
        }
-
-       return false;
 }
 
 static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg)
 {
-       switch (reg >> SC16IS7XX_REG_SHIFT) {
+       switch (reg) {
        case SC16IS7XX_RHR_REG:
                return true;
        default:
-               break;
+               return false;
        }
+}
 
-       return false;
+static bool sc16is7xx_regmap_noinc(struct device *dev, unsigned int reg)
+{
+       return reg == SC16IS7XX_RHR_REG;
 }
 
 static int sc16is7xx_set_baud(struct uart_port *port, int baud)
 {
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
        u8 lcr;
        u8 prescaler = 0;
        unsigned long clk = port->uartclk, div = clk / 16 / baud;
 
-       if (div > 0xffff) {
+       if (div >= BIT(16)) {
                prescaler = SC16IS7XX_MCR_CLKSEL_BIT;
                div /= 4;
        }
 
-       /* In an amazing feat of design, the Enhanced Features Register shares
-        * the address of the Interrupt Identification Register, and is
-        * switched in by writing a magic value (0xbf) to the Line Control
-        * Register. Any interrupt firing during this time will see the EFR
-        * where it expects the IIR to be, leading to "Unexpected interrupt"
-        * messages.
-        *
-        * Prevent this possibility by claiming a mutex while accessing the
-        * EFR, and claiming the same mutex from within the interrupt handler.
-        * This is similar to disabling the interrupt, but that doesn't work
-        * because the bulk of the interrupt processing is run as a workqueue
-        * job in thread context.
-        */
-       mutex_lock(&s->efr_lock);
-
-       lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
-
-       /* Open the LCR divisors for configuration */
-       sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
-                            SC16IS7XX_LCR_CONF_MODE_B);
-
        /* Enable enhanced features */
-       regcache_cache_bypass(s->regmap, true);
+       sc16is7xx_efr_lock(port);
        sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
                              SC16IS7XX_EFR_ENABLE_BIT,
                              SC16IS7XX_EFR_ENABLE_BIT);
-
-       regcache_cache_bypass(s->regmap, false);
-
-       /* Put LCR back to the normal mode */
-       sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
-
-       mutex_unlock(&s->efr_lock);
+       sc16is7xx_efr_unlock(port);
 
        sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
                              SC16IS7XX_MCR_CLKSEL_BIT,
                              prescaler);
 
-       /* Open the LCR divisors for configuration */
+       /* Backup LCR and access special register set (DLL/DLH) */
+       lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
        sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
                             SC16IS7XX_LCR_CONF_MODE_A);
 
        /* Write the new divisor */
-       regcache_cache_bypass(s->regmap, true);
+       regcache_cache_bypass(one->regmap, true);
        sc16is7xx_port_write(port, SC16IS7XX_DLH_REG, div / 256);
        sc16is7xx_port_write(port, SC16IS7XX_DLL_REG, div % 256);
-       regcache_cache_bypass(s->regmap, false);
+       regcache_cache_bypass(one->regmap, false);
 
-       /* Put LCR back to the normal mode */
+       /* Restore LCR and access to general register set */
        sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
 
        return DIV_ROUND_CLOSEST(clk / 16, div);
@@ -602,7 +624,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
                        s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
                        bytes_read = 1;
                } else {
-                       sc16is7xx_fifo_read(port, rxlen);
+                       sc16is7xx_fifo_read(port, s->buf, rxlen);
                        bytes_read = rxlen;
                }
 
@@ -675,7 +697,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
        /* Get length of data pending in circular buffer */
        to_send = uart_circ_chars_pending(xmit);
        if (likely(to_send)) {
-               /* Limit to size of TX FIFO */
+               /* Limit to space available in TX FIFO */
                txlen = sc16is7xx_port_read(port, SC16IS7XX_TXLVL_REG);
                if (txlen > SC16IS7XX_FIFO_SIZE) {
                        dev_err_ratelimited(port->dev,
@@ -691,7 +713,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
                        uart_xmit_advance(port, 1);
                }
 
-               sc16is7xx_fifo_write(port, to_send);
+               sc16is7xx_fifo_write(port, s->buf, to_send);
        }
 
        uart_port_lock_irqsave(port, &flags);
@@ -700,6 +722,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
 
        if (uart_circ_empty(xmit))
                sc16is7xx_stop_tx(port);
+       else
+               sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
        uart_port_unlock_irqrestore(port, flags);
 }
 
@@ -718,11 +742,10 @@ static unsigned int sc16is7xx_get_hwmctrl(struct uart_port *port)
 static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
 {
        struct uart_port *port = &one->port;
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
        unsigned long flags;
        unsigned int status, changed;
 
-       lockdep_assert_held_once(&s->efr_lock);
+       lockdep_assert_held_once(&one->efr_lock);
 
        status = sc16is7xx_get_hwmctrl(port);
        changed = status ^ one->old_mctrl;
@@ -748,74 +771,77 @@ static void sc16is7xx_update_mlines(struct sc16is7xx_one *one)
 
 static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
 {
+       bool rc = true;
+       unsigned int iir, rxlen;
        struct uart_port *port = &s->p[portno].port;
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
-       do {
-               unsigned int iir, rxlen;
-               struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
-
-               iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
-               if (iir & SC16IS7XX_IIR_NO_INT_BIT)
-                       return false;
-
-               iir &= SC16IS7XX_IIR_ID_MASK;
-
-               switch (iir) {
-               case SC16IS7XX_IIR_RDI_SRC:
-               case SC16IS7XX_IIR_RLSE_SRC:
-               case SC16IS7XX_IIR_RTOI_SRC:
-               case SC16IS7XX_IIR_XOFFI_SRC:
-                       rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
-
-                       /*
-                        * There is a silicon bug that makes the chip report a
-                        * time-out interrupt but no data in the FIFO. This is
-                        * described in errata section 18.1.4.
-                        *
-                        * When this happens, read one byte from the FIFO to
-                        * clear the interrupt.
-                        */
-                       if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
-                               rxlen = 1;
-
-                       if (rxlen)
-                               sc16is7xx_handle_rx(port, rxlen, iir);
-                       break;
+       mutex_lock(&one->efr_lock);
+
+       iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
+       if (iir & SC16IS7XX_IIR_NO_INT_BIT) {
+               rc = false;
+               goto out_port_irq;
+       }
+
+       iir &= SC16IS7XX_IIR_ID_MASK;
+
+       switch (iir) {
+       case SC16IS7XX_IIR_RDI_SRC:
+       case SC16IS7XX_IIR_RLSE_SRC:
+       case SC16IS7XX_IIR_RTOI_SRC:
+       case SC16IS7XX_IIR_XOFFI_SRC:
+               rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
+
+               /*
+                * There is a silicon bug that makes the chip report a
+                * time-out interrupt but no data in the FIFO. This is
+                * described in errata section 18.1.4.
+                *
+                * When this happens, read one byte from the FIFO to
+                * clear the interrupt.
+                */
+               if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
+                       rxlen = 1;
+
+               if (rxlen)
+                       sc16is7xx_handle_rx(port, rxlen, iir);
+               break;
                /* CTSRTS interrupt comes only when CTS goes inactive */
-               case SC16IS7XX_IIR_CTSRTS_SRC:
-               case SC16IS7XX_IIR_MSI_SRC:
-                       sc16is7xx_update_mlines(one);
-                       break;
-               case SC16IS7XX_IIR_THRI_SRC:
-                       sc16is7xx_handle_tx(port);
-                       break;
-               default:
-                       dev_err_ratelimited(port->dev,
-                                           "ttySC%i: Unexpected interrupt: %x",
-                                           port->line, iir);
-                       break;
-               }
-       } while (0);
-       return true;
+       case SC16IS7XX_IIR_CTSRTS_SRC:
+       case SC16IS7XX_IIR_MSI_SRC:
+               sc16is7xx_update_mlines(one);
+               break;
+       case SC16IS7XX_IIR_THRI_SRC:
+               sc16is7xx_handle_tx(port);
+               break;
+       default:
+               dev_err_ratelimited(port->dev,
+                                   "ttySC%i: Unexpected interrupt: %x",
+                                   port->line, iir);
+               break;
+       }
+
+out_port_irq:
+       mutex_unlock(&one->efr_lock);
+
+       return rc;
 }
 
 static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
 {
-       struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
+       bool keep_polling;
 
-       mutex_lock(&s->efr_lock);
+       struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
 
-       while (1) {
-               bool keep_polling = false;
+       do {
                int i;
 
+               keep_polling = false;
+
                for (i = 0; i < s->devtype->nr_uart; ++i)
                        keep_polling |= sc16is7xx_port_irq(s, i);
-               if (!keep_polling)
-                       break;
-       }
-
-       mutex_unlock(&s->efr_lock);
+       } while (keep_polling);
 
        return IRQ_HANDLED;
 }
@@ -823,20 +849,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
 static void sc16is7xx_tx_proc(struct kthread_work *ws)
 {
        struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
-       unsigned long flags;
+       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
        if ((port->rs485.flags & SER_RS485_ENABLED) &&
            (port->rs485.delay_rts_before_send > 0))
                msleep(port->rs485.delay_rts_before_send);
 
-       mutex_lock(&s->efr_lock);
+       mutex_lock(&one->efr_lock);
        sc16is7xx_handle_tx(port);
-       mutex_unlock(&s->efr_lock);
-
-       uart_port_lock_irqsave(port, &flags);
-       sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT);
-       uart_port_unlock_irqrestore(port, flags);
+       mutex_unlock(&one->efr_lock);
 }
 
 static void sc16is7xx_reconf_rs485(struct uart_port *port)
@@ -897,51 +918,15 @@ static void sc16is7xx_reg_proc(struct kthread_work *ws)
                sc16is7xx_reconf_rs485(&one->port);
 }
 
-static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
-{
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
-       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
-
-       lockdep_assert_held_once(&port->lock);
-
-       one->config.flags |= SC16IS7XX_RECONF_IER;
-       one->config.ier_mask |= bit;
-       one->config.ier_val &= ~bit;
-       kthread_queue_work(&s->kworker, &one->reg_work);
-}
-
-static void sc16is7xx_ier_set(struct uart_port *port, u8 bit)
-{
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
-       struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
-
-       lockdep_assert_held_once(&port->lock);
-
-       one->config.flags |= SC16IS7XX_RECONF_IER;
-       one->config.ier_mask |= bit;
-       one->config.ier_val |= bit;
-       kthread_queue_work(&s->kworker, &one->reg_work);
-}
-
-static void sc16is7xx_stop_tx(struct uart_port *port)
-{
-       sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT);
-}
-
-static void sc16is7xx_stop_rx(struct uart_port *port)
-{
-       sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
-}
-
 static void sc16is7xx_ms_proc(struct kthread_work *ws)
 {
        struct sc16is7xx_one *one = to_sc16is7xx_one(ws, ms_work.work);
        struct sc16is7xx_port *s = dev_get_drvdata(one->port.dev);
 
        if (one->port.state) {
-               mutex_lock(&s->efr_lock);
+               mutex_lock(&one->efr_lock);
                sc16is7xx_update_mlines(one);
-               mutex_unlock(&s->efr_lock);
+               mutex_unlock(&one->efr_lock);
 
                kthread_queue_delayed_work(&s->kworker, &one->ms_work, HZ);
        }
@@ -1025,7 +1010,6 @@ static void sc16is7xx_set_termios(struct uart_port *port,
                                  struct ktermios *termios,
                                  const struct ktermios *old)
 {
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
        struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
        unsigned int lcr, flow = 0;
        int baud;
@@ -1083,17 +1067,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
        if (!(termios->c_cflag & CREAD))
                port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
 
-       /* As above, claim the mutex while accessing the EFR. */
-       mutex_lock(&s->efr_lock);
-
-       sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
-                            SC16IS7XX_LCR_CONF_MODE_B);
-
        /* Configure flow control */
-       regcache_cache_bypass(s->regmap, true);
-       sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]);
-       sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]);
-
        port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
        if (termios->c_cflag & CRTSCTS) {
                flow |= SC16IS7XX_EFR_AUTOCTS_BIT |
@@ -1105,16 +1079,16 @@ static void sc16is7xx_set_termios(struct uart_port *port,
        if (termios->c_iflag & IXOFF)
                flow |= SC16IS7XX_EFR_SWFLOW1_BIT;
 
-       sc16is7xx_port_update(port,
-                             SC16IS7XX_EFR_REG,
-                             SC16IS7XX_EFR_FLOWCTRL_BITS,
-                             flow);
-       regcache_cache_bypass(s->regmap, false);
-
        /* Update LCR register */
        sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
 
-       mutex_unlock(&s->efr_lock);
+       /* Update EFR registers */
+       sc16is7xx_efr_lock(port);
+       sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]);
+       sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]);
+       sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+                             SC16IS7XX_EFR_FLOWCTRL_BITS, flow);
+       sc16is7xx_efr_unlock(port);
 
        /* Get baud rate generator configuration */
        baud = uart_get_baud_rate(port, termios, old,
@@ -1160,7 +1134,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port, struct ktermios *termi
 static int sc16is7xx_startup(struct uart_port *port)
 {
        struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
-       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
        unsigned int val;
        unsigned long flags;
 
@@ -1177,7 +1150,7 @@ static int sc16is7xx_startup(struct uart_port *port)
        sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
                             SC16IS7XX_LCR_CONF_MODE_B);
 
-       regcache_cache_bypass(s->regmap, true);
+       regcache_cache_bypass(one->regmap, true);
 
        /* Enable write access to enhanced features and internal clock div */
        sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
@@ -1195,7 +1168,7 @@ static int sc16is7xx_startup(struct uart_port *port)
                             SC16IS7XX_TCR_RX_RESUME(24) |
                             SC16IS7XX_TCR_RX_HALT(48));
 
-       regcache_cache_bypass(s->regmap, false);
+       regcache_cache_bypass(one->regmap, false);
 
        /* Now, initialize the UART */
        sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);
@@ -1425,7 +1398,7 @@ static void sc16is7xx_setup_irda_ports(struct sc16is7xx_port *s)
        int i;
        int ret;
        int count;
-       u32 irda_port[2];
+       u32 irda_port[SC16IS7XX_MAX_PORTS];
        struct device *dev = s->p[0].port.dev;
 
        count = device_property_count_u32(dev, "irda-mode-ports");
@@ -1446,12 +1419,13 @@ static void sc16is7xx_setup_irda_ports(struct sc16is7xx_port *s)
 /*
  * Configure ports designated to operate as modem control lines.
  */
-static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
+static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s,
+                                      struct regmap *regmap)
 {
        int i;
        int ret;
        int count;
-       u32 mctrl_port[2];
+       u32 mctrl_port[SC16IS7XX_MAX_PORTS];
        struct device *dev = s->p[0].port.dev;
 
        count = device_property_count_u32(dev, "nxp,modem-control-line-ports");
@@ -1475,8 +1449,8 @@ static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
 
        if (s->mctrl_mask)
                regmap_update_bits(
-                       s->regmap,
-                       SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
+                       regmap,
+                       SC16IS7XX_IOCONTROL_REG,
                        SC16IS7XX_IOCONTROL_MODEM_A_BIT |
                        SC16IS7XX_IOCONTROL_MODEM_B_BIT, s->mctrl_mask);
 
@@ -1491,7 +1465,7 @@ static const struct serial_rs485 sc16is7xx_rs485_supported = {
 
 static int sc16is7xx_probe(struct device *dev,
                           const struct sc16is7xx_devtype *devtype,
-                          struct regmap *regmap, int irq)
+                          struct regmap *regmaps[], int irq)
 {
        unsigned long freq = 0, *pfreq = dev_get_platdata(dev);
        unsigned int val;
@@ -1499,16 +1473,20 @@ static int sc16is7xx_probe(struct device *dev,
        int i, ret;
        struct sc16is7xx_port *s;
 
-       if (IS_ERR(regmap))
-               return PTR_ERR(regmap);
+       for (i = 0; i < devtype->nr_uart; i++)
+               if (IS_ERR(regmaps[i]))
+                       return PTR_ERR(regmaps[i]);
 
        /*
         * This device does not have an identification register that would
         * tell us if we are really connected to the correct device.
         * The best we can do is to check if communication is at all possible.
+        *
+        * Note: regmap[0] is used in the probe function to access registers
+        * common to all channels/ports, as it is guaranteed to be present on
+        * all variants.
         */
-       ret = regmap_read(regmap,
-                         SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
+       ret = regmap_read(regmaps[0], SC16IS7XX_LSR_REG, &val);
        if (ret < 0)
                return -EPROBE_DEFER;
 
@@ -1542,10 +1520,8 @@ static int sc16is7xx_probe(struct device *dev,
                        return -EINVAL;
        }
 
-       s->regmap = regmap;
        s->devtype = devtype;
        dev_set_drvdata(dev, s);
-       mutex_init(&s->efr_lock);
 
        kthread_init_worker(&s->kworker);
        s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
@@ -1557,11 +1533,17 @@ static int sc16is7xx_probe(struct device *dev,
        sched_set_fifo(s->kworker_task);
 
        /* reset device, purging any pending irq / data */
-       regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
-                       SC16IS7XX_IOCONTROL_SRESET_BIT);
+       regmap_write(regmaps[0], SC16IS7XX_IOCONTROL_REG,
+                    SC16IS7XX_IOCONTROL_SRESET_BIT);
 
        for (i = 0; i < devtype->nr_uart; ++i) {
-               s->p[i].line            = i;
+               s->p[i].port.line = find_first_zero_bit(sc16is7xx_lines,
+                                                       SC16IS7XX_MAX_DEVS);
+               if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
+                       ret = -ERANGE;
+                       goto out_ports;
+               }
+
                /* Initialize port data */
                s->p[i].port.dev        = dev;
                s->p[i].port.irq        = irq;
@@ -1581,12 +1563,9 @@ static int sc16is7xx_probe(struct device *dev,
                s->p[i].port.rs485_supported = sc16is7xx_rs485_supported;
                s->p[i].port.ops        = &sc16is7xx_ops;
                s->p[i].old_mctrl       = 0;
-               s->p[i].port.line       = sc16is7xx_alloc_line();
+               s->p[i].regmap          = regmaps[i];
 
-               if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
-                       ret = -ENOMEM;
-                       goto out_ports;
-               }
+               mutex_init(&s->p[i].efr_lock);
 
                ret = uart_get_rs485_mode(&s->p[i].port);
                if (ret)
@@ -1603,20 +1582,25 @@ static int sc16is7xx_probe(struct device *dev,
                kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
                kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
                kthread_init_delayed_work(&s->p[i].ms_work, sc16is7xx_ms_proc);
+
                /* Register port */
-               uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
+               ret = uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
+               if (ret)
+                       goto out_ports;
+
+               set_bit(s->p[i].port.line, sc16is7xx_lines);
 
                /* Enable EFR */
                sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG,
                                     SC16IS7XX_LCR_CONF_MODE_B);
 
-               regcache_cache_bypass(s->regmap, true);
+               regcache_cache_bypass(regmaps[i], true);
 
                /* Enable write access to enhanced features */
                sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFR_REG,
                                     SC16IS7XX_EFR_ENABLE_BIT);
 
-               regcache_cache_bypass(s->regmap, false);
+               regcache_cache_bypass(regmaps[i], false);
 
                /* Restore access to general registers */
                sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG, 0x00);
@@ -1627,7 +1611,7 @@ static int sc16is7xx_probe(struct device *dev,
 
        sc16is7xx_setup_irda_ports(s);
 
-       ret = sc16is7xx_setup_mctrl_ports(s);
+       ret = sc16is7xx_setup_mctrl_ports(s, regmaps[0]);
        if (ret)
                goto out_ports;
 
@@ -1662,10 +1646,9 @@ static int sc16is7xx_probe(struct device *dev,
 #endif
 
 out_ports:
-       for (i--; i >= 0; i--) {
-               uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
-               clear_bit(s->p[i].port.line, &sc16is7xx_lines);
-       }
+       for (i = 0; i < devtype->nr_uart; i++)
+               if (test_and_clear_bit(s->p[i].port.line, sc16is7xx_lines))
+                       uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
 
        kthread_stop(s->kworker_task);
 
@@ -1687,8 +1670,8 @@ static void sc16is7xx_remove(struct device *dev)
 
        for (i = 0; i < s->devtype->nr_uart; i++) {
                kthread_cancel_delayed_work_sync(&s->p[i].ms_work);
-               uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
-               clear_bit(s->p[i].port.line, &sc16is7xx_lines);
+               if (test_and_clear_bit(s->p[i].port.line, sc16is7xx_lines))
+                       uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
                sc16is7xx_power(&s->p[i].port, 0);
        }
 
@@ -1710,45 +1693,74 @@ static const struct of_device_id __maybe_unused sc16is7xx_dt_ids[] = {
 MODULE_DEVICE_TABLE(of, sc16is7xx_dt_ids);
 
 static struct regmap_config regcfg = {
-       .reg_bits = 7,
-       .pad_bits = 1,
+       .reg_bits = 5,
+       .pad_bits = 3,
        .val_bits = 8,
        .cache_type = REGCACHE_RBTREE,
        .volatile_reg = sc16is7xx_regmap_volatile,
        .precious_reg = sc16is7xx_regmap_precious,
+       .writeable_noinc_reg = sc16is7xx_regmap_noinc,
+       .readable_noinc_reg = sc16is7xx_regmap_noinc,
+       .max_raw_read = SC16IS7XX_FIFO_SIZE,
+       .max_raw_write = SC16IS7XX_FIFO_SIZE,
+       .max_register = SC16IS7XX_EFCR_REG,
 };
 
+static const char *sc16is7xx_regmap_name(u8 port_id)
+{
+       switch (port_id) {
+       case 0: return "port0";
+       case 1: return "port1";
+       default:
+               WARN_ON(true);
+               return NULL;
+       }
+}
+
+static unsigned int sc16is7xx_regmap_port_mask(unsigned int port_id)
+{
+       /* CH1,CH0 are at bits 2:1. */
+       return port_id << 1;
+}
+
 #ifdef CONFIG_SERIAL_SC16IS7XX_SPI
 static int sc16is7xx_spi_probe(struct spi_device *spi)
 {
        const struct sc16is7xx_devtype *devtype;
-       struct regmap *regmap;
+       struct regmap *regmaps[SC16IS7XX_MAX_PORTS];
+       unsigned int i;
        int ret;
 
        /* Setup SPI bus */
        spi->bits_per_word      = 8;
-       /* only supports mode 0 on SC16IS762 */
+       /* For all variants, only mode 0 is supported */
+       if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0)
+               return dev_err_probe(&spi->dev, -EINVAL, "Unsupported SPI mode\n");
+
        spi->mode               = spi->mode ? : SPI_MODE_0;
-       spi->max_speed_hz       = spi->max_speed_hz ? : 15000000;
+       spi->max_speed_hz       = spi->max_speed_hz ? : 4 * HZ_PER_MHZ;
        ret = spi_setup(spi);
        if (ret)
                return ret;
 
-       if (spi->dev.of_node) {
-               devtype = device_get_match_data(&spi->dev);
-               if (!devtype)
-                       return -ENODEV;
-       } else {
-               const struct spi_device_id *id_entry = spi_get_device_id(spi);
+       devtype = spi_get_device_match_data(spi);
+       if (!devtype)
+               return dev_err_probe(&spi->dev, -ENODEV, "Failed to match device\n");
 
-               devtype = (struct sc16is7xx_devtype *)id_entry->driver_data;
+       for (i = 0; i < devtype->nr_uart; i++) {
+               regcfg.name = sc16is7xx_regmap_name(i);
+               /*
+                * If read_flag_mask is 0, the regmap code sets it to a default
+                * of 0x80. Since we specify our own mask, we must add the READ
+                * bit ourselves:
+                */
+               regcfg.read_flag_mask = sc16is7xx_regmap_port_mask(i) |
+                       SC16IS7XX_SPI_READ_BIT;
+               regcfg.write_flag_mask = sc16is7xx_regmap_port_mask(i);
+               regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
        }
 
-       regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
-                             (devtype->nr_uart - 1);
-       regmap = devm_regmap_init_spi(spi, &regcfg);
-
-       return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq);
+       return sc16is7xx_probe(&spi->dev, devtype, regmaps, spi->irq);
 }
 
 static void sc16is7xx_spi_remove(struct spi_device *spi)
@@ -1778,30 +1790,27 @@ static struct spi_driver sc16is7xx_spi_uart_driver = {
        .remove         = sc16is7xx_spi_remove,
        .id_table       = sc16is7xx_spi_id_table,
 };
-
-MODULE_ALIAS("spi:sc16is7xx");
 #endif
 
 #ifdef CONFIG_SERIAL_SC16IS7XX_I2C
 static int sc16is7xx_i2c_probe(struct i2c_client *i2c)
 {
-       const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
        const struct sc16is7xx_devtype *devtype;
-       struct regmap *regmap;
-
-       if (i2c->dev.of_node) {
-               devtype = device_get_match_data(&i2c->dev);
-               if (!devtype)
-                       return -ENODEV;
-       } else {
-               devtype = (struct sc16is7xx_devtype *)id->driver_data;
+       struct regmap *regmaps[SC16IS7XX_MAX_PORTS];
+       unsigned int i;
+
+       devtype = i2c_get_match_data(i2c);
+       if (!devtype)
+               return dev_err_probe(&i2c->dev, -ENODEV, "Failed to match device\n");
+
+       for (i = 0; i < devtype->nr_uart; i++) {
+               regcfg.name = sc16is7xx_regmap_name(i);
+               regcfg.read_flag_mask = sc16is7xx_regmap_port_mask(i);
+               regcfg.write_flag_mask = sc16is7xx_regmap_port_mask(i);
+               regmaps[i] = devm_regmap_init_i2c(i2c, &regcfg);
        }
 
-       regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
-                             (devtype->nr_uart - 1);
-       regmap = devm_regmap_init_i2c(i2c, &regcfg);
-
-       return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq);
+       return sc16is7xx_probe(&i2c->dev, devtype, regmaps, i2c->irq);
 }
 
 static void sc16is7xx_i2c_remove(struct i2c_client *client)
index 2be2c10980253fefc9215a4c42e8523b362537a9..f24217a560d7a21a8d9c1048717129494a90bf38 100644 (file)
@@ -1021,7 +1021,7 @@ err_out:
        return ret;
 }
 
-static int sccnxp_remove(struct platform_device *pdev)
+static void sccnxp_remove(struct platform_device *pdev)
 {
        int i;
        struct sccnxp_port *s = platform_get_drvdata(pdev);
@@ -1036,10 +1036,11 @@ static int sccnxp_remove(struct platform_device *pdev)
 
        uart_unregister_driver(&s->uart);
 
-       if (!IS_ERR(s->regulator))
-               return regulator_disable(s->regulator);
-
-       return 0;
+       if (!IS_ERR(s->regulator)) {
+               int ret = regulator_disable(s->regulator);
+               if (ret)
+                       dev_err(&pdev->dev, "Failed to disable regulator\n");
+       }
 }
 
 static struct platform_driver sccnxp_uart_driver = {
@@ -1047,7 +1048,7 @@ static struct platform_driver sccnxp_uart_driver = {
                .name   = SCCNXP_NAME,
        },
        .probe          = sccnxp_probe,
-       .remove         = sccnxp_remove,
+       .remove_new     = sccnxp_remove,
        .id_table       = sccnxp_id_table,
 };
 module_platform_driver(sccnxp_uart_driver);
index 6d4006b4197544c0dc17d334c0d671fb6cb79edc..525f3a2f7bd47f9013115e379f227d990cc5d768 100644 (file)
@@ -1611,13 +1611,12 @@ static int tegra_uart_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int tegra_uart_remove(struct platform_device *pdev)
+static void tegra_uart_remove(struct platform_device *pdev)
 {
        struct tegra_uart_port *tup = platform_get_drvdata(pdev);
        struct uart_port *u = &tup->uport;
 
        uart_remove_one_port(&tegra_uart_driver, u);
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -1644,7 +1643,7 @@ static const struct dev_pm_ops tegra_uart_pm_ops = {
 
 static struct platform_driver tegra_uart_platform_driver = {
        .probe          = tegra_uart_probe,
-       .remove         = tegra_uart_remove,
+       .remove_new     = tegra_uart_remove,
        .driver         = {
                .name   = "serial-tegra",
                .of_match_table = tegra_uart_of_match,
index f1348a5095527e876b17a7463d3d8cee0321291f..d6a58a9e072a1dad7938fbb53627f4d5e5374adc 100644 (file)
@@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state)
         * enabled, serial_port_runtime_resume() calls start_tx() again
         * after enabling the device.
         */
-       if (!pm_runtime_enabled(port->dev) || pm_runtime_active(port->dev))
+       if (pm_runtime_active(&port_dev->dev))
                port->ops->start_tx(port);
        pm_runtime_mark_last_busy(&port_dev->dev);
        pm_runtime_put_autosuspend(&port_dev->dev);
@@ -410,11 +410,10 @@ void
 uart_update_timeout(struct uart_port *port, unsigned int cflag,
                    unsigned int baud)
 {
-       unsigned int size = tty_get_frame_size(cflag);
-       u64 frame_time;
+       u64 temp = tty_get_frame_size(cflag);
 
-       frame_time = (u64)size * NSEC_PER_SEC;
-       port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud);
+       temp *= NSEC_PER_SEC;
+       port->frame_time = (unsigned int)DIV64_U64_ROUND_UP(temp, baud);
 }
 EXPORT_SYMBOL(uart_update_timeout);
 
@@ -687,7 +686,7 @@ EXPORT_SYMBOL_GPL(uart_xchar_out);
  * This function is used to send a high-priority XON/XOFF character to
  * the device
  */
-static void uart_send_xchar(struct tty_struct *tty, char ch)
+static void uart_send_xchar(struct tty_struct *tty, u8 ch)
 {
        struct uart_state *state = tty->driver_data;
        struct uart_port *port;
@@ -1085,8 +1084,8 @@ static int uart_tiocmget(struct tty_struct *tty)
                goto out;
 
        if (!tty_io_error(tty)) {
-               result = uport->mctrl;
                uart_port_lock_irq(uport);
+               result = uport->mctrl;
                result |= uport->ops->get_mctrl(uport);
                uart_port_unlock_irq(uport);
        }
@@ -1371,20 +1370,34 @@ static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs4
                return;
        }
 
-       /* Pick sane settings if the user hasn't */
-       if ((supported_flags & (SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND)) &&
-           !(rs485->flags & SER_RS485_RTS_ON_SEND) ==
-           !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
-               dev_warn_ratelimited(port->dev,
-                       "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
-                       port->name, port->line);
-               rs485->flags |= SER_RS485_RTS_ON_SEND;
-               rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
-               supported_flags |= SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND;
+       /* Clear other RS485 flags but SER_RS485_TERMINATE_BUS and return if enabling RS422 */
+       if (rs485->flags & SER_RS485_MODE_RS422) {
+               rs485->flags &= (SER_RS485_ENABLED | SER_RS485_MODE_RS422 | SER_RS485_TERMINATE_BUS);
+               return;
        }
 
        rs485->flags &= supported_flags;
 
+       /* Pick sane settings if the user hasn't */
+       if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+           !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) {
+               if (supported_flags & SER_RS485_RTS_ON_SEND) {
+                       rs485->flags |= SER_RS485_RTS_ON_SEND;
+                       rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+
+                       dev_warn_ratelimited(port->dev,
+                               "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n",
+                               port->name, port->line);
+               } else {
+                       rs485->flags |= SER_RS485_RTS_AFTER_SEND;
+                       rs485->flags &= ~SER_RS485_RTS_ON_SEND;
+
+                       dev_warn_ratelimited(port->dev,
+                               "%s (%d): invalid RTS setting, using RTS_AFTER_SEND instead\n",
+                               port->name, port->line);
+               }
+       }
+
        uart_sanitize_serial_rs485_delays(port, rs485);
 
        /* Return clean padding area to userspace */
@@ -1402,6 +1415,16 @@ static void uart_set_rs485_termination(struct uart_port *port,
                                 !!(rs485->flags & SER_RS485_TERMINATE_BUS));
 }
 
+static void uart_set_rs485_rx_during_tx(struct uart_port *port,
+                                       const struct serial_rs485 *rs485)
+{
+       if (!(rs485->flags & SER_RS485_ENABLED))
+               return;
+
+       gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
+                                !!(rs485->flags & SER_RS485_RX_DURING_TX));
+}
+
 static int uart_rs485_config(struct uart_port *port)
 {
        struct serial_rs485 *rs485 = &port->rs485;
@@ -1413,12 +1436,17 @@ static int uart_rs485_config(struct uart_port *port)
 
        uart_sanitize_serial_rs485(port, rs485);
        uart_set_rs485_termination(port, rs485);
+       uart_set_rs485_rx_during_tx(port, rs485);
 
        uart_port_lock_irqsave(port, &flags);
        ret = port->rs485_config(port, NULL, rs485);
        uart_port_unlock_irqrestore(port, flags);
-       if (ret)
+       if (ret) {
                memset(rs485, 0, sizeof(*rs485));
+               /* unset GPIOs */
+               gpiod_set_value_cansleep(port->rs485_term_gpio, 0);
+               gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio, 0);
+       }
 
        return ret;
 }
@@ -1446,7 +1474,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
        int ret;
        unsigned long flags;
 
-       if (!port->rs485_config)
+       if (!(port->rs485_supported.flags & SER_RS485_ENABLED))
                return -ENOTTY;
 
        if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
@@ -1457,6 +1485,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
                return ret;
        uart_sanitize_serial_rs485(port, &rs485);
        uart_set_rs485_termination(port, &rs485);
+       uart_set_rs485_rx_during_tx(port, &rs485);
 
        uart_port_lock_irqsave(port, &flags);
        ret = port->rs485_config(port, &tty->termios, &rs485);
@@ -1468,8 +1497,14 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
                        port->ops->set_mctrl(port, port->mctrl);
        }
        uart_port_unlock_irqrestore(port, flags);
-       if (ret)
+       if (ret) {
+               /* restore old GPIO settings */
+               gpiod_set_value_cansleep(port->rs485_term_gpio,
+                       !!(port->rs485.flags & SER_RS485_TERMINATE_BUS));
+               gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
+                       !!(port->rs485.flags & SER_RS485_RX_DURING_TX));
                return ret;
+       }
 
        if (copy_to_user(rs485_user, &port->rs485, sizeof(port->rs485)))
                return -EFAULT;
@@ -2342,7 +2377,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
 
        mutex_lock(&port->mutex);
 
-       tty_dev = device_find_child(uport->dev, &match, serial_match_port);
+       tty_dev = device_find_child(&uport->port_dev->dev, &match, serial_match_port);
        if (tty_dev && device_may_wakeup(tty_dev)) {
                enable_irq_wake(uport->irq);
                put_device(tty_dev);
@@ -2423,7 +2458,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
 
        mutex_lock(&port->mutex);
 
-       tty_dev = device_find_child(uport->dev, &match, serial_match_port);
+       tty_dev = device_find_child(&uport->port_dev->dev, &match, serial_match_port);
        if (!uport->suspended && device_may_wakeup(tty_dev)) {
                if (irqd_is_wakeup_set(irq_get_irq_data((uport->irq))))
                        disable_irq_wake(uport->irq);
@@ -2633,7 +2668,8 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
        mutex_lock(&tport->mutex);
 
        port = uart_port_check(state);
-       if (!port || !(port->ops->poll_get_char && port->ops->poll_put_char)) {
+       if (!port || port->type == PORT_UNKNOWN ||
+           !(port->ops->poll_get_char && port->ops->poll_put_char)) {
                ret = -1;
                goto out;
        }
@@ -3153,7 +3189,8 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
         * setserial to be used to alter this port's parameters.
         */
        tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver,
-                       uport->line, uport->dev, port, uport->tty_groups);
+                       uport->line, uport->dev, &uport->port_dev->dev, port,
+                       uport->tty_groups);
        if (!IS_ERR(tty_dev)) {
                device_set_wakeup_capable(tty_dev, 1);
        } else {
@@ -3570,6 +3607,9 @@ int uart_get_rs485_mode(struct uart_port *port)
        u32 rs485_delay[2];
        int ret;
 
+       if (!(port->rs485_supported.flags & SER_RS485_ENABLED))
+               return 0;
+
        ret = device_property_read_u32_array(dev, "rs485-rts-delay",
                                             rs485_delay, 2);
        if (!ret) {
@@ -3620,6 +3660,8 @@ int uart_get_rs485_mode(struct uart_port *port)
        if (IS_ERR(desc))
                return dev_err_probe(dev, PTR_ERR(desc), "Cannot get rs485-rx-during-tx-gpios\n");
        port->rs485_rx_during_tx_gpio = desc;
+       if (port->rs485_rx_during_tx_gpio)
+               port->rs485_supported.flags |= SER_RS485_RX_DURING_TX;
 
        return 0;
 }
index eaa98072245559e4a7ae90ed5df425131f8c09c2..e1897894a4ef58cb69f8bb9edef43ed17786cb34 100644 (file)
@@ -1052,7 +1052,7 @@ static int serial_txx9_probe(struct platform_device *dev)
 /*
  * Remove serial ports registered against a platform device.
  */
-static int serial_txx9_remove(struct platform_device *dev)
+static void serial_txx9_remove(struct platform_device *dev)
 {
        int i;
 
@@ -1062,7 +1062,6 @@ static int serial_txx9_remove(struct platform_device *dev)
                if (up->dev == &dev->dev)
                        serial_txx9_unregister_port(i);
        }
-       return 0;
 }
 
 #ifdef CONFIG_PM
@@ -1097,7 +1096,7 @@ static int serial_txx9_resume(struct platform_device *dev)
 
 static struct platform_driver serial_txx9_plat_driver = {
        .probe          = serial_txx9_probe,
-       .remove         = serial_txx9_remove,
+       .remove_new     = serial_txx9_remove,
 #ifdef CONFIG_PM
        .suspend        = serial_txx9_suspend,
        .resume         = serial_txx9_resume,
index 84ab434c94baf92ab1c404e7ab8ad78c67599f6f..a85e7b9a2e492391ae7a5c6560bc4b2d31ed96fb 100644 (file)
@@ -1558,10 +1558,9 @@ static struct dma_chan *sci_request_dma_chan(struct uart_port *port,
        struct dma_slave_config cfg;
        int ret;
 
-       chan = dma_request_slave_channel(port->dev,
-                                        dir == DMA_MEM_TO_DEV ? "tx" : "rx");
-       if (!chan) {
-               dev_dbg(port->dev, "dma_request_slave_channel failed\n");
+       chan = dma_request_chan(port->dev, dir == DMA_MEM_TO_DEV ? "tx" : "rx");
+       if (IS_ERR(chan)) {
+               dev_dbg(port->dev, "dma_request_chan failed\n");
                return NULL;
        }
 
@@ -3190,7 +3189,7 @@ static struct uart_driver sci_uart_driver = {
        .cons           = SCI_CONSOLE,
 };
 
-static int sci_remove(struct platform_device *dev)
+static void sci_remove(struct platform_device *dev)
 {
        struct sci_port *port = platform_get_drvdata(dev);
        unsigned int type = port->port.type;    /* uart_remove_... clears it */
@@ -3204,8 +3203,6 @@ static int sci_remove(struct platform_device *dev)
                device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger);
        if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF)
                device_remove_file(&dev->dev, &dev_attr_rx_fifo_timeout);
-
-       return 0;
 }
 
 
@@ -3470,7 +3467,7 @@ static SIMPLE_DEV_PM_OPS(sci_dev_pm_ops, sci_suspend, sci_resume);
 
 static struct platform_driver sci_driver = {
        .probe          = sci_probe,
-       .remove         = sci_remove,
+       .remove_new     = sci_remove,
        .driver         = {
                .name   = "sh-sci",
                .pm     = &sci_dev_pm_ops,
index b296e57a9dee4b09cbe4496f7f25c5f1c8c6b5f9..a4cc569a78a256746e0e4cc262fc2abfe8ad5214 100644 (file)
@@ -1007,7 +1007,7 @@ probe_out1:
        return r;
 }
 
-static int sifive_serial_remove(struct platform_device *dev)
+static void sifive_serial_remove(struct platform_device *dev)
 {
        struct sifive_serial_port *ssp = platform_get_drvdata(dev);
 
@@ -1015,8 +1015,6 @@ static int sifive_serial_remove(struct platform_device *dev)
        uart_remove_one_port(&sifive_serial_uart_driver, &ssp->port);
        free_irq(ssp->port.irq, ssp);
        clk_notifier_unregister(ssp->clk, &ssp->clk_notifier);
-
-       return 0;
 }
 
 static int sifive_serial_suspend(struct device *dev)
@@ -1033,8 +1031,8 @@ static int sifive_serial_resume(struct device *dev)
        return uart_resume_port(&sifive_serial_uart_driver, &ssp->port);
 }
 
-DEFINE_SIMPLE_DEV_PM_OPS(sifive_uart_pm_ops, sifive_serial_suspend,
-                        sifive_serial_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sifive_uart_pm_ops, sifive_serial_suspend,
+                               sifive_serial_resume);
 
 static const struct of_device_id sifive_serial_of_match[] = {
        { .compatible = "sifive,fu540-c000-uart0" },
@@ -1045,7 +1043,7 @@ MODULE_DEVICE_TABLE(of, sifive_serial_of_match);
 
 static struct platform_driver sifive_serial_platform_driver = {
        .probe          = sifive_serial_probe,
-       .remove         = sifive_serial_remove,
+       .remove_new     = sifive_serial_remove,
        .driver         = {
                .name   = SIFIVE_SERIAL_NAME,
                .pm = pm_sleep_ptr(&sifive_uart_pm_ops),
index f257525f9299506ca9473473499e292ddc145fda..15f14fa593da58793a7cdbed5d7be07853f18ec9 100644 (file)
@@ -1076,7 +1076,7 @@ static struct uart_driver sprd_uart_driver = {
        .cons = SPRD_CONSOLE,
 };
 
-static int sprd_remove(struct platform_device *dev)
+static void sprd_remove(struct platform_device *dev)
 {
        struct sprd_uart_port *sup = platform_get_drvdata(dev);
 
@@ -1089,8 +1089,6 @@ static int sprd_remove(struct platform_device *dev)
 
        if (!sprd_ports_num)
                uart_unregister_driver(&sprd_uart_driver);
-
-       return 0;
 }
 
 static bool sprd_uart_is_console(struct uart_port *uport)
@@ -1257,7 +1255,7 @@ MODULE_DEVICE_TABLE(of, serial_ids);
 
 static struct platform_driver sprd_platform_driver = {
        .probe          = sprd_probe,
-       .remove         = sprd_remove,
+       .remove_new     = sprd_remove,
        .driver         = {
                .name   = "sprd_serial",
                .of_match_table = serial_ids,
index a821f5d76a26f1b85e6be8b55b8d39c2997bef49..bbb5595d7e24cc22a195e7520366ac0e43e94e3c 100644 (file)
@@ -793,13 +793,11 @@ static int asc_serial_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int asc_serial_remove(struct platform_device *pdev)
+static void asc_serial_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&asc_uart_driver, port);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -932,7 +930,7 @@ static const struct dev_pm_ops asc_serial_pm_ops = {
 
 static struct platform_driver asc_serial_driver = {
        .probe          = asc_serial_probe,
-       .remove         = asc_serial_remove,
+       .remove_new     = asc_serial_remove,
        .driver = {
                .name   = DRIVER_NAME,
                .pm     = &asc_serial_pm_ops,
index 3048620315d609525b7c573cd85edc23b9fbe3b7..794b7751274034848c65a7e3374b694bcf61c42d 100644 (file)
@@ -226,12 +226,6 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
 
        stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
 
-       if (port->rs485_rx_during_tx_gpio)
-               gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
-                                        !!(rs485conf->flags & SER_RS485_RX_DURING_TX));
-       else
-               rs485conf->flags |= SER_RS485_RX_DURING_TX;
-
        if (rs485conf->flags & SER_RS485_ENABLED) {
                cr1 = readl_relaxed(port->membase + ofs->cr1);
                cr3 = readl_relaxed(port->membase + ofs->cr3);
@@ -256,6 +250,8 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
 
                writel_relaxed(cr3, port->membase + ofs->cr3);
                writel_relaxed(cr1, port->membase + ofs->cr1);
+
+               rs485conf->flags |= SER_RS485_RX_DURING_TX;
        } else {
                stm32_usart_clr_bits(port, ofs->cr3,
                                     USART_CR3_DEM | USART_CR3_DEP);
@@ -1822,7 +1818,7 @@ err_dma_rx:
        return ret;
 }
 
-static int stm32_usart_serial_remove(struct platform_device *pdev)
+static void stm32_usart_serial_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
        struct stm32_port *stm32_port = to_stm32_port(port);
@@ -1861,8 +1857,6 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
        }
 
        stm32_usart_deinit_port(stm32_port);
-
-       return 0;
 }
 
 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
@@ -2146,7 +2140,7 @@ static const struct dev_pm_ops stm32_serial_pm_ops = {
 
 static struct platform_driver stm32_serial_driver = {
        .probe          = stm32_usart_serial_probe,
-       .remove         = stm32_usart_serial_remove,
+       .remove_new     = stm32_usart_serial_remove,
        .driver = {
                .name   = DRIVER_NAME,
                .pm     = &stm32_serial_pm_ops,
index 5bfc0040f17be8f439aab1a3efe84aa061210a84..8d612ab80680c9e54aec4d694430fa099e8a1c69 100644 (file)
@@ -595,7 +595,7 @@ out_free_port:
        return err;
 }
 
-static int hv_remove(struct platform_device *dev)
+static void hv_remove(struct platform_device *dev)
 {
        struct uart_port *port = platform_get_drvdata(dev);
 
@@ -608,8 +608,6 @@ static int hv_remove(struct platform_device *dev)
        kfree(con_write_page);
        kfree(port);
        sunhv_port = NULL;
-
-       return 0;
 }
 
 static const struct of_device_id hv_match[] = {
@@ -630,7 +628,7 @@ static struct platform_driver hv_driver = {
                .of_match_table = hv_match,
        },
        .probe          = hv_probe,
-       .remove         = hv_remove,
+       .remove_new     = hv_remove,
 };
 
 static int __init sunhv_init(void)
index 4251f4e1ba997547d65dcd248b850a1abbbe241f..99f5285819d4b22645d6e706adfe2be8d69a8ead 100644 (file)
@@ -662,13 +662,11 @@ static int sunplus_uart_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int sunplus_uart_remove(struct platform_device *pdev)
+static void sunplus_uart_remove(struct platform_device *pdev)
 {
        struct sunplus_uart_port *sup = platform_get_drvdata(pdev);
 
        uart_remove_one_port(&sunplus_uart_driver, &sup->port);
-
-       return 0;
 }
 
 static int __maybe_unused sunplus_uart_suspend(struct device *dev)
@@ -703,7 +701,7 @@ MODULE_DEVICE_TABLE(of, sp_uart_of_match);
 
 static struct platform_driver sunplus_uart_platform_driver = {
        .probe          = sunplus_uart_probe,
-       .remove         = sunplus_uart_remove,
+       .remove_new     = sunplus_uart_remove,
        .driver = {
                .name   = "sunplus_uart",
                .of_match_table = sp_uart_of_match,
index 6aa51a6f8063074828b13757960db01151a4c9b0..1ea2f33a07a74149669b2df514fc2a2a8fc83b0f 100644 (file)
@@ -443,7 +443,7 @@ static void sunsab_start_tx(struct uart_port *port)
 
        up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR);
        writeb(up->interrupt_mask1, &up->regs->w.imr1);
-       
+
        if (!test_bit(SAB82532_XPR, &up->irqflags))
                return;
 
@@ -549,7 +549,7 @@ static int sunsab_startup(struct uart_port *port)
        (void) readb(&up->regs->r.isr1);
 
        /*
-        * Now, initialize the UART 
+        * Now, initialize the UART
         */
        writeb(0, &up->regs->w.ccr0);                           /* power-down */
        writeb(SAB82532_CCR0_MCE | SAB82532_CCR0_SC_NRZ |
@@ -563,7 +563,7 @@ static int sunsab_startup(struct uart_port *port)
                           SAB82532_MODE_RAC);
        writeb(up->cached_mode, &up->regs->w.mode);
        writeb(SAB82532_RFC_DPS|SAB82532_RFC_RFTH_32, &up->regs->w.rfc);
-       
+
        tmp = readb(&up->regs->rw.ccr0);
        tmp |= SAB82532_CCR0_PU;        /* power-up */
        writeb(tmp, &up->regs->rw.ccr0);
@@ -607,7 +607,7 @@ static void sunsab_shutdown(struct uart_port *port)
        up->cached_dafo &= ~SAB82532_DAFO_XBRK;
        writeb(up->cached_dafo, &up->regs->rw.dafo);
 
-       /* Disable Receiver */  
+       /* Disable Receiver */
        up->cached_mode &= ~SAB82532_MODE_RAC;
        writeb(up->cached_mode, &up->regs->rw.mode);
 
@@ -622,7 +622,7 @@ static void sunsab_shutdown(struct uart_port *port)
         * speed the chip was configured for when the port was open).
         */
 #if 0
-       /* Power Down */        
+       /* Power Down */
        tmp = readb(&up->regs->rw.ccr0);
        tmp &= ~SAB82532_CCR0_PU;
        writeb(tmp, &up->regs->rw.ccr0);
@@ -649,7 +649,7 @@ static void calc_ebrg(int baud, int *n_ret, int *m_ret)
                *m_ret = 0;
                return;
        }
-     
+
        /*
         * We scale numbers by 10 so that we get better accuracy
         * without having to use floating point.  Here we increment m
@@ -788,7 +788,7 @@ static const char *sunsab_type(struct uart_port *port)
 {
        struct uart_sunsab_port *up = (void *)port;
        static char buf[36];
-       
+
        sprintf(buf, "SAB82532 %s", sab82532_version[up->type]);
        return buf;
 }
@@ -933,7 +933,7 @@ static int sunsab_console_setup(struct console *con, char *options)
        sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
 
        uart_port_unlock_irqrestore(&up->port, flags);
-       
+
        return 0;
 }
 
@@ -1066,7 +1066,7 @@ out:
        return err;
 }
 
-static int sab_remove(struct platform_device *op)
+static void sab_remove(struct platform_device *op)
 {
        struct uart_sunsab_port *up = platform_get_drvdata(op);
 
@@ -1078,8 +1078,6 @@ static int sab_remove(struct platform_device *op)
        of_iounmap(&op->resource[0],
                   up[0].port.membase,
                   sizeof(union sab82532_async_regs));
-
-       return 0;
 }
 
 static const struct of_device_id sab_match[] = {
@@ -1100,7 +1098,7 @@ static struct platform_driver sab_driver = {
                .of_match_table = sab_match,
        },
        .probe          = sab_probe,
-       .remove         = sab_remove,
+       .remove_new     = sab_remove,
 };
 
 static int __init sunsab_init(void)
index 1e051cc2591c7a5721742d7c3fb19833a4f80772..c8b65f4b2710e47f361d578d639d3ef9bd114107 100644 (file)
@@ -1515,7 +1515,7 @@ out_unmap:
        return err;
 }
 
-static int su_remove(struct platform_device *op)
+static void su_remove(struct platform_device *op)
 {
        struct uart_sunsu_port *up = platform_get_drvdata(op);
        bool kbdms = false;
@@ -1536,8 +1536,6 @@ static int su_remove(struct platform_device *op)
 
        if (kbdms)
                kfree(up);
-
-       return 0;
 }
 
 static const struct of_device_id su_match[] = {
@@ -1565,7 +1563,7 @@ static struct platform_driver su_driver = {
                .of_match_table = su_match,
        },
        .probe          = su_probe,
-       .remove         = su_remove,
+       .remove_new     = su_remove,
 };
 
 static int __init sunsu_init(void)
index d3b5e864b727b7722268bb8c2335237210fc326d..c99289c6c8f89b7d7bd0a9459ed502efb805c474 100644 (file)
@@ -1513,7 +1513,7 @@ static void zs_remove_one(struct uart_sunzilog_port *up)
                uart_remove_one_port(&sunzilog_reg, &up->port);
 }
 
-static int zs_remove(struct platform_device *op)
+static void zs_remove(struct platform_device *op)
 {
        struct uart_sunzilog_port *up = platform_get_drvdata(op);
        struct zilog_layout __iomem *regs;
@@ -1523,8 +1523,6 @@ static int zs_remove(struct platform_device *op)
 
        regs = sunzilog_chip_regs[up[0].port.line / 2];
        of_iounmap(&op->resource[0], regs, sizeof(struct zilog_layout));
-
-       return 0;
 }
 
 static const struct of_device_id zs_match[] = {
@@ -1541,7 +1539,7 @@ static struct platform_driver zs_driver = {
                .of_match_table = zs_match,
        },
        .probe          = zs_probe,
-       .remove         = zs_remove,
+       .remove_new     = zs_remove,
 };
 
 static int __init sunzilog_init(void)
index 65069daf36ec0b4ad983b9d576988c1c90e675ed..d9c78320eb0209085849a00653c8d39997bc343f 100644 (file)
@@ -266,7 +266,7 @@ free_tx:
        return err;
 }
 
-static int tegra_tcu_remove(struct platform_device *pdev)
+static void tegra_tcu_remove(struct platform_device *pdev)
 {
        struct tegra_tcu *tcu = platform_get_drvdata(pdev);
 
@@ -277,8 +277,6 @@ static int tegra_tcu_remove(struct platform_device *pdev)
        uart_remove_one_port(&tcu->driver, &tcu->port);
        uart_unregister_driver(&tcu->driver);
        mbox_free_channel(tcu->tx);
-
-       return 0;
 }
 
 static const struct of_device_id tegra_tcu_match[] = {
@@ -293,7 +291,7 @@ static struct platform_driver tegra_tcu_driver = {
                .of_match_table = tegra_tcu_match,
        },
        .probe = tegra_tcu_probe,
-       .remove = tegra_tcu_remove,
+       .remove_new = tegra_tcu_remove,
 };
 module_platform_driver(tegra_tcu_driver);
 
index 0cc6524f5e8ba7cdf24b69df56a7c5e889b6fad8..4bc89a9b380a7509faa4e6e0c58ce4da9b44eb52 100644 (file)
@@ -473,7 +473,7 @@ err_mem:
        return err;
 }
 
-static int timbuart_remove(struct platform_device *dev)
+static void timbuart_remove(struct platform_device *dev)
 {
        struct timbuart_port *uart = platform_get_drvdata(dev);
 
@@ -481,8 +481,6 @@ static int timbuart_remove(struct platform_device *dev)
        uart_remove_one_port(&timbuart_driver, &uart->port);
        uart_unregister_driver(&timbuart_driver);
        kfree(uart);
-
-       return 0;
 }
 
 static struct platform_driver timbuart_platform_driver = {
@@ -490,7 +488,7 @@ static struct platform_driver timbuart_platform_driver = {
                .name   = "timb-uart",
        },
        .probe          = timbuart_probe,
-       .remove         = timbuart_remove,
+       .remove_new     = timbuart_remove,
 };
 
 module_platform_driver(timbuart_platform_driver);
index 404c14acafa5f035340b08d903ec762d74c05fb6..10ba41b7be9944fcc93fc12969b7f06e48abc0da 100644 (file)
 #include <linux/pm_runtime.h>
 
 #define ULITE_NAME             "ttyUL"
+#if CONFIG_SERIAL_UARTLITE_NR_UARTS > 4
+#define ULITE_MAJOR             0       /* use dynamic node allocation */
+#define ULITE_MINOR             0
+#else
 #define ULITE_MAJOR            204
 #define ULITE_MINOR            187
+#endif
 #define ULITE_NR_UARTS         CONFIG_SERIAL_UARTLITE_NR_UARTS
 
 /* ---------------------------------------------------------------------
@@ -62,11 +67,11 @@ static struct uart_port *console_port;
 #endif
 
 /**
- * struct uartlite_data: Driver private data
- * reg_ops: Functions to read/write registers
- * clk: Our parent clock, if present
- * baud: The baud rate configured when this device was synthesized
- * cflags: The cflags for parity and data bits
+ * struct uartlite_data - Driver private data
+ * @reg_ops: Functions to read/write registers
+ * @clk: Our parent clock, if present
+ * @baud: The baud rate configured when this device was synthesized
+ * @cflags: The cflags for parity and data bits
  */
 struct uartlite_data {
        const struct uartlite_reg_ops *reg_ops;
@@ -890,7 +895,7 @@ of_err:
        return ret;
 }
 
-static int ulite_remove(struct platform_device *pdev)
+static void ulite_remove(struct platform_device *pdev)
 {
        struct uart_port *port = dev_get_drvdata(&pdev->dev);
        struct uartlite_data *pdata = port->private_data;
@@ -900,7 +905,6 @@ static int ulite_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);
-       return 0;
 }
 
 /* work with hotplug and coldplug */
@@ -908,7 +912,7 @@ MODULE_ALIAS("platform:uartlite");
 
 static struct platform_driver ulite_platform_driver = {
        .probe = ulite_probe,
-       .remove = ulite_remove,
+       .remove_new = ulite_remove,
        .driver = {
                .name  = "uartlite",
                .of_match_table = of_match_ptr(ulite_of_match),
index ed7a6bb5596ab610264766fb16827e9c5920fd04..397b95dff7eda6bc045a40ace5a8b3fa4a408a14 100644 (file)
@@ -189,10 +189,10 @@ struct uart_qe_port {
        u16 tx_fifosize;
        int wait_closing;
        u32 flags;
-       struct qe_bd *rx_bd_base;
-       struct qe_bd *rx_cur;
-       struct qe_bd *tx_bd_base;
-       struct qe_bd *tx_cur;
+       struct qe_bd __iomem *rx_bd_base;
+       struct qe_bd __iomem *rx_cur;
+       struct qe_bd __iomem *tx_bd_base;
+       struct qe_bd __iomem *tx_cur;
        unsigned char *tx_buf;
        unsigned char *rx_buf;
        void *bd_virt;          /* virtual address of the BD buffers */
@@ -258,7 +258,7 @@ static unsigned int qe_uart_tx_empty(struct uart_port *port)
 {
        struct uart_qe_port *qe_port =
                container_of(port, struct uart_qe_port, port);
-       struct qe_bd *bdp = qe_port->tx_bd_base;
+       struct qe_bd __iomem *bdp = qe_port->tx_bd_base;
 
        while (1) {
                if (ioread16be(&bdp->status) & BD_SC_READY)
@@ -330,7 +330,7 @@ static void qe_uart_stop_tx(struct uart_port *port)
  */
 static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
 {
-       struct qe_bd *bdp;
+       struct qe_bd __iomem *bdp;
        unsigned char *p;
        unsigned int count;
        struct uart_port *port = &qe_port->port;
@@ -341,7 +341,7 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
                /* Pick next descriptor and fill from buffer */
                bdp = qe_port->tx_cur;
 
-               p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
+               p = qe2cpu_addr(ioread32be(&bdp->buf), qe_port);
 
                *p++ = port->x_char;
                iowrite16be(1, &bdp->length);
@@ -368,7 +368,7 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
 
        while (!(ioread16be(&bdp->status) & BD_SC_READY) && !uart_circ_empty(xmit)) {
                count = 0;
-               p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
+               p = qe2cpu_addr(ioread32be(&bdp->buf), qe_port);
                while (count < qe_port->tx_fifosize) {
                        *p++ = xmit->buf[xmit->tail];
                        uart_xmit_advance(port, 1);
@@ -460,7 +460,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
        unsigned char ch, *cp;
        struct uart_port *port = &qe_port->port;
        struct tty_port *tport = &port->state->port;
-       struct qe_bd *bdp;
+       struct qe_bd __iomem *bdp;
        u16 status;
        unsigned int flg;
 
@@ -487,7 +487,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
                }
 
                /* get pointer */
-               cp = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
+               cp = qe2cpu_addr(ioread32be(&bdp->buf), qe_port);
 
                /* loop through the buffer */
                while (i-- > 0) {
@@ -590,7 +590,7 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
 {
        int i;
        void *bd_virt;
-       struct qe_bd *bdp;
+       struct qe_bd __iomem *bdp;
 
        /* Set the physical address of the host memory buffers in the buffer
         * descriptors, and the virtual address for us to work with.
@@ -648,7 +648,7 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
 {
        u32 cecr_subblock;
        struct ucc_slow __iomem *uccp = qe_port->uccp;
-       struct ucc_uart_pram *uccup = qe_port->uccup;
+       struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
 
        unsigned int i;
 
@@ -983,7 +983,7 @@ static int qe_uart_request_port(struct uart_port *port)
 
        qe_port->us_private = uccs;
        qe_port->uccp = uccs->us_regs;
-       qe_port->uccup = (struct ucc_uart_pram *) uccs->us_pram;
+       qe_port->uccup = (struct ucc_uart_pram __iomem *)uccs->us_pram;
        qe_port->rx_bd_base = uccs->rx_bd;
        qe_port->tx_bd_base = uccs->tx_bd;
 
@@ -1156,7 +1156,7 @@ static void uart_firmware_cont(const struct firmware *fw, void *context)
 
        firmware = (struct qe_firmware *) fw->data;
 
-       if (firmware->header.length != fw->size) {
+       if (be32_to_cpu(firmware->header.length) != fw->size) {
                dev_err(dev, "invalid firmware\n");
                goto out;
        }
@@ -1459,7 +1459,7 @@ out_free:
        return ret;
 }
 
-static int ucc_uart_remove(struct platform_device *ofdev)
+static void ucc_uart_remove(struct platform_device *ofdev)
 {
        struct uart_qe_port *qe_port = platform_get_drvdata(ofdev);
 
@@ -1470,8 +1470,6 @@ static int ucc_uart_remove(struct platform_device *ofdev)
        of_node_put(qe_port->np);
 
        kfree(qe_port);
-
-       return 0;
 }
 
 static const struct of_device_id ucc_uart_match[] = {
@@ -1492,7 +1490,7 @@ static struct platform_driver ucc_uart_of_driver = {
                .of_match_table    = ucc_uart_match,
        },
        .probe          = ucc_uart_probe,
-       .remove         = ucc_uart_remove,
+       .remove_new     = ucc_uart_remove,
 };
 
 static int __init ucc_uart_init(void)
index 66a45a634158f6e75ebe0d93eb8ce295b76135a2..920762d7b4a4b2607f6ec9b2b7981af803b65361 100644 (file)
@@ -1663,10 +1663,8 @@ err_out_unregister_driver:
 /**
  * cdns_uart_remove - called when the platform driver is unregistered
  * @pdev: Pointer to the platform device structure
- *
- * Return: 0 on success, negative errno otherwise
  */
-static int cdns_uart_remove(struct platform_device *pdev)
+static void cdns_uart_remove(struct platform_device *pdev)
 {
        struct uart_port *port = platform_get_drvdata(pdev);
        struct cdns_uart *cdns_uart_data = port->private_data;
@@ -1692,12 +1690,11 @@ static int cdns_uart_remove(struct platform_device *pdev)
 
        if (!--instances)
                uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
-       return 0;
 }
 
 static struct platform_driver cdns_uart_platform_driver = {
        .probe   = cdns_uart_probe,
-       .remove  = cdns_uart_remove,
+       .remove_new = cdns_uart_remove,
        .driver  = {
                .name = CDNS_UART_NAME,
                .of_match_table = cdns_uart_of_match,
index 6b4a28bcf2f5f5406799a44bafa3891ef7d60f02..02217e3c916b560b782a32164e3fbf146570f712 100644 (file)
@@ -1150,16 +1150,29 @@ EXPORT_SYMBOL(unregister_sysrq_key);
 #ifdef CONFIG_PROC_FS
 /*
  * writing 'C' to /proc/sysrq-trigger is like sysrq-C
+ * Normally, only the first character written is processed.
+ * However, if the first character is an underscore,
+ * all characters are processed.
  */
 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
                                   size_t count, loff_t *ppos)
 {
-       if (count) {
+       bool bulk = false;
+       size_t i;
+
+       for (i = 0; i < count; i++) {
                char c;
 
-               if (get_user(c, buf))
+               if (get_user(c, buf + i))
                        return -EFAULT;
-               __handle_sysrq(c, false);
+
+               if (c == '_')
+                       bulk = true;
+               else
+                       __handle_sysrq(c, false);
+
+               if (!bulk)
+                       break;
        }
 
        return count;
index 06414e43e0b53359348c7906bbb7a6acca8161f0..407b0d87b7c10890da30bdd9f9c60605b85829b6 100644 (file)
@@ -852,9 +852,9 @@ static ssize_t iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
 {
        void *cookie = NULL;
        unsigned long offset = 0;
-       char kernel_buf[64];
        ssize_t retval = 0;
        size_t copied, count = iov_iter_count(to);
+       u8 kernel_buf[64];
 
        do {
                ssize_t size = min(count, sizeof(kernel_buf));
@@ -995,7 +995,7 @@ static ssize_t iterate_tty_write(struct tty_ldisc *ld, struct tty_struct *tty,
 
        /* write_buf/write_cnt is protected by the atomic_write_lock mutex */
        if (tty->write_cnt < chunk) {
-               unsigned char *buf_chunk;
+               u8 *buf_chunk;
 
                if (chunk < 1024)
                        chunk = 1024;
@@ -1047,6 +1047,7 @@ out:
        return ret;
 }
 
+#ifdef CONFIG_PRINT_QUOTA_WARNING
 /**
  * tty_write_message - write a message to a certain tty, not just the console.
  * @tty: the destination tty_struct
@@ -1057,6 +1058,8 @@ out:
  * needed.
  *
  * We must still hold the BTM and test the CLOSING flag for the moment.
+ *
+ * This function is DEPRECATED, do not use in new code.
  */
 void tty_write_message(struct tty_struct *tty, char *msg)
 {
@@ -1069,6 +1072,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
                tty_write_unlock(tty);
        }
 }
+#endif
 
 static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
 {
@@ -1145,7 +1149,7 @@ ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
  *
  * Locking: none for xchar method, write ordering for write method.
  */
-int tty_send_xchar(struct tty_struct *tty, char ch)
+int tty_send_xchar(struct tty_struct *tty, u8 ch)
 {
        bool was_stopped = tty->flow.stopped;
 
@@ -2274,10 +2278,10 @@ static bool tty_legacy_tiocsti __read_mostly = IS_ENABLED(CONFIG_LEGACY_TIOCSTI)
  *  * Called functions take tty_ldiscs_lock
  *  * current->signal->tty check is safe without locks
  */
-static int tiocsti(struct tty_struct *tty, char __user *p)
+static int tiocsti(struct tty_struct *tty, u8 __user *p)
 {
-       char ch, mbz = 0;
        struct tty_ldisc *ld;
+       u8 ch;
 
        if (!tty_legacy_tiocsti && !capable(CAP_SYS_ADMIN))
                return -EIO;
@@ -2292,7 +2296,7 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
                return -EIO;
        tty_buffer_lock_exclusive(tty->port);
        if (ld->ops->receive_buf)
-               ld->ops->receive_buf(tty, &ch, &mbz, 1);
+               ld->ops->receive_buf(tty, &ch, NULL, 1);
        tty_buffer_unlock_exclusive(tty->port);
        tty_ldisc_deref(ld);
        return 0;
@@ -2489,6 +2493,9 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
        if (!retval) {
                msleep_interruptible(duration);
                retval = tty->ops->break_ctl(tty, 0);
+       } else if (retval == -EOPNOTSUPP) {
+               /* some drivers can tell only dynamically */
+               retval = 0;
        }
        tty_write_unlock(tty);
 
@@ -2498,6 +2505,24 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
        return retval;
 }
 
+/**
+ * tty_get_tiocm - get tiocm status register
+ * @tty: tty device
+ *
+ * Obtain the modem status bits from the tty driver if the feature
+ * is supported.
+ */
+int tty_get_tiocm(struct tty_struct *tty)
+{
+       int retval = -ENOTTY;
+
+       if (tty->ops->tiocmget)
+               retval = tty->ops->tiocmget(tty);
+
+       return retval;
+}
+EXPORT_SYMBOL_GPL(tty_get_tiocm);
+
 /**
  * tty_tiocmget - get modem status
  * @tty: tty device
@@ -2510,14 +2535,12 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
  */
 static int tty_tiocmget(struct tty_struct *tty, int __user *p)
 {
-       int retval = -ENOTTY;
+       int retval;
 
-       if (tty->ops->tiocmget) {
-               retval = tty->ops->tiocmget(tty);
+       retval = tty_get_tiocm(tty);
+       if (retval >= 0)
+               retval = put_user(retval, p);
 
-               if (retval >= 0)
-                       retval = put_user(retval, p);
-       }
        return retval;
 }
 
@@ -3138,7 +3161,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
  *
  * Return: the number of characters successfully output.
  */
-int tty_put_char(struct tty_struct *tty, unsigned char ch)
+int tty_put_char(struct tty_struct *tty, u8 ch)
 {
        if (tty->ops->put_char)
                return tty->ops->put_char(tty, ch);
index 4b499301a3db1d37cf6fee22507c5f5a843ca4d7..85de90eebc7bbf533e82df6a4ca3d83d0083afe6 100644 (file)
@@ -844,7 +844,7 @@ int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
                        ret = -EFAULT;
                return ret;
        case TIOCSLCKTRMIOS:
-               if (!capable(CAP_SYS_ADMIN))
+               if (!checkpoint_restore_ns_capable(&init_user_ns))
                        return -EPERM;
                copy_termios_locked(real_tty, &kterm);
                if (user_termios_to_kernel_termios(&kterm,
@@ -861,7 +861,7 @@ int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
                        ret = -EFAULT;
                return ret;
        case TIOCSLCKTRMIOS:
-               if (!capable(CAP_SYS_ADMIN))
+               if (!checkpoint_restore_ns_capable(&init_user_ns))
                        return -EPERM;
                copy_termios_locked(real_tty, &kterm);
                if (user_termios_to_kernel_termios_1(&kterm,
index 63c1252509617c1cce11eae25b519039dad0e2fc..14cca33d226930f56ecbcce0d8ad8e90df8eee4f 100644 (file)
@@ -171,7 +171,8 @@ EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
  * @port: tty_port of the device
  * @driver: tty_driver for this device
  * @index: index of the tty
- * @device: parent if exists, otherwise NULL
+ * @host: serial port hardware device
+ * @parent: parent if exists, otherwise NULL
  * @drvdata: driver data for the device
  * @attr_grp: attribute group for the device
  *
@@ -180,20 +181,20 @@ EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
  */
 struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
-               struct device *device, void *drvdata,
+               struct device *host, struct device *parent, void *drvdata,
                const struct attribute_group **attr_grp)
 {
        struct device *dev;
 
        tty_port_link_device(port, driver, index);
 
-       dev = serdev_tty_port_register(port, device, driver, index);
+       dev = serdev_tty_port_register(port, host, parent, driver, index);
        if (PTR_ERR(dev) != -ENODEV) {
                /* Skip creating cdev if we registered a serdev device */
                return dev;
        }
 
-       return tty_register_device_attr(driver, index, device, drvdata,
+       return tty_register_device_attr(driver, index, parent, drvdata,
                        attr_grp);
 }
 EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev);
@@ -203,17 +204,18 @@ EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev);
  * @port: tty_port of the device
  * @driver: tty_driver for this device
  * @index: index of the tty
- * @device: parent if exists, otherwise NULL
+ * @host: serial port hardware controller device
+ * @parent: parent if exists, otherwise NULL
  *
  * Register a serdev or tty device depending on if the parent device has any
  * defined serdev clients or not.
  */
 struct device *tty_port_register_device_serdev(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
-               struct device *device)
+               struct device *host, struct device *parent)
 {
        return tty_port_register_device_attr_serdev(port, driver, index,
-                       device, NULL, NULL);
+                       host, parent, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(tty_port_register_device_serdev);
 
@@ -245,7 +247,7 @@ int tty_port_alloc_xmit_buf(struct tty_port *port)
        /* We may sleep in get_zeroed_page() */
        mutex_lock(&port->buf_mutex);
        if (port->xmit_buf == NULL) {
-               port->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
+               port->xmit_buf = (u8 *)get_zeroed_page(GFP_KERNEL);
                if (port->xmit_buf)
                        kfifo_init(&port->xmit_fifo, port->xmit_buf, PAGE_SIZE);
        }
index 5e39a4f430ee5cbcbdcf05f80819a6289b1d5b1c..82d70083fead062c77b8cfb097c3d86dc2b56279 100644 (file)
@@ -644,7 +644,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
        if (!ct)
                return 0;
 
-       unilist = vmemdup_user(list, array_size(sizeof(*unilist), ct));
+       unilist = vmemdup_array_user(list, ct, sizeof(*unilist));
        if (IS_ERR(unilist))
                return PTR_ERR(unilist);
 
index 12a192e1196b3ddfe9cdc09d0d579b0139317ac0..a2116e135a82d48e353ea2857e8bafb9e2b0687e 100644 (file)
@@ -1772,12 +1772,10 @@ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
                        return -EINVAL;
 
                if (ct) {
-
-                       dia = memdup_user(a->kbdiacr,
-                                       sizeof(struct kbdiacr) * ct);
+                       dia = memdup_array_user(a->kbdiacr,
+                                               ct, sizeof(struct kbdiacr));
                        if (IS_ERR(dia))
                                return PTR_ERR(dia);
-
                }
 
                spin_lock_irqsave(&kbd_event_lock, flags);
@@ -1811,8 +1809,8 @@ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
                        return -EINVAL;
 
                if (ct) {
-                       buf = memdup_user(a->kbdiacruc,
-                                         ct * sizeof(struct kbdiacruc));
+                       buf = memdup_array_user(a->kbdiacruc,
+                                               ct, sizeof(struct kbdiacruc));
                        if (IS_ERR(buf))
                                return PTR_ERR(buf);
                } 
index d1e33328ff3f49f29a7a4f119f5a6a3868b75e4b..d77b25b79ae3ec5c949912eb88b2433472180be0 100644 (file)
@@ -3057,7 +3057,7 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
  */
 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
 {
-       u32 mask = 1U << task_tag;
+       u32 mask;
        unsigned long flags;
        int err;
 
@@ -3075,6 +3075,8 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
                return 0;
        }
 
+       mask = 1U << task_tag;
+
        /* clear outstanding transaction before retry */
        spin_lock_irqsave(hba->host->host_lock, flags);
        ufshcd_utrl_clear(hba, mask);
@@ -6352,7 +6354,6 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
                ufshcd_hold(hba);
                if (!ufshcd_is_clkgating_allowed(hba))
                        ufshcd_setup_clocks(hba, true);
-               ufshcd_release(hba);
                pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
                ufshcd_vops_resume(hba, pm_op);
        } else {
@@ -8725,7 +8726,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
 
        ufs_bsg_probe(hba);
        scsi_scan_host(hba->host);
-       pm_runtime_put_sync(hba->dev);
 
 out:
        return ret;
@@ -8994,15 +8994,12 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 
        /* Probe and add UFS logical units  */
        ret = ufshcd_add_lus(hba);
+
 out:
-       /*
-        * If we failed to initialize the device or the device is not
-        * present, turn off the power/clocks etc.
-        */
-       if (ret) {
-               pm_runtime_put_sync(hba->dev);
-               ufshcd_hba_exit(hba);
-       }
+       pm_runtime_put_sync(hba->dev);
+
+       if (ret)
+               dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
 }
 
 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
index 480787048e752929d9b255cde85feb9d629292f1..39eef470f8fa5b88b41450aeec8833a619899e34 100644 (file)
@@ -1716,7 +1716,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
                                             ufs_qcom_write_msi_msg);
        if (ret) {
                dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
-               goto out;
+               return ret;
        }
 
        msi_lock_descs(hba->dev);
@@ -1750,11 +1750,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
                                    FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
                                    REG_UFS_CFG3);
                ufshcd_mcq_enable_esi(hba);
-       }
-
-out:
-       if (!ret)
                host->esi_enabled = true;
+       }
 
        return ret;
 }
index 62082d64ece00613a9d82eadd0552f6262cb9c10..2d572f6c8ec833023450136e3e2278d46d8937ae 100644 (file)
@@ -466,13 +466,13 @@ static int uio_open(struct inode *inode, struct file *filep)
 
        mutex_lock(&minor_lock);
        idev = idr_find(&uio_idr, iminor(inode));
-       mutex_unlock(&minor_lock);
        if (!idev) {
                ret = -ENODEV;
+               mutex_unlock(&minor_lock);
                goto out;
        }
-
        get_device(&idev->dev);
+       mutex_unlock(&minor_lock);
 
        if (!try_module_get(idev->owner)) {
                ret = -ENODEV;
@@ -1064,9 +1064,8 @@ void uio_unregister_device(struct uio_info *info)
        wake_up_interruptible(&idev->wait);
        kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);
 
-       device_unregister(&idev->dev);
-
        uio_free_minor(minor);
+       device_unregister(&idev->dev);
 
        return;
 }
index 5812f7ea7f9021342863c8f122d3a802d3973d63..16703815be0c482328cb45c72dabf457eedcf4e9 100644 (file)
@@ -546,7 +546,7 @@ MODULE_PARM_DESC(annex,
 
 #define uea_wait(sc, cond, timeo) \
 ({ \
-       int _r = wait_event_interruptible_timeout(sc->sync_q, \
+       int _r = wait_event_freezable_timeout(sc->sync_q, \
                        (cond) || kthread_should_stop(), timeo); \
        if (kthread_should_stop()) \
                _r = -ENODEV; \
@@ -1896,7 +1896,6 @@ static int uea_kthread(void *data)
                        ret = sc->stat(sc);
                if (ret != -EAGAIN)
                        uea_wait(sc, 0, msecs_to_jiffies(1000));
-               try_to_freeze();
        }
        uea_leaves(INS_TO_USBDEV(sc));
        return ret;
@@ -2252,7 +2251,7 @@ static ssize_t stat_status_show(struct device *dev, struct device_attribute *att
        sc = dev_to_uea(dev);
        if (!sc)
                goto out;
-       ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.state);
+       ret = sysfs_emit(buf, "%08x\n", sc->stats.phy.state);
 out:
        mutex_unlock(&uea_mutex);
        return ret;
@@ -2318,19 +2317,19 @@ static ssize_t stat_human_status_show(struct device *dev,
 
        switch (modem_state) {
        case 0:
-               ret = sprintf(buf, "Modem is booting\n");
+               ret = sysfs_emit(buf, "Modem is booting\n");
                break;
        case 1:
-               ret = sprintf(buf, "Modem is initializing\n");
+               ret = sysfs_emit(buf, "Modem is initializing\n");
                break;
        case 2:
-               ret = sprintf(buf, "Modem is operational\n");
+               ret = sysfs_emit(buf, "Modem is operational\n");
                break;
        case 3:
-               ret = sprintf(buf, "Modem synchronization failed\n");
+               ret = sysfs_emit(buf, "Modem synchronization failed\n");
                break;
        default:
-               ret = sprintf(buf, "Modem state is unknown\n");
+               ret = sysfs_emit(buf, "Modem state is unknown\n");
                break;
        }
 out:
@@ -2364,7 +2363,7 @@ static ssize_t stat_delin_show(struct device *dev, struct device_attribute *attr
                        delin = "LOSS";
        }
 
-       ret = sprintf(buf, "%s\n", delin);
+       ret = sysfs_emit(buf, "%s\n", delin);
 out:
        mutex_unlock(&uea_mutex);
        return ret;
@@ -2384,7 +2383,7 @@ static ssize_t stat_##name##_show(struct device *dev,             \
        sc = dev_to_uea(dev);                                   \
        if (!sc)                                                \
                goto out;                                       \
-       ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.name);  \
+       ret = sysfs_emit(buf, "%08x\n", sc->stats.phy.name);    \
        if (reset)                                              \
                sc->stats.phy.name = 0;                         \
 out:                                                           \
index 11a5b3437c32d2fe152743bf44c49ab1ce6142f7..aeca902ab6cc427b0946cf13ea9b8c725eb3f287 100644 (file)
@@ -1119,6 +1119,8 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
        dma_addr_t trb_dma;
        u32 togle_pcs = 1;
        int sg_iter = 0;
+       int num_trb_req;
+       int trb_burst;
        int num_trb;
        int address;
        u32 control;
@@ -1126,16 +1128,15 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
        u16 total_tdl = 0;
        struct scatterlist *s = NULL;
        bool sg_supported = !!(request->num_mapped_sgs);
+       u32 ioc = request->no_interrupt ? 0 : TRB_IOC;
 
+       num_trb_req = sg_supported ? request->num_mapped_sgs : 1;
+
+       /* ISO transfer require each SOF have a TD, each TD include some TRBs */
        if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
-               num_trb = priv_ep->interval;
+               num_trb = priv_ep->interval * num_trb_req;
        else
-               num_trb = sg_supported ? request->num_mapped_sgs : 1;
-
-       if (num_trb > priv_ep->free_trbs) {
-               priv_ep->flags |= EP_RING_FULL;
-               return -ENOBUFS;
-       }
+               num_trb = num_trb_req;
 
        priv_req = to_cdns3_request(request);
        address = priv_ep->endpoint.desc->bEndpointAddress;
@@ -1184,14 +1185,31 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
 
                link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
                                    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
+
+               if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
+                       /*
+                        * ISO require LINK TRB must be first one of TD.
+                        * Fill LINK TRBs for left trb space to simply software process logic.
+                        */
+                       while (priv_ep->enqueue) {
+                               *trb = *link_trb;
+                               trace_cdns3_prepare_trb(priv_ep, trb);
+
+                               cdns3_ep_inc_enq(priv_ep);
+                               trb = priv_ep->trb_pool + priv_ep->enqueue;
+                               priv_req->trb = trb;
+                       }
+               }
+       }
+
+       if (num_trb > priv_ep->free_trbs) {
+               priv_ep->flags |= EP_RING_FULL;
+               return -ENOBUFS;
        }
 
        if (priv_dev->dev_ver <= DEV_VER_V2)
                togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
 
-       if (sg_supported)
-               s = request->sg;
-
        /* set incorrect Cycle Bit for first trb*/
        control = priv_ep->pcs ? 0 : TRB_CYCLE;
        trb->length = 0;
@@ -1209,6 +1227,9 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
        do {
                u32 length;
 
+               if (!(sg_iter % num_trb_req) && sg_supported)
+                       s = request->sg;
+
                /* fill TRB */
                control |= TRB_TYPE(TRB_NORMAL);
                if (sg_supported) {
@@ -1223,7 +1244,36 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
                        total_tdl += DIV_ROUND_UP(length,
                                               priv_ep->endpoint.maxpacket);
 
-               trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
+               trb_burst = priv_ep->trb_burst_size;
+
+               /*
+                * Supposed DMA cross 4k bounder problem should be fixed at DEV_VER_V2, but still
+                * met problem when do ISO transfer if sg enabled.
+                *
+                * Data pattern likes below when sg enabled, package size is 1k and mult is 2
+                *       [UVC Header(8B) ] [data(3k - 8)] ...
+                *
+                * The received data at offset 0xd000 will get 0xc000 data, len 0x70. Error happen
+                * as below pattern:
+                *      0xd000: wrong
+                *      0xe000: wrong
+                *      0xf000: correct
+                *      0x10000: wrong
+                *      0x11000: wrong
+                *      0x12000: correct
+                *      ...
+                *
+                * But it is still unclear about why error have not happen below 0xd000, it should
+                * cross 4k bounder. But anyway, the below code can fix this problem.
+                *
+                * To avoid DMA cross 4k bounder at ISO transfer, reduce burst len according to 16.
+                */
+               if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_dev->dev_ver <= DEV_VER_V2)
+                       if (ALIGN_DOWN(trb->buffer, SZ_4K) !=
+                           ALIGN_DOWN(trb->buffer + length, SZ_4K))
+                               trb_burst = 16;
+
+               trb->length |= cpu_to_le32(TRB_BURST_LEN(trb_burst) |
                                        TRB_LEN(length));
                pcs = priv_ep->pcs ? TRB_CYCLE : 0;
 
@@ -1235,11 +1285,11 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
                        control |= pcs;
 
                if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir) {
-                       control |= TRB_IOC | TRB_ISP;
+                       control |= ioc | TRB_ISP;
                } else {
                        /* for last element in TD or in SG list */
                        if (sg_iter == (num_trb - 1) && sg_iter != 0)
-                               control |= pcs | TRB_IOC | TRB_ISP;
+                               control |= pcs | ioc | TRB_ISP;
                }
 
                if (sg_iter)
@@ -1250,7 +1300,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
                if (sg_supported) {
                        trb->control |= cpu_to_le32(TRB_ISP);
                        /* Don't set chain bit for last TRB */
-                       if (sg_iter < num_trb - 1)
+                       if ((sg_iter % num_trb_req) < num_trb_req - 1)
                                trb->control |= cpu_to_le32(TRB_CHAIN);
 
                        s = sg_next(s);
@@ -1270,7 +1320,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
        priv_req->num_of_trb = num_trb;
 
        if (sg_iter == 1)
-               trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);
+               trb->control |= cpu_to_le32(ioc | TRB_ISP);
 
        if (priv_dev->dev_ver < DEV_VER_V2 &&
            (priv_ep->flags & EP_TDLCHK_EN)) {
@@ -1508,6 +1558,12 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
 
                /* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
                while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
+
+                       /* ISO ep_traddr may stop at LINK TRB */
+                       if (priv_ep->dequeue == cdns3_get_dma_pos(priv_dev, priv_ep) &&
+                           priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+                               break;
+
                        trace_cdns3_complete_trb(priv_ep, trb);
                        cdns3_ep_inc_deq(priv_ep);
                        trb = priv_ep->trb_pool + priv_ep->dequeue;
@@ -1540,6 +1596,10 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
                        }
 
                        if (request_handled) {
+                               /* TRBs are duplicated by priv_ep->interval time for ISO IN */
+                               if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_ep->dir)
+                                       request->actual /= priv_ep->interval;
+
                                cdns3_gadget_giveback(priv_ep, priv_req, 0);
                                request_handled = false;
                                transfer_end = false;
@@ -2035,11 +2095,10 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
        bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
        struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
        u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
-       u32 max_packet_size = 0;
-       u8 maxburst = 0;
+       u32 max_packet_size = priv_ep->wMaxPacketSize;
+       u8 maxburst = priv_ep->bMaxBurst;
        u32 ep_cfg = 0;
        u8 buffering;
-       u8 mult = 0;
        int ret;
 
        buffering = priv_dev->ep_buf_size - 1;
@@ -2061,8 +2120,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
                break;
        default:
                ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
-               mult = priv_dev->ep_iso_burst - 1;
-               buffering = mult + 1;
+               buffering = (priv_ep->bMaxBurst + 1) * (priv_ep->mult + 1) - 1;
        }
 
        switch (priv_dev->gadget.speed) {
@@ -2073,17 +2131,8 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
                max_packet_size = is_iso_ep ? 1024 : 512;
                break;
        case USB_SPEED_SUPER:
-               /* It's limitation that driver assumes in driver. */
-               mult = 0;
-               max_packet_size = 1024;
-               if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
-                       maxburst = priv_dev->ep_iso_burst - 1;
-                       buffering = (mult + 1) *
-                                   (maxburst + 1);
-
-                       if (priv_ep->interval > 1)
-                               buffering++;
-               } else {
+               if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+                       max_packet_size = 1024;
                        maxburst = priv_dev->ep_buf_size - 1;
                }
                break;
@@ -2112,7 +2161,6 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
        if (priv_dev->dev_ver < DEV_VER_V2)
                priv_ep->trb_burst_size = 16;
 
-       mult = min_t(u8, mult, EP_CFG_MULT_MAX);
        buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
        maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
 
@@ -2146,7 +2194,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
        }
 
        ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
-                 EP_CFG_MULT(mult) |
+                 EP_CFG_MULT(priv_ep->mult) |                  /* must match EP setting */
                  EP_CFG_BUFFERING(buffering) |
                  EP_CFG_MAXBURST(maxburst);
 
@@ -2236,6 +2284,13 @@ usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
        priv_ep->type = usb_endpoint_type(desc);
        priv_ep->flags |= EP_CLAIMED;
        priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
+       priv_ep->wMaxPacketSize =  usb_endpoint_maxp(desc);
+       priv_ep->mult = USB_EP_MAXP_MULT(priv_ep->wMaxPacketSize);
+       priv_ep->wMaxPacketSize &= USB_ENDPOINT_MAXP_MASK;
+       if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && comp_desc) {
+               priv_ep->mult =  USB_SS_MULT(comp_desc->bmAttributes) - 1;
+               priv_ep->bMaxBurst = comp_desc->bMaxBurst;
+       }
 
        spin_unlock_irqrestore(&priv_dev->lock, flags);
        return &priv_ep->endpoint;
@@ -3019,22 +3074,40 @@ static int cdns3_gadget_check_config(struct usb_gadget *gadget)
        struct cdns3_endpoint *priv_ep;
        struct usb_ep *ep;
        int n_in = 0;
+       int iso = 0;
+       int out = 1;
        int total;
+       int n;
 
        list_for_each_entry(ep, &gadget->ep_list, ep_list) {
                priv_ep = ep_to_cdns3_ep(ep);
-               if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN))
-                       n_in++;
+               if (!(priv_ep->flags & EP_CLAIMED))
+                       continue;
+
+               n = (priv_ep->mult + 1) * (priv_ep->bMaxBurst + 1);
+               if (ep->address & USB_DIR_IN) {
+                       /*
+                        * ISO transfer: DMA start move data when get ISO, only transfer
+                        * data as min(TD size, iso). No benefit for allocate bigger
+                        * internal memory than 'iso'.
+                        */
+                       if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+                               iso += n;
+                       else
+                               n_in++;
+               } else {
+                       if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
+                               out = max_t(int, out, n);
+               }
        }
 
        /* 2KB are reserved for EP0, 1KB for out*/
-       total = 2 + n_in + 1;
+       total = 2 + n_in + out + iso;
 
        if (total > priv_dev->onchip_buffers)
                return -ENOMEM;
 
-       priv_dev->ep_buf_size = priv_dev->ep_iso_burst =
-                       (priv_dev->onchip_buffers - 2) / (n_in + 1);
+       priv_dev->ep_buf_size = (priv_dev->onchip_buffers - 2 - iso) / (n_in + out);
 
        return 0;
 }
index fbe4a8e3aa89777dafc8f1832ce2c731efe1cf17..086a7bb8389752a1a70be922982fe11c864a3fb8 100644 (file)
@@ -1168,6 +1168,9 @@ struct cdns3_endpoint {
        u8                      dir;
        u8                      num;
        u8                      type;
+       u8                      mult;
+       u8                      bMaxBurst;
+       u16                     wMaxPacketSize;
        int                     interval;
 
        int                     free_trbs;
index 2c1aca84f2264d0e450ff0528d4c4fbd1af59a5b..3ef8e3c872a37ecf41b7b089d27321aab04293f0 100644 (file)
@@ -87,16 +87,20 @@ static int cdns3_plat_probe(struct platform_device *pdev)
        cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
 
        if (cdns->dev_irq < 0)
-               return cdns->dev_irq;
+               return dev_err_probe(dev, cdns->dev_irq,
+                                    "Failed to get peripheral IRQ\n");
 
        regs = devm_platform_ioremap_resource_byname(pdev, "dev");
        if (IS_ERR(regs))
-               return PTR_ERR(regs);
+               return dev_err_probe(dev, PTR_ERR(regs),
+                                    "Failed to get dev base\n");
+
        cdns->dev_regs  = regs;
 
        cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
        if (cdns->otg_irq < 0)
-               return cdns->otg_irq;
+               return dev_err_probe(dev, cdns->otg_irq,
+                                    "Failed to get otg IRQ\n");
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
        if (!res) {
@@ -119,7 +123,8 @@ static int cdns3_plat_probe(struct platform_device *pdev)
 
        cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
        if (IS_ERR(cdns->usb2_phy))
-               return PTR_ERR(cdns->usb2_phy);
+               return dev_err_probe(dev, PTR_ERR(cdns->usb2_phy),
+                                    "Failed to get cdn3,usb2-phy\n");
 
        ret = phy_init(cdns->usb2_phy);
        if (ret)
@@ -127,7 +132,8 @@ static int cdns3_plat_probe(struct platform_device *pdev)
 
        cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
        if (IS_ERR(cdns->usb3_phy))
-               return PTR_ERR(cdns->usb3_phy);
+               return dev_err_probe(dev, PTR_ERR(cdns->usb3_phy),
+                                    "Failed to get cdn3,usb3-phy\n");
 
        ret = phy_init(cdns->usb3_phy);
        if (ret)
index a7265b86e42775bab7e286ffa9b5cd2852454294..c04d196abd8782901492822d81659b228613ddd7 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * cdns3-starfive.c - StarFive specific Glue layer for Cadence USB Controller
  *
  * Copyright (C) 2023 StarFive Technology Co., Ltd.
index ad617b7455b9c3e26bb1dee98fccbb6ad0c11e2e..cd138acdcce16500e0b875ca31c9148618e5272a 100644 (file)
@@ -187,202 +187,202 @@ static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0,
 
        switch (type) {
        case TRB_LINK:
-               ret = snprintf(str, size,
-                              "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
-                              field1, field0, GET_INTR_TARGET(field2),
-                              cdnsp_trb_type_string(type),
-                              field3 & TRB_IOC ? 'I' : 'i',
-                              field3 & TRB_CHAIN ? 'C' : 'c',
-                              field3 & TRB_TC ? 'T' : 't',
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
+                               field1, field0, GET_INTR_TARGET(field2),
+                               cdnsp_trb_type_string(type),
+                               field3 & TRB_IOC ? 'I' : 'i',
+                               field3 & TRB_CHAIN ? 'C' : 'c',
+                               field3 & TRB_TC ? 'T' : 't',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_TRANSFER:
        case TRB_COMPLETION:
        case TRB_PORT_STATUS:
        case TRB_HC_EVENT:
-               ret = snprintf(str, size,
-                              "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
-                              " len %ld slot %ld flags %c:%c",
-                              ep_num, ep_id % 2 ? "out" : "in",
-                              TRB_TO_EP_INDEX(field3),
-                              cdnsp_trb_type_string(type), field1, field0,
-                              cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
-                              EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
-                              field3 & EVENT_DATA ? 'E' : 'e',
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
+                               " len %ld slot %ld flags %c:%c",
+                               ep_num, ep_id % 2 ? "out" : "in",
+                               TRB_TO_EP_INDEX(field3),
+                               cdnsp_trb_type_string(type), field1, field0,
+                               cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
+                               EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
+                               field3 & EVENT_DATA ? 'E' : 'e',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_MFINDEX_WRAP:
-               ret = snprintf(str, size, "%s: flags %c",
-                              cdnsp_trb_type_string(type),
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size, "%s: flags %c",
+                               cdnsp_trb_type_string(type),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_SETUP:
-               ret = snprintf(str, size,
-                              "type '%s' bRequestType %02x bRequest %02x "
-                              "wValue %02x%02x wIndex %02x%02x wLength %d "
-                              "length %ld TD size %ld intr %ld Setup ID %ld "
-                              "flags %c:%c:%c",
-                              cdnsp_trb_type_string(type),
-                              field0 & 0xff,
-                              (field0 & 0xff00) >> 8,
-                              (field0 & 0xff000000) >> 24,
-                              (field0 & 0xff0000) >> 16,
-                              (field1 & 0xff00) >> 8,
-                              field1 & 0xff,
-                              (field1 & 0xff000000) >> 16 |
-                              (field1 & 0xff0000) >> 16,
-                              TRB_LEN(field2), GET_TD_SIZE(field2),
-                              GET_INTR_TARGET(field2),
-                              TRB_SETUPID_TO_TYPE(field3),
-                              field3 & TRB_IDT ? 'D' : 'd',
-                              field3 & TRB_IOC ? 'I' : 'i',
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "type '%s' bRequestType %02x bRequest %02x "
+                               "wValue %02x%02x wIndex %02x%02x wLength %d "
+                               "length %ld TD size %ld intr %ld Setup ID %ld "
+                               "flags %c:%c:%c",
+                               cdnsp_trb_type_string(type),
+                               field0 & 0xff,
+                               (field0 & 0xff00) >> 8,
+                               (field0 & 0xff000000) >> 24,
+                               (field0 & 0xff0000) >> 16,
+                               (field1 & 0xff00) >> 8,
+                               field1 & 0xff,
+                               (field1 & 0xff000000) >> 16 |
+                               (field1 & 0xff0000) >> 16,
+                               TRB_LEN(field2), GET_TD_SIZE(field2),
+                               GET_INTR_TARGET(field2),
+                               TRB_SETUPID_TO_TYPE(field3),
+                               field3 & TRB_IDT ? 'D' : 'd',
+                               field3 & TRB_IOC ? 'I' : 'i',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_DATA:
-               ret = snprintf(str, size,
-                              "type '%s' Buffer %08x%08x length %ld TD size %ld "
-                              "intr %ld flags %c:%c:%c:%c:%c:%c:%c",
-                              cdnsp_trb_type_string(type),
-                              field1, field0, TRB_LEN(field2),
-                              GET_TD_SIZE(field2),
-                              GET_INTR_TARGET(field2),
-                              field3 & TRB_IDT ? 'D' : 'i',
-                              field3 & TRB_IOC ? 'I' : 'i',
-                              field3 & TRB_CHAIN ? 'C' : 'c',
-                              field3 & TRB_NO_SNOOP ? 'S' : 's',
-                              field3 & TRB_ISP ? 'I' : 'i',
-                              field3 & TRB_ENT ? 'E' : 'e',
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "type '%s' Buffer %08x%08x length %ld TD size %ld "
+                               "intr %ld flags %c:%c:%c:%c:%c:%c:%c",
+                               cdnsp_trb_type_string(type),
+                               field1, field0, TRB_LEN(field2),
+                               GET_TD_SIZE(field2),
+                               GET_INTR_TARGET(field2),
+                               field3 & TRB_IDT ? 'D' : 'i',
+                               field3 & TRB_IOC ? 'I' : 'i',
+                               field3 & TRB_CHAIN ? 'C' : 'c',
+                               field3 & TRB_NO_SNOOP ? 'S' : 's',
+                               field3 & TRB_ISP ? 'I' : 'i',
+                               field3 & TRB_ENT ? 'E' : 'e',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_STATUS:
-               ret = snprintf(str, size,
-                              "Buffer %08x%08x length %ld TD size %ld intr"
-                              "%ld type '%s' flags %c:%c:%c:%c",
-                              field1, field0, TRB_LEN(field2),
-                              GET_TD_SIZE(field2),
-                              GET_INTR_TARGET(field2),
-                              cdnsp_trb_type_string(type),
-                              field3 & TRB_IOC ? 'I' : 'i',
-                              field3 & TRB_CHAIN ? 'C' : 'c',
-                              field3 & TRB_ENT ? 'E' : 'e',
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "Buffer %08x%08x length %ld TD size %ld intr"
+                               "%ld type '%s' flags %c:%c:%c:%c",
+                               field1, field0, TRB_LEN(field2),
+                               GET_TD_SIZE(field2),
+                               GET_INTR_TARGET(field2),
+                               cdnsp_trb_type_string(type),
+                               field3 & TRB_IOC ? 'I' : 'i',
+                               field3 & TRB_CHAIN ? 'C' : 'c',
+                               field3 & TRB_ENT ? 'E' : 'e',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_NORMAL:
        case TRB_ISOC:
        case TRB_EVENT_DATA:
        case TRB_TR_NOOP:
-               ret = snprintf(str, size,
-                              "type '%s' Buffer %08x%08x length %ld "
-                              "TD size %ld intr %ld "
-                              "flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
-                              cdnsp_trb_type_string(type),
-                              field1, field0, TRB_LEN(field2),
-                              GET_TD_SIZE(field2),
-                              GET_INTR_TARGET(field2),
-                              field3 & TRB_BEI ? 'B' : 'b',
-                              field3 & TRB_IDT ? 'T' : 't',
-                              field3 & TRB_IOC ? 'I' : 'i',
-                              field3 & TRB_CHAIN ? 'C' : 'c',
-                              field3 & TRB_NO_SNOOP ? 'S' : 's',
-                              field3 & TRB_ISP ? 'I' : 'i',
-                              field3 & TRB_ENT ? 'E' : 'e',
-                              field3 & TRB_CYCLE ? 'C' : 'c',
-                              !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
+               ret = scnprintf(str, size,
+                               "type '%s' Buffer %08x%08x length %ld "
+                               "TD size %ld intr %ld "
+                               "flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
+                               cdnsp_trb_type_string(type),
+                               field1, field0, TRB_LEN(field2),
+                               GET_TD_SIZE(field2),
+                               GET_INTR_TARGET(field2),
+                               field3 & TRB_BEI ? 'B' : 'b',
+                               field3 & TRB_IDT ? 'T' : 't',
+                               field3 & TRB_IOC ? 'I' : 'i',
+                               field3 & TRB_CHAIN ? 'C' : 'c',
+                               field3 & TRB_NO_SNOOP ? 'S' : 's',
+                               field3 & TRB_ISP ? 'I' : 'i',
+                               field3 & TRB_ENT ? 'E' : 'e',
+                               field3 & TRB_CYCLE ? 'C' : 'c',
+                               !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
                break;
        case TRB_CMD_NOOP:
        case TRB_ENABLE_SLOT:
-               ret = snprintf(str, size, "%s: flags %c",
-                              cdnsp_trb_type_string(type),
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size, "%s: flags %c",
+                               cdnsp_trb_type_string(type),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_DISABLE_SLOT:
-               ret = snprintf(str, size, "%s: slot %ld flags %c",
-                              cdnsp_trb_type_string(type),
-                              TRB_TO_SLOT_ID(field3),
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size, "%s: slot %ld flags %c",
+                               cdnsp_trb_type_string(type),
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_ADDR_DEV:
-               ret = snprintf(str, size,
-                              "%s: ctx %08x%08x slot %ld flags %c:%c",
-                              cdnsp_trb_type_string(type), field1, field0,
-                              TRB_TO_SLOT_ID(field3),
-                              field3 & TRB_BSR ? 'B' : 'b',
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "%s: ctx %08x%08x slot %ld flags %c:%c",
+                               cdnsp_trb_type_string(type), field1, field0,
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_BSR ? 'B' : 'b',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_CONFIG_EP:
-               ret = snprintf(str, size,
-                              "%s: ctx %08x%08x slot %ld flags %c:%c",
-                              cdnsp_trb_type_string(type), field1, field0,
-                              TRB_TO_SLOT_ID(field3),
-                              field3 & TRB_DC ? 'D' : 'd',
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "%s: ctx %08x%08x slot %ld flags %c:%c",
+                               cdnsp_trb_type_string(type), field1, field0,
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_DC ? 'D' : 'd',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_EVAL_CONTEXT:
-               ret = snprintf(str, size,
-                              "%s: ctx %08x%08x slot %ld flags %c",
-                              cdnsp_trb_type_string(type), field1, field0,
-                              TRB_TO_SLOT_ID(field3),
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "%s: ctx %08x%08x slot %ld flags %c",
+                               cdnsp_trb_type_string(type), field1, field0,
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_RESET_EP:
        case TRB_HALT_ENDPOINT:
-               ret = snprintf(str, size,
-                              "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
-                              cdnsp_trb_type_string(type),
-                              ep_num, ep_id % 2 ? "out" : "in",
-                              TRB_TO_EP_INDEX(field3), field1, field0,
-                              TRB_TO_SLOT_ID(field3),
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
+                               cdnsp_trb_type_string(type),
+                               ep_num, ep_id % 2 ? "out" : "in",
+                               TRB_TO_EP_INDEX(field3), field1, field0,
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_STOP_RING:
-               ret = snprintf(str, size,
-                              "%s: ep%d%s(%d) slot %ld sp %d flags %c",
-                              cdnsp_trb_type_string(type),
-                              ep_num, ep_id % 2 ? "out" : "in",
-                              TRB_TO_EP_INDEX(field3),
-                              TRB_TO_SLOT_ID(field3),
-                              TRB_TO_SUSPEND_PORT(field3),
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "%s: ep%d%s(%d) slot %ld sp %d flags %c",
+                               cdnsp_trb_type_string(type),
+                               ep_num, ep_id % 2 ? "out" : "in",
+                               TRB_TO_EP_INDEX(field3),
+                               TRB_TO_SLOT_ID(field3),
+                               TRB_TO_SUSPEND_PORT(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_SET_DEQ:
-               ret = snprintf(str, size,
-                              "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld  flags %c",
-                              cdnsp_trb_type_string(type),
-                              ep_num, ep_id % 2 ? "out" : "in",
-                              TRB_TO_EP_INDEX(field3), field1, field0,
-                              TRB_TO_STREAM_ID(field2),
-                              TRB_TO_SLOT_ID(field3),
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld  flags %c",
+                               cdnsp_trb_type_string(type),
+                               ep_num, ep_id % 2 ? "out" : "in",
+                               TRB_TO_EP_INDEX(field3), field1, field0,
+                               TRB_TO_STREAM_ID(field2),
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_RESET_DEV:
-               ret = snprintf(str, size, "%s: slot %ld flags %c",
-                              cdnsp_trb_type_string(type),
-                              TRB_TO_SLOT_ID(field3),
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size, "%s: slot %ld flags %c",
+                               cdnsp_trb_type_string(type),
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        case TRB_ENDPOINT_NRDY:
                temp = TRB_TO_HOST_STREAM(field2);
 
-               ret = snprintf(str, size,
-                              "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
-                              cdnsp_trb_type_string(type),
-                              ep_num, ep_id % 2 ? "out" : "in",
-                              TRB_TO_EP_INDEX(field3), temp,
-                              temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
-                              temp == STREAM_REJECTED ? "(REJECTED)" : "",
-                              TRB_TO_DEV_STREAM(field0),
-                              field3 & TRB_STAT ? 'S' : 's',
-                              field3 & TRB_CYCLE ? 'C' : 'c');
+               ret = scnprintf(str, size,
+                               "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
+                               cdnsp_trb_type_string(type),
+                               ep_num, ep_id % 2 ? "out" : "in",
+                               TRB_TO_EP_INDEX(field3), temp,
+                               temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
+                               temp == STREAM_REJECTED ? "(REJECTED)" : "",
+                               TRB_TO_DEV_STREAM(field0),
+                               field3 & TRB_STAT ? 'S' : 's',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
                break;
        default:
-               ret = snprintf(str, size,
-                              "type '%s' -> raw %08x %08x %08x %08x",
-                              cdnsp_trb_type_string(type),
-                              field0, field1, field2, field3);
+               ret = scnprintf(str, size,
+                               "type '%s' -> raw %08x %08x %08x %08x",
+                               cdnsp_trb_type_string(type),
+                               field0, field1, field2, field3);
        }
 
-       if (ret >= size)
-               pr_info("CDNSP: buffer overflowed.\n");
+       if (ret == size - 1)
+               pr_info("CDNSP: buffer may be truncated.\n");
 
        return str;
 }
@@ -465,32 +465,32 @@ static inline const char *cdnsp_decode_portsc(char *str, size_t size,
 {
        int ret;
 
-       ret = snprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ",
-                      portsc & PORT_POWER ? "Powered" : "Powered-off",
-                      portsc & PORT_CONNECT ? "Connected" : "Not-connected",
-                      portsc & PORT_PED ? "Enabled" : "Disabled",
-                      cdnsp_portsc_link_state_string(portsc),
-                      DEV_PORT_SPEED(portsc));
+       ret = scnprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ",
+                       portsc & PORT_POWER ? "Powered" : "Powered-off",
+                       portsc & PORT_CONNECT ? "Connected" : "Not-connected",
+                       portsc & PORT_PED ? "Enabled" : "Disabled",
+                       cdnsp_portsc_link_state_string(portsc),
+                       DEV_PORT_SPEED(portsc));
 
        if (portsc & PORT_RESET)
-               ret += snprintf(str + ret, size - ret, "In-Reset ");
+               ret += scnprintf(str + ret, size - ret, "In-Reset ");
 
-       ret += snprintf(str + ret, size - ret, "Change: ");
+       ret += scnprintf(str + ret, size - ret, "Change: ");
        if (portsc & PORT_CSC)
-               ret += snprintf(str + ret, size - ret, "CSC ");
+               ret += scnprintf(str + ret, size - ret, "CSC ");
        if (portsc & PORT_WRC)
-               ret += snprintf(str + ret, size - ret, "WRC ");
+               ret += scnprintf(str + ret, size - ret, "WRC ");
        if (portsc & PORT_RC)
-               ret += snprintf(str + ret, size - ret, "PRC ");
+               ret += scnprintf(str + ret, size - ret, "PRC ");
        if (portsc & PORT_PLC)
-               ret += snprintf(str + ret, size - ret, "PLC ");
+               ret += scnprintf(str + ret, size - ret, "PLC ");
        if (portsc & PORT_CEC)
-               ret += snprintf(str + ret, size - ret, "CEC ");
-       ret += snprintf(str + ret, size - ret, "Wake: ");
+               ret += scnprintf(str + ret, size - ret, "CEC ");
+       ret += scnprintf(str + ret, size - ret, "Wake: ");
        if (portsc & PORT_WKCONN_E)
-               ret += snprintf(str + ret, size - ret, "WCE ");
+               ret += scnprintf(str + ret, size - ret, "WCE ");
        if (portsc & PORT_WKDISC_E)
-               ret += snprintf(str + ret, size - ret, "WDE ");
+               ret += scnprintf(str + ret, size - ret, "WDE ");
 
        return str;
 }
@@ -562,20 +562,20 @@ static inline const char *cdnsp_decode_ep_context(char *str, size_t size,
 
        avg = EP_AVG_TRB_LENGTH(tx_info);
 
-       ret = snprintf(str, size, "State %s mult %d max P. Streams %d %s",
-                      cdnsp_ep_state_string(ep_state), mult,
-                      max_pstr, lsa ? "LSA " : "");
+       ret = scnprintf(str, size, "State %s mult %d max P. Streams %d %s",
+                       cdnsp_ep_state_string(ep_state), mult,
+                       max_pstr, lsa ? "LSA " : "");
 
-       ret += snprintf(str + ret, size - ret,
-                       "interval %d us max ESIT payload %d CErr %d ",
-                       (1 << interval) * 125, esit, cerr);
+       ret += scnprintf(str + ret, size - ret,
+                        "interval %d us max ESIT payload %d CErr %d ",
+                        (1 << interval) * 125, esit, cerr);
 
-       ret += snprintf(str + ret, size - ret,
-                       "Type %s %sburst %d maxp %d deq %016llx ",
-                       cdnsp_ep_type_string(ep_type), hid ? "HID" : "",
-                       burst, maxp, deq);
+       ret += scnprintf(str + ret, size - ret,
+                        "Type %s %sburst %d maxp %d deq %016llx ",
+                        cdnsp_ep_type_string(ep_type), hid ? "HID" : "",
+                        burst, maxp, deq);
 
-       ret += snprintf(str + ret, size - ret, "avg trb len %d", avg);
+       ret += scnprintf(str + ret, size - ret, "avg trb len %d", avg);
 
        return str;
 }
index d9bb3d3f026e68cae40de5dee4fa9d81ed391f10..2a38e1eb65466c82a6eb9e4f2feba8fc59ee7dfc 100644 (file)
@@ -176,6 +176,7 @@ struct hw_bank {
  * @enabled_otg_timer_bits: bits of enabled otg timers
  * @next_otg_timer: next nearest enabled timer to be expired
  * @work: work for role changing
+ * @power_lost_work: work for power lost handling
  * @wq: workqueue thread
  * @qh_pool: allocation pool for queue heads
  * @td_pool: allocation pool for transfer descriptors
@@ -226,6 +227,7 @@ struct ci_hdrc {
        enum otg_fsm_timer              next_otg_timer;
        struct usb_role_switch          *role_switch;
        struct work_struct              work;
+       struct work_struct              power_lost_work;
        struct workqueue_struct         *wq;
 
        struct dma_pool                 *qh_pool;
index e28bb2f2612dc60a1760a1aafe27dca9d2aeb26b..ae9a6a17ec6e365d35b89e805185a043997a08a5 100644 (file)
@@ -96,6 +96,7 @@ struct ci_hdrc_imx_data {
        struct usb_phy *phy;
        struct platform_device *ci_pdev;
        struct clk *clk;
+       struct clk *clk_wakeup;
        struct imx_usbmisc_data *usbmisc_data;
        bool supports_runtime_pm;
        bool override_phy_control;
@@ -199,7 +200,7 @@ static int imx_get_clks(struct device *dev)
 
        data->clk_ipg = devm_clk_get(dev, "ipg");
        if (IS_ERR(data->clk_ipg)) {
-               /* If the platform only needs one clocks */
+               /* If the platform only needs one primary clock */
                data->clk = devm_clk_get(dev, NULL);
                if (IS_ERR(data->clk)) {
                        ret = PTR_ERR(data->clk);
@@ -208,6 +209,13 @@ static int imx_get_clks(struct device *dev)
                                PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
                        return ret;
                }
+               /* Get the wakeup clock. Not all platforms need to
+                * handle this clock, so it is optional.
+                */
+               data->clk_wakeup = devm_clk_get_optional(dev, "usb_wakeup_clk");
+               if (IS_ERR(data->clk_wakeup))
+                       ret = dev_err_probe(dev, PTR_ERR(data->clk_wakeup),
+                                       "Failed to get wakeup clk\n");
                return ret;
        }
 
@@ -423,6 +431,10 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
        if (ret)
                goto disable_hsic_regulator;
 
+       ret = clk_prepare_enable(data->clk_wakeup);
+       if (ret)
+               goto err_wakeup_clk;
+
        data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
        if (IS_ERR(data->phy)) {
                ret = PTR_ERR(data->phy);
@@ -504,6 +516,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 disable_device:
        ci_hdrc_remove_device(data->ci_pdev);
 err_clk:
+       clk_disable_unprepare(data->clk_wakeup);
+err_wakeup_clk:
        imx_disable_unprepare_clks(dev);
 disable_hsic_regulator:
        if (data->hsic_pad_regulator)
@@ -530,6 +544,7 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev)
                usb_phy_shutdown(data->phy);
        if (data->ci_pdev) {
                imx_disable_unprepare_clks(&pdev->dev);
+               clk_disable_unprepare(data->clk_wakeup);
                if (data->plat_data->flags & CI_HDRC_PMQOS)
                        cpu_latency_qos_remove_request(&data->pm_qos_req);
                if (data->hsic_pad_regulator)
index 7ac39a281b8cb58ba1c5fa085fdfb445606326f6..835bf2428dc6eccee263b05024d42885884cd94d 100644 (file)
@@ -523,6 +523,13 @@ static irqreturn_t ci_irq_handler(int irq, void *data)
        u32 otgsc = 0;
 
        if (ci->in_lpm) {
+               /*
+                * If a wakeup irq is already pending, just return and
+                * wait until the resume has finished first.
+                */
+               if (ci->wakeup_int)
+                       return IRQ_HANDLED;
+
                disable_irq_nosync(irq);
                ci->wakeup_int = true;
                pm_runtime_get(ci->dev);
@@ -849,6 +856,27 @@ static int ci_extcon_register(struct ci_hdrc *ci)
        return 0;
 }
 
+static void ci_power_lost_work(struct work_struct *work)
+{
+       struct ci_hdrc *ci = container_of(work, struct ci_hdrc, power_lost_work);
+       enum ci_role role;
+
+       disable_irq_nosync(ci->irq);
+       pm_runtime_get_sync(ci->dev);
+       if (!ci_otg_is_fsm_mode(ci)) {
+               role = ci_get_role(ci);
+
+               if (ci->role != role) {
+                       ci_handle_id_switch(ci);
+               } else if (role == CI_ROLE_GADGET) {
+                       if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV))
+                               usb_gadget_vbus_connect(&ci->gadget);
+               }
+       }
+       pm_runtime_put_sync(ci->dev);
+       enable_irq(ci->irq);
+}
+
 static DEFINE_IDA(ci_ida);
 
 struct platform_device *ci_hdrc_add_device(struct device *dev,
@@ -862,7 +890,7 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
        if (ret)
                return ERR_PTR(ret);
 
-       id = ida_simple_get(&ci_ida, 0, 0, GFP_KERNEL);
+       id = ida_alloc(&ci_ida, GFP_KERNEL);
        if (id < 0)
                return ERR_PTR(id);
 
@@ -892,7 +920,7 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
 err:
        platform_device_put(pdev);
 put_id:
-       ida_simple_remove(&ci_ida, id);
+       ida_free(&ci_ida, id);
        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(ci_hdrc_add_device);
@@ -901,7 +929,7 @@ void ci_hdrc_remove_device(struct platform_device *pdev)
 {
        int id = pdev->id;
        platform_device_unregister(pdev);
-       ida_simple_remove(&ci_ida, id);
+       ida_free(&ci_ida, id);
 }
 EXPORT_SYMBOL_GPL(ci_hdrc_remove_device);
 
@@ -1038,6 +1066,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
 
        spin_lock_init(&ci->lock);
        mutex_init(&ci->mutex);
+       INIT_WORK(&ci->power_lost_work, ci_power_lost_work);
+
        ci->dev = dev;
        ci->platdata = dev_get_platdata(dev);
        ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -1389,25 +1419,6 @@ static int ci_suspend(struct device *dev)
        return 0;
 }
 
-static void ci_handle_power_lost(struct ci_hdrc *ci)
-{
-       enum ci_role role;
-
-       disable_irq_nosync(ci->irq);
-       if (!ci_otg_is_fsm_mode(ci)) {
-               role = ci_get_role(ci);
-
-               if (ci->role != role) {
-                       ci_handle_id_switch(ci);
-               } else if (role == CI_ROLE_GADGET) {
-                       if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV))
-                               usb_gadget_vbus_connect(&ci->gadget);
-               }
-       }
-
-       enable_irq(ci->irq);
-}
-
 static int ci_resume(struct device *dev)
 {
        struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -1439,7 +1450,7 @@ static int ci_resume(struct device *dev)
                ci_role(ci)->resume(ci, power_lost);
 
        if (power_lost)
-               ci_handle_power_lost(ci);
+               queue_work(system_freezable_wq, &ci->power_lost_work);
 
        if (ci->supports_runtime_pm) {
                pm_runtime_disable(dev);
index 0b7bd3c643c3aa4d40fcb5612ec1ea3595f025ea..2d7f616270c17ba8d488459d7a84c62d18b083f8 100644 (file)
@@ -688,7 +688,8 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
                if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
                        int n = hw_ep_bit(hwep->num, hwep->dir);
 
-                       if (ci->rev == CI_REVISION_24)
+                       if (ci->rev == CI_REVISION_24 ||
+                           ci->rev == CI_REVISION_22)
                                if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
                                        reprime_dtd(ci, hwep, node);
                        hwreq->req.status = -EALREADY;
index a1f4e1ead97ff4b6457e885a816b24478382ae27..0e7439dba8fe8c727be5e9ed8e9417053fc60b8c 100644 (file)
@@ -916,6 +916,9 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
        struct acm *acm = tty->driver_data;
        int retval;
 
+       if (!(acm->ctrl_caps & USB_CDC_CAP_BRK))
+               return -EOPNOTSUPP;
+
        retval = acm_send_break(acm, state ? 0xffff : 0);
        if (retval < 0)
                dev_dbg(&acm->control->dev,
index 84d91b1c1eed53e11539b69ccaa80080870ab043..0886b19d2e1c8f2b1c0f4e8bf85d6240f7cf19d1 100644 (file)
@@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
                return ret;
        }
 
-       root = debugfs_create_dir(dev_name(dev), ulpi_root);
+       root = debugfs_create_dir(dev_name(&ulpi->dev), ulpi_root);
        debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
 
        dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
index f58a0299fb3bd30c6cdcc7dc9c47b24f6be8d680..e01b1913d02bf68aa3dd3ad4e3a30adb11585c5f 100644 (file)
@@ -189,13 +189,13 @@ static int usb_create_newid_files(struct usb_driver *usb_drv)
                goto exit;
 
        if (usb_drv->probe != NULL) {
-               error = driver_create_file(&usb_drv->drvwrap.driver,
+               error = driver_create_file(&usb_drv->driver,
                                           &driver_attr_new_id);
                if (error == 0) {
-                       error = driver_create_file(&usb_drv->drvwrap.driver,
+                       error = driver_create_file(&usb_drv->driver,
                                        &driver_attr_remove_id);
                        if (error)
-                               driver_remove_file(&usb_drv->drvwrap.driver,
+                               driver_remove_file(&usb_drv->driver,
                                                &driver_attr_new_id);
                }
        }
@@ -209,9 +209,9 @@ static void usb_remove_newid_files(struct usb_driver *usb_drv)
                return;
 
        if (usb_drv->probe != NULL) {
-               driver_remove_file(&usb_drv->drvwrap.driver,
+               driver_remove_file(&usb_drv->driver,
                                &driver_attr_remove_id);
-               driver_remove_file(&usb_drv->drvwrap.driver,
+               driver_remove_file(&usb_drv->driver,
                                   &driver_attr_new_id);
        }
 }
@@ -290,7 +290,10 @@ static int usb_probe_device(struct device *dev)
         * specialised device drivers prior to setting the
         * use_generic_driver bit.
         */
-       error = udriver->probe(udev);
+       if (udriver->probe)
+               error = udriver->probe(udev);
+       else if (!udriver->generic_subclass)
+               error = -EINVAL;
        if (error == -ENODEV && udriver != &usb_generic_driver &&
            (udriver->id_table || udriver->match)) {
                udev->use_generic_driver = 1;
@@ -549,7 +552,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
        if (!iface->authorized)
                return -ENODEV;
 
-       dev->driver = &driver->drvwrap.driver;
+       dev->driver = &driver->driver;
        usb_set_intfdata(iface, data);
        iface->needs_binding = 0;
 
@@ -612,7 +615,7 @@ void usb_driver_release_interface(struct usb_driver *driver,
        struct device *dev = &iface->dev;
 
        /* this should never happen, don't release something that's not ours */
-       if (!dev->driver || dev->driver != &driver->drvwrap.driver)
+       if (!dev->driver || dev->driver != &driver->driver)
                return;
 
        /* don't release from within disconnect() */
@@ -947,7 +950,7 @@ static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
        int ret;
 
        /* Don't reprobe if current driver isn't usb_generic_driver */
-       if (dev->driver != &usb_generic_driver.drvwrap.driver)
+       if (dev->driver != &usb_generic_driver.driver)
                return 0;
 
        udev = to_usb_device(dev);
@@ -961,6 +964,11 @@ static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
        return 0;
 }
 
+bool is_usb_device_driver(const struct device_driver *drv)
+{
+       return drv->probe == usb_probe_device;
+}
+
 /**
  * usb_register_device_driver - register a USB device (not interface) driver
  * @new_udriver: USB operations for the device driver
@@ -980,15 +988,14 @@ int usb_register_device_driver(struct usb_device_driver *new_udriver,
        if (usb_disabled())
                return -ENODEV;
 
-       new_udriver->drvwrap.for_devices = 1;
-       new_udriver->drvwrap.driver.name = new_udriver->name;
-       new_udriver->drvwrap.driver.bus = &usb_bus_type;
-       new_udriver->drvwrap.driver.probe = usb_probe_device;
-       new_udriver->drvwrap.driver.remove = usb_unbind_device;
-       new_udriver->drvwrap.driver.owner = owner;
-       new_udriver->drvwrap.driver.dev_groups = new_udriver->dev_groups;
+       new_udriver->driver.name = new_udriver->name;
+       new_udriver->driver.bus = &usb_bus_type;
+       new_udriver->driver.probe = usb_probe_device;
+       new_udriver->driver.remove = usb_unbind_device;
+       new_udriver->driver.owner = owner;
+       new_udriver->driver.dev_groups = new_udriver->dev_groups;
 
-       retval = driver_register(&new_udriver->drvwrap.driver);
+       retval = driver_register(&new_udriver->driver);
 
        if (!retval) {
                pr_info("%s: registered new device driver %s\n",
@@ -1020,7 +1027,7 @@ void usb_deregister_device_driver(struct usb_device_driver *udriver)
        pr_info("%s: deregistering device driver %s\n",
                        usbcore_name, udriver->name);
 
-       driver_unregister(&udriver->drvwrap.driver);
+       driver_unregister(&udriver->driver);
 }
 EXPORT_SYMBOL_GPL(usb_deregister_device_driver);
 
@@ -1048,18 +1055,17 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
        if (usb_disabled())
                return -ENODEV;
 
-       new_driver->drvwrap.for_devices = 0;
-       new_driver->drvwrap.driver.name = new_driver->name;
-       new_driver->drvwrap.driver.bus = &usb_bus_type;
-       new_driver->drvwrap.driver.probe = usb_probe_interface;
-       new_driver->drvwrap.driver.remove = usb_unbind_interface;
-       new_driver->drvwrap.driver.owner = owner;
-       new_driver->drvwrap.driver.mod_name = mod_name;
-       new_driver->drvwrap.driver.dev_groups = new_driver->dev_groups;
+       new_driver->driver.name = new_driver->name;
+       new_driver->driver.bus = &usb_bus_type;
+       new_driver->driver.probe = usb_probe_interface;
+       new_driver->driver.remove = usb_unbind_interface;
+       new_driver->driver.owner = owner;
+       new_driver->driver.mod_name = mod_name;
+       new_driver->driver.dev_groups = new_driver->dev_groups;
        spin_lock_init(&new_driver->dynids.lock);
        INIT_LIST_HEAD(&new_driver->dynids.list);
 
-       retval = driver_register(&new_driver->drvwrap.driver);
+       retval = driver_register(&new_driver->driver);
        if (retval)
                goto out;
 
@@ -1074,7 +1080,7 @@ out:
        return retval;
 
 out_newid:
-       driver_unregister(&new_driver->drvwrap.driver);
+       driver_unregister(&new_driver->driver);
 
        pr_err("%s: error %d registering interface driver %s\n",
                usbcore_name, retval, new_driver->name);
@@ -1099,7 +1105,7 @@ void usb_deregister(struct usb_driver *driver)
                        usbcore_name, driver->name);
 
        usb_remove_newid_files(driver);
-       driver_unregister(&driver->drvwrap.driver);
+       driver_unregister(&driver->driver);
        usb_free_dynids(driver);
 }
 EXPORT_SYMBOL_GPL(usb_deregister);
index 740342a2812ac5bbdc8595d24615d2364640b28e..b134bff5c3fe3e86215bdcd14a2591a521f5ba3c 100644 (file)
@@ -59,10 +59,26 @@ int usb_choose_configuration(struct usb_device *udev)
        int num_configs;
        int insufficient_power = 0;
        struct usb_host_config *c, *best;
+       struct usb_device_driver *udriver;
+
+       /*
+        * If a USB device (not an interface) doesn't have a driver then the
+        * kernel has no business trying to select or install a configuration
+        * for it.
+        */
+       if (!udev->dev.driver)
+               return -1;
+       udriver = to_usb_device_driver(udev->dev.driver);
 
        if (usb_device_is_owned(udev))
                return 0;
 
+       if (udriver->choose_configuration) {
+               i = udriver->choose_configuration(udev);
+               if (i >= 0)
+                       return i;
+       }
+
        best = NULL;
        c = udev->config;
        num_configs = udev->descriptor.bNumConfigurations;
index 87480a6e6d934893a6096cbf7e58f5d1cc5358f1..e38a4124f6102a5ff2a47107a8286815cfc5c8e2 100644 (file)
 #define USB_VENDOR_TEXAS_INSTRUMENTS           0x0451
 #define USB_PRODUCT_TUSB8041_USB3              0x8140
 #define USB_PRODUCT_TUSB8041_USB2              0x8142
-#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND       0x01
-#define HUB_QUIRK_DISABLE_AUTOSUSPEND          0x02
+#define USB_VENDOR_MICROCHIP                   0x0424
+#define USB_PRODUCT_USB4913                    0x4913
+#define USB_PRODUCT_USB4914                    0x4914
+#define USB_PRODUCT_USB4915                    0x4915
+#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND       BIT(0)
+#define HUB_QUIRK_DISABLE_AUTOSUSPEND          BIT(1)
+#define HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL  BIT(2)
 
 #define USB_TP_TRANSMISSION_DELAY      40      /* ns */
 #define USB_TP_TRANSMISSION_DELAY_MAX  65535   /* ns */
 #define USB_PING_RESPONSE_TIME         400     /* ns */
+#define USB_REDUCE_FRAME_INTR_BINTERVAL        9
+
+/*
+ * The SET_ADDRESS request timeout will be 500 ms when
+ * USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT quirk flag is set.
+ */
+#define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT      500  /* ms */
 
 /* Protect struct usb_device->state and ->children members
  * Note: Both are also protected by ->dev.sem, except that ->state can
@@ -1904,6 +1916,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
                usb_autopm_get_interface_no_resume(intf);
        }
 
+       if ((id->driver_info & HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL) &&
+           desc->endpoint[0].desc.bInterval > USB_REDUCE_FRAME_INTR_BINTERVAL) {
+               desc->endpoint[0].desc.bInterval =
+                       USB_REDUCE_FRAME_INTR_BINTERVAL;
+               /* Tell the HCD about the interrupt ep's new bInterval */
+               usb_set_interface(hdev, 0, 0);
+       }
+
        if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) {
                onboard_hub_create_pdevs(hdev, &hub->onboard_hub_devs);
 
@@ -2033,9 +2053,19 @@ static void update_port_device_state(struct usb_device *udev)
 
        if (udev->parent) {
                hub = usb_hub_to_struct_hub(udev->parent);
-               port_dev = hub->ports[udev->portnum - 1];
-               WRITE_ONCE(port_dev->state, udev->state);
-               sysfs_notify_dirent(port_dev->state_kn);
+
+               /*
+                * The Link Layer Validation System Driver (lvstest)
+                * has a test step to unbind the hub before running the
+                * rest of the procedure. This triggers hub_disconnect
+                * which will set the hub's maxchild to 0, further
+                * resulting in usb_hub_to_struct_hub returning NULL.
+                */
+               if (hub) {
+                       port_dev = hub->ports[udev->portnum - 1];
+                       WRITE_ONCE(port_dev->state, udev->state);
+                       sysfs_notify_dirent(port_dev->state_kn);
+               }
        }
 }
 
@@ -2368,17 +2398,25 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
                        }
                } else if (desc->bLength == sizeof
                                (struct usb_otg_descriptor)) {
-                       /* Set a_alt_hnp_support for legacy otg device */
-                       err = usb_control_msg(udev,
-                               usb_sndctrlpipe(udev, 0),
-                               USB_REQ_SET_FEATURE, 0,
-                               USB_DEVICE_A_ALT_HNP_SUPPORT,
-                               0, NULL, 0,
-                               USB_CTRL_SET_TIMEOUT);
-                       if (err < 0)
-                               dev_err(&udev->dev,
-                                       "set a_alt_hnp_support failed: %d\n",
-                                       err);
+                       /*
+                        * We are operating on a legacy OTG device.
+                        * It should be told that it is operating on
+                        * the wrong port if we have another port that
+                        * does support HNP.
+                        */
+                       if (bus->otg_port != 0) {
+                               /* Set a_alt_hnp_support for legacy otg device */
+                               err = usb_control_msg(udev,
+                                       usb_sndctrlpipe(udev, 0),
+                                       USB_REQ_SET_FEATURE, 0,
+                                       USB_DEVICE_A_ALT_HNP_SUPPORT,
+                                       0, NULL, 0,
+                                       USB_CTRL_SET_TIMEOUT);
+                               if (err < 0)
+                                       dev_err(&udev->dev,
+                                               "set a_alt_hnp_support failed: %d\n",
+                                               err);
+                       }
                }
        }
 #endif
@@ -4626,7 +4664,12 @@ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
 static int hub_set_address(struct usb_device *udev, int devnum)
 {
        int retval;
+       unsigned int timeout_ms = USB_CTRL_SET_TIMEOUT;
        struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+       struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
+
+       if (hub->hdev->quirks & USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT)
+               timeout_ms = USB_SHORT_SET_ADDRESS_REQ_TIMEOUT;
 
        /*
         * The host controller will choose the device address,
@@ -4639,11 +4682,11 @@ static int hub_set_address(struct usb_device *udev, int devnum)
        if (udev->state != USB_STATE_DEFAULT)
                return -EINVAL;
        if (hcd->driver->address_device)
-               retval = hcd->driver->address_device(hcd, udev);
+               retval = hcd->driver->address_device(hcd, udev, timeout_ms);
        else
                retval = usb_control_msg(udev, usb_sndaddr0pipe(),
                                USB_REQ_SET_ADDRESS, 0, devnum, 0,
-                               NULL, 0, USB_CTRL_SET_TIMEOUT);
+                               NULL, 0, timeout_ms);
        if (retval == 0) {
                update_devnum(udev, devnum);
                /* Device now using proper address. */
@@ -5895,6 +5938,21 @@ static const struct usb_device_id hub_id_table[] = {
       .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
       .idProduct = USB_PRODUCT_TUSB8041_USB3,
       .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+       { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                       | USB_DEVICE_ID_MATCH_PRODUCT,
+         .idVendor = USB_VENDOR_MICROCHIP,
+         .idProduct = USB_PRODUCT_USB4913,
+         .driver_info = HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL},
+       { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                       | USB_DEVICE_ID_MATCH_PRODUCT,
+         .idVendor = USB_VENDOR_MICROCHIP,
+         .idProduct = USB_PRODUCT_USB4914,
+         .driver_info = HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL},
+       { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                       | USB_DEVICE_ID_MATCH_PRODUCT,
+         .idVendor = USB_VENDOR_MICROCHIP,
+         .idProduct = USB_PRODUCT_USB4915,
+         .driver_info = HUB_QUIRK_REDUCE_FRAME_INTR_BINTERVAL},
     { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
       .bDeviceClass = USB_CLASS_HUB},
     { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
index 15e9bd180a1d253f3b3cd2eebb3de68bcae9fb5e..b4783574b8e6603b7c521accdbc4cc52518d61ec 100644 (file)
@@ -138,6 +138,9 @@ static int quirks_param_set(const char *value, const struct kernel_param *kp)
                        case 'o':
                                flags |= USB_QUIRK_HUB_SLOW_RESET;
                                break;
+                       case 'p':
+                               flags |= USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT;
+                               break;
                        /* Ignore unrecognized flag characters */
                        }
                }
@@ -527,6 +530,10 @@ static const struct usb_device_id usb_quirk_list[] = {
 
        { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
 
+       /* APTIV AUTOMOTIVE HUB */
+       { USB_DEVICE(0x2c48, 0x0132), .driver_info =
+                       USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT },
+
        /* DJI CineSSD */
        { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
 
index 2a938cf47ccd6239dd0f1a9d8eed95618de75d64..dc8d9228a5e759775cc16b0a1bbacf514235b396 100644 (file)
@@ -431,7 +431,7 @@ struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
        struct device *dev;
 
        argb.minor = minor;
-       argb.drv = &drv->drvwrap.driver;
+       argb.drv = &drv->driver;
 
        dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface);
 
index 60363153fc3f38aed322182bc46357ca5ca6ec74..bfecb50773b6b61d04b9d696faaab7350c9644a6 100644 (file)
@@ -175,13 +175,7 @@ static inline int is_root_hub(struct usb_device *udev)
        return (udev->parent == NULL);
 }
 
-/* Do the same for device drivers and interface drivers. */
-
-static inline int is_usb_device_driver(struct device_driver *drv)
-{
-       return container_of(drv, struct usbdrv_wrap, driver)->
-                       for_devices;
-}
+extern bool is_usb_device_driver(const struct device_driver *drv);
 
 /* for labeling diagnostics */
 extern const char *usbcore_name;
index fb03162ae9b764d97d7df31ec1e158b564898076..eb677c3cfd0b62677a1e51502d3a78f1d68cdef2 100644 (file)
@@ -130,6 +130,7 @@ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
        p->lpm_clock_gating = false;
        p->besl = false;
        p->hird_threshold_en = false;
+       p->no_clock_gating = true;
 }
 
 static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg)
index b101dbf8c5dcc775797da2ce3ea0f2c34ebd9323..3e55838c0001443845d975d297dcf25877fe34d5 100644 (file)
@@ -277,48 +277,11 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
        /*
         * We're resetting only the device side because, if we're in host mode,
         * XHCI driver will reset the host block. If dwc3 was configured for
-        * host-only mode or current role is host, then we can return early.
+        * host-only mode, then we can return early.
         */
        if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
                return 0;
 
-       /*
-        * If the dr_mode is host and the dwc->current_dr_role is not the
-        * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode
-        * isn't executed yet. Ensure the phy is ready before the controller
-        * updates the GCTL.PRTCAPDIR or other settings by soft-resetting
-        * the phy.
-        *
-        * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
-        * is port index. If this is a multiport host, then we need to reset
-        * all active ports.
-        */
-       if (dwc->dr_mode == USB_DR_MODE_HOST) {
-               u32 usb3_port;
-               u32 usb2_port;
-
-               usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
-               usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
-               dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
-
-               usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-               usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
-               dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
-
-               /* Small delay for phy reset assertion */
-               usleep_range(1000, 2000);
-
-               usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
-               dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
-
-               usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
-               dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
-
-               /* Wait for clock synchronization */
-               msleep(50);
-               return 0;
-       }
-
        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
        reg |= DWC3_DCTL_CSFTRST;
        reg &= ~DWC3_DCTL_RUN_STOP;
@@ -1367,6 +1330,18 @@ static int dwc3_core_init(struct dwc3 *dwc)
 
        dwc3_config_threshold(dwc);
 
+       /*
+        * Modify this for all supported Super Speed ports when
+        * multiport support is added.
+        */
+       if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
+           (DWC3_IP_IS(DWC31)) &&
+           dwc->maximum_speed == USB_SPEED_SUPER) {
+               reg = dwc3_readl(dwc->regs, DWC3_LLUCTL);
+               reg |= DWC3_LLUCTL_FORCE_GEN1;
+               dwc3_writel(dwc->regs, DWC3_LLUCTL, reg);
+       }
+
        return 0;
 
 err_power_off_phy:
@@ -2340,12 +2315,15 @@ static int dwc3_resume(struct device *dev)
 
        pinctrl_pm_select_default_state(dev);
 
+       pm_runtime_disable(dev);
+       pm_runtime_set_active(dev);
+
        ret = dwc3_resume_common(dwc, PMSG_RESUME);
-       if (ret)
+       if (ret) {
+               pm_runtime_set_suspended(dev);
                return ret;
+       }
 
-       pm_runtime_disable(dev);
-       pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
        return 0;
index efe6caf4d0e873ef030ef55bf0ecbec52b8c91e0..e3eea965e57bfd3d32fa6b1cb52fd4072734a30d 100644 (file)
 #define DWC3_OEVTEN            0xcc0C
 #define DWC3_OSTS              0xcc10
 
+#define DWC3_LLUCTL            0xd024
+
 /* Bit fields */
 
 /* Global SoC Bus Configuration INCRx Register 0 */
 /* Global HWPARAMS4 Register */
 #define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n)   (((n) & (0x0f << 13)) >> 13)
 #define DWC3_MAX_HIBER_SCRATCHBUFS             15
+#define DWC3_EXT_BUFF_CONTROL          BIT(21)
 
 /* Global HWPARAMS6 Register */
 #define DWC3_GHWPARAMS6_BCSUPPORT              BIT(14)
 #define DWC3_OSTS_VBUSVLD              BIT(1)
 #define DWC3_OSTS_CONIDSTS             BIT(0)
 
+/* Force Gen1 speed on Gen2 link */
+#define DWC3_LLUCTL_FORCE_GEN1         BIT(10)
+
 /* Structures */
 
 struct dwc3_trb;
index a1e15f2fffdbff6811ff09fbadd681a8967c2f30..8ee4480685031427171191131db639a881d5c8ac 100644 (file)
@@ -363,8 +363,10 @@ static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev)
        }
 
        ret = clk_prepare_enable(dwc3_imx->hsio_clk);
-       if (ret)
+       if (ret) {
+               clk_disable_unprepare(dwc3_imx->suspend_clk);
                return ret;
+       }
 
        ret = dwc3_imx8mp_resume(dwc3_imx, PMSG_RESUME);
 
index 6604845c397cd2171ee55966fc3ba80f3f2538d1..39564e17f3b07a228d54e503f0926c7b9bb810cf 100644 (file)
@@ -51,6 +51,8 @@
 #define PCI_DEVICE_ID_INTEL_MTLP               0x7ec1
 #define PCI_DEVICE_ID_INTEL_MTLS               0x7f6f
 #define PCI_DEVICE_ID_INTEL_MTL                        0x7e7e
+#define PCI_DEVICE_ID_INTEL_ARLH               0x7ec1
+#define PCI_DEVICE_ID_INTEL_ARLH_PCH           0x777e
 #define PCI_DEVICE_ID_INTEL_TGL                        0x9a15
 #define PCI_DEVICE_ID_AMD_MR                   0x163a
 
@@ -421,6 +423,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE_DATA(INTEL, MTLP, &dwc3_pci_intel_swnode) },
        { PCI_DEVICE_DATA(INTEL, MTL, &dwc3_pci_intel_swnode) },
        { PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) },
+       { PCI_DEVICE_DATA(INTEL, ARLH, &dwc3_pci_intel_swnode) },
+       { PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) },
        { PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) },
 
        { PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) },
index fdf6d5d3c2ada4357bcf1a0156410e4fc0809cfa..dbd6a5b2b2892e50098c1c752a822b8036bdfe98 100644 (file)
@@ -57,7 +57,7 @@ struct dwc3_acpi_pdata {
        u32                     qscratch_base_offset;
        u32                     qscratch_base_size;
        u32                     dwc3_core_base_size;
-       int                     hs_phy_irq_index;
+       int                     qusb2_phy_irq_index;
        int                     dp_hs_phy_irq_index;
        int                     dm_hs_phy_irq_index;
        int                     ss_phy_irq_index;
@@ -73,7 +73,7 @@ struct dwc3_qcom {
        int                     num_clocks;
        struct reset_control    *resets;
 
-       int                     hs_phy_irq;
+       int                     qusb2_phy_irq;
        int                     dp_hs_phy_irq;
        int                     dm_hs_phy_irq;
        int                     ss_phy_irq;
@@ -372,7 +372,7 @@ static void dwc3_qcom_disable_wakeup_irq(int irq)
 
 static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
 {
-       dwc3_qcom_disable_wakeup_irq(qcom->hs_phy_irq);
+       dwc3_qcom_disable_wakeup_irq(qcom->qusb2_phy_irq);
 
        if (qcom->usb2_speed == USB_SPEED_LOW) {
                dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
@@ -389,7 +389,7 @@ static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
 
 static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
 {
-       dwc3_qcom_enable_wakeup_irq(qcom->hs_phy_irq, 0);
+       dwc3_qcom_enable_wakeup_irq(qcom->qusb2_phy_irq, 0);
 
        /*
         * Configure DP/DM line interrupts based on the USB2 device attached to
@@ -542,19 +542,19 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
        int irq;
        int ret;
 
-       irq = dwc3_qcom_get_irq(pdev, "hs_phy_irq",
-                               pdata ? pdata->hs_phy_irq_index : -1);
+       irq = dwc3_qcom_get_irq(pdev, "qusb2_phy",
+                               pdata ? pdata->qusb2_phy_irq_index : -1);
        if (irq > 0) {
                /* Keep wakeup interrupts disabled until suspend */
                ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
                                        qcom_dwc3_resume_irq,
                                        IRQF_ONESHOT | IRQF_NO_AUTOEN,
-                                       "qcom_dwc3 HS", qcom);
+                                       "qcom_dwc3 QUSB2", qcom);
                if (ret) {
-                       dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
+                       dev_err(qcom->dev, "qusb2_phy_irq failed: %d\n", ret);
                        return ret;
                }
-               qcom->hs_phy_irq = irq;
+               qcom->qusb2_phy_irq = irq;
        }
 
        irq = dwc3_qcom_get_irq(pdev, "dp_hs_phy_irq",
@@ -1058,7 +1058,7 @@ static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
        .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
        .qscratch_base_size = SDM845_QSCRATCH_SIZE,
        .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
-       .hs_phy_irq_index = 1,
+       .qusb2_phy_irq_index = 1,
        .dp_hs_phy_irq_index = 4,
        .dm_hs_phy_irq_index = 3,
        .ss_phy_irq_index = 2
@@ -1068,7 +1068,7 @@ static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = {
        .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
        .qscratch_base_size = SDM845_QSCRATCH_SIZE,
        .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
-       .hs_phy_irq_index = 1,
+       .qusb2_phy_irq_index = 1,
        .dp_hs_phy_irq_index = 4,
        .dm_hs_phy_irq_index = 3,
        .ss_phy_irq_index = 2,
index 5b7e92f476de928a6fe38e4494089b8c521b143d..6095f4dee6ceb65675d5b2b76655643d688673c7 100644 (file)
@@ -293,11 +293,15 @@ static int dwc3_xlnx_probe(struct platform_device *pdev)
                goto err_clk_put;
 
        pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
+       ret = devm_pm_runtime_enable(dev);
+       if (ret < 0)
+               goto err_pm_set_suspended;
+
        pm_suspend_ignore_children(dev, false);
-       pm_runtime_get_sync(dev);
+       return pm_runtime_resume_and_get(dev);
 
-       return 0;
+err_pm_set_suspended:
+       pm_runtime_set_suspended(dev);
 
 err_clk_put:
        clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);
@@ -315,7 +319,6 @@ static void dwc3_xlnx_remove(struct platform_device *pdev)
        clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);
        priv_data->num_clocks = 0;
 
-       pm_runtime_disable(dev);
        pm_runtime_put_noidle(dev);
        pm_runtime_set_suspended(dev);
 }
index b94243237293792762e6fd64babf72ef3373e805..6ae8a36f21cf687fa6eaff07bf37f8faa96bf7f9 100644 (file)
@@ -238,7 +238,10 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
                struct dwc3_request     *req;
 
                req = next_request(&dep->pending_list);
-               dwc3_gadget_giveback(dep, req, -ECONNRESET);
+               if (!dwc->connected)
+                       dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+               else
+                       dwc3_gadget_giveback(dep, req, -ECONNRESET);
        }
 
        dwc->eps[0]->trb_enqueue = 0;
index 858fe4c299b7af2609677eec6079e53022930bad..564976b3e2b911e6709758cc8bdbe0c85df86562 100644 (file)
@@ -673,6 +673,12 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
                params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
        }
 
+       if (dep->endpoint.fifo_mode) {
+               if (!(dwc->hwparams.hwparams4 & DWC3_EXT_BUFF_CONTROL))
+                       return -EINVAL;
+               params.param1 |= DWC3_DEPCFG_EBC_HWO_NOWB | DWC3_DEPCFG_USE_EBC;
+       }
+
        return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
 }
 
@@ -2103,7 +2109,17 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
 
        list_for_each_entry(r, &dep->pending_list, list) {
                if (r == req) {
-                       dwc3_gadget_giveback(dep, req, -ECONNRESET);
+                       /*
+                        * Explicitly check for EP0/1 as dequeue for those
+                        * EPs need to be handled differently.  Control EP
+                        * only deals with one USB req, and giveback will
+                        * occur during dwc3_ep0_stall_and_restart().  EP0
+                        * requests are never added to started_list.
+                        */
+                       if (dep->number > 1)
+                               dwc3_gadget_giveback(dep, req, -ECONNRESET);
+                       else
+                               dwc3_ep0_reset_state(dwc);
                        goto out;
                }
        }
@@ -3973,6 +3989,13 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
        usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
 
        dwc3_ep0_reset_state(dwc);
+
+       /*
+        * Request PM idle to address condition where usage count is
+        * already decremented to zero, but waiting for the disconnect
+        * interrupt to set dwc->connected to FALSE.
+        */
+       pm_request_idle(dwc->dev);
 }
 
 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
@@ -4686,15 +4709,13 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
        unsigned long flags;
        int ret;
 
-       if (!dwc->gadget_driver)
-               return 0;
-
        ret = dwc3_gadget_soft_disconnect(dwc);
        if (ret)
                goto err;
 
        spin_lock_irqsave(&dwc->lock, flags);
-       dwc3_disconnect_gadget(dwc);
+       if (dwc->gadget_driver)
+               dwc3_disconnect_gadget(dwc);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        return 0;
index 55a56cf67d7364998f9f4a42fd95e5d856cd105c..fd7a4e94397e64ccc74e362e5d73319918fdbae6 100644 (file)
@@ -26,6 +26,8 @@ struct dwc3;
 #define DWC3_DEPCFG_XFER_NOT_READY_EN  BIT(10)
 #define DWC3_DEPCFG_FIFO_ERROR_EN      BIT(11)
 #define DWC3_DEPCFG_STREAM_EVENT_EN    BIT(13)
+#define DWC3_DEPCFG_EBC_HWO_NOWB       BIT(14)
+#define DWC3_DEPCFG_USE_EBC            BIT(15)
 #define DWC3_DEPCFG_BINTERVAL_M1(n)    (((n) & 0xff) << 16)
 #define DWC3_DEPCFG_STREAM_CAPABLE     BIT(24)
 #define DWC3_DEPCFG_EP_NUMBER(n)       (((n) & 0x1f) << 25)
index 61f57fe5bb783bcf676cdb47177c66bb2a2e81be..43230915323c7dfa6625bfbbe67b1f8df238dcd4 100644 (file)
@@ -61,7 +61,7 @@ out:
 
 int dwc3_host_init(struct dwc3 *dwc)
 {
-       struct property_entry   props[4];
+       struct property_entry   props[5];
        struct platform_device  *xhci;
        int                     ret, irq;
        int                     prop_idx = 0;
@@ -89,6 +89,8 @@ int dwc3_host_init(struct dwc3 *dwc)
 
        memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
 
+       props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-sg-trb-cache-size-quirk");
+
        if (dwc->usb3_lpm_capable)
                props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
 
index 7bf810a0c98a9393fb915c55ddb51264cdff2b0b..8c5aaf8606357d8b4e8efc672a9c6982d2956b0a 100644 (file)
@@ -404,9 +404,9 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
                        else if (td->hw_alt_next != list_end)
                                mark = '/';
                }
-               temp = snprintf(next, size,
-                               "\n\t%p%c%s len=%d %08x urb %p",
-                               td, mark, ({ char *tmp;
+               temp = scnprintf(next, size,
+                                "\n\t%p%c%s len=%d %08x urb %p",
+                                td, mark, ({ char *tmp;
                                switch ((scratch>>8)&0x03) {
                                case 0:
                                        tmp = "out";
@@ -424,15 +424,11 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
                                (scratch >> 16) & 0x7fff,
                                scratch,
                                td->urb);
-               if (size < temp)
-                       temp = size;
                size -= temp;
                next += temp;
        }
 
-       temp = snprintf(next, size, "\n");
-       if (size < temp)
-               temp = size;
+       temp = scnprintf(next, size, "\n");
 
        size -= temp;
        next += temp;
index f7ea84070554ea904fada7b8172e4f173c777807..0bae12e34f9a72dc80540396b6d72b806dcc5542 100644 (file)
@@ -1094,10 +1094,10 @@ static int fotg210_udc_stop(struct usb_gadget *g)
 
 /**
  * fotg210_vbus_session - Called by external transceiver to enable/disable udc
- * @_gadget: usb gadget
+ * @g: usb gadget
  * @is_active: 0 if should disable UDC VBUS, 1 if should enable
  *
- * Returns 0
+ * Returns: %0
  */
 static int fotg210_vbus_session(struct usb_gadget *g, int is_active)
 {
@@ -1122,7 +1122,7 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
  *
  * Called by the USB Phy when a cable connect or disconnect is sensed.
  *
- * Returns NOTIFY_OK or NOTIFY_DONE
+ * Returns: NOTIFY_OK or NOTIFY_DONE
  */
 static int fotg210_phy_event(struct notifier_block *nb, unsigned long action,
                             void *data)
index 4c639e9ddedc0a0bc226795cd8fbd2e0dadbf70b..ce3cfa1f36f516d117e0095a4ab47a0abd9b5c48 100644 (file)
@@ -606,10 +606,11 @@ static struct config_group *function_make(
        char *instance_name;
        int ret;
 
-       ret = snprintf(buf, MAX_NAME_LEN, "%s", name);
-       if (ret >= MAX_NAME_LEN)
+       if (strlen(name) >= MAX_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);
 
+       scnprintf(buf, MAX_NAME_LEN, "%s", name);
+
        func_name = buf;
        instance_name = strchr(func_name, '.');
        if (!instance_name) {
@@ -701,10 +702,12 @@ static struct config_group *config_desc_make(
        int ret;
 
        gi = container_of(group, struct gadget_info, configs_group);
-       ret = snprintf(buf, MAX_NAME_LEN, "%s", name);
-       if (ret >= MAX_NAME_LEN)
+
+       if (strlen(name) >= MAX_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);
 
+       scnprintf(buf, MAX_NAME_LEN, "%s", name);
+
        num_str = strchr(buf, '.');
        if (!num_str) {
                pr_err("Unable to locate . in name.bConfigurationValue\n");
@@ -812,7 +815,7 @@ static ssize_t gadget_string_s_show(struct config_item *item, char *page)
        struct gadget_string *string = to_gadget_string(item);
        int ret;
 
-       ret = snprintf(page, sizeof(string->string), "%s\n", string->string);
+       ret = sysfs_emit(page, "%s\n", string->string);
        return ret;
 }
 
index fdd0fc7b8f259e18524f825229c6d5723a363c25..6bff6cb93789167b7de9d2b939d5e4e6255078db 100644 (file)
@@ -2931,9 +2931,8 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
 
                t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
                t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
-               memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
-                      ARRAY_SIZE(desc->CompatibleID) +
-                      ARRAY_SIZE(desc->SubCompatibleID));
+               memcpy(t->os_desc->ext_compat_id, &desc->IDs,
+                      sizeof_field(struct usb_ext_compat_desc, IDs));
                length = sizeof(*desc);
        }
                break;
index 722a3ab2b337935e546806e21d1eab357f8f54e7..c265a1f62fc1451dacba18723e0ff75dbbebfc6d 100644 (file)
@@ -545,21 +545,37 @@ static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
 
 static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
 {
+       int rc;
+
        if (!fsg_is_set(common))
                return false;
        bh->state = BUF_STATE_SENDING;
-       if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq))
+       rc = start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq);
+       if (rc) {
                bh->state = BUF_STATE_EMPTY;
+               if (rc == -ESHUTDOWN) {
+                       common->running = 0;
+                       return false;
+               }
+       }
        return true;
 }
 
 static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
 {
+       int rc;
+
        if (!fsg_is_set(common))
                return false;
        bh->state = BUF_STATE_RECEIVING;
-       if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq))
+       rc = start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq);
+       if (rc) {
                bh->state = BUF_STATE_FULL;
+               if (rc == -ESHUTDOWN) {
+                       common->running = 0;
+                       return false;
+               }
+       }
        return true;
 }
 
index 5335845d697b5d8496cc0c3b63d4615191a8c35c..20c6fbd94f32d793f7b0944a84496dde4fc75c39 100644 (file)
@@ -1177,11 +1177,11 @@ F_MIDI_OPT(out_ports, true, MAX_PORTS);
 static ssize_t f_midi_opts_id_show(struct config_item *item, char *page)
 {
        struct f_midi_opts *opts = to_f_midi_opts(item);
-       int result;
+       ssize_t result;
 
        mutex_lock(&opts->lock);
        if (opts->id) {
-               result = strlcpy(page, opts->id, PAGE_SIZE);
+               result = strscpy(page, opts->id, PAGE_SIZE);
        } else {
                page[0] = 0;
                result = 0;
index cc0ed29a4adc07b62fa412802c2b42495e5c22d8..ca5d5f5649982a6752b03053421cbf31c7589276 100644 (file)
@@ -103,6 +103,16 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
 /* Delay for the transmit to wait before sending an unfilled NTB frame. */
 #define TX_TIMEOUT_NSECS       300000
 
+/*
+ * Although max mtu as dictated by u_ether is 15412 bytes, setting
+ * max_segment_size to 15426 would not be efficient. If user chooses segment
+ * size to be (>= 8192), then we can't aggregate more than one buffer in each
+ * NTB (assuming each packet coming from network layer is >= 8192 bytes) as ep
+ * maxpacket limit is 16384. So let max_segment_size be limited to 8000 to allow
+ * at least 2 packets to be aggregated reducing wastage of NTB buffer space
+ */
+#define MAX_DATAGRAM_SIZE      8000
+
 #define FORMATS_SUPPORTED      (USB_CDC_NCM_NTB16_SUPPORTED |  \
                                 USB_CDC_NCM_NTB32_SUPPORTED)
 
@@ -179,7 +189,6 @@ static struct usb_cdc_ether_desc ecm_desc = {
        /* this descriptor actually adds value, surprise! */
        /* .iMACAddress = DYNAMIC */
        .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
-       .wMaxSegmentSize =      cpu_to_le16(ETH_FRAME_LEN),
        .wNumberMCFilters =     cpu_to_le16(0),
        .bNumberPowerFilters =  0,
 };
@@ -1166,11 +1175,15 @@ static int ncm_unwrap_ntb(struct gether *port,
        struct sk_buff  *skb2;
        int             ret = -EINVAL;
        unsigned        ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
-       unsigned        frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize);
+       unsigned        frame_max;
        const struct ndp_parser_opts *opts = ncm->parser_opts;
        unsigned        crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
        int             dgram_counter;
        int             to_process = skb->len;
+       struct f_ncm_opts *ncm_opts;
+
+       ncm_opts = container_of(port->func.fi, struct f_ncm_opts, func_inst);
+       frame_max = ncm_opts->max_segment_size;
 
 parse_ntb:
        tmp = (__le16 *)ntb_ptr;
@@ -1430,8 +1443,10 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
 
        mutex_lock(&ncm_opts->lock);
        gether_set_gadget(ncm_opts->net, cdev->gadget);
-       if (!ncm_opts->bound)
+       if (!ncm_opts->bound) {
+               ncm_opts->net->mtu = (ncm_opts->max_segment_size - ETH_HLEN);
                status = gether_register_netdev(ncm_opts->net);
+       }
        mutex_unlock(&ncm_opts->lock);
 
        if (status)
@@ -1474,6 +1489,8 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
        ncm_data_intf.bInterfaceNumber = status;
        ncm_union_desc.bSlaveInterface0 = status;
 
+       ecm_desc.wMaxSegmentSize = cpu_to_le16(ncm_opts->max_segment_size);
+
        status = -ENODEV;
 
        /* allocate instance-specific endpoints */
@@ -1576,11 +1593,56 @@ USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm);
 /* f_ncm_opts_ifname */
 USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm);
 
+static ssize_t ncm_opts_max_segment_size_show(struct config_item *item,
+                                             char *page)
+{
+       struct f_ncm_opts *opts = to_f_ncm_opts(item);
+       u16 segment_size;
+
+       mutex_lock(&opts->lock);
+       segment_size = opts->max_segment_size;
+       mutex_unlock(&opts->lock);
+
+       return sysfs_emit(page, "%u\n", segment_size);
+}
+
+static ssize_t ncm_opts_max_segment_size_store(struct config_item *item,
+                                              const char *page, size_t len)
+{
+       struct f_ncm_opts *opts = to_f_ncm_opts(item);
+       u16 segment_size;
+       int ret;
+
+       mutex_lock(&opts->lock);
+       if (opts->refcnt) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ret = kstrtou16(page, 0, &segment_size);
+       if (ret)
+               goto out;
+
+       if (segment_size > MAX_DATAGRAM_SIZE) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       opts->max_segment_size = segment_size;
+       ret = len;
+out:
+       mutex_unlock(&opts->lock);
+       return ret;
+}
+
+CONFIGFS_ATTR(ncm_opts_, max_segment_size);
+
 static struct configfs_attribute *ncm_attrs[] = {
        &ncm_opts_attr_dev_addr,
        &ncm_opts_attr_host_addr,
        &ncm_opts_attr_qmult,
        &ncm_opts_attr_ifname,
+       &ncm_opts_attr_max_segment_size,
        NULL,
 };
 
@@ -1623,6 +1685,7 @@ static struct usb_function_instance *ncm_alloc_inst(void)
                kfree(opts);
                return ERR_CAST(net);
        }
+       opts->max_segment_size = ETH_FRAME_LEN;
        INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop);
 
        descs[0] = &opts->ncm_os_desc;
index ff33f31bcdf64591211f79028d6828e50097e8fa..37befd6db001ad8013659b8aad9fd378804e3b15 100644 (file)
@@ -1504,8 +1504,8 @@ static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
                ret = -ENODEV;
                goto out;
        }
-       ret = snprintf(page, PAGE_SIZE, "%s\n",
-                       tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+       ret = sysfs_emit(page, "%s\n",
+                        tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
 out:
        mutex_unlock(&tpg->tpg_mutex);
        return ret;
index 6f0e1d803dc244d820905870dd6288bdbbd77832..7de74a3dd39211b4d1e13fad2299dcfe96135d32 100644 (file)
@@ -292,6 +292,77 @@ static struct usb_descriptor_header *f_audio_desc[] = {
        NULL,
 };
 
+/* Standard ISO OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor ss_as_out_ep_desc  = {
+       .bLength =              USB_DT_ENDPOINT_AUDIO_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_OUT,
+       .bmAttributes =         USB_ENDPOINT_SYNC_ADAPTIVE
+                               | USB_ENDPOINT_XFER_ISOC,
+       .wMaxPacketSize =       cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
+       .bInterval =            4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_as_out_ep_desc_comp = {
+       .bLength                = sizeof(ss_as_out_ep_desc_comp),
+       .bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+       .bMaxBurst              = 0,
+       .bmAttributes           = 0,
+       /* wBytesPerInterval = DYNAMIC */
+};
+
+/* Standard ISO OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor ss_as_in_ep_desc  = {
+       .bLength =              USB_DT_ENDPOINT_AUDIO_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_SYNC_ASYNC
+                               | USB_ENDPOINT_XFER_ISOC,
+       .wMaxPacketSize =       cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
+       .bInterval =            4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_as_in_ep_desc_comp = {
+       .bLength                = sizeof(ss_as_in_ep_desc_comp),
+       .bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+       .bMaxBurst              = 0,
+       .bmAttributes           = 0,
+       /* wBytesPerInterval = DYNAMIC */
+};
+
+static struct usb_descriptor_header *f_audio_ss_desc[] = {
+       (struct usb_descriptor_header *)&ac_interface_desc,
+       (struct usb_descriptor_header *)&ac_header_desc,
+
+       (struct usb_descriptor_header *)&usb_out_it_desc,
+       (struct usb_descriptor_header *)&io_out_ot_desc,
+       (struct usb_descriptor_header *)&io_in_it_desc,
+       (struct usb_descriptor_header *)&usb_in_ot_desc,
+
+       (struct usb_descriptor_header *)&as_out_interface_alt_0_desc,
+       (struct usb_descriptor_header *)&as_out_interface_alt_1_desc,
+       (struct usb_descriptor_header *)&as_out_header_desc,
+
+       (struct usb_descriptor_header *)&as_out_type_i_desc,
+
+       //(struct usb_descriptor_header *)&as_out_ep_desc,
+       (struct usb_descriptor_header *)&ss_as_out_ep_desc,
+       (struct usb_descriptor_header *)&ss_as_out_ep_desc_comp,
+       (struct usb_descriptor_header *)&as_iso_out_desc,
+
+       (struct usb_descriptor_header *)&as_in_interface_alt_0_desc,
+       (struct usb_descriptor_header *)&as_in_interface_alt_1_desc,
+       (struct usb_descriptor_header *)&as_in_header_desc,
+
+       (struct usb_descriptor_header *)&as_in_type_i_desc,
+
+       //(struct usb_descriptor_header *)&as_in_ep_desc,
+       (struct usb_descriptor_header *)&ss_as_in_ep_desc,
+       (struct usb_descriptor_header *)&ss_as_in_ep_desc_comp,
+       (struct usb_descriptor_header *)&as_iso_in_desc,
+       NULL,
+};
+
 enum {
        STR_AC_IF,
        STR_USB_OUT_IT,
@@ -1352,6 +1423,7 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
                ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
                if (!ep)
                        goto err_free_fu;
+               ss_as_out_ep_desc.bEndpointAddress = as_out_ep_desc.bEndpointAddress;
                audio->out_ep = ep;
                audio->out_ep->desc = &as_out_ep_desc;
        }
@@ -1360,6 +1432,7 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
                ep = usb_ep_autoconfig(cdev->gadget, &as_in_ep_desc);
                if (!ep)
                        goto err_free_fu;
+               ss_as_in_ep_desc.bEndpointAddress = as_in_ep_desc.bEndpointAddress;
                audio->in_ep = ep;
                audio->in_ep->desc = &as_in_ep_desc;
        }
@@ -1367,8 +1440,8 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
        setup_descriptor(audio_opts);
 
        /* copy descriptors, and track endpoint copies */
-       status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL,
-                                       NULL);
+       status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, f_audio_ss_desc,
+                                       f_audio_ss_desc);
        if (status)
                goto err_free_fu;
 
@@ -1561,7 +1634,7 @@ static ssize_t f_uac1_opts_##name##_show(struct config_item *item,        \
        int result;                                                     \
                                                                        \
        mutex_lock(&opts->lock);                                        \
-       result = snprintf(page, sizeof(opts->name), "%s", opts->name);  \
+       result = scnprintf(page, sizeof(opts->name), "%s", opts->name); \
        mutex_unlock(&opts->lock);                                      \
                                                                        \
        return result;                                                  \
@@ -1579,7 +1652,7 @@ static ssize_t f_uac1_opts_##name##_store(struct config_item *item,       \
                goto end;                                               \
        }                                                               \
                                                                        \
-       ret = snprintf(opts->name, min(sizeof(opts->name), len),        \
+       ret = scnprintf(opts->name, min(sizeof(opts->name), len),       \
                        "%s", page);                                    \
                                                                        \
 end:                                                                   \
@@ -1685,7 +1758,7 @@ static struct usb_function_instance *f_audio_alloc_inst(void)
 
        opts->req_number = UAC1_DEF_REQ_NUM;
 
-       snprintf(opts->function_name, sizeof(opts->function_name), "AC Interface");
+       scnprintf(opts->function_name, sizeof(opts->function_name), "AC Interface");
 
        return &opts->func_inst;
 }
index f9a0f07a7476be709adf2736f1ff72ca8a1e2257..383f6854cfec5c2e7fd9f490c32165ae56f6c5c8 100644 (file)
@@ -2045,7 +2045,7 @@ static ssize_t f_uac2_opts_##name##_show(struct config_item *item,        \
        int result;                                                     \
                                                                        \
        mutex_lock(&opts->lock);                                        \
-       result = snprintf(page, sizeof(opts->name), "%s", opts->name);  \
+       result = scnprintf(page, sizeof(opts->name), "%s", opts->name); \
        mutex_unlock(&opts->lock);                                      \
                                                                        \
        return result;                                                  \
@@ -2063,7 +2063,7 @@ static ssize_t f_uac2_opts_##name##_store(struct config_item *item,       \
                goto end;                                               \
        }                                                               \
                                                                        \
-       ret = snprintf(opts->name, min(sizeof(opts->name), len),        \
+       ret = scnprintf(opts->name, min(sizeof(opts->name), len),       \
                        "%s", page);                                    \
                                                                        \
 end:                                                                   \
@@ -2187,7 +2187,7 @@ static struct usb_function_instance *afunc_alloc_inst(void)
        opts->req_number = UAC2_DEF_REQ_NUM;
        opts->fb_max = FBACK_FAST_MAX;
 
-       snprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
+       scnprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
 
        opts->p_terminal_type = UAC2_DEF_P_TERM_TYPE;
        opts->c_terminal_type = UAC2_DEF_C_TERM_TYPE;
index 786379f1b7b72457a6bc9e303e47b7f62c6eecae..929666805bd23c7fe8cd61f2ac3cc0a7aeb55bc3 100644 (file)
@@ -263,10 +263,13 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
        return 0;
 }
 
-void uvc_function_setup_continue(struct uvc_device *uvc)
+void uvc_function_setup_continue(struct uvc_device *uvc, int disable_ep)
 {
        struct usb_composite_dev *cdev = uvc->func.config->cdev;
 
+       if (disable_ep && uvc->video.ep)
+               usb_ep_disable(uvc->video.ep);
+
        usb_composite_setup_continue(cdev);
 }
 
@@ -337,15 +340,11 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
                if (uvc->state != UVC_STATE_STREAMING)
                        return 0;
 
-               if (uvc->video.ep)
-                       usb_ep_disable(uvc->video.ep);
-
                memset(&v4l2_event, 0, sizeof(v4l2_event));
                v4l2_event.type = UVC_EVENT_STREAMOFF;
                v4l2_event_queue(&uvc->vdev, &v4l2_event);
 
-               uvc->state = UVC_STATE_CONNECTED;
-               return 0;
+               return USB_GADGET_DELAYED_STATUS;
 
        case 1:
                if (uvc->state != UVC_STATE_CONNECTED)
@@ -722,13 +721,29 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
        }
        uvc->enable_interrupt_ep = opts->enable_interrupt_ep;
 
-       ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
+       /*
+        * gadget_is_{super|dual}speed() API check UDC controller capitblity. It should pass down
+        * highest speed endpoint descriptor to UDC controller. So UDC controller driver can reserve
+        * enough resource at check_config(), especially mult and maxburst. So UDC driver (such as
+        * cdns3) can know need at least (mult + 1) * (maxburst + 1) * wMaxPacketSize internal
+        * memory for this uvc functions. This is the only straightforward method to resolve the UDC
+        * resource allocation issue in the current gadget framework.
+        */
+       if (gadget_is_superspeed(c->cdev->gadget))
+               ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep,
+                                         &uvc_ss_streaming_comp);
+       else if (gadget_is_dualspeed(cdev->gadget))
+               ep = usb_ep_autoconfig(cdev->gadget, &uvc_hs_streaming_ep);
+       else
+               ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
+
        if (!ep) {
                uvcg_info(f, "Unable to allocate streaming EP\n");
                goto error;
        }
        uvc->video.ep = ep;
 
+       uvc_fs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
        uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
        uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;
 
@@ -960,7 +975,8 @@ static void uvc_free(struct usb_function *f)
        struct uvc_device *uvc = to_uvc(f);
        struct f_uvc_opts *opts = container_of(f->fi, struct f_uvc_opts,
                                               func_inst);
-       config_item_put(&uvc->header->item);
+       if (!opts->header)
+               config_item_put(&uvc->header->item);
        --opts->refcnt;
        kfree(uvc);
 }
@@ -1052,25 +1068,29 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
        uvc->desc.hs_streaming = opts->hs_streaming;
        uvc->desc.ss_streaming = opts->ss_streaming;
 
-       streaming = config_group_find_item(&opts->func_inst.group, "streaming");
-       if (!streaming)
-               goto err_config;
-
-       header = config_group_find_item(to_config_group(streaming), "header");
-       config_item_put(streaming);
-       if (!header)
-               goto err_config;
-
-       h = config_group_find_item(to_config_group(header), "h");
-       config_item_put(header);
-       if (!h)
-               goto err_config;
-
-       uvc->header = to_uvcg_streaming_header(h);
-       if (!uvc->header->linked) {
-               mutex_unlock(&opts->lock);
-               kfree(uvc);
-               return ERR_PTR(-EBUSY);
+       if (opts->header) {
+               uvc->header = opts->header;
+       } else {
+               streaming = config_group_find_item(&opts->func_inst.group, "streaming");
+               if (!streaming)
+                       goto err_config;
+
+               header = config_group_find_item(to_config_group(streaming), "header");
+               config_item_put(streaming);
+               if (!header)
+                       goto err_config;
+
+               h = config_group_find_item(to_config_group(header), "h");
+               config_item_put(header);
+               if (!h)
+                       goto err_config;
+
+               uvc->header = to_uvcg_streaming_header(h);
+               if (!uvc->header->linked) {
+                       mutex_unlock(&opts->lock);
+                       kfree(uvc);
+                       return ERR_PTR(-EBUSY);
+               }
        }
 
        uvc->desc.extension_units = &opts->extension_units;
index 1db972d4beebe13875974a9708f41ae8d29ecd12..083aef0c65c6a6edcf12f4398c78d004978e9ab8 100644 (file)
@@ -11,7 +11,7 @@
 
 struct uvc_device;
 
-void uvc_function_setup_continue(struct uvc_device *uvc);
+void uvc_function_setup_continue(struct uvc_device *uvc, int disable_ep);
 
 void uvc_function_connect(struct uvc_device *uvc);
 
index 9d1c40c152d86448a63017575931553240b1bd7c..3c5a6f6ac3414c965519a23d00650c77c9793a4e 100644 (file)
@@ -1163,6 +1163,8 @@ struct net_device *gether_connect(struct gether *link)
                if (netif_running(dev->net))
                        eth_start(dev, GFP_ATOMIC);
 
+               netif_device_attach(dev->net);
+
        /* on error, disable any endpoints  */
        } else {
                (void) usb_ep_disable(link->out_ep);
index 5408854d84072dc7e35646ee978fb09cdeaa7f19..49ec095cdb4b6dcb330fd3149b502840312f78ec 100644 (file)
@@ -31,6 +31,8 @@ struct f_ncm_opts {
         */
        struct mutex                    lock;
        int                             refcnt;
+
+       u16                             max_segment_size;
 };
 
 #endif /* U_NCM_H */
index 1ce58f61253c9afaa53f51bf3e6164bb87a90321..3ac392cbb779495434f6e8a19d073f0c91d2ce23 100644 (file)
@@ -98,6 +98,12 @@ struct f_uvc_opts {
         */
        struct mutex                    lock;
        int                             refcnt;
+
+       /*
+        * Only used by the legacy gadget. Shall be NULL for configfs-composed
+        * gadgets, which is guaranteed by f_uvc's alloc_inst implementation
+        * using kzalloc().
+        */
+       struct uvcg_streaming_header    *header;
 };
 
 #endif /* U_UVC_H */
index 6751de8b63ad9437bbaa08ad47412048e8e79b29..cb35687b11e7e986dd6d8a07be90ba0f994276ef 100644 (file)
@@ -81,6 +81,7 @@ struct uvc_request {
        struct sg_table sgt;
        u8 header[UVCG_REQUEST_HEADER_LEN];
        struct uvc_buffer *last_buf;
+       struct list_head list;
 };
 
 struct uvc_video {
@@ -101,9 +102,18 @@ struct uvc_video {
        unsigned int uvc_num_requests;
 
        /* Requests */
+       bool is_enabled; /* tracks whether video stream is enabled */
        unsigned int req_size;
-       struct uvc_request *ureq;
+       struct list_head ureqs; /* all uvc_requests allocated by uvc_video */
+
+       /* USB requests that the video pump thread can encode into */
        struct list_head req_free;
+
+       /*
+        * USB requests video pump thread has already encoded into. These are
+        * ready to be queued to the endpoint.
+        */
+       struct list_head req_ready;
        spinlock_t req_lock;
 
        unsigned int req_int_count;
@@ -177,7 +187,7 @@ struct uvc_file_handle {
  * Functions
  */
 
-extern void uvc_function_setup_continue(struct uvc_device *uvc);
+extern void uvc_function_setup_continue(struct uvc_device *uvc, int disable_ep);
 extern void uvc_function_connect(struct uvc_device *uvc);
 extern void uvc_function_disconnect(struct uvc_device *uvc);
 
index 9bf0e985acfab531dfe70989a7000c1a0fe09caf..7e704b2bcfd1ce5a2c28a4f318fca6633a7072f8 100644 (file)
@@ -3414,7 +3414,7 @@ static ssize_t f_uvc_opts_string_##cname##_show(struct config_item *item,\
        int result;                                                     \
                                                                        \
        mutex_lock(&opts->lock);                                        \
-       result = snprintf(page, sizeof(opts->aname), "%s", opts->aname);\
+       result = scnprintf(page, sizeof(opts->aname), "%s", opts->aname);\
        mutex_unlock(&opts->lock);                                      \
                                                                        \
        return result;                                                  \
index 3f0a9795c0d45d150f2371f77554f558e23b812b..c7e5fa4f29e038e7737bfd13c90ee3a2c577da30 100644 (file)
@@ -443,7 +443,7 @@ uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
                return -EINVAL;
 
        /* Enable UVC video. */
-       ret = uvcg_video_enable(video, 1);
+       ret = uvcg_video_enable(video);
        if (ret < 0)
                return ret;
 
@@ -451,7 +451,7 @@ uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
         * Complete the alternate setting selection setup phase now that
         * userspace is ready to provide video frames.
         */
-       uvc_function_setup_continue(uvc);
+       uvc_function_setup_continue(uvc, 0);
        uvc->state = UVC_STATE_STREAMING;
 
        return 0;
@@ -463,11 +463,18 @@ uvc_v4l2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
        struct video_device *vdev = video_devdata(file);
        struct uvc_device *uvc = video_get_drvdata(vdev);
        struct uvc_video *video = &uvc->video;
+       int ret = 0;
 
        if (type != video->queue.queue.type)
                return -EINVAL;
 
-       return uvcg_video_enable(video, 0);
+       ret = uvcg_video_disable(video);
+       if (ret < 0)
+               return ret;
+
+       uvc->state = UVC_STATE_CONNECTED;
+       uvc_function_setup_continue(uvc, 1);
+       return 0;
 }
 
 static int
@@ -500,7 +507,7 @@ uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
 static void uvc_v4l2_disable(struct uvc_device *uvc)
 {
        uvc_function_disconnect(uvc);
-       uvcg_video_enable(&uvc->video, 0);
+       uvcg_video_disable(&uvc->video);
        uvcg_free_buffers(&uvc->video.queue);
        uvc->func_connected = false;
        wake_up_interruptible(&uvc->func_connected_queue);
@@ -647,4 +654,3 @@ const struct v4l2_file_operations uvc_v4l2_fops = {
        .get_unmapped_area = uvcg_v4l2_get_unmapped_area,
 #endif
 };
-
index 91af3b1ef0d412e9d71720afb38f24a703ec4e6e..dd3241fc6939d6fa0e0a23a36469e5f137164e3a 100644 (file)
@@ -227,6 +227,28 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
  * Request handling
  */
 
+/*
+ * Callers must take care to hold req_lock when this function may be called
+ * from multiple threads. For example, when frames are streaming to the host.
+ */
+static void
+uvc_video_free_request(struct uvc_request *ureq, struct usb_ep *ep)
+{
+       sg_free_table(&ureq->sgt);
+       if (ureq->req && ep) {
+               usb_ep_free_request(ep, ureq->req);
+               ureq->req = NULL;
+       }
+
+       kfree(ureq->req_buffer);
+       ureq->req_buffer = NULL;
+
+       if (!list_empty(&ureq->list))
+               list_del_init(&ureq->list);
+
+       kfree(ureq);
+}
+
 static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
 {
        int ret;
@@ -247,14 +269,127 @@ static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
        return ret;
 }
 
+/* This function must be called with video->req_lock held. */
+static int uvcg_video_usb_req_queue(struct uvc_video *video,
+       struct usb_request *req, bool queue_to_ep)
+{
+       bool is_bulk = video->max_payload_size;
+       struct list_head *list = NULL;
+
+       if (!video->is_enabled)
+               return -ENODEV;
+
+       if (queue_to_ep) {
+               struct uvc_request *ureq = req->context;
+               /*
+                * With USB3 handling more requests at a higher speed, we can't
+                * afford to generate an interrupt for every request. Decide to
+                * interrupt:
+                *
+                * - When no more requests are available in the free queue, as
+                *   this may be our last chance to refill the endpoint's
+                *   request queue.
+                *
+                * - When this request is the last request for the video
+                *   buffer, as we want to start sending the next video buffer
+                *   ASAP in case it doesn't get started already in the next
+                *   iteration of this loop.
+                *
+                * - Four times over the length of the requests queue (as
+                *   indicated by video->uvc_num_requests), as a trade-off
+                *   between latency and interrupt load.
+                */
+               if (list_empty(&video->req_free) || ureq->last_buf ||
+                       !(video->req_int_count %
+                       DIV_ROUND_UP(video->uvc_num_requests, 4))) {
+                       video->req_int_count = 0;
+                       req->no_interrupt = 0;
+               } else {
+                       req->no_interrupt = 1;
+               }
+               video->req_int_count++;
+               return uvcg_video_ep_queue(video, req);
+       }
+       /*
+        * If we're not queuing to the ep, for isoc we're queuing
+        * to the req_ready list, otherwise req_free.
+        */
+       list = is_bulk ? &video->req_free : &video->req_ready;
+       list_add_tail(&req->list, list);
+       return 0;
+}
+
+/*
+ * Must only be called from uvcg_video_enable - since after that we only want to
+ * queue requests to the endpoint from the uvc_video_complete complete handler.
+ * This function is needed in order to 'kick start' the flow of requests from
+ * gadget driver to the usb controller.
+ */
+static void uvc_video_ep_queue_initial_requests(struct uvc_video *video)
+{
+       struct usb_request *req = NULL;
+       unsigned long flags = 0;
+       unsigned int count = 0;
+       int ret = 0;
+
+       /*
+        * We only queue half of the free list since we still want to have
+        * some free usb_requests in the free list for the video_pump async_wq
+        * thread to encode uvc buffers into. Otherwise we could get into a
+        * situation where the free list does not have any usb requests to
+        * encode into - we always end up queueing 0 length requests to the
+        * end point.
+        */
+       unsigned int half_list_size = video->uvc_num_requests / 2;
+
+       spin_lock_irqsave(&video->req_lock, flags);
+       /*
+        * Take these requests off the free list and queue them all to the
+        * endpoint. Since we queue 0 length requests with the req_lock held,
+        * there isn't any 'data' race involved here with the complete handler.
+        */
+       while (count < half_list_size) {
+               req = list_first_entry(&video->req_free, struct usb_request,
+                                       list);
+               list_del(&req->list);
+               req->length = 0;
+               ret = uvcg_video_ep_queue(video, req);
+               if (ret < 0) {
+                       uvcg_queue_cancel(&video->queue, 0);
+                       break;
+               }
+               count++;
+       }
+       spin_unlock_irqrestore(&video->req_lock, flags);
+}
+
 static void
 uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
 {
        struct uvc_request *ureq = req->context;
        struct uvc_video *video = ureq->video;
        struct uvc_video_queue *queue = &video->queue;
-       struct uvc_device *uvc = video->uvc;
+       struct uvc_buffer *last_buf;
        unsigned long flags;
+       bool is_bulk = video->max_payload_size;
+       int ret = 0;
+
+       spin_lock_irqsave(&video->req_lock, flags);
+       if (!video->is_enabled) {
+               /*
+                * When is_enabled is false, uvcg_video_disable() ensures
+                * that in-flight uvc_buffers are returned, so we can
+                * safely call free_request without worrying about
+                * last_buf.
+                */
+               uvc_video_free_request(ureq, ep);
+               spin_unlock_irqrestore(&video->req_lock, flags);
+               return;
+       }
+
+       last_buf = ureq->last_buf;
+       ureq->last_buf = NULL;
+       spin_unlock_irqrestore(&video->req_lock, flags);
 
        switch (req->status) {
        case 0:
@@ -277,44 +412,85 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
                uvcg_queue_cancel(queue, 0);
        }
 
-       if (ureq->last_buf) {
-               uvcg_complete_buffer(&video->queue, ureq->last_buf);
-               ureq->last_buf = NULL;
+       if (last_buf) {
+               spin_lock_irqsave(&queue->irqlock, flags);
+               uvcg_complete_buffer(queue, last_buf);
+               spin_unlock_irqrestore(&queue->irqlock, flags);
        }
 
        spin_lock_irqsave(&video->req_lock, flags);
-       list_add_tail(&req->list, &video->req_free);
+       /*
+        * Video stream might have been disabled while we were
+        * processing the current usb_request. So make sure
+        * we're still streaming before queueing the usb_request
+        * back to req_free
+        */
+       if (video->is_enabled) {
+               /*
+                * Here we check whether any request is available in the ready
+                * list. If it is, queue it to the ep and add the current
+                * usb_request to the req_free list - for video_pump to fill in.
+                * Otherwise, just use the current usb_request to queue a 0
+                * length request to the ep. Since we always add to the req_free
+                * list if we dequeue from the ready list, there will never
+                * be a situation where the req_free list is completely out of
+                * requests and cannot recover.
+                */
+               struct usb_request *to_queue = req;
+
+               to_queue->length = 0;
+               if (!list_empty(&video->req_ready)) {
+                       to_queue = list_first_entry(&video->req_ready,
+                               struct usb_request, list);
+                       list_del(&to_queue->list);
+                       list_add_tail(&req->list, &video->req_free);
+                       /*
+                        * Queue work to the wq as well since it is possible that a
+                        * buffer may not have been completely encoded with the set of
+                        * in-flight usb requests for which the complete callbacks are
+                        * firing.
+                        * In that case, if we do not queue work to the worker thread,
+                        * the buffer will never be marked as complete - and therefore
+                        * not be returned to userspace. As a result,
+                        * dequeue -> queue -> dequeue flow of uvc buffers will not
+                        * happen.
+                        */
+                       queue_work(video->async_wq, &video->pump);
+               }
+               /*
+                * Queue to the endpoint. The actual queueing to ep will
+                * only happen on one thread - the async_wq for bulk endpoints
+                * and this thread for isoc endpoints.
+                */
+               ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
+               if (ret < 0) {
+                       /*
+                        * Endpoint error, but the stream is still enabled.
+                        * Put request back in req_free for it to be cleaned
+                        * up later.
+                        */
+                       list_add_tail(&to_queue->list, &video->req_free);
+               }
+       } else {
+               uvc_video_free_request(ureq, ep);
+               ret = 0;
+       }
        spin_unlock_irqrestore(&video->req_lock, flags);
-
-       if (uvc->state == UVC_STATE_STREAMING)
-               queue_work(video->async_wq, &video->pump);
+       if (ret < 0)
+               uvcg_queue_cancel(queue, 0);
 }
 
 static int
 uvc_video_free_requests(struct uvc_video *video)
 {
-       unsigned int i;
-
-       if (video->ureq) {
-               for (i = 0; i < video->uvc_num_requests; ++i) {
-                       sg_free_table(&video->ureq[i].sgt);
+       struct uvc_request *ureq, *temp;
 
-                       if (video->ureq[i].req) {
-                               usb_ep_free_request(video->ep, video->ureq[i].req);
-                               video->ureq[i].req = NULL;
-                       }
-
-                       if (video->ureq[i].req_buffer) {
-                               kfree(video->ureq[i].req_buffer);
-                               video->ureq[i].req_buffer = NULL;
-                       }
-               }
-
-               kfree(video->ureq);
-               video->ureq = NULL;
-       }
+       list_for_each_entry_safe(ureq, temp, &video->ureqs, list)
+               uvc_video_free_request(ureq, video->ep);
 
+       INIT_LIST_HEAD(&video->ureqs);
        INIT_LIST_HEAD(&video->req_free);
+       INIT_LIST_HEAD(&video->req_ready);
        video->req_size = 0;
        return 0;
 }
@@ -322,6 +498,7 @@ uvc_video_free_requests(struct uvc_video *video)
 static int
 uvc_video_alloc_requests(struct uvc_video *video)
 {
+       struct uvc_request *ureq;
        unsigned int req_size;
        unsigned int i;
        int ret = -ENOMEM;
@@ -332,29 +509,33 @@ uvc_video_alloc_requests(struct uvc_video *video)
                 * max_t(unsigned int, video->ep->maxburst, 1)
                 * (video->ep->mult);
 
-       video->ureq = kcalloc(video->uvc_num_requests, sizeof(struct uvc_request), GFP_KERNEL);
-       if (video->ureq == NULL)
-               return -ENOMEM;
+       for (i = 0; i < video->uvc_num_requests; i++) {
+               ureq = kzalloc(sizeof(struct uvc_request), GFP_KERNEL);
+               if (ureq == NULL)
+                       goto error;
+
+               INIT_LIST_HEAD(&ureq->list);
 
-       for (i = 0; i < video->uvc_num_requests; ++i) {
-               video->ureq[i].req_buffer = kmalloc(req_size, GFP_KERNEL);
-               if (video->ureq[i].req_buffer == NULL)
+               list_add_tail(&ureq->list, &video->ureqs);
+
+               ureq->req_buffer = kmalloc(req_size, GFP_KERNEL);
+               if (ureq->req_buffer == NULL)
                        goto error;
 
-               video->ureq[i].req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
-               if (video->ureq[i].req == NULL)
+               ureq->req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
+               if (ureq->req == NULL)
                        goto error;
 
-               video->ureq[i].req->buf = video->ureq[i].req_buffer;
-               video->ureq[i].req->length = 0;
-               video->ureq[i].req->complete = uvc_video_complete;
-               video->ureq[i].req->context = &video->ureq[i];
-               video->ureq[i].video = video;
-               video->ureq[i].last_buf = NULL;
+               ureq->req->buf = ureq->req_buffer;
+               ureq->req->length = 0;
+               ureq->req->complete = uvc_video_complete;
+               ureq->req->context = ureq;
+               ureq->video = video;
+               ureq->last_buf = NULL;
 
-               list_add_tail(&video->ureq[i].req->list, &video->req_free);
+               list_add_tail(&ureq->req->list, &video->req_free);
                /* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
-               sg_alloc_table(&video->ureq[i].sgt,
+               sg_alloc_table(&ureq->sgt,
                               DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
                                            PAGE_SIZE) + 2, GFP_KERNEL);
        }
@@ -387,16 +568,18 @@ static void uvcg_video_pump(struct work_struct *work)
        struct usb_request *req = NULL;
        struct uvc_buffer *buf;
        unsigned long flags;
-       bool buf_done;
-       int ret;
+       int ret = 0;
+
+       while (true) {
+               if (!video->ep->enabled)
+                       return;
 
-       while (video->ep->enabled) {
                /*
-                * Retrieve the first available USB request, protected by the
-                * request lock.
+                * Check is_enabled and retrieve the first available USB
+                * request, protected by the request lock.
                 */
                spin_lock_irqsave(&video->req_lock, flags);
-               if (list_empty(&video->req_free)) {
+               if (!video->is_enabled || list_empty(&video->req_free)) {
                        spin_unlock_irqrestore(&video->req_lock, flags);
                        return;
                }
@@ -414,15 +597,6 @@ static void uvcg_video_pump(struct work_struct *work)
 
                if (buf != NULL) {
                        video->encode(req, video, buf);
-                       buf_done = buf->state == UVC_BUF_STATE_DONE;
-               } else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
-                       /*
-                        * No video buffer available; the queue is still connected and
-                        * we're transferring over ISOC. Queue a 0 length request to
-                        * prevent missed ISOC transfers.
-                        */
-                       req->length = 0;
-                       buf_done = false;
                } else {
                        /*
                         * Either the queue has been disconnected or no video buffer
@@ -433,83 +607,139 @@ static void uvcg_video_pump(struct work_struct *work)
                        break;
                }
 
-               /*
-                * With USB3 handling more requests at a higher speed, we can't
-                * afford to generate an interrupt for every request. Decide to
-                * interrupt:
-                *
-                * - When no more requests are available in the free queue, as
-                *   this may be our last chance to refill the endpoint's
-                *   request queue.
-                *
-                * - When this is request is the last request for the video
-                *   buffer, as we want to start sending the next video buffer
-                *   ASAP in case it doesn't get started already in the next
-                *   iteration of this loop.
-                *
-                * - Four times over the length of the requests queue (as
-                *   indicated by video->uvc_num_requests), as a trade-off
-                *   between latency and interrupt load.
-                */
-               if (list_empty(&video->req_free) || buf_done ||
-                   !(video->req_int_count %
-                      DIV_ROUND_UP(video->uvc_num_requests, 4))) {
-                       video->req_int_count = 0;
-                       req->no_interrupt = 0;
-               } else {
-                       req->no_interrupt = 1;
-               }
-
-               /* Queue the USB request */
-               ret = uvcg_video_ep_queue(video, req);
                spin_unlock_irqrestore(&queue->irqlock, flags);
 
+               spin_lock_irqsave(&video->req_lock, flags);
+               /*
+                * For bulk endpoints we queue from the worker thread, since we
+                * would preferably not want to wait on requests to be ready in
+                * the uvcg_video_complete() handler. For isoc endpoints we add
+                * the request to the ready list and only queue it to the
+                * endpoint from the complete handler.
+                */
+               ret = uvcg_video_usb_req_queue(video, req, is_bulk);
+               spin_unlock_irqrestore(&video->req_lock, flags);
+
                if (ret < 0) {
                        uvcg_queue_cancel(queue, 0);
                        break;
                }
 
-               /* Endpoint now owns the request */
+               /* The request is owned by the endpoint / ready list. */
                req = NULL;
-               video->req_int_count++;
        }
 
        if (!req)
                return;
 
        spin_lock_irqsave(&video->req_lock, flags);
-       list_add_tail(&req->list, &video->req_free);
+       if (video->is_enabled)
+               list_add_tail(&req->list, &video->req_free);
+       else
+               uvc_video_free_request(req->context, video->ep);
        spin_unlock_irqrestore(&video->req_lock, flags);
-       return;
 }
 
 /*
- * Enable or disable the video stream.
+ * Disable the video stream
  */
-int uvcg_video_enable(struct uvc_video *video, int enable)
+int
+uvcg_video_disable(struct uvc_video *video)
 {
-       unsigned int i;
-       int ret;
+       unsigned long flags;
+       struct list_head inflight_bufs;
+       struct usb_request *req, *temp;
+       struct uvc_buffer *buf, *btemp;
+       struct uvc_request *ureq, *utemp;
 
        if (video->ep == NULL) {
                uvcg_info(&video->uvc->func,
-                         "Video enable failed, device is uninitialized.\n");
+                         "Video disable failed, device is uninitialized.\n");
                return -ENODEV;
        }
 
-       if (!enable) {
-               cancel_work_sync(&video->pump);
-               uvcg_queue_cancel(&video->queue, 0);
+       INIT_LIST_HEAD(&inflight_bufs);
+       spin_lock_irqsave(&video->req_lock, flags);
+       video->is_enabled = false;
+
+       /*
+        * Remove any in-flight buffers from the uvc_requests
+        * because we want to return them before cancelling the
+        * queue. This ensures that we aren't stuck waiting for
+        * all complete callbacks to come through before disabling
+        * vb2 queue.
+        */
+       list_for_each_entry(ureq, &video->ureqs, list) {
+               if (ureq->last_buf) {
+                       list_add_tail(&ureq->last_buf->queue, &inflight_bufs);
+                       ureq->last_buf = NULL;
+               }
+       }
+       spin_unlock_irqrestore(&video->req_lock, flags);
 
-               for (i = 0; i < video->uvc_num_requests; ++i)
-                       if (video->ureq && video->ureq[i].req)
-                               usb_ep_dequeue(video->ep, video->ureq[i].req);
+       cancel_work_sync(&video->pump);
+       uvcg_queue_cancel(&video->queue, 0);
+
+       spin_lock_irqsave(&video->req_lock, flags);
+       /*
+        * Remove all uvc_requests from ureqs with list_del_init
+        * This lets uvc_video_free_request correctly identify
+        * if the uvc_request is attached to a list or not when freeing
+        * memory.
+        */
+       list_for_each_entry_safe(ureq, utemp, &video->ureqs, list)
+               list_del_init(&ureq->list);
+
+       list_for_each_entry_safe(req, temp, &video->req_free, list) {
+               list_del(&req->list);
+               uvc_video_free_request(req->context, video->ep);
+       }
 
-               uvc_video_free_requests(video);
-               uvcg_queue_enable(&video->queue, 0);
-               return 0;
+       list_for_each_entry_safe(req, temp, &video->req_ready, list) {
+               list_del(&req->list);
+               uvc_video_free_request(req->context, video->ep);
        }
 
+       INIT_LIST_HEAD(&video->ureqs);
+       INIT_LIST_HEAD(&video->req_free);
+       INIT_LIST_HEAD(&video->req_ready);
+       video->req_size = 0;
+       spin_unlock_irqrestore(&video->req_lock, flags);
+
+       /*
+        * Return all the video buffers before disabling the queue.
+        */
+       spin_lock_irqsave(&video->queue.irqlock, flags);
+       list_for_each_entry_safe(buf, btemp, &inflight_bufs, queue) {
+               list_del(&buf->queue);
+               uvcg_complete_buffer(&video->queue, buf);
+       }
+       spin_unlock_irqrestore(&video->queue.irqlock, flags);
+
+       uvcg_queue_enable(&video->queue, 0);
+       return 0;
+}
+
+/*
+ * Enable the video stream.
+ */
+int uvcg_video_enable(struct uvc_video *video)
+{
+       int ret;
+
+       if (video->ep == NULL) {
+               uvcg_info(&video->uvc->func,
+                         "Video enable failed, device is uninitialized.\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Safe to access request related fields without req_lock because
+        * this is the only thread currently active, and no other
+        * request handling thread will become active until this function
+        * returns.
+        */
+       video->is_enabled = true;
+
        if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
                return ret;
 
@@ -525,7 +755,7 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
 
        video->req_int_count = 0;
 
-       queue_work(video->async_wq, &video->pump);
+       uvc_video_ep_queue_initial_requests(video);
 
        return ret;
 }
@@ -535,7 +765,10 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
  */
 int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
 {
+       video->is_enabled = false;
+       INIT_LIST_HEAD(&video->ureqs);
        INIT_LIST_HEAD(&video->req_free);
+       INIT_LIST_HEAD(&video->req_ready);
        spin_lock_init(&video->req_lock);
        INIT_WORK(&video->pump, uvcg_video_pump);
 
index 03adeefa343b71e2dcb15cb61d03834430000cc0..8ef6259741f1313a88bd992e7be65241a65dd634 100644 (file)
@@ -14,7 +14,8 @@
 
 struct uvc_video;
 
-int uvcg_video_enable(struct uvc_video *video, int enable);
+int uvcg_video_enable(struct uvc_video *video);
+int uvcg_video_disable(struct uvc_video *video);
 
 int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc);
 
index c06dd1af7a0c506a17210bbc9ea41ac5355985d9..c395438d39780ec07b46344ef004596995cc04d1 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/usb/video.h>
 
 #include "u_uvc.h"
+#include "uvc_configfs.h"
 
 USB_GADGET_COMPOSITE_OPTIONS();
 
@@ -84,8 +85,6 @@ static struct usb_device_descriptor webcam_device_descriptor = {
        .bNumConfigurations     = 0, /* dynamic */
 };
 
-DECLARE_UVC_HEADER_DESCRIPTOR(1);
-
 static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = {
        .bLength                = UVC_DT_HEADER_SIZE(1),
        .bDescriptorType        = USB_DT_CS_INTERFACE,
@@ -158,43 +157,112 @@ static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = {
        .bmaControls[1][0]      = 4,
 };
 
-static const struct uvc_format_uncompressed uvc_format_yuv = {
-       .bLength                = UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
-       .bDescriptorType        = USB_DT_CS_INTERFACE,
-       .bDescriptorSubType     = UVC_VS_FORMAT_UNCOMPRESSED,
-       .bFormatIndex           = 1,
-       .bNumFrameDescriptors   = 2,
-       .guidFormat             =
-               { 'Y',  'U',  'Y',  '2', 0x00, 0x00, 0x10, 0x00,
-                0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71},
-       .bBitsPerPixel          = 16,
-       .bDefaultFrameIndex     = 1,
-       .bAspectRatioX          = 0,
-       .bAspectRatioY          = 0,
-       .bmInterlaceFlags       = 0,
-       .bCopyProtect           = 0,
+static const struct uvcg_color_matching uvcg_color_matching = {
+       .desc = {
+               .bLength                = UVC_DT_COLOR_MATCHING_SIZE,
+               .bDescriptorType        = USB_DT_CS_INTERFACE,
+               .bDescriptorSubType     = UVC_VS_COLORFORMAT,
+               .bColorPrimaries        = 1,
+               .bTransferCharacteristics       = 1,
+               .bMatrixCoefficients    = 4,
+       },
+};
+
+static struct uvcg_uncompressed uvcg_format_yuv = {
+       .fmt = {
+               .type                   = UVCG_UNCOMPRESSED,
+               /* add to .frames and fill .num_frames at runtime */
+               .color_matching         = (struct uvcg_color_matching *)&uvcg_color_matching,
+       },
+       .desc = {
+               .bLength                = UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
+               .bDescriptorType        = USB_DT_CS_INTERFACE,
+               .bDescriptorSubType     = UVC_VS_FORMAT_UNCOMPRESSED,
+               .bFormatIndex           = 1,
+               .bNumFrameDescriptors   = 2,
+               .guidFormat             = {
+                       'Y',  'U',  'Y',  '2', 0x00, 0x00, 0x10, 0x00,
+                        0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71
+               },
+               .bBitsPerPixel          = 16,
+               .bDefaultFrameIndex     = 1,
+               .bAspectRatioX          = 0,
+               .bAspectRatioY          = 0,
+               .bmInterlaceFlags       = 0,
+               .bCopyProtect           = 0,
+       },
+};
+
+static struct uvcg_format_ptr uvcg_format_ptr_yuv = {
+       .fmt = &uvcg_format_yuv.fmt,
 };
 
 DECLARE_UVC_FRAME_UNCOMPRESSED(1);
 DECLARE_UVC_FRAME_UNCOMPRESSED(3);
 
+#define UVCG_WIDTH_360P                        640
+#define UVCG_HEIGHT_360P               360
+#define UVCG_MIN_BITRATE_360P          18432000
+#define UVCG_MAX_BITRATE_360P          55296000
+#define UVCG_MAX_VIDEO_FB_SZ_360P      460800
+#define UVCG_FRM_INTERV_0_360P         666666
+#define UVCG_FRM_INTERV_1_360P         1000000
+#define UVCG_FRM_INTERV_2_360P         5000000
+#define UVCG_DEFAULT_FRM_INTERV_360P   UVCG_FRM_INTERV_0_360P
+
 static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = {
        .bLength                = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
        .bDescriptorType        = USB_DT_CS_INTERFACE,
        .bDescriptorSubType     = UVC_VS_FRAME_UNCOMPRESSED,
        .bFrameIndex            = 1,
        .bmCapabilities         = 0,
-       .wWidth                 = cpu_to_le16(640),
-       .wHeight                = cpu_to_le16(360),
-       .dwMinBitRate           = cpu_to_le32(18432000),
-       .dwMaxBitRate           = cpu_to_le32(55296000),
-       .dwMaxVideoFrameBufferSize      = cpu_to_le32(460800),
-       .dwDefaultFrameInterval = cpu_to_le32(666666),
+       .wWidth                 = cpu_to_le16(UVCG_WIDTH_360P),
+       .wHeight                = cpu_to_le16(UVCG_HEIGHT_360P),
+       .dwMinBitRate           = cpu_to_le32(UVCG_MIN_BITRATE_360P),
+       .dwMaxBitRate           = cpu_to_le32(UVCG_MAX_BITRATE_360P),
+       .dwMaxVideoFrameBufferSize      = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_360P),
+       .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_360P),
        .bFrameIntervalType     = 3,
-       .dwFrameInterval[0]     = cpu_to_le32(666666),
-       .dwFrameInterval[1]     = cpu_to_le32(1000000),
-       .dwFrameInterval[2]     = cpu_to_le32(5000000),
+       .dwFrameInterval[0]     = cpu_to_le32(UVCG_FRM_INTERV_0_360P),
+       .dwFrameInterval[1]     = cpu_to_le32(UVCG_FRM_INTERV_1_360P),
+       .dwFrameInterval[2]     = cpu_to_le32(UVCG_FRM_INTERV_2_360P),
+};
+
+static u32 uvcg_frame_yuv_360p_dw_frame_interval[] = {
+       [0] = UVCG_FRM_INTERV_0_360P,
+       [1] = UVCG_FRM_INTERV_1_360P,
+       [2] = UVCG_FRM_INTERV_2_360P,
+};
+
+static const struct uvcg_frame uvcg_frame_yuv_360p = {
+       .fmt_type               = UVCG_UNCOMPRESSED,
+       .frame = {
+               .b_length                       = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
+               .b_descriptor_type              = USB_DT_CS_INTERFACE,
+               .b_descriptor_subtype           = UVC_VS_FRAME_UNCOMPRESSED,
+               .b_frame_index                  = 1,
+               .bm_capabilities                = 0,
+               .w_width                        = UVCG_WIDTH_360P,
+               .w_height                       = UVCG_HEIGHT_360P,
+               .dw_min_bit_rate                = UVCG_MIN_BITRATE_360P,
+               .dw_max_bit_rate                = UVCG_MAX_BITRATE_360P,
+               .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_360P,
+               .dw_default_frame_interval      = UVCG_DEFAULT_FRM_INTERV_360P,
+               .b_frame_interval_type          = 3,
+       },
+       .dw_frame_interval      = uvcg_frame_yuv_360p_dw_frame_interval,
+};
+
+static struct uvcg_frame_ptr uvcg_frame_ptr_yuv_360p = {
+       .frm = (struct uvcg_frame *)&uvcg_frame_yuv_360p,
 };
+#define UVCG_WIDTH_720P                        1280
+#define UVCG_HEIGHT_720P               720
+#define UVCG_MIN_BITRATE_720P          29491200
+#define UVCG_MAX_BITRATE_720P          29491200
+#define UVCG_MAX_VIDEO_FB_SZ_720P      1843200
+#define UVCG_FRM_INTERV_0_720P         5000000
+#define UVCG_DEFAULT_FRM_INTERV_720P   UVCG_FRM_INTERV_0_720P
 
 static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
        .bLength                = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
@@ -202,28 +270,66 @@ static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
        .bDescriptorSubType     = UVC_VS_FRAME_UNCOMPRESSED,
        .bFrameIndex            = 2,
        .bmCapabilities         = 0,
-       .wWidth                 = cpu_to_le16(1280),
-       .wHeight                = cpu_to_le16(720),
-       .dwMinBitRate           = cpu_to_le32(29491200),
-       .dwMaxBitRate           = cpu_to_le32(29491200),
-       .dwMaxVideoFrameBufferSize      = cpu_to_le32(1843200),
-       .dwDefaultFrameInterval = cpu_to_le32(5000000),
+       .wWidth                 = cpu_to_le16(UVCG_WIDTH_720P),
+       .wHeight                = cpu_to_le16(UVCG_HEIGHT_720P),
+       .dwMinBitRate           = cpu_to_le32(UVCG_MIN_BITRATE_720P),
+       .dwMaxBitRate           = cpu_to_le32(UVCG_MAX_BITRATE_720P),
+       .dwMaxVideoFrameBufferSize      = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_720P),
+       .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_720P),
        .bFrameIntervalType     = 1,
-       .dwFrameInterval[0]     = cpu_to_le32(5000000),
+       .dwFrameInterval[0]     = cpu_to_le32(UVCG_FRM_INTERV_0_720P),
 };
 
-static const struct uvc_format_mjpeg uvc_format_mjpg = {
-       .bLength                = UVC_DT_FORMAT_MJPEG_SIZE,
-       .bDescriptorType        = USB_DT_CS_INTERFACE,
-       .bDescriptorSubType     = UVC_VS_FORMAT_MJPEG,
-       .bFormatIndex           = 2,
-       .bNumFrameDescriptors   = 2,
-       .bmFlags                = 0,
-       .bDefaultFrameIndex     = 1,
-       .bAspectRatioX          = 0,
-       .bAspectRatioY          = 0,
-       .bmInterlaceFlags       = 0,
-       .bCopyProtect           = 0,
+static u32 uvcg_frame_yuv_720p_dw_frame_interval[] = {
+       [0] = UVCG_FRM_INTERV_0_720P,
+};
+
+static const struct uvcg_frame uvcg_frame_yuv_720p = {
+       .fmt_type               = UVCG_UNCOMPRESSED,
+       .frame = {
+               .b_length                       = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
+               .b_descriptor_type              = USB_DT_CS_INTERFACE,
+               .b_descriptor_subtype           = UVC_VS_FRAME_UNCOMPRESSED,
+               .b_frame_index                  = 2,
+               .bm_capabilities                = 0,
+               .w_width                        = UVCG_WIDTH_720P,
+               .w_height                       = UVCG_HEIGHT_720P,
+               .dw_min_bit_rate                = UVCG_MIN_BITRATE_720P,
+               .dw_max_bit_rate                = UVCG_MAX_BITRATE_720P,
+               .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_720P,
+               .dw_default_frame_interval      = UVCG_DEFAULT_FRM_INTERV_720P,
+               .b_frame_interval_type          = 1,
+       },
+       .dw_frame_interval      = uvcg_frame_yuv_720p_dw_frame_interval,
+};
+
+static struct uvcg_frame_ptr uvcg_frame_ptr_yuv_720p = {
+       .frm = (struct uvcg_frame *)&uvcg_frame_yuv_720p,
+};
+
+static struct uvcg_mjpeg uvcg_format_mjpeg = {
+       .fmt = {
+               .type                   = UVCG_MJPEG,
+               /* add to .frames and fill .num_frames at runtime */
+               .color_matching         = (struct uvcg_color_matching *)&uvcg_color_matching,
+       },
+       .desc = {
+               .bLength                = UVC_DT_FORMAT_MJPEG_SIZE,
+               .bDescriptorType        = USB_DT_CS_INTERFACE,
+               .bDescriptorSubType     = UVC_VS_FORMAT_MJPEG,
+               .bFormatIndex           = 2,
+               .bNumFrameDescriptors   = 2,
+               .bmFlags                = 0,
+               .bDefaultFrameIndex     = 1,
+               .bAspectRatioX          = 0,
+               .bAspectRatioY          = 0,
+               .bmInterlaceFlags       = 0,
+               .bCopyProtect           = 0,
+       },
+};
+
+static struct uvcg_format_ptr uvcg_format_ptr_mjpeg = {
+       .fmt = &uvcg_format_mjpeg.fmt,
 };
 
 DECLARE_UVC_FRAME_MJPEG(1);
@@ -235,16 +341,45 @@ static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = {
        .bDescriptorSubType     = UVC_VS_FRAME_MJPEG,
        .bFrameIndex            = 1,
        .bmCapabilities         = 0,
-       .wWidth                 = cpu_to_le16(640),
-       .wHeight                = cpu_to_le16(360),
-       .dwMinBitRate           = cpu_to_le32(18432000),
-       .dwMaxBitRate           = cpu_to_le32(55296000),
-       .dwMaxVideoFrameBufferSize      = cpu_to_le32(460800),
-       .dwDefaultFrameInterval = cpu_to_le32(666666),
+       .wWidth                 = cpu_to_le16(UVCG_WIDTH_360P),
+       .wHeight                = cpu_to_le16(UVCG_HEIGHT_360P),
+       .dwMinBitRate           = cpu_to_le32(UVCG_MIN_BITRATE_360P),
+       .dwMaxBitRate           = cpu_to_le32(UVCG_MAX_BITRATE_360P),
+       .dwMaxVideoFrameBufferSize      = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_360P),
+       .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_360P),
        .bFrameIntervalType     = 3,
-       .dwFrameInterval[0]     = cpu_to_le32(666666),
-       .dwFrameInterval[1]     = cpu_to_le32(1000000),
-       .dwFrameInterval[2]     = cpu_to_le32(5000000),
+       .dwFrameInterval[0]     = cpu_to_le32(UVCG_FRM_INTERV_0_360P),
+       .dwFrameInterval[1]     = cpu_to_le32(UVCG_FRM_INTERV_1_360P),
+       .dwFrameInterval[2]     = cpu_to_le32(UVCG_FRM_INTERV_2_360P),
+};
+
+static u32 uvcg_frame_mjpeg_360p_dw_frame_interval[] = {
+       [0] = UVCG_FRM_INTERV_0_360P,
+       [1] = UVCG_FRM_INTERV_1_360P,
+       [2] = UVCG_FRM_INTERV_2_360P,
+};
+
+static const struct uvcg_frame uvcg_frame_mjpeg_360p = {
+       .fmt_type               = UVCG_MJPEG,
+       .frame = {
+               .b_length                       = UVC_DT_FRAME_MJPEG_SIZE(3),
+               .b_descriptor_type              = USB_DT_CS_INTERFACE,
+               .b_descriptor_subtype           = UVC_VS_FRAME_MJPEG,
+               .b_frame_index                  = 1,
+               .bm_capabilities                = 0,
+               .w_width                        = UVCG_WIDTH_360P,
+               .w_height                       = UVCG_HEIGHT_360P,
+               .dw_min_bit_rate                = UVCG_MIN_BITRATE_360P,
+               .dw_max_bit_rate                = UVCG_MAX_BITRATE_360P,
+               .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_360P,
+               .dw_default_frame_interval      = UVCG_DEFAULT_FRM_INTERV_360P,
+               .b_frame_interval_type          = 3,
+       },
+       .dw_frame_interval      = uvcg_frame_mjpeg_360p_dw_frame_interval,
+};
+
+static struct uvcg_frame_ptr uvcg_frame_ptr_mjpeg_360p = {
+       .frm = (struct uvcg_frame *)&uvcg_frame_mjpeg_360p,
 };
 
 static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
@@ -253,23 +388,44 @@ static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
        .bDescriptorSubType     = UVC_VS_FRAME_MJPEG,
        .bFrameIndex            = 2,
        .bmCapabilities         = 0,
-       .wWidth                 = cpu_to_le16(1280),
-       .wHeight                = cpu_to_le16(720),
-       .dwMinBitRate           = cpu_to_le32(29491200),
-       .dwMaxBitRate           = cpu_to_le32(29491200),
-       .dwMaxVideoFrameBufferSize      = cpu_to_le32(1843200),
-       .dwDefaultFrameInterval = cpu_to_le32(5000000),
+       .wWidth                 = cpu_to_le16(UVCG_WIDTH_720P),
+       .wHeight                = cpu_to_le16(UVCG_HEIGHT_720P),
+       .dwMinBitRate           = cpu_to_le32(UVCG_MIN_BITRATE_720P),
+       .dwMaxBitRate           = cpu_to_le32(UVCG_MAX_BITRATE_720P),
+       .dwMaxVideoFrameBufferSize      = cpu_to_le32(UVCG_MAX_VIDEO_FB_SZ_720P),
+       .dwDefaultFrameInterval = cpu_to_le32(UVCG_DEFAULT_FRM_INTERV_720P),
        .bFrameIntervalType     = 1,
-       .dwFrameInterval[0]     = cpu_to_le32(5000000),
+       .dwFrameInterval[0]     = cpu_to_le32(UVCG_FRM_INTERV_0_720P),
 };
 
-static const struct uvc_color_matching_descriptor uvc_color_matching = {
-       .bLength                = UVC_DT_COLOR_MATCHING_SIZE,
-       .bDescriptorType        = USB_DT_CS_INTERFACE,
-       .bDescriptorSubType     = UVC_VS_COLORFORMAT,
-       .bColorPrimaries        = 1,
-       .bTransferCharacteristics       = 1,
-       .bMatrixCoefficients    = 4,
+static u32 uvcg_frame_mjpeg_720p_dw_frame_interval[] = {
+       [0] = UVCG_FRM_INTERV_0_720P,
+};
+
+static const struct uvcg_frame uvcg_frame_mjpeg_720p = {
+       .fmt_type               = UVCG_MJPEG,
+       .frame = {
+               .b_length                       = UVC_DT_FRAME_MJPEG_SIZE(1),
+               .b_descriptor_type              = USB_DT_CS_INTERFACE,
+               .b_descriptor_subtype           = UVC_VS_FRAME_MJPEG,
+               .b_frame_index                  = 2,
+               .bm_capabilities                = 0,
+               .w_width                        = UVCG_WIDTH_720P,
+               .w_height                       = UVCG_HEIGHT_720P,
+               .dw_min_bit_rate                = UVCG_MIN_BITRATE_720P,
+               .dw_max_bit_rate                = UVCG_MAX_BITRATE_720P,
+               .dw_max_video_frame_buffer_size = UVCG_MAX_VIDEO_FB_SZ_720P,
+               .dw_default_frame_interval      = UVCG_DEFAULT_FRM_INTERV_720P,
+               .b_frame_interval_type          = 1,
+       },
+       .dw_frame_interval      = uvcg_frame_mjpeg_720p_dw_frame_interval,
+};
+
+static struct uvcg_frame_ptr uvcg_frame_ptr_mjpeg_720p = {
+       .frm = (struct uvcg_frame *)&uvcg_frame_mjpeg_720p,
+};
+
+static struct uvcg_streaming_header uvcg_streaming_header = {
 };
 
 static const struct uvc_descriptor_header * const uvc_fs_control_cls[] = {
@@ -290,40 +446,40 @@ static const struct uvc_descriptor_header * const uvc_ss_control_cls[] = {
 
 static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
        (const struct uvc_descriptor_header *) &uvc_input_header,
-       (const struct uvc_descriptor_header *) &uvc_format_yuv,
+       (const struct uvc_descriptor_header *) &uvcg_format_yuv.desc,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
-       (const struct uvc_descriptor_header *) &uvc_color_matching,
-       (const struct uvc_descriptor_header *) &uvc_format_mjpg,
+       (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
+       (const struct uvc_descriptor_header *) &uvcg_format_mjpeg.desc,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
-       (const struct uvc_descriptor_header *) &uvc_color_matching,
+       (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
        NULL,
 };
 
 static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
        (const struct uvc_descriptor_header *) &uvc_input_header,
-       (const struct uvc_descriptor_header *) &uvc_format_yuv,
+       (const struct uvc_descriptor_header *) &uvcg_format_yuv.desc,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
-       (const struct uvc_descriptor_header *) &uvc_color_matching,
-       (const struct uvc_descriptor_header *) &uvc_format_mjpg,
+       (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
+       (const struct uvc_descriptor_header *) &uvcg_format_mjpeg.desc,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
-       (const struct uvc_descriptor_header *) &uvc_color_matching,
+       (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
        NULL,
 };
 
 static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
        (const struct uvc_descriptor_header *) &uvc_input_header,
-       (const struct uvc_descriptor_header *) &uvc_format_yuv,
+       (const struct uvc_descriptor_header *) &uvcg_format_yuv.desc,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
-       (const struct uvc_descriptor_header *) &uvc_color_matching,
-       (const struct uvc_descriptor_header *) &uvc_format_mjpg,
+       (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
+       (const struct uvc_descriptor_header *) &uvcg_format_mjpeg.desc,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
-       (const struct uvc_descriptor_header *) &uvc_color_matching,
+       (const struct uvc_descriptor_header *) &uvcg_color_matching.desc,
        NULL,
 };
 
@@ -387,6 +543,23 @@ webcam_bind(struct usb_composite_dev *cdev)
        uvc_opts->hs_streaming = uvc_hs_streaming_cls;
        uvc_opts->ss_streaming = uvc_ss_streaming_cls;
 
+       INIT_LIST_HEAD(&uvcg_format_yuv.fmt.frames);
+       list_add_tail(&uvcg_frame_ptr_yuv_360p.entry, &uvcg_format_yuv.fmt.frames);
+       list_add_tail(&uvcg_frame_ptr_yuv_720p.entry, &uvcg_format_yuv.fmt.frames);
+       uvcg_format_yuv.fmt.num_frames = 2;
+
+       INIT_LIST_HEAD(&uvcg_format_mjpeg.fmt.frames);
+       list_add_tail(&uvcg_frame_ptr_mjpeg_360p.entry, &uvcg_format_mjpeg.fmt.frames);
+       list_add_tail(&uvcg_frame_ptr_mjpeg_720p.entry, &uvcg_format_mjpeg.fmt.frames);
+       uvcg_format_mjpeg.fmt.num_frames = 2;
+
+       INIT_LIST_HEAD(&uvcg_streaming_header.formats);
+       list_add_tail(&uvcg_format_ptr_yuv.entry, &uvcg_streaming_header.formats);
+       list_add_tail(&uvcg_format_ptr_mjpeg.entry, &uvcg_streaming_header.formats);
+       uvcg_streaming_header.num_fmt = 2;
+
+       uvc_opts->header = &uvcg_streaming_header;
+
        /* Allocate string descriptor numbers ... note that string contents
         * can be overridden by the composite_dev glue.
         */
index 30ea4a9d5301fca5864d37f485753babc06eb0ad..e3bf17a98b380650cef9b3c007d603fd8feba984 100644 (file)
@@ -1924,7 +1924,7 @@ err_unprepare_fclk:
        return retval;
 }
 
-static int at91udc_remove(struct platform_device *pdev)
+static void at91udc_remove(struct platform_device *pdev)
 {
        struct at91_udc *udc = platform_get_drvdata(pdev);
        unsigned long   flags;
@@ -1932,8 +1932,11 @@ static int at91udc_remove(struct platform_device *pdev)
        DBG("remove\n");
 
        usb_del_gadget_udc(&udc->gadget);
-       if (udc->driver)
-               return -EBUSY;
+       if (udc->driver) {
+               dev_err(&pdev->dev,
+                       "Driver still in use but removing anyhow\n");
+               return;
+       }
 
        spin_lock_irqsave(&udc->lock, flags);
        pullup(udc, 0);
@@ -1943,8 +1946,6 @@ static int at91udc_remove(struct platform_device *pdev)
        remove_debug_file(udc);
        clk_unprepare(udc->fclk);
        clk_unprepare(udc->iclk);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM
@@ -2001,7 +2002,7 @@ static int at91udc_resume(struct platform_device *pdev)
 
 static struct platform_driver at91_udc_driver = {
        .probe          = at91udc_probe,
-       .remove         = at91udc_remove,
+       .remove_new     = at91udc_remove,
        .shutdown       = at91udc_shutdown,
        .suspend        = at91udc_suspend,
        .resume         = at91udc_resume,
index 02b1bef5e22e2e5c22c03630277c0193ff58aad1..b76885d78e8a780b9b57cf37b4163e0242ca08aa 100644 (file)
@@ -94,7 +94,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
 
        inode_lock(file_inode(file));
        list_for_each_entry_safe(req, tmp_req, queue, queue) {
-               len = snprintf(tmpbuf, sizeof(tmpbuf),
+               len = scnprintf(tmpbuf, sizeof(tmpbuf),
                                "%8p %08x %c%c%c %5d %c%c%c\n",
                                req->req.buf, req->req.length,
                                req->req.no_interrupt ? 'i' : 'I',
@@ -104,7 +104,6 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
                                req->submitted ? 'F' : 'f',
                                req->using_dma ? 'D' : 'd',
                                req->last_transaction ? 'L' : 'l');
-               len = min(len, sizeof(tmpbuf));
                if (len > nbytes)
                        break;
 
index be9ae0d28a40975be29f9ee27c81c4b789324800..f5f330004190ea0a8388cf23748b15bd4560a5e1 100644 (file)
@@ -16,34 +16,34 @@ static inline const char *cdns2_decode_usb_irq(char *str, size_t size,
 {
        int ret;
 
-       ret = snprintf(str, size, "usbirq: 0x%02x - ", usb_irq);
+       ret = scnprintf(str, size, "usbirq: 0x%02x - ", usb_irq);
 
        if (usb_irq & USBIRQ_SOF)
-               ret += snprintf(str + ret, size - ret, "SOF ");
+               ret += scnprintf(str + ret, size - ret, "SOF ");
        if (usb_irq & USBIRQ_SUTOK)
-               ret += snprintf(str + ret, size - ret, "SUTOK ");
+               ret += scnprintf(str + ret, size - ret, "SUTOK ");
        if (usb_irq & USBIRQ_SUDAV)
-               ret += snprintf(str + ret, size - ret, "SETUP ");
+               ret += scnprintf(str + ret, size - ret, "SETUP ");
        if (usb_irq & USBIRQ_SUSPEND)
-               ret += snprintf(str + ret, size - ret, "Suspend ");
+               ret += scnprintf(str + ret, size - ret, "Suspend ");
        if (usb_irq & USBIRQ_URESET)
-               ret += snprintf(str + ret, size - ret, "Reset ");
+               ret += scnprintf(str + ret, size - ret, "Reset ");
        if (usb_irq & USBIRQ_HSPEED)
-               ret += snprintf(str + ret, size - ret, "HS ");
+               ret += scnprintf(str + ret, size - ret, "HS ");
        if (usb_irq & USBIRQ_LPM)
-               ret += snprintf(str + ret, size - ret, "LPM ");
+               ret += scnprintf(str + ret, size - ret, "LPM ");
 
-       ret += snprintf(str + ret, size - ret, ", EXT: 0x%02x - ", ext_irq);
+       ret += scnprintf(str + ret, size - ret, ", EXT: 0x%02x - ", ext_irq);
 
        if (ext_irq & EXTIRQ_WAKEUP)
-               ret += snprintf(str + ret, size - ret, "Wakeup ");
+               ret += scnprintf(str + ret, size - ret, "Wakeup ");
        if (ext_irq & EXTIRQ_VBUSFAULT_FALL)
-               ret += snprintf(str + ret, size - ret, "VBUS_FALL ");
+               ret += scnprintf(str + ret, size - ret, "VBUS_FALL ");
        if (ext_irq & EXTIRQ_VBUSFAULT_RISE)
-               ret += snprintf(str + ret, size - ret, "VBUS_RISE ");
+               ret += scnprintf(str + ret, size - ret, "VBUS_RISE ");
 
-       if (ret >= size)
-               pr_info("CDNS2: buffer overflowed.\n");
+       if (ret == size - 1)
+               pr_info("CDNS2: buffer may be truncated.\n");
 
        return str;
 }
@@ -54,28 +54,28 @@ static inline const char *cdns2_decode_dma_irq(char *str, size_t size,
 {
        int ret;
 
-       ret = snprintf(str, size, "ISTS: %08x, %s: %08x ",
-                      ep_ists, ep_name, ep_sts);
+       ret = scnprintf(str, size, "ISTS: %08x, %s: %08x ",
+                       ep_ists, ep_name, ep_sts);
 
        if (ep_sts & DMA_EP_STS_IOC)
-               ret += snprintf(str + ret, size - ret, "IOC ");
+               ret += scnprintf(str + ret, size - ret, "IOC ");
        if (ep_sts & DMA_EP_STS_ISP)
-               ret += snprintf(str + ret, size - ret, "ISP ");
+               ret += scnprintf(str + ret, size - ret, "ISP ");
        if (ep_sts & DMA_EP_STS_DESCMIS)
-               ret += snprintf(str + ret, size - ret, "DESCMIS ");
+               ret += scnprintf(str + ret, size - ret, "DESCMIS ");
        if (ep_sts & DMA_EP_STS_TRBERR)
-               ret += snprintf(str + ret, size - ret, "TRBERR ");
+               ret += scnprintf(str + ret, size - ret, "TRBERR ");
        if (ep_sts & DMA_EP_STS_OUTSMM)
-               ret += snprintf(str + ret, size - ret, "OUTSMM ");
+               ret += scnprintf(str + ret, size - ret, "OUTSMM ");
        if (ep_sts & DMA_EP_STS_ISOERR)
-               ret += snprintf(str + ret, size - ret, "ISOERR ");
+               ret += scnprintf(str + ret, size - ret, "ISOERR ");
        if (ep_sts & DMA_EP_STS_DBUSY)
-               ret += snprintf(str + ret, size - ret, "DBUSY ");
+               ret += scnprintf(str + ret, size - ret, "DBUSY ");
        if (DMA_EP_STS_CCS(ep_sts))
-               ret += snprintf(str + ret, size - ret, "CCS ");
+               ret += scnprintf(str + ret, size - ret, "CCS ");
 
-       if (ret >= size)
-               pr_info("CDNS2: buffer overflowed.\n");
+       if (ret == size - 1)
+               pr_info("CDNS2: buffer may be truncated.\n");
 
        return str;
 }
@@ -105,43 +105,43 @@ static inline const char *cdns2_raw_ring(struct cdns2_endpoint *pep,
        int ret;
        int i;
 
-       ret = snprintf(str, size, "\n\t\tTR for %s:", pep->name);
+       ret = scnprintf(str, size, "\n\t\tTR for %s:", pep->name);
 
        trb = &trbs[ring->dequeue];
        dma = cdns2_trb_virt_to_dma(pep, trb);
-       ret += snprintf(str + ret, size - ret,
-                       "\n\t\tRing deq index: %d, trb: V=%p, P=0x%pad\n",
-                       ring->dequeue, trb, &dma);
+       ret += scnprintf(str + ret, size - ret,
+                        "\n\t\tRing deq index: %d, trb: V=%p, P=0x%pad\n",
+                        ring->dequeue, trb, &dma);
 
        trb = &trbs[ring->enqueue];
        dma = cdns2_trb_virt_to_dma(pep, trb);
-       ret += snprintf(str + ret, size - ret,
-                       "\t\tRing enq index: %d, trb: V=%p, P=0x%pad\n",
-                       ring->enqueue, trb, &dma);
+       ret += scnprintf(str + ret, size - ret,
+                        "\t\tRing enq index: %d, trb: V=%p, P=0x%pad\n",
+                        ring->enqueue, trb, &dma);
 
-       ret += snprintf(str + ret, size - ret,
-                       "\t\tfree trbs: %d, CCS=%d, PCS=%d\n",
-                       ring->free_trbs, ring->ccs, ring->pcs);
+       ret += scnprintf(str + ret, size - ret,
+                        "\t\tfree trbs: %d, CCS=%d, PCS=%d\n",
+                        ring->free_trbs, ring->ccs, ring->pcs);
 
        if (TRBS_PER_SEGMENT > 40) {
-               ret += snprintf(str + ret, size - ret,
-                               "\t\tTransfer ring %d too big\n", TRBS_PER_SEGMENT);
+               ret += scnprintf(str + ret, size - ret,
+                                "\t\tTransfer ring %d too big\n", TRBS_PER_SEGMENT);
                return str;
        }
 
        dma = ring->dma;
        for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
                trb = &trbs[i];
-               ret += snprintf(str + ret, size - ret,
-                               "\t\t@%pad %08x %08x %08x\n", &dma,
-                               le32_to_cpu(trb->buffer),
-                               le32_to_cpu(trb->length),
-                               le32_to_cpu(trb->control));
+               ret += scnprintf(str + ret, size - ret,
+                                "\t\t@%pad %08x %08x %08x\n", &dma,
+                                le32_to_cpu(trb->buffer),
+                                le32_to_cpu(trb->length),
+                                le32_to_cpu(trb->control));
                dma += sizeof(*trb);
        }
 
-       if (ret >= size)
-               pr_info("CDNS2: buffer overflowed.\n");
+       if (ret == size - 1)
+               pr_info("CDNS2: buffer may be truncated.\n");
 
        return str;
 }
@@ -166,36 +166,36 @@ static inline const char *cdns2_decode_trb(char *str, size_t size, u32 flags,
 
        switch (type) {
        case TRB_LINK:
-               ret = snprintf(str, size,
-                              "LINK %08x type '%s' flags %c:%c:%c%c:%c",
-                              buffer, cdns2_trb_type_string(type),
-                              flags & TRB_CYCLE ? 'C' : 'c',
-                              flags & TRB_TOGGLE ? 'T' : 't',
-                              flags & TRB_CHAIN ? 'C' : 'c',
-                              flags & TRB_CHAIN ? 'H' : 'h',
-                              flags & TRB_IOC ? 'I' : 'i');
+               ret = scnprintf(str, size,
+                               "LINK %08x type '%s' flags %c:%c:%c%c:%c",
+                               buffer, cdns2_trb_type_string(type),
+                               flags & TRB_CYCLE ? 'C' : 'c',
+                               flags & TRB_TOGGLE ? 'T' : 't',
+                               flags & TRB_CHAIN ? 'C' : 'c',
+                               flags & TRB_CHAIN ? 'H' : 'h',
+                               flags & TRB_IOC ? 'I' : 'i');
                break;
        case TRB_NORMAL:
-               ret = snprintf(str, size,
-                              "type: '%s', Buffer: %08x, length: %ld, burst len: %ld, "
-                              "flags %c:%c:%c%c:%c",
-                              cdns2_trb_type_string(type),
-                              buffer, TRB_LEN(length),
-                              TRB_FIELD_TO_BURST(length),
-                              flags & TRB_CYCLE ? 'C' : 'c',
-                              flags & TRB_ISP ? 'I' : 'i',
-                              flags & TRB_CHAIN ? 'C' : 'c',
-                              flags & TRB_CHAIN ? 'H' : 'h',
-                              flags & TRB_IOC ? 'I' : 'i');
+               ret = scnprintf(str, size,
+                               "type: '%s', Buffer: %08x, length: %ld, burst len: %ld, "
+                               "flags %c:%c:%c%c:%c",
+                               cdns2_trb_type_string(type),
+                               buffer, TRB_LEN(length),
+                               TRB_FIELD_TO_BURST(length),
+                               flags & TRB_CYCLE ? 'C' : 'c',
+                               flags & TRB_ISP ? 'I' : 'i',
+                               flags & TRB_CHAIN ? 'C' : 'c',
+                               flags & TRB_CHAIN ? 'H' : 'h',
+                               flags & TRB_IOC ? 'I' : 'i');
                break;
        default:
-               ret = snprintf(str, size, "type '%s' -> raw %08x %08x %08x",
-                              cdns2_trb_type_string(type),
-                              buffer, length, flags);
+               ret = scnprintf(str, size, "type '%s' -> raw %08x %08x %08x",
+                               cdns2_trb_type_string(type),
+                               buffer, length, flags);
        }
 
-       if (ret >= size)
-               pr_info("CDNS2: buffer overflowed.\n");
+       if (ret == size - 1)
+               pr_info("CDNS2: buffer may be truncated.\n");
 
        return str;
 }
index 2693a10eb0c7cbc9503b4961c7471d16e6f8f17c..e8042c158f6dcd4d65858238ff36c8acd9a1c68d 100644 (file)
@@ -1360,7 +1360,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
        udc->ep0_dir = USB_DIR_IN;
        /* Borrow the per device status_req */
        req = udc->status_req;
-       /* Fill in the reqest structure */
+       /* Fill in the request structure */
        *((u16 *) req->req.buf) = cpu_to_le16(tmp);
 
        req->ep = ep;
@@ -2532,15 +2532,18 @@ err_kfree:
 /* Driver removal function
  * Free resources and finish pending transactions
  */
-static int fsl_udc_remove(struct platform_device *pdev)
+static void fsl_udc_remove(struct platform_device *pdev)
 {
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
        DECLARE_COMPLETION_ONSTACK(done);
 
-       if (!udc_controller)
-               return -ENODEV;
+       if (!udc_controller) {
+               dev_err(&pdev->dev,
+                       "Driver still in use but removing anyhow\n");
+               return;
+       }
 
        udc_controller->done = &done;
        usb_del_gadget_udc(&udc_controller->gadget);
@@ -2568,8 +2571,6 @@ static int fsl_udc_remove(struct platform_device *pdev)
         */
        if (pdata->exit)
                pdata->exit(pdev);
-
-       return 0;
 }
 
 /*-----------------------------------------------------------------
@@ -2667,7 +2668,7 @@ static const struct platform_device_id fsl_udc_devtype[] = {
 MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
 static struct platform_driver udc_driver = {
        .probe          = fsl_udc_probe,
-       .remove         = fsl_udc_remove,
+       .remove_new     = fsl_udc_remove,
        .id_table       = fsl_udc_devtype,
        /* these suspend and resume are not usb suspend and resume */
        .suspend        = fsl_udc_suspend,
index c6dfa7cccc11da1025302dab5ded2043a5dc806b..fb901be5dac1b7a4fe7fc4203c5757bccb8275a6 100644 (file)
@@ -2089,15 +2089,18 @@ static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
                                  ep->tailbuf, ep->tailbuf_paddr);
 }
 
-static int gr_remove(struct platform_device *pdev)
+static void gr_remove(struct platform_device *pdev)
 {
        struct gr_udc *dev = platform_get_drvdata(pdev);
        int i;
 
        if (dev->added)
                usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
-       if (dev->driver)
-               return -EBUSY;
+       if (dev->driver) {
+               dev_err(&pdev->dev,
+                       "Driver still in use but removing anyhow\n");
+               return;
+       }
 
        gr_dfs_delete(dev);
        dma_pool_destroy(dev->desc_pool);
@@ -2110,8 +2113,6 @@ static int gr_remove(struct platform_device *pdev)
                gr_ep_remove(dev, i, 0);
        for (i = 0; i < dev->nepi; i++)
                gr_ep_remove(dev, i, 1);
-
-       return 0;
 }
 static int gr_request_irq(struct gr_udc *dev, int irq)
 {
@@ -2248,7 +2249,7 @@ static struct platform_driver gr_driver = {
                .of_match_table = gr_match,
        },
        .probe = gr_probe,
-       .remove = gr_remove,
+       .remove_new = gr_remove,
 };
 module_platform_driver(gr_driver);
 
index a917cc9a32aba704d4c722c287972df2151a695b..d5f29f8fe481311d5c4278cc5a7313264e63852e 100644 (file)
@@ -3174,13 +3174,16 @@ i2c_fail:
        return retval;
 }
 
-static int lpc32xx_udc_remove(struct platform_device *pdev)
+static void lpc32xx_udc_remove(struct platform_device *pdev)
 {
        struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
 
        usb_del_gadget_udc(&udc->gadget);
-       if (udc->driver)
-               return -EBUSY;
+       if (udc->driver) {
+               dev_err(&pdev->dev,
+                       "Driver still in use but removing anyhow\n");
+               return;
+       }
 
        udc_clk_set(udc, 1);
        udc_disable(udc);
@@ -3194,8 +3197,6 @@ static int lpc32xx_udc_remove(struct platform_device *pdev)
                          udc->udca_v_base, udc->udca_p_base);
 
        clk_disable_unprepare(udc->usb_slv_clk);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM
@@ -3255,7 +3256,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
 
 static struct platform_driver lpc32xx_udc_driver = {
        .probe          = lpc32xx_udc_probe,
-       .remove         = lpc32xx_udc_remove,
+       .remove_new     = lpc32xx_udc_remove,
        .shutdown       = lpc32xx_udc_shutdown,
        .suspend        = lpc32xx_udc_suspend,
        .resume         = lpc32xx_udc_resume,
index d888dcda2bc86c6c39ef919f07e70d0554bad1a0..78308b64955dd1e42332d40913ae55026ee27ef0 100644 (file)
@@ -1451,7 +1451,7 @@ udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
 
        req = udc->status_req;
 
-       /* fill in the reqest structure */
+       /* fill in the request structure */
        if (empty == false) {
                *((u16 *) req->req.buf) = cpu_to_le16(status);
                req->req.length = 2;
index 4f8617210d852643e55a5b8463d0f03b6d9e9c78..169f72665739feca100e5a65786ddd6af2cbd675 100644 (file)
@@ -274,7 +274,6 @@ struct pch_udc_cfg_data {
  * @td_data:           for data request
  * @dev:               reference to device struct
  * @offset_addr:       offset address of ep register
- * @desc:              for this ep
  * @queue:             queue for requests
  * @num:               endpoint number
  * @in:                        endpoint is IN
index df0551ecc810c61c8d50672d5732f035a05ca23f..1ac26cb49ecf987b6153cf819fe7d321da507cb8 100644 (file)
@@ -2397,12 +2397,15 @@ static void pxa25x_udc_shutdown(struct platform_device *_dev)
        pullup_off();
 }
 
-static int pxa25x_udc_remove(struct platform_device *pdev)
+static void pxa25x_udc_remove(struct platform_device *pdev)
 {
        struct pxa25x_udc *dev = platform_get_drvdata(pdev);
 
-       if (dev->driver)
-               return -EBUSY;
+       if (dev->driver) {
+               dev_err(&pdev->dev,
+                       "Driver still in use but removing anyhow\n");
+               return;
+       }
 
        usb_del_gadget_udc(&dev->gadget);
        dev->pullup = 0;
@@ -2414,7 +2417,6 @@ static int pxa25x_udc_remove(struct platform_device *pdev)
                dev->transceiver = NULL;
 
        the_controller = NULL;
-       return 0;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -2472,7 +2474,7 @@ static int pxa25x_udc_resume(struct platform_device *dev)
 static struct platform_driver udc_driver = {
        .shutdown       = pxa25x_udc_shutdown,
        .probe          = pxa25x_udc_probe,
-       .remove         = pxa25x_udc_remove,
+       .remove_new     = pxa25x_udc_remove,
        .suspend        = pxa25x_udc_suspend,
        .resume         = pxa25x_udc_resume,
        .driver         = {
index d152d72de1269e09d22c0d0bc98bd4e9eb0c1e60..9fe4f48b18980c083e74c0b4ab7a263ae91d7915 100644 (file)
@@ -1158,12 +1158,12 @@ dump_eps(struct usb_hcd *hcd)
                end = dp + sizeof(ubuf);
                *dp = '\0';
                list_for_each_entry(urb, &ep->urb_list, urb_list) {
-                       ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
-                                      usb_pipetype(urb->pipe),
-                                      usb_urb_dir_in(urb) ? "IN" : "OUT",
-                                      urb->actual_length,
-                                      urb->transfer_buffer_length);
-                       if (ret < 0 || ret >= end - dp)
+                       ret = scnprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
+                                       usb_pipetype(urb->pipe),
+                                       usb_urb_dir_in(urb) ? "IN" : "OUT",
+                                       urb->actual_length,
+                                       urb->transfer_buffer_length);
+                       if (ret == end - dp - 1)
                                break;  /* error or buffer full */
                        dp += ret;
                }
@@ -1255,9 +1255,9 @@ max3421_handle_irqs(struct usb_hcd *hcd)
                        end = sbuf + sizeof(sbuf);
                        *dp = '\0';
                        for (i = 0; i < 16; ++i) {
-                               int ret = snprintf(dp, end - dp, " %lu",
-                                                  max3421_hcd->err_stat[i]);
-                               if (ret < 0 || ret >= end - dp)
+                               int ret = scnprintf(dp, end - dp, " %lu",
+                                                   max3421_hcd->err_stat[i]);
+                               if (ret == end - dp - 1)
                                        break;  /* error or buffer full */
                                dp += ret;
                        }
index b40d9238d4471494bf47249bff907bbcf8e2f209..d82935d31126dd020d941b7a7cd8c78a93f3392f 100644 (file)
@@ -6,9 +6,24 @@
  *
  * Author: Lu Baolu <baolu.lu@linux.intel.com>
  */
+#include <linux/bug.h>
+#include <linux/device.h>
 #include <linux/dma-mapping.h>
-#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
 #include <linux/nls.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include <asm/byteorder.h>
 
 #include "xhci.h"
 #include "xhci-trace.h"
@@ -28,7 +43,7 @@ static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
        if (!ring)
                return;
 
-       if (ring->first_seg && ring->first_seg->trbs) {
+       if (ring->first_seg) {
                dma_free_coherent(dev, TRB_SEGMENT_SIZE,
                                  ring->first_seg->trbs,
                                  ring->first_seg->dma);
@@ -374,13 +389,13 @@ static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
 
 static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
 {
-       memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
+       memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
 }
 
 static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
                    struct xhci_erst *erst, gfp_t flags)
 {
-       erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
+       erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
                                           &erst->erst_dma_addr, flags);
        if (!erst->entries)
                return -ENOMEM;
@@ -394,9 +409,8 @@ static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
 
 static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
 {
-       if (erst->entries)
-               dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
-                                 erst->entries, erst->erst_dma_addr);
+       dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
+                         erst->erst_dma_addr);
        erst->entries = NULL;
 }
 
@@ -495,7 +509,7 @@ static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
                goto ctx_fail;
 
        /* Allocate the string table: */
-       dbc->string_size = sizeof(struct dbc_str_descs);
+       dbc->string_size = sizeof(*dbc->string);
        dbc->string = dma_alloc_coherent(dev, dbc->string_size,
                                         &dbc->string_dma, flags);
        if (!dbc->string)
@@ -543,11 +557,8 @@ static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
 
        xhci_dbc_eps_exit(dbc);
 
-       if (dbc->string) {
-               dma_free_coherent(dbc->dev, dbc->string_size,
-                                 dbc->string, dbc->string_dma);
-               dbc->string = NULL;
-       }
+       dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
+       dbc->string = NULL;
 
        dbc_free_ctx(dbc->dev, dbc->ctx);
        dbc->ctx = NULL;
@@ -597,7 +608,7 @@ static int xhci_do_dbc_start(struct xhci_dbc *dbc)
 static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
 {
        if (dbc->state == DS_DISABLED)
-               return -1;
+               return -EINVAL;
 
        writel(0, &dbc->regs->control);
        dbc->state = DS_DISABLED;
@@ -650,11 +661,11 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_stop(dbc);
        spin_unlock_irqrestore(&dbc->lock, flags);
+       if (ret)
+               return;
 
-       if (!ret) {
-               xhci_dbc_mem_cleanup(dbc);
-               pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
-       }
+       xhci_dbc_mem_cleanup(dbc);
+       pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
 }
 
 static void
@@ -914,41 +925,29 @@ static void xhci_dbc_handle_events(struct work_struct *work)
        mod_delayed_work(system_wq, &dbc->event_work, 1);
 }
 
+static const char * const dbc_state_strings[DS_MAX] = {
+       [DS_DISABLED] = "disabled",
+       [DS_INITIALIZED] = "initialized",
+       [DS_ENABLED] = "enabled",
+       [DS_CONNECTED] = "connected",
+       [DS_CONFIGURED] = "configured",
+       [DS_STALLED] = "stalled",
+};
+
 static ssize_t dbc_show(struct device *dev,
                        struct device_attribute *attr,
                        char *buf)
 {
-       const char              *p;
        struct xhci_dbc         *dbc;
        struct xhci_hcd         *xhci;
 
        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;
 
-       switch (dbc->state) {
-       case DS_DISABLED:
-               p = "disabled";
-               break;
-       case DS_INITIALIZED:
-               p = "initialized";
-               break;
-       case DS_ENABLED:
-               p = "enabled";
-               break;
-       case DS_CONNECTED:
-               p = "connected";
-               break;
-       case DS_CONFIGURED:
-               p = "configured";
-               break;
-       case DS_STALLED:
-               p = "stalled";
-               break;
-       default:
-               p = "unknown";
-       }
+       if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
+               return sysfs_emit(buf, "unknown\n");
 
-       return sprintf(buf, "%s\n", p);
+       return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
 }
 
 static ssize_t dbc_store(struct device *dev,
@@ -961,9 +960,9 @@ static ssize_t dbc_store(struct device *dev,
        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;
 
-       if (!strncmp(buf, "enable", 6))
+       if (sysfs_streq(buf, "enable"))
                xhci_dbc_start(dbc);
-       else if (!strncmp(buf, "disable", 7))
+       else if (sysfs_streq(buf, "disable"))
                xhci_dbc_stop(dbc);
        else
                return -EINVAL;
@@ -981,7 +980,7 @@ static ssize_t dbc_idVendor_show(struct device *dev,
        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;
 
-       return sprintf(buf, "%04x\n", dbc->idVendor);
+       return sysfs_emit(buf, "%04x\n", dbc->idVendor);
 }
 
 static ssize_t dbc_idVendor_store(struct device *dev,
@@ -993,9 +992,11 @@ static ssize_t dbc_idVendor_store(struct device *dev,
        void __iomem            *ptr;
        u16                     value;
        u32                     dev_info;
+       int ret;
 
-       if (kstrtou16(buf, 0, &value))
-               return -EINVAL;
+       ret = kstrtou16(buf, 0, &value);
+       if (ret)
+               return ret;
 
        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;
@@ -1021,7 +1022,7 @@ static ssize_t dbc_idProduct_show(struct device *dev,
        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;
 
-       return sprintf(buf, "%04x\n", dbc->idProduct);
+       return sysfs_emit(buf, "%04x\n", dbc->idProduct);
 }
 
 static ssize_t dbc_idProduct_store(struct device *dev,
@@ -1033,9 +1034,11 @@ static ssize_t dbc_idProduct_store(struct device *dev,
        void __iomem            *ptr;
        u32                     dev_info;
        u16                     value;
+       int ret;
 
-       if (kstrtou16(buf, 0, &value))
-               return -EINVAL;
+       ret = kstrtou16(buf, 0, &value);
+       if (ret)
+               return ret;
 
        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;
@@ -1060,7 +1063,7 @@ static ssize_t dbc_bcdDevice_show(struct device *dev,
        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;
 
-       return sprintf(buf, "%04x\n", dbc->bcdDevice);
+       return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
 }
 
 static ssize_t dbc_bcdDevice_store(struct device *dev,
@@ -1072,9 +1075,11 @@ static ssize_t dbc_bcdDevice_store(struct device *dev,
        void __iomem *ptr;
        u32 dev_info;
        u16 value;
+       int ret;
 
-       if (kstrtou16(buf, 0, &value))
-               return -EINVAL;
+       ret = kstrtou16(buf, 0, &value);
+       if (ret)
+               return ret;
 
        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;
@@ -1100,7 +1105,7 @@ static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;
 
-       return sprintf(buf, "%02x\n", dbc->bInterfaceProtocol);
+       return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
 }
 
 static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
@@ -1114,9 +1119,13 @@ static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
        u8 value;
        int ret;
 
-       /* bInterfaceProtocol is 8 bit, but xhci only supports values 0 and 1 */
+       /* bInterfaceProtocol is 8 bit, but... */
        ret = kstrtou8(buf, 0, &value);
-       if (ret || value > 1)
+       if (ret)
+               return ret;
+
+       /* ...xhci only supports values 0 and 1 */
+       if (value > 1)
                return -EINVAL;
 
        xhci = hcd_to_xhci(dev_get_drvdata(dev));
@@ -1139,7 +1148,7 @@ static DEVICE_ATTR_RW(dbc_idProduct);
 static DEVICE_ATTR_RW(dbc_bcdDevice);
 static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
 
-static struct attribute *dbc_dev_attributes[] = {
+static struct attribute *dbc_dev_attrs[] = {
        &dev_attr_dbc.attr,
        &dev_attr_dbc_idVendor.attr,
        &dev_attr_dbc_idProduct.attr,
@@ -1147,10 +1156,7 @@ static struct attribute *dbc_dev_attributes[] = {
        &dev_attr_dbc_bInterfaceProtocol.attr,
        NULL
 };
-
-static const struct attribute_group dbc_dev_attrib_grp = {
-       .attrs = dbc_dev_attributes,
-};
+ATTRIBUTE_GROUPS(dbc_dev);
 
 struct xhci_dbc *
 xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
@@ -1176,7 +1182,7 @@ xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *
        INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
        spin_lock_init(&dbc->lock);
 
-       ret = sysfs_create_group(&dev->kobj, &dbc_dev_attrib_grp);
+       ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
        if (ret)
                goto err;
 
@@ -1195,7 +1201,7 @@ void xhci_dbc_remove(struct xhci_dbc *dbc)
        xhci_dbc_stop(dbc);
 
        /* remove sysfs files */
-       sysfs_remove_group(&dbc->dev->kobj, &dbc_dev_attrib_grp);
+       sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);
 
        kfree(dbc);
 }
index 51a7ab3ba0cac0297c4576d295b6c49551dd2d6c..e39e3ae1677aeb48bfc00ad8f32d9157901b1dc1 100644 (file)
@@ -82,6 +82,7 @@ enum dbc_state {
        DS_CONNECTED,
        DS_CONFIGURED,
        DS_STALLED,
+       DS_MAX
 };
 
 struct dbc_ep {
index 6d142cd61bd6b9434360d788d4a41d51cb5a4195..f8ba15e7c225c2a079ae31c74c93a3ddc82a08ee 100644 (file)
@@ -693,7 +693,7 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
                                     "command-ring",
                                     xhci->debugfs_root);
 
-       xhci_debugfs_create_ring_dir(xhci, &xhci->interrupter->event_ring,
+       xhci_debugfs_create_ring_dir(xhci, &xhci->interrupters[0]->event_ring,
                                     "event-ring",
                                     xhci->debugfs_root);
 
index 62116586848b84a160b11fcba5a4f2ba035b10c9..a7716202a8dd58d74f3d31fd48d0833de89db2be 100644 (file)
@@ -323,6 +323,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
         */
        ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
 }
+EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
 
 /* Allocate segments and link them for a ring */
 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
@@ -1739,6 +1740,8 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
        }
 
        command->status = 0;
+       /* set default timeout to 5000 ms */
+       command->timeout_ms = XHCI_CMD_DEFAULT_TIMEOUT;
        INIT_LIST_HEAD(&command->cmd_list);
        return command;
 }
@@ -1853,6 +1856,31 @@ xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
        kfree(ir);
 }
 
+void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       unsigned int intr_num;
+
+       spin_lock_irq(&xhci->lock);
+
+       /* interrupter 0 is primary interrupter, don't touch it */
+       if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) {
+               xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n");
+               spin_unlock_irq(&xhci->lock);
+               return;
+       }
+
+       intr_num = ir->intr_num;
+
+       xhci_remove_interrupter(xhci, ir);
+       xhci->interrupters[intr_num] = NULL;
+
+       spin_unlock_irq(&xhci->lock);
+
+       xhci_free_interrupter(xhci, ir);
+}
+EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter);
+
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
        struct device   *dev = xhci_to_hcd(xhci)->self.sysdev;
@@ -1860,10 +1888,14 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 
        cancel_delayed_work_sync(&xhci->cmd_timer);
 
-       xhci_remove_interrupter(xhci, xhci->interrupter);
-       xhci_free_interrupter(xhci, xhci->interrupter);
-       xhci->interrupter = NULL;
-       xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
+       for (i = 0; i < xhci->max_interrupters; i++) {
+               if (xhci->interrupters[i]) {
+                       xhci_remove_interrupter(xhci, xhci->interrupters[i]);
+                       xhci_free_interrupter(xhci, xhci->interrupters[i]);
+                       xhci->interrupters[i] = NULL;
+               }
+       }
+       xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed interrupters");
 
        if (xhci->cmd_ring)
                xhci_ring_free(xhci, xhci->cmd_ring);
@@ -1933,6 +1965,7 @@ no_bw:
        for (i = 0; i < xhci->num_port_caps; i++)
                kfree(xhci->port_caps[i].psi);
        kfree(xhci->port_caps);
+       kfree(xhci->interrupters);
        xhci->num_port_caps = 0;
 
        xhci->usb2_rhub.ports = NULL;
@@ -1941,6 +1974,7 @@ no_bw:
        xhci->rh_bw = NULL;
        xhci->ext_caps = NULL;
        xhci->port_caps = NULL;
+       xhci->interrupters = NULL;
 
        xhci->page_size = 0;
        xhci->page_shift = 0;
@@ -2246,18 +2280,20 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 }
 
 static struct xhci_interrupter *
-xhci_alloc_interrupter(struct xhci_hcd *xhci, gfp_t flags)
+xhci_alloc_interrupter(struct xhci_hcd *xhci, int segs, gfp_t flags)
 {
        struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
        struct xhci_interrupter *ir;
-       unsigned int num_segs;
+       unsigned int num_segs = segs;
        int ret;
 
        ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
        if (!ir)
                return NULL;
 
-       num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2),
+       /* number of ring segments should be greater than 0 */
+       if (segs <= 0)
+               num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2),
                         ERST_MAX_SEGS);
 
        ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0,
@@ -2286,12 +2322,19 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
        u64 erst_base;
        u32 erst_size;
 
-       if (intr_num > xhci->max_interrupters) {
+       if (intr_num >= xhci->max_interrupters) {
                xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n",
                          intr_num, xhci->max_interrupters);
                return -EINVAL;
        }
 
+       if (xhci->interrupters[intr_num]) {
+               xhci_warn(xhci, "Interrupter %d already set up\n", intr_num);
+               return -EINVAL;
+       }
+
+       xhci->interrupters[intr_num] = ir;
+       ir->intr_num = intr_num;
        ir->ir_set = &xhci->run_regs->ir_set[intr_num];
 
        /* set ERST count with the number of entries in the segment table */
@@ -2311,10 +2354,52 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
        return 0;
 }
 
+struct xhci_interrupter *
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct xhci_interrupter *ir;
+       unsigned int i;
+       int err = -ENOSPC;
+
+       if (!xhci->interrupters || xhci->max_interrupters <= 1)
+               return NULL;
+
+       ir = xhci_alloc_interrupter(xhci, num_seg, GFP_KERNEL);
+       if (!ir)
+               return NULL;
+
+       spin_lock_irq(&xhci->lock);
+
+       /* Find available secondary interrupter, interrupter 0 is reserved for primary */
+       for (i = 1; i < xhci->max_interrupters; i++) {
+               if (xhci->interrupters[i] == NULL) {
+                       err = xhci_add_interrupter(xhci, ir, i);
+                       break;
+               }
+       }
+
+       spin_unlock_irq(&xhci->lock);
+
+       if (err) {
+               xhci_warn(xhci, "Failed to add secondary interrupter, max interrupters %d\n",
+                         xhci->max_interrupters);
+               xhci_free_interrupter(xhci, ir);
+               return NULL;
+       }
+
+       xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
+                i, xhci->max_interrupters);
+
+       return ir;
+}
+EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
+
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
-       dma_addr_t      dma;
+       struct xhci_interrupter *ir;
        struct device   *dev = xhci_to_hcd(xhci)->self.sysdev;
+       dma_addr_t      dma;
        unsigned int    val, val2;
        u64             val_64;
        u32             page_size, temp;
@@ -2438,11 +2523,14 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
        /* Allocate and set up primary interrupter 0 with an event ring. */
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                       "Allocating primary event ring");
-       xhci->interrupter = xhci_alloc_interrupter(xhci, flags);
-       if (!xhci->interrupter)
+       xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters),
+                                         flags, dev_to_node(dev));
+
+       ir = xhci_alloc_interrupter(xhci, 0, flags);
+       if (!ir)
                goto fail;
 
-       if (xhci_add_interrupter(xhci, xhci->interrupter, 0))
+       if (xhci_add_interrupter(xhci, ir, 0))
                goto fail;
 
        xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
index bbdf1b0b7be11e668c0194e42906913bcda2ef79..3252e3d2d79cd68e875d6965f3cda650dffa456a 100644 (file)
@@ -7,6 +7,7 @@
  *  Chunfeng Yun <chunfeng.yun@mediatek.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/dma-mapping.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
@@ -73,6 +74,9 @@
 #define FRMCNT_LEV1_RANG       (0x12b << 8)
 #define FRMCNT_LEV1_RANG_MASK  GENMASK(19, 8)
 
+#define HSCH_CFG1              0x960
+#define SCH3_RXFIFO_DEPTH_MASK GENMASK(21, 20)
+
 #define SS_GEN2_EOF_CFG                0x990
 #define SSG2EOF_OFFSET         0x3c
 
 #define SSC_IP_SLEEP_EN        BIT(4)
 #define SSC_SPM_INT_EN         BIT(1)
 
+#define SCH_FIFO_TO_KB(x)      ((x) >> 10)
+
 enum ssusb_uwk_vers {
        SSUSB_UWK_V1 = 1,
        SSUSB_UWK_V2,
@@ -165,6 +171,35 @@ static void xhci_mtk_set_frame_interval(struct xhci_hcd_mtk *mtk)
        writel(value, hcd->regs + SS_GEN2_EOF_CFG);
 }
 
+/*
+ * workaround: usb3.2 gen1 isoc rx hw issue
+ * host sends out an unexpected ACK after the device finishes a burst transfer with
+ * a short packet.
+ */
+static void xhci_mtk_rxfifo_depth_set(struct xhci_hcd_mtk *mtk)
+{
+       struct usb_hcd *hcd = mtk->hcd;
+       u32 value;
+
+       if (!mtk->rxfifo_depth)
+               return;
+
+       value = readl(hcd->regs + HSCH_CFG1);
+       value &= ~SCH3_RXFIFO_DEPTH_MASK;
+       value |= FIELD_PREP(SCH3_RXFIFO_DEPTH_MASK,
+                           SCH_FIFO_TO_KB(mtk->rxfifo_depth) - 1);
+       writel(value, hcd->regs + HSCH_CFG1);
+}
+
+static void xhci_mtk_init_quirk(struct xhci_hcd_mtk *mtk)
+{
+       /* workaround only for mt8195 */
+       xhci_mtk_set_frame_interval(mtk);
+
+       /* workaround for SoCs whose SSUSB IP predates about IPM v1.6.0 */
+       xhci_mtk_rxfifo_depth_set(mtk);
+}
+
 static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
 {
        struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
@@ -448,8 +483,7 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
                if (ret)
                        return ret;
 
-               /* workaround only for mt8195 */
-               xhci_mtk_set_frame_interval(mtk);
+               xhci_mtk_init_quirk(mtk);
        }
 
        ret = xhci_gen_setup(hcd, xhci_mtk_quirks);
@@ -527,6 +561,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
        of_property_read_u32(node, "mediatek,u2p-dis-msk",
                             &mtk->u2p_dis_msk);
 
+       of_property_read_u32(node, "rx-fifo-depth", &mtk->rxfifo_depth);
+
        ret = usb_wakeup_of_property_parse(mtk, node);
        if (ret) {
                dev_err(dev, "failed to parse uwk property\n");
index 39f7ae7d30871d074e832a44fcd6dc9708d971c0..f5e2bd66bb1b2a58515ce1cf8b8d011e10df3cdb 100644 (file)
@@ -171,6 +171,8 @@ struct xhci_hcd_mtk {
        struct regmap *uwk;
        u32 uwk_reg_base;
        u32 uwk_vers;
+       /* quirk */
+       u32 rxfifo_depth;
 };
 
 static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd)
index d6fc08e5db8fbd410c7b547782f44fa1eff23271..b534ca9752be43cb304f69c94b70b1fef5bc75c8 100644 (file)
@@ -95,10 +95,9 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 
        if (hcd->msix_enabled) {
                struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-               int i;
 
-               for (i = 0; i < xhci->msix_count; i++)
-                       synchronize_irq(pci_irq_vector(pdev, i));
+               /* for now, the driver only supports one primary interrupter */
+               synchronize_irq(pci_irq_vector(pdev, 0));
        }
 }
 
@@ -112,100 +111,18 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
        if (hcd->irq > 0)
                return;
 
-       if (hcd->msix_enabled) {
-               int i;
-
-               for (i = 0; i < xhci->msix_count; i++)
-                       free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
-       } else {
-               free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
-       }
-
+       free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
        pci_free_irq_vectors(pdev);
        hcd->msix_enabled = 0;
 }
 
-/*
- * Set up MSI
- */
-static int xhci_setup_msi(struct xhci_hcd *xhci)
-{
-       int ret;
-       /*
-        * TODO:Check with MSI Soc for sysdev
-        */
-       struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
-
-       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
-       if (ret < 0) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-                               "failed to allocate MSI entry");
-               return ret;
-       }
-
-       ret = request_irq(pdev->irq, xhci_msi_irq,
-                               0, "xhci_hcd", xhci_to_hcd(xhci));
-       if (ret) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-                               "disable MSI interrupt");
-               pci_free_irq_vectors(pdev);
-       }
-
-       return ret;
-}
-
-/*
- * Set up MSI-X
- */
-static int xhci_setup_msix(struct xhci_hcd *xhci)
-{
-       int i, ret;
-       struct usb_hcd *hcd = xhci_to_hcd(xhci);
-       struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-
-       /*
-        * calculate number of msi-x vectors supported.
-        * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
-        *   with max number of interrupters based on the xhci HCSPARAMS1.
-        * - num_online_cpus: maximum msi-x vectors per CPUs core.
-        *   Add additional 1 vector to ensure always available interrupt.
-        */
-       xhci->msix_count = min(num_online_cpus() + 1,
-                               HCS_MAX_INTRS(xhci->hcs_params1));
-
-       ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
-                       PCI_IRQ_MSIX);
-       if (ret < 0) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-                               "Failed to enable MSI-X");
-               return ret;
-       }
-
-       for (i = 0; i < xhci->msix_count; i++) {
-               ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
-                               "xhci_hcd", xhci_to_hcd(xhci));
-               if (ret)
-                       goto disable_msix;
-       }
-
-       hcd->msix_enabled = 1;
-       return ret;
-
-disable_msix:
-       xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
-       while (--i >= 0)
-               free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
-       pci_free_irq_vectors(pdev);
-       return ret;
-}
-
+/* Try enabling MSI-X with MSI and legacy IRQ as fallback */
 static int xhci_try_enable_msi(struct usb_hcd *hcd)
 {
+       struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-       struct pci_dev  *pdev;
        int ret;
 
-       pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        /*
         * Some Fresco Logic host controllers advertise MSI, but fail to
         * generate interrupts.  Don't even try to enable MSI.
@@ -218,32 +135,53 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
                free_irq(hcd->irq, hcd);
        hcd->irq = 0;
 
-       ret = xhci_setup_msix(xhci);
-       if (ret)
-               /* fall back to msi*/
-               ret = xhci_setup_msi(xhci);
+       /*
+        * calculate number of MSI-X vectors supported.
+        * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
+        *   with max number of interrupters based on the xhci HCSPARAMS1.
+        *   num_online_cpus: maximum MSI-X vectors per CPU core.
+        *   Add one additional vector to ensure an interrupt is always available.
+        */
+       xhci->nvecs = min(num_online_cpus() + 1,
+                         HCS_MAX_INTRS(xhci->hcs_params1));
 
-       if (!ret) {
-               hcd->msi_enabled = 1;
-               return 0;
+       /* TODO: Check with MSI Soc for sysdev */
+       xhci->nvecs = pci_alloc_irq_vectors(pdev, 1, xhci->nvecs,
+                                           PCI_IRQ_MSIX | PCI_IRQ_MSI);
+       if (xhci->nvecs < 0) {
+               xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+                              "failed to allocate IRQ vectors");
+               goto legacy_irq;
        }
 
+       ret = request_irq(pci_irq_vector(pdev, 0), xhci_msi_irq, 0, "xhci_hcd",
+                         xhci_to_hcd(xhci));
+       if (ret)
+               goto free_irq_vectors;
+
+       hcd->msi_enabled = 1;
+       hcd->msix_enabled = pdev->msix_enabled;
+       return 0;
+
+free_irq_vectors:
+       xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable %s interrupt",
+                      pdev->msix_enabled ? "MSI-X" : "MSI");
+       pci_free_irq_vectors(pdev);
+
+legacy_irq:
        if (!pdev->irq) {
                xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
                return -EINVAL;
        }
 
- legacy_irq:
        if (!strlen(hcd->irq_descr))
                snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
                         hcd->driver->description, hcd->self.busnum);
 
-       /* fall back to legacy interrupt*/
-       ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
-                       hcd->irq_descr, hcd);
+       /* fall back to legacy interrupt */
+       ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, hcd->irq_descr, hcd);
        if (ret) {
-               xhci_err(xhci, "request interrupt %d failed\n",
-                               pdev->irq);
+               xhci_err(xhci, "request interrupt %d failed\n", pdev->irq);
                return ret;
        }
        hcd->irq = pdev->irq;
index 732cdeb739202e112a4bfd0a00b63dcae7c984d2..3d071b8753088a5437c2e9f82031a6db4ad91208 100644 (file)
@@ -130,6 +130,9 @@ static const struct of_device_id usb_xhci_of_match[] = {
        }, {
                .compatible = "brcm,xhci-brcm-v2",
                .data = &xhci_plat_brcm,
+       }, {
+               .compatible = "brcm,bcm2711-xhci",
+               .data = &xhci_plat_brcm,
        }, {
                .compatible = "brcm,bcm7445-xhci",
                .data = &xhci_plat_brcm,
@@ -250,6 +253,9 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
                if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
                        xhci->quirks |= XHCI_BROKEN_PORT_PED;
 
+               if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk"))
+                       xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK;
+
                device_property_read_u32(tmpdev, "imod-interval-ns",
                                         &xhci->imod_interval);
        }
@@ -433,7 +439,7 @@ void xhci_plat_remove(struct platform_device *dev)
 }
 EXPORT_SYMBOL_GPL(xhci_plat_remove);
 
-static int __maybe_unused xhci_plat_suspend(struct device *dev)
+static int xhci_plat_suspend(struct device *dev)
 {
        struct usb_hcd  *hcd = dev_get_drvdata(dev);
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -461,7 +467,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
        return 0;
 }
 
-static int __maybe_unused xhci_plat_resume(struct device *dev)
+static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg)
 {
        struct usb_hcd  *hcd = dev_get_drvdata(dev);
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -483,7 +489,7 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
        if (ret)
                goto disable_clks;
 
-       ret = xhci_resume(xhci, PMSG_RESUME);
+       ret = xhci_resume(xhci, pmsg);
        if (ret)
                goto disable_clks;
 
@@ -502,6 +508,16 @@ disable_clks:
        return ret;
 }
 
+static int xhci_plat_resume(struct device *dev)
+{
+       return xhci_plat_resume_common(dev, PMSG_RESUME);
+}
+
+static int xhci_plat_restore(struct device *dev)
+{
+       return xhci_plat_resume_common(dev, PMSG_RESTORE);
+}
+
 static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
 {
        struct usb_hcd  *hcd = dev_get_drvdata(dev);
@@ -524,7 +540,12 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
 }
 
 const struct dev_pm_ops xhci_plat_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
+       .suspend = pm_sleep_ptr(xhci_plat_suspend),
+       .resume = pm_sleep_ptr(xhci_plat_resume),
+       .freeze = pm_sleep_ptr(xhci_plat_suspend),
+       .thaw = pm_sleep_ptr(xhci_plat_resume),
+       .poweroff = pm_sleep_ptr(xhci_plat_suspend),
+       .restore = pm_sleep_ptr(xhci_plat_restore),
 
        SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend,
                           xhci_plat_runtime_resume,
index f3b5e6345858c4485c321c674559c8a60f3221eb..f0d8a607ff214f86ba33b2e9126ccb186e6c1853 100644 (file)
@@ -366,9 +366,10 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
        readl(&xhci->dba->doorbell[0]);
 }
 
-static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
+static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
 {
-       return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
+       return mod_delayed_work(system_wq, &xhci->cmd_timer,
+                       msecs_to_jiffies(xhci->current_cmd->timeout_ms));
 }
 
 static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
@@ -412,7 +413,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
        if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
            !(xhci->xhc_state & XHCI_STATE_DYING)) {
                xhci->current_cmd = cur_cmd;
-               xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+               xhci_mod_cmd_timer(xhci);
                xhci_ring_cmd_db(xhci);
        }
 }
@@ -1787,7 +1788,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
        if (!list_is_singular(&xhci->cmd_list)) {
                xhci->current_cmd = list_first_entry(&cmd->cmd_list,
                                                struct xhci_command, cmd_list);
-               xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+               xhci_mod_cmd_timer(xhci);
        } else if (xhci->current_cmd == cmd) {
                xhci->current_cmd = NULL;
        }
@@ -2375,6 +2376,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
        /* handle completion code */
        switch (trb_comp_code) {
        case COMP_SUCCESS:
+               /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
+               if (td->error_mid_td)
+                       break;
                if (remaining) {
                        frame->status = short_framestatus;
                        if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
@@ -2390,9 +2394,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
        case COMP_BANDWIDTH_OVERRUN_ERROR:
                frame->status = -ECOMM;
                break;
-       case COMP_ISOCH_BUFFER_OVERRUN:
        case COMP_BABBLE_DETECTED_ERROR:
+               sum_trbs_for_length = true;
+               fallthrough;
+       case COMP_ISOCH_BUFFER_OVERRUN:
                frame->status = -EOVERFLOW;
+               if (ep_trb != td->last_trb)
+                       td->error_mid_td = true;
                break;
        case COMP_INCOMPATIBLE_DEVICE_ERROR:
        case COMP_STALL_ERROR:
@@ -2400,8 +2408,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                break;
        case COMP_USB_TRANSACTION_ERROR:
                frame->status = -EPROTO;
+               sum_trbs_for_length = true;
                if (ep_trb != td->last_trb)
-                       return 0;
+                       td->error_mid_td = true;
                break;
        case COMP_STOPPED:
                sum_trbs_for_length = true;
@@ -2421,6 +2430,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                break;
        }
 
+       if (td->urb_length_set)
+               goto finish_td;
+
        if (sum_trbs_for_length)
                frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
                        ep_trb_len - remaining;
@@ -2429,6 +2441,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
 
        td->urb->actual_length += frame->actual_length;
 
+finish_td:
+       /* Don't give back TD yet if we encountered an error mid TD */
+       if (td->error_mid_td && ep_trb != td->last_trb) {
+               xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
+               td->urb_length_set = true;
+               return 0;
+       }
+
        return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }
 
@@ -2807,17 +2827,51 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                }
 
                if (!ep_seg) {
-                       if (!ep->skip ||
-                           !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
-                               /* Some host controllers give a spurious
-                                * successful event after a short transfer.
-                                * Ignore it.
-                                */
-                               if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
-                                               ep_ring->last_td_was_short) {
-                                       ep_ring->last_td_was_short = false;
-                                       goto cleanup;
+
+                       if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+                               skip_isoc_td(xhci, td, ep, status);
+                               goto cleanup;
+                       }
+
+                       /*
+                        * Some hosts give a spurious success event after a short
+                        * transfer. Ignore it.
+                        */
+                       if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+                           ep_ring->last_td_was_short) {
+                               ep_ring->last_td_was_short = false;
+                               goto cleanup;
+                       }
+
+                       /*
+                        * xhci 4.10.2 states isoc endpoints should continue
+                        * processing the next TD if there was an error mid TD.
+                        * So hosts like NEC don't generate an event for the last
+                        * isoc TRB even if the IOC flag is set.
+                        * xhci 4.9.1 states that if there are errors in multi-TRB
+                        * TDs the xHC should generate an error for that TRB, and if
+                        * the xHC proceeds to the next TD it should generate an
+                        * event for any TRB with the IOC flag on the way. Other
+                        * hosts follow this. So this event might be for the next TD.
+                        */
+                       if (td->error_mid_td &&
+                           !list_is_last(&td->td_list, &ep_ring->td_list)) {
+                               struct xhci_td *td_next = list_next_entry(td, td_list);
+
+                               ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
+                                                  td_next->last_trb, ep_trb_dma, false);
+                               if (ep_seg) {
+                                       /* give back previous TD, start handling new */
+                                       xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
+                                       ep_ring->dequeue = td->last_trb;
+                                       ep_ring->deq_seg = td->last_trb_seg;
+                                       inc_deq(xhci, ep_ring);
+                                       xhci_td_cleanup(xhci, td, ep_ring, td->status);
+                                       td = td_next;
                                }
+                       }
+
+                       if (!ep_seg) {
                                /* HC is busted, give up! */
                                xhci_err(xhci,
                                        "ERROR Transfer event TRB DMA ptr not "
@@ -2829,9 +2883,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                          ep_trb_dma, true);
                                return -ESHUTDOWN;
                        }
-
-                       skip_isoc_td(xhci, td, ep, status);
-                       goto cleanup;
                }
                if (trb_comp_code == COMP_SHORT_PACKET)
                        ep_ring->last_td_was_short = true;
@@ -3060,7 +3111,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
        writel(status, &xhci->op_regs->status);
 
        /* This is the handler of the primary interrupter */
-       ir = xhci->interrupter;
+       ir = xhci->interrupters[0];
        if (!hcd->msi_enabled) {
                u32 irq_pending;
                irq_pending = readl(&ir->ir_set->irq_pending);
@@ -4287,7 +4338,7 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
        /* if there are no other commands queued we start the timeout timer */
        if (list_empty(&xhci->cmd_list)) {
                xhci->current_cmd = cmd;
-               xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+               xhci_mod_cmd_timer(xhci);
        }
 
        list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
index 884b0898d9c9577c4554ac9559a3a064caefd936..c057c42c36f4cc9385af591c2921f77dac62c21f 100644 (file)
@@ -480,7 +480,7 @@ static int xhci_init(struct usb_hcd *hcd)
 
 static int xhci_run_finished(struct xhci_hcd *xhci)
 {
-       struct xhci_interrupter *ir = xhci->interrupter;
+       struct xhci_interrupter *ir = xhci->interrupters[0];
        unsigned long   flags;
        u32             temp;
 
@@ -532,7 +532,7 @@ int xhci_run(struct usb_hcd *hcd)
        u64 temp_64;
        int ret;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-       struct xhci_interrupter *ir = xhci->interrupter;
+       struct xhci_interrupter *ir = xhci->interrupters[0];
        /* Start the xHCI host controller running only after the USB 2.0 roothub
         * is setup.
         */
@@ -596,7 +596,7 @@ void xhci_stop(struct usb_hcd *hcd)
 {
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-       struct xhci_interrupter *ir = xhci->interrupter;
+       struct xhci_interrupter *ir = xhci->interrupters[0];
 
        mutex_lock(&xhci->mutex);
 
@@ -692,36 +692,51 @@ EXPORT_SYMBOL_GPL(xhci_shutdown);
 #ifdef CONFIG_PM
 static void xhci_save_registers(struct xhci_hcd *xhci)
 {
-       struct xhci_interrupter *ir = xhci->interrupter;
+       struct xhci_interrupter *ir;
+       unsigned int i;
 
        xhci->s3.command = readl(&xhci->op_regs->command);
        xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
 
-       if (!ir)
-               return;
+       /* save both primary and all secondary interrupters */
+       /* FIXME: should we lock to prevent a race with removing a secondary interrupter? */
+       for (i = 0; i < xhci->max_interrupters; i++) {
+               ir = xhci->interrupters[i];
+               if (!ir)
+                       continue;
 
-       ir->s3_erst_size = readl(&ir->ir_set->erst_size);
-       ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
-       ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
-       ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
-       ir->s3_irq_control = readl(&ir->ir_set->irq_control);
+               ir->s3_erst_size = readl(&ir->ir_set->erst_size);
+               ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
+               ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+               ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
+               ir->s3_irq_control = readl(&ir->ir_set->irq_control);
+       }
 }
 
 static void xhci_restore_registers(struct xhci_hcd *xhci)
 {
-       struct xhci_interrupter *ir = xhci->interrupter;
+       struct xhci_interrupter *ir;
+       unsigned int i;
 
        writel(xhci->s3.command, &xhci->op_regs->command);
        writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
        writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
-       writel(ir->s3_erst_size, &ir->ir_set->erst_size);
-       xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
-       xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
-       writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
-       writel(ir->s3_irq_control, &ir->ir_set->irq_control);
+
+       /* FIXME should we lock to protect against freeing of interrupters */
+       for (i = 0; i < xhci->max_interrupters; i++) {
+               ir = xhci->interrupters[i];
+               if (!ir)
+                       continue;
+
+               writel(ir->s3_erst_size, &ir->ir_set->erst_size);
+               xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
+               xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
+               writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
+               writel(ir->s3_irq_control, &ir->ir_set->irq_control);
+       }
 }
 
 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
@@ -1084,7 +1099,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
                xhci_dbg(xhci, "// Disabling event ring interrupts\n");
                temp = readl(&xhci->op_regs->status);
                writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
-               xhci_disable_interrupter(xhci->interrupter);
+               xhci_disable_interrupter(xhci->interrupters[0]);
 
                xhci_dbg(xhci, "cleaning up memory\n");
                xhci_mem_cleanup(xhci);
@@ -1438,10 +1453,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
  * descriptor.  If the usb_device's max packet size changes after that point,
  * we need to issue an evaluate context command and wait on it.
  */
-static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
-               unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
+static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
 {
-       struct xhci_container_ctx *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_command *command;
@@ -1449,11 +1462,15 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
        int hw_max_packet_size;
        int ret = 0;
 
-       out_ctx = xhci->devs[slot_id]->out_ctx;
-       ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+       ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0);
        hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
-       max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
-       if (hw_max_packet_size != max_packet_size) {
+       max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc);
+
+       if (hw_max_packet_size == max_packet_size)
+               return 0;
+
+       switch (max_packet_size) {
+       case 8: case 16: case 32: case 64: case 9:
                xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
                                "Max Packet Size for ep 0 changed.");
                xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
@@ -1465,28 +1482,22 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
                                "Issuing evaluate context command.");
 
-               /* Set up the input context flags for the command */
-               /* FIXME: This won't work if a non-default control endpoint
-                * changes max packet sizes.
-                */
-
-               command = xhci_alloc_command(xhci, true, mem_flags);
+               command = xhci_alloc_command(xhci, true, GFP_KERNEL);
                if (!command)
                        return -ENOMEM;
 
-               command->in_ctx = xhci->devs[slot_id]->in_ctx;
+               command->in_ctx = vdev->in_ctx;
                ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
                if (!ctrl_ctx) {
                        xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
                                        __func__);
                        ret = -ENOMEM;
-                       goto command_cleanup;
+                       break;
                }
                /* Set up the modified control endpoint 0 */
-               xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
-                               xhci->devs[slot_id]->out_ctx, ep_index);
+               xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0);
 
-               ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+               ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0);
                ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
                ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
@@ -1494,17 +1505,20 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
                ctrl_ctx->drop_flags = 0;
 
-               ret = xhci_configure_endpoint(xhci, urb->dev, command,
-                               true, false);
-
-               /* Clean up the input context for later use by bandwidth
-                * functions.
-                */
+               ret = xhci_configure_endpoint(xhci, vdev->udev, command,
+                                             true, false);
+               /* Clean up the input context for later use by bandwidth functions */
                ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
-command_cleanup:
-               kfree(command->completion);
-               kfree(command);
+               break;
+       default:
+               dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n",
+                       max_packet_size);
+               return -EINVAL;
        }
+
+       kfree(command->completion);
+       kfree(command);
+
        return ret;
 }
 
@@ -1522,24 +1536,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
        struct urb_priv *urb_priv;
        int num_tds;
 
-       if (!urb)
-               return -EINVAL;
-       ret = xhci_check_args(hcd, urb->dev, urb->ep,
-                                       true, true, __func__);
-       if (ret <= 0)
-               return ret ? ret : -EINVAL;
-
-       slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
-       ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
-
-       if (!HCD_HW_ACCESSIBLE(hcd))
-               return -ESHUTDOWN;
-
-       if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
-               xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
-               return -ENODEV;
-       }
 
        if (usb_endpoint_xfer_isoc(&urb->ep->desc))
                num_tds = urb->number_of_packets;
@@ -1561,22 +1558,27 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
 
        trace_xhci_urb_enqueue(urb);
 
-       if (usb_endpoint_xfer_control(&urb->ep->desc)) {
-               /* Check to see if the max packet size for the default control
-                * endpoint changed during FS device enumeration
-                */
-               if (urb->dev->speed == USB_SPEED_FULL) {
-                       ret = xhci_check_maxpacket(xhci, slot_id,
-                                       ep_index, urb, mem_flags);
-                       if (ret < 0) {
-                               xhci_urb_free_priv(urb_priv);
-                               urb->hcpriv = NULL;
-                               return ret;
-                       }
-               }
+       spin_lock_irqsave(&xhci->lock, flags);
+
+       ret = xhci_check_args(hcd, urb->dev, urb->ep,
+                             true, true, __func__);
+       if (ret <= 0) {
+               ret = ret ? ret : -EINVAL;
+               goto free_priv;
        }
 
-       spin_lock_irqsave(&xhci->lock, flags);
+       slot_id = urb->dev->slot_id;
+
+       if (!HCD_HW_ACCESSIBLE(hcd)) {
+               ret = -ESHUTDOWN;
+               goto free_priv;
+       }
+
+       if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
+               xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
+               ret = -ENODEV;
+               goto free_priv;
+       }
 
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
@@ -1584,6 +1586,9 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
                ret = -ESHUTDOWN;
                goto free_priv;
        }
+
+       ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
+
        if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
                xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
                          *ep_state);
@@ -3087,6 +3092,9 @@ done:
  * of an endpoint that isn't in the halted state this function will issue a
  * configure endpoint command with the Drop and Add bits set for the target
  * endpoint. Refer to the additional note in xhci spcification section 4.6.8.
+ *
+ * vdev may be lost due to xHC restore error and re-initialization during S3/S4
+ * resume. A new vdev will be allocated later by xhci_discover_or_reset_device()
  */
 
 static void xhci_endpoint_reset(struct usb_hcd *hcd,
@@ -3104,19 +3112,37 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
        int err;
 
        xhci = hcd_to_xhci(hcd);
+       ep_index = xhci_get_endpoint_index(&host_ep->desc);
+
+       /*
+        * Usb core assumes a max packet value for ep0 on FS devices until the
+        * real value is read from the descriptor. Core resets Ep0 if values
+        * mismatch. Reconfigure the xhci ep0 endpoint context here in that case
+        */
+       if (usb_endpoint_xfer_control(&host_ep->desc) && ep_index == 0) {
+
+               udev = container_of(host_ep, struct usb_device, ep0);
+               if (udev->speed != USB_SPEED_FULL || !udev->slot_id)
+                       return;
+
+               vdev = xhci->devs[udev->slot_id];
+               if (!vdev || vdev->udev != udev)
+                       return;
+
+               xhci_check_ep0_maxpacket(xhci, vdev);
+
+               /* Nothing else should be done here for ep0 during ep reset */
+               return;
+       }
+
        if (!host_ep->hcpriv)
                return;
        udev = (struct usb_device *) host_ep->hcpriv;
        vdev = xhci->devs[udev->slot_id];
 
-       /*
-        * vdev may be lost due to xHC restore error and re-initialization
-        * during S3/S4 resume. A new vdev will be allocated later by
-        * xhci_discover_or_reset_device()
-        */
        if (!udev->slot_id || !vdev)
                return;
-       ep_index = xhci_get_endpoint_index(&host_ep->desc);
+
        ep = &vdev->eps[ep_index];
 
        /* Bail out if toggle is already being cleared by a endpoint reset */
@@ -4029,12 +4055,18 @@ disable_slot:
        return 0;
 }
 
-/*
- * Issue an Address Device command and optionally send a corresponding
- * SetAddress request to the device.
+/**
+ * xhci_setup_device - issues an Address Device command to assign a unique
+ *                     USB bus address.
+ * @hcd: USB host controller data structure.
+ * @udev: USB dev structure representing the connected device.
+ * @setup: Enum specifying setup mode: address only or with context.
+ * @timeout_ms: Max wait time (ms) for the command operation to complete.
+ *
+ * Return: 0 if successful; otherwise, negative error code.
  */
 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
-                            enum xhci_setup_dev setup)
+                            enum xhci_setup_dev setup, unsigned int timeout_ms)
 {
        const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
        unsigned long flags;
@@ -4091,6 +4123,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
        }
 
        command->in_ctx = virt_dev->in_ctx;
+       command->timeout_ms = timeout_ms;
 
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
@@ -4217,14 +4250,16 @@ out:
        return ret;
 }
 
-static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
+                              unsigned int timeout_ms)
 {
-       return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
+       return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
 }
 
 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
 {
-       return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
+       return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
+                                XHCI_CMD_DEFAULT_TIMEOUT);
 }
 
 /*
index 3ea5c092bba7176fbbbf01aa597107178ececb31..6f82d404883f9accf627c057a96702a7d8d65a80 100644 (file)
@@ -791,6 +791,8 @@ struct xhci_command {
        struct completion               *completion;
        union xhci_trb                  *command_trb;
        struct list_head                cmd_list;
+       /* xHCI command response timeout in milliseconds */
+       unsigned int                    timeout_ms;
 };
 
 /* drop context bitmasks */
@@ -1547,11 +1549,15 @@ struct xhci_td {
        struct xhci_segment     *bounce_seg;
        /* actual_length of the URB has already been set */
        bool                    urb_length_set;
+       bool                    error_mid_td;
        unsigned int            num_trbs;
 };
 
-/* xHCI command default timeout value */
-#define XHCI_CMD_DEFAULT_TIMEOUT       (5 * HZ)
+/*
+ * xHCI command default timeout value in milliseconds.
+ * USB 3.2 spec, section 9.2.6.1
+ */
+#define XHCI_CMD_DEFAULT_TIMEOUT       5000
 
 /* command descriptor */
 struct xhci_cd {
@@ -1760,8 +1766,8 @@ struct xhci_hcd {
        int             page_size;
        /* Valid values are 12 to 20, inclusive */
        int             page_shift;
-       /* msi-x vectors */
-       int             msix_count;
+       /* MSI-X/MSI vectors */
+       int             nvecs;
        /* optional clocks */
        struct clk              *clk;
        struct clk              *reg_clk;
@@ -1769,7 +1775,7 @@ struct xhci_hcd {
        struct reset_control *reset;
        /* data structures */
        struct xhci_device_context_array *dcbaa;
-       struct xhci_interrupter *interrupter;
+       struct xhci_interrupter **interrupters;
        struct xhci_ring        *cmd_ring;
        unsigned int            cmd_ring_state;
 #define CMD_RING_STATE_RUNNING         (1 << 0)
@@ -2080,6 +2086,10 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
                int type, gfp_t flags);
 void xhci_free_container_ctx(struct xhci_hcd *xhci,
                struct xhci_container_ctx *ctx);
+struct xhci_interrupter *
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg);
+void xhci_remove_secondary_interrupter(struct usb_hcd
+                                      *hcd, struct xhci_interrupter *ir);
 
 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
index 1e3df27bab58fd1cb2be079dab3bbdca43e6a51a..6d28467ce35227684e0281a39284eba87582195a 100644 (file)
@@ -501,7 +501,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
                dev->minor, cmd, arg);
 
        retval = 0;
-       io_res = 0;
        switch (cmd) {
        case IOW_WRITE:
                if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
index 2b45404e9732cefca8b6a8cd93fa4f08a015cc76..0dd2b032c90b9bc6196ab7a3752e327c18a61fb0 100644 (file)
@@ -5,8 +5,10 @@
  * Copyright (c) 2022, Google LLC
  */
 
+#include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/export.h>
+#include <linux/err.h>
 #include <linux/gpio/consumer.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -60,15 +62,22 @@ struct onboard_hub {
        bool going_away;
        struct list_head udev_list;
        struct mutex lock;
+       struct clk *clk;
 };
 
 static int onboard_hub_power_on(struct onboard_hub *hub)
 {
        int err;
 
+       err = clk_prepare_enable(hub->clk);
+       if (err) {
+               dev_err(hub->dev, "failed to enable clock: %pe\n", ERR_PTR(err));
+               return err;
+       }
+
        err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
        if (err) {
-               dev_err(hub->dev, "failed to enable supplies: %d\n", err);
+               dev_err(hub->dev, "failed to enable supplies: %pe\n", ERR_PTR(err));
                return err;
        }
 
@@ -88,10 +97,12 @@ static int onboard_hub_power_off(struct onboard_hub *hub)
 
        err = regulator_bulk_disable(hub->pdata->num_supplies, hub->supplies);
        if (err) {
-               dev_err(hub->dev, "failed to disable supplies: %d\n", err);
+               dev_err(hub->dev, "failed to disable supplies: %pe\n", ERR_PTR(err));
                return err;
        }
 
+       clk_disable_unprepare(hub->clk);
+
        hub->is_powered_on = false;
 
        return 0;
@@ -233,9 +244,9 @@ static void onboard_hub_attach_usb_driver(struct work_struct *work)
 {
        int err;
 
-       err = driver_attach(&onboard_hub_usbdev_driver.drvwrap.driver);
+       err = driver_attach(&onboard_hub_usbdev_driver.driver);
        if (err)
-               pr_err("Failed to attach USB driver: %d\n", err);
+               pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err));
 }
 
 static int onboard_hub_probe(struct platform_device *pdev)
@@ -262,10 +273,14 @@ static int onboard_hub_probe(struct platform_device *pdev)
 
        err = devm_regulator_bulk_get(dev, hub->pdata->num_supplies, hub->supplies);
        if (err) {
-               dev_err(dev, "Failed to get regulator supplies: %d\n", err);
+               dev_err(dev, "Failed to get regulator supplies: %pe\n", ERR_PTR(err));
                return err;
        }
 
+       hub->clk = devm_clk_get_optional(dev, NULL);
+       if (IS_ERR(hub->clk))
+               return dev_err_probe(dev, PTR_ERR(hub->clk), "failed to get clock\n");
+
        hub->reset_gpio = devm_gpiod_get_optional(dev, "reset",
                                                  GPIOD_OUT_HIGH);
        if (IS_ERR(hub->reset_gpio))
@@ -426,6 +441,7 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
 static const struct usb_device_id onboard_hub_id_table[] = {
        { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 */
        { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 */
+       { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6570) }, /* CY7C6563x 2.0 */
        { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
        { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
        { USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */
index 292110e64a1d91c6697eb3bd18290d75dee5eebd..f360d5cf8d8a047bf8464bec36fa5fc3ab9e2dd9 100644 (file)
@@ -36,6 +36,11 @@ static const struct onboard_hub_pdata cypress_hx3_data = {
        .num_supplies = 2,
 };
 
+static const struct onboard_hub_pdata cypress_hx2vl_data = {
+       .reset_us = 1,
+       .num_supplies = 1,
+};
+
 static const struct onboard_hub_pdata genesys_gl850g_data = {
        .reset_us = 3,
        .num_supplies = 1,
@@ -61,6 +66,7 @@ static const struct of_device_id onboard_hub_match[] = {
        { .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
        { .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
        { .compatible = "usb4b4,6506", .data = &cypress_hx3_data, },
+       { .compatible = "usb4b4,6570", .data = &cypress_hx2vl_data, },
        { .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
        { .compatible = "usb5e3,610", .data = &genesys_gl852g_data, },
        { .compatible = "usb5e3,620", .data = &genesys_gl852g_data, },
index 7f371ea1248c32217ccb2e1f7e900d7e0d98753a..26e9b8749d8ab153e88017dbf5b3eb667f7bea12 100644 (file)
@@ -205,6 +205,9 @@ static int eud_probe(struct platform_device *pdev)
                return PTR_ERR(chip->mode_mgr);
 
        chip->irq = platform_get_irq(pdev, 0);
+       if (chip->irq < 0)
+               return chip->irq;
+
        ret = devm_request_threaded_irq(&pdev->dev, chip->irq, handle_eud_irq,
                        handle_eud_irq_thread, IRQF_ONESHOT, NULL, chip);
        if (ret)
index c640f98d20c54840cca2d65e43d4e0fed37d52ac..9a0649d236935488f7e1bdb50aacafe7ade8e833 100644 (file)
@@ -34,6 +34,8 @@
 #define YUREX_BUF_SIZE         8
 #define YUREX_WRITE_TIMEOUT    (HZ*2)
 
+#define MAX_S64_STRLEN 20 /* {-}922337203685477580{7,8} */
+
 /* table of devices that work with this driver */
 static struct usb_device_id yurex_table[] = {
        { USB_DEVICE(YUREX_VENDOR_ID, YUREX_PRODUCT_ID) },
@@ -401,7 +403,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 {
        struct usb_yurex *dev;
        int len = 0;
-       char in_buffer[20];
+       char in_buffer[MAX_S64_STRLEN];
        unsigned long flags;
 
        dev = file->private_data;
@@ -412,14 +414,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
                return -ENODEV;
        }
 
+       if (WARN_ON_ONCE(dev->bbu > S64_MAX || dev->bbu < S64_MIN)) {
+               mutex_unlock(&dev->io_mutex);
+               return -EIO;
+       }
+
        spin_lock_irqsave(&dev->lock, flags);
-       len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+       scnprintf(in_buffer, MAX_S64_STRLEN, "%lld\n", dev->bbu);
        spin_unlock_irqrestore(&dev->lock, flags);
        mutex_unlock(&dev->io_mutex);
 
-       if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
-               return -EIO;
-
        return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
index 9ca9305243fe59e7a61812c24f1c9eac081b416d..4e30de4db1c0a88c67e9a05cad0b290b8c30e4d1 100644 (file)
@@ -1250,14 +1250,19 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
        struct mon_reader_bin *rp = vmf->vma->vm_private_data;
        unsigned long offset, chunk_idx;
        struct page *pageptr;
+       unsigned long flags;
 
+       spin_lock_irqsave(&rp->b_lock, flags);
        offset = vmf->pgoff << PAGE_SHIFT;
-       if (offset >= rp->b_size)
+       if (offset >= rp->b_size) {
+               spin_unlock_irqrestore(&rp->b_lock, flags);
                return VM_FAULT_SIGBUS;
+       }
        chunk_idx = offset / CHUNK_SIZE;
        pageptr = rp->b_vec[chunk_idx].pg;
        get_page(pageptr);
        vmf->page = pageptr;
+       spin_unlock_irqrestore(&rp->b_lock, flags);
        return 0;
 }
 
index 98ab0cc473d675592ff54ceea1ae99d408eb446e..3c23805ab1a44441ec92c3bacd1bd49ee0871ccf 100644 (file)
@@ -35,9 +35,9 @@ static int mon_stat_open(struct inode *inode, struct file *file)
 
        mbus = inode->i_private;
 
-       sp->slen = snprintf(sp->str, STAT_BUF_SIZE,
-           "nreaders %d events %u text_lost %u\n",
-           mbus->nreaders, mbus->cnt_events, mbus->cnt_text_lost);
+       sp->slen = scnprintf(sp->str, STAT_BUF_SIZE,
+                            "nreaders %d events %u text_lost %u\n",
+                            mbus->nreaders, mbus->cnt_events, mbus->cnt_text_lost);
 
        file->private_data = sp;
        return 0;
index 39cb141646526dbc362e71890b6494d677f84f62..2fe9b95bac1d57727a431a96f10c332f95fdb763 100644 (file)
@@ -352,7 +352,7 @@ static int mon_text_open(struct inode *inode, struct file *file)
        rp->r.rnf_error = mon_text_error;
        rp->r.rnf_complete = mon_text_complete;
 
-       snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
+       scnprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
        rp->e_slab = kmem_cache_create(rp->slab_name,
            sizeof(struct mon_event_text), sizeof(long), 0,
            mon_text_ctor);
@@ -700,46 +700,28 @@ static const struct file_operations mon_fops_text_u = {
 
 int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus)
 {
-       enum { NAMESZ = 10 };
+       enum { NAMESZ = 12 };
        char name[NAMESZ];
        int busnum = ubus? ubus->busnum: 0;
-       int rc;
 
        if (mon_dir == NULL)
                return 0;
 
        if (ubus != NULL) {
-               rc = snprintf(name, NAMESZ, "%dt", busnum);
-               if (rc <= 0 || rc >= NAMESZ)
-                       goto err_print_t;
+               scnprintf(name, NAMESZ, "%dt", busnum);
                mbus->dent_t = debugfs_create_file(name, 0600, mon_dir, mbus,
                                                             &mon_fops_text_t);
        }
 
-       rc = snprintf(name, NAMESZ, "%du", busnum);
-       if (rc <= 0 || rc >= NAMESZ)
-               goto err_print_u;
+       scnprintf(name, NAMESZ, "%du", busnum);
        mbus->dent_u = debugfs_create_file(name, 0600, mon_dir, mbus,
                                           &mon_fops_text_u);
 
-       rc = snprintf(name, NAMESZ, "%ds", busnum);
-       if (rc <= 0 || rc >= NAMESZ)
-               goto err_print_s;
+       scnprintf(name, NAMESZ, "%ds", busnum);
        mbus->dent_s = debugfs_create_file(name, 0600, mon_dir, mbus,
                                           &mon_fops_stat);
 
        return 1;
-
-err_print_s:
-       debugfs_remove(mbus->dent_u);
-       mbus->dent_u = NULL;
-err_print_u:
-       if (ubus != NULL) {
-               debugfs_remove(mbus->dent_t);
-               mbus->dent_t = NULL;
-       }
-err_print_t:
-       return 0;
 }
 
 void mon_text_del(struct mon_bus *mbus)
index 770081b828a429326e8801981f9ec66a913b6890..9ab50f26db607180ab687e28e860dd38318d5386 100644 (file)
@@ -46,15 +46,21 @@ EXPORT_SYMBOL_GPL(usb_phy_generic_unregister);
 static int nop_set_suspend(struct usb_phy *x, int suspend)
 {
        struct usb_phy_generic *nop = dev_get_drvdata(x->dev);
+       int ret = 0;
 
-       if (!IS_ERR(nop->clk)) {
-               if (suspend)
+       if (suspend) {
+               if (!IS_ERR(nop->clk))
                        clk_disable_unprepare(nop->clk);
-               else
+               if (!IS_ERR(nop->vcc) && !device_may_wakeup(x->dev))
+                       ret = regulator_disable(nop->vcc);
+       } else {
+               if (!IS_ERR(nop->vcc) && !device_may_wakeup(x->dev))
+                       ret = regulator_enable(nop->vcc);
+               if (!IS_ERR(nop->clk))
                        clk_prepare_enable(nop->clk);
        }
 
-       return 0;
+       return ret;
 }
 
 static void nop_reset(struct usb_phy_generic *nop)
index acd46b72899e900d126dde72922423b55300e401..920a32cd094d6f48faf145a6a5e74bb817dfdf89 100644 (file)
@@ -388,8 +388,7 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
 
 static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
 {
-       return IS_ENABLED(CONFIG_USB_OTG) &&
-               mxs_phy->phy.last_event == USB_EVENT_ID;
+       return mxs_phy->phy.last_event == USB_EVENT_ID;
 }
 
 static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
index c3ce6b1054f1c354717dfd14a0139e6493f97508..da09cff55abcec526dde7e5749cd11b9c60f9a8f 100644 (file)
@@ -179,16 +179,16 @@ static ssize_t vbus_show(struct device *dev,
 
        switch (twl->linkstat) {
        case MUSB_VBUS_VALID:
-              ret = snprintf(buf, PAGE_SIZE, "vbus\n");
+              ret = sysfs_emit(buf, "vbus\n");
               break;
        case MUSB_ID_GROUND:
-              ret = snprintf(buf, PAGE_SIZE, "id\n");
+              ret = sysfs_emit(buf, "id\n");
               break;
        case MUSB_VBUS_OFF:
-              ret = snprintf(buf, PAGE_SIZE, "none\n");
+              ret = sysfs_emit(buf, "none\n");
               break;
        default:
-              ret = snprintf(buf, PAGE_SIZE, "UNKNOWN\n");
+              ret = sysfs_emit(buf, "UNKNOWN\n");
        }
        spin_unlock_irqrestore(&twl->lock, flags);
 
index 3eb8dc3a1a8f529fdb2d2e66a376aa3fb1da191e..6c812d01b37d7b57e0f229ed52315f79271bdde5 100644 (file)
@@ -113,7 +113,7 @@ static ssize_t new_id_store(struct device_driver *driver,
        if (retval >= 0 && usb_drv->usb_driver != NULL)
                retval = usb_store_new_id(&usb_drv->usb_driver->dynids,
                                          usb_drv->usb_driver->id_table,
-                                         &usb_drv->usb_driver->drvwrap.driver,
+                                         &usb_drv->usb_driver->driver,
                                          buf, count);
        return retval;
 }
index 1e61fe04317158c3a5e877bfb0d89cb5572ce4ef..923e0ed85444be9fde31e0b0d965813fc99c5acf 100644 (file)
@@ -146,6 +146,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
        { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
        { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+       { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
        { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
        { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
index 72390dbf0769282e8efb289023ca2b6915494160..2ae124c49d448f63b6d6a3078ad08fffee3ad2d0 100644 (file)
@@ -2269,6 +2269,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) },                   /* Fibocom FM160 (MBIM mode) */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) },                   /* Fibocom FM101-GL (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) },                   /* Fibocom FM101-GL (laptop MBIM) */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff),                     /* Fibocom FM101-GL (laptop MBIM) */
          .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   /* LongSung M5710 */
index b1e844bf31f81f7984a976bf4ca5dcd8f01b3a97..703a9c56355731c158801f89996937f7ea760d35 100644 (file)
@@ -184,6 +184,8 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x413c, 0x81d0)},   /* Dell Wireless 5819 */
        {DEVICE_SWI(0x413c, 0x81d1)},   /* Dell Wireless 5818 */
        {DEVICE_SWI(0x413c, 0x81d2)},   /* Dell Wireless 5818 */
+       {DEVICE_SWI(0x413c, 0x8217)},   /* Dell Wireless DW5826e */
+       {DEVICE_SWI(0x413c, 0x8218)},   /* Dell Wireless DW5826e QDL */
 
        /* Huawei devices */
        {DEVICE_HWI(0x03f0, 0x581d)},   /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
index 17b09f03ef84594cf47698d6964fc80a77b0a0dc..f1e91eb7f8a4136e5f657748889be250193ea32a 100644 (file)
@@ -1521,7 +1521,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
 
        /* Now set udriver's id_table and look for matches */
        udriver->id_table = id_table;
-       rc = driver_attach(&udriver->drvwrap.driver);
+       rc = driver_attach(&udriver->driver);
        return 0;
 
 err_deregister_drivers:
index 0774ba22fb66ee3fe48f56c9c7d78232975a9969..177fa6cd143ab2837640c26f8336781ddd3cf9cb 100644 (file)
@@ -98,26 +98,26 @@ static ssize_t truinst_show(struct device *dev, struct device_attribute *attr,
        struct usb_device *udev = interface_to_usbdev(intf);
        int result;
        if (swi_tru_install == TRU_FORCE_MS) {
-               result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n");
+               result = sysfs_emit(buf, "Forced Mass Storage\n");
        } else {
                swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL);
                if (!swocInfo) {
-                       snprintf(buf, PAGE_SIZE, "Error\n");
+                       sysfs_emit(buf, "Error\n");
                        return -ENOMEM;
                }
                result = sierra_get_swoc_info(udev, swocInfo);
                if (result < 0) {
                        dev_dbg(dev, "SWIMS: failed SWoC query\n");
                        kfree(swocInfo);
-                       snprintf(buf, PAGE_SIZE, "Error\n");
+                       sysfs_emit(buf, "Error\n");
                        return -EIO;
                }
                debug_swoc(dev, swocInfo);
-               result = snprintf(buf, PAGE_SIZE,
-                       "REV=%02d SKU=%04X VER=%04X\n",
-                       swocInfo->rev,
-                       swocInfo->LinuxSKU,
-                       swocInfo->LinuxVer);
+               result = sysfs_emit(buf,
+                                   "REV=%02d SKU=%04X VER=%04X\n",
+                                   swocInfo->rev,
+                                   swocInfo->LinuxSKU,
+                                   swocInfo->LinuxVer);
                kfree(swocInfo);
        }
        return result;
index 696bb0b2359922dedf4921bf233a23560629a46e..9707f53cfda9c08507082ac33b69b5d146c6927f 100644 (file)
@@ -1246,7 +1246,7 @@ static struct usb_driver uas_driver = {
        .suspend = uas_suspend,
        .resume = uas_resume,
        .reset_resume = uas_reset_resume,
-       .drvwrap.driver.shutdown = uas_shutdown,
+       .driver.shutdown = uas_shutdown,
        .id_table = uas_usb_ids,
 };
 
index 16a670828dde19b14d8049bb79c7e6daec210b72..015aa925335360709fa54498a05eb9db72b1c64a 100644 (file)
@@ -263,11 +263,13 @@ static void typec_altmode_put_partner(struct altmode *altmode)
 {
        struct altmode *partner = altmode->partner;
        struct typec_altmode *adev;
+       struct typec_altmode *partner_adev;
 
        if (!partner)
                return;
 
        adev = &altmode->adev;
+       partner_adev = &partner->adev;
 
        if (is_typec_plug(adev->dev.parent)) {
                struct typec_plug *plug = to_typec_plug(adev->dev.parent);
@@ -276,7 +278,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
        } else {
                partner->partner = NULL;
        }
-       put_device(&adev->dev);
+       put_device(&partner_adev->dev);
 }
 
 /**
@@ -476,7 +478,7 @@ static int altmode_id_get(struct device *dev)
        else
                ids = &to_typec_port(dev)->mode_ids;
 
-       return ida_simple_get(ids, 0, 0, GFP_KERNEL);
+       return ida_alloc(ids, GFP_KERNEL);
 }
 
 static void altmode_id_remove(struct device *dev, int id)
@@ -490,7 +492,7 @@ static void altmode_id_remove(struct device *dev, int id)
        else
                ids = &to_typec_port(dev)->mode_ids;
 
-       ida_simple_remove(ids, id);
+       ida_free(ids, id);
 }
 
 static void typec_altmode_release(struct device *dev)
@@ -1798,7 +1800,7 @@ static void typec_release(struct device *dev)
 {
        struct typec_port *port = to_typec_port(dev);
 
-       ida_simple_remove(&typec_index_ida, port->id);
+       ida_free(&typec_index_ida, port->id);
        ida_destroy(&port->mode_ids);
        typec_switch_put(port->sw);
        typec_mux_put(port->mux);
@@ -2231,7 +2233,8 @@ void typec_port_register_altmodes(struct typec_port *port,
        struct typec_altmode_desc desc;
        struct typec_altmode *alt;
        size_t index = 0;
-       u32 svid, vdo;
+       u16 svid;
+       u32 vdo;
        int ret;
 
        altmodes_node = device_get_named_child_node(&port->dev, "altmodes");
@@ -2239,7 +2242,7 @@ void typec_port_register_altmodes(struct typec_port *port,
                return; /* No altmodes specified */
 
        fwnode_for_each_child_node(altmodes_node, child) {
-               ret = fwnode_property_read_u32(child, "svid", &svid);
+               ret = fwnode_property_read_u16(child, "svid", &svid);
                if (ret) {
                        dev_err(&port->dev, "Error reading svid for altmode %s\n",
                                fwnode_get_name(child));
@@ -2297,7 +2300,7 @@ struct typec_port *typec_register_port(struct device *parent,
        if (!port)
                return ERR_PTR(-ENOMEM);
 
-       id = ida_simple_get(&typec_index_ida, 0, 0, GFP_KERNEL);
+       id = ida_alloc(&typec_index_ida, GFP_KERNEL);
        if (id < 0) {
                kfree(port);
                return ERR_PTR(id);
index 38416fb0cc3ca94bc5b4ce50a3642e83a1607854..d2cb5e733e57359d16d90418a4dc66c4d60bc449 100644 (file)
@@ -56,4 +56,14 @@ config TYPEC_MUX_PTN36502
          Say Y or M if your system has a NXP PTN36502 Type-C redriver chip
          found on some devices with a Type-C port.
 
+config TYPEC_MUX_WCD939X_USBSS
+       tristate "Qualcomm WCD939x USBSS Analog Audio Switch driver"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         Driver for the Qualcomm WCD939x Audio Codec USBSS domain which
+         provides support for muxing analog audio and sideband signals on a
+         common USB Type-C connector.
+         If compiled as a module, the module will be named wcd939x-usbss.
+
 endmenu
index 9d6a5557b0bde282a4eb3db4e9537ed0bef30bc4..57dc9ac6f8dcfc24d632e02d43266314c6f90364 100644 (file)
@@ -6,3 +6,4 @@ obj-$(CONFIG_TYPEC_MUX_PI3USB30532)     += pi3usb30532.o
 obj-$(CONFIG_TYPEC_MUX_INTEL_PMC)      += intel_pmc_mux.o
 obj-$(CONFIG_TYPEC_MUX_NB7VPQ904M)     += nb7vpq904m.o
 obj-$(CONFIG_TYPEC_MUX_PTN36502)       += ptn36502.o
+obj-$(CONFIG_TYPEC_MUX_WCD939X_USBSS)  += wcd939x-usbss.o
diff --git a/drivers/usb/typec/mux/wcd939x-usbss.c b/drivers/usb/typec/mux/wcd939x-usbss.c
new file mode 100644 (file)
index 0000000..d46c353
--- /dev/null
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (C) 2023 Linaro Ltd.
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
+#include <linux/gpio/consumer.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+#define WCD_USBSS_PMP_OUT1                     0x2
+
+#define WCD_USBSS_DP_DN_MISC1                  0x20
+
+#define WCD_USBSS_DP_DN_MISC1_DP_PCOMP_2X_DYN_BST_ON_EN                        BIT(3)
+#define WCD_USBSS_DP_DN_MISC1_DN_PCOMP_2X_DYN_BST_ON_EN                        BIT(0)
+
+#define WCD_USBSS_MG1_EN                       0x24
+
+#define WCD_USBSS_MG1_EN_CT_SNS_EN                                     BIT(1)
+
+#define WCD_USBSS_MG1_BIAS                     0x25
+
+#define WCD_USBSS_MG1_BIAS_PCOMP_DYN_BST_EN                            BIT(3)
+
+#define WCD_USBSS_MG1_MISC                     0x27
+
+#define WCD_USBSS_MG1_MISC_PCOMP_2X_DYN_BST_ON_EN                      BIT(5)
+
+#define WCD_USBSS_MG2_EN                       0x28
+
+#define WCD_USBSS_MG2_EN_CT_SNS_EN                                     BIT(1)
+
+#define WCD_USBSS_MG2_BIAS                     0x29
+
+#define WCD_USBSS_MG2_BIAS_PCOMP_DYN_BST_EN                            BIT(3)
+
+#define WCD_USBSS_MG2_MISC                     0x30
+
+#define WCD_USBSS_MG2_MISC_PCOMP_2X_DYN_BST_ON_EN                      BIT(5)
+
+#define WCD_USBSS_DISP_AUXP_THRESH             0x80
+
+#define WCD_USBSS_DISP_AUXP_THRESH_DISP_AUXP_OVPON_CM                  GENMASK(7, 5)
+
+#define WCD_USBSS_DISP_AUXP_CTL                        0x81
+
+#define WCD_USBSS_DISP_AUXP_CTL_LK_CANCEL_TRK_COEFF                    GENMASK(2, 0)
+
+#define WCD_USBSS_CPLDO_CTL2                   0xa1
+
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE       0x403
+
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_DEVICE_ENABLE                 BIT(7)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXP_TO_MGX_SWITCHES       BIT(6)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXM_TO_MGX_SWITCHES       BIT(5)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_DNL_SWITCHES                  BIT(4)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_DPR_SWITCHES                  BIT(3)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_SENSE_SWITCHES                        BIT(2)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_MIC_SWITCHES                  BIT(1)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_AGND_SWITCHES                 BIT(0)
+
+#define WCD_USBSS_SWITCH_SELECT0               0x404
+
+#define WCD_USBSS_SWITCH_SELECT0_DP_AUXP_SWITCHES                      BIT(7)  /* 1-> MG2 */
+#define WCD_USBSS_SWITCH_SELECT0_DP_AUXM_SWITCHES                      BIT(6)  /* 1-> MG2 */
+#define WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES                          GENMASK(5, 4)
+#define WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES                          GENMASK(3, 2)
+#define WCD_USBSS_SWITCH_SELECT0_SENSE_SWITCHES                                BIT(1)  /* 1-> SBU2 */
+#define WCD_USBSS_SWITCH_SELECT0_MIC_SWITCHES                          BIT(0)  /* 1-> MG2 */
+
+#define WCD_USBSS_SWITCH_SELECT0_DNL_SWITCH_L          0
+#define WCD_USBSS_SWITCH_SELECT0_DNL_SWITCH_DN         1
+#define WCD_USBSS_SWITCH_SELECT0_DNL_SWITCH_DN2                2
+
+#define WCD_USBSS_SWITCH_SELECT0_DPR_SWITCH_R          0
+#define WCD_USBSS_SWITCH_SELECT0_DPR_SWITCH_DP         1
+#define WCD_USBSS_SWITCH_SELECT0_DPR_SWITCH_DR2                2
+
+#define WCD_USBSS_SWITCH_SELECT1               0x405
+
+#define WCD_USBSS_SWITCH_SELECT1_AGND_SWITCHES                         BIT(0)  /* 1-> MG2 */
+
+#define WCD_USBSS_DELAY_R_SW                   0x40d
+#define WCD_USBSS_DELAY_MIC_SW                 0x40e
+#define WCD_USBSS_DELAY_SENSE_SW               0x40f
+#define WCD_USBSS_DELAY_GND_SW                 0x410
+#define WCD_USBSS_DELAY_L_SW                   0x411
+
+#define WCD_USBSS_FUNCTION_ENABLE              0x413
+
+#define WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT                                GENMASK(1, 0)
+
+#define WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT_MANUAL         1
+#define WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT_AUDIO_FSM      2
+
+#define WCD_USBSS_EQUALIZER1                   0x415
+
+#define WCD_USBSS_EQUALIZER1_EQ_EN                                     BIT(7)
+#define WCD_USBSS_EQUALIZER1_BW_SETTINGS                               GENMASK(6, 3)
+
+#define WCD_USBSS_USB_SS_CNTL                  0x419
+
+#define WCD_USBSS_USB_SS_CNTL_STANDBY_STATE                            BIT(4)
+#define WCD_USBSS_USB_SS_CNTL_RCO_EN                                   BIT(3)
+#define WCD_USBSS_USB_SS_CNTL_USB_SS_MODE                              GENMASK(2, 0)
+
+#define WCD_USBSS_USB_SS_CNTL_USB_SS_MODE_AATC         2
+#define WCD_USBSS_USB_SS_CNTL_USB_SS_MODE_USB          5
+
+#define WCD_USBSS_AUDIO_FSM_START              0x433
+
+#define WCD_USBSS_AUDIO_FSM_START_AUDIO_FSM_AUDIO_TRIG                 BIT(0)
+
+#define WCD_USBSS_RATIO_SPKR_REXT_L_LSB                0x461
+#define WCD_USBSS_RATIO_SPKR_REXT_L_MSB                0x462
+#define WCD_USBSS_RATIO_SPKR_REXT_R_LSB                0x463
+#define WCD_USBSS_RATIO_SPKR_REXT_R_MSB                0x464
+#define WCD_USBSS_AUD_COEF_L_K0_0              0x475
+#define WCD_USBSS_AUD_COEF_L_K0_1              0x476
+#define WCD_USBSS_AUD_COEF_L_K0_2              0x477
+#define WCD_USBSS_AUD_COEF_L_K1_0              0x478
+#define WCD_USBSS_AUD_COEF_L_K1_1              0x479
+#define WCD_USBSS_AUD_COEF_L_K2_0              0x47a
+#define WCD_USBSS_AUD_COEF_L_K2_1              0x47b
+#define WCD_USBSS_AUD_COEF_L_K3_0              0x47c
+#define WCD_USBSS_AUD_COEF_L_K3_1              0x47d
+#define WCD_USBSS_AUD_COEF_L_K4_0              0x47e
+#define WCD_USBSS_AUD_COEF_L_K4_1              0x47f
+#define WCD_USBSS_AUD_COEF_L_K5_0              0x480
+#define WCD_USBSS_AUD_COEF_L_K5_1              0x481
+#define WCD_USBSS_AUD_COEF_R_K0_0              0x482
+#define WCD_USBSS_AUD_COEF_R_K0_1              0x483
+#define WCD_USBSS_AUD_COEF_R_K0_2              0x484
+#define WCD_USBSS_AUD_COEF_R_K1_0              0x485
+#define WCD_USBSS_AUD_COEF_R_K1_1              0x486
+#define WCD_USBSS_AUD_COEF_R_K2_0              0x487
+#define WCD_USBSS_AUD_COEF_R_K2_1              0x488
+#define WCD_USBSS_AUD_COEF_R_K3_0              0x489
+#define WCD_USBSS_AUD_COEF_R_K3_1              0x48a
+#define WCD_USBSS_AUD_COEF_R_K4_0              0x48b
+#define WCD_USBSS_AUD_COEF_R_K4_1              0x48c
+#define WCD_USBSS_AUD_COEF_R_K5_0              0x48d
+#define WCD_USBSS_AUD_COEF_R_K5_1              0x48e
+#define WCD_USBSS_GND_COEF_L_K0_0              0x48f
+#define WCD_USBSS_GND_COEF_L_K0_1              0x490
+#define WCD_USBSS_GND_COEF_L_K0_2              0x491
+#define WCD_USBSS_GND_COEF_L_K1_0              0x492
+#define WCD_USBSS_GND_COEF_L_K1_1              0x493
+#define WCD_USBSS_GND_COEF_L_K2_0              0x494
+#define WCD_USBSS_GND_COEF_L_K2_1              0x495
+#define WCD_USBSS_GND_COEF_L_K3_0              0x496
+#define WCD_USBSS_GND_COEF_L_K3_1              0x497
+#define WCD_USBSS_GND_COEF_L_K4_0              0x498
+#define WCD_USBSS_GND_COEF_L_K4_1              0x499
+#define WCD_USBSS_GND_COEF_L_K5_0              0x49a
+#define WCD_USBSS_GND_COEF_L_K5_1              0x49b
+#define WCD_USBSS_GND_COEF_R_K0_0              0x49c
+#define WCD_USBSS_GND_COEF_R_K0_1              0x49d
+#define WCD_USBSS_GND_COEF_R_K0_2              0x49e
+#define WCD_USBSS_GND_COEF_R_K1_0              0x49f
+#define WCD_USBSS_GND_COEF_R_K1_1              0x4a0
+#define WCD_USBSS_GND_COEF_R_K2_0              0x4a1
+#define WCD_USBSS_GND_COEF_R_K2_1              0x4a2
+#define WCD_USBSS_GND_COEF_R_K3_0              0x4a3
+#define WCD_USBSS_GND_COEF_R_K3_1              0x4a4
+#define WCD_USBSS_GND_COEF_R_K4_0              0x4a5
+#define WCD_USBSS_GND_COEF_R_K4_1              0x4a6
+#define WCD_USBSS_GND_COEF_R_K5_0              0x4a7
+#define WCD_USBSS_GND_COEF_R_K5_1              0x4a8
+
+#define WCD_USBSS_MAX_REGISTER                 0x4c1
+
+struct wcd939x_usbss {
+       struct i2c_client *client;
+       struct gpio_desc *reset_gpio;
+       struct regulator *vdd_supply;
+
+       /* used to serialize concurrent change requests */
+       struct mutex lock;
+
+       struct typec_switch_dev *sw;
+       struct typec_mux_dev *mux;
+
+       struct regmap *regmap;
+
+       struct typec_mux *codec;
+       struct typec_switch *codec_switch;
+
+       enum typec_orientation orientation;
+       unsigned long mode;
+       unsigned int svid;
+};
+
+static const struct regmap_range_cfg wcd939x_usbss_ranges[] = {
+       {
+               .range_min = 0,
+               .range_max = WCD_USBSS_MAX_REGISTER,
+               .selector_reg = 0x0,
+               .selector_mask = 0xff,
+               .selector_shift = 0,
+               .window_start = 0,
+               .window_len = 0x100,
+       },
+};
+
+static const struct regmap_config wcd939x_usbss_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = WCD_USBSS_MAX_REGISTER,
+       .ranges = wcd939x_usbss_ranges,
+       .num_ranges = ARRAY_SIZE(wcd939x_usbss_ranges),
+};
+
+/* Linearizer coefficients for 32 ohm load */
+static const struct {
+       unsigned int offset;
+       unsigned int mask;
+       unsigned int value;
+} wcd939x_usbss_coeff_init[] = {
+       { WCD_USBSS_AUD_COEF_L_K5_0, GENMASK(7, 0), 0x39 },
+       { WCD_USBSS_AUD_COEF_R_K5_0, GENMASK(7, 0), 0x39 },
+       { WCD_USBSS_GND_COEF_L_K2_0, GENMASK(7, 0), 0xe8 },
+       { WCD_USBSS_GND_COEF_L_K4_0, GENMASK(7, 0), 0x73 },
+       { WCD_USBSS_GND_COEF_R_K2_0, GENMASK(7, 0), 0xe8 },
+       { WCD_USBSS_GND_COEF_R_K4_0, GENMASK(7, 0), 0x73 },
+       { WCD_USBSS_RATIO_SPKR_REXT_L_LSB, GENMASK(7, 0), 0x00 },
+       { WCD_USBSS_RATIO_SPKR_REXT_L_MSB, GENMASK(6, 0), 0x04 },
+       { WCD_USBSS_RATIO_SPKR_REXT_R_LSB, GENMASK(7, 0), 0x00 },
+       { WCD_USBSS_RATIO_SPKR_REXT_R_MSB, GENMASK(6, 0), 0x04 },
+};
+
+static int wcd939x_usbss_set(struct wcd939x_usbss *usbss)
+{
+       bool reverse = (usbss->orientation == TYPEC_ORIENTATION_REVERSE);
+       bool enable_audio = false;
+       bool enable_usb = false;
+       bool enable_dp = false;
+       int ret;
+
+       /* USB Mode */
+       if (usbss->mode < TYPEC_STATE_MODAL ||
+           (!usbss->svid && (usbss->mode == TYPEC_MODE_USB2 ||
+                             usbss->mode == TYPEC_MODE_USB3))) {
+               enable_usb = true;
+       } else if (usbss->svid) {
+               switch (usbss->mode) {
+               /* DP Only */
+               case TYPEC_DP_STATE_C:
+               case TYPEC_DP_STATE_E:
+                       enable_dp = true;
+                       break;
+
+               /* DP + USB */
+               case TYPEC_DP_STATE_D:
+               case TYPEC_DP_STATE_F:
+                       enable_usb = true;
+                       enable_dp = true;
+                       break;
+
+               default:
+                       return -EOPNOTSUPP;
+               }
+       } else if (usbss->mode == TYPEC_MODE_AUDIO) {
+               enable_audio = true;
+       } else {
+               return -EOPNOTSUPP;
+       }
+
+       /* Disable all switches */
+       ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+                               WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXP_TO_MGX_SWITCHES |
+                               WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXM_TO_MGX_SWITCHES |
+                               WCD_USBSS_SWITCH_SETTINGS_ENABLE_DPR_SWITCHES |
+                               WCD_USBSS_SWITCH_SETTINGS_ENABLE_DNL_SWITCHES |
+                               WCD_USBSS_SWITCH_SETTINGS_ENABLE_SENSE_SWITCHES |
+                               WCD_USBSS_SWITCH_SETTINGS_ENABLE_MIC_SWITCHES |
+                               WCD_USBSS_SWITCH_SETTINGS_ENABLE_AGND_SWITCHES);
+       if (ret)
+               return ret;
+
+       /* Clear switches */
+       ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+                               WCD_USBSS_SWITCH_SELECT0_DP_AUXP_SWITCHES |
+                               WCD_USBSS_SWITCH_SELECT0_DP_AUXM_SWITCHES |
+                               WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES |
+                               WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES |
+                               WCD_USBSS_SWITCH_SELECT0_SENSE_SWITCHES |
+                               WCD_USBSS_SWITCH_SELECT0_MIC_SWITCHES);
+       if (ret)
+               return ret;
+
+       ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT1,
+                               WCD_USBSS_SWITCH_SELECT1_AGND_SWITCHES);
+       if (ret)
+               return ret;
+
+       /* Enable OVP_MG1_BIAS PCOMP_DYN_BST_EN */
+       ret = regmap_set_bits(usbss->regmap, WCD_USBSS_MG1_BIAS,
+                             WCD_USBSS_MG1_BIAS_PCOMP_DYN_BST_EN);
+       if (ret)
+               return ret;
+
+       /* Enable OVP_MG2_BIAS PCOMP_DYN_BST_EN */
+       ret = regmap_set_bits(usbss->regmap, WCD_USBSS_MG2_BIAS,
+                             WCD_USBSS_MG2_BIAS_PCOMP_DYN_BST_EN);
+       if (ret)
+               return ret;
+
+       /* Disable Equalizer in safe mode */
+       ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_EQUALIZER1,
+                               WCD_USBSS_EQUALIZER1_EQ_EN);
+       if (ret)
+               return ret;
+
+       /* Start FSM with all disabled, force write */
+       ret = regmap_write_bits(usbss->regmap, WCD_USBSS_AUDIO_FSM_START,
+                               WCD_USBSS_AUDIO_FSM_START_AUDIO_FSM_AUDIO_TRIG,
+                               WCD_USBSS_AUDIO_FSM_START_AUDIO_FSM_AUDIO_TRIG);
+
+       /* 35us to allow the SBU switch to turn off */
+       usleep_range(35, 1000);
+
+       /* Setup Audio Accessory mux/switch */
+       if (enable_audio) {
+               int i;
+
+               /*
+                * AATC switch configuration:
+                * "Normal":
+                * - R: DNR
+                * - L: DNL
+                * - Sense: GSBU2
+                * - Mic: MG1
+                * - AGND: MG2
+                * "Swapped":
+                * - R: DNR
+                * - L: DNL
+                * - Sense: GSBU1
+                * - Mic: MG2
+                * - AGND: MG1
+                * Swapped information is given by the codec MBHC logic
+                */
+
+               /* Set AATC mode */
+               ret = regmap_update_bits(usbss->regmap, WCD_USBSS_USB_SS_CNTL,
+                                        WCD_USBSS_USB_SS_CNTL_USB_SS_MODE,
+                                        FIELD_PREP(WCD_USBSS_USB_SS_CNTL_USB_SS_MODE,
+                                                   WCD_USBSS_USB_SS_CNTL_USB_SS_MODE_AATC));
+               if (ret)
+                       return ret;
+
+               /* Select L for DNL_SWITCHES and R for DPR_SWITCHES */
+               ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+                               WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES |
+                               WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES,
+                               FIELD_PREP(WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES,
+                                       WCD_USBSS_SWITCH_SELECT0_DNL_SWITCH_L) |
+                               FIELD_PREP(WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES,
+                                       WCD_USBSS_SWITCH_SELECT0_DPR_SWITCH_R));
+               if (ret)
+                       return ret;
+
+               if (reverse)
+                       /* Select MG2 for MIC, SBU1 for Sense */
+                       ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+                                                WCD_USBSS_SWITCH_SELECT0_MIC_SWITCHES,
+                                                WCD_USBSS_SWITCH_SELECT0_MIC_SWITCHES);
+               else
+                       /* Select MG1 for MIC, SBU2 for Sense */
+                       ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+                                                WCD_USBSS_SWITCH_SELECT0_SENSE_SWITCHES,
+                                                WCD_USBSS_SWITCH_SELECT0_SENSE_SWITCHES);
+               if (ret)
+                       return ret;
+
+               if (reverse)
+                       /* Disable OVP_MG1_BIAS PCOMP_DYN_BST_EN */
+                       ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_MG1_BIAS,
+                                               WCD_USBSS_MG1_BIAS_PCOMP_DYN_BST_EN);
+               else
+                       /* Disable OVP_MG2_BIAS PCOMP_DYN_BST_EN */
+                       ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_MG2_BIAS,
+                                               WCD_USBSS_MG2_BIAS_PCOMP_DYN_BST_EN);
+               if (ret)
+                       return ret;
+
+               /* Enable SENSE, MIC switches */
+               ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+                                     WCD_USBSS_SWITCH_SETTINGS_ENABLE_SENSE_SWITCHES |
+                                     WCD_USBSS_SWITCH_SETTINGS_ENABLE_MIC_SWITCHES);
+               if (ret)
+                       return ret;
+
+               if (reverse)
+                       /* Select MG1 for AGND_SWITCHES */
+                       ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT1,
+                                               WCD_USBSS_SWITCH_SELECT1_AGND_SWITCHES);
+               else
+                       /* Select MG2 for AGND_SWITCHES */
+                       ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT1,
+                                             WCD_USBSS_SWITCH_SELECT1_AGND_SWITCHES);
+               if (ret)
+                       return ret;
+
+               /* Enable AGND switches */
+               ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+                                     WCD_USBSS_SWITCH_SETTINGS_ENABLE_AGND_SWITCHES);
+               if (ret)
+                       return ret;
+
+               /* Enable DPR, DNL switches */
+               ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+                                     WCD_USBSS_SWITCH_SETTINGS_ENABLE_DNL_SWITCHES |
+                                     WCD_USBSS_SWITCH_SETTINGS_ENABLE_DPR_SWITCHES);
+               if (ret)
+                       return ret;
+
+               /* Setup FSM delays */
+               ret = regmap_write(usbss->regmap, WCD_USBSS_DELAY_L_SW, 0x02);
+               if (ret)
+                       return ret;
+
+               ret = regmap_write(usbss->regmap, WCD_USBSS_DELAY_R_SW, 0x02);
+               if (ret)
+                       return ret;
+
+               ret = regmap_write(usbss->regmap, WCD_USBSS_DELAY_MIC_SW, 0x01);
+               if (ret)
+                       return ret;
+
+               /* Start FSM, force write */
+               ret = regmap_write_bits(usbss->regmap, WCD_USBSS_AUDIO_FSM_START,
+                                       WCD_USBSS_AUDIO_FSM_START_AUDIO_FSM_AUDIO_TRIG,
+                                       WCD_USBSS_AUDIO_FSM_START_AUDIO_FSM_AUDIO_TRIG);
+               if (ret)
+                       return ret;
+
+               /* Default Linearizer coefficients */
+               for (i = 0; i < ARRAY_SIZE(wcd939x_usbss_coeff_init); ++i)
+                       regmap_update_bits(usbss->regmap,
+                                          wcd939x_usbss_coeff_init[i].offset,
+                                          wcd939x_usbss_coeff_init[i].mask,
+                                          wcd939x_usbss_coeff_init[i].value);
+
+               return 0;
+       }
+
+       ret = regmap_update_bits(usbss->regmap, WCD_USBSS_USB_SS_CNTL,
+                                WCD_USBSS_USB_SS_CNTL_USB_SS_MODE,
+                                FIELD_PREP(WCD_USBSS_USB_SS_CNTL_USB_SS_MODE,
+                                           WCD_USBSS_USB_SS_CNTL_USB_SS_MODE_USB));
+       if (ret)
+               return ret;
+
+       /* Enable USB muxes */
+       if (enable_usb) {
+               /* Do not enable Equalizer in safe mode */
+               if (usbss->mode != TYPEC_STATE_SAFE) {
+                       ret = regmap_set_bits(usbss->regmap, WCD_USBSS_EQUALIZER1,
+                                             WCD_USBSS_EQUALIZER1_EQ_EN);
+                       if (ret)
+                               return ret;
+               }
+
+               /* Select DN for DNL_SWITCHES and DP for DPR_SWITCHES */
+               ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+                                        WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES |
+                                        WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES,
+                                        FIELD_PREP(WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES,
+                                                   WCD_USBSS_SWITCH_SELECT0_DNL_SWITCH_DN) |
+                                        FIELD_PREP(WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES,
+                                                   WCD_USBSS_SWITCH_SELECT0_DPR_SWITCH_DP));
+               if (ret)
+                       return ret;
+
+               /* Enable DNL_SWITCHES and DPR_SWITCHES */
+               ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+                                     WCD_USBSS_SWITCH_SETTINGS_ENABLE_DPR_SWITCHES |
+                                     WCD_USBSS_SWITCH_SETTINGS_ENABLE_DNL_SWITCHES);
+               if (ret)
+                       return ret;
+       }
+
+       /* Enable DP AUX muxes */
+       if (enable_dp) {
+               /* Update Leakage Canceller Coefficient for AUXP pins */
+               ret = regmap_update_bits(usbss->regmap, WCD_USBSS_DISP_AUXP_CTL,
+                                        WCD_USBSS_DISP_AUXP_CTL_LK_CANCEL_TRK_COEFF,
+                                        FIELD_PREP(WCD_USBSS_DISP_AUXP_CTL_LK_CANCEL_TRK_COEFF,
+                                                   5));
+               if (ret)
+                       return ret;
+
+               ret = regmap_set_bits(usbss->regmap, WCD_USBSS_DISP_AUXP_THRESH,
+                                     WCD_USBSS_DISP_AUXP_THRESH_DISP_AUXP_OVPON_CM);
+               if (ret)
+                       return ret;
+
+               if (reverse)
+                       /* Select MG2 for AUXP and MG1 for AUXM */
+                       ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+                                                WCD_USBSS_SWITCH_SELECT0_DP_AUXP_SWITCHES |
+                                                WCD_USBSS_SWITCH_SELECT0_DP_AUXM_SWITCHES,
+                                                WCD_USBSS_SWITCH_SELECT0_DP_AUXP_SWITCHES);
+               else
+                       /* Select MG1 for AUXP and MG2 for AUXM */
+                       ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+                                                WCD_USBSS_SWITCH_SELECT0_DP_AUXP_SWITCHES |
+                                                WCD_USBSS_SWITCH_SELECT0_DP_AUXM_SWITCHES,
+                                                WCD_USBSS_SWITCH_SELECT0_DP_AUXM_SWITCHES);
+               if (ret)
+                       return ret;
+
+               /* Enable DP_AUXP_TO_MGX and DP_AUXM_TO_MGX switches */
+               ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+                                     WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXP_TO_MGX_SWITCHES |
+                                     WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXM_TO_MGX_SWITCHES);
+
+               /* 15us to allow the SBU switch to turn on again */
+               usleep_range(15, 1000);
+       }
+
+       return 0;
+}
+
+static int wcd939x_usbss_switch_set(struct typec_switch_dev *sw,
+                                   enum typec_orientation orientation)
+{
+       struct wcd939x_usbss *usbss = typec_switch_get_drvdata(sw);
+       int ret = 0;
+
+       mutex_lock(&usbss->lock);
+
+       if (usbss->orientation != orientation) {
+               usbss->orientation = orientation;
+
+               ret = wcd939x_usbss_set(usbss);
+       }
+
+       mutex_unlock(&usbss->lock);
+
+       if (ret)
+               return ret;
+
+       /* Report orientation to codec after switch has been done */
+       return typec_switch_set(usbss->codec_switch, orientation);
+}
+
+static int wcd939x_usbss_mux_set(struct typec_mux_dev *mux,
+                                struct typec_mux_state *state)
+{
+       struct wcd939x_usbss *usbss = typec_mux_get_drvdata(mux);
+       int ret = 0;
+
+       mutex_lock(&usbss->lock);
+
+       if (usbss->mode != state->mode) {
+               usbss->mode = state->mode;
+
+               if (state->alt)
+                       usbss->svid = state->alt->svid;
+               else
+                       usbss->svid = 0; // No SVID
+
+               ret = wcd939x_usbss_set(usbss);
+       }
+
+       mutex_unlock(&usbss->lock);
+
+       if (ret)
+               return ret;
+
+       /* Report event to codec after switch has been done */
+       return typec_mux_set(usbss->codec, state);
+}
+
+static int wcd939x_usbss_probe(struct i2c_client *client)
+{
+       struct device *dev = &client->dev;
+       struct typec_switch_desc sw_desc = { };
+       struct typec_mux_desc mux_desc = { };
+       struct wcd939x_usbss *usbss;
+       int ret;
+
+       usbss = devm_kzalloc(dev, sizeof(*usbss), GFP_KERNEL);
+       if (!usbss)
+               return -ENOMEM;
+
+       usbss->client = client;
+       mutex_init(&usbss->lock);
+
+       usbss->regmap = devm_regmap_init_i2c(client, &wcd939x_usbss_regmap_config);
+       if (IS_ERR(usbss->regmap))
+               return dev_err_probe(dev, PTR_ERR(usbss->regmap), "failed to initialize regmap\n");
+
+       usbss->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(usbss->reset_gpio))
+               return dev_err_probe(dev, PTR_ERR(usbss->reset_gpio),
+                                    "unable to acquire reset gpio\n");
+
+       usbss->vdd_supply = devm_regulator_get_optional(dev, "vdd");
+       if (IS_ERR(usbss->vdd_supply))
+               return PTR_ERR(usbss->vdd_supply);
+
+       /* Get Codec's MUX & Switch devices */
+       usbss->codec = fwnode_typec_mux_get(dev->fwnode);
+       if (IS_ERR(usbss->codec))
+               return dev_err_probe(dev, PTR_ERR(usbss->codec),
+                                    "failed to acquire codec mode-switch\n");
+
+       usbss->codec_switch = fwnode_typec_switch_get(dev->fwnode);
+       if (IS_ERR(usbss->codec_switch)) {
+               ret = dev_err_probe(dev, PTR_ERR(usbss->codec_switch),
+                                   "failed to acquire codec orientation-switch\n");
+               goto err_mux_put;
+       }
+
+       usbss->mode = TYPEC_STATE_SAFE;
+       usbss->orientation = TYPEC_ORIENTATION_NONE;
+
+       gpiod_set_value(usbss->reset_gpio, 1);
+
+       ret = regulator_enable(usbss->vdd_supply);
+       if (ret) {
+               dev_err(dev, "Failed to enable vdd: %d\n", ret);
+               goto err_mux_switch;
+       }
+
+       msleep(20);
+
+       gpiod_set_value(usbss->reset_gpio, 0);
+
+       msleep(20);
+
+       /* Disable standby */
+       ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_USB_SS_CNTL,
+                               WCD_USBSS_USB_SS_CNTL_STANDBY_STATE);
+       if (ret)
+               goto err_regulator_disable;
+
+       /* Set manual mode by default */
+       ret = regmap_update_bits(usbss->regmap, WCD_USBSS_FUNCTION_ENABLE,
+                                WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT,
+                                FIELD_PREP(WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT,
+                                           WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT_MANUAL));
+       if (ret)
+               goto err_regulator_disable;
+
+       /* Enable dynamic boosting for DP and DN */
+       ret = regmap_set_bits(usbss->regmap, WCD_USBSS_DP_DN_MISC1,
+                             WCD_USBSS_DP_DN_MISC1_DP_PCOMP_2X_DYN_BST_ON_EN |
+                             WCD_USBSS_DP_DN_MISC1_DN_PCOMP_2X_DYN_BST_ON_EN);
+       if (ret)
+               goto err_regulator_disable;
+
+       /* Enable dynamic boosting for MG1 OVP */
+       ret = regmap_set_bits(usbss->regmap, WCD_USBSS_MG1_MISC,
+                             WCD_USBSS_MG1_MISC_PCOMP_2X_DYN_BST_ON_EN);
+       if (ret)
+               goto err_regulator_disable;
+
+       /* Enable dynamic boosting for MG2 OVP */
+       ret = regmap_set_bits(usbss->regmap, WCD_USBSS_MG2_MISC,
+                             WCD_USBSS_MG2_MISC_PCOMP_2X_DYN_BST_ON_EN);
+       if (ret)
+               goto err_regulator_disable;
+
+       /* Write 0xFF to WCD_USBSS_CPLDO_CTL2 */
+       ret = regmap_set_bits(usbss->regmap, WCD_USBSS_CPLDO_CTL2, 0xff);
+       if (ret)
+               goto err_regulator_disable;
+
+       /* Set RCO_EN: WCD_USBSS_USB_SS_CNTL Bit<3> --> 0x0 --> 0x1 */
+       ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_USB_SS_CNTL,
+                               WCD_USBSS_USB_SS_CNTL_RCO_EN);
+       if (ret)
+               goto err_regulator_disable;
+
+       ret = regmap_set_bits(usbss->regmap, WCD_USBSS_USB_SS_CNTL,
+                             WCD_USBSS_USB_SS_CNTL_RCO_EN);
+       if (ret)
+               goto err_regulator_disable;
+
+       /* Disable all switches but enable the mux */
+       ret = regmap_write(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+                          WCD_USBSS_SWITCH_SETTINGS_ENABLE_DEVICE_ENABLE);
+       if (ret)
+               goto err_regulator_disable;
+
+       /* Setup in SAFE mode */
+       ret = wcd939x_usbss_set(usbss);
+       if (ret)
+               goto err_regulator_disable;
+
+       sw_desc.drvdata = usbss;
+       sw_desc.fwnode = dev_fwnode(dev);
+       sw_desc.set = wcd939x_usbss_switch_set;
+
+       usbss->sw = typec_switch_register(dev, &sw_desc);
+       if (IS_ERR(usbss->sw)) {
+               ret = dev_err_probe(dev, PTR_ERR(usbss->sw), "failed to register typec switch\n");
+               goto err_regulator_disable;
+       }
+
+       mux_desc.drvdata = usbss;
+       mux_desc.fwnode = dev_fwnode(dev);
+       mux_desc.set = wcd939x_usbss_mux_set;
+
+       usbss->mux = typec_mux_register(dev, &mux_desc);
+       if (IS_ERR(usbss->mux)) {
+               ret = dev_err_probe(dev, PTR_ERR(usbss->mux), "failed to register typec mux\n");
+               goto err_switch_unregister;
+       }
+
+       i2c_set_clientdata(client, usbss);
+
+       return 0;
+
+err_switch_unregister:
+       typec_switch_unregister(usbss->sw);
+
+err_regulator_disable:
+       regulator_disable(usbss->vdd_supply);
+
+err_mux_switch:
+       typec_switch_put(usbss->codec_switch);
+
+err_mux_put:
+       typec_mux_put(usbss->codec);
+
+       return ret;
+}
+
+static void wcd939x_usbss_remove(struct i2c_client *client)
+{
+       struct wcd939x_usbss *usbss = i2c_get_clientdata(client);
+
+       typec_mux_unregister(usbss->mux);
+       typec_switch_unregister(usbss->sw);
+
+       regulator_disable(usbss->vdd_supply);
+
+       typec_switch_put(usbss->codec_switch);
+       typec_mux_put(usbss->codec);
+}
+
+static const struct i2c_device_id wcd939x_usbss_table[] = {
+       { "wcd9390-usbss" },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, wcd939x_usbss_table);
+
+static const struct of_device_id wcd939x_usbss_of_table[] = {
+       { .compatible = "qcom,wcd9390-usbss" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wcd939x_usbss_of_table);
+
+static struct i2c_driver wcd939x_usbss_driver = {
+       .driver = {
+               .name = "wcd939x-usbss",
+               .of_match_table = wcd939x_usbss_of_table,
+       },
+       .probe          = wcd939x_usbss_probe,
+       .remove         = wcd939x_usbss_remove,
+       .id_table       = wcd939x_usbss_table,
+};
+module_i2c_driver(wcd939x_usbss_driver);
+
+MODULE_DESCRIPTION("Qualcomm WCD939x USBSS driver");
+MODULE_LICENSE("GPL");
index 85d015cdbe1fe1b530720ae34bdddf08dae10929..b9cca2be76fce447970ba08beb6419262bf95a14 100644 (file)
@@ -468,7 +468,7 @@ static struct device_type pd_capabilities_type = {
 /**
  * usb_power_delivery_register_capabilities - Register a set of capabilities.
  * @pd: The USB PD instance that the capabilities belong to.
- * @desc: Description of the Capablities Message.
+ * @desc: Description of the Capabilities Message.
  *
  * This function registers a Capabilities Message described in @desc. The
  * capabilities will have their own sub-directory under @pd in sysfs.
@@ -571,7 +571,7 @@ static void pd_release(struct device *dev)
 {
        struct usb_power_delivery *pd = to_usb_power_delivery(dev);
 
-       ida_simple_remove(&pd_ida, pd->id);
+       ida_free(&pd_ida, pd->id);
        kfree(pd);
 }
 
@@ -616,7 +616,7 @@ usb_power_delivery_register(struct device *parent, struct usb_power_delivery_des
        if (!pd)
                return ERR_PTR(-ENOMEM);
 
-       ret = ida_simple_get(&pd_ida, 0, 0, GFP_KERNEL);
+       ret = ida_alloc(&pd_ida, GFP_KERNEL);
        if (ret < 0) {
                kfree(pd);
                return ERR_PTR(ret);
index 9454b12a073c96aecb2241a90b562c2190bfc2b5..7fb966fd639b32296f443a8701e4968c5084bad5 100644 (file)
@@ -92,11 +92,16 @@ static void max_tcpci_init_regs(struct max_tcpci_chip *chip)
                return;
        }
 
+       /* Vconn Over Current Protection */
+       ret = max_tcpci_write8(chip, TCPC_FAULT_STATUS_MASK, TCPC_FAULT_STATUS_MASK_VCONN_OC);
+       if (ret < 0)
+               return;
+
        alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_TX_FAILED |
                TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_RX_STATUS | TCPC_ALERT_CC_STATUS |
                TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF | TCPC_ALERT_POWER_STATUS |
                /* Enable Extended alert for detecting Fast Role Swap Signal */
-               TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS;
+               TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS | TCPC_ALERT_FAULT;
 
        ret = max_tcpci_write16(chip, TCPC_ALERT_MASK, alert_mask);
        if (ret < 0) {
@@ -295,6 +300,19 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
                }
        }
 
+       if (status & TCPC_ALERT_FAULT) {
+               ret = max_tcpci_read8(chip, TCPC_FAULT_STATUS, &reg_status);
+               if (ret < 0)
+                       return ret;
+
+               ret = max_tcpci_write8(chip, TCPC_FAULT_STATUS, reg_status);
+               if (ret < 0)
+                       return ret;
+
+               if (reg_status & TCPC_FAULT_STATUS_VCONN_OC)
+                       tcpm_port_error_recovery(chip->port);
+       }
+
        if (status & TCPC_ALERT_EXTND) {
                ret = max_tcpci_read8(chip, TCPC_ALERT_EXTENDED, &reg_status);
                if (ret < 0)
index bfb6f9481e87f7aa32464ea344278eba6613b5ff..f7d7daa60c8dc98b04d29f10b2e1377b02124d61 100644 (file)
@@ -251,6 +251,7 @@ enum frs_typec_current {
 #define TCPM_FRS_EVENT         BIT(3)
 #define TCPM_SOURCING_VBUS     BIT(4)
 #define TCPM_PORT_CLEAN                BIT(5)
+#define TCPM_PORT_ERROR                BIT(6)
 
 #define LOG_BUFFER_ENTRIES     1024
 #define LOG_BUFFER_ENTRY_SIZE  128
@@ -296,6 +297,15 @@ struct pd_pps_data {
        bool active;
 };
 
+struct pd_data {
+       struct usb_power_delivery *pd;
+       struct usb_power_delivery_capabilities *source_cap;
+       struct usb_power_delivery_capabilities_desc source_desc;
+       struct usb_power_delivery_capabilities *sink_cap;
+       struct usb_power_delivery_capabilities_desc sink_desc;
+       unsigned int operating_snk_mw;
+};
+
 struct tcpm_port {
        struct device *dev;
 
@@ -397,12 +407,14 @@ struct tcpm_port {
        unsigned int rx_msgid;
 
        /* USB PD objects */
-       struct usb_power_delivery *pd;
+       struct usb_power_delivery **pds;
+       struct pd_data **pd_list;
        struct usb_power_delivery_capabilities *port_source_caps;
        struct usb_power_delivery_capabilities *port_sink_caps;
        struct usb_power_delivery *partner_pd;
        struct usb_power_delivery_capabilities *partner_source_caps;
        struct usb_power_delivery_capabilities *partner_sink_caps;
+       struct usb_power_delivery *selected_pd;
 
        /* Partner capabilities/requests */
        u32 sink_request;
@@ -412,6 +424,7 @@ struct tcpm_port {
        unsigned int nr_sink_caps;
 
        /* Local capabilities */
+       unsigned int pd_count;
        u32 src_pdo[PDO_MAX_OBJECTS];
        unsigned int nr_src_pdo;
        u32 snk_pdo[PDO_MAX_OBJECTS];
@@ -2847,7 +2860,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                           PD_MSG_CTRL_NOT_SUPP,
                                           NONE_AMS);
                } else {
-                       if (port->send_discover) {
+                       if (port->send_discover && port->negotiated_rev < PD_REV30) {
                                tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                                break;
                        }
@@ -2863,7 +2876,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                                           PD_MSG_CTRL_NOT_SUPP,
                                           NONE_AMS);
                } else {
-                       if (port->send_discover) {
+                       if (port->send_discover && port->negotiated_rev < PD_REV30) {
                                tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                                break;
                        }
@@ -2872,7 +2885,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
                }
                break;
        case PD_CTRL_VCONN_SWAP:
-               if (port->send_discover) {
+               if (port->send_discover && port->negotiated_rev < PD_REV30) {
                        tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
                        break;
                }
@@ -4401,7 +4414,8 @@ static void run_state_machine(struct tcpm_port *port)
                        tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
                tcpm_swap_complete(port, 0);
                tcpm_typec_connect(port);
-               mod_enable_frs_delayed_work(port, 0);
+               if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
+                       mod_enable_frs_delayed_work(port, 0);
                tcpm_pps_complete(port, port->pps_status);
 
                if (port->ams != NONE_AMS)
@@ -4862,8 +4876,7 @@ static void run_state_machine(struct tcpm_port *port)
                break;
        case PORT_RESET:
                tcpm_reset_port(port);
-               tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
-                           TYPEC_CC_RD : tcpm_rp_cc(port));
+               tcpm_set_cc(port, TYPEC_CC_OPEN);
                tcpm_set_state(port, PORT_RESET_WAIT_OFF,
                               PD_T_ERROR_RECOVERY);
                break;
@@ -5487,6 +5500,10 @@ static void tcpm_pd_event_handler(struct kthread_work *work)
                                        tcpm_set_state(port, tcpm_default_state(port), 0);
                        }
                }
+               if (events & TCPM_PORT_ERROR) {
+                       tcpm_log(port, "port triggering error recovery");
+                       tcpm_set_state(port, ERROR_RECOVERY, 0);
+               }
 
                spin_lock(&port->pd_event_lock);
        }
@@ -5554,6 +5571,15 @@ bool tcpm_port_is_toggling(struct tcpm_port *port)
 }
 EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
 
+void tcpm_port_error_recovery(struct tcpm_port *port)
+{
+       spin_lock(&port->pd_event_lock);
+       port->pd_events |= TCPM_PORT_ERROR;
+       spin_unlock(&port->pd_event_lock);
+       kthread_queue_work(port->wq, &port->event_work);
+}
+EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
+
 static void tcpm_enable_frs_work(struct kthread_work *work)
 {
        struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
@@ -6045,12 +6071,114 @@ port_unlock:
        return 0;
 }
 
+static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
+{
+       int i;
+
+       for (i = 0; port->pd_list[i]; i++) {
+               if (port->pd_list[i]->pd == pd)
+                       return port->pd_list[i];
+       }
+
+       return ERR_PTR(-ENODATA);
+}
+
+static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
+{
+       struct tcpm_port *port = typec_get_drvdata(p);
+
+       return port->pds;
+}
+
+static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
+{
+       struct tcpm_port *port = typec_get_drvdata(p);
+       struct pd_data *data;
+       int i, ret = 0;
+
+       mutex_lock(&port->lock);
+
+       if (port->selected_pd == pd)
+               goto unlock;
+
+       data = tcpm_find_pd_data(port, pd);
+       if (IS_ERR(data)) {
+               ret = PTR_ERR(data);
+               goto unlock;
+       }
+
+       if (data->sink_desc.pdo[0]) {
+               for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
+                       port->snk_pdo[i] = data->sink_desc.pdo[i];
+               port->nr_snk_pdo = i + 1;
+               port->operating_snk_mw = data->operating_snk_mw;
+       }
+
+       if (data->source_desc.pdo[0]) {
+               for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
+                       port->snk_pdo[i] = data->source_desc.pdo[i];
+               port->nr_src_pdo = i + 1;
+       }
+
+       switch (port->state) {
+       case SRC_UNATTACHED:
+       case SRC_ATTACH_WAIT:
+       case SRC_TRYWAIT:
+               tcpm_set_cc(port, tcpm_rp_cc(port));
+               break;
+       case SRC_SEND_CAPABILITIES:
+       case SRC_SEND_CAPABILITIES_TIMEOUT:
+       case SRC_NEGOTIATE_CAPABILITIES:
+       case SRC_READY:
+       case SRC_WAIT_NEW_CAPABILITIES:
+               port->caps_count = 0;
+               port->upcoming_state = SRC_SEND_CAPABILITIES;
+               ret = tcpm_ams_start(port, POWER_NEGOTIATION);
+               if (ret == -EAGAIN) {
+                       port->upcoming_state = INVALID_STATE;
+                       goto unlock;
+               }
+               break;
+       case SNK_NEGOTIATE_CAPABILITIES:
+       case SNK_NEGOTIATE_PPS_CAPABILITIES:
+       case SNK_READY:
+       case SNK_TRANSITION_SINK:
+       case SNK_TRANSITION_SINK_VBUS:
+               if (port->pps_data.active)
+                       port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
+               else if (port->pd_capable)
+                       port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
+               else
+                       break;
+
+               port->update_sink_caps = true;
+
+               ret = tcpm_ams_start(port, POWER_NEGOTIATION);
+               if (ret == -EAGAIN) {
+                       port->upcoming_state = INVALID_STATE;
+                       goto unlock;
+               }
+               break;
+       default:
+               break;
+       }
+
+       port->port_source_caps = data->source_cap;
+       port->port_sink_caps = data->sink_cap;
+       port->selected_pd = pd;
+unlock:
+       mutex_unlock(&port->lock);
+       return ret;
+}
+
 static const struct typec_operations tcpm_ops = {
        .try_role = tcpm_try_role,
        .dr_set = tcpm_dr_set,
        .pr_set = tcpm_pr_set,
        .vconn_set = tcpm_vconn_set,
-       .port_type_set = tcpm_port_type_set
+       .port_type_set = tcpm_port_type_set,
+       .pd_get = tcpm_pd_get,
+       .pd_set = tcpm_pd_set
 };
 
 void tcpm_tcpc_reset(struct tcpm_port *port)
@@ -6064,58 +6192,63 @@ EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
 
 static void tcpm_port_unregister_pd(struct tcpm_port *port)
 {
-       usb_power_delivery_unregister_capabilities(port->port_sink_caps);
+       int i;
+
        port->port_sink_caps = NULL;
-       usb_power_delivery_unregister_capabilities(port->port_source_caps);
        port->port_source_caps = NULL;
-       usb_power_delivery_unregister(port->pd);
-       port->pd = NULL;
+       for (i = 0; i < port->pd_count; i++) {
+               usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
+               kfree(port->pd_list[i]->sink_cap);
+               usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
+               kfree(port->pd_list[i]->source_cap);
+               devm_kfree(port->dev, port->pd_list[i]);
+               port->pd_list[i] = NULL;
+               usb_power_delivery_unregister(port->pds[i]);
+               port->pds[i] = NULL;
+       }
 }
 
 static int tcpm_port_register_pd(struct tcpm_port *port)
 {
        struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
-       struct usb_power_delivery_capabilities_desc caps = { };
        struct usb_power_delivery_capabilities *cap;
-       int ret;
+       int ret, i;
 
        if (!port->nr_src_pdo && !port->nr_snk_pdo)
                return 0;
 
-       port->pd = usb_power_delivery_register(port->dev, &desc);
-       if (IS_ERR(port->pd)) {
-               ret = PTR_ERR(port->pd);
-               goto err_unregister;
-       }
-
-       if (port->nr_src_pdo) {
-               memcpy_and_pad(caps.pdo, sizeof(caps.pdo), port->src_pdo,
-                              port->nr_src_pdo * sizeof(u32), 0);
-               caps.role = TYPEC_SOURCE;
-
-               cap = usb_power_delivery_register_capabilities(port->pd, &caps);
-               if (IS_ERR(cap)) {
-                       ret = PTR_ERR(cap);
+       for (i = 0; i < port->pd_count; i++) {
+               port->pds[i] = usb_power_delivery_register(port->dev, &desc);
+               if (IS_ERR(port->pds[i])) {
+                       ret = PTR_ERR(port->pds[i]);
                        goto err_unregister;
                }
-
-               port->port_source_caps = cap;
-       }
-
-       if (port->nr_snk_pdo) {
-               memcpy_and_pad(caps.pdo, sizeof(caps.pdo), port->snk_pdo,
-                              port->nr_snk_pdo * sizeof(u32), 0);
-               caps.role = TYPEC_SINK;
-
-               cap = usb_power_delivery_register_capabilities(port->pd, &caps);
-               if (IS_ERR(cap)) {
-                       ret = PTR_ERR(cap);
-                       goto err_unregister;
+               port->pd_list[i]->pd = port->pds[i];
+
+               if (port->pd_list[i]->source_desc.pdo[0]) {
+                       cap = usb_power_delivery_register_capabilities(port->pds[i],
+                                                               &port->pd_list[i]->source_desc);
+                       if (IS_ERR(cap)) {
+                               ret = PTR_ERR(cap);
+                               goto err_unregister;
+                       }
+                       port->pd_list[i]->source_cap = cap;
                }
 
-               port->port_sink_caps = cap;
+               if (port->pd_list[i]->sink_desc.pdo[0]) {
+                       cap = usb_power_delivery_register_capabilities(port->pds[i],
+                                                               &port->pd_list[i]->sink_desc);
+                       if (IS_ERR(cap)) {
+                               ret = PTR_ERR(cap);
+                               goto err_unregister;
+                       }
+                       port->pd_list[i]->sink_cap = cap;
+               }
        }
 
+       port->port_source_caps = port->pd_list[0]->source_cap;
+       port->port_sink_caps = port->pd_list[0]->sink_cap;
+       port->selected_pd = port->pds[0];
        return 0;
 
 err_unregister:
@@ -6124,12 +6257,15 @@ err_unregister:
        return ret;
 }
 
-static int tcpm_fw_get_caps(struct tcpm_port *port,
-                           struct fwnode_handle *fwnode)
+static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
 {
+       struct fwnode_handle *capabilities, *child, *caps = NULL;
+       unsigned int nr_src_pdo, nr_snk_pdo;
        const char *opmode_str;
-       int ret;
-       u32 mw, frs_current;
+       u32 *src_pdo, *snk_pdo;
+       u32 uw, frs_current;
+       int ret = 0, i;
+       int mode;
 
        if (!fwnode)
                return -EINVAL;
@@ -6147,30 +6283,20 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
        if (ret < 0)
                return ret;
 
+       mode = 0;
+
+       if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
+               port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;
+
+       if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
+               port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;
+
        port->port_type = port->typec_caps.type;
        port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
-
        port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
-       if (port->port_type == TYPEC_PORT_SNK)
-               goto sink;
-
-       /* Get Source PDOs for the PD port or Source Rp value for the non-PD port */
-       if (port->pd_supported) {
-               ret = fwnode_property_count_u32(fwnode, "source-pdos");
-               if (ret == 0)
-                       return -EINVAL;
-               else if (ret < 0)
-                       return ret;
+       port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
 
-               port->nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
-               ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
-                                                    port->src_pdo, port->nr_src_pdo);
-               if (ret)
-                       return ret;
-               ret = tcpm_validate_caps(port, port->src_pdo, port->nr_src_pdo);
-               if (ret)
-                       return ret;
-       } else {
+       if (!port->pd_supported) {
                ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
                if (ret)
                        return ret;
@@ -6178,45 +6304,150 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
                if (ret < 0)
                        return ret;
                port->src_rp = tcpm_pwr_opmode_to_rp(ret);
-       }
-
-       if (port->port_type == TYPEC_PORT_SRC)
                return 0;
+       }
 
-sink:
-       port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
-
-       if (!port->pd_supported)
-               return 0;
-
-       /* Get sink pdos */
-       ret = fwnode_property_count_u32(fwnode, "sink-pdos");
-       if (ret <= 0)
-               return -EINVAL;
-
-       port->nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
-       ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
-                                            port->snk_pdo, port->nr_snk_pdo);
-       if ((ret < 0) || tcpm_validate_caps(port, port->snk_pdo,
-                                           port->nr_snk_pdo))
-               return -EINVAL;
-
-       if (fwnode_property_read_u32(fwnode, "op-sink-microwatt", &mw) < 0)
-               return -EINVAL;
-       port->operating_snk_mw = mw / 1000;
+       /* The following code are applicable to pd-capable ports, i.e. pd_supported is true. */
 
        /* FRS can only be supported by DRP ports */
        if (port->port_type == TYPEC_PORT_DRP) {
                ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
                                               &frs_current);
-               if (ret >= 0 && frs_current <= FRS_5V_3A)
+               if (!ret && frs_current <= FRS_5V_3A)
                        port->new_source_frs_current = frs_current;
+
+               if (ret)
+                       ret = 0;
        }
 
+       /* For the backward compatibility, "capabilities" node is optional. */
+       capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
+       if (!capabilities) {
+               port->pd_count = 1;
+       } else {
+               fwnode_for_each_child_node(capabilities, child)
+                       port->pd_count++;
+
+               if (!port->pd_count) {
+                       ret = -ENODATA;
+                       goto put_capabilities;
+               }
+       }
+
+       port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
+                                GFP_KERNEL);
+       if (!port->pds) {
+               ret = -ENOMEM;
+               goto put_capabilities;
+       }
+
+       port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
+                                    GFP_KERNEL);
+       if (!port->pd_list) {
+               ret = -ENOMEM;
+               goto put_capabilities;
+       }
+
+       for (i = 0; i < port->pd_count; i++) {
+               port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
+               if (!port->pd_list[i]) {
+                       ret = -ENOMEM;
+                       goto put_capabilities;
+               }
+
+               src_pdo = port->pd_list[i]->source_desc.pdo;
+               port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
+               snk_pdo = port->pd_list[i]->sink_desc.pdo;
+               port->pd_list[i]->sink_desc.role = TYPEC_SINK;
+
+               /* If "capabilities" is NULL, fall back to single pd cap population. */
+               if (!capabilities)
+                       caps = fwnode;
+               else
+                       caps = fwnode_get_next_child_node(capabilities, caps);
+
+               if (port->port_type != TYPEC_PORT_SNK) {
+                       ret = fwnode_property_count_u32(caps, "source-pdos");
+                       if (ret == 0) {
+                               ret = -EINVAL;
+                               goto put_caps;
+                       }
+                       if (ret < 0)
+                               goto put_caps;
+
+                       nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
+                       ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
+                                                            nr_src_pdo);
+                       if (ret)
+                               goto put_caps;
+
+                       ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
+                       if (ret)
+                               goto put_caps;
+
+                       if (i == 0) {
+                               port->nr_src_pdo = nr_src_pdo;
+                               memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
+                                              port->pd_list[0]->source_desc.pdo,
+                                              sizeof(u32) * nr_src_pdo,
+                                              0);
+                       }
+               }
+
+               if (port->port_type != TYPEC_PORT_SRC) {
+                       ret = fwnode_property_count_u32(caps, "sink-pdos");
+                       if (ret == 0) {
+                               ret = -EINVAL;
+                               goto put_caps;
+                       }
+
+                       if (ret < 0)
+                               goto put_caps;
+
+                       nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
+                       ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
+                                                            nr_snk_pdo);
+                       if (ret)
+                               goto put_caps;
+
+                       ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
+                       if (ret)
+                               goto put_caps;
+
+                       if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
+                               ret = -EINVAL;
+                               goto put_caps;
+                       }
+
+                       port->pd_list[i]->operating_snk_mw = uw / 1000;
+
+                       if (i == 0) {
+                               port->nr_snk_pdo = nr_snk_pdo;
+                               memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
+                                              port->pd_list[0]->sink_desc.pdo,
+                                              sizeof(u32) * nr_snk_pdo,
+                                              0);
+                               port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
+                       }
+               }
+       }
+
+put_caps:
+       if (caps != fwnode)
+               fwnode_handle_put(caps);
+put_capabilities:
+       fwnode_handle_put(capabilities);
+       return ret;
+}
+
+static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
+{
+       int ret;
+
        /* sink-vdos is optional */
        ret = fwnode_property_count_u32(fwnode, "sink-vdos");
        if (ret < 0)
-               ret = 0;
+               return 0;
 
        port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
        if (port->nr_snk_vdo) {
@@ -6582,12 +6813,14 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
        tcpm_debugfs_init(port);
 
        err = tcpm_fw_get_caps(port, tcpc->fwnode);
+       if (err < 0)
+               goto out_destroy_wq;
+       err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
        if (err < 0)
                goto out_destroy_wq;
 
        port->try_role = port->typec_caps.prefer_role;
 
-       port->typec_caps.fwnode = tcpc->fwnode;
        port->typec_caps.revision = 0x0120;     /* Type-C spec release 1.2 */
        port->typec_caps.pd_revision = 0x0300;  /* USB-PD spec release 3.0 */
        port->typec_caps.svdm_version = SVDM_VER_2_0;
@@ -6596,7 +6829,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
        port->typec_caps.orientation_aware = 1;
 
        port->partner_desc.identity = &port->partner_ident;
-       port->port_type = port->typec_caps.type;
 
        port->role_sw = usb_role_switch_get(port->dev);
        if (!port->role_sw)
@@ -6615,7 +6847,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
        if (err)
                goto out_role_sw_put;
 
-       port->typec_caps.pd = port->pd;
+       if (port->pds)
+               port->typec_caps.pd = port->pds[0];
 
        port->typec_port = typec_register_port(port->dev, &port->typec_caps);
        if (IS_ERR(port->typec_port)) {
index 196535ad996d080ff50340472e32e00e2445b257..0717cfcd9f8cad4aa838f05237f1cbb794dcaf1d 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/i2c.h>
 #include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/power_supply.h>
@@ -64,6 +65,9 @@
 #define TPS_PBMC_RC    0 /* Return code */
 #define TPS_PBMC_DPCS  2 /* device patch complete status */
 
+/* reset de-assertion to ready for operation */
+#define TPS_SETUP_MS                   1000
+
 enum {
        TPS_PORTINFO_SINK,
        TPS_PORTINFO_SINK_ACCESSORY,
@@ -111,6 +115,8 @@ struct tipd_data {
        void (*trace_power_status)(u16 status);
        void (*trace_status)(u32 status);
        int (*apply_patch)(struct tps6598x *tps);
+       int (*init)(struct tps6598x *tps);
+       int (*reset)(struct tps6598x *tps);
 };
 
 struct tps6598x {
@@ -119,6 +125,7 @@ struct tps6598x {
        struct mutex lock; /* device lock */
        u8 i2c_protocol:1;
 
+       struct gpio_desc *reset;
        struct typec_port *port;
        struct typec_partner *partner;
        struct usb_pd_identity partner_identity;
@@ -323,7 +330,7 @@ static void tps6598x_disconnect(struct tps6598x *tps, u32 status)
 }
 
 static int tps6598x_exec_cmd_tmo(struct tps6598x *tps, const char *cmd,
-                            size_t in_len, u8 *in_data,
+                            size_t in_len, const u8 *in_data,
                             size_t out_len, u8 *out_data,
                             u32 cmd_timeout_ms, u32 res_delay_ms)
 {
@@ -389,7 +396,7 @@ static int tps6598x_exec_cmd_tmo(struct tps6598x *tps, const char *cmd,
 }
 
 static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
-                            size_t in_len, u8 *in_data,
+                            size_t in_len, const u8 *in_data,
                             size_t out_len, u8 *out_data)
 {
        return tps6598x_exec_cmd_tmo(tps, cmd, in_len, in_data,
@@ -866,6 +873,30 @@ tps6598x_register_port(struct tps6598x *tps, struct fwnode_handle *fwnode)
        return 0;
 }
 
+static int tps_request_firmware(struct tps6598x *tps, const struct firmware **fw)
+{
+       const char *firmware_name;
+       int ret;
+
+       ret = device_property_read_string(tps->dev, "firmware-name",
+                                         &firmware_name);
+       if (ret)
+               return ret;
+
+       ret = request_firmware(fw, firmware_name, tps->dev);
+       if (ret) {
+               dev_err(tps->dev, "failed to retrieve \"%s\"\n", firmware_name);
+               return ret;
+       }
+
+       if ((*fw)->size == 0) {
+               release_firmware(*fw);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
 static int
 tps25750_write_firmware(struct tps6598x *tps,
                        u8 bpms_addr, const u8 *data, size_t len)
@@ -954,16 +985,9 @@ static int tps25750_start_patch_burst_mode(struct tps6598x *tps)
        if (ret)
                return ret;
 
-       ret = request_firmware(&fw, firmware_name, tps->dev);
-       if (ret) {
-               dev_err(tps->dev, "failed to retrieve \"%s\"\n", firmware_name);
+       ret = tps_request_firmware(tps, &fw);
+       if (ret)
                return ret;
-       }
-
-       if (fw->size == 0) {
-               ret = -EINVAL;
-               goto release_fw;
-       }
 
        ret = of_property_match_string(np, "reg-names", "patch-address");
        if (ret < 0) {
@@ -1101,6 +1125,76 @@ wait_for_app:
        return 0;
 };
 
+static int tps6598x_apply_patch(struct tps6598x *tps)
+{
+       u8 in = TPS_PTCS_CONTENT_DEV | TPS_PTCS_CONTENT_APP;
+       u8 out[TPS_MAX_LEN] = {0};
+       size_t in_len = sizeof(in);
+       size_t copied_bytes = 0;
+       size_t bytes_left;
+       const struct firmware *fw;
+       const char *firmware_name;
+       int ret;
+
+       ret = device_property_read_string(tps->dev, "firmware-name",
+                                         &firmware_name);
+       if (ret)
+               return ret;
+
+       ret = tps_request_firmware(tps, &fw);
+       if (ret)
+               return ret;
+
+       ret = tps6598x_exec_cmd(tps, "PTCs", in_len, &in,
+                               TPS_PTCS_OUT_BYTES, out);
+       if (ret || out[TPS_PTCS_STATUS] == TPS_PTCS_STATUS_FAIL) {
+               if (!ret)
+                       ret = -EBUSY;
+               dev_err(tps->dev, "Update start failed (%d)\n", ret);
+               goto release_fw;
+       }
+
+       bytes_left = fw->size;
+       while (bytes_left) {
+               if (bytes_left < TPS_MAX_LEN)
+                       in_len = bytes_left;
+               else
+                       in_len = TPS_MAX_LEN;
+               ret = tps6598x_exec_cmd(tps, "PTCd", in_len,
+                                       fw->data + copied_bytes,
+                                       TPS_PTCD_OUT_BYTES, out);
+               if (ret || out[TPS_PTCD_TRANSFER_STATUS] ||
+                   out[TPS_PTCD_LOADING_STATE] == TPS_PTCD_LOAD_ERR) {
+                       if (!ret)
+                               ret = -EBUSY;
+                       dev_err(tps->dev, "Patch download failed (%d)\n", ret);
+                       goto release_fw;
+               }
+               copied_bytes += in_len;
+               bytes_left -= in_len;
+       }
+
+       ret = tps6598x_exec_cmd(tps, "PTCc", 0, NULL, TPS_PTCC_OUT_BYTES, out);
+       if (ret || out[TPS_PTCC_DEV] || out[TPS_PTCC_APP]) {
+               if (!ret)
+                       ret = -EBUSY;
+               dev_err(tps->dev, "Update completion failed (%d)\n", ret);
+               goto release_fw;
+       }
+       msleep(TPS_SETUP_MS);
+       dev_info(tps->dev, "Firmware update succeeded\n");
+
+release_fw:
+       release_firmware(fw);
+
+       return ret;
+};
+
+static int cd321x_init(struct tps6598x *tps)
+{
+       return 0;
+}
+
 static int tps25750_init(struct tps6598x *tps)
 {
        int ret;
@@ -1119,6 +1213,26 @@ static int tps25750_init(struct tps6598x *tps)
        return 0;
 }
 
+static int tps6598x_init(struct tps6598x *tps)
+{
+       return tps->data->apply_patch(tps);
+}
+
+static int cd321x_reset(struct tps6598x *tps)
+{
+       return 0;
+}
+
+static int tps25750_reset(struct tps6598x *tps)
+{
+       return tps6598x_exec_cmd_tmo(tps, "GAID", 0, NULL, 0, NULL, 2000, 0);
+}
+
+static int tps6598x_reset(struct tps6598x *tps)
+{
+       return 0;
+}
+
 static int
 tps25750_register_port(struct tps6598x *tps, struct fwnode_handle *fwnode)
 {
@@ -1182,7 +1296,6 @@ static int tps6598x_probe(struct i2c_client *client)
        u32 vid;
        int ret;
        u64 mask1;
-       bool is_tps25750;
 
        tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
        if (!tps)
@@ -1191,12 +1304,18 @@ static int tps6598x_probe(struct i2c_client *client)
        mutex_init(&tps->lock);
        tps->dev = &client->dev;
 
+       tps->reset = devm_gpiod_get_optional(tps->dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(tps->reset))
+               return dev_err_probe(tps->dev, PTR_ERR(tps->reset),
+                                    "failed to get reset GPIO\n");
+       if (tps->reset)
+               msleep(TPS_SETUP_MS);
+
        tps->regmap = devm_regmap_init_i2c(client, &tps6598x_regmap_config);
        if (IS_ERR(tps->regmap))
                return PTR_ERR(tps->regmap);
 
-       is_tps25750 = device_is_compatible(tps->dev, "ti,tps25750");
-       if (!is_tps25750) {
+       if (!device_is_compatible(tps->dev, "ti,tps25750")) {
                ret = tps6598x_read32(tps, TPS_REG_VID, &vid);
                if (ret < 0 || !vid)
                        return -ENODEV;
@@ -1239,8 +1358,8 @@ static int tps6598x_probe(struct i2c_client *client)
        if (ret < 0)
                return ret;
 
-       if (is_tps25750 && ret == TPS_MODE_PTCH) {
-               ret = tps25750_init(tps);
+       if (ret == TPS_MODE_PTCH) {
+               ret = tps->data->init(tps);
                if (ret)
                        return ret;
        }
@@ -1328,8 +1447,8 @@ err_clear_mask:
        tps6598x_write64(tps, TPS_REG_INT_MASK1, 0);
 err_reset_controller:
        /* Reset PD controller to remove any applied patch */
-       if (is_tps25750)
-               tps6598x_exec_cmd_tmo(tps, "GAID", 0, NULL, 0, NULL, 2000, 0);
+       tps->data->reset(tps);
+
        return ret;
 }
 
@@ -1346,8 +1465,10 @@ static void tps6598x_remove(struct i2c_client *client)
        usb_role_switch_put(tps->role_sw);
 
        /* Reset PD controller to remove any applied patch */
-       if (device_is_compatible(tps->dev, "ti,tps25750"))
-               tps6598x_exec_cmd_tmo(tps, "GAID", 0, NULL, 0, NULL, 2000, 0);
+       tps->data->reset(tps);
+
+       if (tps->reset)
+               gpiod_set_value_cansleep(tps->reset, 1);
 }
 
 static int __maybe_unused tps6598x_suspend(struct device *dev)
@@ -1358,6 +1479,8 @@ static int __maybe_unused tps6598x_suspend(struct device *dev)
        if (tps->wakeup) {
                disable_irq(client->irq);
                enable_irq_wake(client->irq);
+       } else if (tps->reset) {
+               gpiod_set_value_cansleep(tps->reset, 1);
        }
 
        if (!client->irq)
@@ -1376,8 +1499,8 @@ static int __maybe_unused tps6598x_resume(struct device *dev)
        if (ret < 0)
                return ret;
 
-       if (device_is_compatible(tps->dev, "ti,tps25750") && ret == TPS_MODE_PTCH) {
-               ret = tps25750_init(tps);
+       if (ret == TPS_MODE_PTCH) {
+               ret = tps->data->init(tps);
                if (ret)
                        return ret;
        }
@@ -1385,6 +1508,9 @@ static int __maybe_unused tps6598x_resume(struct device *dev)
        if (tps->wakeup) {
                disable_irq_wake(client->irq);
                enable_irq(client->irq);
+       } else if (tps->reset) {
+               gpiod_set_value_cansleep(tps->reset, 0);
+               msleep(TPS_SETUP_MS);
        }
 
        if (!client->irq)
@@ -1403,6 +1529,8 @@ static const struct tipd_data cd321x_data = {
        .register_port = tps6598x_register_port,
        .trace_power_status = trace_tps6598x_power_status,
        .trace_status = trace_tps6598x_status,
+       .init = cd321x_init,
+       .reset = cd321x_reset,
 };
 
 static const struct tipd_data tps6598x_data = {
@@ -1410,6 +1538,9 @@ static const struct tipd_data tps6598x_data = {
        .register_port = tps6598x_register_port,
        .trace_power_status = trace_tps6598x_power_status,
        .trace_status = trace_tps6598x_status,
+       .apply_patch = tps6598x_apply_patch,
+       .init = tps6598x_init,
+       .reset = tps6598x_reset,
 };
 
 static const struct tipd_data tps25750_data = {
@@ -1418,6 +1549,8 @@ static const struct tipd_data tps25750_data = {
        .trace_power_status = trace_tps25750_power_status,
        .trace_status = trace_tps25750_status,
        .apply_patch = tps25750_apply_patch,
+       .init = tps25750_init,
+       .reset = tps25750_reset,
 };
 
 static const struct of_device_id tps6598x_of_match[] = {
index 01609bf509e4be7e393ac7eddf651c05ec0cc23d..89b24519463a19bb608b23e620242479dacea5f0 100644 (file)
 /* SLEEP CONF REG */
 #define TPS_SLEEP_CONF_SLEEP_MODE_ALLOWED      BIT(0)
 
+/* Start Patch Download Sequence */
+#define TPS_PTCS_CONTENT_APP                   BIT(0)
+#define TPS_PTCS_CONTENT_DEV                   BIT(1)
+#define TPS_PTCS_OUT_BYTES                     4
+#define TPS_PTCS_STATUS                                1
+
+#define TPS_PTCS_STATUS_FAIL                   0x80
+/* Patch Download */
+#define TPS_PTCD_OUT_BYTES                     10
+#define TPS_PTCD_TRANSFER_STATUS               1
+#define TPS_PTCD_LOADING_STATE                 2
+
+#define TPS_PTCD_LOAD_ERR                      0x09
+/* Patch Download Complete */
+#define TPS_PTCC_OUT_BYTES                     4
+#define TPS_PTCC_DEV                           2
+#define TPS_PTCC_APP                           3
+
 #endif /* __TPS6598X_H__ */
index 5392ec6989592041f87b96a7af1479621be799a0..14f5a7bfae2e92873e405b369ca8ce5620d856c0 100644 (file)
@@ -938,7 +938,9 @@ static void ucsi_handle_connector_change(struct work_struct *work)
 
        clear_bit(EVENT_PENDING, &con->ucsi->flags);
 
+       mutex_lock(&ucsi->ppm_lock);
        ret = ucsi_acknowledge_connector_change(ucsi);
+       mutex_unlock(&ucsi->ppm_lock);
        if (ret)
                dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
 
index 6bbf490ac4010e9ad31a140bd484ec40077f0af6..928eacbeb21ac4cc5b8857644969bff7aba7a8a1 100644 (file)
@@ -25,6 +25,8 @@ struct ucsi_acpi {
        unsigned long flags;
        guid_t guid;
        u64 cmd;
+       bool dell_quirk_probed;
+       bool dell_quirk_active;
 };
 
 static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
@@ -73,9 +75,13 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
                                const void *val, size_t val_len)
 {
        struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+       bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
        int ret;
 
-       set_bit(COMMAND_PENDING, &ua->flags);
+       if (ack)
+               set_bit(ACK_PENDING, &ua->flags);
+       else
+               set_bit(COMMAND_PENDING, &ua->flags);
 
        ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
        if (ret)
@@ -85,7 +91,10 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
                ret = -ETIMEDOUT;
 
 out_clear_bit:
-       clear_bit(COMMAND_PENDING, &ua->flags);
+       if (ack)
+               clear_bit(ACK_PENDING, &ua->flags);
+       else
+               clear_bit(COMMAND_PENDING, &ua->flags);
 
        return ret;
 }
@@ -119,12 +128,73 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
        .async_write = ucsi_acpi_async_write
 };
 
-static const struct dmi_system_id zenbook_dmi_id[] = {
+/*
+ * Some Dell laptops expect that an ACK command with the
+ * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)
+ * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.
+ * If this is not done events are not delivered to OSPM and
+ * subsequent commands will timeout.
+ */
+static int
+ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
+                    const void *val, size_t val_len)
+{
+       struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+       u64 cmd = *(u64 *)val, ack = 0;
+       int ret;
+
+       if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&
+           cmd & UCSI_ACK_CONNECTOR_CHANGE)
+               ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
+
+       ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
+       if (ret != 0)
+               return ret;
+       if (ack == 0)
+               return ret;
+
+       if (!ua->dell_quirk_probed) {
+               ua->dell_quirk_probed = true;
+
+               cmd = UCSI_GET_CAPABILITY;
+               ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,
+                                          sizeof(cmd));
+               if (ret == 0)
+                       return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,
+                                                   &ack, sizeof(ack));
+               if (ret != -ETIMEDOUT)
+                       return ret;
+
+               ua->dell_quirk_active = true;
+               dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");
+               dev_err(ua->dev, "Firmware bug: Enabling workaround\n");
+       }
+
+       if (!ua->dell_quirk_active)
+               return ret;
+
+       return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));
+}
+
+static const struct ucsi_operations ucsi_dell_ops = {
+       .read = ucsi_acpi_read,
+       .sync_write = ucsi_dell_sync_write,
+       .async_write = ucsi_acpi_async_write
+};
+
+static const struct dmi_system_id ucsi_acpi_quirks[] = {
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
                },
+               .driver_data = (void *)&ucsi_zenbook_ops,
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+               },
+               .driver_data = (void *)&ucsi_dell_ops,
        },
        { }
 };
@@ -142,8 +212,10 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
        if (UCSI_CCI_CONNECTOR(cci))
                ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
 
-       if (test_bit(COMMAND_PENDING, &ua->flags) &&
-           cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+       if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags))
+               complete(&ua->complete);
+       if (cci & UCSI_CCI_COMMAND_COMPLETE &&
+           test_bit(COMMAND_PENDING, &ua->flags))
                complete(&ua->complete);
 }
 
@@ -151,6 +223,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
 {
        struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
        const struct ucsi_operations *ops = &ucsi_acpi_ops;
+       const struct dmi_system_id *id;
        struct ucsi_acpi *ua;
        struct resource *res;
        acpi_status status;
@@ -180,8 +253,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
        init_completion(&ua->complete);
        ua->dev = &pdev->dev;
 
-       if (dmi_check_system(zenbook_dmi_id))
-               ops = &ucsi_zenbook_ops;
+       id = dmi_first_match(ucsi_acpi_quirks);
+       if (id)
+               ops = id->driver_data;
 
        ua->ucsi = ucsi_create(&pdev->dev, ops);
        if (IS_ERR(ua->ucsi))
index 0a6624d37929e85f1771d16df64a72ce92a2c623..79110a69d697c1e7fe511739b564659a46f24815 100644 (file)
@@ -377,14 +377,14 @@ static int __init usbip_host_init(void)
                goto err_usb_register;
        }
 
-       ret = driver_create_file(&stub_driver.drvwrap.driver,
+       ret = driver_create_file(&stub_driver.driver,
                                 &driver_attr_match_busid);
        if (ret) {
                pr_err("driver_create_file failed\n");
                goto err_create_file;
        }
 
-       ret = driver_create_file(&stub_driver.drvwrap.driver,
+       ret = driver_create_file(&stub_driver.driver,
                                 &driver_attr_rebind);
        if (ret) {
                pr_err("driver_create_file failed\n");
@@ -402,10 +402,10 @@ err_usb_register:
 
 static void __exit usbip_host_exit(void)
 {
-       driver_remove_file(&stub_driver.drvwrap.driver,
+       driver_remove_file(&stub_driver.driver,
                           &driver_attr_match_busid);
 
-       driver_remove_file(&stub_driver.drvwrap.driver,
+       driver_remove_file(&stub_driver.driver,
                           &driver_attr_rebind);
 
        /*
index 1bd4bc005829bdb71f85861d7ef57d27bf709660..faf61c9c6a9894269f3b171467e7bad238406248 100644 (file)
@@ -173,6 +173,6 @@ struct vudc_device *alloc_vudc_device(int devid);
 void put_vudc_device(struct vudc_device *udc_dev);
 
 int vudc_probe(struct platform_device *pdev);
-int vudc_remove(struct platform_device *pdev);
+void vudc_remove(struct platform_device *pdev);
 
 #endif /* __USBIP_VUDC_H */
index 44b04c54c08677155b747b28f272e91ec21d0f21..f11535020e35a3beb67f7403bccc4e0295525ed7 100644 (file)
@@ -628,12 +628,11 @@ out:
        return ret;
 }
 
-int vudc_remove(struct platform_device *pdev)
+void vudc_remove(struct platform_device *pdev)
 {
        struct vudc *udc = platform_get_drvdata(pdev);
 
        usb_del_gadget_udc(&udc->gadget);
        cleanup_vudc_hw(udc);
        kfree(udc);
-       return 0;
 }
index 993e721cb840df29c209bbad2bfc35fea434b4db..8bee553e48945f66d6c81bd0c1e0d2ca22f798ae 100644 (file)
@@ -19,7 +19,7 @@ MODULE_PARM_DESC(num, "number of emulated controllers");
 
 static struct platform_driver vudc_driver = {
        .probe          = vudc_probe,
-       .remove         = vudc_remove,
+       .remove_new     = vudc_remove,
        .driver         = {
                .name   = GADGET_NAME,
                .dev_groups = vudc_groups,
index 5a09a09cca709034503250c1ff64beb26fa35b3d..cce3d1837104c34f0bf331db33d5a81f64c0a6e7 100644 (file)
@@ -497,7 +497,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (!eni_vdpa->vring) {
                ret = -ENOMEM;
                ENI_ERR(pdev, "failed to allocate virtqueues\n");
-               goto err;
+               goto err_remove_vp_legacy;
        }
 
        for (i = 0; i < eni_vdpa->queues; i++) {
@@ -509,11 +509,13 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
        if (ret) {
                ENI_ERR(pdev, "failed to register to vdpa bus\n");
-               goto err;
+               goto err_remove_vp_legacy;
        }
 
        return 0;
 
+err_remove_vp_legacy:
+       vp_legacy_remove(&eni_vdpa->ldev);
 err:
        put_device(&eni_vdpa->vdpa.dev);
        return ret;
index 84547d998bcf3b1a0305e489ff9cbf5b65c276eb..50aac8fe57ef57184c31e6080b33c3b543a635e4 100644 (file)
@@ -35,6 +35,9 @@ struct mlx5_vdpa_mr {
        struct vhost_iotlb *iotlb;
 
        bool user_mr;
+
+       refcount_t refcount;
+       struct list_head mr_list;
 };
 
 struct mlx5_vdpa_resources {
@@ -93,6 +96,7 @@ struct mlx5_vdpa_dev {
        u32 generation;
 
        struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
+       struct list_head mr_list_head;
        /* serialize mr access */
        struct mutex mr_mtx;
        struct mlx5_control_vq cvq;
@@ -118,8 +122,10 @@ int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
 struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
                                         struct vhost_iotlb *iotlb);
 void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
-                         struct mlx5_vdpa_mr *mr);
+void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
+                     struct mlx5_vdpa_mr *mr);
+void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
+                     struct mlx5_vdpa_mr *mr);
 void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
                         struct mlx5_vdpa_mr *mr,
                         unsigned int asid);
index 2197c46e563a1f13414588260e3fda3195f1052c..4758914ccf860838bb32c0bd3ae23d7280ce9ea5 100644 (file)
@@ -498,32 +498,54 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
 
 static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
 {
+       if (WARN_ON(!mr))
+               return;
+
        if (mr->user_mr)
                destroy_user_mr(mvdev, mr);
        else
                destroy_dma_mr(mvdev, mr);
 
        vhost_iotlb_free(mr->iotlb);
+
+       list_del(&mr->mr_list);
+
+       kfree(mr);
 }
 
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
-                         struct mlx5_vdpa_mr *mr)
+static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
+                             struct mlx5_vdpa_mr *mr)
 {
        if (!mr)
                return;
 
+       if (refcount_dec_and_test(&mr->refcount))
+               _mlx5_vdpa_destroy_mr(mvdev, mr);
+}
+
+void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
+                     struct mlx5_vdpa_mr *mr)
+{
        mutex_lock(&mvdev->mr_mtx);
+       _mlx5_vdpa_put_mr(mvdev, mr);
+       mutex_unlock(&mvdev->mr_mtx);
+}
 
-       _mlx5_vdpa_destroy_mr(mvdev, mr);
+static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
+                             struct mlx5_vdpa_mr *mr)
+{
+       if (!mr)
+               return;
 
-       for (int i = 0; i < MLX5_VDPA_NUM_AS; i++) {
-               if (mvdev->mr[i] == mr)
-                       mvdev->mr[i] = NULL;
-       }
+       refcount_inc(&mr->refcount);
+}
 
+void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
+                     struct mlx5_vdpa_mr *mr)
+{
+       mutex_lock(&mvdev->mr_mtx);
+       _mlx5_vdpa_get_mr(mvdev, mr);
        mutex_unlock(&mvdev->mr_mtx);
-
-       kfree(mr);
 }
 
 void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
@@ -534,10 +556,23 @@ void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
 
        mutex_lock(&mvdev->mr_mtx);
 
+       _mlx5_vdpa_put_mr(mvdev, old_mr);
        mvdev->mr[asid] = new_mr;
-       if (old_mr) {
-               _mlx5_vdpa_destroy_mr(mvdev, old_mr);
-               kfree(old_mr);
+
+       mutex_unlock(&mvdev->mr_mtx);
+}
+
+static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
+{
+       struct mlx5_vdpa_mr *mr;
+
+       mutex_lock(&mvdev->mr_mtx);
+
+       list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) {
+
+               mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
+                                     "mr: %p, mkey: 0x%x, refcount: %u\n",
+                                      mr, mr->mkey, refcount_read(&mr->refcount));
        }
 
        mutex_unlock(&mvdev->mr_mtx);
@@ -547,9 +582,11 @@ void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
 void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
 {
        for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
-               mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[i]);
+               mlx5_vdpa_update_mr(mvdev, NULL, i);
 
        prune_iotlb(mvdev->cvq.iotlb);
+
+       mlx5_vdpa_show_mr_leaks(mvdev);
 }
 
 static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
@@ -576,6 +613,8 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
        if (err)
                goto err_iotlb;
 
+       list_add_tail(&mr->mr_list, &mvdev->mr_list_head);
+
        return 0;
 
 err_iotlb:
@@ -607,6 +646,8 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
        if (err)
                goto out_err;
 
+       refcount_set(&mr->refcount, 1);
+
        return mr;
 
 out_err:
@@ -651,7 +692,7 @@ int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
        if (asid >= MLX5_VDPA_NUM_AS)
                return -EINVAL;
 
-       mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[asid]);
+       mlx5_vdpa_update_mr(mvdev, NULL, asid);
 
        if (asid == 0 && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
                if (mlx5_vdpa_create_dma_mr(mvdev))
index 26ba7da6b410621ea72e65d4bb90bd192e06dbda..778821bab7d93e3393440656306e60cfae52d096 100644 (file)
@@ -120,6 +120,12 @@ struct mlx5_vdpa_virtqueue {
        u16 avail_idx;
        u16 used_idx;
        int fw_state;
+
+       u64 modified_fields;
+
+       struct mlx5_vdpa_mr *vq_mr;
+       struct mlx5_vdpa_mr *desc_mr;
+
        struct msi_map map;
 
        /* keep last in the struct */
@@ -943,6 +949,14 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
        kfree(in);
        mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
 
+       mlx5_vdpa_get_mr(mvdev, vq_mr);
+       mvq->vq_mr = vq_mr;
+
+       if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) {
+               mlx5_vdpa_get_mr(mvdev, vq_desc_mr);
+               mvq->desc_mr = vq_desc_mr;
+       }
+
        return 0;
 
 err_cmd:
@@ -969,6 +983,12 @@ static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtq
        }
        mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
        umems_destroy(ndev, mvq);
+
+       mlx5_vdpa_put_mr(&ndev->mvdev, mvq->vq_mr);
+       mvq->vq_mr = NULL;
+
+       mlx5_vdpa_put_mr(&ndev->mvdev, mvq->desc_mr);
+       mvq->desc_mr = NULL;
 }
 
 static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
@@ -1167,7 +1187,12 @@ err_cmd:
        return err;
 }
 
-static bool is_valid_state_change(int oldstate, int newstate)
+static bool is_resumable(struct mlx5_vdpa_net *ndev)
+{
+       return ndev->mvdev.vdev.config->resume;
+}
+
+static bool is_valid_state_change(int oldstate, int newstate, bool resumable)
 {
        switch (oldstate) {
        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
@@ -1175,25 +1200,43 @@ static bool is_valid_state_change(int oldstate, int newstate)
        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
                return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
+               return resumable ? newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY : false;
        case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
        default:
                return false;
        }
 }
 
-static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
+static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq)
+{
+       /* Only state is always modifiable */
+       if (mvq->modified_fields & ~MLX5_VIRTQ_MODIFY_MASK_STATE)
+               return mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT ||
+                      mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
+
+       return true;
+}
+
+static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
+                           struct mlx5_vdpa_virtqueue *mvq,
+                           int state)
 {
        int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
        u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
+       struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+       struct mlx5_vdpa_mr *desc_mr = NULL;
+       struct mlx5_vdpa_mr *vq_mr = NULL;
+       bool state_change = false;
        void *obj_context;
        void *cmd_hdr;
+       void *vq_ctx;
        void *in;
        int err;
 
        if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
                return 0;
 
-       if (!is_valid_state_change(mvq->fw_state, state))
+       if (!modifiable_virtqueue_fields(mvq))
                return -EINVAL;
 
        in = kzalloc(inlen, GFP_KERNEL);
@@ -1208,17 +1251,83 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
        MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
 
        obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
-       MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
-                  MLX5_VIRTQ_MODIFY_MASK_STATE);
-       MLX5_SET(virtio_net_q_object, obj_context, state, state);
+       vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
+
+       if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
+               if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
+                       err = -EINVAL;
+                       goto done;
+               }
+
+               MLX5_SET(virtio_net_q_object, obj_context, state, state);
+               state_change = true;
+       }
+
+       if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) {
+               MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
+               MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
+               MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
+       }
+
+       if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX)
+               MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+
+       if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX)
+               MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
+
+       if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
+               vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+
+               if (vq_mr)
+                       MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
+               else
+                       mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
+       }
+
+       if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
+               desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+
+               if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
+                       MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
+               else
+                       mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
+       }
+
+       MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
        err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
-       kfree(in);
-       if (!err)
+       if (err)
+               goto done;
+
+       if (state_change)
                mvq->fw_state = state;
 
+       if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
+               mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
+               mlx5_vdpa_get_mr(mvdev, vq_mr);
+               mvq->vq_mr = vq_mr;
+       }
+
+       if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
+               mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
+               mlx5_vdpa_get_mr(mvdev, desc_mr);
+               mvq->desc_mr = desc_mr;
+       }
+
+       mvq->modified_fields = 0;
+
+done:
+       kfree(in);
        return err;
 }
 
+static int modify_virtqueue_state(struct mlx5_vdpa_net *ndev,
+                                 struct mlx5_vdpa_virtqueue *mvq,
+                                 unsigned int state)
+{
+       mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
+       return modify_virtqueue(ndev, mvq, state);
+}
+
 static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
        u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {};
@@ -1347,7 +1456,7 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
                goto err_vq;
 
        if (mvq->ready) {
-               err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+               err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
                if (err) {
                        mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
                                       idx, err);
@@ -1382,7 +1491,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
        if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
                return;
 
-       if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
+       if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
                mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
 
        if (query_virtqueue(ndev, mvq, &attr)) {
@@ -1401,12 +1510,31 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
                suspend_vq(ndev, &ndev->vqs[i]);
 }
 
+static void resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+       if (!mvq->initialized || !is_resumable(ndev))
+               return;
+
+       if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)
+               return;
+
+       if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY))
+               mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u\n", mvq->index);
+}
+
+static void resume_vqs(struct mlx5_vdpa_net *ndev)
+{
+       for (int i = 0; i < ndev->mvdev.max_vqs; i++)
+               resume_vq(ndev, &ndev->vqs[i]);
+}
+
 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
 {
        if (!mvq->initialized)
                return;
 
        suspend_vq(ndev, mvq);
+       mvq->modified_fields = 0;
        destroy_virtqueue(ndev, mvq);
        dealloc_vector(ndev, mvq);
        counter_set_dealloc(ndev, mvq);
@@ -2138,6 +2266,7 @@ static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_
        mvq->desc_addr = desc_area;
        mvq->device_addr = device_area;
        mvq->driver_addr = driver_area;
+       mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS;
        return 0;
 }
 
@@ -2207,7 +2336,7 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
        if (!ready) {
                suspend_vq(ndev, mvq);
        } else {
-               err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+               err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
                if (err) {
                        mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err);
                        ready = false;
@@ -2255,6 +2384,8 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
 
        mvq->used_idx = state->split.avail_index;
        mvq->avail_idx = state->split.avail_index;
+       mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX |
+                               MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX;
        return 0;
 }
 
@@ -2703,24 +2834,35 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
                                unsigned int asid)
 {
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       bool teardown = !is_resumable(ndev);
        int err;
 
        suspend_vqs(ndev);
-       err = save_channels_info(ndev);
-       if (err)
-               return err;
+       if (teardown) {
+               err = save_channels_info(ndev);
+               if (err)
+                       return err;
 
-       teardown_driver(ndev);
+               teardown_driver(ndev);
+       }
 
        mlx5_vdpa_update_mr(mvdev, new_mr, asid);
 
+       for (int i = 0; i < ndev->cur_num_vqs; i++)
+               ndev->vqs[i].modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY |
+                                               MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
+
        if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
                return 0;
 
-       restore_channels_info(ndev);
-       err = setup_driver(mvdev);
-       if (err)
-               return err;
+       if (teardown) {
+               restore_channels_info(ndev);
+               err = setup_driver(mvdev);
+               if (err)
+                       return err;
+       }
+
+       resume_vqs(ndev);
 
        return 0;
 }
@@ -2804,8 +2946,10 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
 {
        int i;
 
-       for (i = 0; i < ndev->mvdev.max_vqs; i++)
+       for (i = 0; i < ndev->mvdev.max_vqs; i++) {
                ndev->vqs[i].ready = false;
+               ndev->vqs[i].modified_fields = 0;
+       }
 
        ndev->mvdev.cvq.ready = false;
 }
@@ -2982,7 +3126,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
        return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid);
 
 out_err:
-       mlx5_vdpa_destroy_mr(mvdev, new_mr);
+       mlx5_vdpa_put_mr(mvdev, new_mr);
        return err;
 }
 
@@ -3229,6 +3373,23 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
        return 0;
 }
 
+static int mlx5_vdpa_resume(struct vdpa_device *vdev)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev;
+
+       ndev = to_mlx5_vdpa_ndev(mvdev);
+
+       mlx5_vdpa_info(mvdev, "resuming device\n");
+
+       down_write(&ndev->reslock);
+       mvdev->suspended = false;
+       resume_vqs(ndev);
+       register_link_notifier(ndev);
+       up_write(&ndev->reslock);
+       return 0;
+}
+
 static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
                               unsigned int asid)
 {
@@ -3285,6 +3446,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
        .get_vq_dma_dev = mlx5_get_vq_dma_dev,
        .free = mlx5_vdpa_free,
        .suspend = mlx5_vdpa_suspend,
+       .resume = mlx5_vdpa_resume, /* Op disabled if not supported. */
 };
 
 static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
@@ -3560,6 +3722,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        if (err)
                goto err_mpfs;
 
+       INIT_LIST_HEAD(&mvdev->mr_list_head);
+
        if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
                err = mlx5_vdpa_create_dma_mr(mvdev);
                if (err)
@@ -3656,6 +3820,9 @@ static int mlx5v_probe(struct auxiliary_device *adev,
        if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, desc_group_mkey_supported))
                mgtdev->vdpa_ops.get_vq_desc_group = NULL;
 
+       if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, freeze_to_rdy_supported))
+               mgtdev->vdpa_ops.resume = NULL;
+
        err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
        if (err)
                goto reg_err;
index a7612e0783b36a89a61d5482a305c668a748ddac..d0695680b282ec7812983a7bd37cd9729dfa0126 100644 (file)
@@ -131,7 +131,7 @@ static void vdpa_release_dev(struct device *d)
        if (ops->free)
                ops->free(vdev);
 
-       ida_simple_remove(&vdpa_index_ida, vdev->index);
+       ida_free(&vdpa_index_ida, vdev->index);
        kfree(vdev->driver_override);
        kfree(vdev);
 }
@@ -205,7 +205,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
        return vdev;
 
 err_name:
-       ida_simple_remove(&vdpa_index_ida, vdev->index);
+       ida_free(&vdpa_index_ida, vdev->index);
 err_ida:
        kfree(vdev);
 err:
index 6bda6dbb48784b7047c467388575f9e35c8e2c0b..ceae52fd7586d019778cb7d1026942bea962315d 100644 (file)
@@ -80,6 +80,16 @@ config VFIO_VIRQFD
        select EVENTFD
        default n
 
+config VFIO_DEBUGFS
+       bool "Export VFIO internals in DebugFS"
+       depends on DEBUG_FS
+       help
+         Allows exposure of VFIO device internals. This option enables
+         the use of debugfs by VFIO drivers as required. The device can
+         cause the VFIO code to create a top-level debug/vfio directory
+         during initialization, and then populate a subdirectory with
+         entries as required.
+
 source "drivers/vfio/pci/Kconfig"
 source "drivers/vfio/platform/Kconfig"
 source "drivers/vfio/mdev/Kconfig"
index 68c05705200fce8fc9824a8521bbe554e5c130f7..b2fc9fb499d8690cf7d75e32bdf9bbb02efdf9f7 100644 (file)
@@ -7,6 +7,7 @@ vfio-$(CONFIG_VFIO_GROUP) += group.o
 vfio-$(CONFIG_IOMMUFD) += iommufd.o
 vfio-$(CONFIG_VFIO_CONTAINER) += container.o
 vfio-$(CONFIG_VFIO_VIRQFD) += virqfd.o
+vfio-$(CONFIG_VFIO_DEBUGFS) += debugfs.o
 
 obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
 obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o
diff --git a/drivers/vfio/debugfs.c b/drivers/vfio/debugfs.c
new file mode 100644 (file)
index 0000000..298bd86
--- /dev/null
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, HiSilicon Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/vfio.h>
+#include "vfio.h"
+
+static struct dentry *vfio_debugfs_root;
+
+static int vfio_device_state_read(struct seq_file *seq, void *data)
+{
+       struct device *vf_dev = seq->private;
+       struct vfio_device *vdev = container_of(vf_dev,
+                                               struct vfio_device, device);
+       enum vfio_device_mig_state state;
+       int ret;
+
+       BUILD_BUG_ON(VFIO_DEVICE_STATE_NR !=
+                    VFIO_DEVICE_STATE_PRE_COPY_P2P + 1);
+
+       ret = vdev->mig_ops->migration_get_state(vdev, &state);
+       if (ret)
+               return -EINVAL;
+
+       switch (state) {
+       case VFIO_DEVICE_STATE_ERROR:
+               seq_puts(seq, "ERROR\n");
+               break;
+       case VFIO_DEVICE_STATE_STOP:
+               seq_puts(seq, "STOP\n");
+               break;
+       case VFIO_DEVICE_STATE_RUNNING:
+               seq_puts(seq, "RUNNING\n");
+               break;
+       case VFIO_DEVICE_STATE_STOP_COPY:
+               seq_puts(seq, "STOP_COPY\n");
+               break;
+       case VFIO_DEVICE_STATE_RESUMING:
+               seq_puts(seq, "RESUMING\n");
+               break;
+       case VFIO_DEVICE_STATE_RUNNING_P2P:
+               seq_puts(seq, "RUNNING_P2P\n");
+               break;
+       case VFIO_DEVICE_STATE_PRE_COPY:
+               seq_puts(seq, "PRE_COPY\n");
+               break;
+       case VFIO_DEVICE_STATE_PRE_COPY_P2P:
+               seq_puts(seq, "PRE_COPY_P2P\n");
+               break;
+       default:
+               seq_puts(seq, "Invalid\n");
+       }
+
+       return 0;
+}
+
+void vfio_device_debugfs_init(struct vfio_device *vdev)
+{
+       struct device *dev = &vdev->device;
+
+       vdev->debug_root = debugfs_create_dir(dev_name(vdev->dev),
+                                             vfio_debugfs_root);
+
+       if (vdev->mig_ops) {
+               struct dentry *vfio_dev_migration = NULL;
+
+               vfio_dev_migration = debugfs_create_dir("migration",
+                                                       vdev->debug_root);
+               debugfs_create_devm_seqfile(dev, "state", vfio_dev_migration,
+                                           vfio_device_state_read);
+       }
+}
+
+void vfio_device_debugfs_exit(struct vfio_device *vdev)
+{
+       debugfs_remove_recursive(vdev->debug_root);
+}
+
+void vfio_debugfs_create_root(void)
+{
+       vfio_debugfs_root = debugfs_create_dir("vfio", NULL);
+}
+
+void vfio_debugfs_remove_root(void)
+{
+       debugfs_remove_recursive(vfio_debugfs_root);
+       vfio_debugfs_root = NULL;
+}
index 8125e5f37832c40adbf25a6868389c78639e42cc..18c397df566d8dbf4716a583bf45f33faa6c1f0a 100644 (file)
@@ -65,4 +65,6 @@ source "drivers/vfio/pci/hisilicon/Kconfig"
 
 source "drivers/vfio/pci/pds/Kconfig"
 
+source "drivers/vfio/pci/virtio/Kconfig"
+
 endmenu
index 45167be462d8f601c2da3924fd6848ef6c059cf9..046139a4eca5b58b733bfc62174ac54ec745307d 100644 (file)
@@ -13,3 +13,5 @@ obj-$(CONFIG_MLX5_VFIO_PCI)           += mlx5/
 obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/
 
 obj-$(CONFIG_PDS_VFIO_PCI) += pds/
+
+obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio/
index b2f9778c8366ea8944f1c59be51f41d69570da19..4d27465c8f1a893352bae1498303a5da7e4ebb6d 100644 (file)
@@ -694,6 +694,7 @@ static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *bu
                                        size_t len, loff_t *pos)
 {
        struct hisi_acc_vf_migration_file *migf = filp->private_data;
+       u8 *vf_data = (u8 *)&migf->vf_data;
        loff_t requested_length;
        ssize_t done = 0;
        int ret;
@@ -715,7 +716,7 @@ static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *bu
                goto out_unlock;
        }
 
-       ret = copy_from_user(&migf->vf_data, buf, len);
+       ret = copy_from_user(vf_data + *pos, buf, len);
        if (ret) {
                done = -EFAULT;
                goto out_unlock;
@@ -835,7 +836,9 @@ static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t
 
        len = min_t(size_t, migf->total_length - *pos, len);
        if (len) {
-               ret = copy_to_user(buf, &migf->vf_data, len);
+               u8 *vf_data = (u8 *)&migf->vf_data;
+
+               ret = copy_to_user(buf, vf_data + *pos, len);
                if (ret) {
                        done = -EFAULT;
                        goto out_unlock;
index c937aa6f39546da2d3f4443c0d35c9d984090f7f..8ddf4346fcd5d153ad24b7377edd7f412be28b47 100644 (file)
@@ -70,7 +70,7 @@ out_free_region_info:
        kfree(region_info);
 }
 
-static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_dirty *dirty,
+static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_region *region,
                                        unsigned long bytes)
 {
        unsigned long *host_seq_bmp, *host_ack_bmp;
@@ -85,47 +85,63 @@ static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_dirty *dirty,
                return -ENOMEM;
        }
 
-       dirty->host_seq.bmp = host_seq_bmp;
-       dirty->host_ack.bmp = host_ack_bmp;
+       region->host_seq = host_seq_bmp;
+       region->host_ack = host_ack_bmp;
+       region->bmp_bytes = bytes;
 
        return 0;
 }
 
 static void pds_vfio_dirty_free_bitmaps(struct pds_vfio_dirty *dirty)
 {
-       vfree(dirty->host_seq.bmp);
-       vfree(dirty->host_ack.bmp);
-       dirty->host_seq.bmp = NULL;
-       dirty->host_ack.bmp = NULL;
+       if (!dirty->regions)
+               return;
+
+       for (int i = 0; i < dirty->num_regions; i++) {
+               struct pds_vfio_region *region = &dirty->regions[i];
+
+               vfree(region->host_seq);
+               vfree(region->host_ack);
+               region->host_seq = NULL;
+               region->host_ack = NULL;
+               region->bmp_bytes = 0;
+       }
 }
 
 static void __pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio,
-                                     struct pds_vfio_bmp_info *bmp_info)
+                                     struct pds_vfio_region *region)
 {
        struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
        struct device *pdsc_dev = &pci_physfn(pdev)->dev;
 
-       dma_unmap_single(pdsc_dev, bmp_info->sgl_addr,
-                        bmp_info->num_sge * sizeof(struct pds_lm_sg_elem),
+       dma_unmap_single(pdsc_dev, region->sgl_addr,
+                        region->num_sge * sizeof(struct pds_lm_sg_elem),
                         DMA_BIDIRECTIONAL);
-       kfree(bmp_info->sgl);
+       kfree(region->sgl);
 
-       bmp_info->num_sge = 0;
-       bmp_info->sgl = NULL;
-       bmp_info->sgl_addr = 0;
+       region->num_sge = 0;
+       region->sgl = NULL;
+       region->sgl_addr = 0;
 }
 
 static void pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio)
 {
-       if (pds_vfio->dirty.host_seq.sgl)
-               __pds_vfio_dirty_free_sgl(pds_vfio, &pds_vfio->dirty.host_seq);
-       if (pds_vfio->dirty.host_ack.sgl)
-               __pds_vfio_dirty_free_sgl(pds_vfio, &pds_vfio->dirty.host_ack);
+       struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+
+       if (!dirty->regions)
+               return;
+
+       for (int i = 0; i < dirty->num_regions; i++) {
+               struct pds_vfio_region *region = &dirty->regions[i];
+
+               if (region->sgl)
+                       __pds_vfio_dirty_free_sgl(pds_vfio, region);
+       }
 }
 
-static int __pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
-                                     struct pds_vfio_bmp_info *bmp_info,
-                                     u32 page_count)
+static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
+                                   struct pds_vfio_region *region,
+                                   u32 page_count)
 {
        struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
        struct device *pdsc_dev = &pci_physfn(pdev)->dev;
@@ -147,32 +163,81 @@ static int __pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
                return -EIO;
        }
 
-       bmp_info->sgl = sgl;
-       bmp_info->num_sge = max_sge;
-       bmp_info->sgl_addr = sgl_addr;
+       region->sgl = sgl;
+       region->num_sge = max_sge;
+       region->sgl_addr = sgl_addr;
 
        return 0;
 }
 
-static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
-                                   u32 page_count)
+static void pds_vfio_dirty_free_regions(struct pds_vfio_dirty *dirty)
 {
+       vfree(dirty->regions);
+       dirty->regions = NULL;
+       dirty->num_regions = 0;
+}
+
+static int pds_vfio_dirty_alloc_regions(struct pds_vfio_pci_device *pds_vfio,
+                                       struct pds_lm_dirty_region_info *region_info,
+                                       u64 region_page_size, u8 num_regions)
+{
+       struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
        struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+       u32 dev_bmp_offset_byte = 0;
        int err;
 
-       err = __pds_vfio_dirty_alloc_sgl(pds_vfio, &dirty->host_seq,
-                                        page_count);
-       if (err)
-               return err;
+       dirty->regions = vcalloc(num_regions, sizeof(struct pds_vfio_region));
+       if (!dirty->regions)
+               return -ENOMEM;
+       dirty->num_regions = num_regions;
+
+       for (int i = 0; i < num_regions; i++) {
+               struct pds_lm_dirty_region_info *ri = &region_info[i];
+               struct pds_vfio_region *region = &dirty->regions[i];
+               u64 region_size, region_start;
+               u32 page_count;
+
+               /* page_count might be adjusted by the device */
+               page_count = le32_to_cpu(ri->page_count);
+               region_start = le64_to_cpu(ri->dma_base);
+               region_size = page_count * region_page_size;
+
+               err = pds_vfio_dirty_alloc_bitmaps(region,
+                                                  page_count / BITS_PER_BYTE);
+               if (err) {
+                       dev_err(&pdev->dev, "Failed to alloc dirty bitmaps: %pe\n",
+                               ERR_PTR(err));
+                       goto out_free_regions;
+               }
 
-       err = __pds_vfio_dirty_alloc_sgl(pds_vfio, &dirty->host_ack,
-                                        page_count);
-       if (err) {
-               __pds_vfio_dirty_free_sgl(pds_vfio, &dirty->host_seq);
-               return err;
+               err = pds_vfio_dirty_alloc_sgl(pds_vfio, region, page_count);
+               if (err) {
+                       dev_err(&pdev->dev, "Failed to alloc dirty sg lists: %pe\n",
+                               ERR_PTR(err));
+                       goto out_free_regions;
+               }
+
+               region->size = region_size;
+               region->start = region_start;
+               region->page_size = region_page_size;
+               region->dev_bmp_offset_start_byte = dev_bmp_offset_byte;
+
+               dev_bmp_offset_byte += page_count / BITS_PER_BYTE;
+               if (dev_bmp_offset_byte % BITS_PER_BYTE) {
+                       dev_err(&pdev->dev, "Device bitmap offset is mis-aligned\n");
+                       err = -EINVAL;
+                       goto out_free_regions;
+               }
        }
 
        return 0;
+
+out_free_regions:
+       pds_vfio_dirty_free_bitmaps(dirty);
+       pds_vfio_dirty_free_sgl(pds_vfio);
+       pds_vfio_dirty_free_regions(dirty);
+
+       return err;
 }
 
 static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
@@ -181,16 +246,14 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
 {
        struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
        struct device *pdsc_dev = &pci_physfn(pdev)->dev;
-       struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
-       u64 region_start, region_size, region_page_size;
        struct pds_lm_dirty_region_info *region_info;
        struct interval_tree_node *node = NULL;
+       u64 region_page_size = *page_size;
        u8 max_regions = 0, num_regions;
        dma_addr_t regions_dma = 0;
        u32 num_ranges = nnodes;
-       u32 page_count;
-       u16 len;
        int err;
+       u16 len;
 
        dev_dbg(&pdev->dev, "vf%u: Start dirty page tracking\n",
                pds_vfio->vf_id);
@@ -217,39 +280,38 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
                return -EOPNOTSUPP;
        }
 
-       /*
-        * Only support 1 region for now. If there are any large gaps in the
-        * VM's address regions, then this would be a waste of memory as we are
-        * generating 2 bitmaps (ack/seq) from the min address to the max
-        * address of the VM's address regions. In the future, if we support
-        * more than one region in the device/driver we can split the bitmaps
-        * on the largest address region gaps. We can do this split up to the
-        * max_regions times returned from the dirty_status command.
-        */
-       max_regions = 1;
        if (num_ranges > max_regions) {
                vfio_combine_iova_ranges(ranges, nnodes, max_regions);
                num_ranges = max_regions;
        }
 
+       region_info = kcalloc(num_ranges, sizeof(*region_info), GFP_KERNEL);
+       if (!region_info)
+               return -ENOMEM;
+       len = num_ranges * sizeof(*region_info);
+
        node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
        if (!node)
                return -EINVAL;
+       for (int i = 0; i < num_ranges; i++) {
+               struct pds_lm_dirty_region_info *ri = &region_info[i];
+               u64 region_size = node->last - node->start + 1;
+               u64 region_start = node->start;
+               u32 page_count;
 
-       region_size = node->last - node->start + 1;
-       region_start = node->start;
-       region_page_size = *page_size;
+               page_count = DIV_ROUND_UP(region_size, region_page_size);
 
-       len = sizeof(*region_info);
-       region_info = kzalloc(len, GFP_KERNEL);
-       if (!region_info)
-               return -ENOMEM;
+               ri->dma_base = cpu_to_le64(region_start);
+               ri->page_count = cpu_to_le32(page_count);
+               ri->page_size_log2 = ilog2(region_page_size);
 
-       page_count = DIV_ROUND_UP(region_size, region_page_size);
+               dev_dbg(&pdev->dev,
+                       "region_info[%d]: region_start 0x%llx region_end 0x%lx region_size 0x%llx page_count %u page_size %llu\n",
+                       i, region_start, node->last, region_size, page_count,
+                       region_page_size);
 
-       region_info->dma_base = cpu_to_le64(region_start);
-       region_info->page_count = cpu_to_le32(page_count);
-       region_info->page_size_log2 = ilog2(region_page_size);
+               node = interval_tree_iter_next(node, 0, ULONG_MAX);
+       }
 
        regions_dma = dma_map_single(pdsc_dev, (void *)region_info, len,
                                     DMA_BIDIRECTIONAL);
@@ -258,39 +320,20 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
                goto out_free_region_info;
        }
 
-       err = pds_vfio_dirty_enable_cmd(pds_vfio, regions_dma, max_regions);
+       err = pds_vfio_dirty_enable_cmd(pds_vfio, regions_dma, num_ranges);
        dma_unmap_single(pdsc_dev, regions_dma, len, DMA_BIDIRECTIONAL);
        if (err)
                goto out_free_region_info;
 
-       /*
-        * page_count might be adjusted by the device,
-        * update it before freeing region_info DMA
-        */
-       page_count = le32_to_cpu(region_info->page_count);
-
-       dev_dbg(&pdev->dev,
-               "region_info: regions_dma 0x%llx dma_base 0x%llx page_count %u page_size_log2 %u\n",
-               regions_dma, region_start, page_count,
-               (u8)ilog2(region_page_size));
-
-       err = pds_vfio_dirty_alloc_bitmaps(dirty, page_count / BITS_PER_BYTE);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to alloc dirty bitmaps: %pe\n",
-                       ERR_PTR(err));
-               goto out_free_region_info;
-       }
-
-       err = pds_vfio_dirty_alloc_sgl(pds_vfio, page_count);
+       err = pds_vfio_dirty_alloc_regions(pds_vfio, region_info,
+                                          region_page_size, num_ranges);
        if (err) {
-               dev_err(&pdev->dev, "Failed to alloc dirty sg lists: %pe\n",
-                       ERR_PTR(err));
-               goto out_free_bitmaps;
+               dev_err(&pdev->dev,
+                       "Failed to allocate %d regions for tracking dirty regions: %pe\n",
+                       num_regions, ERR_PTR(err));
+               goto out_dirty_disable;
        }
 
-       dirty->region_start = region_start;
-       dirty->region_size = region_size;
-       dirty->region_page_size = region_page_size;
        pds_vfio_dirty_set_enabled(pds_vfio);
 
        pds_vfio_print_guest_region_info(pds_vfio, max_regions);
@@ -299,8 +342,8 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
 
        return 0;
 
-out_free_bitmaps:
-       pds_vfio_dirty_free_bitmaps(dirty);
+out_dirty_disable:
+       pds_vfio_dirty_disable_cmd(pds_vfio);
 out_free_region_info:
        kfree(region_info);
        return err;
@@ -314,6 +357,7 @@ void pds_vfio_dirty_disable(struct pds_vfio_pci_device *pds_vfio, bool send_cmd)
                        pds_vfio_dirty_disable_cmd(pds_vfio);
                pds_vfio_dirty_free_sgl(pds_vfio);
                pds_vfio_dirty_free_bitmaps(&pds_vfio->dirty);
+               pds_vfio_dirty_free_regions(&pds_vfio->dirty);
        }
 
        if (send_cmd)
@@ -321,8 +365,9 @@ void pds_vfio_dirty_disable(struct pds_vfio_pci_device *pds_vfio, bool send_cmd)
 }
 
 static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
-                                 struct pds_vfio_bmp_info *bmp_info,
-                                 u32 offset, u32 bmp_bytes, bool read_seq)
+                                 struct pds_vfio_region *region,
+                                 unsigned long *seq_ack_bmp, u32 offset,
+                                 u32 bmp_bytes, bool read_seq)
 {
        const char *bmp_type_str = read_seq ? "read_seq" : "write_ack";
        u8 dma_dir = read_seq ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
@@ -339,7 +384,7 @@ static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
        int err;
        int i;
 
-       bmp = (void *)((u64)bmp_info->bmp + offset);
+       bmp = (void *)((u64)seq_ack_bmp + offset);
        page_offset = offset_in_page(bmp);
        bmp -= page_offset;
 
@@ -375,7 +420,7 @@ static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
                goto out_free_sg_table;
 
        for_each_sgtable_dma_sg(&sg_table, sg, i) {
-               struct pds_lm_sg_elem *sg_elem = &bmp_info->sgl[i];
+               struct pds_lm_sg_elem *sg_elem = &region->sgl[i];
 
                sg_elem->addr = cpu_to_le64(sg_dma_address(sg));
                sg_elem->len = cpu_to_le32(sg_dma_len(sg));
@@ -383,15 +428,16 @@ static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
 
        num_sge = sg_table.nents;
        size = num_sge * sizeof(struct pds_lm_sg_elem);
-       dma_sync_single_for_device(pdsc_dev, bmp_info->sgl_addr, size, dma_dir);
-       err = pds_vfio_dirty_seq_ack_cmd(pds_vfio, bmp_info->sgl_addr, num_sge,
+       offset += region->dev_bmp_offset_start_byte;
+       dma_sync_single_for_device(pdsc_dev, region->sgl_addr, size, dma_dir);
+       err = pds_vfio_dirty_seq_ack_cmd(pds_vfio, region->sgl_addr, num_sge,
                                         offset, bmp_bytes, read_seq);
        if (err)
                dev_err(&pdev->dev,
                        "Dirty bitmap %s failed offset %u bmp_bytes %u num_sge %u DMA 0x%llx: %pe\n",
                        bmp_type_str, offset, bmp_bytes,
-                       num_sge, bmp_info->sgl_addr, ERR_PTR(err));
-       dma_sync_single_for_cpu(pdsc_dev, bmp_info->sgl_addr, size, dma_dir);
+                       num_sge, region->sgl_addr, ERR_PTR(err));
+       dma_sync_single_for_cpu(pdsc_dev, region->sgl_addr, size, dma_dir);
 
        dma_unmap_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
 out_free_sg_table:
@@ -403,32 +449,36 @@ out_free_pages:
 }
 
 static int pds_vfio_dirty_write_ack(struct pds_vfio_pci_device *pds_vfio,
+                                  struct pds_vfio_region *region,
                                    u32 offset, u32 len)
 {
-       return pds_vfio_dirty_seq_ack(pds_vfio, &pds_vfio->dirty.host_ack,
+
+       return pds_vfio_dirty_seq_ack(pds_vfio, region, region->host_ack,
                                      offset, len, WRITE_ACK);
 }
 
 static int pds_vfio_dirty_read_seq(struct pds_vfio_pci_device *pds_vfio,
+                                  struct pds_vfio_region *region,
                                   u32 offset, u32 len)
 {
-       return pds_vfio_dirty_seq_ack(pds_vfio, &pds_vfio->dirty.host_seq,
+       return pds_vfio_dirty_seq_ack(pds_vfio, region, region->host_seq,
                                      offset, len, READ_SEQ);
 }
 
 static int pds_vfio_dirty_process_bitmaps(struct pds_vfio_pci_device *pds_vfio,
+                                         struct pds_vfio_region *region,
                                          struct iova_bitmap *dirty_bitmap,
                                          u32 bmp_offset, u32 len_bytes)
 {
-       u64 page_size = pds_vfio->dirty.region_page_size;
-       u64 region_start = pds_vfio->dirty.region_start;
+       u64 page_size = region->page_size;
+       u64 region_start = region->start;
        u32 bmp_offset_bit;
        __le64 *seq, *ack;
        int dword_count;
 
        dword_count = len_bytes / sizeof(u64);
-       seq = (__le64 *)((u64)pds_vfio->dirty.host_seq.bmp + bmp_offset);
-       ack = (__le64 *)((u64)pds_vfio->dirty.host_ack.bmp + bmp_offset);
+       seq = (__le64 *)((u64)region->host_seq + bmp_offset);
+       ack = (__le64 *)((u64)region->host_ack + bmp_offset);
        bmp_offset_bit = bmp_offset * 8;
 
        for (int i = 0; i < dword_count; i++) {
@@ -451,12 +501,28 @@ static int pds_vfio_dirty_process_bitmaps(struct pds_vfio_pci_device *pds_vfio,
        return 0;
 }
 
+static struct pds_vfio_region *
+pds_vfio_get_region(struct pds_vfio_pci_device *pds_vfio, unsigned long iova)
+{
+       struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+
+       for (int i = 0; i < dirty->num_regions; i++) {
+               struct pds_vfio_region *region = &dirty->regions[i];
+
+               if (iova >= region->start &&
+                   iova < (region->start + region->size))
+                       return region;
+       }
+
+       return NULL;
+}
+
 static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
                               struct iova_bitmap *dirty_bitmap,
                               unsigned long iova, unsigned long length)
 {
        struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
-       struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+       struct pds_vfio_region *region;
        u64 bmp_offset, bmp_bytes;
        u64 bitmap_size, pages;
        int err;
@@ -469,26 +535,31 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
                return -EINVAL;
        }
 
-       pages = DIV_ROUND_UP(length, pds_vfio->dirty.region_page_size);
+       region = pds_vfio_get_region(pds_vfio, iova);
+       if (!region) {
+               dev_err(dev, "vf%u: Failed to find region that contains iova 0x%lx length 0x%lx\n",
+                       pds_vfio->vf_id, iova, length);
+               return -EINVAL;
+       }
+
+       pages = DIV_ROUND_UP(length, region->page_size);
        bitmap_size =
                round_up(pages, sizeof(u64) * BITS_PER_BYTE) / BITS_PER_BYTE;
 
        dev_dbg(dev,
                "vf%u: iova 0x%lx length %lu page_size %llu pages %llu bitmap_size %llu\n",
-               pds_vfio->vf_id, iova, length, pds_vfio->dirty.region_page_size,
+               pds_vfio->vf_id, iova, length, region->page_size,
                pages, bitmap_size);
 
-       if (!length || ((dirty->region_start + iova + length) >
-                       (dirty->region_start + dirty->region_size))) {
+       if (!length || ((iova - region->start + length) > region->size)) {
                dev_err(dev, "Invalid iova 0x%lx and/or length 0x%lx to sync\n",
                        iova, length);
                return -EINVAL;
        }
 
        /* bitmap is modified in 64 bit chunks */
-       bmp_bytes = ALIGN(DIV_ROUND_UP(length / dirty->region_page_size,
-                                      sizeof(u64)),
-                         sizeof(u64));
+       bmp_bytes = ALIGN(DIV_ROUND_UP(length / region->page_size,
+                                      sizeof(u64)), sizeof(u64));
        if (bmp_bytes != bitmap_size) {
                dev_err(dev,
                        "Calculated bitmap bytes %llu not equal to bitmap size %llu\n",
@@ -496,22 +567,30 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
                return -EINVAL;
        }
 
-       bmp_offset = DIV_ROUND_UP(iova / dirty->region_page_size, sizeof(u64));
+       if (bmp_bytes > region->bmp_bytes) {
+               dev_err(dev,
+                       "Calculated bitmap bytes %llu larger than region's cached bmp_bytes %llu\n",
+                       bmp_bytes, region->bmp_bytes);
+               return -EINVAL;
+       }
+
+       bmp_offset = DIV_ROUND_UP((iova - region->start) /
+                                 region->page_size, sizeof(u64));
 
        dev_dbg(dev,
                "Syncing dirty bitmap, iova 0x%lx length 0x%lx, bmp_offset %llu bmp_bytes %llu\n",
                iova, length, bmp_offset, bmp_bytes);
 
-       err = pds_vfio_dirty_read_seq(pds_vfio, bmp_offset, bmp_bytes);
+       err = pds_vfio_dirty_read_seq(pds_vfio, region, bmp_offset, bmp_bytes);
        if (err)
                return err;
 
-       err = pds_vfio_dirty_process_bitmaps(pds_vfio, dirty_bitmap, bmp_offset,
-                                            bmp_bytes);
+       err = pds_vfio_dirty_process_bitmaps(pds_vfio, region, dirty_bitmap,
+                                            bmp_offset, bmp_bytes);
        if (err)
                return err;
 
-       err = pds_vfio_dirty_write_ack(pds_vfio, bmp_offset, bmp_bytes);
+       err = pds_vfio_dirty_write_ack(pds_vfio, region, bmp_offset, bmp_bytes);
        if (err)
                return err;
 
index f78da25d75ca9c37318e0793c55b791bcc74432d..c8e23018b80186a41bca122d88e1d3f52e825d05 100644 (file)
@@ -4,20 +4,22 @@
 #ifndef _DIRTY_H_
 #define _DIRTY_H_
 
-struct pds_vfio_bmp_info {
-       unsigned long *bmp;
-       u32 bmp_bytes;
+struct pds_vfio_region {
+       unsigned long *host_seq;
+       unsigned long *host_ack;
+       u64 bmp_bytes;
+       u64 size;
+       u64 start;
+       u64 page_size;
        struct pds_lm_sg_elem *sgl;
        dma_addr_t sgl_addr;
+       u32 dev_bmp_offset_start_byte;
        u16 num_sge;
 };
 
 struct pds_vfio_dirty {
-       struct pds_vfio_bmp_info host_seq;
-       struct pds_vfio_bmp_info host_ack;
-       u64 region_size;
-       u64 region_start;
-       u64 region_page_size;
+       struct pds_vfio_region *regions;
+       u8 num_regions;
        bool is_enabled;
 };
 
index e27de61ac9fe75f5818dc8d7386270c592c05a07..07fea08ea8a21340cacef113b238adea5dc8b59d 100644 (file)
@@ -38,7 +38,7 @@
 #define vfio_iowrite8  iowrite8
 
 #define VFIO_IOWRITE(size) \
-static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev,           \
+int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev,     \
                        bool test_mem, u##size val, void __iomem *io)   \
 {                                                                      \
        if (test_mem) {                                                 \
@@ -55,7 +55,8 @@ static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev,          \
                up_read(&vdev->memory_lock);                            \
                                                                        \
        return 0;                                                       \
-}
+}                                                                      \
+EXPORT_SYMBOL_GPL(vfio_pci_core_iowrite##size);
 
 VFIO_IOWRITE(8)
 VFIO_IOWRITE(16)
@@ -65,7 +66,7 @@ VFIO_IOWRITE(64)
 #endif
 
 #define VFIO_IOREAD(size) \
-static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev,            \
+int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev,      \
                        bool test_mem, u##size *val, void __iomem *io)  \
 {                                                                      \
        if (test_mem) {                                                 \
@@ -82,7 +83,8 @@ static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev,           \
                up_read(&vdev->memory_lock);                            \
                                                                        \
        return 0;                                                       \
-}
+}                                                                      \
+EXPORT_SYMBOL_GPL(vfio_pci_core_ioread##size);
 
 VFIO_IOREAD(8)
 VFIO_IOREAD(16)
@@ -119,13 +121,13 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
                                if (copy_from_user(&val, buf, 4))
                                        return -EFAULT;
 
-                               ret = vfio_pci_iowrite32(vdev, test_mem,
-                                                        val, io + off);
+                               ret = vfio_pci_core_iowrite32(vdev, test_mem,
+                                                             val, io + off);
                                if (ret)
                                        return ret;
                        } else {
-                               ret = vfio_pci_ioread32(vdev, test_mem,
-                                                       &val, io + off);
+                               ret = vfio_pci_core_ioread32(vdev, test_mem,
+                                                            &val, io + off);
                                if (ret)
                                        return ret;
 
@@ -141,13 +143,13 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
                                if (copy_from_user(&val, buf, 2))
                                        return -EFAULT;
 
-                               ret = vfio_pci_iowrite16(vdev, test_mem,
-                                                        val, io + off);
+                               ret = vfio_pci_core_iowrite16(vdev, test_mem,
+                                                             val, io + off);
                                if (ret)
                                        return ret;
                        } else {
-                               ret = vfio_pci_ioread16(vdev, test_mem,
-                                                       &val, io + off);
+                               ret = vfio_pci_core_ioread16(vdev, test_mem,
+                                                            &val, io + off);
                                if (ret)
                                        return ret;
 
@@ -163,13 +165,13 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
                                if (copy_from_user(&val, buf, 1))
                                        return -EFAULT;
 
-                               ret = vfio_pci_iowrite8(vdev, test_mem,
-                                                       val, io + off);
+                               ret = vfio_pci_core_iowrite8(vdev, test_mem,
+                                                            val, io + off);
                                if (ret)
                                        return ret;
                        } else {
-                               ret = vfio_pci_ioread8(vdev, test_mem,
-                                                      &val, io + off);
+                               ret = vfio_pci_core_ioread8(vdev, test_mem,
+                                                           &val, io + off);
                                if (ret)
                                        return ret;
 
@@ -200,7 +202,7 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
        return done;
 }
 
-static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
+int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
 {
        struct pci_dev *pdev = vdev->pdev;
        int ret;
@@ -223,6 +225,7 @@ static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(vfio_pci_core_setup_barmap);
 
 ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
                        size_t count, loff_t *ppos, bool iswrite)
@@ -262,7 +265,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
                }
                x_end = end;
        } else {
-               int ret = vfio_pci_setup_barmap(vdev, bar);
+               int ret = vfio_pci_core_setup_barmap(vdev, bar);
                if (ret) {
                        done = ret;
                        goto out;
@@ -363,21 +366,21 @@ static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
 {
        switch (ioeventfd->count) {
        case 1:
-               vfio_pci_iowrite8(ioeventfd->vdev, test_mem,
-                                 ioeventfd->data, ioeventfd->addr);
+               vfio_pci_core_iowrite8(ioeventfd->vdev, test_mem,
+                                      ioeventfd->data, ioeventfd->addr);
                break;
        case 2:
-               vfio_pci_iowrite16(ioeventfd->vdev, test_mem,
-                                  ioeventfd->data, ioeventfd->addr);
+               vfio_pci_core_iowrite16(ioeventfd->vdev, test_mem,
+                                       ioeventfd->data, ioeventfd->addr);
                break;
        case 4:
-               vfio_pci_iowrite32(ioeventfd->vdev, test_mem,
-                                  ioeventfd->data, ioeventfd->addr);
+               vfio_pci_core_iowrite32(ioeventfd->vdev, test_mem,
+                                       ioeventfd->data, ioeventfd->addr);
                break;
 #ifdef iowrite64
        case 8:
-               vfio_pci_iowrite64(ioeventfd->vdev, test_mem,
-                                  ioeventfd->data, ioeventfd->addr);
+               vfio_pci_core_iowrite64(ioeventfd->vdev, test_mem,
+                                       ioeventfd->data, ioeventfd->addr);
                break;
 #endif
        }
@@ -438,7 +441,7 @@ int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
                return -EINVAL;
 #endif
 
-       ret = vfio_pci_setup_barmap(vdev, bar);
+       ret = vfio_pci_core_setup_barmap(vdev, bar);
        if (ret)
                return ret;
 
diff --git a/drivers/vfio/pci/virtio/Kconfig b/drivers/vfio/pci/virtio/Kconfig
new file mode 100644 (file)
index 0000000..bd80eca
--- /dev/null
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIRTIO_VFIO_PCI
+        tristate "VFIO support for VIRTIO NET PCI devices"
+        depends on VIRTIO_PCI && VIRTIO_PCI_ADMIN_LEGACY
+        select VFIO_PCI_CORE
+        help
+          This provides support for exposing VIRTIO NET VF devices which support
+          legacy IO access, using the VFIO framework that can work with a legacy
+          virtio driver in the guest.
+          Based on PCIe spec, VFs do not support I/O Space.
+          Because of that, this driver emulates an I/O BAR in software to let a VF be
+          seen as a transitional device by its users and let it work with
+          a legacy driver.
+
+          If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/virtio/Makefile b/drivers/vfio/pci/virtio/Makefile
new file mode 100644 (file)
index 0000000..7171105
--- /dev/null
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio-vfio-pci.o
+virtio-vfio-pci-y := main.o
diff --git a/drivers/vfio/pci/virtio/main.c b/drivers/vfio/pci/virtio/main.c
new file mode 100644 (file)
index 0000000..d5af683
--- /dev/null
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/vfio_pci_core.h>
+#include <linux/virtio_pci.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_pci_admin.h>
+
+struct virtiovf_pci_core_device {
+       struct vfio_pci_core_device core_device;
+       u8 *bar0_virtual_buf;
+       /* synchronize access to the virtual buf */
+       struct mutex bar_mutex;
+       void __iomem *notify_addr;
+       u64 notify_offset;
+       __le32 pci_base_addr_0;
+       __le16 pci_cmd;
+       u8 bar0_virtual_buf_size;
+       u8 notify_bar;
+};
+
+static int
+virtiovf_issue_legacy_rw_cmd(struct virtiovf_pci_core_device *virtvdev,
+                            loff_t pos, char __user *buf,
+                            size_t count, bool read)
+{
+       bool msix_enabled =
+               (virtvdev->core_device.irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
+       struct pci_dev *pdev = virtvdev->core_device.pdev;
+       u8 *bar0_buf = virtvdev->bar0_virtual_buf;
+       bool common;
+       u8 offset;
+       int ret;
+
+       common = pos < VIRTIO_PCI_CONFIG_OFF(msix_enabled);
+       /* offset within the relevant configuration area */
+       offset = common ? pos : pos - VIRTIO_PCI_CONFIG_OFF(msix_enabled);
+       mutex_lock(&virtvdev->bar_mutex);
+       if (read) {
+               if (common)
+                       ret = virtio_pci_admin_legacy_common_io_read(pdev, offset,
+                                       count, bar0_buf + pos);
+               else
+                       ret = virtio_pci_admin_legacy_device_io_read(pdev, offset,
+                                       count, bar0_buf + pos);
+               if (ret)
+                       goto out;
+               if (copy_to_user(buf, bar0_buf + pos, count))
+                       ret = -EFAULT;
+       } else {
+               if (copy_from_user(bar0_buf + pos, buf, count)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+
+               if (common)
+                       ret = virtio_pci_admin_legacy_common_io_write(pdev, offset,
+                                       count, bar0_buf + pos);
+               else
+                       ret = virtio_pci_admin_legacy_device_io_write(pdev, offset,
+                                       count, bar0_buf + pos);
+       }
+out:
+       mutex_unlock(&virtvdev->bar_mutex);
+       return ret;
+}
+
+static int
+virtiovf_pci_bar0_rw(struct virtiovf_pci_core_device *virtvdev,
+                    loff_t pos, char __user *buf,
+                    size_t count, bool read)
+{
+       struct vfio_pci_core_device *core_device = &virtvdev->core_device;
+       struct pci_dev *pdev = core_device->pdev;
+       u16 queue_notify;
+       int ret;
+
+       if (!(le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO))
+               return -EIO;
+
+       if (pos + count > virtvdev->bar0_virtual_buf_size)
+               return -EINVAL;
+
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret) {
+               pci_info_ratelimited(pdev, "runtime resume failed %d\n", ret);
+               return -EIO;
+       }
+
+       switch (pos) {
+       case VIRTIO_PCI_QUEUE_NOTIFY:
+               if (count != sizeof(queue_notify)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (read) {
+                       ret = vfio_pci_core_ioread16(core_device, true, &queue_notify,
+                                                    virtvdev->notify_addr);
+                       if (ret)
+                               goto end;
+                       if (copy_to_user(buf, &queue_notify,
+                                        sizeof(queue_notify))) {
+                               ret = -EFAULT;
+                               goto end;
+                       }
+               } else {
+                       if (copy_from_user(&queue_notify, buf, count)) {
+                               ret = -EFAULT;
+                               goto end;
+                       }
+                       ret = vfio_pci_core_iowrite16(core_device, true, queue_notify,
+                                                     virtvdev->notify_addr);
+               }
+               break;
+       default:
+               ret = virtiovf_issue_legacy_rw_cmd(virtvdev, pos, buf, count,
+                                                  read);
+       }
+
+end:
+       pm_runtime_put(&pdev->dev);
+       return ret ? ret : count;
+}
+
+static bool range_intersect_range(loff_t range1_start, size_t count1,
+                                 loff_t range2_start, size_t count2,
+                                 loff_t *start_offset,
+                                 size_t *intersect_count,
+                                 size_t *register_offset)
+{
+       if (range1_start <= range2_start &&
+           range1_start + count1 > range2_start) {
+               *start_offset = range2_start - range1_start;
+               *intersect_count = min_t(size_t, count2,
+                                        range1_start + count1 - range2_start);
+               *register_offset = 0;
+               return true;
+       }
+
+       if (range1_start > range2_start &&
+           range1_start < range2_start + count2) {
+               *start_offset = 0;
+               *intersect_count = min_t(size_t, count1,
+                                        range2_start + count2 - range1_start);
+               *register_offset = range1_start - range2_start;
+               return true;
+       }
+
+       return false;
+}
+
+static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev,
+                                       char __user *buf, size_t count,
+                                       loff_t *ppos)
+{
+       struct virtiovf_pci_core_device *virtvdev = container_of(
+               core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+       size_t register_offset;
+       loff_t copy_offset;
+       size_t copy_count;
+       __le32 val32;
+       __le16 val16;
+       u8 val8;
+       int ret;
+
+       ret = vfio_pci_core_read(core_vdev, buf, count, ppos);
+       if (ret < 0)
+               return ret;
+
+       if (range_intersect_range(pos, count, PCI_DEVICE_ID, sizeof(val16),
+                                 &copy_offset, &copy_count, &register_offset)) {
+               val16 = cpu_to_le16(VIRTIO_TRANS_ID_NET);
+               if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, copy_count))
+                       return -EFAULT;
+       }
+
+       if ((le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO) &&
+           range_intersect_range(pos, count, PCI_COMMAND, sizeof(val16),
+                                 &copy_offset, &copy_count, &register_offset)) {
+               if (copy_from_user((void *)&val16 + register_offset, buf + copy_offset,
+                                  copy_count))
+                       return -EFAULT;
+               val16 |= cpu_to_le16(PCI_COMMAND_IO);
+               if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset,
+                                copy_count))
+                       return -EFAULT;
+       }
+
+       if (range_intersect_range(pos, count, PCI_REVISION_ID, sizeof(val8),
+                                 &copy_offset, &copy_count, &register_offset)) {
+               /* Transitional needs to have revision 0 */
+               val8 = 0;
+               if (copy_to_user(buf + copy_offset, &val8, copy_count))
+                       return -EFAULT;
+       }
+
+       if (range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, sizeof(val32),
+                                 &copy_offset, &copy_count, &register_offset)) {
+               u32 bar_mask = ~(virtvdev->bar0_virtual_buf_size - 1);
+               u32 pci_base_addr_0 = le32_to_cpu(virtvdev->pci_base_addr_0);
+
+               val32 = cpu_to_le32((pci_base_addr_0 & bar_mask) | PCI_BASE_ADDRESS_SPACE_IO);
+               if (copy_to_user(buf + copy_offset, (void *)&val32 + register_offset, copy_count))
+                       return -EFAULT;
+       }
+
+       if (range_intersect_range(pos, count, PCI_SUBSYSTEM_ID, sizeof(val16),
+                                 &copy_offset, &copy_count, &register_offset)) {
+               /*
+                * Transitional devices use the PCI subsystem device id as
+                * virtio device id, same as legacy driver always did.
+                */
+               val16 = cpu_to_le16(VIRTIO_ID_NET);
+               if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset,
+                                copy_count))
+                       return -EFAULT;
+       }
+
+       if (range_intersect_range(pos, count, PCI_SUBSYSTEM_VENDOR_ID, sizeof(val16),
+                                 &copy_offset, &copy_count, &register_offset)) {
+               val16 = cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET);
+               if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset,
+                                copy_count))
+                       return -EFAULT;
+       }
+
+       return count;
+}
+
+static ssize_t
+virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
+                      size_t count, loff_t *ppos)
+{
+       struct virtiovf_pci_core_device *virtvdev = container_of(
+               core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+       unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+       if (!count)
+               return 0;
+
+       if (index == VFIO_PCI_CONFIG_REGION_INDEX)
+               return virtiovf_pci_read_config(core_vdev, buf, count, ppos);
+
+       if (index == VFIO_PCI_BAR0_REGION_INDEX)
+               return virtiovf_pci_bar0_rw(virtvdev, pos, buf, count, true);
+
+       return vfio_pci_core_read(core_vdev, buf, count, ppos);
+}
+
+static ssize_t virtiovf_pci_write_config(struct vfio_device *core_vdev,
+                                        const char __user *buf, size_t count,
+                                        loff_t *ppos)
+{
+       struct virtiovf_pci_core_device *virtvdev = container_of(
+               core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+       size_t register_offset;
+       loff_t copy_offset;
+       size_t copy_count;
+
+       if (range_intersect_range(pos, count, PCI_COMMAND, sizeof(virtvdev->pci_cmd),
+                                 &copy_offset, &copy_count,
+                                 &register_offset)) {
+               if (copy_from_user((void *)&virtvdev->pci_cmd + register_offset,
+                                  buf + copy_offset,
+                                  copy_count))
+                       return -EFAULT;
+       }
+
+       if (range_intersect_range(pos, count, PCI_BASE_ADDRESS_0,
+                                 sizeof(virtvdev->pci_base_addr_0),
+                                 &copy_offset, &copy_count,
+                                 &register_offset)) {
+               if (copy_from_user((void *)&virtvdev->pci_base_addr_0 + register_offset,
+                                  buf + copy_offset,
+                                  copy_count))
+                       return -EFAULT;
+       }
+
+       return vfio_pci_core_write(core_vdev, buf, count, ppos);
+}
+
+static ssize_t
+virtiovf_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
+                       size_t count, loff_t *ppos)
+{
+       struct virtiovf_pci_core_device *virtvdev = container_of(
+               core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+       unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+       loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+       if (!count)
+               return 0;
+
+       if (index == VFIO_PCI_CONFIG_REGION_INDEX)
+               return virtiovf_pci_write_config(core_vdev, buf, count, ppos);
+
+       if (index == VFIO_PCI_BAR0_REGION_INDEX)
+               return virtiovf_pci_bar0_rw(virtvdev, pos, (char __user *)buf, count, false);
+
+       return vfio_pci_core_write(core_vdev, buf, count, ppos);
+}
+
+static int
+virtiovf_pci_ioctl_get_region_info(struct vfio_device *core_vdev,
+                                  unsigned int cmd, unsigned long arg)
+{
+       struct virtiovf_pci_core_device *virtvdev = container_of(
+               core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+       unsigned long minsz = offsetofend(struct vfio_region_info, offset);
+       void __user *uarg = (void __user *)arg;
+       struct vfio_region_info info = {};
+
+       if (copy_from_user(&info, uarg, minsz))
+               return -EFAULT;
+
+       if (info.argsz < minsz)
+               return -EINVAL;
+
+       switch (info.index) {
+       case VFIO_PCI_BAR0_REGION_INDEX:
+               info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+               info.size = virtvdev->bar0_virtual_buf_size;
+               info.flags = VFIO_REGION_INFO_FLAG_READ |
+                            VFIO_REGION_INFO_FLAG_WRITE;
+               return copy_to_user(uarg, &info, minsz) ? -EFAULT : 0;
+       default:
+               return vfio_pci_core_ioctl(core_vdev, cmd, arg);
+       }
+}
+
+static long
+virtiovf_vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
+                            unsigned long arg)
+{
+       switch (cmd) {
+       case VFIO_DEVICE_GET_REGION_INFO:
+               return virtiovf_pci_ioctl_get_region_info(core_vdev, cmd, arg);
+       default:
+               return vfio_pci_core_ioctl(core_vdev, cmd, arg);
+       }
+}
+
+static int
+virtiovf_set_notify_addr(struct virtiovf_pci_core_device *virtvdev)
+{
+       struct vfio_pci_core_device *core_device = &virtvdev->core_device;
+       int ret;
+
+       /*
+        * Setup the BAR where the 'notify' exists to be used by vfio as well
+        * This will let us mmap it only once and use it when needed.
+        */
+       ret = vfio_pci_core_setup_barmap(core_device,
+                                        virtvdev->notify_bar);
+       if (ret)
+               return ret;
+
+       virtvdev->notify_addr = core_device->barmap[virtvdev->notify_bar] +
+                       virtvdev->notify_offset;
+       return 0;
+}
+
+static int virtiovf_pci_open_device(struct vfio_device *core_vdev)
+{
+       struct virtiovf_pci_core_device *virtvdev = container_of(
+               core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+       struct vfio_pci_core_device *vdev = &virtvdev->core_device;
+       int ret;
+
+       ret = vfio_pci_core_enable(vdev);
+       if (ret)
+               return ret;
+
+       if (virtvdev->bar0_virtual_buf) {
+               /*
+                * Upon close_device() the vfio_pci_core_disable() is called
+                * and will close all the previous mmaps, so it seems that the
+                * valid life cycle for the 'notify' addr is per open/close.
+                */
+               ret = virtiovf_set_notify_addr(virtvdev);
+               if (ret) {
+                       vfio_pci_core_disable(vdev);
+                       return ret;
+               }
+       }
+
+       vfio_pci_core_finish_enable(vdev);
+       return 0;
+}
+
+static int virtiovf_get_device_config_size(unsigned short device)
+{
+       /* Network card */
+       return offsetofend(struct virtio_net_config, status);
+}
+
+static int virtiovf_read_notify_info(struct virtiovf_pci_core_device *virtvdev)
+{
+       u64 offset;
+       int ret;
+       u8 bar;
+
+       ret = virtio_pci_admin_legacy_io_notify_info(virtvdev->core_device.pdev,
+                               VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM,
+                               &bar, &offset);
+       if (ret)
+               return ret;
+
+       virtvdev->notify_bar = bar;
+       virtvdev->notify_offset = offset;
+       return 0;
+}
+
+static int virtiovf_pci_init_device(struct vfio_device *core_vdev)
+{
+       struct virtiovf_pci_core_device *virtvdev = container_of(
+               core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+       struct pci_dev *pdev;
+       int ret;
+
+       ret = vfio_pci_core_init_dev(core_vdev);
+       if (ret)
+               return ret;
+
+       pdev = virtvdev->core_device.pdev;
+       ret = virtiovf_read_notify_info(virtvdev);
+       if (ret)
+               return ret;
+
+       virtvdev->bar0_virtual_buf_size = VIRTIO_PCI_CONFIG_OFF(true) +
+                               virtiovf_get_device_config_size(pdev->device);
+       BUILD_BUG_ON(!is_power_of_2(virtvdev->bar0_virtual_buf_size));
+       virtvdev->bar0_virtual_buf = kzalloc(virtvdev->bar0_virtual_buf_size,
+                                            GFP_KERNEL);
+       if (!virtvdev->bar0_virtual_buf)
+               return -ENOMEM;
+       mutex_init(&virtvdev->bar_mutex);
+       return 0;
+}
+
+static void virtiovf_pci_core_release_dev(struct vfio_device *core_vdev)
+{
+       struct virtiovf_pci_core_device *virtvdev = container_of(
+               core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+
+       kfree(virtvdev->bar0_virtual_buf);
+       vfio_pci_core_release_dev(core_vdev);
+}
+
+static const struct vfio_device_ops virtiovf_vfio_pci_tran_ops = {
+       .name = "virtio-vfio-pci-trans",
+       .init = virtiovf_pci_init_device,
+       .release = virtiovf_pci_core_release_dev,
+       .open_device = virtiovf_pci_open_device,
+       .close_device = vfio_pci_core_close_device,
+       .ioctl = virtiovf_vfio_pci_core_ioctl,
+       .device_feature = vfio_pci_core_ioctl_feature,
+       .read = virtiovf_pci_core_read,
+       .write = virtiovf_pci_core_write,
+       .mmap = vfio_pci_core_mmap,
+       .request = vfio_pci_core_request,
+       .match = vfio_pci_core_match,
+       .bind_iommufd = vfio_iommufd_physical_bind,
+       .unbind_iommufd = vfio_iommufd_physical_unbind,
+       .attach_ioas = vfio_iommufd_physical_attach_ioas,
+       .detach_ioas = vfio_iommufd_physical_detach_ioas,
+};
+
+static const struct vfio_device_ops virtiovf_vfio_pci_ops = {
+       .name = "virtio-vfio-pci",
+       .init = vfio_pci_core_init_dev,
+       .release = vfio_pci_core_release_dev,
+       .open_device = virtiovf_pci_open_device,
+       .close_device = vfio_pci_core_close_device,
+       .ioctl = vfio_pci_core_ioctl,
+       .device_feature = vfio_pci_core_ioctl_feature,
+       .read = vfio_pci_core_read,
+       .write = vfio_pci_core_write,
+       .mmap = vfio_pci_core_mmap,
+       .request = vfio_pci_core_request,
+       .match = vfio_pci_core_match,
+       .bind_iommufd = vfio_iommufd_physical_bind,
+       .unbind_iommufd = vfio_iommufd_physical_unbind,
+       .attach_ioas = vfio_iommufd_physical_attach_ioas,
+       .detach_ioas = vfio_iommufd_physical_detach_ioas,
+};
+
+static bool virtiovf_bar0_exists(struct pci_dev *pdev)
+{
+       struct resource *res = pdev->resource;
+
+       return res->flags;
+}
+
+static int virtiovf_pci_probe(struct pci_dev *pdev,
+                             const struct pci_device_id *id)
+{
+       const struct vfio_device_ops *ops = &virtiovf_vfio_pci_ops;
+       struct virtiovf_pci_core_device *virtvdev;
+       int ret;
+
+       if (pdev->is_virtfn && virtio_pci_admin_has_legacy_io(pdev) &&
+           !virtiovf_bar0_exists(pdev))
+               ops = &virtiovf_vfio_pci_tran_ops;
+
+       virtvdev = vfio_alloc_device(virtiovf_pci_core_device, core_device.vdev,
+                                    &pdev->dev, ops);
+       if (IS_ERR(virtvdev))
+               return PTR_ERR(virtvdev);
+
+       dev_set_drvdata(&pdev->dev, &virtvdev->core_device);
+       ret = vfio_pci_core_register_device(&virtvdev->core_device);
+       if (ret)
+               goto out;
+       return 0;
+out:
+       vfio_put_device(&virtvdev->core_device.vdev);
+       return ret;
+}
+
+static void virtiovf_pci_remove(struct pci_dev *pdev)
+{
+       struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev);
+
+       vfio_pci_core_unregister_device(&virtvdev->core_device);
+       vfio_put_device(&virtvdev->core_device.vdev);
+}
+
+static const struct pci_device_id virtiovf_pci_table[] = {
+       /* Only virtio-net is supported/tested so far */
+       { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_REDHAT_QUMRANET, 0x1041) },
+       {}
+};
+
+MODULE_DEVICE_TABLE(pci, virtiovf_pci_table);
+
+static void virtiovf_pci_aer_reset_done(struct pci_dev *pdev)
+{
+       struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev);
+
+       virtvdev->pci_cmd = 0;
+}
+
+static const struct pci_error_handlers virtiovf_err_handlers = {
+       .reset_done = virtiovf_pci_aer_reset_done,
+       .error_detected = vfio_pci_core_aer_err_detected,
+};
+
+static struct pci_driver virtiovf_pci_driver = {
+       .name = KBUILD_MODNAME,
+       .id_table = virtiovf_pci_table,
+       .probe = virtiovf_pci_probe,
+       .remove = virtiovf_pci_remove,
+       .err_handler = &virtiovf_err_handlers,
+       .driver_managed_dma = true,
+};
+
+module_pci_driver(virtiovf_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yishai Hadas <yishaih@nvidia.com>");
+MODULE_DESCRIPTION(
+       "VIRTIO VFIO PCI - User Level meta-driver for VIRTIO NET devices");
index 307e3f29b527f5b6178ded2705bf9baebda2e3a9..bde84ad344e50181685f5fbc2620c20b7b33f5a0 100644 (file)
@@ -448,4 +448,18 @@ static inline void vfio_device_put_kvm(struct vfio_device *device)
 }
 #endif
 
+#ifdef CONFIG_VFIO_DEBUGFS
+void vfio_debugfs_create_root(void);
+void vfio_debugfs_remove_root(void);
+
+void vfio_device_debugfs_init(struct vfio_device *vdev);
+void vfio_device_debugfs_exit(struct vfio_device *vdev);
+#else
+static inline void vfio_debugfs_create_root(void) { }
+static inline void vfio_debugfs_remove_root(void) { }
+
+static inline void vfio_device_debugfs_init(struct vfio_device *vdev) { }
+static inline void vfio_device_debugfs_exit(struct vfio_device *vdev) { }
+#endif /* CONFIG_VFIO_DEBUGFS */
+
 #endif
index eacd6ec04de5a42d58b1a677cffb12d514556f4b..b2854d7939ce02ddf2be186e483c0f8c3f094a26 100644 (file)
@@ -1436,7 +1436,7 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
        list_for_each_entry(d, &iommu->domain_list, next) {
                ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
                                npage << PAGE_SHIFT, prot | IOMMU_CACHE,
-                               GFP_KERNEL);
+                               GFP_KERNEL_ACCOUNT);
                if (ret)
                        goto unwind;
 
@@ -1750,7 +1750,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
                        }
 
                        ret = iommu_map(domain->domain, iova, phys, size,
-                                       dma->prot | IOMMU_CACHE, GFP_KERNEL);
+                                       dma->prot | IOMMU_CACHE,
+                                       GFP_KERNEL_ACCOUNT);
                        if (ret) {
                                if (!dma->iommu_mapped) {
                                        vfio_unpin_pages_remote(dma, iova,
@@ -1845,7 +1846,8 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *
                        continue;
 
                ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
-                               IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
+                               IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
+                               GFP_KERNEL_ACCOUNT);
                if (!ret) {
                        size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
 
index 8d4995ada74a01848ce8e7becf61120cc10ec33a..1cc93aac99a290d903819635284860b48600ab5d 100644 (file)
@@ -311,6 +311,7 @@ static int __vfio_register_dev(struct vfio_device *device,
        refcount_set(&device->refcount, 1);
 
        vfio_device_group_register(device);
+       vfio_device_debugfs_init(device);
 
        return 0;
 err_out:
@@ -378,6 +379,7 @@ void vfio_unregister_group_dev(struct vfio_device *device)
                }
        }
 
+       vfio_device_debugfs_exit(device);
        /* Balances vfio_device_set_group in register path */
        vfio_device_remove_group(device);
 }
@@ -1676,6 +1678,7 @@ static int __init vfio_init(void)
        if (ret)
                goto err_alloc_dev_chrdev;
 
+       vfio_debugfs_create_root();
        pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
        return 0;
 
@@ -1691,6 +1694,7 @@ err_virqfd:
 
 static void __exit vfio_cleanup(void)
 {
+       vfio_debugfs_remove_root();
        ida_destroy(&vfio.device_ida);
        vfio_cdev_cleanup();
        class_destroy(vfio.device_class);
index 173beda74b38c09446da95fe79c64ae6440cf2e0..bc4a51e4638b46c79345f2be89dc7639b81ee28a 100644 (file)
@@ -59,6 +59,7 @@ struct vhost_vdpa {
        int in_batch;
        struct vdpa_iova_range range;
        u32 batch_asid;
+       bool suspended;
 };
 
 static DEFINE_IDA(vhost_vdpa_ida);
@@ -232,6 +233,8 @@ static int _compat_vdpa_reset(struct vhost_vdpa *v)
        struct vdpa_device *vdpa = v->vdpa;
        u32 flags = 0;
 
+       v->suspended = false;
+
        if (v->vdev.vqs) {
                flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
                                                    VHOST_BACKEND_F_IOTLB_PERSIST) ?
@@ -590,11 +593,16 @@ static long vhost_vdpa_suspend(struct vhost_vdpa *v)
 {
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
+       int ret;
 
        if (!ops->suspend)
                return -EOPNOTSUPP;
 
-       return ops->suspend(vdpa);
+       ret = ops->suspend(vdpa);
+       if (!ret)
+               v->suspended = true;
+
+       return ret;
 }
 
 /* After a successful return of this ioctl the device resumes processing
@@ -605,11 +613,16 @@ static long vhost_vdpa_resume(struct vhost_vdpa *v)
 {
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
+       int ret;
 
        if (!ops->resume)
                return -EOPNOTSUPP;
 
-       return ops->resume(vdpa);
+       ret = ops->resume(vdpa);
+       if (!ret)
+               v->suspended = false;
+
+       return ret;
 }
 
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
@@ -690,6 +703,9 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 
        switch (cmd) {
        case VHOST_SET_VRING_ADDR:
+               if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
+                       return -EINVAL;
+
                if (ops->set_vq_address(vdpa, idx,
                                        (u64)(uintptr_t)vq->desc,
                                        (u64)(uintptr_t)vq->avail,
@@ -698,6 +714,9 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                break;
 
        case VHOST_SET_VRING_BASE:
+               if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
+                       return -EINVAL;
+
                if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
                        vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
                        vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
@@ -968,7 +987,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
                        r = ops->set_map(vdpa, asid, iotlb);
        } else {
                r = iommu_map(v->domain, iova, pa, size,
-                             perm_to_iommu_flags(perm), GFP_KERNEL);
+                             perm_to_iommu_flags(perm),
+                             GFP_KERNEL_ACCOUNT);
        }
        if (r) {
                vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
index 94d092091b5e76cdfe2d7696d1edf0a2de2c4b3f..ea2d0d69bd8cc11f6393bea249b9744d57fb86a7 100644 (file)
@@ -395,6 +395,17 @@ config BACKLIGHT_LP8788
        help
          This supports TI LP8788 backlight driver.
 
+config BACKLIGHT_MP3309C
+       tristate "Backlight Driver for MPS MP3309C"
+       depends on I2C && PWM
+       select REGMAP_I2C
+       help
+         This supports MPS MP3309C backlight WLED driver in both PWM and
+         analog/I2C dimming modes.
+
+         To compile this driver as a module, choose M here: the module will
+         be called mp3309c.
+
 config BACKLIGHT_PANDORA
        tristate "Backlight driver for Pandora console"
        depends on TWL4030_CORE
index 67d3ff39be3c1e4d4969c2ea5eff199504c585a0..06966cb204597b74afffe16ba4814357843b2d86 100644 (file)
@@ -43,6 +43,7 @@ obj-$(CONFIG_BACKLIGHT_LP855X)                += lp855x_bl.o
 obj-$(CONFIG_BACKLIGHT_LP8788)         += lp8788_bl.o
 obj-$(CONFIG_BACKLIGHT_LV5207LP)       += lv5207lp.o
 obj-$(CONFIG_BACKLIGHT_MAX8925)                += max8925_bl.o
+obj-$(CONFIG_BACKLIGHT_MP3309C)                += mp3309c.o
 obj-$(CONFIG_BACKLIGHT_MT6370)         += mt6370-backlight.o
 obj-$(CONFIG_BACKLIGHT_OMAP1)          += omap1_bl.o
 obj-$(CONFIG_BACKLIGHT_PANDORA)                += pandora_bl.o
index f76d2469d490103dfd7090310c60a73f7736207b..d7298376cf74ddd89c4804ab297b621538ff15e2 100644 (file)
@@ -6,11 +6,11 @@
  */
 
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/lcd.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/spi/spi.h>
 
 #define HX8357_NUM_IM_PINS     3
 #define HX8369_SET_GAMMA_CURVE_RELATED         0xe0
 
 struct hx8357_data {
-       unsigned                im_pins[HX8357_NUM_IM_PINS];
-       unsigned                reset;
+       struct gpio_descs       *im_pins;
+       struct gpio_desc        *reset;
        struct spi_device       *spi;
        int                     state;
-       bool                    use_im_pins;
 };
 
 static u8 hx8357_seq_power[] = {
@@ -321,11 +320,11 @@ static void hx8357_lcd_reset(struct lcd_device *lcdev)
        struct hx8357_data *lcd = lcd_get_data(lcdev);
 
        /* Reset the screen */
-       gpio_set_value(lcd->reset, 1);
+       gpiod_set_value(lcd->reset, 0);
        usleep_range(10000, 12000);
-       gpio_set_value(lcd->reset, 0);
+       gpiod_set_value(lcd->reset, 1);
        usleep_range(10000, 12000);
-       gpio_set_value(lcd->reset, 1);
+       gpiod_set_value(lcd->reset, 0);
 
        /* The controller needs 120ms to recover from reset */
        msleep(120);
@@ -340,10 +339,10 @@ static int hx8357_lcd_init(struct lcd_device *lcdev)
         * Set the interface selection pins to SPI mode, with three
         * wires
         */
-       if (lcd->use_im_pins) {
-               gpio_set_value_cansleep(lcd->im_pins[0], 1);
-               gpio_set_value_cansleep(lcd->im_pins[1], 0);
-               gpio_set_value_cansleep(lcd->im_pins[2], 1);
+       if (lcd->im_pins) {
+               gpiod_set_value_cansleep(lcd->im_pins->desc[0], 1);
+               gpiod_set_value_cansleep(lcd->im_pins->desc[1], 0);
+               gpiod_set_value_cansleep(lcd->im_pins->desc[2], 1);
        }
 
        ret = hx8357_spi_write_array(lcdev, hx8357_seq_power,
@@ -580,6 +579,7 @@ MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
 
 static int hx8357_probe(struct spi_device *spi)
 {
+       struct device *dev = &spi->dev;
        struct lcd_device *lcdev;
        struct hx8357_data *lcd;
        const struct of_device_id *match;
@@ -601,49 +601,19 @@ static int hx8357_probe(struct spi_device *spi)
        if (!match || !match->data)
                return -EINVAL;
 
-       lcd->reset = of_get_named_gpio(spi->dev.of_node, "gpios-reset", 0);
-       if (!gpio_is_valid(lcd->reset)) {
-               dev_err(&spi->dev, "Missing dt property: gpios-reset\n");
-               return -EINVAL;
-       }
+       lcd->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(lcd->reset))
+               return dev_err_probe(dev, PTR_ERR(lcd->reset), "failed to request reset GPIO\n");
+       gpiod_set_consumer_name(lcd->reset, "hx8357-reset");
 
-       ret = devm_gpio_request_one(&spi->dev, lcd->reset,
-                                   GPIOF_OUT_INIT_HIGH,
-                                   "hx8357-reset");
-       if (ret) {
-               dev_err(&spi->dev,
-                       "failed to request gpio %d: %d\n",
-                       lcd->reset, ret);
-               return -EINVAL;
-       }
+       lcd->im_pins = devm_gpiod_get_array_optional(dev, "im", GPIOD_OUT_LOW);
+       if (IS_ERR(lcd->im_pins))
+               return dev_err_probe(dev, PTR_ERR(lcd->im_pins), "failed to request im GPIOs\n");
+       if (lcd->im_pins->ndescs < HX8357_NUM_IM_PINS)
+               return dev_err_probe(dev, -EINVAL, "not enough im GPIOs\n");
 
-       if (of_property_present(spi->dev.of_node, "im-gpios")) {
-               lcd->use_im_pins = 1;
-
-               for (i = 0; i < HX8357_NUM_IM_PINS; i++) {
-                       lcd->im_pins[i] = of_get_named_gpio(spi->dev.of_node,
-                                                           "im-gpios", i);
-                       if (lcd->im_pins[i] == -EPROBE_DEFER) {
-                               dev_info(&spi->dev, "GPIO requested is not here yet, deferring the probe\n");
-                               return -EPROBE_DEFER;
-                       }
-                       if (!gpio_is_valid(lcd->im_pins[i])) {
-                               dev_err(&spi->dev, "Missing dt property: im-gpios\n");
-                               return -EINVAL;
-                       }
-
-                       ret = devm_gpio_request_one(&spi->dev, lcd->im_pins[i],
-                                                   GPIOF_OUT_INIT_LOW,
-                                                   "im_pins");
-                       if (ret) {
-                               dev_err(&spi->dev, "failed to request gpio %d: %d\n",
-                                       lcd->im_pins[i], ret);
-                               return -EINVAL;
-                       }
-               }
-       } else {
-               lcd->use_im_pins = 0;
-       }
+       for (i = 0; i < HX8357_NUM_IM_PINS; i++)
+               gpiod_set_consumer_name(lcd->im_pins->desc[i], "im_pins");
 
        lcdev = devm_lcd_device_register(&spi->dev, "mxsfb", &spi->dev, lcd,
                                        &hx8357_ops);
index e7b6bd827986fa931de3d3e6c22565e8227afd35..c8e0e655dc867d49ac6729abecea39e9590a8792 100644 (file)
@@ -81,7 +81,7 @@
 #define START_RW_WRITE         0
 #define START_RW_READ          1
 
-/**
+/*
  * START_BYTE(id, rs, rw)
  *
  * Set the start byte according to the required operation.
 #define START_BYTE(id, rs, rw) \
        (0x70 | (((id) & 0x01) << 2) | (((rs) & 0x01) << 1) | ((rw) & 0x01))
 
-/**
+/*
  * CHECK_FREQ_REG(spi_device s, spi_transfer x) - Check the frequency
  *     for the SPI transfer. According to the datasheet, the controller
  *     accept higher frequency for the GRAM transfer, but it requires
@@ -269,6 +269,10 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
        spi_message_add_tail(&xfer_regindex, &msg);
 
        ret = spi_sync(spi, &msg);
+       if (ret < 0) {
+               dev_err(&spi->dev, "Error sending SPI message 0x%x", ret);
+               return ret;
+       }
 
        spi_message_init(&msg);
        tbuf[0] = set_tx_byte(START_BYTE(ili922x_id, START_RS_REG,
index 8fcb62be597b849b6133044351092c647152e9a4..a3412c936ca28cfb38544aa4a3c3eb7f1c0155ad 100644 (file)
@@ -180,7 +180,7 @@ static int lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max)
 
        pchip->pwmd_state.enabled = pchip->pwmd_state.duty_cycle ? true : false;
 
-       return pwm_apply_state(pchip->pwmd, &pchip->pwmd_state);
+       return pwm_apply_might_sleep(pchip->pwmd, &pchip->pwmd_state);
 }
 
 /* update and get brightness */
index da1f124db69c0239b7e293f6a8d9309d46155548..7075bfab59c4dc4f1354c89ef0881397eb60c6fe 100644 (file)
@@ -234,7 +234,7 @@ static int lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
        state.duty_cycle = div_u64(br * state.period, max_br);
        state.enabled = state.duty_cycle;
 
-       return pwm_apply_state(lp->pwm, &state);
+       return pwm_apply_might_sleep(lp->pwm, &state);
 }
 
 static int lp855x_bl_update_status(struct backlight_device *bl)
diff --git a/drivers/video/backlight/mp3309c.c b/drivers/video/backlight/mp3309c.c
new file mode 100644 (file)
index 0000000..34d7125
--- /dev/null
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MPS MP3309C White LED driver with I2C interface
+ *
+ * This driver supports both analog (by I2C commands) and PWM dimming control
+ * modes.
+ *
+ * Copyright (C) 2023 ASEM Srl
+ * Author: Flavio Suligoi <f.suligoi@asem.it>
+ *
+ * Based on pwm_bl.c
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+#define REG_I2C_0      0x00
+#define REG_I2C_1      0x01
+
+#define REG_I2C_0_EN   0x80
+#define REG_I2C_0_D0   0x40
+#define REG_I2C_0_D1   0x20
+#define REG_I2C_0_D2   0x10
+#define REG_I2C_0_D3   0x08
+#define REG_I2C_0_D4   0x04
+#define REG_I2C_0_RSRV1        0x02
+#define REG_I2C_0_RSRV2        0x01
+
+#define REG_I2C_1_RSRV1        0x80
+#define REG_I2C_1_DIMS 0x40
+#define REG_I2C_1_SYNC 0x20
+#define REG_I2C_1_OVP0 0x10
+#define REG_I2C_1_OVP1 0x08
+#define REG_I2C_1_VOS  0x04
+#define REG_I2C_1_LEDO 0x02
+#define REG_I2C_1_OTP  0x01
+
+#define ANALOG_I2C_NUM_LEVELS  32              /* 0..31 */
+#define ANALOG_I2C_REG_MASK    0x7c
+
+#define MP3309C_PWM_DEFAULT_NUM_LEVELS 256     /* 0..255 */
+
+enum mp3309c_status_value {
+       FIRST_POWER_ON,
+       BACKLIGHT_OFF,
+       BACKLIGHT_ON,
+};
+
+enum mp3309c_dimming_mode_value {
+       DIMMING_PWM,
+       DIMMING_ANALOG_I2C,
+};
+
+struct mp3309c_platform_data {
+       unsigned int max_brightness;
+       unsigned int default_brightness;
+       unsigned int *levels;
+       u8  dimming_mode;
+       u8  over_voltage_protection;
+       bool sync_mode;
+       u8 status;
+};
+
+struct mp3309c_chip {
+       struct device *dev;
+       struct mp3309c_platform_data *pdata;
+       struct backlight_device *bl;
+       struct gpio_desc *enable_gpio;
+       struct regmap *regmap;
+       struct pwm_device *pwmd;
+};
+
+static const struct regmap_config mp3309c_regmap = {
+       .name = "mp3309c_regmap",
+       .reg_bits = 8,
+       .reg_stride = 1,
+       .val_bits = 8,
+       .max_register = REG_I2C_1,
+};
+
+static int mp3309c_enable_device(struct mp3309c_chip *chip)
+{
+       u8 reg_val;
+       int ret;
+
+       /* I2C register #0 - Device enable */
+       ret = regmap_update_bits(chip->regmap, REG_I2C_0, REG_I2C_0_EN,
+                                REG_I2C_0_EN);
+       if (ret)
+               return ret;
+
+       /*
+        * I2C register #1 - Set working mode:
+        *  - set one of the two dimming modes:
+        *    - PWM dimming using an external PWM dimming signal
+        *    - analog dimming using I2C commands
+        *  - enable/disable synchronous mode
+        *  - set overvoltage protection (OVP)
+        */
+       reg_val = 0x00;
+       if (chip->pdata->dimming_mode == DIMMING_PWM)
+               reg_val |= REG_I2C_1_DIMS;
+       if (chip->pdata->sync_mode)
+               reg_val |= REG_I2C_1_SYNC;
+       reg_val |= chip->pdata->over_voltage_protection;
+       ret = regmap_write(chip->regmap, REG_I2C_1, reg_val);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int mp3309c_bl_update_status(struct backlight_device *bl)
+{
+       struct mp3309c_chip *chip = bl_get_data(bl);
+       int brightness = backlight_get_brightness(bl);
+       struct pwm_state pwmstate;
+       unsigned int analog_val, bits_val;
+       int i, ret;
+
+       if (chip->pdata->dimming_mode == DIMMING_PWM) {
+               /*
+                * PWM control mode
+                */
+               pwm_get_state(chip->pwmd, &pwmstate);
+               pwm_set_relative_duty_cycle(&pwmstate,
+                                           chip->pdata->levels[brightness],
+                                           chip->pdata->levels[chip->pdata->max_brightness]);
+               pwmstate.enabled = true;
+               ret = pwm_apply_state(chip->pwmd, &pwmstate);
+               if (ret)
+                       return ret;
+
+               switch (chip->pdata->status) {
+               case FIRST_POWER_ON:
+               case BACKLIGHT_OFF:
+                       /*
+                        * After 20ms of low pwm signal level, the chip turns
+                        * off automatically. In this case, before enabling the
+                        * chip again, we must wait about 10ms for pwm signal to
+                        * stabilize.
+                        */
+                       if (brightness > 0) {
+                               msleep(10);
+                               mp3309c_enable_device(chip);
+                               chip->pdata->status = BACKLIGHT_ON;
+                       } else {
+                               chip->pdata->status = BACKLIGHT_OFF;
+                       }
+                       break;
+               case BACKLIGHT_ON:
+                       if (brightness == 0)
+                               chip->pdata->status = BACKLIGHT_OFF;
+                       break;
+               }
+       } else {
+               /*
+                * Analog (by I2C command) control mode
+                *
+                * The first time, before setting brightness, we must enable the
+                * device
+                */
+               if (chip->pdata->status == FIRST_POWER_ON)
+                       mp3309c_enable_device(chip);
+
+               /*
+                * Dimming mode I2C command (fixed dimming range 0..31)
+                *
+                * The 5 bits of the dimming analog value D4..D0 are allocated
+                * in the I2C register #0, in the following way:
+                *
+                *     +--+--+--+--+--+--+--+--+
+                *     |EN|D0|D1|D2|D3|D4|XX|XX|
+                *     +--+--+--+--+--+--+--+--+
+                */
+               analog_val = brightness;
+               bits_val = 0;
+               for (i = 0; i <= 5; i++)
+                       bits_val += ((analog_val >> i) & 0x01) << (6 - i);
+               ret = regmap_update_bits(chip->regmap, REG_I2C_0,
+                                        ANALOG_I2C_REG_MASK, bits_val);
+               if (ret)
+                       return ret;
+
+               if (brightness > 0)
+                       chip->pdata->status = BACKLIGHT_ON;
+               else
+                       chip->pdata->status = BACKLIGHT_OFF;
+       }
+
+       return 0;
+}
+
+static const struct backlight_ops mp3309c_bl_ops = {
+       .update_status = mp3309c_bl_update_status,
+};
+
+static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
+                                struct mp3309c_platform_data *pdata)
+{
+       struct device_node *node = chip->dev->of_node;
+       struct property *prop_pwms;
+       struct property *prop_levels = NULL;
+       int length = 0;
+       int ret, i;
+       unsigned int num_levels, tmp_value;
+
+       if (!node) {
+               dev_err(chip->dev, "failed to get DT node\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Dimming mode: the MP3309C provides two dimming control modes:
+        *
+        * - PWM mode
+        * - Analog by I2C control mode (default)
+        *
+        * I2C control mode is assumed by default but, if the pwms property is
+        * found in the backlight node, the mode switches to PWM mode.
+        */
+       pdata->dimming_mode = DIMMING_ANALOG_I2C;
+       prop_pwms = of_find_property(node, "pwms", &length);
+       if (prop_pwms) {
+               chip->pwmd = devm_pwm_get(chip->dev, NULL);
+               if (IS_ERR(chip->pwmd))
+                       return dev_err_probe(chip->dev, PTR_ERR(chip->pwmd),
+                                            "error getting pwm data\n");
+               pdata->dimming_mode = DIMMING_PWM;
+               pwm_apply_args(chip->pwmd);
+       }
+
+       /*
+        * In I2C control mode the dimming levels (0..31) are fixed by the
+        * hardware, while in PWM control mode they can be chosen by the user,
+        * to allow nonlinear mappings.
+        */
+       if  (pdata->dimming_mode == DIMMING_ANALOG_I2C) {
+               /*
+                * Analog (by I2C commands) control mode: fixed 0..31 brightness
+                * levels
+                */
+               num_levels = ANALOG_I2C_NUM_LEVELS;
+
+               /* Enable GPIO used in I2C dimming mode only */
+               chip->enable_gpio = devm_gpiod_get(chip->dev, "enable",
+                                                  GPIOD_OUT_HIGH);
+               if (IS_ERR(chip->enable_gpio))
+                       return dev_err_probe(chip->dev,
+                                            PTR_ERR(chip->enable_gpio),
+                                            "error getting enable gpio\n");
+       } else {
+               /*
+                * PWM control mode: check for brightness level in DT
+                */
+               prop_levels = of_find_property(node, "brightness-levels",
+                                              &length);
+               if (prop_levels) {
+                       /* Read brightness levels from DT */
+                       num_levels = length / sizeof(u32);
+                       if (num_levels < 2)
+                               return -EINVAL;
+               } else {
+                       /* Use default brightness levels */
+                       num_levels = MP3309C_PWM_DEFAULT_NUM_LEVELS;
+               }
+       }
+
+       /* Fill brightness levels array */
+       pdata->levels = devm_kcalloc(chip->dev, num_levels,
+                                    sizeof(*pdata->levels), GFP_KERNEL);
+       if (!pdata->levels)
+               return -ENOMEM;
+       if (prop_levels) {
+               ret = of_property_read_u32_array(node, "brightness-levels",
+                                                pdata->levels,
+                                                num_levels);
+               if (ret < 0)
+                       return ret;
+       } else {
+               for (i = 0; i < num_levels; i++)
+                       pdata->levels[i] = i;
+       }
+
+       pdata->max_brightness = num_levels - 1;
+
+       ret = of_property_read_u32(node, "default-brightness",
+                                  &pdata->default_brightness);
+       if (ret)
+               pdata->default_brightness = pdata->max_brightness;
+       if (pdata->default_brightness > pdata->max_brightness) {
+               dev_err(chip->dev,
+                       "default brightness exceeds max brightness\n");
+               pdata->default_brightness = pdata->max_brightness;
+       }
+
+       /*
+        * Over-voltage protection (OVP)
+        *
+        * The values of this (optional) property are:
+        *
+        *  - 13.5V
+        *  - 24V
+        *  - 35.5V (hardware default setting)
+        *
+        * If missing, the default value for OVP is 35.5V
+        */
+       pdata->over_voltage_protection = REG_I2C_1_OVP1;
+       if (!of_property_read_u32(node, "mps,overvoltage-protection-microvolt",
+                                 &tmp_value)) {
+               switch (tmp_value) {
+               case 13500000:
+                       pdata->over_voltage_protection = 0x00;
+                       break;
+               case 24000000:
+                       pdata->over_voltage_protection = REG_I2C_1_OVP0;
+                       break;
+               case 35500000:
+                       pdata->over_voltage_protection = REG_I2C_1_OVP1;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       /* Synchronous (default) and non-synchronous mode */
+       pdata->sync_mode = true;
+       if (of_property_read_bool(node, "mps,no-sync-mode"))
+               pdata->sync_mode = false;
+
+       return 0;
+}
+
+static int mp3309c_probe(struct i2c_client *client)
+{
+       struct mp3309c_platform_data *pdata = dev_get_platdata(&client->dev);
+       struct mp3309c_chip *chip;
+       struct backlight_properties props;
+       struct pwm_state pwmstate;
+       int ret;
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+               dev_err(&client->dev, "failed to check i2c functionality\n");
+               return -EOPNOTSUPP;
+       }
+
+       chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       chip->dev = &client->dev;
+
+       chip->regmap = devm_regmap_init_i2c(client, &mp3309c_regmap);
+       if (IS_ERR(chip->regmap))
+               return dev_err_probe(&client->dev, PTR_ERR(chip->regmap),
+                                    "failed to allocate register map\n");
+
+       i2c_set_clientdata(client, chip);
+
+       if (!pdata) {
+               pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
+               if (!pdata)
+                       return -ENOMEM;
+
+               ret = pm3309c_parse_dt_node(chip, pdata);
+               if (ret)
+                       return ret;
+       }
+       chip->pdata = pdata;
+
+       /* Backlight properties */
+       props.brightness = pdata->default_brightness;
+       props.max_brightness = pdata->max_brightness;
+       props.scale = BACKLIGHT_SCALE_LINEAR;
+       props.type = BACKLIGHT_RAW;
+       props.power = FB_BLANK_UNBLANK;
+       props.fb_blank = FB_BLANK_UNBLANK;
+       chip->bl = devm_backlight_device_register(chip->dev, "mp3309c",
+                                                 chip->dev, chip,
+                                                 &mp3309c_bl_ops, &props);
+       if (IS_ERR(chip->bl))
+               return dev_err_probe(chip->dev, PTR_ERR(chip->bl),
+                                    "error registering backlight device\n");
+
+       /* In PWM dimming mode, enable pwm device */
+       if (chip->pdata->dimming_mode == DIMMING_PWM) {
+               pwm_init_state(chip->pwmd, &pwmstate);
+               pwm_set_relative_duty_cycle(&pwmstate,
+                                           chip->pdata->default_brightness,
+                                           chip->pdata->max_brightness);
+               pwmstate.enabled = true;
+               ret = pwm_apply_state(chip->pwmd, &pwmstate);
+               if (ret)
+                       return dev_err_probe(chip->dev, ret,
+                                            "error setting pwm device\n");
+       }
+
+       chip->pdata->status = FIRST_POWER_ON;
+       backlight_update_status(chip->bl);
+
+       return 0;
+}
+
+static void mp3309c_remove(struct i2c_client *client)
+{
+       struct mp3309c_chip *chip = i2c_get_clientdata(client);
+       struct backlight_device *bl = chip->bl;
+
+       bl->props.power = FB_BLANK_POWERDOWN;
+       bl->props.brightness = 0;
+       backlight_update_status(chip->bl);
+}
+
+static const struct of_device_id mp3309c_match_table[] = {
+       { .compatible = "mps,mp3309c", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, mp3309c_match_table);
+
+static const struct i2c_device_id mp3309c_id[] = {
+       { "mp3309c", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, mp3309c_id);
+
+static struct i2c_driver mp3309c_i2c_driver = {
+       .driver = {
+                       .name           = KBUILD_MODNAME,
+                       .of_match_table = mp3309c_match_table,
+       },
+       .probe          = mp3309c_probe,
+       .remove         = mp3309c_remove,
+       .id_table       = mp3309c_id,
+};
+
+module_i2c_driver(mp3309c_i2c_driver);
+
+MODULE_DESCRIPTION("Backlight Driver for MPS MP3309C");
+MODULE_AUTHOR("Flavio Suligoi <f.suligoi@asem.it>");
+MODULE_LICENSE("GPL");
index 289bd9ce4d36d4ad5b751f25b122ee622bb31f23..ffcebf6aa76a967a05716a1db8f316295a0c2f66 100644 (file)
@@ -103,7 +103,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
                pwm_get_state(pb->pwm, &state);
                state.duty_cycle = compute_duty_cycle(pb, brightness, &state);
                state.enabled = true;
-               pwm_apply_state(pb->pwm, &state);
+               pwm_apply_might_sleep(pb->pwm, &state);
 
                pwm_backlight_power_on(pb);
        } else {
@@ -120,7 +120,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
                 * inactive output.
                 */
                state.enabled = !pb->power_supply && !pb->enable_gpio;
-               pwm_apply_state(pb->pwm, &state);
+               pwm_apply_might_sleep(pb->pwm, &state);
        }
 
        if (pb->notify_after)
@@ -461,10 +461,9 @@ static int pwm_backlight_probe(struct platform_device *pdev)
 
        if (!data) {
                ret = pwm_backlight_parse_dt(&pdev->dev, &defdata);
-               if (ret < 0) {
-                       dev_err(&pdev->dev, "failed to find platform data\n");
-                       return ret;
-               }
+               if (ret < 0)
+                       return dev_err_probe(&pdev->dev, ret,
+                                            "failed to find platform data\n");
 
                data = &defdata;
        }
@@ -493,24 +492,27 @@ static int pwm_backlight_probe(struct platform_device *pdev)
        pb->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
                                                  GPIOD_ASIS);
        if (IS_ERR(pb->enable_gpio)) {
-               ret = PTR_ERR(pb->enable_gpio);
+               ret = dev_err_probe(&pdev->dev, PTR_ERR(pb->enable_gpio),
+                                   "failed to acquire enable GPIO\n");
                goto err_alloc;
        }
 
        pb->power_supply = devm_regulator_get_optional(&pdev->dev, "power");
        if (IS_ERR(pb->power_supply)) {
                ret = PTR_ERR(pb->power_supply);
-               if (ret == -ENODEV)
+               if (ret == -ENODEV) {
                        pb->power_supply = NULL;
-               else
+               } else {
+                       dev_err_probe(&pdev->dev, ret,
+                                     "failed to acquire power regulator\n");
                        goto err_alloc;
+               }
        }
 
        pb->pwm = devm_pwm_get(&pdev->dev, NULL);
        if (IS_ERR(pb->pwm)) {
-               ret = PTR_ERR(pb->pwm);
-               if (ret != -EPROBE_DEFER)
-                       dev_err(&pdev->dev, "unable to request PWM\n");
+               ret = dev_err_probe(&pdev->dev, PTR_ERR(pb->pwm),
+                                   "unable to request PWM\n");
                goto err_alloc;
        }
 
@@ -528,10 +530,10 @@ static int pwm_backlight_probe(struct platform_device *pdev)
        if (!state.period && (data->pwm_period_ns > 0))
                state.period = data->pwm_period_ns;
 
-       ret = pwm_apply_state(pb->pwm, &state);
+       ret = pwm_apply_might_sleep(pb->pwm, &state);
        if (ret) {
-               dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
-                       ret);
+               dev_err_probe(&pdev->dev, ret,
+                             "failed to apply initial PWM state");
                goto err_alloc;
        }
 
@@ -568,8 +570,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
                ret = pwm_backlight_brightness_default(&pdev->dev, data,
                                                       state.period);
                if (ret < 0) {
-                       dev_err(&pdev->dev,
-                               "failed to setup default brightness table\n");
+                       dev_err_probe(&pdev->dev, ret,
+                                     "failed to setup default brightness table\n");
                        goto err_alloc;
                }
 
@@ -597,8 +599,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
        bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb,
                                       &pwm_backlight_ops, &props);
        if (IS_ERR(bl)) {
-               dev_err(&pdev->dev, "failed to register backlight\n");
-               ret = PTR_ERR(bl);
+               ret = dev_err_probe(&pdev->dev, PTR_ERR(bl),
+                                   "failed to register backlight\n");
                goto err_alloc;
        }
 
@@ -633,7 +635,7 @@ static void pwm_backlight_remove(struct platform_device *pdev)
        pwm_get_state(pb->pwm, &state);
        state.duty_cycle = 0;
        state.enabled = false;
-       pwm_apply_state(pb->pwm, &state);
+       pwm_apply_might_sleep(pb->pwm, &state);
 
        if (pb->exit)
                pb->exit(&pdev->dev);
@@ -649,7 +651,7 @@ static void pwm_backlight_shutdown(struct platform_device *pdev)
        pwm_get_state(pb->pwm, &state);
        state.duty_cycle = 0;
        state.enabled = false;
-       pwm_apply_state(pb->pwm, &state);
+       pwm_apply_might_sleep(pb->pwm, &state);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -673,7 +675,7 @@ static int pwm_backlight_suspend(struct device *dev)
        pwm_get_state(pb->pwm, &state);
        state.duty_cycle = 0;
        state.enabled = false;
-       pwm_apply_state(pb->pwm, &state);
+       pwm_apply_might_sleep(pb->pwm, &state);
 
        if (pb->notify_after)
                pb->notify_after(pb->dev, 0);
index 83c2d7329ca58831f0153417a332383b0c6cf081..bc31db6ef7d262a7bdf1bd9c72934b7c5fcb29bf 100644 (file)
@@ -7,7 +7,7 @@ menu "Console display driver support"
 
 config VGA_CONSOLE
        bool "VGA text console" if EXPERT || !X86
-       depends on ALPHA || IA64 || X86 || \
+       depends on ALPHA || X86 || \
                (ARM && ARCH_FOOTBRIDGE) || \
                (MIPS && (MIPS_MALTA || SIBYTE_BCM112X || SIBYTE_SB1250 || SIBYTE_BCM1x80 || SNI_RM))
        select APERTURE_HELPERS if (DRM || FB || VFIO_PCI_CORE)
index 8bf5f2f54be7b8760fd89b027fd3d686c00ada14..e2514321986297bda06f3c29ce7bfae3cc6fe2ef 100644 (file)
@@ -116,7 +116,6 @@ unsigned char *fb_ddc_read(struct i2c_adapter *adapter)
        algo_data->setsda(algo_data->data, 1);
        algo_data->setscl(algo_data->data, 1);
 
-       adapter->class |= I2C_CLASS_DDC;
        return edid;
 }
 
index 63af6ab034b5f1bb45992a4074f8862d528b38d3..1183e7a871f8b270a9ff2106cef15e44720184a4 100644 (file)
@@ -631,8 +631,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
 
        if (logo_lines > vc->vc_bottom) {
                logo_shown = FBCON_LOGO_CANSHOW;
-               printk(KERN_INFO
-                      "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
+               pr_info("fbcon: disable boot-logo (boot-logo bigger than screen).\n");
        } else {
                logo_shown = FBCON_LOGO_DRAW;
                vc->vc_top = logo_lines;
index abb87d3576db0f178c86d480dc6973f86ca8e453..986760b90465fb117f502e702b3ca31c90efe13f 100644 (file)
@@ -1227,7 +1227,6 @@ static int cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
        strscpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
                sizeof(cfb->ddc_adapter.name));
        cfb->ddc_adapter.owner          = THIS_MODULE;
-       cfb->ddc_adapter.class          = I2C_CLASS_DDC;
        cfb->ddc_adapter.algo_data      = &cfb->ddc_algo;
        cfb->ddc_adapter.dev.parent     = cfb->fb.device;
        cfb->ddc_algo.setsda            = cyber2000fb_ddc_setsda;
index 1897e65ab7031dfbf9f3e907ac245661bc88ba18..9b74dae71472c459e1d46eb0e0890ccb5debc9e0 100644 (file)
@@ -163,7 +163,6 @@ static int i740fb_setup_ddc_bus(struct fb_info *info)
        strscpy(par->ddc_adapter.name, info->fix.id,
                sizeof(par->ddc_adapter.name));
        par->ddc_adapter.owner          = THIS_MODULE;
-       par->ddc_adapter.class          = I2C_CLASS_DDC;
        par->ddc_adapter.algo_data      = &par->ddc_algo;
        par->ddc_adapter.dev.parent     = info->device;
        par->ddc_algo.setsda            = i740fb_ddc_setsda;
index e2e4705e3fe0e21b9e178e1b8ae50277480c6244..bb048e14b2cf1cb763d7547482e651769febf923 100644 (file)
@@ -100,8 +100,7 @@ static const struct i2c_algo_bit_data matrox_i2c_algo_template =
 };
 
 static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo, 
-               unsigned int data, unsigned int clock, const char *name,
-               int class)
+               unsigned int data, unsigned int clock, const char *name)
 {
        int err;
 
@@ -112,7 +111,6 @@ static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo,
        snprintf(b->adapter.name, sizeof(b->adapter.name), name,
                minfo->fbcon.node);
        i2c_set_adapdata(&b->adapter, b);
-       b->adapter.class = class;
        b->adapter.algo_data = &b->bac;
        b->adapter.dev.parent = &minfo->pcidev->dev;
        b->bac = matrox_i2c_algo_template;
@@ -160,27 +158,24 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
                case MGA_2164:
                        err = i2c_bus_reg(&m2info->ddc1, minfo,
                                          DDC1B_DATA, DDC1B_CLK,
-                                         "DDC:fb%u #0", I2C_CLASS_DDC);
+                                         "DDC:fb%u #0");
                        break;
                default:
                        err = i2c_bus_reg(&m2info->ddc1, minfo,
                                          DDC1_DATA, DDC1_CLK,
-                                         "DDC:fb%u #0", I2C_CLASS_DDC);
+                                         "DDC:fb%u #0");
                        break;
        }
        if (err)
                goto fail_ddc1;
        if (minfo->devflags.dualhead) {
-               err = i2c_bus_reg(&m2info->ddc2, minfo,
-                                 DDC2_DATA, DDC2_CLK,
-                                 "DDC:fb%u #1", I2C_CLASS_DDC);
+               err = i2c_bus_reg(&m2info->ddc2, minfo, DDC2_DATA, DDC2_CLK, "DDC:fb%u #1");
                if (err == -ENODEV) {
                        printk(KERN_INFO "i2c-matroxfb: VGA->TV plug detected, DDC unavailable.\n");
                } else if (err)
                        printk(KERN_INFO "i2c-matroxfb: Could not register secondary output i2c bus. Continuing anyway.\n");
                /* Register maven bus even on G450/G550 */
-               err = i2c_bus_reg(&m2info->maven, minfo,
-                                 MAT_DATA, MAT_CLK, "MAVEN:fb%u", 0);
+               err = i2c_bus_reg(&m2info->maven, minfo, MAT_DATA, MAT_CLK, "MAVEN:fb%u");
                if (err)
                        printk(KERN_INFO "i2c-matroxfb: Could not register Maven i2c bus. Continuing anyway.\n");
                else {
index 589b349cb63e08983e03624c9ef7fea34da11623..07722a5ea8eff8a63e5827e4a79349f00689ee46 100644 (file)
@@ -252,7 +252,6 @@ static int s3fb_setup_ddc_bus(struct fb_info *info)
        strscpy(par->ddc_adapter.name, info->fix.id,
                sizeof(par->ddc_adapter.name));
        par->ddc_adapter.owner          = THIS_MODULE;
-       par->ddc_adapter.class          = I2C_CLASS_DDC;
        par->ddc_adapter.algo_data      = &par->ddc_algo;
        par->ddc_adapter.dev.parent     = info->device;
        par->ddc_algo.setsda            = s3fb_ddc_setsda;
index dddd6afcb972a5c23a5969c2ced0638ccf0b5b34..ebc9aeffdde7c54321b19499715e128d594c0e61 100644 (file)
@@ -869,6 +869,9 @@ static int savagefb_check_var(struct fb_var_screeninfo   *var,
 
        DBG("savagefb_check_var");
 
+       if (!var->pixclock)
+               return -EINVAL;
+
        var->transp.offset = 0;
        var->transp.length = 0;
        switch (var->bits_per_pixel) {
index 803ccb6aa479703bc1cb88237b4c3adc594a75a2..009bf1d926448011292c182e7eee29c25930ed6d 100644 (file)
@@ -1444,6 +1444,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 
        vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
 
+       if (!var->pixclock)
+               return -EINVAL;
        pixclock = var->pixclock;
 
        if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
index 5ae48e36fccb4e8da75f72c64512b4f3ff697b99..1a4f90ea7d5a8c5006164be6e2895068bb29f7d5 100644 (file)
@@ -347,7 +347,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
 
                pwm_init_state(par->pwm, &pwmstate);
                pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
-               pwm_apply_state(par->pwm, &pwmstate);
+               pwm_apply_might_sleep(par->pwm, &pwmstate);
 
                /* Enable the PWM */
                pwm_enable(par->pwm);
index 2de0e675fd1504da67b7110ee81152934ad2cbad..8e5bac27542d915534c3071ec5f64e89727c2c11 100644 (file)
@@ -1158,7 +1158,7 @@ stifb_init_display(struct stifb_info *fb)
            }
            break;
        }
-       stifb_blank(0, (struct fb_info *)fb);   /* 0=enable screen */
+       stifb_blank(0, fb->info);       /* 0=enable screen */
 
        SETUP_FB(fb);
 }
index 22aa953138b0f339f0c4f077c02276d0f86dea29..51ebe78359ec34e4aff34a29d60275131966adb3 100644 (file)
@@ -1267,7 +1267,6 @@ static int tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan, const char *name,
 
        strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
        chan->adapter.owner             = THIS_MODULE;
-       chan->adapter.class             = I2C_CLASS_DDC;
        chan->adapter.algo_data         = &chan->algo;
        chan->adapter.dev.parent        = dev;
        chan->algo.setsda               = tdfxfb_ddc_setsda;
index 816d40b6f689cb54051324803a469093b129e9bd..516cf2a187575da096f233591fb35bb6f7c23d7b 100644 (file)
@@ -274,7 +274,6 @@ static int tridentfb_setup_ddc_bus(struct fb_info *info)
        strscpy(par->ddc_adapter.name, info->fix.id,
                sizeof(par->ddc_adapter.name));
        par->ddc_adapter.owner          = THIS_MODULE;
-       par->ddc_adapter.class          = I2C_CLASS_DDC;
        par->ddc_adapter.algo_data      = &par->ddc_algo;
        par->ddc_adapter.dev.parent     = info->device;
        if (is_oldclock(par->chip_id)) { /* not sure if this check is OK */
index c35e530e0ec9d775668ad4924c1e21a8c8cc3c9b..582502810575970f11fd646f491f4e806f44225d 100644 (file)
@@ -201,7 +201,6 @@ static int create_i2c_bus(struct i2c_adapter *adapter,
        sprintf(adapter->name, "viafb i2c io_port idx 0x%02x",
                adap_cfg->ioport_index);
        adapter->owner = THIS_MODULE;
-       adapter->class = I2C_CLASS_DDC;
        adapter->algo_data = algo;
        if (pdev)
                adapter->dev.parent = &pdev->dev;
index 42c25dc851976c5fa823b89fc4f72e5826d17459..ac73937073a76f7d22df39a503ac59bda2d4a7da 100644 (file)
@@ -374,7 +374,6 @@ static int vt8500lcd_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "no IRQ defined\n");
                ret = -ENODEV;
                goto failed_free_palette;
        }
index dfd69bd77f531bb624b60378e47091e76ac917ee..c6e9855998abff7889517e7460d849d582a983ec 100644 (file)
         VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
 
 /**
- * Reserves memory in which the VMM can relocate any guest mappings
- * that are floating around.
+ * vbg_guest_mappings_init - Reserves memory in which the VMM can
+ *     relocate any guest mappings that are floating around.
+ * @gdev:              The Guest extension device.
  *
  * This operation is a little bit tricky since the VMM might not accept
  * just any address because of address clashes between the three contexts
  * it operates in, so we try several times.
  *
  * Failure to reserve the guest mappings is ignored.
- *
- * @gdev:              The Guest extension device.
  */
 static void vbg_guest_mappings_init(struct vbg_dev *gdev)
 {
@@ -125,7 +124,7 @@ out:
 }
 
 /**
- * Undo what vbg_guest_mappings_init did.
+ * vbg_guest_mappings_exit - Undo what vbg_guest_mappings_init did.
  *
  * @gdev:              The Guest extension device.
  */
@@ -166,9 +165,10 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
 }
 
 /**
- * Report the guest information to the host.
- * Return: 0 or negative errno value.
+ * vbg_report_guest_info - Report the guest information to the host.
  * @gdev:              The Guest extension device.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_report_guest_info(struct vbg_dev *gdev)
 {
@@ -229,10 +229,11 @@ out_free:
 }
 
 /**
- * Report the guest driver status to the host.
- * Return: 0 or negative errno value.
+ * vbg_report_driver_status - Report the guest driver status to the host.
  * @gdev:              The Guest extension device.
  * @active:            Flag whether the driver is now active or not.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
 {
@@ -261,10 +262,12 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
 }
 
 /**
- * Inflate the balloon by one chunk. The caller owns the balloon mutex.
- * Return: 0 or negative errno value.
+ * vbg_balloon_inflate - Inflate the balloon by one chunk. The caller
+ * owns the balloon mutex.
  * @gdev:              The Guest extension device.
  * @chunk_idx:         Index of the chunk.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
 {
@@ -312,10 +315,12 @@ out_error:
 }
 
 /**
- * Deflate the balloon by one chunk. The caller owns the balloon mutex.
- * Return: 0 or negative errno value.
+ * vbg_balloon_deflate - Deflate the balloon by one chunk. The caller
+ * owns the balloon mutex.
  * @gdev:              The Guest extension device.
  * @chunk_idx:         Index of the chunk.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
 {
@@ -344,7 +349,7 @@ static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
        return 0;
 }
 
-/**
+/*
  * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
  * the host wants the balloon to be and adjust accordingly.
  */
@@ -409,7 +414,7 @@ static void vbg_balloon_work(struct work_struct *work)
        }
 }
 
-/**
+/*
  * Callback for heartbeat timer.
  */
 static void vbg_heartbeat_timer(struct timer_list *t)
@@ -422,11 +427,12 @@ static void vbg_heartbeat_timer(struct timer_list *t)
 }
 
 /**
- * Configure the host to check guest's heartbeat
- * and get heartbeat interval from the host.
- * Return: 0 or negative errno value.
+ * vbg_heartbeat_host_config - Configure the host to check guest's heartbeat
+ *     and get heartbeat interval from the host.
  * @gdev:              The Guest extension device.
  * @enabled:           Set true to enable guest heartbeat checks on host.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
 {
@@ -449,9 +455,11 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
 }
 
 /**
- * Initializes the heartbeat timer. This feature may be disabled by the host.
- * Return: 0 or negative errno value.
+ * vbg_heartbeat_init - Initializes the heartbeat timer. This feature
+ * may be disabled by the host.
  * @gdev:              The Guest extension device.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_heartbeat_init(struct vbg_dev *gdev)
 {
@@ -481,7 +489,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
 }
 
 /**
- * Cleanup hearbeat code, stop HB timer and disable host heartbeat checking.
+ * vbg_heartbeat_exit - Cleanup heartbeat code, stop HB timer and disable
+ *     host heartbeat checking.
  * @gdev:              The Guest extension device.
  */
 static void vbg_heartbeat_exit(struct vbg_dev *gdev)
@@ -493,11 +502,12 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev)
 }
 
 /**
- * Applies a change to the bit usage tracker.
- * Return: true if the mask changed, false if not.
+ * vbg_track_bit_usage - Applies a change to the bit usage tracker.
  * @tracker:           The bit usage tracker.
  * @changed:           The bits to change.
  * @previous:          The previous value of the bits.
+ *
+ * Return: %true if the mask changed, %false if not.
  */
 static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
                                u32 changed, u32 previous)
@@ -529,10 +539,12 @@ static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
 }
 
 /**
- * Init and termination worker for resetting the (host) event filter on the host
- * Return: 0 or negative errno value.
+ * vbg_reset_host_event_filter - Init and termination worker for
+ *     resetting the (host) event filter on the host
  * @gdev:                 The Guest extension device.
  * @fixed_events:         Fixed events (init time).
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
                                       u32 fixed_events)
@@ -556,12 +568,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
 }
 
 /**
- * Changes the event filter mask for the given session.
- *
- * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
- * do session cleanup. Takes the session mutex.
- *
- * Return: 0 or negative errno value.
+ * vbg_set_session_event_filter - Changes the event filter mask for the
+ *     given session.
  * @gdev:                      The Guest extension device.
  * @session:                   The session.
  * @or_mask:                   The events to add.
@@ -570,6 +578,11 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
  *                             This tweaks the error handling so we perform
  *                             proper session cleanup even if the host
  *                             misbehaves.
+ *
+ * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
+ * do session cleanup. Takes the session mutex.
+ *
+ * Return: 0 or negative errno value.
  */
 static int vbg_set_session_event_filter(struct vbg_dev *gdev,
                                        struct vbg_session *session,
@@ -637,9 +650,11 @@ out:
 }
 
 /**
- * Init and termination worker for set guest capabilities to zero on the host.
- * Return: 0 or negative errno value.
+ * vbg_reset_host_capabilities - Init and termination worker for set
+ *     guest capabilities to zero on the host.
  * @gdev:              The Guest extension device.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
 {
@@ -662,12 +677,14 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
 }
 
 /**
- * Set guest capabilities on the host.
- * Must be called with gdev->session_mutex hold.
- * Return: 0 or negative errno value.
+ * vbg_set_host_capabilities - Set guest capabilities on the host.
  * @gdev:                      The Guest extension device.
  * @session:                   The session.
  * @session_termination:       Set if we're called by the session cleanup code.
+ *
+ * Must be called with gdev->session_mutex hold.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_set_host_capabilities(struct vbg_dev *gdev,
                                     struct vbg_session *session,
@@ -704,9 +721,8 @@ static int vbg_set_host_capabilities(struct vbg_dev *gdev,
 }
 
 /**
- * Acquire (get exclusive access) guest capabilities for a session.
- * Takes the session mutex.
- * Return: 0 or negative errno value.
+ * vbg_acquire_session_capabilities - Acquire (get exclusive access)
+ *     guest capabilities for a session.
  * @gdev:                      The Guest extension device.
  * @session:                   The session.
  * @flags:                     Flags (VBGL_IOC_AGC_FLAGS_XXX).
@@ -716,6 +732,10 @@ static int vbg_set_host_capabilities(struct vbg_dev *gdev,
  *                             This tweaks the error handling so we perform
  *                             proper session cleanup even if the host
  *                             misbehaves.
+ *
+ * Takes the session mutex.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
                                            struct vbg_session *session,
@@ -811,8 +831,8 @@ out:
 }
 
 /**
- * Sets the guest capabilities for a session. Takes the session mutex.
- * Return: 0 or negative errno value.
+ * vbg_set_session_capabilities - Sets the guest capabilities for a
+ *     session. Takes the session mutex.
  * @gdev:                      The Guest extension device.
  * @session:                   The session.
  * @or_mask:                   The capabilities to add.
@@ -821,6 +841,8 @@ out:
  *                             This tweaks the error handling so we perform
  *                             proper session cleanup even if the host
  *                             misbehaves.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_set_session_capabilities(struct vbg_dev *gdev,
                                        struct vbg_session *session,
@@ -866,9 +888,10 @@ out:
 }
 
 /**
- * vbg_query_host_version get the host feature mask and version information.
- * Return: 0 or negative errno value.
+ * vbg_query_host_version - get the host feature mask and version information.
  * @gdev:              The Guest extension device.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_query_host_version(struct vbg_dev *gdev)
 {
@@ -905,19 +928,18 @@ out:
 }
 
 /**
- * Initializes the VBoxGuest device extension when the
- * device driver is loaded.
+ * vbg_core_init - Initializes the VBoxGuest device extension when the
+ *     device driver is loaded.
+ * @gdev:              The Guest extension device.
+ * @fixed_events:      Events that will be enabled upon init and no client
+ *                     will ever be allowed to mask.
  *
  * The native code locates the VMMDev on the PCI bus and retrieve
  * the MMIO and I/O port ranges, this function will take care of
  * mapping the MMIO memory (if present). Upon successful return
  * the native code should set up the interrupt handler.
  *
- * Return: 0 or negative errno value.
- *
- * @gdev:              The Guest extension device.
- * @fixed_events:      Events that will be enabled upon init and no client
- *                     will ever be allowed to mask.
+ * Return: %0 or negative errno value.
  */
 int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
 {
@@ -1017,11 +1039,12 @@ err_free_reqs:
 }
 
 /**
- * Call this on exit to clean-up vboxguest-core managed resources.
+ * vbg_core_exit - Call this on exit to clean-up vboxguest-core managed
+ *     resources.
+ * @gdev:              The Guest extension device.
  *
  * The native code should call this before the driver is loaded,
  * but don't call this on shutdown.
- * @gdev:              The Guest extension device.
  */
 void vbg_core_exit(struct vbg_dev *gdev)
 {
@@ -1046,12 +1069,13 @@ void vbg_core_exit(struct vbg_dev *gdev)
 }
 
 /**
- * Creates a VBoxGuest user session.
+ * vbg_core_open_session - Creates a VBoxGuest user session.
+ * @gdev:              The Guest extension device.
+ * @requestor:         VMMDEV_REQUESTOR_* flags
  *
  * vboxguest_linux.c calls this when userspace opens the char-device.
+ *
  * Return: A pointer to the new session or an ERR_PTR on error.
- * @gdev:              The Guest extension device.
- * @requestor:         VMMDEV_REQUESTOR_* flags
  */
 struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
 {
@@ -1068,7 +1092,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
 }
 
 /**
- * Closes a VBoxGuest session.
+ * vbg_core_close_session - Closes a VBoxGuest session.
  * @session:           The session to close (and free).
  */
 void vbg_core_close_session(struct vbg_session *session)
@@ -1250,11 +1274,13 @@ static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
 }
 
 /**
- * Checks if the VMM request is allowed in the context of the given session.
- * Return: 0 or negative errno value.
+ * vbg_req_allowed - Checks if the VMM request is allowed in the
+ *     context of the given session.
  * @gdev:              The Guest extension device.
  * @session:           The calling session.
  * @req:               The request.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
                           const struct vmmdev_request_header *req)
@@ -1670,11 +1696,12 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
 }
 
 /**
- * Common IOCtl for user to kernel communication.
- * Return: 0 or negative errno value.
+ * vbg_core_ioctl - Common IOCtl for user to kernel communication.
  * @session:   The client session.
  * @req:       The requested function.
  * @data:      The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
+ *
+ * Return: %0 or negative errno value.
  */
 int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
 {
@@ -1744,11 +1771,12 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
 }
 
 /**
- * Report guest supported mouse-features to the host.
+ * vbg_core_set_mouse_status - Report guest supported mouse-features to the host.
  *
- * Return: 0 or negative errno value.
  * @gdev:              The Guest extension device.
  * @features:          The set of features to report to the host.
+ *
+ * Return: %0 or negative errno value.
  */
 int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
 {
@@ -1772,7 +1800,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
        return vbg_status_code_to_errno(rc);
 }
 
-/** Core interrupt service routine. */
+/* Core interrupt service routine. */
 irqreturn_t vbg_core_isr(int irq, void *dev_id)
 {
        struct vbg_dev *gdev = dev_id;
index c47e62dc55da80f35bc11298fed353b01dec4d3a..8c92ea5b7305192697149d1306a7ff4daec3f2ad 100644 (file)
@@ -81,10 +81,11 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
 }
 
 /**
- * Close device.
- * Return: 0 on success, negated errno on failure.
+ * vbg_misc_device_close - Close device.
  * @inode:             Pointer to inode info structure.
  * @filp:              Associated file pointer.
+ *
+ * Return: %0 on success, negated errno on failure.
  */
 static int vbg_misc_device_close(struct inode *inode, struct file *filp)
 {
@@ -94,11 +95,12 @@ static int vbg_misc_device_close(struct inode *inode, struct file *filp)
 }
 
 /**
- * Device I/O Control entry point.
- * Return: 0 on success, negated errno on failure.
+ * vbg_misc_device_ioctl - Device I/O Control entry point.
  * @filp:              Associated file pointer.
  * @req:               The request specified to ioctl().
  * @arg:               The argument specified to ioctl().
+ *
+ * Return: %0 on success, negated errno on failure.
  */
 static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
                                  unsigned long arg)
@@ -173,7 +175,7 @@ out:
        return ret;
 }
 
-/** The file_operations structures. */
+/* The file_operations structures. */
 static const struct file_operations vbg_misc_device_fops = {
        .owner                  = THIS_MODULE,
        .open                   = vbg_misc_device_open,
@@ -193,7 +195,7 @@ static const struct file_operations vbg_misc_device_user_fops = {
 #endif
 };
 
-/**
+/*
  * Called when the input device is first opened.
  *
  * Sets up absolute mouse reporting.
@@ -206,7 +208,7 @@ static int vbg_input_open(struct input_dev *input)
        return vbg_core_set_mouse_status(gdev, feat);
 }
 
-/**
+/*
  * Called if all open handles to the input device are closed.
  *
  * Disables absolute reporting.
@@ -218,7 +220,7 @@ static void vbg_input_close(struct input_dev *input)
        vbg_core_set_mouse_status(gdev, 0);
 }
 
-/**
+/*
  * Creates the kernel input device.
  *
  * Return: 0 on success, negated errno on failure.
@@ -277,7 +279,7 @@ static struct attribute *vbg_pci_attrs[] = {
 };
 ATTRIBUTE_GROUPS(vbg_pci);
 
-/**
+/*
  * Does the PCI detection and init of the device.
  *
  * Return: 0 on success, negated errno on failure.
@@ -453,7 +455,7 @@ void vbg_put_gdev(struct vbg_dev *gdev)
 }
 EXPORT_SYMBOL(vbg_put_gdev);
 
-/**
+/*
  * Callback for mouse events.
  *
  * This is called at the end of the ISR, after leaving the event spinlock, if
index 8d195e3f83012363a0db7fabbe3a27b38fd06bb2..1c02b3c0d934728e0833907fc4dbbef7f0c3a638 100644 (file)
@@ -237,14 +237,16 @@ static int hgcm_call_preprocess_linaddr(
 }
 
 /**
- * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and
- * figure out how much extra storage we need for page lists.
- * Return: 0 or negative errno value.
+ * hgcm_call_preprocess - Preprocesses the HGCM call, validate parameters,
+ *     alloc bounce buffers and figure out how much extra storage we need for
+ *     page lists.
  * @src_parm:         Pointer to source function call parameters
  * @parm_count:       Number of function call parameters.
  * @bounce_bufs_ret:  Where to return the allocated bouncebuffer array
  * @extra:            Where to return the extra request space needed for
  *                    physical page lists.
+ *
+ * Return: %0 or negative errno value.
  */
 static int hgcm_call_preprocess(
        const struct vmmdev_hgcm_function_parameter *src_parm,
@@ -301,10 +303,11 @@ static int hgcm_call_preprocess(
 }
 
 /**
- * Translates linear address types to page list direction flags.
+ * hgcm_call_linear_addr_type_to_pagelist_flags - Translates linear address
+ *     types to page list direction flags.
+ * @type:  The type.
  *
  * Return: page list flags.
- * @type:  The type.
  */
 static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
        enum vmmdev_hgcm_function_parameter_type type)
@@ -369,7 +372,8 @@ static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
 }
 
 /**
- * Initializes the call request that we're sending to the host.
+ * hgcm_call_init_call - Initializes the call request that we're sending
+ *     to the host.
  * @call:            The call to initialize.
  * @client_id:       The client ID of the caller.
  * @function:        The function number of the function to call.
@@ -425,7 +429,9 @@ static void hgcm_call_init_call(
 }
 
 /**
- * Tries to cancel a pending HGCM call.
+ * hgcm_cancel_call - Tries to cancel a pending HGCM call.
+ * @gdev:        The VBoxGuest device extension.
+ * @call:        The call to cancel.
  *
  * Return: VBox status code
  */
@@ -459,13 +465,15 @@ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
 }
 
 /**
- * Performs the call and completion wait.
- * Return: 0 or negative errno value.
+ * vbg_hgcm_do_call - Performs the call and completion wait.
  * @gdev:        The VBoxGuest device extension.
  * @call:        The call to execute.
  * @timeout_ms:  Timeout in ms.
+ * @interruptible: whether this call is interruptible
  * @leak_it:     Where to return the leak it / free it, indicator.
  *               Cancellation fun.
+ *
+ * Return: %0 or negative errno value.
  */
 static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
                            u32 timeout_ms, bool interruptible, bool *leak_it)
@@ -545,13 +553,14 @@ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
 }
 
 /**
- * Copies the result of the call back to the caller info structure and user
- * buffers.
- * Return: 0 or negative errno value.
+ * hgcm_call_copy_back_result - Copies the result of the call back to
+ *     the caller info structure and user buffers.
  * @call:            HGCM call request.
  * @dst_parm:        Pointer to function call parameters destination.
  * @parm_count:      Number of function call parameters.
  * @bounce_bufs:     The bouncebuffer array.
+ *
+ * Return: %0 or negative errno value.
  */
 static int hgcm_call_copy_back_result(
        const struct vmmdev_hgcm_call *call,
index a1c467a0e9f719665fc02fa559d5c94545e5725f..b67a28da47026d0299b8a1f8c22a40fc36b1c4a2 100644 (file)
@@ -68,6 +68,7 @@ out:
 static void vmgenid_notify(struct acpi_device *device, u32 event)
 {
        struct vmgenid_state *state = acpi_driver_data(device);
+       char *envp[] = { "NEW_VMGENID=1", NULL };
        u8 old_id[VMGENID_SIZE];
 
        memcpy(old_id, state->this_id, sizeof(old_id));
@@ -75,6 +76,7 @@ static void vmgenid_notify(struct acpi_device *device, u32 event)
        if (!memcmp(old_id, state->this_id, sizeof(old_id)))
                return;
        add_vmfork_randomness(state->this_id, sizeof(state->this_id));
+       kobject_uevent_env(&device->dev.kobj, KOBJ_CHANGE, envp);
 }
 
 static const struct acpi_device_id vmgenid_ids[] = {
index 0a53a61231c2944092d3ecac74d5caf40aaca2d3..c17193544268aa7cf0af5a3de5addcad2ec91e39 100644 (file)
@@ -60,6 +60,11 @@ config VIRTIO_PCI
 
          If unsure, say M.
 
+config VIRTIO_PCI_ADMIN_LEGACY
+       bool
+       depends on VIRTIO_PCI && (X86 || COMPILE_TEST)
+       default y
+
 config VIRTIO_PCI_LEGACY
        bool "Support for legacy virtio draft 0.9.X and older devices"
        default y
index 8e98d24917cc053e820e8771644c874c233a528e..73ace62af44093826691b97b251e1af415b00702 100644 (file)
@@ -7,6 +7,7 @@ obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
 obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
 virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
 virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
+virtio_pci-$(CONFIG_VIRTIO_PCI_ADMIN_LEGACY) += virtio_pci_admin_legacy_io.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
 obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
 obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
index 3893dc29eb26338f036379c5664e249ad18f1df6..f4080692b3513bd67a6d946c355c2cfbd7496ecd 100644 (file)
@@ -302,9 +302,15 @@ static int virtio_dev_probe(struct device *_d)
        if (err)
                goto err;
 
+       if (dev->config->create_avq) {
+               err = dev->config->create_avq(dev);
+               if (err)
+                       goto err;
+       }
+
        err = drv->probe(dev);
        if (err)
-               goto err;
+               goto err_probe;
 
        /* If probe didn't do it, mark device DRIVER_OK ourselves. */
        if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -316,6 +322,10 @@ static int virtio_dev_probe(struct device *_d)
        virtio_config_enable(dev);
 
        return 0;
+
+err_probe:
+       if (dev->config->destroy_avq)
+               dev->config->destroy_avq(dev);
 err:
        virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
        return err;
@@ -331,6 +341,9 @@ static void virtio_dev_remove(struct device *_d)
 
        drv->remove(dev);
 
+       if (dev->config->destroy_avq)
+               dev->config->destroy_avq(dev);
+
        /* Driver should have reset device. */
        WARN_ON_ONCE(dev->config->get_status(dev));
 
@@ -489,13 +502,20 @@ EXPORT_SYMBOL_GPL(unregister_virtio_device);
 int virtio_device_freeze(struct virtio_device *dev)
 {
        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+       int ret;
 
        virtio_config_disable(dev);
 
        dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
 
-       if (drv && drv->freeze)
-               return drv->freeze(dev);
+       if (drv && drv->freeze) {
+               ret = drv->freeze(dev);
+               if (ret)
+                       return ret;
+       }
+
+       if (dev->config->destroy_avq)
+               dev->config->destroy_avq(dev);
 
        return 0;
 }
@@ -532,10 +552,16 @@ int virtio_device_restore(struct virtio_device *dev)
        if (ret)
                goto err;
 
+       if (dev->config->create_avq) {
+               ret = dev->config->create_avq(dev);
+               if (ret)
+                       goto err;
+       }
+
        if (drv->restore) {
                ret = drv->restore(dev);
                if (ret)
-                       goto err;
+                       goto err_restore;
        }
 
        /* If restore didn't do it, mark device DRIVER_OK ourselves. */
@@ -546,6 +572,9 @@ int virtio_device_restore(struct virtio_device *dev)
 
        return 0;
 
+err_restore:
+       if (dev->config->destroy_avq)
+               dev->config->destroy_avq(dev);
 err:
        virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
        return ret;
index 59cdc0292dce123613655d667e42819565d12901..1f5b3dd31fcfc9988282c9c65cee7a6d95475b9e 100644 (file)
@@ -119,6 +119,11 @@ struct virtio_balloon {
        /* Free page reporting device */
        struct virtqueue *reporting_vq;
        struct page_reporting_dev_info pr_dev_info;
+
+       /* State for keeping the wakeup_source active while adjusting the balloon */
+       spinlock_t adjustment_lock;
+       bool adjustment_signal_pending;
+       bool adjustment_in_progress;
 };
 
 static const struct virtio_device_id id_table[] = {
@@ -437,6 +442,31 @@ static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
        queue_work(vb->balloon_wq, &vb->report_free_page_work);
 }
 
+static void start_update_balloon_size(struct virtio_balloon *vb)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&vb->adjustment_lock, flags);
+       vb->adjustment_signal_pending = true;
+       if (!vb->adjustment_in_progress) {
+               vb->adjustment_in_progress = true;
+               pm_stay_awake(vb->vdev->dev.parent);
+       }
+       spin_unlock_irqrestore(&vb->adjustment_lock, flags);
+
+       queue_work(system_freezable_wq, &vb->update_balloon_size_work);
+}
+
+static void end_update_balloon_size(struct virtio_balloon *vb)
+{
+       spin_lock_irq(&vb->adjustment_lock);
+       if (!vb->adjustment_signal_pending && vb->adjustment_in_progress) {
+               vb->adjustment_in_progress = false;
+               pm_relax(vb->vdev->dev.parent);
+       }
+       spin_unlock_irq(&vb->adjustment_lock);
+}
+
 static void virtballoon_changed(struct virtio_device *vdev)
 {
        struct virtio_balloon *vb = vdev->priv;
@@ -444,8 +474,7 @@ static void virtballoon_changed(struct virtio_device *vdev)
 
        spin_lock_irqsave(&vb->stop_update_lock, flags);
        if (!vb->stop_update) {
-               queue_work(system_freezable_wq,
-                          &vb->update_balloon_size_work);
+               start_update_balloon_size(vb);
                virtio_balloon_queue_free_page_work(vb);
        }
        spin_unlock_irqrestore(&vb->stop_update_lock, flags);
@@ -476,19 +505,25 @@ static void update_balloon_size_func(struct work_struct *work)
 
        vb = container_of(work, struct virtio_balloon,
                          update_balloon_size_work);
-       diff = towards_target(vb);
 
-       if (!diff)
-               return;
+       spin_lock_irq(&vb->adjustment_lock);
+       vb->adjustment_signal_pending = false;
+       spin_unlock_irq(&vb->adjustment_lock);
 
-       if (diff > 0)
-               diff -= fill_balloon(vb, diff);
-       else
-               diff += leak_balloon(vb, -diff);
-       update_balloon_size(vb);
+       diff = towards_target(vb);
+
+       if (diff) {
+               if (diff > 0)
+                       diff -= fill_balloon(vb, diff);
+               else
+                       diff += leak_balloon(vb, -diff);
+               update_balloon_size(vb);
+       }
 
        if (diff)
                queue_work(system_freezable_wq, work);
+       else
+               end_update_balloon_size(vb);
 }
 
 static int init_vqs(struct virtio_balloon *vb)
@@ -992,6 +1027,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
                        goto out_unregister_oom;
        }
 
+       spin_lock_init(&vb->adjustment_lock);
+
        virtio_device_ready(vdev);
 
        if (towards_target(vb))
diff --git a/drivers/virtio/virtio_pci_admin_legacy_io.c b/drivers/virtio/virtio_pci_admin_legacy_io.c
new file mode 100644 (file)
index 0000000..819cfbb
--- /dev/null
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/virtio_pci_admin.h>
+#include "virtio_pci_common.h"
+
+/*
+ * virtio_pci_admin_has_legacy_io - Checks whether the legacy IO
+ * commands are supported
+ * @dev: VF pci_dev
+ *
+ * Returns true on success.
+ */
+bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev)
+{
+       struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+       struct virtio_pci_device *vp_dev;
+
+       if (!virtio_dev)
+               return false;
+
+       if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
+               return false;
+
+       vp_dev = to_vp_device(virtio_dev);
+
+       if ((vp_dev->admin_vq.supported_cmds & VIRTIO_LEGACY_ADMIN_CMD_BITMAP) ==
+               VIRTIO_LEGACY_ADMIN_CMD_BITMAP)
+               return true;
+       return false;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_has_legacy_io);
+
+static int virtio_pci_admin_legacy_io_write(struct pci_dev *pdev, u16 opcode,
+                                           u8 offset, u8 size, u8 *buf)
+{
+       struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+       struct virtio_admin_cmd_legacy_wr_data *data;
+       struct virtio_admin_cmd cmd = {};
+       struct scatterlist data_sg;
+       int vf_id;
+       int ret;
+
+       if (!virtio_dev)
+               return -ENODEV;
+
+       vf_id = pci_iov_vf_id(pdev);
+       if (vf_id < 0)
+               return vf_id;
+
+       data = kzalloc(sizeof(*data) + size, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->offset = offset;
+       memcpy(data->registers, buf, size);
+       sg_init_one(&data_sg, data, sizeof(*data) + size);
+       cmd.opcode = cpu_to_le16(opcode);
+       cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+       cmd.group_member_id = cpu_to_le64(vf_id + 1);
+       cmd.data_sg = &data_sg;
+       ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+       kfree(data);
+       return ret;
+}
+
+/*
+ * virtio_pci_admin_legacy_io_write_common - Write legacy common configuration
+ * of a member device
+ * @dev: VF pci_dev
+ * @offset: starting byte offset within the common configuration area to write to
+ * @size: size of the data to write
+ * @buf: buffer which holds the data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset,
+                                           u8 size, u8 *buf)
+{
+       return virtio_pci_admin_legacy_io_write(pdev,
+                                       VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE,
+                                       offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_write);
+
+/*
+ * virtio_pci_admin_legacy_io_write_device - Write legacy device configuration
+ * of a member device
+ * @dev: VF pci_dev
+ * @offset: starting byte offset within the device configuration area to write to
+ * @size: size of the data to write
+ * @buf: buffer which holds the data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset,
+                                           u8 size, u8 *buf)
+{
+       return virtio_pci_admin_legacy_io_write(pdev,
+                                       VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE,
+                                       offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_write);
+
+static int virtio_pci_admin_legacy_io_read(struct pci_dev *pdev, u16 opcode,
+                                          u8 offset, u8 size, u8 *buf)
+{
+       struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+       struct virtio_admin_cmd_legacy_rd_data *data;
+       struct scatterlist data_sg, result_sg;
+       struct virtio_admin_cmd cmd = {};
+       int vf_id;
+       int ret;
+
+       if (!virtio_dev)
+               return -ENODEV;
+
+       vf_id = pci_iov_vf_id(pdev);
+       if (vf_id < 0)
+               return vf_id;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->offset = offset;
+       sg_init_one(&data_sg, data, sizeof(*data));
+       sg_init_one(&result_sg, buf, size);
+       cmd.opcode = cpu_to_le16(opcode);
+       cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+       cmd.group_member_id = cpu_to_le64(vf_id + 1);
+       cmd.data_sg = &data_sg;
+       cmd.result_sg = &result_sg;
+       ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+       kfree(data);
+       return ret;
+}
+
+/*
+ * virtio_pci_admin_legacy_device_io_read - Read legacy device configuration of
+ * a member device
+ * @dev: VF pci_dev
+ * @offset: starting byte offset within the device configuration area to read from
+ * @size: size of the data to be read
+ * @buf: buffer to hold the returned data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset,
+                                          u8 size, u8 *buf)
+{
+       return virtio_pci_admin_legacy_io_read(pdev,
+                                       VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ,
+                                       offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_read);
+
+/*
+ * virtio_pci_admin_legacy_common_io_read - Read legacy common configuration of
+ * a member device
+ * @dev: VF pci_dev
+ * @offset: starting byte offset within the common configuration area to read from
+ * @size: size of the data to be read
+ * @buf: buffer to hold the returned data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset,
+                                          u8 size, u8 *buf)
+{
+       return virtio_pci_admin_legacy_io_read(pdev,
+                                       VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ,
+                                       offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_read);
+
+/*
+ * virtio_pci_admin_legacy_io_notify_info - Read the queue notification
+ * information for legacy interface
+ * @dev: VF pci_dev
+ * @req_bar_flags: requested bar flags
+ * @bar: on output the BAR number of the owner or member device
+ * @bar_offset: on output the offset within bar
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
+                                          u8 req_bar_flags, u8 *bar,
+                                          u64 *bar_offset)
+{
+       struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+       struct virtio_admin_cmd_notify_info_result *result;
+       struct virtio_admin_cmd cmd = {};
+       struct scatterlist result_sg;
+       int vf_id;
+       int ret;
+
+       if (!virtio_dev)
+               return -ENODEV;
+
+       vf_id = pci_iov_vf_id(pdev);
+       if (vf_id < 0)
+               return vf_id;
+
+       result = kzalloc(sizeof(*result), GFP_KERNEL);
+       if (!result)
+               return -ENOMEM;
+
+       sg_init_one(&result_sg, result, sizeof(*result));
+       cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO);
+       cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+       cmd.group_member_id = cpu_to_le64(vf_id + 1);
+       cmd.result_sg = &result_sg;
+       ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+       if (!ret) {
+               struct virtio_admin_cmd_notify_info_data *entry;
+               int i;
+
+               ret = -ENOENT;
+               for (i = 0; i < VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO; i++) {
+                       entry = &result->entries[i];
+                       if (entry->flags == VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END)
+                               break;
+                       if (entry->flags != req_bar_flags)
+                               continue;
+                       *bar = entry->bar;
+                       *bar_offset = le64_to_cpu(entry->offset);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       kfree(result);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_io_notify_info);
index 7a5593997e0efe64f2ce832bf0c8676bbaabf188..b655fccaf77330b08b23a57d7e2fe1d012409615 100644 (file)
@@ -236,6 +236,9 @@ void vp_del_vqs(struct virtio_device *vdev)
        int i;
 
        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+               if (vp_dev->is_avq(vdev, vq->index))
+                       continue;
+
                if (vp_dev->per_vq_vectors) {
                        int v = vp_dev->vqs[vq->index]->msix_vector;
 
@@ -492,8 +495,40 @@ static int virtio_pci_restore(struct device *dev)
        return virtio_device_restore(&vp_dev->vdev);
 }
 
+static bool vp_supports_pm_no_reset(struct device *dev)
+{
+       struct pci_dev *pci_dev = to_pci_dev(dev);
+       u16 pmcsr;
+
+       if (!pci_dev->pm_cap)
+               return false;
+
+       pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+       if (PCI_POSSIBLE_ERROR(pmcsr)) {
+               dev_err(dev, "Unable to query pmcsr");
+               return false;
+       }
+
+       return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
+}
+
+static int virtio_pci_suspend(struct device *dev)
+{
+       return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev);
+}
+
+static int virtio_pci_resume(struct device *dev)
+{
+       return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev);
+}
+
 static const struct dev_pm_ops virtio_pci_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
+       .suspend = virtio_pci_suspend,
+       .resume = virtio_pci_resume,
+       .freeze = virtio_pci_freeze,
+       .thaw = virtio_pci_restore,
+       .poweroff = virtio_pci_freeze,
+       .restore = virtio_pci_restore,
 };
 #endif
 
@@ -642,6 +677,17 @@ static struct pci_driver virtio_pci_driver = {
        .sriov_configure = virtio_pci_sriov_configure,
 };
 
+struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
+{
+       struct virtio_pci_device *pf_vp_dev;
+
+       pf_vp_dev = pci_iov_get_pf_drvdata(pdev, &virtio_pci_driver);
+       if (IS_ERR(pf_vp_dev))
+               return NULL;
+
+       return &pf_vp_dev->vdev;
+}
+
 module_pci_driver(virtio_pci_driver);
 
 MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
index 4b773bd7c58cb7e42726127cdd188a181c8c04db..7fef52bee4557cc5cadf492ba6e49b76a00e3c3a 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/virtio_pci_modern.h>
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 
 struct virtio_pci_vq_info {
        /* the actual virtqueue */
@@ -41,6 +42,17 @@ struct virtio_pci_vq_info {
        unsigned int msix_vector;
 };
 
+struct virtio_pci_admin_vq {
+       /* Virtqueue info associated with this admin queue. */
+       struct virtio_pci_vq_info info;
+       /* serializing admin commands execution and virtqueue deletion */
+       struct mutex cmd_lock;
+       u64 supported_cmds;
+       /* Name of the admin queue: avq.$vq_index. */
+       char name[10];
+       u16 vq_index;
+};
+
 /* Our device structure */
 struct virtio_pci_device {
        struct virtio_device vdev;
@@ -58,9 +70,13 @@ struct virtio_pci_device {
        spinlock_t lock;
        struct list_head virtqueues;
 
-       /* array of all queues for house-keeping */
+       /* Array of all virtqueues reported in the
+        * PCI common config num_queues field
+        */
        struct virtio_pci_vq_info **vqs;
 
+       struct virtio_pci_admin_vq admin_vq;
+
        /* MSI-X support */
        int msix_enabled;
        int intx_enabled;
@@ -86,6 +102,7 @@ struct virtio_pci_device {
        void (*del_vq)(struct virtio_pci_vq_info *info);
 
        u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
+       bool (*is_avq)(struct virtio_device *vdev, unsigned int index);
 };
 
 /* Constants for MSI-X */
@@ -139,4 +156,27 @@ static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
 int virtio_pci_modern_probe(struct virtio_pci_device *);
 void virtio_pci_modern_remove(struct virtio_pci_device *);
 
+struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
+
+#define VIRTIO_LEGACY_ADMIN_CMD_BITMAP \
+       (BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE) | \
+        BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ) | \
+        BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE) | \
+        BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ) | \
+        BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO))
+
+/* Unlike modern drivers which support hardware virtio devices, legacy drivers
+ * assume software-based devices: e.g. they don't use proper memory barriers
+ * on ARM, use big endian on PPC, etc. X86 drivers are mostly ok though, more
+ * or less by chance. For now, only support legacy IO on X86.
+ */
+#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
+#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_LEGACY_ADMIN_CMD_BITMAP
+#else
+#define VIRTIO_ADMIN_CMD_BITMAP 0
+#endif
+
+int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
+                            struct virtio_admin_cmd *cmd);
+
 #endif
index ee6a386d250b168bdd59153a62ddceb361c0af93..f62b530aa3b5b0e6d555ffef8e39c7bd5fda63a5 100644 (file)
@@ -19,6 +19,8 @@
 #define VIRTIO_RING_NO_LEGACY
 #include "virtio_pci_common.h"
 
+#define VIRTIO_AVQ_SGS_MAX     4
+
 static u64 vp_get_features(struct virtio_device *vdev)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -26,6 +28,187 @@ static u64 vp_get_features(struct virtio_device *vdev)
        return vp_modern_get_features(&vp_dev->mdev);
 }
 
+static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+       if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+               return false;
+
+       return index == vp_dev->admin_vq.vq_index;
+}
+
+static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
+                                   u16 opcode,
+                                   struct scatterlist **sgs,
+                                   unsigned int out_num,
+                                   unsigned int in_num,
+                                   void *data)
+{
+       struct virtqueue *vq;
+       int ret, len;
+
+       vq = admin_vq->info.vq;
+       if (!vq)
+               return -EIO;
+
+       if (opcode != VIRTIO_ADMIN_CMD_LIST_QUERY &&
+           opcode != VIRTIO_ADMIN_CMD_LIST_USE &&
+           !((1ULL << opcode) & admin_vq->supported_cmds))
+               return -EOPNOTSUPP;
+
+       ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, data, GFP_KERNEL);
+       if (ret < 0)
+               return -EIO;
+
+       if (unlikely(!virtqueue_kick(vq)))
+               return -EIO;
+
+       while (!virtqueue_get_buf(vq, &len) &&
+              !virtqueue_is_broken(vq))
+               cpu_relax();
+
+       if (virtqueue_is_broken(vq))
+               return -EIO;
+
+       return 0;
+}
+
+int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
+                            struct virtio_admin_cmd *cmd)
+{
+       struct scatterlist *sgs[VIRTIO_AVQ_SGS_MAX], hdr, stat;
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtio_admin_cmd_status *va_status;
+       unsigned int out_num = 0, in_num = 0;
+       struct virtio_admin_cmd_hdr *va_hdr;
+       u16 status;
+       int ret;
+
+       if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+               return -EOPNOTSUPP;
+
+       va_status = kzalloc(sizeof(*va_status), GFP_KERNEL);
+       if (!va_status)
+               return -ENOMEM;
+
+       va_hdr = kzalloc(sizeof(*va_hdr), GFP_KERNEL);
+       if (!va_hdr) {
+               ret = -ENOMEM;
+               goto err_alloc;
+       }
+
+       va_hdr->opcode = cmd->opcode;
+       va_hdr->group_type = cmd->group_type;
+       va_hdr->group_member_id = cmd->group_member_id;
+
+       /* Add header */
+       sg_init_one(&hdr, va_hdr, sizeof(*va_hdr));
+       sgs[out_num] = &hdr;
+       out_num++;
+
+       if (cmd->data_sg) {
+               sgs[out_num] = cmd->data_sg;
+               out_num++;
+       }
+
+       /* Add return status */
+       sg_init_one(&stat, va_status, sizeof(*va_status));
+       sgs[out_num + in_num] = &stat;
+       in_num++;
+
+       if (cmd->result_sg) {
+               sgs[out_num + in_num] = cmd->result_sg;
+               in_num++;
+       }
+
+       mutex_lock(&vp_dev->admin_vq.cmd_lock);
+       ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
+                                      le16_to_cpu(cmd->opcode),
+                                      sgs, out_num, in_num, sgs);
+       mutex_unlock(&vp_dev->admin_vq.cmd_lock);
+
+       if (ret) {
+               dev_err(&vdev->dev,
+                       "Failed to execute command on admin vq: %d\n.", ret);
+               goto err_cmd_exec;
+       }
+
+       status = le16_to_cpu(va_status->status);
+       if (status != VIRTIO_ADMIN_STATUS_OK) {
+               dev_err(&vdev->dev,
+                       "admin command error: status(%#x) qualifier(%#x)\n",
+                       status, le16_to_cpu(va_status->status_qualifier));
+               ret = -status;
+       }
+
+err_cmd_exec:
+       kfree(va_hdr);
+err_alloc:
+       kfree(va_status);
+       return ret;
+}
+
+static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+       struct virtio_admin_cmd cmd = {};
+       struct scatterlist result_sg;
+       struct scatterlist data_sg;
+       __le64 *data;
+       int ret;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return;
+
+       sg_init_one(&result_sg, data, sizeof(*data));
+       cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
+       cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+       cmd.result_sg = &result_sg;
+
+       ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+       if (ret)
+               goto end;
+
+       *data &= cpu_to_le64(VIRTIO_ADMIN_CMD_BITMAP);
+       sg_init_one(&data_sg, data, sizeof(*data));
+       cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
+       cmd.data_sg = &data_sg;
+       cmd.result_sg = NULL;
+
+       ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+       if (ret)
+               goto end;
+
+       vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
+end:
+       kfree(data);
+}
+
+static void vp_modern_avq_activate(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+
+       if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+               return;
+
+       __virtqueue_unbreak(admin_vq->info.vq);
+       virtio_pci_admin_cmd_list_init(vdev);
+}
+
+static void vp_modern_avq_deactivate(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+
+       if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+               return;
+
+       __virtqueue_break(admin_vq->info.vq);
+}
+
 static void vp_transport_features(struct virtio_device *vdev, u64 features)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -37,6 +220,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
 
        if (features & BIT_ULL(VIRTIO_F_RING_RESET))
                __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
+
+       if (features & BIT_ULL(VIRTIO_F_ADMIN_VQ))
+               __virtio_set_bit(vdev, VIRTIO_F_ADMIN_VQ);
 }
 
 static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
@@ -69,6 +255,9 @@ static int vp_check_common_size(struct virtio_device *vdev)
        if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
                return -EINVAL;
 
+       if (vp_check_common_size_one_feature(vdev, VIRTIO_F_ADMIN_VQ, admin_queue_num))
+               return -EINVAL;
+
        return 0;
 }
 
@@ -195,6 +384,8 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
        /* We should never be setting status to 0. */
        BUG_ON(status == 0);
        vp_modern_set_status(&vp_dev->mdev, status);
+       if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+               vp_modern_avq_activate(vdev);
 }
 
 static void vp_reset(struct virtio_device *vdev)
@@ -211,6 +402,9 @@ static void vp_reset(struct virtio_device *vdev)
         */
        while (vp_modern_get_status(mdev))
                msleep(1);
+
+       vp_modern_avq_deactivate(vdev);
+
        /* Flush pending VQ/configuration callbacks. */
        vp_synchronize_vectors(vdev);
 }
@@ -345,6 +539,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
        bool (*notify)(struct virtqueue *vq);
        struct virtqueue *vq;
+       bool is_avq;
        u16 num;
        int err;
 
@@ -353,11 +548,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        else
                notify = vp_notify;
 
-       if (index >= vp_modern_get_num_queues(mdev))
+       is_avq = vp_is_avq(&vp_dev->vdev, index);
+       if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
                return ERR_PTR(-EINVAL);
 
+       num = is_avq ?
+               VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev, index);
        /* Check if queue is either not available or already active. */
-       num = vp_modern_get_queue_size(mdev, index);
        if (!num || vp_modern_get_queue_enable(mdev, index))
                return ERR_PTR(-ENOENT);
 
@@ -383,6 +580,12 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
                goto err;
        }
 
+       if (is_avq) {
+               mutex_lock(&vp_dev->admin_vq.cmd_lock);
+               vp_dev->admin_vq.info.vq = vq;
+               mutex_unlock(&vp_dev->admin_vq.cmd_lock);
+       }
+
        return vq;
 
 err:
@@ -418,6 +621,12 @@ static void del_vq(struct virtio_pci_vq_info *info)
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
 
+       if (vp_is_avq(&vp_dev->vdev, vq->index)) {
+               mutex_lock(&vp_dev->admin_vq.cmd_lock);
+               vp_dev->admin_vq.info.vq = NULL;
+               mutex_unlock(&vp_dev->admin_vq.cmd_lock);
+       }
+
        if (vp_dev->msix_enabled)
                vp_modern_queue_vector(mdev, vq->index,
                                       VIRTIO_MSI_NO_VECTOR);
@@ -527,6 +736,45 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
        return true;
 }
 
+static int vp_modern_create_avq(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtio_pci_admin_vq *avq;
+       struct virtqueue *vq;
+       u16 admin_q_num;
+
+       if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+               return 0;
+
+       admin_q_num = vp_modern_avq_num(&vp_dev->mdev);
+       if (!admin_q_num)
+               return -EINVAL;
+
+       avq = &vp_dev->admin_vq;
+       avq->vq_index = vp_modern_avq_index(&vp_dev->mdev);
+       sprintf(avq->name, "avq.%u", avq->vq_index);
+       vq = vp_dev->setup_vq(vp_dev, &vp_dev->admin_vq.info, avq->vq_index, NULL,
+                             avq->name, NULL, VIRTIO_MSI_NO_VECTOR);
+       if (IS_ERR(vq)) {
+               dev_err(&vdev->dev, "failed to setup admin virtqueue, err=%ld",
+                       PTR_ERR(vq));
+               return PTR_ERR(vq);
+       }
+
+       vp_modern_set_queue_enable(&vp_dev->mdev, avq->info.vq->index, true);
+       return 0;
+}
+
+static void vp_modern_destroy_avq(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+       if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+               return;
+
+       vp_dev->del_vq(&vp_dev->admin_vq.info);
+}
+
 static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
        .get            = NULL,
        .set            = NULL,
@@ -545,6 +793,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
        .get_shm_region  = vp_get_shm_region,
        .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
        .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
+       .create_avq = vp_modern_create_avq,
+       .destroy_avq = vp_modern_destroy_avq,
 };
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -565,6 +815,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
        .get_shm_region  = vp_get_shm_region,
        .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
        .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
+       .create_avq = vp_modern_create_avq,
+       .destroy_avq = vp_modern_destroy_avq,
 };
 
 /* the PCI probing function */
@@ -588,9 +840,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
        vp_dev->config_vector = vp_config_vector;
        vp_dev->setup_vq = setup_vq;
        vp_dev->del_vq = del_vq;
+       vp_dev->is_avq = vp_is_avq;
        vp_dev->isr = mdev->isr;
        vp_dev->vdev.id = mdev->id;
 
+       mutex_init(&vp_dev->admin_vq.cmd_lock);
        return 0;
 }
 
@@ -598,5 +852,6 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
 {
        struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
 
+       mutex_destroy(&vp_dev->admin_vq.cmd_lock);
        vp_modern_remove(mdev);
 }
index 7de8b1ebabac4217b2240f6d8faaf486b4265f38..0d3dbfaf4b236910530b967a3fb9de25b68bcd36 100644 (file)
@@ -207,6 +207,10 @@ static inline void check_offsets(void)
                     offsetof(struct virtio_pci_modern_common_cfg, queue_notify_data));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_RESET !=
                     offsetof(struct virtio_pci_modern_common_cfg, queue_reset));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_ADM_Q_IDX !=
+                    offsetof(struct virtio_pci_modern_common_cfg, admin_queue_index));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_ADM_Q_NUM !=
+                    offsetof(struct virtio_pci_modern_common_cfg, admin_queue_num));
 }
 
 /*
@@ -296,7 +300,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
        mdev->common = vp_modern_map_capability(mdev, common,
                              sizeof(struct virtio_pci_common_cfg), 4, 0,
                              offsetofend(struct virtio_pci_modern_common_cfg,
-                                         queue_reset),
+                                         admin_queue_num),
                              &mdev->common_len, NULL);
        if (!mdev->common)
                goto err_map_common;
@@ -719,6 +723,24 @@ void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
 }
 EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
 
+u16 vp_modern_avq_num(struct virtio_pci_modern_device *mdev)
+{
+       struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+       cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+       return vp_ioread16(&cfg->admin_queue_num);
+}
+EXPORT_SYMBOL_GPL(vp_modern_avq_num);
+
+u16 vp_modern_avq_index(struct virtio_pci_modern_device *mdev)
+{
+       struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+       cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+       return vp_ioread16(&cfg->admin_queue_index);
+}
+EXPORT_SYMBOL_GPL(vp_modern_avq_index);
+
 MODULE_VERSION("0.1");
 MODULE_DESCRIPTION("Modern Virtio PCI Device");
 MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
index ad316573288a842e3480e85e76f52705e81c8f21..513c0b114337c43a1e7b55c2038679b9fb91a387 100644 (file)
@@ -5,6 +5,17 @@
 
 menu "1-wire Bus Masters"
 
+config W1_MASTER_AMD_AXI
+       tristate "AMD AXI 1-wire bus host"
+       help
+         Say Y here if you want to support the AMD AXI 1-wire IP core.
+         This driver makes use of the programmable logic IP to perform
+         correctly timed 1 wire transactions without relying on GPIO timing
+         through the kernel.
+
+         This driver can also be built as a module.  If so, the module will be
+         called amd_w1_axi.
+
 config W1_MASTER_MATROX
        tristate "Matrox G400 transport layer for 1-wire"
        depends on PCI
index c5d85a827e5209cebac809a2ca51ccd063cd02eb..6c5a21f9b88ce211cf80ecad925e9cb8a7e0fa04 100644 (file)
@@ -3,6 +3,7 @@
 # Makefile for 1-wire bus master drivers.
 #
 
+obj-$(CONFIG_W1_MASTER_AMD_AXI)                += amd_axi_w1.o
 obj-$(CONFIG_W1_MASTER_MATROX)         += matrox_w1.o
 obj-$(CONFIG_W1_MASTER_DS2490)         += ds2490.o
 obj-$(CONFIG_W1_MASTER_DS2482)         += ds2482.o
diff --git a/drivers/w1/masters/amd_axi_w1.c b/drivers/w1/masters/amd_axi_w1.c
new file mode 100644 (file)
index 0000000..4d3a68c
--- /dev/null
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * amd_axi_w1 - AMD 1Wire programmable logic bus host driver
+ *
+ * Copyright (C) 2022-2023 Advanced Micro Devices, Inc. All Rights Reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <linux/w1.h>
+
+/* 1-wire AMD IP definition */
+#define AXIW1_IPID     0x10ee4453
+/* Registers offset */
+#define AXIW1_INST_REG 0x0
+#define AXIW1_CTRL_REG 0x4
+#define AXIW1_IRQE_REG 0x8
+#define AXIW1_STAT_REG 0xC
+#define AXIW1_DATA_REG 0x10
+#define AXIW1_IPVER_REG        0x18
+#define AXIW1_IPID_REG 0x1C
+/* Instructions */
+#define AXIW1_INITPRES 0x0800
+#define AXIW1_READBIT  0x0C00
+#define AXIW1_WRITEBIT 0x0E00
+#define AXIW1_READBYTE 0x0D00
+#define AXIW1_WRITEBYTE        0x0F00
+/* Status flag masks */
+#define AXIW1_DONE     BIT(0)
+#define AXIW1_READY    BIT(4)
+#define AXIW1_PRESENCE BIT(31)
+#define AXIW1_MAJORVER_MASK    GENMASK(23, 8)
+#define AXIW1_MINORVER_MASK    GENMASK(7, 0)
+/* Control flag */
+#define AXIW1_GO       BIT(0)
+#define AXI_CLEAR      0
+#define AXI_RESET      BIT(31)
+#define AXIW1_READDATA BIT(0)
+/* Interrupt Enable */
+#define AXIW1_READY_IRQ_EN     BIT(4)
+#define AXIW1_DONE_IRQ_EN      BIT(0)
+
+#define AXIW1_TIMEOUT  msecs_to_jiffies(100)
+
+#define DRIVER_NAME    "amd_axi_w1"
+
+struct amd_axi_w1_local {
+       struct device *dev;
+       void __iomem *base_addr;
+       int irq;
+       atomic_t flag;                  /* Set on IRQ, cleared once serviced */
+       wait_queue_head_t wait_queue;
+       struct w1_bus_master bus_host;
+};
+
+/**
+ * amd_axi_w1_wait_irq_interruptible_timeout() - Wait for IRQ with timeout.
+ *
+ * @amd_axi_w1_local:  Pointer to device structure
+ * @IRQ:               IRQ channel to wait on
+ *
+ * Return:             %0 - OK, %-EINTR - Interrupted, %-EBUSY - Timed out
+ */
+static int amd_axi_w1_wait_irq_interruptible_timeout(struct amd_axi_w1_local *amd_axi_w1_local,
+                                                    u32 IRQ)
+{
+       int ret;
+
+       /* Enable the IRQ requested and wait for flag to indicate it's been triggered */
+       iowrite32(IRQ, amd_axi_w1_local->base_addr + AXIW1_IRQE_REG);
+       ret = wait_event_interruptible_timeout(amd_axi_w1_local->wait_queue,
+                                              atomic_read(&amd_axi_w1_local->flag) != 0,
+                                              AXIW1_TIMEOUT);
+       if (ret < 0) {
+               dev_err(amd_axi_w1_local->dev, "Wait IRQ Interrupted\n");
+               return -EINTR;
+       }
+
+       if (!ret) {
+               dev_err(amd_axi_w1_local->dev, "Wait IRQ Timeout\n");
+               return -EBUSY;
+       }
+
+       atomic_set(&amd_axi_w1_local->flag, 0);
+       return 0;
+}
+
+/**
+ * amd_axi_w1_touch_bit() - Performs the touch-bit function - write a 0 or 1 and reads the level.
+ *
+ * @data:      Pointer to device structure
+ * @bit:       The level to write
+ *
+ * Return:     The level read
+ */
+static u8 amd_axi_w1_touch_bit(void *data, u8 bit)
+{
+       struct amd_axi_w1_local *amd_axi_w1_local = data;
+       u8 val = 0;
+       int rc;
+
+       /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */
+       while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) {
+               rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local,
+                                                              AXIW1_READY_IRQ_EN);
+               if (rc < 0)
+                       return 1; /* Caller doesn't test for error. Return inactive bus state */
+       }
+
+       if (bit)
+               /* Read. Write read Bit command in register 0 */
+               iowrite32(AXIW1_READBIT, amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+       else
+               /* Write. Write tx Bit command in instruction register with bit to transmit */
+               iowrite32(AXIW1_WRITEBIT + (bit & 0x01),
+                         amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+
+       /* Write Go signal and clear control reset signal in control register */
+       iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+       /* Wait for done signal to be 1 */
+       while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) {
+               rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, AXIW1_DONE_IRQ_EN);
+               if (rc < 0)
+                       return 1; /* Caller doesn't test for error. Return inactive bus state */
+       }
+
+       /* If read, Retrieve data from register */
+       if (bit)
+               val = (u8)(ioread32(amd_axi_w1_local->base_addr + AXIW1_DATA_REG) & AXIW1_READDATA);
+
+       /* Clear Go signal in register 1 */
+       iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+       return val;
+}
+
+/**
+ * amd_axi_w1_read_byte() - Performs the read byte function.
+ *
+ * @data:      Pointer to device structure
+ * Return:     The value read
+ */
+static u8 amd_axi_w1_read_byte(void *data)
+{
+       struct amd_axi_w1_local *amd_axi_w1_local = data;
+       u8 val = 0;
+       int rc;
+
+       /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */
+       while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) {
+               rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local,
+                                                              AXIW1_READY_IRQ_EN);
+               if (rc < 0)
+                       return 0xFF; /* Return inactive bus state */
+       }
+
+       /* Write read Byte command in instruction register */
+       iowrite32(AXIW1_READBYTE, amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+
+       /* Write Go signal and clear control reset signal in control register */
+       iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+       /* Wait for done signal to be 1 */
+       while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) {
+               rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, AXIW1_DONE_IRQ_EN);
+               if (rc < 0)
+                       return 0xFF; /* Return inactive bus state */
+       }
+
+       /* Retrieve LSB bit in data register to get RX byte */
+       val = (u8)(ioread32(amd_axi_w1_local->base_addr + AXIW1_DATA_REG) & 0x000000FF);
+
+       /* Clear Go signal in control register */
+       iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+       return val;
+}
+
+/**
+ * amd_axi_w1_write_byte() - Performs the write byte function.
+ *
+ * @data:      Pointer to device structure
+ * @val:       The value to write
+ */
+static void amd_axi_w1_write_byte(void *data, u8 val)
+{
+       struct amd_axi_w1_local *amd_axi_w1_local = data;
+       int rc;
+
+       /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */
+       while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) {
+               rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local,
+                                                              AXIW1_READY_IRQ_EN);
+               if (rc < 0)
+                       return;
+       }
+
+       /* Write tx Byte command in instruction register with bit to transmit */
+       iowrite32(AXIW1_WRITEBYTE + val, amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+
+       /* Write Go signal and clear control reset signal in register 1 */
+       iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+       /* Wait for done signal to be 1 */
+       while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) {
+               rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local,
+                                                              AXIW1_DONE_IRQ_EN);
+               if (rc < 0)
+                       return;
+       }
+
+       /* Clear Go signal in control register */
+       iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+}
+
+/**
+ * amd_axi_w1_reset_bus() - Issues a reset bus sequence.
+ *
+ * @data:      the bus host data struct
+ * Return:     0=Device present, 1=No device present or error
+ */
+static u8 amd_axi_w1_reset_bus(void *data)
+{
+       struct amd_axi_w1_local *amd_axi_w1_local = data;
+       u8 val = 0;
+       int rc;
+
+       /* Reset 1-wire Axi IP */
+       iowrite32(AXI_RESET, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+       /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */
+       while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) {
+               rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local,
+                                                              AXIW1_READY_IRQ_EN);
+               if (rc < 0)
+                       return 1; /* Something went wrong with the hardware */
+       }
+       /* Write Initialization command in instruction register */
+       iowrite32(AXIW1_INITPRES, amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+
+       /* Write Go signal and clear control reset signal in register 1 */
+       iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+       /* Wait for done signal to be 1 */
+       while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) {
+               rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, AXIW1_DONE_IRQ_EN);
+               if (rc < 0)
+                       return 1; /* Something went wrong with the hardware */
+       }
+       /* Retrieve MSB bit in status register to get failure bit */
+       if ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_PRESENCE) != 0)
+               val = 1;
+
+       /* Clear Go signal in control register */
+       iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+       return val;
+}
+
+/* Reset the 1-wire AXI IP. Put the IP in reset state and clear registers */
+static void amd_axi_w1_reset(struct amd_axi_w1_local *amd_axi_w1_local)
+{
+       iowrite32(AXI_RESET, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+       iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+       iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_IRQE_REG);
+       iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_STAT_REG);
+       iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_DATA_REG);
+}
+
+static irqreturn_t amd_axi_w1_irq(int irq, void *lp)
+{
+       struct amd_axi_w1_local *amd_axi_w1_local = lp;
+
+       /* Reset interrupt trigger */
+       iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_IRQE_REG);
+
+       atomic_set(&amd_axi_w1_local->flag, 1);
+       wake_up_interruptible(&amd_axi_w1_local->wait_queue);
+
+       return IRQ_HANDLED;
+}
+
+static int amd_axi_w1_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct amd_axi_w1_local *lp;
+       struct clk *clk;
+       u32 ver_major, ver_minor;
+       int val, rc = 0;
+
+       lp = devm_kzalloc(dev, sizeof(*lp), GFP_KERNEL);
+       if (!lp)
+               return -ENOMEM;
+
+       lp->dev = dev;
+       lp->base_addr = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(lp->base_addr))
+               return PTR_ERR(lp->base_addr);
+
+       lp->irq = platform_get_irq(pdev, 0);
+       if (lp->irq < 0)
+               return lp->irq;
+
+       rc = devm_request_irq(dev, lp->irq, &amd_axi_w1_irq, IRQF_TRIGGER_HIGH, DRIVER_NAME, lp);
+       if (rc)
+               return rc;
+
+       /* Initialize wait queue and flag */
+       init_waitqueue_head(&lp->wait_queue);
+
+       clk = devm_clk_get_enabled(dev, NULL);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       /* Verify IP presence in HW */
+       if (ioread32(lp->base_addr + AXIW1_IPID_REG) != AXIW1_IPID) {
+               dev_err(dev, "AMD 1-wire IP not detected in hardware\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Allow for future driver expansion supporting new hardware features
+        * This driver currently only supports hardware 1.x, but include logic
+        * to detect if a potentially incompatible future version is used
+        * by reading major version ID. It is highly undesirable for new IP versions
+        * to break the API, but this code will at least allow for graceful failure
+        * should that happen. Future new features can be enabled by hardware
+        * incrementing the minor version and augmenting the driver to detect capability
+        * using the minor version number
+        */
+       val = ioread32(lp->base_addr + AXIW1_IPVER_REG);
+       ver_major = FIELD_GET(AXIW1_MAJORVER_MASK, val);
+       ver_minor = FIELD_GET(AXIW1_MINORVER_MASK, val);
+
+       if (ver_major != 1) {
+               dev_err(dev, "AMD AXI W1 host version %u.%u is not supported by this driver",
+                       ver_major, ver_minor);
+               return -ENODEV;
+       }
+
+       lp->bus_host.data = lp;
+       lp->bus_host.touch_bit = amd_axi_w1_touch_bit;
+       lp->bus_host.read_byte = amd_axi_w1_read_byte;
+       lp->bus_host.write_byte = amd_axi_w1_write_byte;
+       lp->bus_host.reset_bus = amd_axi_w1_reset_bus;
+
+       amd_axi_w1_reset(lp);
+
+       platform_set_drvdata(pdev, lp);
+       rc = w1_add_master_device(&lp->bus_host);
+       if (rc) {
+               dev_err(dev, "Could not add host device\n");
+               return rc;
+       }
+
+       return 0;
+}
+
+static void amd_axi_w1_remove(struct platform_device *pdev)
+{
+       struct amd_axi_w1_local *lp = platform_get_drvdata(pdev);
+
+       w1_remove_master_device(&lp->bus_host);
+}
+
+static const struct of_device_id amd_axi_w1_of_match[] = {
+       { .compatible = "amd,axi-1wire-host" },
+       { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, amd_axi_w1_of_match);
+
+static struct platform_driver amd_axi_w1_driver = {
+       .probe = amd_axi_w1_probe,
+       .remove_new = amd_axi_w1_remove,
+       .driver = {
+               .name = DRIVER_NAME,
+               .of_match_table = amd_axi_w1_of_match,
+       },
+};
+module_platform_driver(amd_axi_w1_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kris Chaplin <kris.chaplin@amd.com>");
+MODULE_DESCRIPTION("Driver for AMD AXI 1 Wire IP core");
index 5f5b97e247004add44b9f8ee913eff2203381ef6..e1cac0730cbbf425efe7124b1729e3eede6fe73b 100644 (file)
@@ -98,6 +98,8 @@
 #define ST_EPOF                                0x80
 /* Status transfer size, 16 bytes status, 16 byte result flags */
 #define ST_SIZE                                0x20
+/* 1-wire data i/o fifo size, 128 bytes */
+#define FIFO_SIZE                      0x80
 
 /* Result Register flags */
 #define RR_DETECT                      0xA5 /* New device detected */
@@ -614,14 +616,11 @@ static int ds_read_byte(struct ds_device *dev, u8 *byte)
        return 0;
 }
 
-static int ds_read_block(struct ds_device *dev, u8 *buf, int len)
+static int read_block_chunk(struct ds_device *dev, u8 *buf, int len)
 {
        struct ds_status st;
        int err;
 
-       if (len > 64*1024)
-               return -E2BIG;
-
        memset(buf, 0xFF, len);
 
        err = ds_send_data(dev, buf, len);
@@ -640,6 +639,24 @@ static int ds_read_block(struct ds_device *dev, u8 *buf, int len)
        return err;
 }
 
+static int ds_read_block(struct ds_device *dev, u8 *buf, int len)
+{
+       int err, to_read, rem = len;
+
+       if (len > 64 * 1024)
+               return -E2BIG;
+
+       do {
+               to_read = rem <= FIFO_SIZE ? rem : FIFO_SIZE;
+               err = read_block_chunk(dev, &buf[len - rem], to_read);
+               if (err < 0)
+                       return err;
+               rem -= to_read;
+       } while (rem);
+
+       return err;
+}
+
 static int ds_write_block(struct ds_device *dev, u8 *buf, int len)
 {
        int err;
index e45acb6d916ee3a69af868710b3507a28c20f2e8..05c67038ed20c35e99ff9bbb6de4a771fdf0c74d 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/w1-gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of_platform.h>
 #include <linux/err.h>
 
 #include <linux/w1.h>
 
+struct w1_gpio_ddata {
+       struct gpio_desc *gpiod;
+       struct gpio_desc *pullup_gpiod;
+       unsigned int pullup_duration;
+};
+
 static u8 w1_gpio_set_pullup(void *data, int delay)
 {
-       struct w1_gpio_platform_data *pdata = data;
+       struct w1_gpio_ddata *ddata = data;
 
        if (delay) {
-               pdata->pullup_duration = delay;
+               ddata->pullup_duration = delay;
        } else {
-               if (pdata->pullup_duration) {
+               if (ddata->pullup_duration) {
                        /*
                         * This will OVERRIDE open drain emulation and force-pull
                         * the line high for some time.
                         */
-                       gpiod_set_raw_value(pdata->gpiod, 1);
-                       msleep(pdata->pullup_duration);
+                       gpiod_set_raw_value(ddata->gpiod, 1);
+                       msleep(ddata->pullup_duration);
                        /*
                         * This will simply set the line as input since we are doing
                         * open drain emulation in the GPIO library.
                         */
-                       gpiod_set_value(pdata->gpiod, 1);
+                       gpiod_set_value(ddata->gpiod, 1);
                }
-               pdata->pullup_duration = 0;
+               ddata->pullup_duration = 0;
        }
 
        return 0;
@@ -46,16 +51,16 @@ static u8 w1_gpio_set_pullup(void *data, int delay)
 
 static void w1_gpio_write_bit(void *data, u8 bit)
 {
-       struct w1_gpio_platform_data *pdata = data;
+       struct w1_gpio_ddata *ddata = data;
 
-       gpiod_set_value(pdata->gpiod, bit);
+       gpiod_set_value(ddata->gpiod, bit);
 }
 
 static u8 w1_gpio_read_bit(void *data)
 {
-       struct w1_gpio_platform_data *pdata = data;
+       struct w1_gpio_ddata *ddata = data;
 
-       return gpiod_get_value(pdata->gpiod) ? 1 : 0;
+       return gpiod_get_value(ddata->gpiod) ? 1 : 0;
 }
 
 #if defined(CONFIG_OF)
@@ -69,58 +74,48 @@ MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids);
 static int w1_gpio_probe(struct platform_device *pdev)
 {
        struct w1_bus_master *master;
-       struct w1_gpio_platform_data *pdata;
+       struct w1_gpio_ddata *ddata;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        /* Enforce open drain mode by default */
        enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
        int err;
 
-       if (of_have_populated_dt()) {
-               pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-               if (!pdata)
-                       return -ENOMEM;
-
-               /*
-                * This parameter means that something else than the gpiolib has
-                * already set the line into open drain mode, so we should just
-                * driver it high/low like we are in full control of the line and
-                * open drain will happen transparently.
-                */
-               if (of_property_present(np, "linux,open-drain"))
-                       gflags = GPIOD_OUT_LOW;
-
-               pdev->dev.platform_data = pdata;
-       }
-       pdata = dev_get_platdata(dev);
+       ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+       if (!ddata)
+               return -ENOMEM;
 
-       if (!pdata) {
-               dev_err(dev, "No configuration data\n");
-               return -ENXIO;
-       }
+       /*
+        * This parameter means that something else than the gpiolib has
+        * already set the line into open drain mode, so we should just
+        * driver it high/low like we are in full control of the line and
+        * open drain will happen transparently.
+        */
+       if (of_property_present(np, "linux,open-drain"))
+               gflags = GPIOD_OUT_LOW;
 
        master = devm_kzalloc(dev, sizeof(struct w1_bus_master),
                        GFP_KERNEL);
        if (!master)
                return -ENOMEM;
 
-       pdata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags);
-       if (IS_ERR(pdata->gpiod)) {
+       ddata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags);
+       if (IS_ERR(ddata->gpiod)) {
                dev_err(dev, "gpio_request (pin) failed\n");
-               return PTR_ERR(pdata->gpiod);
+               return PTR_ERR(ddata->gpiod);
        }
 
-       pdata->pullup_gpiod =
+       ddata->pullup_gpiod =
                devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_LOW);
-       if (IS_ERR(pdata->pullup_gpiod)) {
+       if (IS_ERR(ddata->pullup_gpiod)) {
                dev_err(dev, "gpio_request_one "
                        "(ext_pullup_enable_pin) failed\n");
-               return PTR_ERR(pdata->pullup_gpiod);
+               return PTR_ERR(ddata->pullup_gpiod);
        }
 
-       master->data = pdata;
+       master->data = ddata;
        master->read_bit = w1_gpio_read_bit;
-       gpiod_direction_output(pdata->gpiod, 1);
+       gpiod_direction_output(ddata->gpiod, 1);
        master->write_bit = w1_gpio_write_bit;
 
        /*
@@ -138,11 +133,8 @@ static int w1_gpio_probe(struct platform_device *pdev)
                return err;
        }
 
-       if (pdata->enable_external_pullup)
-               pdata->enable_external_pullup(1);
-
-       if (pdata->pullup_gpiod)
-               gpiod_set_value(pdata->pullup_gpiod, 1);
+       if (ddata->pullup_gpiod)
+               gpiod_set_value(ddata->pullup_gpiod, 1);
 
        platform_set_drvdata(pdev, master);
 
@@ -152,45 +144,19 @@ static int w1_gpio_probe(struct platform_device *pdev)
 static int w1_gpio_remove(struct platform_device *pdev)
 {
        struct w1_bus_master *master = platform_get_drvdata(pdev);
-       struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
-
-       if (pdata->enable_external_pullup)
-               pdata->enable_external_pullup(0);
+       struct w1_gpio_ddata *ddata = master->data;
 
-       if (pdata->pullup_gpiod)
-               gpiod_set_value(pdata->pullup_gpiod, 0);
+       if (ddata->pullup_gpiod)
+               gpiod_set_value(ddata->pullup_gpiod, 0);
 
        w1_remove_master_device(master);
 
        return 0;
 }
 
-static int __maybe_unused w1_gpio_suspend(struct device *dev)
-{
-       struct w1_gpio_platform_data *pdata = dev_get_platdata(dev);
-
-       if (pdata->enable_external_pullup)
-               pdata->enable_external_pullup(0);
-
-       return 0;
-}
-
-static int __maybe_unused w1_gpio_resume(struct device *dev)
-{
-       struct w1_gpio_platform_data *pdata = dev_get_platdata(dev);
-
-       if (pdata->enable_external_pullup)
-               pdata->enable_external_pullup(1);
-
-       return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(w1_gpio_pm_ops, w1_gpio_suspend, w1_gpio_resume);
-
 static struct platform_driver w1_gpio_driver = {
        .driver = {
                .name   = "w1-gpio",
-               .pm     = &w1_gpio_pm_ops,
                .of_match_table = of_match_ptr(w1_gpio_dt_ids),
        },
        .probe = w1_gpio_probe,
index 9f21fd98f7995cdc5af6bbf255d802b0a20fbbb8..250b7f7ec429e92a6146452a20d591644af4e083 100644 (file)
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- *     w1_ds2433.c - w1 family 23 (DS2433) driver
+ *     w1_ds2433.c - w1 family 23 (DS2433) & 43 (DS28EC20) eeprom driver
  *
  * Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
+ * Copyright (c) 2023 Marc Ferland <marc.ferland@sonatest.com>
  */
 
 #include <linux/kernel.h>
 #include <linux/w1.h>
 
 #define W1_EEPROM_DS2433       0x23
+#define W1_EEPROM_DS28EC20     0x43
+
+#define W1_EEPROM_DS2433_SIZE  512
+#define W1_EEPROM_DS28EC20_SIZE 2560
 
-#define W1_EEPROM_SIZE         512
-#define W1_PAGE_COUNT          16
 #define W1_PAGE_SIZE           32
 #define W1_PAGE_BITS           5
 #define W1_PAGE_MASK           0x1F
-
-#define W1_F23_TIME            300
+#define W1_VALIDCRC_MAX                96
 
 #define W1_F23_READ_EEPROM     0xF0
 #define W1_F23_WRITE_SCRATCH   0x0F
 #define W1_F23_READ_SCRATCH    0xAA
 #define W1_F23_COPY_SCRATCH    0x55
 
+struct ds2433_config {
+       size_t eeprom_size;             /* eeprom size in bytes */
+       unsigned int page_count;        /* number of 256 bits pages */
+       unsigned int tprog;             /* time in ms for page programming */
+};
+
+static const struct ds2433_config config_f23 = {
+       .eeprom_size = W1_EEPROM_DS2433_SIZE,
+       .page_count = 16,
+       .tprog = 5,
+};
+
+static const struct ds2433_config config_f43 = {
+       .eeprom_size = W1_EEPROM_DS28EC20_SIZE,
+       .page_count = 80,
+       .tprog = 10,
+};
+
 struct w1_f23_data {
-       u8      memory[W1_EEPROM_SIZE];
-       u32     validcrc;
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
+       u8 *memory;
+       DECLARE_BITMAP(validcrc, W1_VALIDCRC_MAX);
+#endif
+       const struct ds2433_config *cfg;
 };
 
 /*
@@ -64,11 +87,11 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
        u8      wrbuf[3];
        int     off = block * W1_PAGE_SIZE;
 
-       if (data->validcrc & (1 << block))
+       if (test_bit(block, data->validcrc))
                return 0;
 
        if (w1_reset_select_slave(sl)) {
-               data->validcrc = 0;
+               bitmap_zero(data->validcrc, data->cfg->page_count);
                return -EIO;
        }
 
@@ -80,7 +103,7 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
 
        /* cache the block if the CRC is valid */
        if (crc16(CRC16_INIT, &data->memory[off], W1_PAGE_SIZE) == CRC16_VALID)
-               data->validcrc |= (1 << block);
+               set_bit(block, data->validcrc);
 
        return 0;
 }
@@ -98,7 +121,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
        u8 wrbuf[3];
 #endif
 
-       count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE);
+       count = w1_f23_fix_count(off, count, bin_attr->size);
        if (!count)
                return 0;
 
@@ -153,9 +176,7 @@ out_up:
  */
 static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
 {
-#ifdef CONFIG_W1_SLAVE_DS2433_CRC
        struct w1_f23_data *f23 = sl->family_data;
-#endif
        u8 wrbuf[4];
        u8 rdbuf[W1_PAGE_SIZE + 3];
        u8 es = (addr + len - 1) & 0x1f;
@@ -191,13 +212,13 @@ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
        wrbuf[3] = es;
        w1_write_block(sl->master, wrbuf, 4);
 
-       /* Sleep for 5 ms to wait for the write to complete */
-       msleep(5);
+       /* Sleep for tprog ms to wait for the write to complete */
+       msleep(f23->cfg->tprog);
 
        /* Reset the bus to wake up the EEPROM (this may not be needed) */
        w1_reset_bus(sl->master);
 #ifdef CONFIG_W1_SLAVE_DS2433_CRC
-       f23->validcrc &= ~(1 << (addr >> W1_PAGE_BITS));
+       clear_bit(addr >> W1_PAGE_BITS, f23->validcrc);
 #endif
        return 0;
 }
@@ -209,7 +230,7 @@ static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
        struct w1_slave *sl = kobj_to_w1_slave(kobj);
        int addr, len, idx;
 
-       count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE);
+       count = w1_f23_fix_count(off, count, bin_attr->size);
        if (!count)
                return 0;
 
@@ -253,10 +274,22 @@ out_up:
        return count;
 }
 
-static BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE);
+static struct bin_attribute bin_attr_f23_eeprom = {
+       .attr = { .name = "eeprom", .mode = 0644 },
+       .read = eeprom_read,
+       .write = eeprom_write,
+       .size = W1_EEPROM_DS2433_SIZE,
+};
+
+static struct bin_attribute bin_attr_f43_eeprom = {
+       .attr = { .name = "eeprom", .mode = 0644 },
+       .read = eeprom_read,
+       .write = eeprom_write,
+       .size = W1_EEPROM_DS28EC20_SIZE,
+};
 
 static struct bin_attribute *w1_f23_bin_attributes[] = {
-       &bin_attr_eeprom,
+       &bin_attr_f23_eeprom,
        NULL,
 };
 
@@ -269,26 +302,63 @@ static const struct attribute_group *w1_f23_groups[] = {
        NULL,
 };
 
+static struct bin_attribute *w1_f43_bin_attributes[] = {
+       &bin_attr_f43_eeprom,
+       NULL,
+};
+
+static const struct attribute_group w1_f43_group = {
+       .bin_attrs = w1_f43_bin_attributes,
+};
+
+static const struct attribute_group *w1_f43_groups[] = {
+       &w1_f43_group,
+       NULL,
+};
+
 static int w1_f23_add_slave(struct w1_slave *sl)
 {
-#ifdef CONFIG_W1_SLAVE_DS2433_CRC
        struct w1_f23_data *data;
 
        data = kzalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
+
+       switch (sl->family->fid) {
+       case W1_EEPROM_DS2433:
+               data->cfg = &config_f23;
+               break;
+       case W1_EEPROM_DS28EC20:
+               data->cfg = &config_f43;
+               break;
+       }
+
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
+       if (data->cfg->page_count > W1_VALIDCRC_MAX) {
+               dev_err(&sl->dev, "page count too big for crc bitmap\n");
+               kfree(data);
+               return -EINVAL;
+       }
+       data->memory = kzalloc(data->cfg->eeprom_size, GFP_KERNEL);
+       if (!data->memory) {
+               kfree(data);
+               return -ENOMEM;
+       }
+       bitmap_zero(data->validcrc, data->cfg->page_count);
+#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
        sl->family_data = data;
 
-#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
        return 0;
 }
 
 static void w1_f23_remove_slave(struct w1_slave *sl)
 {
-#ifdef CONFIG_W1_SLAVE_DS2433_CRC
-       kfree(sl->family_data);
+       struct w1_f23_data *data = sl->family_data;
        sl->family_data = NULL;
-#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
+       kfree(data->memory);
+#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
+       kfree(data);
 }
 
 static const struct w1_family_ops w1_f23_fops = {
@@ -297,13 +367,53 @@ static const struct w1_family_ops w1_f23_fops = {
        .groups         = w1_f23_groups,
 };
 
+static const struct w1_family_ops w1_f43_fops = {
+       .add_slave      = w1_f23_add_slave,
+       .remove_slave   = w1_f23_remove_slave,
+       .groups         = w1_f43_groups,
+};
+
 static struct w1_family w1_family_23 = {
        .fid = W1_EEPROM_DS2433,
        .fops = &w1_f23_fops,
 };
-module_w1_family(w1_family_23);
+
+static struct w1_family w1_family_43 = {
+       .fid = W1_EEPROM_DS28EC20,
+       .fops = &w1_f43_fops,
+};
+
+static int __init w1_ds2433_init(void)
+{
+       int err;
+
+       err = w1_register_family(&w1_family_23);
+       if (err)
+               return err;
+
+       err = w1_register_family(&w1_family_43);
+       if (err)
+               goto err_43;
+
+       return 0;
+
+err_43:
+       w1_unregister_family(&w1_family_23);
+       return err;
+}
+
+static void __exit w1_ds2433_exit(void)
+{
+       w1_unregister_family(&w1_family_23);
+       w1_unregister_family(&w1_family_43);
+}
+
+module_init(w1_ds2433_init);
+module_exit(w1_ds2433_exit);
 
 MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
-MODULE_DESCRIPTION("w1 family 23 driver for DS2433, 4kb EEPROM");
+MODULE_AUTHOR("Marc Ferland <marc.ferland@sonatest.com>");
+MODULE_DESCRIPTION("w1 family 23/43 driver for DS2433 (4kb) and DS28EC20 (20kb)");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2433));
+MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS28EC20));
index 4440e626b7975fc2ea000b159179be0173d84c72..42adc2c1e06b37319be85755ec83d47ff5d2e398 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
@@ -50,7 +51,7 @@ struct gntdev_dmabuf {
 
        /* Number of pages this buffer has. */
        int nr_pages;
-       /* Pages of this buffer. */
+       /* Pages of this buffer (only for dma-buf export). */
        struct page **pages;
 };
 
@@ -484,7 +485,7 @@ out:
 /* DMA buffer import support. */
 
 static int
-dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
+dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
                                int count, int domid)
 {
        grant_ref_t priv_gref_head;
@@ -507,7 +508,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
                }
 
                gnttab_grant_foreign_access_ref(cur_ref, domid,
-                                               xen_page_to_gfn(pages[i]), 0);
+                                               gfns[i], 0);
                refs[i] = cur_ref;
        }
 
@@ -529,7 +530,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
 
 static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
 {
-       kfree(gntdev_dmabuf->pages);
        kfree(gntdev_dmabuf->u.imp.refs);
        kfree(gntdev_dmabuf);
 }
@@ -549,12 +549,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
        if (!gntdev_dmabuf->u.imp.refs)
                goto fail;
 
-       gntdev_dmabuf->pages = kcalloc(count,
-                                      sizeof(gntdev_dmabuf->pages[0]),
-                                      GFP_KERNEL);
-       if (!gntdev_dmabuf->pages)
-               goto fail;
-
        gntdev_dmabuf->nr_pages = count;
 
        for (i = 0; i < count; i++)
@@ -576,7 +570,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
        struct dma_buf *dma_buf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
-       struct sg_page_iter sg_iter;
+       struct sg_dma_page_iter sg_iter;
+       unsigned long *gfns;
        int i;
 
        dma_buf = dma_buf_get(fd);
@@ -624,26 +619,31 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 
        gntdev_dmabuf->u.imp.sgt = sgt;
 
-       /* Now convert sgt to array of pages and check for page validity. */
+       gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
+       if (!gfns) {
+               ret = ERR_PTR(-ENOMEM);
+               goto fail_unmap;
+       }
+
+       /*
+        * Now convert sgt to array of gfns without accessing underlying pages.
+        * It is not allowed to access the underlying struct page of an sg table
+        * exported by DMA-buf, but since we deal with special Xen dma device here
+        * (not a normal physical one) look at the dma addresses in the sg table
+        * and then calculate gfns directly from them.
+        */
        i = 0;
-       for_each_sgtable_page(sgt, &sg_iter, 0) {
-               struct page *page = sg_page_iter_page(&sg_iter);
-               /*
-                * Check if page is valid: this can happen if we are given
-                * a page from VRAM or other resources which are not backed
-                * by a struct page.
-                */
-               if (!pfn_valid(page_to_pfn(page))) {
-                       ret = ERR_PTR(-EINVAL);
-                       goto fail_unmap;
-               }
+       for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
+               dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
+               unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
 
-               gntdev_dmabuf->pages[i++] = page;
+               gfns[i++] = pfn_to_gfn(pfn);
        }
 
-       ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
+       ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
                                                      gntdev_dmabuf->u.imp.refs,
                                                      count, domid));
+       kfree(gfns);
        if (IS_ERR(ret))
                goto fail_end_access;
 
index d4b2519257962de62ebb83ac52327e41dcbf6cdb..32835b4b9bc5030ad3e81ae411d56635f8d6a696 100644 (file)
@@ -119,11 +119,13 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
  * @callback: callback to register
  *
  * Register a @watch on the given path, using the given xenbus_watch structure
- * for storage, and the given @callback function as the callback.  Return 0 on
- * success, or -errno on error.  On success, the given @path will be saved as
- * @watch->node, and remains the caller's to free.  On error, @watch->node will
+ * for storage, and the given @callback function as the callback.  On success,
+ * the given @path will be saved as @watch->node, and remains the
+ * caller's to free.  On error, @watch->node will
  * be NULL, the device will switch to %XenbusStateClosing, and the error will
  * be saved in the store.
+ *
+ * Returns: %0 on success or -errno on error
  */
 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
                      struct xenbus_watch *watch,
@@ -160,12 +162,14 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
  * @pathfmt: format of path to watch
  *
  * Register a watch on the given @path, using the given xenbus_watch
- * structure for storage, and the given @callback function as the callback.
- * Return 0 on success, or -errno on error.  On success, the watched path
- * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
- * kfree().  On error, watch->node will be NULL, so the caller has nothing to
+ * structure for storage, and the given @callback function as the
+ * callback.  On success, the watched path (@path/@path2) will be saved
+ * as @watch->node, and becomes the caller's to kfree().
+ * On error, watch->node will be NULL, so the caller has nothing to
  * free, the device will switch to %XenbusStateClosing, and the error will be
  * saved in the store.
+ *
+ * Returns: %0 on success or -errno on error
  */
 int xenbus_watch_pathfmt(struct xenbus_device *dev,
                         struct xenbus_watch *watch,
@@ -255,13 +259,15 @@ abort:
 }
 
 /**
- * xenbus_switch_state
+ * xenbus_switch_state - save the new state of a driver
  * @dev: xenbus device
  * @state: new state
  *
  * Advertise in the store a change of the given driver to the given new_state.
- * Return 0 on success, or -errno on error.  On error, the device will switch
- * to XenbusStateClosing, and the error will be saved in the store.
+ * On error, the device will switch to XenbusStateClosing, and the error
+ * will be saved in the store.
+ *
+ * Returns: %0 on success or -errno on error
  */
 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
 {
@@ -305,7 +311,7 @@ static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
 }
 
 /**
- * xenbus_dev_error
+ * xenbus_dev_error - place an error message into the store
  * @dev: xenbus device
  * @err: error to report
  * @fmt: error message format
@@ -324,7 +330,7 @@ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
 EXPORT_SYMBOL_GPL(xenbus_dev_error);
 
 /**
- * xenbus_dev_fatal
+ * xenbus_dev_fatal - put an error messages into the store and then shutdown
  * @dev: xenbus device
  * @err: error to report
  * @fmt: error message format
@@ -346,7 +352,7 @@ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
 
-/**
+/*
  * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
  * avoiding recursion within xenbus_switch_state.
  */
@@ -453,7 +459,7 @@ void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
 }
 EXPORT_SYMBOL_GPL(xenbus_teardown_ring);
 
-/**
+/*
  * Allocate an event channel for the given xenbus_device, assigning the newly
  * created local port to *port.  Return 0 on success, or -errno on error.  On
  * error, the device will switch to XenbusStateClosing, and the error will be
@@ -479,7 +485,7 @@ int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
 EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
 
 
-/**
+/*
  * Free an existing event channel. Returns 0 on success or -errno on error.
  */
 int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
@@ -499,7 +505,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
 
 
 /**
- * xenbus_map_ring_valloc
+ * xenbus_map_ring_valloc - allocate & map pages of VA space
  * @dev: xenbus device
  * @gnt_refs: grant reference array
  * @nr_grefs: number of grant references
@@ -507,10 +513,11 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
  *
  * Map @nr_grefs pages of memory into this domain from another
  * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
- * pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address.  Returns 0 on success, and -errno on
- * error. If an error is returned, device will switch to
+ * pages of virtual address space, maps the pages to that address, and sets
+ * *vaddr to that address.  If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
+ *
+ * Returns: %0 on success or -errno on error
  */
 int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                           unsigned int nr_grefs, void **vaddr)
@@ -599,14 +606,15 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
 }
 
 /**
- * xenbus_unmap_ring
+ * xenbus_unmap_ring - unmap memory from another domain
  * @dev: xenbus device
  * @handles: grant handle array
  * @nr_handles: number of handles in the array
  * @vaddrs: addresses to unmap
  *
  * Unmap memory in this domain that was imported from another domain.
- * Returns 0 on success and returns GNTST_* on error
+ *
+ * Returns: %0 on success or GNTST_* on error
  * (see xen/include/interface/grant_table.h).
  */
 static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
@@ -712,7 +720,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
 }
 
 /**
- * xenbus_unmap_ring_vfree
+ * xenbus_unmap_ring_vfree - unmap a page of memory from another domain
  * @dev: xenbus device
  * @vaddr: addr to unmap
  *
@@ -720,7 +728,8 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
  * Unmap a page of memory in this domain that was imported from another domain.
  * Use xenbus_unmap_ring_vfree if you mapped in your memory with
  * xenbus_map_ring_valloc (it will free the virtual address space).
- * Returns 0 on success and returns GNTST_* on error
+ *
+ * Returns: %0 on success or GNTST_* on error
  * (see xen/include/interface/grant_table.h).
  */
 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
@@ -916,10 +925,10 @@ static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
 }
 
 /**
- * xenbus_read_driver_state
+ * xenbus_read_driver_state - read state from a store path
  * @path: path for driver
  *
- * Return the state of the driver rooted at the given store path, or
+ * Returns: the state of the driver rooted at the given store path, or
  * XenbusStateUnknown if no state can be read.
  */
 enum xenbus_state xenbus_read_driver_state(const char *path)
index 731e3d14b67d360e3ac2f14b2c139d38d4b4caae..0e8418066a482f5ce6332372b3af1259ef02237a 100644 (file)
@@ -42,6 +42,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb);
 void v9fs_free_inode(struct inode *inode);
 struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode,
                             dev_t rdev);
+void v9fs_set_netfs_context(struct inode *inode);
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
                    struct inode *inode, umode_t mode, dev_t rdev);
 void v9fs_evict_inode(struct inode *inode);
index 8a635999a7d617ee4920854f9108d93855bb55ab..047855033d32f73f054a074452622499d5cf983c 100644 (file)
 #include <linux/netfs.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
+#include <trace/events/netfs.h>
 
 #include "v9fs.h"
 #include "v9fs_vfs.h"
 #include "cache.h"
 #include "fid.h"
 
+static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq)
+{
+       struct p9_fid *fid = subreq->rreq->netfs_priv;
+       int err, len;
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+       len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
+       netfs_write_subrequest_terminated(subreq, len ?: err, false);
+}
+
+static void v9fs_upload_to_server_worker(struct work_struct *work)
+{
+       struct netfs_io_subrequest *subreq =
+               container_of(work, struct netfs_io_subrequest, work);
+
+       v9fs_upload_to_server(subreq);
+}
+
+/*
+ * Set up write requests for a writeback slice.  We need to add a write request
+ * for each write we want to make.
+ */
+static void v9fs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
+{
+       struct netfs_io_subrequest *subreq;
+
+       subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
+                                           start, len, v9fs_upload_to_server_worker);
+       if (subreq)
+               netfs_queue_write_request(subreq);
+}
+
 /**
  * v9fs_issue_read - Issue a read from 9P
  * @subreq: The read to make
@@ -33,14 +66,10 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 {
        struct netfs_io_request *rreq = subreq->rreq;
        struct p9_fid *fid = rreq->netfs_priv;
-       struct iov_iter to;
-       loff_t pos = subreq->start + subreq->transferred;
-       size_t len = subreq->len   - subreq->transferred;
        int total, err;
 
-       iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len);
-
-       total = p9_client_read(fid, pos, &to, &err);
+       total = p9_client_read(fid, subreq->start + subreq->transferred,
+                              &subreq->io_iter, &err);
 
        /* if we just extended the file size, any portion not in
         * cache won't be on server and is zeroes */
@@ -50,25 +79,42 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 }
 
 /**
- * v9fs_init_request - Initialise a read request
+ * v9fs_init_request - Initialise a request
  * @rreq: The read request
  * @file: The file being read from
  */
 static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
-       struct p9_fid *fid = file->private_data;
-
-       BUG_ON(!fid);
+       struct p9_fid *fid;
+       bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
+                       rreq->origin == NETFS_WRITEBACK ||
+                       rreq->origin == NETFS_WRITETHROUGH ||
+                       rreq->origin == NETFS_LAUNDER_WRITE ||
+                       rreq->origin == NETFS_UNBUFFERED_WRITE ||
+                       rreq->origin == NETFS_DIO_WRITE);
+
+       if (file) {
+               fid = file->private_data;
+               if (!fid)
+                       goto no_fid;
+               p9_fid_get(fid);
+       } else {
+               fid = v9fs_fid_find_inode(rreq->inode, writing, INVALID_UID, true);
+               if (!fid)
+                       goto no_fid;
+       }
 
        /* we might need to read from a fid that was opened write-only
         * for read-modify-write of page cache, use the writeback fid
         * for that */
-       WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE &&
-                       !(fid->mode & P9_ORDWR));
-
-       p9_fid_get(fid);
+       WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE && !(fid->mode & P9_ORDWR));
        rreq->netfs_priv = fid;
        return 0;
+
+no_fid:
+       WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
+                 rreq->inode->i_ino);
+       return -EINVAL;
 }
 
 /**
@@ -82,281 +128,20 @@ static void v9fs_free_request(struct netfs_io_request *rreq)
        p9_fid_put(fid);
 }
 
-/**
- * v9fs_begin_cache_operation - Begin a cache operation for a read
- * @rreq: The read request
- */
-static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
-{
-#ifdef CONFIG_9P_FSCACHE
-       struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));
-
-       return fscache_begin_read_operation(&rreq->cache_resources, cookie);
-#else
-       return -ENOBUFS;
-#endif
-}
-
 const struct netfs_request_ops v9fs_req_ops = {
        .init_request           = v9fs_init_request,
        .free_request           = v9fs_free_request,
-       .begin_cache_operation  = v9fs_begin_cache_operation,
        .issue_read             = v9fs_issue_read,
+       .create_write_requests  = v9fs_create_write_requests,
 };
 
-/**
- * v9fs_release_folio - release the private state associated with a folio
- * @folio: The folio to be released
- * @gfp: The caller's allocation restrictions
- *
- * Returns true if the page can be released, false otherwise.
- */
-
-static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
-{
-       if (folio_test_private(folio))
-               return false;
-#ifdef CONFIG_9P_FSCACHE
-       if (folio_test_fscache(folio)) {
-               if (current_is_kswapd() || !(gfp & __GFP_FS))
-                       return false;
-               folio_wait_fscache(folio);
-       }
-       fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio))));
-#endif
-       return true;
-}
-
-static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
-                                size_t length)
-{
-       folio_wait_fscache(folio);
-}
-
-#ifdef CONFIG_9P_FSCACHE
-static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
-                                    bool was_async)
-{
-       struct v9fs_inode *v9inode = priv;
-       __le32 version;
-
-       if (IS_ERR_VALUE(transferred_or_error) &&
-           transferred_or_error != -ENOBUFS) {
-               version = cpu_to_le32(v9inode->qid.version);
-               fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
-                                  i_size_read(&v9inode->netfs.inode), 0);
-       }
-}
-#endif
-
-static int v9fs_vfs_write_folio_locked(struct folio *folio)
-{
-       struct inode *inode = folio_inode(folio);
-       loff_t start = folio_pos(folio);
-       loff_t i_size = i_size_read(inode);
-       struct iov_iter from;
-       size_t len = folio_size(folio);
-       struct p9_fid *writeback_fid;
-       int err;
-       struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
-       struct fscache_cookie __maybe_unused *cookie = v9fs_inode_cookie(v9inode);
-
-       if (start >= i_size)
-               return 0; /* Simultaneous truncation occurred */
-
-       len = min_t(loff_t, i_size - start, len);
-
-       iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len);
-
-       writeback_fid = v9fs_fid_find_inode(inode, true, INVALID_UID, true);
-       if (!writeback_fid) {
-               WARN_ONCE(1, "folio expected an open fid inode->i_private=%p\n",
-                       inode->i_private);
-               return -EINVAL;
-       }
-
-       folio_wait_fscache(folio);
-       folio_start_writeback(folio);
-
-       p9_client_write(writeback_fid, start, &from, &err);
-
-#ifdef CONFIG_9P_FSCACHE
-       if (err == 0 &&
-               fscache_cookie_enabled(cookie) &&
-               test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
-               folio_start_fscache(folio);
-               fscache_write_to_cache(v9fs_inode_cookie(v9inode),
-                                       folio_mapping(folio), start, len, i_size,
-                                       v9fs_write_to_cache_done, v9inode,
-                                       true);
-       }
-#endif
-
-       folio_end_writeback(folio);
-       p9_fid_put(writeback_fid);
-
-       return err;
-}
-
-static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
-{
-       struct folio *folio = page_folio(page);
-       int retval;
-
-       p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);
-
-       retval = v9fs_vfs_write_folio_locked(folio);
-       if (retval < 0) {
-               if (retval == -EAGAIN) {
-                       folio_redirty_for_writepage(wbc, folio);
-                       retval = 0;
-               } else {
-                       mapping_set_error(folio_mapping(folio), retval);
-               }
-       } else
-               retval = 0;
-
-       folio_unlock(folio);
-       return retval;
-}
-
-static int v9fs_launder_folio(struct folio *folio)
-{
-       int retval;
-
-       if (folio_clear_dirty_for_io(folio)) {
-               retval = v9fs_vfs_write_folio_locked(folio);
-               if (retval)
-                       return retval;
-       }
-       folio_wait_fscache(folio);
-       return 0;
-}
-
-/**
- * v9fs_direct_IO - 9P address space operation for direct I/O
- * @iocb: target I/O control block
- * @iter: The data/buffer to use
- *
- * The presence of v9fs_direct_IO() in the address space ops vector
- * allowes open() O_DIRECT flags which would have failed otherwise.
- *
- * In the non-cached mode, we shunt off direct read and write requests before
- * the VFS gets them, so this method should never be called.
- *
- * Direct IO is not 'yet' supported in the cached mode. Hence when
- * this routine is called through generic_file_aio_read(), the read/write fails
- * with an error.
- *
- */
-static ssize_t
-v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
-       struct file *file = iocb->ki_filp;
-       loff_t pos = iocb->ki_pos;
-       ssize_t n;
-       int err = 0;
-
-       if (iov_iter_rw(iter) == WRITE) {
-               n = p9_client_write(file->private_data, pos, iter, &err);
-               if (n) {
-                       struct inode *inode = file_inode(file);
-                       loff_t i_size = i_size_read(inode);
-
-                       if (pos + n > i_size)
-                               inode_add_bytes(inode, pos + n - i_size);
-               }
-       } else {
-               n = p9_client_read(file->private_data, pos, iter, &err);
-       }
-       return n ? n : err;
-}
-
-static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
-                           loff_t pos, unsigned int len,
-                           struct page **subpagep, void **fsdata)
-{
-       int retval;
-       struct folio *folio;
-       struct v9fs_inode *v9inode = V9FS_I(mapping->host);
-
-       p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
-
-       /* Prefetch area to be written into the cache if we're caching this
-        * file.  We need to do this before we get a lock on the page in case
-        * there's more than one writer competing for the same cache block.
-        */
-       retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
-       if (retval < 0)
-               return retval;
-
-       *subpagep = &folio->page;
-       return retval;
-}
-
-static int v9fs_write_end(struct file *filp, struct address_space *mapping,
-                         loff_t pos, unsigned int len, unsigned int copied,
-                         struct page *subpage, void *fsdata)
-{
-       loff_t last_pos = pos + copied;
-       struct folio *folio = page_folio(subpage);
-       struct inode *inode = mapping->host;
-
-       p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
-
-       if (!folio_test_uptodate(folio)) {
-               if (unlikely(copied < len)) {
-                       copied = 0;
-                       goto out;
-               }
-
-               folio_mark_uptodate(folio);
-       }
-
-       /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold the i_mutex.
-        */
-       if (last_pos > inode->i_size) {
-               inode_add_bytes(inode, last_pos - inode->i_size);
-               i_size_write(inode, last_pos);
-#ifdef CONFIG_9P_FSCACHE
-               fscache_update_cookie(v9fs_inode_cookie(V9FS_I(inode)), NULL,
-                       &last_pos);
-#endif
-       }
-       folio_mark_dirty(folio);
-out:
-       folio_unlock(folio);
-       folio_put(folio);
-
-       return copied;
-}
-
-#ifdef CONFIG_9P_FSCACHE
-/*
- * Mark a page as having been made dirty and thus needing writeback.  We also
- * need to pin the cache object to write back to.
- */
-static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
-{
-       struct v9fs_inode *v9inode = V9FS_I(mapping->host);
-
-       return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
-}
-#else
-#define v9fs_dirty_folio filemap_dirty_folio
-#endif
-
 const struct address_space_operations v9fs_addr_operations = {
-       .read_folio = netfs_read_folio,
-       .readahead = netfs_readahead,
-       .dirty_folio = v9fs_dirty_folio,
-       .writepage = v9fs_vfs_writepage,
-       .write_begin = v9fs_write_begin,
-       .write_end = v9fs_write_end,
-       .release_folio = v9fs_release_folio,
-       .invalidate_folio = v9fs_invalidate_folio,
-       .launder_folio = v9fs_launder_folio,
-       .direct_IO = v9fs_direct_IO,
+       .read_folio             = netfs_read_folio,
+       .readahead              = netfs_readahead,
+       .dirty_folio            = netfs_dirty_folio,
+       .release_folio          = netfs_release_folio,
+       .invalidate_folio       = netfs_invalidate_folio,
+       .launder_folio          = netfs_launder_folio,
+       .direct_IO              = noop_direct_IO,
+       .writepages             = netfs_writepages,
 };
index 11cd8d23f6f2384dfee0ee96e85ba894f373bbe3..bae330c2f0cf07d207af8c193dad15a78703793a 100644 (file)
@@ -353,25 +353,15 @@ static ssize_t
 v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
        struct p9_fid *fid = iocb->ki_filp->private_data;
-       int ret, err = 0;
 
        p9_debug(P9_DEBUG_VFS, "fid %d count %zu offset %lld\n",
                 fid->fid, iov_iter_count(to), iocb->ki_pos);
 
-       if (!(fid->mode & P9L_DIRECT)) {
-               p9_debug(P9_DEBUG_VFS, "(cached)\n");
-               return generic_file_read_iter(iocb, to);
-       }
-
-       if (iocb->ki_filp->f_flags & O_NONBLOCK)
-               ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
-       else
-               ret = p9_client_read(fid, iocb->ki_pos, to, &err);
-       if (!ret)
-               return err;
+       if (fid->mode & P9L_DIRECT)
+               return netfs_unbuffered_read_iter(iocb, to);
 
-       iocb->ki_pos += ret;
-       return ret;
+       p9_debug(P9_DEBUG_VFS, "(cached)\n");
+       return netfs_file_read_iter(iocb, to);
 }
 
 /*
@@ -407,46 +397,14 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct p9_fid *fid = file->private_data;
-       ssize_t retval;
-       loff_t origin;
-       int err = 0;
 
        p9_debug(P9_DEBUG_VFS, "fid %d\n", fid->fid);
 
-       if (!(fid->mode & (P9L_DIRECT | P9L_NOWRITECACHE))) {
-               p9_debug(P9_DEBUG_CACHE, "(cached)\n");
-               return generic_file_write_iter(iocb, from);
-       }
+       if (fid->mode & (P9L_DIRECT | P9L_NOWRITECACHE))
+               return netfs_unbuffered_write_iter(iocb, from);
 
-       retval = generic_write_checks(iocb, from);
-       if (retval <= 0)
-               return retval;
-
-       origin = iocb->ki_pos;
-       retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
-       if (retval > 0) {
-               struct inode *inode = file_inode(file);
-               loff_t i_size;
-               unsigned long pg_start, pg_end;
-
-               pg_start = origin >> PAGE_SHIFT;
-               pg_end = (origin + retval - 1) >> PAGE_SHIFT;
-               if (inode->i_mapping && inode->i_mapping->nrpages)
-                       invalidate_inode_pages2_range(inode->i_mapping,
-                                                     pg_start, pg_end);
-               iocb->ki_pos += retval;
-               i_size = i_size_read(inode);
-               if (iocb->ki_pos > i_size) {
-                       inode_add_bytes(inode, iocb->ki_pos - i_size);
-                       /*
-                        * Need to serialize against i_size_write() in
-                        * v9fs_stat2inode()
-                        */
-                       v9fs_i_size_write(inode, iocb->ki_pos);
-               }
-               return retval;
-       }
-       return err;
+       p9_debug(P9_DEBUG_CACHE, "(cached)\n");
+       return netfs_file_write_iter(iocb, from);
 }
 
 static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
@@ -519,36 +477,7 @@ v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
 static vm_fault_t
 v9fs_vm_page_mkwrite(struct vm_fault *vmf)
 {
-       struct folio *folio = page_folio(vmf->page);
-       struct file *filp = vmf->vma->vm_file;
-       struct inode *inode = file_inode(filp);
-
-
-       p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
-                folio, (unsigned long)filp->private_data);
-
-       /* Wait for the page to be written to the cache before we allow it to
-        * be modified.  We then assume the entire page will need writing back.
-        */
-#ifdef CONFIG_9P_FSCACHE
-       if (folio_test_fscache(folio) &&
-           folio_wait_fscache_killable(folio) < 0)
-               return VM_FAULT_NOPAGE;
-#endif
-
-       /* Update file times before taking page lock */
-       file_update_time(filp);
-
-       if (folio_lock_killable(folio) < 0)
-               return VM_FAULT_RETRY;
-       if (folio_mapping(folio) != inode->i_mapping)
-               goto out_unlock;
-       folio_wait_stable(folio);
-
-       return VM_FAULT_LOCKED;
-out_unlock:
-       folio_unlock(folio);
-       return VM_FAULT_NOPAGE;
+       return netfs_page_mkwrite(vmf, NULL);
 }
 
 static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
index b845ee18a80be7a1aac30fab226f45a7ae02f343..32572982f72e68a6db3967d9ab9ba9d51c8bae9c 100644 (file)
@@ -246,10 +246,10 @@ void v9fs_free_inode(struct inode *inode)
 /*
  * Set parameters for the netfs library
  */
-static void v9fs_set_netfs_context(struct inode *inode)
+void v9fs_set_netfs_context(struct inode *inode)
 {
        struct v9fs_inode *v9inode = V9FS_I(inode);
-       netfs_inode_init(&v9inode->netfs, &v9fs_req_ops);
+       netfs_inode_init(&v9inode->netfs, &v9fs_req_ops, true);
 }
 
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
@@ -326,8 +326,6 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
                err = -EINVAL;
                goto error;
        }
-
-       v9fs_set_netfs_context(inode);
 error:
        return err;
 
@@ -359,6 +357,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
                iput(inode);
                return ERR_PTR(err);
        }
+       v9fs_set_netfs_context(inode);
        return inode;
 }
 
@@ -374,11 +373,8 @@ void v9fs_evict_inode(struct inode *inode)
 
        truncate_inode_pages_final(&inode->i_data);
 
-#ifdef CONFIG_9P_FSCACHE
        version = cpu_to_le32(v9inode->qid.version);
-       fscache_clear_inode_writeback(v9fs_inode_cookie(v9inode), inode,
-                                     &version);
-#endif
+       netfs_clear_inode_writeback(inode, &version);
 
        clear_inode(inode);
        filemap_fdatawrite(&inode->i_data);
@@ -464,6 +460,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
                goto error;
 
        v9fs_stat2inode(st, inode, sb, 0);
+       v9fs_set_netfs_context(inode);
        v9fs_cache_inode_get_cookie(inode);
        unlock_new_inode(inode);
        return inode;
@@ -1113,7 +1110,7 @@ static int v9fs_vfs_setattr(struct mnt_idmap *idmap,
        if ((iattr->ia_valid & ATTR_SIZE) &&
                 iattr->ia_size != i_size_read(inode)) {
                truncate_setsize(inode, iattr->ia_size);
-               truncate_pagecache(inode, iattr->ia_size);
+               netfs_resize_file(netfs_inode(inode), iattr->ia_size, true);
 
 #ifdef CONFIG_9P_FSCACHE
                if (v9ses->cache & CACHE_FSCACHE) {
@@ -1181,6 +1178,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
        mode |= inode->i_mode & ~S_IALLUGO;
        inode->i_mode = mode;
 
+       v9inode->netfs.remote_i_size = stat->length;
        if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
                v9fs_i_size_write(inode, stat->length);
        /* not real number of blocks, but 512 byte ones ... */
index c7319af2f4711e5686f453262b6ce3eeee9e651f..3505227e170402be03b2df40ff8f3ba2994a07d2 100644 (file)
@@ -128,6 +128,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
                goto error;
 
        v9fs_stat2inode_dotl(st, inode, 0);
+       v9fs_set_netfs_context(inode);
        v9fs_cache_inode_get_cookie(inode);
        retval = v9fs_get_acl(inode, fid);
        if (retval)
@@ -598,7 +599,7 @@ int v9fs_vfs_setattr_dotl(struct mnt_idmap *idmap,
        if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size !=
                 i_size_read(inode)) {
                truncate_setsize(inode, iattr->ia_size);
-               truncate_pagecache(inode, iattr->ia_size);
+               netfs_resize_file(netfs_inode(inode), iattr->ia_size, true);
 
 #ifdef CONFIG_9P_FSCACHE
                if (v9ses->cache & CACHE_FSCACHE)
@@ -655,6 +656,7 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
                mode |= inode->i_mode & ~S_IALLUGO;
                inode->i_mode = mode;
 
+               v9inode->netfs.remote_i_size = stat->st_size;
                if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
                        v9fs_i_size_write(inode, stat->st_size);
                inode->i_blocks = stat->st_blocks;
@@ -683,8 +685,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
                        inode->i_mode = mode;
                }
                if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
-                   stat->st_result_mask & P9_STATS_SIZE)
+                   stat->st_result_mask & P9_STATS_SIZE) {
+                       v9inode->netfs.remote_i_size = stat->st_size;
                        v9fs_i_size_write(inode, stat->st_size);
+               }
                if (stat->st_result_mask & P9_STATS_BLOCKS)
                        inode->i_blocks = stat->st_blocks;
        }
index 73db55c050bf10b60137182d0cb639cb72561779..941f7d0e0bfa27e67aa34a9c7eebec3a65fc6f99 100644 (file)
@@ -289,31 +289,21 @@ static int v9fs_drop_inode(struct inode *inode)
 static int v9fs_write_inode(struct inode *inode,
                            struct writeback_control *wbc)
 {
-       struct v9fs_inode *v9inode;
-
        /*
         * send an fsync request to server irrespective of
         * wbc->sync_mode.
         */
        p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
-
-       v9inode = V9FS_I(inode);
-       fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode));
-
-       return 0;
+       return netfs_unpin_writeback(inode, wbc);
 }
 
 static int v9fs_write_inode_dotl(struct inode *inode,
                                 struct writeback_control *wbc)
 {
-       struct v9fs_inode *v9inode;
 
-       v9inode = V9FS_I(inode);
        p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
 
-       fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode));
-
-       return 0;
+       return netfs_unpin_writeback(inode, wbc);
 }
 
 static const struct super_operations v9fs_super_ops = {
index a3159831ba98e71bbb3e8b8fdd77422b72659a51..89fdbefd1075f8f5a071987bca3b4a50f687a887 100644 (file)
@@ -144,7 +144,6 @@ source "fs/overlayfs/Kconfig"
 menu "Caches"
 
 source "fs/netfs/Kconfig"
-source "fs/fscache/Kconfig"
 source "fs/cachefiles/Kconfig"
 
 endmenu
index a6962c588962d0a50fdb11c1fe31da59fe19ee88..c09016257f05e82a772da50ab18e2d708ea5a768 100644 (file)
@@ -61,7 +61,6 @@ obj-$(CONFIG_DLM)             += dlm/
  
 # Do not add any filesystems before this line
 obj-$(CONFIG_NETFS_SUPPORT)    += netfs/
-obj-$(CONFIG_FSCACHE)          += fscache/
 obj-$(CONFIG_REISERFS_FS)      += reiserfs/
 obj-$(CONFIG_EXT4_FS)          += ext4/
 # We place ext4 before ext2 so that clean ext3 root fs's do NOT mount using the
index c14533ef108f191a7209f4f035e084fb8a41b57a..b5b8de521f99b26ba6c9b2fd707fb794a62612ae 100644 (file)
@@ -124,7 +124,7 @@ static void afs_dir_read_cleanup(struct afs_read *req)
                if (xas_retry(&xas, folio))
                        continue;
                BUG_ON(xa_is_value(folio));
-               ASSERTCMP(folio_file_mapping(folio), ==, mapping);
+               ASSERTCMP(folio->mapping, ==, mapping);
 
                folio_put(folio);
        }
@@ -202,12 +202,12 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
                if (xas_retry(&xas, folio))
                        continue;
 
-               BUG_ON(folio_file_mapping(folio) != mapping);
+               BUG_ON(folio->mapping != mapping);
 
                size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));
                for (offset = 0; offset < size; offset += sizeof(*block)) {
                        block = kmap_local_folio(folio, offset);
-                       pr_warn("[%02lx] %32phN\n", folio_index(folio) + offset, block);
+                       pr_warn("[%02lx] %32phN\n", folio->index + offset, block);
                        kunmap_local(block);
                }
        }
@@ -233,7 +233,7 @@ static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
                if (xas_retry(&xas, folio))
                        continue;
 
-               BUG_ON(folio_file_mapping(folio) != mapping);
+               BUG_ON(folio->mapping != mapping);
 
                if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) {
                        afs_dir_dump(dvnode, req);
@@ -474,6 +474,14 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
                        continue;
                }
 
+               /* Don't expose silly rename entries to userspace. */
+               if (nlen > 6 &&
+                   dire->u.name[0] == '.' &&
+                   ctx->actor != afs_lookup_filldir &&
+                   ctx->actor != afs_lookup_one_filldir &&
+                   memcmp(dire->u.name, ".__afs", 6) == 0)
+                       continue;
+
                /* found the next entry */
                if (!dir_emit(ctx, dire->u.name, nlen,
                              ntohl(dire->u.vnode),
@@ -708,6 +716,8 @@ static void afs_do_lookup_success(struct afs_operation *op)
                        break;
                }
 
+               if (vp->scb.status.abort_code)
+                       trace_afs_bulkstat_error(op, &vp->fid, i, vp->scb.status.abort_code);
                if (!vp->scb.have_status && !vp->scb.have_error)
                        continue;
 
@@ -897,12 +907,16 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
                afs_begin_vnode_operation(op);
                afs_wait_for_operation(op);
        }
-       inode = ERR_PTR(afs_op_error(op));
 
 out_op:
        if (!afs_op_error(op)) {
-               inode = &op->file[1].vnode->netfs.inode;
-               op->file[1].vnode = NULL;
+               if (op->file[1].scb.status.abort_code) {
+                       afs_op_accumulate_error(op, -ECONNABORTED,
+                                               op->file[1].scb.status.abort_code);
+               } else {
+                       inode = &op->file[1].vnode->netfs.inode;
+                       op->file[1].vnode = NULL;
+               }
        }
 
        if (op->file[0].scb.have_status)
@@ -2022,7 +2036,7 @@ static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
        struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
 
-       _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio));
+       _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio->index);
 
        folio_detach_private(folio);
 
index 2cd40ba601f1cd45a8f80106b8c550cb5e3d94a7..c4d2711e20ad4476cabc0d41b4e50e2aad4477e4 100644 (file)
@@ -76,7 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
        /* there shouldn't be an existing inode */
        BUG_ON(!(inode->i_state & I_NEW));
 
-       netfs_inode_init(&vnode->netfs, NULL);
+       netfs_inode_init(&vnode->netfs, NULL, false);
        inode->i_size           = 0;
        inode->i_mode           = S_IFDIR | S_IRUGO | S_IXUGO;
        if (root) {
@@ -258,16 +258,7 @@ const struct inode_operations afs_dynroot_inode_operations = {
        .lookup         = afs_dynroot_lookup,
 };
 
-/*
- * Dirs in the dynamic root don't need revalidation.
- */
-static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
-{
-       return 1;
-}
-
 const struct dentry_operations afs_dynroot_dentry_operations = {
-       .d_revalidate   = afs_dynroot_d_revalidate,
        .d_delete       = always_delete_dentry,
        .d_release      = afs_d_release,
        .d_automount    = afs_d_automount,
index 30914e0d9cb29903cd42aacc9da6c3088f52f78a..3d33b221d9ca256a3b3d978a835d2db9fff2e284 100644 (file)
@@ -20,9 +20,6 @@
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
 static int afs_symlink_read_folio(struct file *file, struct folio *folio);
-static void afs_invalidate_folio(struct folio *folio, size_t offset,
-                              size_t length);
-static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags);
 
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
@@ -37,7 +34,7 @@ const struct file_operations afs_file_operations = {
        .release        = afs_release,
        .llseek         = generic_file_llseek,
        .read_iter      = afs_file_read_iter,
-       .write_iter     = afs_file_write,
+       .write_iter     = netfs_file_write_iter,
        .mmap           = afs_file_mmap,
        .splice_read    = afs_file_splice_read,
        .splice_write   = iter_file_splice_write,
@@ -53,22 +50,21 @@ const struct inode_operations afs_file_inode_operations = {
 };
 
 const struct address_space_operations afs_file_aops = {
+       .direct_IO      = noop_direct_IO,
        .read_folio     = netfs_read_folio,
        .readahead      = netfs_readahead,
-       .dirty_folio    = afs_dirty_folio,
-       .launder_folio  = afs_launder_folio,
-       .release_folio  = afs_release_folio,
-       .invalidate_folio = afs_invalidate_folio,
-       .write_begin    = afs_write_begin,
-       .write_end      = afs_write_end,
-       .writepages     = afs_writepages,
+       .dirty_folio    = netfs_dirty_folio,
+       .launder_folio  = netfs_launder_folio,
+       .release_folio  = netfs_release_folio,
+       .invalidate_folio = netfs_invalidate_folio,
        .migrate_folio  = filemap_migrate_folio,
+       .writepages     = afs_writepages,
 };
 
 const struct address_space_operations afs_symlink_aops = {
        .read_folio     = afs_symlink_read_folio,
-       .release_folio  = afs_release_folio,
-       .invalidate_folio = afs_invalidate_folio,
+       .release_folio  = netfs_release_folio,
+       .invalidate_folio = netfs_invalidate_folio,
        .migrate_folio  = filemap_migrate_folio,
 };
 
@@ -323,11 +319,7 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq)
        fsreq->len      = subreq->len   - subreq->transferred;
        fsreq->key      = key_get(subreq->rreq->netfs_priv);
        fsreq->vnode    = vnode;
-       fsreq->iter     = &fsreq->def_iter;
-
-       iov_iter_xarray(&fsreq->def_iter, ITER_DEST,
-                       &fsreq->vnode->netfs.inode.i_mapping->i_pages,
-                       fsreq->pos, fsreq->len);
+       fsreq->iter     = &subreq->io_iter;
 
        afs_fetch_data(fsreq->vnode, fsreq);
        afs_put_read(fsreq);
@@ -359,22 +351,13 @@ static int afs_symlink_read_folio(struct file *file, struct folio *folio)
 
 static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
-       rreq->netfs_priv = key_get(afs_file_key(file));
+       if (file)
+               rreq->netfs_priv = key_get(afs_file_key(file));
+       rreq->rsize = 256 * 1024;
+       rreq->wsize = 256 * 1024;
        return 0;
 }
 
-static int afs_begin_cache_operation(struct netfs_io_request *rreq)
-{
-#ifdef CONFIG_AFS_FSCACHE
-       struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
-
-       return fscache_begin_read_operation(&rreq->cache_resources,
-                                           afs_vnode_cache(vnode));
-#else
-       return -ENOBUFS;
-#endif
-}
-
 static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
                                 struct folio **foliop, void **_fsdata)
 {
@@ -388,128 +371,37 @@ static void afs_free_request(struct netfs_io_request *rreq)
        key_put(rreq->netfs_priv);
 }
 
-const struct netfs_request_ops afs_req_ops = {
-       .init_request           = afs_init_request,
-       .free_request           = afs_free_request,
-       .begin_cache_operation  = afs_begin_cache_operation,
-       .check_write_begin      = afs_check_write_begin,
-       .issue_read             = afs_issue_read,
-};
-
-int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
+static void afs_update_i_size(struct inode *inode, loff_t new_i_size)
 {
-       fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode)));
-       return 0;
-}
-
-/*
- * Adjust the dirty region of the page on truncation or full invalidation,
- * getting rid of the markers altogether if the region is entirely invalidated.
- */
-static void afs_invalidate_dirty(struct folio *folio, size_t offset,
-                                size_t length)
-{
-       struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-       unsigned long priv;
-       unsigned int f, t, end = offset + length;
-
-       priv = (unsigned long)folio_get_private(folio);
-
-       /* we clean up only if the entire page is being invalidated */
-       if (offset == 0 && length == folio_size(folio))
-               goto full_invalidate;
-
-        /* If the page was dirtied by page_mkwrite(), the PTE stays writable
-         * and we don't get another notification to tell us to expand it
-         * again.
-         */
-       if (afs_is_folio_dirty_mmapped(priv))
-               return;
-
-       /* We may need to shorten the dirty region */
-       f = afs_folio_dirty_from(folio, priv);
-       t = afs_folio_dirty_to(folio, priv);
-
-       if (t <= offset || f >= end)
-               return; /* Doesn't overlap */
-
-       if (f < offset && t > end)
-               return; /* Splits the dirty region - just absorb it */
-
-       if (f >= offset && t <= end)
-               goto undirty;
+       struct afs_vnode *vnode = AFS_FS_I(inode);
+       loff_t i_size;
 
-       if (f < offset)
-               t = offset;
-       else
-               f = end;
-       if (f == t)
-               goto undirty;
-
-       priv = afs_folio_dirty(folio, f, t);
-       folio_change_private(folio, (void *)priv);
-       trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio);
-       return;
-
-undirty:
-       trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio);
-       folio_clear_dirty_for_io(folio);
-full_invalidate:
-       trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio);
-       folio_detach_private(folio);
+       write_seqlock(&vnode->cb_lock);
+       i_size = i_size_read(&vnode->netfs.inode);
+       if (new_i_size > i_size) {
+               i_size_write(&vnode->netfs.inode, new_i_size);
+               inode_set_bytes(&vnode->netfs.inode, new_i_size);
+       }
+       write_sequnlock(&vnode->cb_lock);
+       fscache_update_cookie(afs_vnode_cache(vnode), NULL, &new_i_size);
 }
 
-/*
- * invalidate part or all of a page
- * - release a page and clean up its private data if offset is 0 (indicating
- *   the entire page)
- */
-static void afs_invalidate_folio(struct folio *folio, size_t offset,
-                              size_t length)
+static void afs_netfs_invalidate_cache(struct netfs_io_request *wreq)
 {
-       _enter("{%lu},%zu,%zu", folio->index, offset, length);
-
-       BUG_ON(!folio_test_locked(folio));
+       struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
 
-       if (folio_get_private(folio))
-               afs_invalidate_dirty(folio, offset, length);
-
-       folio_wait_fscache(folio);
-       _leave("");
+       afs_invalidate_cache(vnode, 0);
 }
 
-/*
- * release a page and clean up its private state if it's not busy
- * - return true if the page can now be released, false if not
- */
-static bool afs_release_folio(struct folio *folio, gfp_t gfp)
-{
-       struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-
-       _enter("{{%llx:%llu}[%lu],%lx},%x",
-              vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
-              gfp);
-
-       /* deny if folio is being written to the cache and the caller hasn't
-        * elected to wait */
-#ifdef CONFIG_AFS_FSCACHE
-       if (folio_test_fscache(folio)) {
-               if (current_is_kswapd() || !(gfp & __GFP_FS))
-                       return false;
-               folio_wait_fscache(folio);
-       }
-       fscache_note_page_release(afs_vnode_cache(vnode));
-#endif
-
-       if (folio_test_private(folio)) {
-               trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio);
-               folio_detach_private(folio);
-       }
-
-       /* Indicate that the folio can be released */
-       _leave(" = T");
-       return true;
-}
+const struct netfs_request_ops afs_req_ops = {
+       .init_request           = afs_init_request,
+       .free_request           = afs_free_request,
+       .check_write_begin      = afs_check_write_begin,
+       .issue_read             = afs_issue_read,
+       .update_i_size          = afs_update_i_size,
+       .invalidate_cache       = afs_netfs_invalidate_cache,
+       .create_write_requests  = afs_create_write_requests,
+};
 
 static void afs_add_open_mmap(struct afs_vnode *vnode)
 {
@@ -576,28 +468,39 @@ static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pg
 
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+       struct inode *inode = file_inode(iocb->ki_filp);
+       struct afs_vnode *vnode = AFS_FS_I(inode);
        struct afs_file *af = iocb->ki_filp->private_data;
-       int ret;
+       ssize_t ret;
 
-       ret = afs_validate(vnode, af->key);
+       if (iocb->ki_flags & IOCB_DIRECT)
+               return netfs_unbuffered_read_iter(iocb, iter);
+
+       ret = netfs_start_io_read(inode);
        if (ret < 0)
                return ret;
-
-       return generic_file_read_iter(iocb, iter);
+       ret = afs_validate(vnode, af->key);
+       if (ret == 0)
+               ret = filemap_read(iocb, iter, 0);
+       netfs_end_io_read(inode);
+       return ret;
 }
 
 static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
                                    struct pipe_inode_info *pipe,
                                    size_t len, unsigned int flags)
 {
-       struct afs_vnode *vnode = AFS_FS_I(file_inode(in));
+       struct inode *inode = file_inode(in);
+       struct afs_vnode *vnode = AFS_FS_I(inode);
        struct afs_file *af = in->private_data;
-       int ret;
+       ssize_t ret;
 
-       ret = afs_validate(vnode, af->key);
+       ret = netfs_start_io_read(inode);
        if (ret < 0)
                return ret;
-
-       return filemap_splice_read(in, ppos, pipe, len, flags);
+       ret = afs_validate(vnode, af->key);
+       if (ret == 0)
+               ret = filemap_splice_read(in, ppos, pipe, len, flags);
+       netfs_end_io_read(inode);
+       return ret;
 }
index 4f04f6f33f46b940ffee52b25f527b17d729f1b9..94fc049aff584f43e622d164a13fc30962dd04f1 100644 (file)
@@ -58,7 +58,7 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
  */
 static void afs_set_netfs_context(struct afs_vnode *vnode)
 {
-       netfs_inode_init(&vnode->netfs, &afs_req_ops);
+       netfs_inode_init(&vnode->netfs, &afs_req_ops, true);
 }
 
 /*
@@ -166,6 +166,7 @@ static void afs_apply_status(struct afs_operation *op,
        struct inode *inode = &vnode->netfs.inode;
        struct timespec64 t;
        umode_t mode;
+       bool unexpected_jump = false;
        bool data_changed = false;
        bool change_size = vp->set_size;
 
@@ -230,6 +231,7 @@ static void afs_apply_status(struct afs_operation *op,
                }
                change_size = true;
                data_changed = true;
+               unexpected_jump = true;
        } else if (vnode->status.type == AFS_FTYPE_DIR) {
                /* Expected directory change is handled elsewhere so
                 * that we can locally edit the directory and save on a
@@ -249,8 +251,10 @@ static void afs_apply_status(struct afs_operation *op,
                 * what's on the server.
                 */
                vnode->netfs.remote_i_size = status->size;
-               if (change_size) {
+               if (change_size || status->size > i_size_read(inode)) {
                        afs_set_i_size(vnode, status->size);
+                       if (unexpected_jump)
+                               vnode->netfs.zero_point = status->size;
                        inode_set_ctime_to_ts(inode, t);
                        inode_set_atime_to_ts(inode, t);
                }
@@ -647,7 +651,7 @@ void afs_evict_inode(struct inode *inode)
        truncate_inode_pages_final(&inode->i_data);
 
        afs_set_cache_aux(vnode, &aux);
-       fscache_clear_inode_writeback(afs_vnode_cache(vnode), inode, &aux);
+       netfs_clear_inode_writeback(inode, &aux);
        clear_inode(inode);
 
        while (!list_empty(&vnode->wb_keys)) {
@@ -689,17 +693,17 @@ static void afs_setattr_success(struct afs_operation *op)
 static void afs_setattr_edit_file(struct afs_operation *op)
 {
        struct afs_vnode_param *vp = &op->file[0];
-       struct inode *inode = &vp->vnode->netfs.inode;
+       struct afs_vnode *vnode = vp->vnode;
 
        if (op->setattr.attr->ia_valid & ATTR_SIZE) {
                loff_t size = op->setattr.attr->ia_size;
                loff_t i_size = op->setattr.old_i_size;
 
-               if (size < i_size)
-                       truncate_pagecache(inode, size);
-               if (size != i_size)
-                       fscache_resize_cookie(afs_vnode_cache(vp->vnode),
-                                             vp->scb.status.size);
+               if (size != i_size) {
+                       truncate_setsize(&vnode->netfs.inode, size);
+                       netfs_resize_file(&vnode->netfs, size, true);
+                       fscache_resize_cookie(afs_vnode_cache(vnode), size);
+               }
        }
 }
 
@@ -767,11 +771,11 @@ int afs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                 */
                if (!(attr->ia_valid & (supported & ~ATTR_SIZE & ~ATTR_MTIME)) &&
                    attr->ia_size < i_size &&
-                   attr->ia_size > vnode->status.size) {
-                       truncate_pagecache(inode, attr->ia_size);
+                   attr->ia_size > vnode->netfs.remote_i_size) {
+                       truncate_setsize(inode, attr->ia_size);
+                       netfs_resize_file(&vnode->netfs, size, false);
                        fscache_resize_cookie(afs_vnode_cache(vnode),
                                              attr->ia_size);
-                       i_size_write(inode, attr->ia_size);
                        ret = 0;
                        goto out_unlock;
                }
index 55aa0679d8cec4b349424239852372e39b656395..9c03fcf7ffaa84e9f7604444209bd934b64db466 100644 (file)
@@ -985,62 +985,6 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl
                           i_size_read(&vnode->netfs.inode), flags);
 }
 
-/*
- * We use folio->private to hold the amount of the folio that we've written to,
- * splitting the field into two parts.  However, we need to represent a range
- * 0...FOLIO_SIZE, so we reduce the resolution if the size of the folio
- * exceeds what we can encode.
- */
-#ifdef CONFIG_64BIT
-#define __AFS_FOLIO_PRIV_MASK          0x7fffffffUL
-#define __AFS_FOLIO_PRIV_SHIFT         32
-#define __AFS_FOLIO_PRIV_MMAPPED       0x80000000UL
-#else
-#define __AFS_FOLIO_PRIV_MASK          0x7fffUL
-#define __AFS_FOLIO_PRIV_SHIFT         16
-#define __AFS_FOLIO_PRIV_MMAPPED       0x8000UL
-#endif
-
-static inline unsigned int afs_folio_dirty_resolution(struct folio *folio)
-{
-       int shift = folio_shift(folio) - (__AFS_FOLIO_PRIV_SHIFT - 1);
-       return (shift > 0) ? shift : 0;
-}
-
-static inline size_t afs_folio_dirty_from(struct folio *folio, unsigned long priv)
-{
-       unsigned long x = priv & __AFS_FOLIO_PRIV_MASK;
-
-       /* The lower bound is inclusive */
-       return x << afs_folio_dirty_resolution(folio);
-}
-
-static inline size_t afs_folio_dirty_to(struct folio *folio, unsigned long priv)
-{
-       unsigned long x = (priv >> __AFS_FOLIO_PRIV_SHIFT) & __AFS_FOLIO_PRIV_MASK;
-
-       /* The upper bound is immediately beyond the region */
-       return (x + 1) << afs_folio_dirty_resolution(folio);
-}
-
-static inline unsigned long afs_folio_dirty(struct folio *folio, size_t from, size_t to)
-{
-       unsigned int res = afs_folio_dirty_resolution(folio);
-       from >>= res;
-       to = (to - 1) >> res;
-       return (to << __AFS_FOLIO_PRIV_SHIFT) | from;
-}
-
-static inline unsigned long afs_folio_dirty_mmapped(unsigned long priv)
-{
-       return priv | __AFS_FOLIO_PRIV_MMAPPED;
-}
-
-static inline bool afs_is_folio_dirty_mmapped(unsigned long priv)
-{
-       return priv & __AFS_FOLIO_PRIV_MMAPPED;
-}
-
 #include <trace/events/afs.h>
 
 /*****************************************************************************/
@@ -1167,7 +1111,6 @@ extern int afs_release(struct inode *, struct file *);
 extern int afs_fetch_data(struct afs_vnode *, struct afs_read *);
 extern struct afs_read *afs_alloc_read(gfp_t);
 extern void afs_put_read(struct afs_read *);
-extern int afs_write_inode(struct inode *, struct writeback_control *);
 
 static inline struct afs_read *afs_get_read(struct afs_read *req)
 {
@@ -1658,24 +1601,11 @@ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
 /*
  * write.c
  */
-#ifdef CONFIG_AFS_FSCACHE
-bool afs_dirty_folio(struct address_space *, struct folio *);
-#else
-#define afs_dirty_folio filemap_dirty_folio
-#endif
-extern int afs_write_begin(struct file *file, struct address_space *mapping,
-                       loff_t pos, unsigned len,
-                       struct page **pagep, void **fsdata);
-extern int afs_write_end(struct file *file, struct address_space *mapping,
-                       loff_t pos, unsigned len, unsigned copied,
-                       struct page *page, void *fsdata);
-extern int afs_writepage(struct page *, struct writeback_control *);
 extern int afs_writepages(struct address_space *, struct writeback_control *);
-extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
 extern void afs_prune_wb_keys(struct afs_vnode *);
-int afs_launder_folio(struct folio *);
+void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len);
 
 /*
  * xattr.c
index 3bd02571f30debca6159756b5abe30e3dd905583..15eab053af6dc05931363c619cd32cf041093a3f 100644 (file)
@@ -166,7 +166,7 @@ static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
 
        if (!preflist) {
                seq_puts(m, "NO PREFS\n");
-               return 0;
+               goto out;
        }
 
        seq_printf(m, "PROT SUBNET                                      PRIOR (v=%u n=%u/%u/%u)\n",
@@ -191,7 +191,8 @@ static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
                }
        }
 
-       rcu_read_lock();
+out:
+       rcu_read_unlock();
        return 0;
 }
 
index ae2d66a52add9818101351d63a5cbad334e96e44..f3ba1c3e72f5b8d58e3f1c13eb2935d202cfec68 100644 (file)
@@ -55,7 +55,7 @@ int afs_net_id;
 static const struct super_operations afs_super_ops = {
        .statfs         = afs_statfs,
        .alloc_inode    = afs_alloc_inode,
-       .write_inode    = afs_write_inode,
+       .write_inode    = netfs_unpin_writeback,
        .drop_inode     = afs_drop_inode,
        .destroy_inode  = afs_destroy_inode,
        .free_inode     = afs_free_inode,
index 61d34ad2ca7dcd48229f7ff1ae40519511c88aac..74402d95a88434bb58e1e3989d296c11d8f9861d 100644 (file)
 #include <linux/writeback.h>
 #include <linux/pagevec.h>
 #include <linux/netfs.h>
+#include <trace/events/netfs.h>
 #include "internal.h"
 
-static int afs_writepages_region(struct address_space *mapping,
-                                struct writeback_control *wbc,
-                                loff_t start, loff_t end, loff_t *_next,
-                                bool max_one_loop);
-
-static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
-                              loff_t i_size, bool caching);
-
-#ifdef CONFIG_AFS_FSCACHE
-/*
- * Mark a page as having been made dirty and thus needing writeback.  We also
- * need to pin the cache object to write back to.
- */
-bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
-{
-       return fscache_dirty_folio(mapping, folio,
-                               afs_vnode_cache(AFS_FS_I(mapping->host)));
-}
-static void afs_folio_start_fscache(bool caching, struct folio *folio)
-{
-       if (caching)
-               folio_start_fscache(folio);
-}
-#else
-static void afs_folio_start_fscache(bool caching, struct folio *folio)
-{
-}
-#endif
-
-/*
- * Flush out a conflicting write.  This may extend the write to the surrounding
- * pages if also dirty and contiguous to the conflicting region..
- */
-static int afs_flush_conflicting_write(struct address_space *mapping,
-                                      struct folio *folio)
-{
-       struct writeback_control wbc = {
-               .sync_mode      = WB_SYNC_ALL,
-               .nr_to_write    = LONG_MAX,
-               .range_start    = folio_pos(folio),
-               .range_end      = LLONG_MAX,
-       };
-       loff_t next;
-
-       return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX,
-                                    &next, true);
-}
-
-/*
- * prepare to perform part of a write to a page
- */
-int afs_write_begin(struct file *file, struct address_space *mapping,
-                   loff_t pos, unsigned len,
-                   struct page **_page, void **fsdata)
-{
-       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
-       struct folio *folio;
-       unsigned long priv;
-       unsigned f, from;
-       unsigned t, to;
-       pgoff_t index;
-       int ret;
-
-       _enter("{%llx:%llu},%llx,%x",
-              vnode->fid.vid, vnode->fid.vnode, pos, len);
-
-       /* Prefetch area to be written into the cache if we're caching this
-        * file.  We need to do this before we get a lock on the page in case
-        * there's more than one writer competing for the same cache block.
-        */
-       ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);
-       if (ret < 0)
-               return ret;
-
-       index = folio_index(folio);
-       from = pos - index * PAGE_SIZE;
-       to = from + len;
-
-try_again:
-       /* See if this page is already partially written in a way that we can
-        * merge the new write with.
-        */
-       if (folio_test_private(folio)) {
-               priv = (unsigned long)folio_get_private(folio);
-               f = afs_folio_dirty_from(folio, priv);
-               t = afs_folio_dirty_to(folio, priv);
-               ASSERTCMP(f, <=, t);
-
-               if (folio_test_writeback(folio)) {
-                       trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
-                       folio_unlock(folio);
-                       goto wait_for_writeback;
-               }
-               /* If the file is being filled locally, allow inter-write
-                * spaces to be merged into writes.  If it's not, only write
-                * back what the user gives us.
-                */
-               if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
-                   (to < f || from > t))
-                       goto flush_conflicting_write;
-       }
-
-       *_page = folio_file_page(folio, pos / PAGE_SIZE);
-       _leave(" = 0");
-       return 0;
-
-       /* The previous write and this write aren't adjacent or overlapping, so
-        * flush the page out.
-        */
-flush_conflicting_write:
-       trace_afs_folio_dirty(vnode, tracepoint_string("confl"), folio);
-       folio_unlock(folio);
-
-       ret = afs_flush_conflicting_write(mapping, folio);
-       if (ret < 0)
-               goto error;
-
-wait_for_writeback:
-       ret = folio_wait_writeback_killable(folio);
-       if (ret < 0)
-               goto error;
-
-       ret = folio_lock_killable(folio);
-       if (ret < 0)
-               goto error;
-       goto try_again;
-
-error:
-       folio_put(folio);
-       _leave(" = %d", ret);
-       return ret;
-}
-
-/*
- * finalise part of a write to a page
- */
-int afs_write_end(struct file *file, struct address_space *mapping,
-                 loff_t pos, unsigned len, unsigned copied,
-                 struct page *subpage, void *fsdata)
-{
-       struct folio *folio = page_folio(subpage);
-       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
-       unsigned long priv;
-       unsigned int f, from = offset_in_folio(folio, pos);
-       unsigned int t, to = from + copied;
-       loff_t i_size, write_end_pos;
-
-       _enter("{%llx:%llu},{%lx}",
-              vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
-
-       if (!folio_test_uptodate(folio)) {
-               if (copied < len) {
-                       copied = 0;
-                       goto out;
-               }
-
-               folio_mark_uptodate(folio);
-       }
-
-       if (copied == 0)
-               goto out;
-
-       write_end_pos = pos + copied;
-
-       i_size = i_size_read(&vnode->netfs.inode);
-       if (write_end_pos > i_size) {
-               write_seqlock(&vnode->cb_lock);
-               i_size = i_size_read(&vnode->netfs.inode);
-               if (write_end_pos > i_size)
-                       afs_set_i_size(vnode, write_end_pos);
-               write_sequnlock(&vnode->cb_lock);
-               fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
-       }
-
-       if (folio_test_private(folio)) {
-               priv = (unsigned long)folio_get_private(folio);
-               f = afs_folio_dirty_from(folio, priv);
-               t = afs_folio_dirty_to(folio, priv);
-               if (from < f)
-                       f = from;
-               if (to > t)
-                       t = to;
-               priv = afs_folio_dirty(folio, f, t);
-               folio_change_private(folio, (void *)priv);
-               trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
-       } else {
-               priv = afs_folio_dirty(folio, from, to);
-               folio_attach_private(folio, (void *)priv);
-               trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
-       }
-
-       if (folio_mark_dirty(folio))
-               _debug("dirtied %lx", folio_index(folio));
-
-out:
-       folio_unlock(folio);
-       folio_put(folio);
-       return copied;
-}
-
-/*
- * kill all the pages in the given range
- */
-static void afs_kill_pages(struct address_space *mapping,
-                          loff_t start, loff_t len)
-{
-       struct afs_vnode *vnode = AFS_FS_I(mapping->host);
-       struct folio *folio;
-       pgoff_t index = start / PAGE_SIZE;
-       pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
-
-       _enter("{%llx:%llu},%llx @%llx",
-              vnode->fid.vid, vnode->fid.vnode, len, start);
-
-       do {
-               _debug("kill %lx (to %lx)", index, last);
-
-               folio = filemap_get_folio(mapping, index);
-               if (IS_ERR(folio)) {
-                       next = index + 1;
-                       continue;
-               }
-
-               next = folio_next_index(folio);
-
-               folio_clear_uptodate(folio);
-               folio_end_writeback(folio);
-               folio_lock(folio);
-               generic_error_remove_folio(mapping, folio);
-               folio_unlock(folio);
-               folio_put(folio);
-
-       } while (index = next, index <= last);
-
-       _leave("");
-}
-
-/*
- * Redirty all the pages in a given range.
- */
-static void afs_redirty_pages(struct writeback_control *wbc,
-                             struct address_space *mapping,
-                             loff_t start, loff_t len)
-{
-       struct afs_vnode *vnode = AFS_FS_I(mapping->host);
-       struct folio *folio;
-       pgoff_t index = start / PAGE_SIZE;
-       pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
-
-       _enter("{%llx:%llu},%llx @%llx",
-              vnode->fid.vid, vnode->fid.vnode, len, start);
-
-       do {
-               _debug("redirty %llx @%llx", len, start);
-
-               folio = filemap_get_folio(mapping, index);
-               if (IS_ERR(folio)) {
-                       next = index + 1;
-                       continue;
-               }
-
-               next = index + folio_nr_pages(folio);
-               folio_redirty_for_writepage(wbc, folio);
-               folio_end_writeback(folio);
-               folio_put(folio);
-       } while (index = next, index <= last);
-
-       _leave("");
-}
-
 /*
  * completion of write to server
  */
 static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
 {
-       struct address_space *mapping = vnode->netfs.inode.i_mapping;
-       struct folio *folio;
-       pgoff_t end;
-
-       XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
-
        _enter("{%llx:%llu},{%x @%llx}",
               vnode->fid.vid, vnode->fid.vnode, len, start);
 
-       rcu_read_lock();
-
-       end = (start + len - 1) / PAGE_SIZE;
-       xas_for_each(&xas, folio, end) {
-               if (!folio_test_writeback(folio)) {
-                       kdebug("bad %x @%llx page %lx %lx",
-                              len, start, folio_index(folio), end);
-                       ASSERT(folio_test_writeback(folio));
-               }
-
-               trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
-               folio_detach_private(folio);
-               folio_end_writeback(folio);
-       }
-
-       rcu_read_unlock();
-
        afs_prune_wb_keys(vnode);
        _leave("");
 }
@@ -451,363 +159,53 @@ try_next_key:
        return afs_put_operation(op);
 }
 
-/*
- * Extend the region to be written back to include subsequent contiguously
- * dirty pages if possible, but don't sleep while doing so.
- *
- * If this page holds new content, then we can include filler zeros in the
- * writeback.
- */
-static void afs_extend_writeback(struct address_space *mapping,
-                                struct afs_vnode *vnode,
-                                long *_count,
-                                loff_t start,
-                                loff_t max_len,
-                                bool new_content,
-                                bool caching,
-                                unsigned int *_len)
+static void afs_upload_to_server(struct netfs_io_subrequest *subreq)
 {
-       struct folio_batch fbatch;
-       struct folio *folio;
-       unsigned long priv;
-       unsigned int psize, filler = 0;
-       unsigned int f, t;
-       loff_t len = *_len;
-       pgoff_t index = (start + len) / PAGE_SIZE;
-       bool stop = true;
-       unsigned int i;
-
-       XA_STATE(xas, &mapping->i_pages, index);
-       folio_batch_init(&fbatch);
-
-       do {
-               /* Firstly, we gather up a batch of contiguous dirty pages
-                * under the RCU read lock - but we can't clear the dirty flags
-                * there if any of those pages are mapped.
-                */
-               rcu_read_lock();
-
-               xas_for_each(&xas, folio, ULONG_MAX) {
-                       stop = true;
-                       if (xas_retry(&xas, folio))
-                               continue;
-                       if (xa_is_value(folio))
-                               break;
-                       if (folio_index(folio) != index)
-                               break;
-
-                       if (!folio_try_get_rcu(folio)) {
-                               xas_reset(&xas);
-                               continue;
-                       }
-
-                       /* Has the page moved or been split? */
-                       if (unlikely(folio != xas_reload(&xas))) {
-                               folio_put(folio);
-                               break;
-                       }
-
-                       if (!folio_trylock(folio)) {
-                               folio_put(folio);
-                               break;
-                       }
-                       if (!folio_test_dirty(folio) ||
-                           folio_test_writeback(folio) ||
-                           folio_test_fscache(folio)) {
-                               folio_unlock(folio);
-                               folio_put(folio);
-                               break;
-                       }
-
-                       psize = folio_size(folio);
-                       priv = (unsigned long)folio_get_private(folio);
-                       f = afs_folio_dirty_from(folio, priv);
-                       t = afs_folio_dirty_to(folio, priv);
-                       if (f != 0 && !new_content) {
-                               folio_unlock(folio);
-                               folio_put(folio);
-                               break;
-                       }
-
-                       len += filler + t;
-                       filler = psize - t;
-                       if (len >= max_len || *_count <= 0)
-                               stop = true;
-                       else if (t == psize || new_content)
-                               stop = false;
-
-                       index += folio_nr_pages(folio);
-                       if (!folio_batch_add(&fbatch, folio))
-                               break;
-                       if (stop)
-                               break;
-               }
-
-               if (!stop)
-                       xas_pause(&xas);
-               rcu_read_unlock();
-
-               /* Now, if we obtained any folios, we can shift them to being
-                * writable and mark them for caching.
-                */
-               if (!folio_batch_count(&fbatch))
-                       break;
-
-               for (i = 0; i < folio_batch_count(&fbatch); i++) {
-                       folio = fbatch.folios[i];
-                       trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);
-
-                       if (!folio_clear_dirty_for_io(folio))
-                               BUG();
-                       folio_start_writeback(folio);
-                       afs_folio_start_fscache(caching, folio);
-
-                       *_count -= folio_nr_pages(folio);
-                       folio_unlock(folio);
-               }
+       struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
+       ssize_t ret;
 
-               folio_batch_release(&fbatch);
-               cond_resched();
-       } while (!stop);
+       _enter("%x[%x],%zx",
+              subreq->rreq->debug_id, subreq->debug_index, subreq->io_iter.count);
 
-       *_len = len;
+       trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+       ret = afs_store_data(vnode, &subreq->io_iter, subreq->start,
+                            subreq->rreq->origin == NETFS_LAUNDER_WRITE);
+       netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len,
+                                         false);
 }
 
-/*
- * Synchronously write back the locked page and any subsequent non-locked dirty
- * pages.
- */
-static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
-                                               struct writeback_control *wbc,
-                                               struct folio *folio,
-                                               loff_t start, loff_t end)
+static void afs_upload_to_server_worker(struct work_struct *work)
 {
-       struct afs_vnode *vnode = AFS_FS_I(mapping->host);
-       struct iov_iter iter;
-       unsigned long priv;
-       unsigned int offset, to, len, max_len;
-       loff_t i_size = i_size_read(&vnode->netfs.inode);
-       bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
-       bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
-       long count = wbc->nr_to_write;
-       int ret;
-
-       _enter(",%lx,%llx-%llx", folio_index(folio), start, end);
-
-       folio_start_writeback(folio);
-       afs_folio_start_fscache(caching, folio);
-
-       count -= folio_nr_pages(folio);
-
-       /* Find all consecutive lockable dirty pages that have contiguous
-        * written regions, stopping when we find a page that is not
-        * immediately lockable, is not dirty or is missing, or we reach the
-        * end of the range.
-        */
-       priv = (unsigned long)folio_get_private(folio);
-       offset = afs_folio_dirty_from(folio, priv);
-       to = afs_folio_dirty_to(folio, priv);
-       trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);
-
-       len = to - offset;
-       start += offset;
-       if (start < i_size) {
-               /* Trim the write to the EOF; the extra data is ignored.  Also
-                * put an upper limit on the size of a single storedata op.
-                */
-               max_len = 65536 * 4096;
-               max_len = min_t(unsigned long long, max_len, end - start + 1);
-               max_len = min_t(unsigned long long, max_len, i_size - start);
-
-               if (len < max_len &&
-                   (to == folio_size(folio) || new_content))
-                       afs_extend_writeback(mapping, vnode, &count,
-                                            start, max_len, new_content,
-                                            caching, &len);
-               len = min_t(loff_t, len, max_len);
-       }
-
-       /* We now have a contiguous set of dirty pages, each with writeback
-        * set; the first page is still locked at this point, but all the rest
-        * have been unlocked.
-        */
-       folio_unlock(folio);
-
-       if (start < i_size) {
-               _debug("write back %x @%llx [%llx]", len, start, i_size);
-
-               /* Speculatively write to the cache.  We have to fix this up
-                * later if the store fails.
-                */
-               afs_write_to_cache(vnode, start, len, i_size, caching);
-
-               iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
-               ret = afs_store_data(vnode, &iter, start, false);
-       } else {
-               _debug("write discard %x @%llx [%llx]", len, start, i_size);
-
-               /* The dirty region was entirely beyond the EOF. */
-               fscache_clear_page_bits(mapping, start, len, caching);
-               afs_pages_written_back(vnode, start, len);
-               ret = 0;
-       }
-
-       switch (ret) {
-       case 0:
-               wbc->nr_to_write = count;
-               ret = len;
-               break;
+       struct netfs_io_subrequest *subreq =
+               container_of(work, struct netfs_io_subrequest, work);
 
-       default:
-               pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
-               fallthrough;
-       case -EACCES:
-       case -EPERM:
-       case -ENOKEY:
-       case -EKEYEXPIRED:
-       case -EKEYREJECTED:
-       case -EKEYREVOKED:
-       case -ENETRESET:
-               afs_redirty_pages(wbc, mapping, start, len);
-               mapping_set_error(mapping, ret);
-               break;
-
-       case -EDQUOT:
-       case -ENOSPC:
-               afs_redirty_pages(wbc, mapping, start, len);
-               mapping_set_error(mapping, -ENOSPC);
-               break;
-
-       case -EROFS:
-       case -EIO:
-       case -EREMOTEIO:
-       case -EFBIG:
-       case -ENOENT:
-       case -ENOMEDIUM:
-       case -ENXIO:
-               trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
-               afs_kill_pages(mapping, start, len);
-               mapping_set_error(mapping, ret);
-               break;
-       }
-
-       _leave(" = %d", ret);
-       return ret;
+       afs_upload_to_server(subreq);
 }
 
 /*
- * write a region of pages back to the server
+ * Set up write requests for a writeback slice.  We need to add a write request
+ * for each write we want to make.
  */
-static int afs_writepages_region(struct address_space *mapping,
-                                struct writeback_control *wbc,
-                                loff_t start, loff_t end, loff_t *_next,
-                                bool max_one_loop)
+void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
 {
-       struct folio *folio;
-       struct folio_batch fbatch;
-       ssize_t ret;
-       unsigned int i;
-       int n, skips = 0;
-
-       _enter("%llx,%llx,", start, end);
-       folio_batch_init(&fbatch);
-
-       do {
-               pgoff_t index = start / PAGE_SIZE;
-
-               n = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE,
-                                       PAGECACHE_TAG_DIRTY, &fbatch);
-
-               if (!n)
-                       break;
-               for (i = 0; i < n; i++) {
-                       folio = fbatch.folios[i];
-                       start = folio_pos(folio); /* May regress with THPs */
-
-                       _debug("wback %lx", folio_index(folio));
-
-                       /* At this point we hold neither the i_pages lock nor the
-                        * page lock: the page may be truncated or invalidated
-                        * (changing page->mapping to NULL), or even swizzled
-                        * back from swapper_space to tmpfs file mapping
-                        */
-try_again:
-                       if (wbc->sync_mode != WB_SYNC_NONE) {
-                               ret = folio_lock_killable(folio);
-                               if (ret < 0) {
-                                       folio_batch_release(&fbatch);
-                                       return ret;
-                               }
-                       } else {
-                               if (!folio_trylock(folio))
-                                       continue;
-                       }
-
-                       if (folio->mapping != mapping ||
-                           !folio_test_dirty(folio)) {
-                               start += folio_size(folio);
-                               folio_unlock(folio);
-                               continue;
-                       }
-
-                       if (folio_test_writeback(folio) ||
-                           folio_test_fscache(folio)) {
-                               folio_unlock(folio);
-                               if (wbc->sync_mode != WB_SYNC_NONE) {
-                                       folio_wait_writeback(folio);
-#ifdef CONFIG_AFS_FSCACHE
-                                       folio_wait_fscache(folio);
-#endif
-                                       goto try_again;
-                               }
-
-                               start += folio_size(folio);
-                               if (wbc->sync_mode == WB_SYNC_NONE) {
-                                       if (skips >= 5 || need_resched()) {
-                                               *_next = start;
-                                               folio_batch_release(&fbatch);
-                                               _leave(" = 0 [%llx]", *_next);
-                                               return 0;
-                                       }
-                                       skips++;
-                               }
-                               continue;
-                       }
-
-                       if (!folio_clear_dirty_for_io(folio))
-                               BUG();
-                       ret = afs_write_back_from_locked_folio(mapping, wbc,
-                                       folio, start, end);
-                       if (ret < 0) {
-                               _leave(" = %zd", ret);
-                               folio_batch_release(&fbatch);
-                               return ret;
-                       }
-
-                       start += ret;
-               }
+       struct netfs_io_subrequest *subreq;
 
-               folio_batch_release(&fbatch);
-               cond_resched();
-       } while (wbc->nr_to_write > 0);
+       _enter("%x,%llx-%llx", wreq->debug_id, start, start + len);
 
-       *_next = start;
-       _leave(" = 0 [%llx]", *_next);
-       return 0;
+       subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
+                                           start, len, afs_upload_to_server_worker);
+       if (subreq)
+               netfs_queue_write_request(subreq);
 }
 
 /*
  * write some of the pending data back to the server
  */
-int afs_writepages(struct address_space *mapping,
-                  struct writeback_control *wbc)
+int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
-       loff_t start, next;
        int ret;
 
-       _enter("");
-
        /* We have to be careful as we can end up racing with setattr()
         * truncating the pagecache since the caller doesn't take a lock here
         * to prevent it.
@@ -817,68 +215,11 @@ int afs_writepages(struct address_space *mapping,
        else if (!down_read_trylock(&vnode->validate_lock))
                return 0;
 
-       if (wbc->range_cyclic) {
-               start = mapping->writeback_index * PAGE_SIZE;
-               ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX,
-                                           &next, false);
-               if (ret == 0) {
-                       mapping->writeback_index = next / PAGE_SIZE;
-                       if (start > 0 && wbc->nr_to_write > 0) {
-                               ret = afs_writepages_region(mapping, wbc, 0,
-                                                           start, &next, false);
-                               if (ret == 0)
-                                       mapping->writeback_index =
-                                               next / PAGE_SIZE;
-                       }
-               }
-       } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
-               ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX,
-                                           &next, false);
-               if (wbc->nr_to_write > 0 && ret == 0)
-                       mapping->writeback_index = next / PAGE_SIZE;
-       } else {
-               ret = afs_writepages_region(mapping, wbc,
-                                           wbc->range_start, wbc->range_end,
-                                           &next, false);
-       }
-
+       ret = netfs_writepages(mapping, wbc);
        up_read(&vnode->validate_lock);
-       _leave(" = %d", ret);
        return ret;
 }
 
-/*
- * write to an AFS file
- */
-ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
-{
-       struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
-       struct afs_file *af = iocb->ki_filp->private_data;
-       ssize_t result;
-       size_t count = iov_iter_count(from);
-
-       _enter("{%llx:%llu},{%zu},",
-              vnode->fid.vid, vnode->fid.vnode, count);
-
-       if (IS_SWAPFILE(&vnode->netfs.inode)) {
-               printk(KERN_INFO
-                      "AFS: Attempt to write to active swap file!\n");
-               return -EBUSY;
-       }
-
-       if (!count)
-               return 0;
-
-       result = afs_validate(vnode, af->key);
-       if (result < 0)
-               return result;
-
-       result = generic_file_write_iter(iocb, from);
-
-       _leave(" = %zd", result);
-       return result;
-}
-
 /*
  * flush any dirty pages for this process, and check for write errors.
  * - the return status from this call provides a reliable indication of
@@ -907,59 +248,11 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
  */
 vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
 {
-       struct folio *folio = page_folio(vmf->page);
        struct file *file = vmf->vma->vm_file;
-       struct inode *inode = file_inode(file);
-       struct afs_vnode *vnode = AFS_FS_I(inode);
-       struct afs_file *af = file->private_data;
-       unsigned long priv;
-       vm_fault_t ret = VM_FAULT_RETRY;
-
-       _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
-
-       afs_validate(vnode, af->key);
 
-       sb_start_pagefault(inode->i_sb);
-
-       /* Wait for the page to be written to the cache before we allow it to
-        * be modified.  We then assume the entire page will need writing back.
-        */
-#ifdef CONFIG_AFS_FSCACHE
-       if (folio_test_fscache(folio) &&
-           folio_wait_fscache_killable(folio) < 0)
-               goto out;
-#endif
-
-       if (folio_wait_writeback_killable(folio))
-               goto out;
-
-       if (folio_lock_killable(folio) < 0)
-               goto out;
-
-       /* We mustn't change folio->private until writeback is complete as that
-        * details the portion of the page we need to write back and we might
-        * need to redirty the page if there's a problem.
-        */
-       if (folio_wait_writeback_killable(folio) < 0) {
-               folio_unlock(folio);
-               goto out;
-       }
-
-       priv = afs_folio_dirty(folio, 0, folio_size(folio));
-       priv = afs_folio_dirty_mmapped(priv);
-       if (folio_test_private(folio)) {
-               folio_change_private(folio, (void *)priv);
-               trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
-       } else {
-               folio_attach_private(folio, (void *)priv);
-               trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
-       }
-       file_update_time(file);
-
-       ret = VM_FAULT_LOCKED;
-out:
-       sb_end_pagefault(inode->i_sb);
-       return ret;
+       if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0)
+               return VM_FAULT_SIGBUS;
+       return netfs_page_mkwrite(vmf, NULL);
 }
 
 /*
@@ -989,64 +282,3 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
                afs_put_wb_key(wbk);
        }
 }
-
-/*
- * Clean up a page during invalidation.
- */
-int afs_launder_folio(struct folio *folio)
-{
-       struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-       struct iov_iter iter;
-       struct bio_vec bv;
-       unsigned long priv;
-       unsigned int f, t;
-       int ret = 0;
-
-       _enter("{%lx}", folio->index);
-
-       priv = (unsigned long)folio_get_private(folio);
-       if (folio_clear_dirty_for_io(folio)) {
-               f = 0;
-               t = folio_size(folio);
-               if (folio_test_private(folio)) {
-                       f = afs_folio_dirty_from(folio, priv);
-                       t = afs_folio_dirty_to(folio, priv);
-               }
-
-               bvec_set_folio(&bv, folio, t - f, f);
-               iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, bv.bv_len);
-
-               trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
-               ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
-       }
-
-       trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
-       folio_detach_private(folio);
-       folio_wait_fscache(folio);
-       return ret;
-}
-
-/*
- * Deal with the completion of writing the data to the cache.
- */
-static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
-                                   bool was_async)
-{
-       struct afs_vnode *vnode = priv;
-
-       if (IS_ERR_VALUE(transferred_or_error) &&
-           transferred_or_error != -ENOBUFS)
-               afs_invalidate_cache(vnode, 0);
-}
-
-/*
- * Save the write to the cache also.
- */
-static void afs_write_to_cache(struct afs_vnode *vnode,
-                              loff_t start, size_t len, loff_t i_size,
-                              bool caching)
-{
-       fscache_write_to_cache(afs_vnode_cache(vnode),
-                              vnode->netfs.inode.i_mapping, start, len, i_size,
-                              afs_write_to_cache_done, vnode, caching);
-}
index d26222b7eefe247f778d6b574873a9058d225146..0496cb5b6eab9a56a20c4243e348235875970d92 100644 (file)
@@ -79,7 +79,7 @@ static struct file *__anon_inode_getfile(const char *name,
                                         const struct file_operations *fops,
                                         void *priv, int flags,
                                         const struct inode *context_inode,
-                                        bool secure)
+                                        bool make_inode)
 {
        struct inode *inode;
        struct file *file;
@@ -87,7 +87,7 @@ static struct file *__anon_inode_getfile(const char *name,
        if (fops->owner && !try_module_get(fops->owner))
                return ERR_PTR(-ENOENT);
 
-       if (secure) {
+       if (make_inode) {
                inode = anon_inode_make_secure_inode(name, context_inode);
                if (IS_ERR(inode)) {
                        file = ERR_CAST(inode);
@@ -149,13 +149,10 @@ struct file *anon_inode_getfile(const char *name,
 EXPORT_SYMBOL_GPL(anon_inode_getfile);
 
 /**
- * anon_inode_getfile_secure - Like anon_inode_getfile(), but creates a new
+ * anon_inode_create_getfile - Like anon_inode_getfile(), but creates a new
  *                             !S_PRIVATE anon inode rather than reuse the
  *                             singleton anon inode and calls the
- *                             inode_init_security_anon() LSM hook.  This
- *                             allows for both the inode to have its own
- *                             security context and for the LSM to enforce
- *                             policy on the inode's creation.
+ *                             inode_init_security_anon() LSM hook.
  *
  * @name:    [in]    name of the "class" of the new file
  * @fops:    [in]    file operations for the new file
@@ -164,11 +161,21 @@ EXPORT_SYMBOL_GPL(anon_inode_getfile);
  * @context_inode:
  *           [in]    the logical relationship with the new inode (optional)
  *
+ * Create a new anonymous inode and file pair.  This can be done for two
+ * reasons:
+ *
+ * - for the inode to have its own security context, so that LSMs can enforce
+ *   policy on the inode's creation;
+ *
+ * - if the caller needs a unique inode, for example in order to customize
+ *   the size returned by fstat()
+ *
  * The LSM may use @context_inode in inode_init_security_anon(), but a
- * reference to it is not held.  Returns the newly created file* or an error
- * pointer.  See the anon_inode_getfile() documentation for more information.
+ * reference to it is not held.
+ *
+ * Returns the newly created file* or an error pointer.
  */
-struct file *anon_inode_getfile_secure(const char *name,
+struct file *anon_inode_create_getfile(const char *name,
                                       const struct file_operations *fops,
                                       void *priv, int flags,
                                       const struct inode *context_inode)
@@ -176,12 +183,13 @@ struct file *anon_inode_getfile_secure(const char *name,
        return __anon_inode_getfile(name, fops, priv, flags,
                                    context_inode, true);
 }
+EXPORT_SYMBOL_GPL(anon_inode_create_getfile);
 
 static int __anon_inode_getfd(const char *name,
                              const struct file_operations *fops,
                              void *priv, int flags,
                              const struct inode *context_inode,
-                             bool secure)
+                             bool make_inode)
 {
        int error, fd;
        struct file *file;
@@ -192,7 +200,7 @@ static int __anon_inode_getfd(const char *name,
        fd = error;
 
        file = __anon_inode_getfile(name, fops, priv, flags, context_inode,
-                                   secure);
+                                   make_inode);
        if (IS_ERR(file)) {
                error = PTR_ERR(file);
                goto err_put_unused_fd;
@@ -231,10 +239,9 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops,
 EXPORT_SYMBOL_GPL(anon_inode_getfd);
 
 /**
- * anon_inode_getfd_secure - Like anon_inode_getfd(), but creates a new
+ * anon_inode_create_getfd - Like anon_inode_getfd(), but creates a new
  * !S_PRIVATE anon inode rather than reuse the singleton anon inode, and calls
- * the inode_init_security_anon() LSM hook. This allows the inode to have its
- * own security context and for a LSM to reject creation of the inode.
+ * the inode_init_security_anon() LSM hook.
  *
  * @name:    [in]    name of the "class" of the new file
  * @fops:    [in]    file operations for the new file
@@ -243,16 +250,26 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
  * @context_inode:
  *           [in]    the logical relationship with the new inode (optional)
  *
+ * Create a new anonymous inode and file pair.  This can be done for two
+ * reasons:
+ *
+ * - for the inode to have its own security context, so that LSMs can enforce
+ *   policy on the inode's creation;
+ *
+ * - if the caller needs a unique inode, for example in order to customize
+ *   the size returned by fstat()
+ *
  * The LSM may use @context_inode in inode_init_security_anon(), but a
  * reference to it is not held.
+ *
+ * Returns a newly created file descriptor or an error code.
  */
-int anon_inode_getfd_secure(const char *name, const struct file_operations *fops,
+int anon_inode_create_getfd(const char *name, const struct file_operations *fops,
                            void *priv, int flags,
                            const struct inode *context_inode)
 {
        return __anon_inode_getfd(name, fops, priv, flags, context_inode, true);
 }
-EXPORT_SYMBOL_GPL(anon_inode_getfd_secure);
 
 static int __init anon_inode_init(void)
 {
index 7423a3557c6807a620831475e8608a690fd3315f..1a05cecda7cc5c47695911e7d715aa215f26688d 100644 (file)
@@ -27,7 +27,6 @@ bcachefs-y            :=      \
        checksum.o              \
        clock.o                 \
        compress.o              \
-       counters.o              \
        darray.o                \
        debug.o                 \
        dirent.o                \
@@ -71,6 +70,7 @@ bcachefs-y            :=      \
        reflink.o               \
        replicas.o              \
        sb-clean.o              \
+       sb-counters.o           \
        sb-downgrade.o          \
        sb-errors.o             \
        sb-members.o            \
index a09b9d00226a4e1dd510c0c097ac59e7cb7d3c77..fd3e175d83423261d68124cd26fc0351488ad05e 100644 (file)
@@ -273,7 +273,7 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
                bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
                                 c, err, alloc_key_dirty_sectors_0,
                                 "data_type %s but dirty_sectors==0",
-                                bch2_data_types[a.v->data_type]);
+                                bch2_data_type_str(a.v->data_type));
                break;
        case BCH_DATA_cached:
                bkey_fsck_err_on(!a.v->cached_sectors ||
@@ -321,16 +321,12 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
 {
        struct bch_alloc_v4 _a;
        const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
-       unsigned i;
 
        prt_newline(out);
        printbuf_indent_add(out, 2);
 
-       prt_printf(out, "gen %u oldest_gen %u data_type %s",
-              a->gen, a->oldest_gen,
-              a->data_type < BCH_DATA_NR
-              ? bch2_data_types[a->data_type]
-              : "(invalid data type)");
+       prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
+       bch2_prt_data_type(out, a->data_type);
        prt_newline(out);
        prt_printf(out, "journal_seq       %llu",       a->journal_seq);
        prt_newline(out);
@@ -353,23 +349,6 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
        prt_printf(out, "fragmentation     %llu",       a->fragmentation_lru);
        prt_newline(out);
        prt_printf(out, "bp_start          %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
-       prt_newline(out);
-
-       if (BCH_ALLOC_V4_NR_BACKPOINTERS(a)) {
-               struct bkey_s_c_alloc_v4 a_raw = bkey_s_c_to_alloc_v4(k);
-               const struct bch_backpointer *bps = alloc_v4_backpointers_c(a_raw.v);
-
-               prt_printf(out, "backpointers:     %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v));
-               printbuf_indent_add(out, 2);
-
-               for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v); i++) {
-                       prt_newline(out);
-                       bch2_backpointer_to_text(out, &bps[i]);
-               }
-
-               printbuf_indent_sub(out, 2);
-       }
-
        printbuf_indent_sub(out, 2);
 }
 
@@ -839,7 +818,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
                }
        }
 
-       if (!(flags & BTREE_TRIGGER_TRANSACTIONAL) && (flags & BTREE_TRIGGER_INSERT)) {
+       if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
                struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
                u64 journal_seq = trans->journal_res.seq;
                u64 bucket_journal_seq = new_a->journal_seq;
@@ -1625,13 +1604,36 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
        return ret;
 }
 
+struct discard_buckets_state {
+       u64             seen;
+       u64             open;
+       u64             need_journal_commit;
+       u64             discarded;
+       struct bch_dev  *ca;
+       u64             need_journal_commit_this_dev;
+};
+
+static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
+{
+       if (s->ca == ca)
+               return;
+
+       if (s->ca && s->need_journal_commit_this_dev >
+           bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
+               bch2_journal_flush_async(&c->journal, NULL);
+
+       if (s->ca)
+               percpu_ref_put(&s->ca->ref);
+       if (ca)
+               percpu_ref_get(&ca->ref);
+       s->ca = ca;
+       s->need_journal_commit_this_dev = 0;
+}
+
 static int bch2_discard_one_bucket(struct btree_trans *trans,
                                   struct btree_iter *need_discard_iter,
                                   struct bpos *discard_pos_done,
-                                  u64 *seen,
-                                  u64 *open,
-                                  u64 *need_journal_commit,
-                                  u64 *discarded)
+                                  struct discard_buckets_state *s)
 {
        struct bch_fs *c = trans->c;
        struct bpos pos = need_discard_iter->pos;
@@ -1643,20 +1645,24 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
        int ret = 0;
 
        ca = bch_dev_bkey_exists(c, pos.inode);
+
        if (!percpu_ref_tryget(&ca->io_ref)) {
                bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
                return 0;
        }
 
+       discard_buckets_next_dev(c, s, ca);
+
        if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
-               (*open)++;
+               s->open++;
                goto out;
        }
 
        if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
                        c->journal.flushed_seq_ondisk,
                        pos.inode, pos.offset)) {
-               (*need_journal_commit)++;
+               s->need_journal_commit++;
+               s->need_journal_commit_this_dev++;
                goto out;
        }
 
@@ -1709,7 +1715,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
                 * This works without any other locks because this is the only
                 * thread that removes items from the need_discard tree
                 */
-               bch2_trans_unlock(trans);
+               bch2_trans_unlock_long(trans);
                blkdev_issue_discard(ca->disk_sb.bdev,
                                     k.k->p.offset * ca->mi.bucket_size,
                                     ca->mi.bucket_size,
@@ -1732,9 +1738,9 @@ write:
                goto out;
 
        count_event(c, bucket_discard);
-       (*discarded)++;
+       s->discarded++;
 out:
-       (*seen)++;
+       s->seen++;
        bch2_trans_iter_exit(trans, &iter);
        percpu_ref_put(&ca->io_ref);
        printbuf_exit(&buf);
@@ -1744,7 +1750,7 @@ out:
 static void bch2_do_discards_work(struct work_struct *work)
 {
        struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
-       u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
+       struct discard_buckets_state s = {};
        struct bpos discard_pos_done = POS_MAX;
        int ret;
 
@@ -1756,19 +1762,14 @@ static void bch2_do_discards_work(struct work_struct *work)
        ret = bch2_trans_run(c,
                for_each_btree_key(trans, iter,
                                   BTREE_ID_need_discard, POS_MIN, 0, k,
-                       bch2_discard_one_bucket(trans, &iter, &discard_pos_done,
-                                               &seen,
-                                               &open,
-                                               &need_journal_commit,
-                                               &discarded)));
-
-       if (need_journal_commit * 2 > seen)
-               bch2_journal_flush_async(&c->journal, NULL);
+                       bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));
 
-       bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+       discard_buckets_next_dev(c, &s, NULL);
 
-       trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
+       trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
                              bch2_err_str(ret));
+
+       bch2_write_ref_put(c, BCH_WRITE_REF_discard);
 }
 
 void bch2_do_discards(struct bch_fs *c)
diff --git a/fs/bcachefs/alloc_background_format.h b/fs/bcachefs/alloc_background_format.h
new file mode 100644 (file)
index 0000000..b4ec20b
--- /dev/null
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_ALLOC_BACKGROUND_FORMAT_H
+#define _BCACHEFS_ALLOC_BACKGROUND_FORMAT_H
+
+struct bch_alloc {
+       struct bch_val          v;
+       __u8                    fields;
+       __u8                    gen;
+       __u8                    data[];
+} __packed __aligned(8);
+
+#define BCH_ALLOC_FIELDS_V1()                  \
+       x(read_time,            16)             \
+       x(write_time,           16)             \
+       x(data_type,            8)              \
+       x(dirty_sectors,        16)             \
+       x(cached_sectors,       16)             \
+       x(oldest_gen,           8)              \
+       x(stripe,               32)             \
+       x(stripe_redundancy,    8)
+
+enum {
+#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
+       BCH_ALLOC_FIELDS_V1()
+#undef x
+};
+
+struct bch_alloc_v2 {
+       struct bch_val          v;
+       __u8                    nr_fields;
+       __u8                    gen;
+       __u8                    oldest_gen;
+       __u8                    data_type;
+       __u8                    data[];
+} __packed __aligned(8);
+
+#define BCH_ALLOC_FIELDS_V2()                  \
+       x(read_time,            64)             \
+       x(write_time,           64)             \
+       x(dirty_sectors,        32)             \
+       x(cached_sectors,       32)             \
+       x(stripe,               32)             \
+       x(stripe_redundancy,    8)
+
+struct bch_alloc_v3 {
+       struct bch_val          v;
+       __le64                  journal_seq;
+       __le32                  flags;
+       __u8                    nr_fields;
+       __u8                    gen;
+       __u8                    oldest_gen;
+       __u8                    data_type;
+       __u8                    data[];
+} __packed __aligned(8);
+
+LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
+LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)
+
+struct bch_alloc_v4 {
+       struct bch_val          v;
+       __u64                   journal_seq;
+       __u32                   flags;
+       __u8                    gen;
+       __u8                    oldest_gen;
+       __u8                    data_type;
+       __u8                    stripe_redundancy;
+       __u32                   dirty_sectors;
+       __u32                   cached_sectors;
+       __u64                   io_time[2];
+       __u32                   stripe;
+       __u32                   nr_external_backpointers;
+       __u64                   fragmentation_lru;
+} __packed __aligned(8);
+
+#define BCH_ALLOC_V4_U64s_V0   6
+#define BCH_ALLOC_V4_U64s      (sizeof(struct bch_alloc_v4) / sizeof(__u64))
+
+BITMASK(BCH_ALLOC_V4_NEED_DISCARD,     struct bch_alloc_v4, flags,  0,  1)
+BITMASK(BCH_ALLOC_V4_NEED_INC_GEN,     struct bch_alloc_v4, flags,  1,  2)
+BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags,  2,  8)
+BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS,  struct bch_alloc_v4, flags,  8,  14)
+
+#define KEY_TYPE_BUCKET_GENS_BITS      8
+#define KEY_TYPE_BUCKET_GENS_NR                (1U << KEY_TYPE_BUCKET_GENS_BITS)
+#define KEY_TYPE_BUCKET_GENS_MASK      (KEY_TYPE_BUCKET_GENS_NR - 1)
+
+struct bch_bucket_gens {
+       struct bch_val          v;
+       u8                      gens[KEY_TYPE_BUCKET_GENS_NR];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_ALLOC_BACKGROUND_FORMAT_H */
index b0ff47998a9440912f940dc09e27b34e6341cb9e..633d3223b353f83e83501601024dd262952236c6 100644 (file)
@@ -1525,10 +1525,11 @@ static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, str
        unsigned data_type = ob->data_type;
        barrier(); /* READ_ONCE() doesn't work on bitfields */
 
-       prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u",
+       prt_printf(out, "%zu ref %u ",
                   ob - c->open_buckets,
-                  atomic_read(&ob->pin),
-                  data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type",
+                  atomic_read(&ob->pin));
+       bch2_prt_data_type(out, data_type);
+       prt_printf(out, " %u:%llu gen %u allocated %u/%u",
                   ob->dev, ob->bucket, ob->gen,
                   ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
        if (ob->ec)
index e358a2ffffdea48c80eee18ab299cd7103d72991..b4dc319bcb2bc0a5363e74f6d2096d3b5652599d 100644 (file)
@@ -400,13 +400,24 @@ int bch2_check_btree_backpointers(struct bch_fs *c)
        return ret;
 }
 
+static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
+{
+       return bpos_eq(l.k->p, r.k->p) &&
+               bkey_bytes(l.k) == bkey_bytes(r.k) &&
+               !memcmp(l.v, r.v, bkey_val_bytes(l.k));
+}
+
+struct extents_to_bp_state {
+       struct bpos     bucket_start;
+       struct bpos     bucket_end;
+       struct bkey_buf last_flushed;
+};
+
 static int check_bp_exists(struct btree_trans *trans,
+                          struct extents_to_bp_state *s,
                           struct bpos bucket,
                           struct bch_backpointer bp,
-                          struct bkey_s_c orig_k,
-                          struct bpos bucket_start,
-                          struct bpos bucket_end,
-                          struct bkey_buf *last_flushed)
+                          struct bkey_s_c orig_k)
 {
        struct bch_fs *c = trans->c;
        struct btree_iter bp_iter = { NULL };
@@ -417,8 +428,8 @@ static int check_bp_exists(struct btree_trans *trans,
 
        bch2_bkey_buf_init(&tmp);
 
-       if (bpos_lt(bucket, bucket_start) ||
-           bpos_gt(bucket, bucket_end))
+       if (bpos_lt(bucket, s->bucket_start) ||
+           bpos_gt(bucket, s->bucket_end))
                return 0;
 
        if (!bch2_dev_bucket_exists(c, bucket))
@@ -433,11 +444,9 @@ static int check_bp_exists(struct btree_trans *trans,
 
        if (bp_k.k->type != KEY_TYPE_backpointer ||
            memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
-               if (!bpos_eq(orig_k.k->p, last_flushed->k->k.p) ||
-                   bkey_bytes(orig_k.k) != bkey_bytes(&last_flushed->k->k) ||
-                   memcmp(orig_k.v, &last_flushed->k->v, bkey_val_bytes(orig_k.k))) {
-                       bch2_bkey_buf_reassemble(&tmp, c, orig_k);
+               bch2_bkey_buf_reassemble(&tmp, c, orig_k);
 
+               if (!bkey_and_val_eq(orig_k, bkey_i_to_s_c(s->last_flushed.k))) {
                        if (bp.level) {
                                bch2_trans_unlock(trans);
                                bch2_btree_interior_updates_flush(c);
@@ -447,7 +456,7 @@ static int check_bp_exists(struct btree_trans *trans,
                        if (ret)
                                goto err;
 
-                       bch2_bkey_buf_copy(last_flushed, c, tmp.k);
+                       bch2_bkey_buf_copy(&s->last_flushed, c, tmp.k);
                        ret = -BCH_ERR_transaction_restart_write_buffer_flush;
                        goto out;
                }
@@ -475,10 +484,8 @@ missing:
 }
 
 static int check_extent_to_backpointers(struct btree_trans *trans,
+                                       struct extents_to_bp_state *s,
                                        enum btree_id btree, unsigned level,
-                                       struct bpos bucket_start,
-                                       struct bpos bucket_end,
-                                       struct bkey_buf *last_flushed,
                                        struct bkey_s_c k)
 {
        struct bch_fs *c = trans->c;
@@ -498,9 +505,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
                bch2_extent_ptr_to_bp(c, btree, level,
                                      k, p, &bucket_pos, &bp);
 
-               ret = check_bp_exists(trans, bucket_pos, bp, k,
-                                     bucket_start, bucket_end,
-                                     last_flushed);
+               ret = check_bp_exists(trans, s, bucket_pos, bp, k);
                if (ret)
                        return ret;
        }
@@ -509,10 +514,8 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
 }
 
 static int check_btree_root_to_backpointers(struct btree_trans *trans,
+                                           struct extents_to_bp_state *s,
                                            enum btree_id btree_id,
-                                           struct bpos bucket_start,
-                                           struct bpos bucket_end,
-                                           struct bkey_buf *last_flushed,
                                            int *level)
 {
        struct bch_fs *c = trans->c;
@@ -536,9 +539,7 @@ retry:
        *level = b->c.level;
 
        k = bkey_i_to_s_c(&b->key);
-       ret = check_extent_to_backpointers(trans, btree_id, b->c.level + 1,
-                                     bucket_start, bucket_end,
-                                     last_flushed, k);
+       ret = check_extent_to_backpointers(trans, s, btree_id, b->c.level + 1, k);
 err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
@@ -559,7 +560,7 @@ static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
 
        si_meminfo(&i);
        mem_bytes = i.totalram * i.mem_unit;
-       return div_u64(mem_bytes >> 1, btree_bytes(c));
+       return div_u64(mem_bytes >> 1, c->opts.btree_node_size);
 }
 
 static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
@@ -610,43 +611,35 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
 }
 
 static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
-                                                  struct bpos bucket_start,
-                                                  struct bpos bucket_end)
+                                                  struct extents_to_bp_state *s)
 {
        struct bch_fs *c = trans->c;
-       struct btree_iter iter;
-       enum btree_id btree_id;
-       struct bkey_s_c k;
-       struct bkey_buf last_flushed;
        int ret = 0;
 
-       bch2_bkey_buf_init(&last_flushed);
-       bkey_init(&last_flushed.k->k);
-
-       for (btree_id = 0; btree_id < btree_id_nr_alive(c); btree_id++) {
+       for (enum btree_id btree_id = 0;
+            btree_id < btree_id_nr_alive(c);
+            btree_id++) {
                int level, depth = btree_type_has_ptrs(btree_id) ? 0 : 1;
 
                ret = commit_do(trans, NULL, NULL,
                                BCH_TRANS_COMMIT_no_enospc,
-                               check_btree_root_to_backpointers(trans, btree_id,
-                                                       bucket_start, bucket_end,
-                                                       &last_flushed, &level));
+                               check_btree_root_to_backpointers(trans, s, btree_id, &level));
                if (ret)
                        return ret;
 
                while (level >= depth) {
+                       struct btree_iter iter;
                        bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
                                                  level,
                                                  BTREE_ITER_PREFETCH);
                        while (1) {
                                bch2_trans_begin(trans);
-                               k = bch2_btree_iter_peek(&iter);
+
+                               struct bkey_s_c k = bch2_btree_iter_peek(&iter);
                                if (!k.k)
                                        break;
                                ret = bkey_err(k) ?:
-                                       check_extent_to_backpointers(trans, btree_id, level,
-                                                                    bucket_start, bucket_end,
-                                                                    &last_flushed, k) ?:
+                                       check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
                                        bch2_trans_commit(trans, NULL, NULL,
                                                          BCH_TRANS_COMMIT_no_enospc);
                                if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
@@ -668,7 +661,6 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
                }
        }
 
-       bch2_bkey_buf_exit(&last_flushed, c);
        return 0;
 }
 
@@ -731,37 +723,43 @@ static int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
 int bch2_check_extents_to_backpointers(struct bch_fs *c)
 {
        struct btree_trans *trans = bch2_trans_get(c);
-       struct bpos start = POS_MIN, end;
+       struct extents_to_bp_state s = { .bucket_start = POS_MIN };
        int ret;
 
+       bch2_bkey_buf_init(&s.last_flushed);
+       bkey_init(&s.last_flushed.k->k);
+
        while (1) {
-               ret = bch2_get_alloc_in_memory_pos(trans, start, &end);
+               ret = bch2_get_alloc_in_memory_pos(trans, s.bucket_start, &s.bucket_end);
                if (ret)
                        break;
 
-               if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
+               if ( bpos_eq(s.bucket_start, POS_MIN) &&
+                   !bpos_eq(s.bucket_end, SPOS_MAX))
                        bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
                                    __func__, btree_nodes_fit_in_ram(c));
 
-               if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
+               if (!bpos_eq(s.bucket_start, POS_MIN) ||
+                   !bpos_eq(s.bucket_end, SPOS_MAX)) {
                        struct printbuf buf = PRINTBUF;
 
                        prt_str(&buf, "check_extents_to_backpointers(): ");
-                       bch2_bpos_to_text(&buf, start);
+                       bch2_bpos_to_text(&buf, s.bucket_start);
                        prt_str(&buf, "-");
-                       bch2_bpos_to_text(&buf, end);
+                       bch2_bpos_to_text(&buf, s.bucket_end);
 
                        bch_verbose(c, "%s", buf.buf);
                        printbuf_exit(&buf);
                }
 
-               ret = bch2_check_extents_to_backpointers_pass(trans, start, end);
-               if (ret || bpos_eq(end, SPOS_MAX))
+               ret = bch2_check_extents_to_backpointers_pass(trans, &s);
+               if (ret || bpos_eq(s.bucket_end, SPOS_MAX))
                        break;
 
-               start = bpos_successor(end);
+               s.bucket_start = bpos_successor(s.bucket_end);
        }
        bch2_trans_put(trans);
+       bch2_bkey_buf_exit(&s.last_flushed, c);
 
        bch_err_fn(c, ret);
        return ret;
index 737e2396ade7ec44edf4f18738e286b5da3189bd..327365a9feac4e8fa69575ec6fe6157fd3edb127 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
 #define _BCACHEFS_BACKPOINTERS_BACKGROUND_H
 
+#include "btree_cache.h"
 #include "btree_iter.h"
 #include "btree_update.h"
 #include "buckets.h"
index dac383e3718163b6566eb2e6a4ff305fb65da715..b80c6c9efd8cef95b46b5b45b21f639e18373755 100644 (file)
@@ -1204,11 +1204,6 @@ static inline unsigned block_sectors(const struct bch_fs *c)
        return c->opts.block_size >> 9;
 }
 
-static inline size_t btree_sectors(const struct bch_fs *c)
-{
-       return c->opts.btree_node_size >> 9;
-}
-
 static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
 {
        return c->btree_key_cache_btrees & (1U << btree);
index 0d5ac4184fbcef5a2b7ae618d6bdf81478f09530..0668b682a21ca8e035cae73f73e6774c99eaeb94 100644 (file)
@@ -417,600 +417,12 @@ struct bch_set {
        struct bch_val          v;
 };
 
-/* Extents */
-
-/*
- * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
- * preceded by checksum/compression information (bch_extent_crc32 or
- * bch_extent_crc64).
- *
- * One major determining factor in the format of extents is how we handle and
- * represent extents that have been partially overwritten and thus trimmed:
- *
- * If an extent is not checksummed or compressed, when the extent is trimmed we
- * don't have to remember the extent we originally allocated and wrote: we can
- * merely adjust ptr->offset to point to the start of the data that is currently
- * live. The size field in struct bkey records the current (live) size of the
- * extent, and is also used to mean "size of region on disk that we point to" in
- * this case.
- *
- * Thus an extent that is not checksummed or compressed will consist only of a
- * list of bch_extent_ptrs, with none of the fields in
- * bch_extent_crc32/bch_extent_crc64.
- *
- * When an extent is checksummed or compressed, it's not possible to read only
- * the data that is currently live: we have to read the entire extent that was
- * originally written, and then return only the part of the extent that is
- * currently live.
- *
- * Thus, in addition to the current size of the extent in struct bkey, we need
- * to store the size of the originally allocated space - this is the
- * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
- * when the extent is trimmed, instead of modifying the offset field of the
- * pointer, we keep a second smaller offset field - "offset into the original
- * extent of the currently live region".
- *
- * The other major determining factor is replication and data migration:
- *
- * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
- * write, we will initially write all the replicas in the same format, with the
- * same checksum type and compression format - however, when copygc runs later (or
- * tiering/cache promotion, anything that moves data), it is not in general
- * going to rewrite all the pointers at once - one of the replicas may be in a
- * bucket on one device that has very little fragmentation while another lives
- * in a bucket that has become heavily fragmented, and thus is being rewritten
- * sooner than the rest.
- *
- * Thus it will only move a subset of the pointers (or in the case of
- * tiering/cache promotion perhaps add a single pointer without dropping any
- * current pointers), and if the extent has been partially overwritten it must
- * write only the currently live portion (or copygc would not be able to reduce
- * fragmentation!) - which necessitates a different bch_extent_crc format for
- * the new pointer.
- *
- * But in the interests of space efficiency, we don't want to store one
- * bch_extent_crc for each pointer if we don't have to.
- *
- * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
- * bch_extent_ptrs appended arbitrarily one after the other. We determine the
- * type of a given entry with a scheme similar to utf8 (except we're encoding a
- * type, not a size), encoding the type in the position of the first set bit:
- *
- * bch_extent_crc32    - 0b1
- * bch_extent_ptr      - 0b10
- * bch_extent_crc64    - 0b100
- *
- * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
- * bch_extent_crc64 is the least constrained).
- *
- * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
- * until the next bch_extent_crc32/64.
- *
- * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
- * is neither checksummed nor compressed.
- */
-
 /* 128 bits, sufficient for cryptographic MACs: */
 struct bch_csum {
        __le64                  lo;
        __le64                  hi;
 } __packed __aligned(8);
 
-#define BCH_EXTENT_ENTRY_TYPES()               \
-       x(ptr,                  0)              \
-       x(crc32,                1)              \
-       x(crc64,                2)              \
-       x(crc128,               3)              \
-       x(stripe_ptr,           4)              \
-       x(rebalance,            5)
-#define BCH_EXTENT_ENTRY_MAX   6
-
-enum bch_extent_entry_type {
-#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
-       BCH_EXTENT_ENTRY_TYPES()
-#undef x
-};
-
-/* Compressed/uncompressed size are stored biased by 1: */
-struct bch_extent_crc32 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       __u32                   type:2,
-                               _compressed_size:7,
-                               _uncompressed_size:7,
-                               offset:7,
-                               _unused:1,
-                               csum_type:4,
-                               compression_type:4;
-       __u32                   csum;
-#elif defined (__BIG_ENDIAN_BITFIELD)
-       __u32                   csum;
-       __u32                   compression_type:4,
-                               csum_type:4,
-                               _unused:1,
-                               offset:7,
-                               _uncompressed_size:7,
-                               _compressed_size:7,
-                               type:2;
-#endif
-} __packed __aligned(8);
-
-#define CRC32_SIZE_MAX         (1U << 7)
-#define CRC32_NONCE_MAX                0
-
-struct bch_extent_crc64 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       __u64                   type:3,
-                               _compressed_size:9,
-                               _uncompressed_size:9,
-                               offset:9,
-                               nonce:10,
-                               csum_type:4,
-                               compression_type:4,
-                               csum_hi:16;
-#elif defined (__BIG_ENDIAN_BITFIELD)
-       __u64                   csum_hi:16,
-                               compression_type:4,
-                               csum_type:4,
-                               nonce:10,
-                               offset:9,
-                               _uncompressed_size:9,
-                               _compressed_size:9,
-                               type:3;
-#endif
-       __u64                   csum_lo;
-} __packed __aligned(8);
-
-#define CRC64_SIZE_MAX         (1U << 9)
-#define CRC64_NONCE_MAX                ((1U << 10) - 1)
-
-struct bch_extent_crc128 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       __u64                   type:4,
-                               _compressed_size:13,
-                               _uncompressed_size:13,
-                               offset:13,
-                               nonce:13,
-                               csum_type:4,
-                               compression_type:4;
-#elif defined (__BIG_ENDIAN_BITFIELD)
-       __u64                   compression_type:4,
-                               csum_type:4,
-                               nonce:13,
-                               offset:13,
-                               _uncompressed_size:13,
-                               _compressed_size:13,
-                               type:4;
-#endif
-       struct bch_csum         csum;
-} __packed __aligned(8);
-
-#define CRC128_SIZE_MAX                (1U << 13)
-#define CRC128_NONCE_MAX       ((1U << 13) - 1)
-
-/*
- * @reservation - pointer hasn't been written to, just reserved
- */
-struct bch_extent_ptr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       __u64                   type:1,
-                               cached:1,
-                               unused:1,
-                               unwritten:1,
-                               offset:44, /* 8 petabytes */
-                               dev:8,
-                               gen:8;
-#elif defined (__BIG_ENDIAN_BITFIELD)
-       __u64                   gen:8,
-                               dev:8,
-                               offset:44,
-                               unwritten:1,
-                               unused:1,
-                               cached:1,
-                               type:1;
-#endif
-} __packed __aligned(8);
-
-struct bch_extent_stripe_ptr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       __u64                   type:5,
-                               block:8,
-                               redundancy:4,
-                               idx:47;
-#elif defined (__BIG_ENDIAN_BITFIELD)
-       __u64                   idx:47,
-                               redundancy:4,
-                               block:8,
-                               type:5;
-#endif
-};
-
-struct bch_extent_rebalance {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       __u64                   type:6,
-                               unused:34,
-                               compression:8, /* enum bch_compression_opt */
-                               target:16;
-#elif defined (__BIG_ENDIAN_BITFIELD)
-       __u64                   target:16,
-                               compression:8,
-                               unused:34,
-                               type:6;
-#endif
-};
-
-union bch_extent_entry {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ||  __BITS_PER_LONG == 64
-       unsigned long                   type;
-#elif __BITS_PER_LONG == 32
-       struct {
-               unsigned long           pad;
-               unsigned long           type;
-       };
-#else
-#error edit for your odd byteorder.
-#endif
-
-#define x(f, n) struct bch_extent_##f  f;
-       BCH_EXTENT_ENTRY_TYPES()
-#undef x
-};
-
-struct bch_btree_ptr {
-       struct bch_val          v;
-
-       __u64                   _data[0];
-       struct bch_extent_ptr   start[];
-} __packed __aligned(8);
-
-struct bch_btree_ptr_v2 {
-       struct bch_val          v;
-
-       __u64                   mem_ptr;
-       __le64                  seq;
-       __le16                  sectors_written;
-       __le16                  flags;
-       struct bpos             min_key;
-       __u64                   _data[0];
-       struct bch_extent_ptr   start[];
-} __packed __aligned(8);
-
-LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,  struct bch_btree_ptr_v2, flags, 0, 1);
-
-struct bch_extent {
-       struct bch_val          v;
-
-       __u64                   _data[0];
-       union bch_extent_entry  start[];
-} __packed __aligned(8);
-
-struct bch_reservation {
-       struct bch_val          v;
-
-       __le32                  generation;
-       __u8                    nr_replicas;
-       __u8                    pad[3];
-} __packed __aligned(8);
-
-/* Maximum size (in u64s) a single pointer could be: */
-#define BKEY_EXTENT_PTR_U64s_MAX\
-       ((sizeof(struct bch_extent_crc128) +                    \
-         sizeof(struct bch_extent_ptr)) / sizeof(__u64))
-
-/* Maximum possible size of an entire extent value: */
-#define BKEY_EXTENT_VAL_U64s_MAX                               \
-       (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
-
-/* * Maximum possible size of an entire extent, key + value: */
-#define BKEY_EXTENT_U64s_MAX           (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
-
-/* Btree pointers don't carry around checksums: */
-#define BKEY_BTREE_PTR_VAL_U64s_MAX                            \
-       ((sizeof(struct bch_btree_ptr_v2) +                     \
-         sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
-#define BKEY_BTREE_PTR_U64s_MAX                                        \
-       (BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
-
-/* Inodes */
-
-#define BLOCKDEV_INODE_MAX     4096
-
-#define BCACHEFS_ROOT_INO      4096
-
-struct bch_inode {
-       struct bch_val          v;
-
-       __le64                  bi_hash_seed;
-       __le32                  bi_flags;
-       __le16                  bi_mode;
-       __u8                    fields[];
-} __packed __aligned(8);
-
-struct bch_inode_v2 {
-       struct bch_val          v;
-
-       __le64                  bi_journal_seq;
-       __le64                  bi_hash_seed;
-       __le64                  bi_flags;
-       __le16                  bi_mode;
-       __u8                    fields[];
-} __packed __aligned(8);
-
-struct bch_inode_v3 {
-       struct bch_val          v;
-
-       __le64                  bi_journal_seq;
-       __le64                  bi_hash_seed;
-       __le64                  bi_flags;
-       __le64                  bi_sectors;
-       __le64                  bi_size;
-       __le64                  bi_version;
-       __u8                    fields[];
-} __packed __aligned(8);
-
-#define INODEv3_FIELDS_START_INITIAL   6
-#define INODEv3_FIELDS_START_CUR       (offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
-
-struct bch_inode_generation {
-       struct bch_val          v;
-
-       __le32                  bi_generation;
-       __le32                  pad;
-} __packed __aligned(8);
-
-/*
- * bi_subvol and bi_parent_subvol are only set for subvolume roots:
- */
-
-#define BCH_INODE_FIELDS_v2()                  \
-       x(bi_atime,                     96)     \
-       x(bi_ctime,                     96)     \
-       x(bi_mtime,                     96)     \
-       x(bi_otime,                     96)     \
-       x(bi_size,                      64)     \
-       x(bi_sectors,                   64)     \
-       x(bi_uid,                       32)     \
-       x(bi_gid,                       32)     \
-       x(bi_nlink,                     32)     \
-       x(bi_generation,                32)     \
-       x(bi_dev,                       32)     \
-       x(bi_data_checksum,             8)      \
-       x(bi_compression,               8)      \
-       x(bi_project,                   32)     \
-       x(bi_background_compression,    8)      \
-       x(bi_data_replicas,             8)      \
-       x(bi_promote_target,            16)     \
-       x(bi_foreground_target,         16)     \
-       x(bi_background_target,         16)     \
-       x(bi_erasure_code,              16)     \
-       x(bi_fields_set,                16)     \
-       x(bi_dir,                       64)     \
-       x(bi_dir_offset,                64)     \
-       x(bi_subvol,                    32)     \
-       x(bi_parent_subvol,             32)
-
-#define BCH_INODE_FIELDS_v3()                  \
-       x(bi_atime,                     96)     \
-       x(bi_ctime,                     96)     \
-       x(bi_mtime,                     96)     \
-       x(bi_otime,                     96)     \
-       x(bi_uid,                       32)     \
-       x(bi_gid,                       32)     \
-       x(bi_nlink,                     32)     \
-       x(bi_generation,                32)     \
-       x(bi_dev,                       32)     \
-       x(bi_data_checksum,             8)      \
-       x(bi_compression,               8)      \
-       x(bi_project,                   32)     \
-       x(bi_background_compression,    8)      \
-       x(bi_data_replicas,             8)      \
-       x(bi_promote_target,            16)     \
-       x(bi_foreground_target,         16)     \
-       x(bi_background_target,         16)     \
-       x(bi_erasure_code,              16)     \
-       x(bi_fields_set,                16)     \
-       x(bi_dir,                       64)     \
-       x(bi_dir_offset,                64)     \
-       x(bi_subvol,                    32)     \
-       x(bi_parent_subvol,             32)     \
-       x(bi_nocow,                     8)
-
-/* subset of BCH_INODE_FIELDS */
-#define BCH_INODE_OPTS()                       \
-       x(data_checksum,                8)      \
-       x(compression,                  8)      \
-       x(project,                      32)     \
-       x(background_compression,       8)      \
-       x(data_replicas,                8)      \
-       x(promote_target,               16)     \
-       x(foreground_target,            16)     \
-       x(background_target,            16)     \
-       x(erasure_code,                 16)     \
-       x(nocow,                        8)
-
-enum inode_opt_id {
-#define x(name, ...)                           \
-       Inode_opt_##name,
-       BCH_INODE_OPTS()
-#undef  x
-       Inode_opt_nr,
-};
-
-#define BCH_INODE_FLAGS()                      \
-       x(sync,                         0)      \
-       x(immutable,                    1)      \
-       x(append,                       2)      \
-       x(nodump,                       3)      \
-       x(noatime,                      4)      \
-       x(i_size_dirty,                 5)      \
-       x(i_sectors_dirty,              6)      \
-       x(unlinked,                     7)      \
-       x(backptr_untrusted,            8)
-
-/* bits 20+ reserved for packed fields below: */
-
-enum bch_inode_flags {
-#define x(t, n)        BCH_INODE_##t = 1U << n,
-       BCH_INODE_FLAGS()
-#undef x
-};
-
-enum __bch_inode_flags {
-#define x(t, n)        __BCH_INODE_##t = n,
-       BCH_INODE_FLAGS()
-#undef x
-};
-
-LE32_BITMASK(INODE_STR_HASH,   struct bch_inode, bi_flags, 20, 24);
-LE32_BITMASK(INODE_NR_FIELDS,  struct bch_inode, bi_flags, 24, 31);
-LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
-
-LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
-LE64_BITMASK(INODEv2_NR_FIELDS,        struct bch_inode_v2, bi_flags, 24, 31);
-
-LE64_BITMASK(INODEv3_STR_HASH, struct bch_inode_v3, bi_flags, 20, 24);
-LE64_BITMASK(INODEv3_NR_FIELDS,        struct bch_inode_v3, bi_flags, 24, 31);
-
-LE64_BITMASK(INODEv3_FIELDS_START,
-                               struct bch_inode_v3, bi_flags, 31, 36);
-LE64_BITMASK(INODEv3_MODE,     struct bch_inode_v3, bi_flags, 36, 52);
-
-/* Dirents */
-
-/*
- * Dirents (and xattrs) have to implement string lookups; since our b-tree
- * doesn't support arbitrary length strings for the key, we instead index by a
- * 64 bit hash (currently truncated sha1) of the string, stored in the offset
- * field of the key - using linear probing to resolve hash collisions. This also
- * provides us with the readdir cookie posix requires.
- *
- * Linear probing requires us to use whiteouts for deletions, in the event of a
- * collision:
- */
-
-struct bch_dirent {
-       struct bch_val          v;
-
-       /* Target inode number: */
-       union {
-       __le64                  d_inum;
-       struct {                /* DT_SUBVOL */
-       __le32                  d_child_subvol;
-       __le32                  d_parent_subvol;
-       };
-       };
-
-       /*
-        * Copy of mode bits 12-15 from the target inode - so userspace can get
-        * the filetype without having to do a stat()
-        */
-       __u8                    d_type;
-
-       __u8                    d_name[];
-} __packed __aligned(8);
-
-#define DT_SUBVOL      16
-#define BCH_DT_MAX     17
-
-#define BCH_NAME_MAX   512
-
-/* Xattrs */
-
-#define KEY_TYPE_XATTR_INDEX_USER                      0
-#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS  1
-#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
-#define KEY_TYPE_XATTR_INDEX_TRUSTED                   3
-#define KEY_TYPE_XATTR_INDEX_SECURITY          4
-
-struct bch_xattr {
-       struct bch_val          v;
-       __u8                    x_type;
-       __u8                    x_name_len;
-       __le16                  x_val_len;
-       __u8                    x_name[];
-} __packed __aligned(8);
-
-/* Bucket/allocation information: */
-
-struct bch_alloc {
-       struct bch_val          v;
-       __u8                    fields;
-       __u8                    gen;
-       __u8                    data[];
-} __packed __aligned(8);
-
-#define BCH_ALLOC_FIELDS_V1()                  \
-       x(read_time,            16)             \
-       x(write_time,           16)             \
-       x(data_type,            8)              \
-       x(dirty_sectors,        16)             \
-       x(cached_sectors,       16)             \
-       x(oldest_gen,           8)              \
-       x(stripe,               32)             \
-       x(stripe_redundancy,    8)
-
-enum {
-#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
-       BCH_ALLOC_FIELDS_V1()
-#undef x
-};
-
-struct bch_alloc_v2 {
-       struct bch_val          v;
-       __u8                    nr_fields;
-       __u8                    gen;
-       __u8                    oldest_gen;
-       __u8                    data_type;
-       __u8                    data[];
-} __packed __aligned(8);
-
-#define BCH_ALLOC_FIELDS_V2()                  \
-       x(read_time,            64)             \
-       x(write_time,           64)             \
-       x(dirty_sectors,        32)             \
-       x(cached_sectors,       32)             \
-       x(stripe,               32)             \
-       x(stripe_redundancy,    8)
-
-struct bch_alloc_v3 {
-       struct bch_val          v;
-       __le64                  journal_seq;
-       __le32                  flags;
-       __u8                    nr_fields;
-       __u8                    gen;
-       __u8                    oldest_gen;
-       __u8                    data_type;
-       __u8                    data[];
-} __packed __aligned(8);
-
-LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
-LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)
-
-struct bch_alloc_v4 {
-       struct bch_val          v;
-       __u64                   journal_seq;
-       __u32                   flags;
-       __u8                    gen;
-       __u8                    oldest_gen;
-       __u8                    data_type;
-       __u8                    stripe_redundancy;
-       __u32                   dirty_sectors;
-       __u32                   cached_sectors;
-       __u64                   io_time[2];
-       __u32                   stripe;
-       __u32                   nr_external_backpointers;
-       __u64                   fragmentation_lru;
-} __packed __aligned(8);
-
-#define BCH_ALLOC_V4_U64s_V0   6
-#define BCH_ALLOC_V4_U64s      (sizeof(struct bch_alloc_v4) / sizeof(__u64))
-
-BITMASK(BCH_ALLOC_V4_NEED_DISCARD,     struct bch_alloc_v4, flags,  0,  1)
-BITMASK(BCH_ALLOC_V4_NEED_INC_GEN,     struct bch_alloc_v4, flags,  1,  2)
-BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags,  2,  8)
-BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS,  struct bch_alloc_v4, flags,  8,  14)
-
-#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX       40
-
 struct bch_backpointer {
        struct bch_val          v;
        __u8                    btree_id;
@@ -1021,154 +433,6 @@ struct bch_backpointer {
        struct bpos             pos;
 } __packed __aligned(8);
 
-#define KEY_TYPE_BUCKET_GENS_BITS      8
-#define KEY_TYPE_BUCKET_GENS_NR                (1U << KEY_TYPE_BUCKET_GENS_BITS)
-#define KEY_TYPE_BUCKET_GENS_MASK      (KEY_TYPE_BUCKET_GENS_NR - 1)
-
-struct bch_bucket_gens {
-       struct bch_val          v;
-       u8                      gens[KEY_TYPE_BUCKET_GENS_NR];
-} __packed __aligned(8);
-
-/* Quotas: */
-
-enum quota_types {
-       QTYP_USR                = 0,
-       QTYP_GRP                = 1,
-       QTYP_PRJ                = 2,
-       QTYP_NR                 = 3,
-};
-
-enum quota_counters {
-       Q_SPC                   = 0,
-       Q_INO                   = 1,
-       Q_COUNTERS              = 2,
-};
-
-struct bch_quota_counter {
-       __le64                  hardlimit;
-       __le64                  softlimit;
-};
-
-struct bch_quota {
-       struct bch_val          v;
-       struct bch_quota_counter c[Q_COUNTERS];
-} __packed __aligned(8);
-
-/* Erasure coding */
-
-struct bch_stripe {
-       struct bch_val          v;
-       __le16                  sectors;
-       __u8                    algorithm;
-       __u8                    nr_blocks;
-       __u8                    nr_redundant;
-
-       __u8                    csum_granularity_bits;
-       __u8                    csum_type;
-       __u8                    pad;
-
-       struct bch_extent_ptr   ptrs[];
-} __packed __aligned(8);
-
-/* Reflink: */
-
-struct bch_reflink_p {
-       struct bch_val          v;
-       __le64                  idx;
-       /*
-        * A reflink pointer might point to an indirect extent which is then
-        * later split (by copygc or rebalance). If we only pointed to part of
-        * the original indirect extent, and then one of the fragments is
-        * outside the range we point to, we'd leak a refcount: so when creating
-        * reflink pointers, we need to store pad values to remember the full
-        * range we were taking a reference on.
-        */
-       __le32                  front_pad;
-       __le32                  back_pad;
-} __packed __aligned(8);
-
-struct bch_reflink_v {
-       struct bch_val          v;
-       __le64                  refcount;
-       union bch_extent_entry  start[0];
-       __u64                   _data[];
-} __packed __aligned(8);
-
-struct bch_indirect_inline_data {
-       struct bch_val          v;
-       __le64                  refcount;
-       u8                      data[];
-};
-
-/* Inline data */
-
-struct bch_inline_data {
-       struct bch_val          v;
-       u8                      data[];
-};
-
-/* Subvolumes: */
-
-#define SUBVOL_POS_MIN         POS(0, 1)
-#define SUBVOL_POS_MAX         POS(0, S32_MAX)
-#define BCACHEFS_ROOT_SUBVOL   1
-
-struct bch_subvolume {
-       struct bch_val          v;
-       __le32                  flags;
-       __le32                  snapshot;
-       __le64                  inode;
-       /*
-        * Snapshot subvolumes form a tree, separate from the snapshot nodes
-        * tree - if this subvolume is a snapshot, this is the ID of the
-        * subvolume it was created from:
-        */
-       __le32                  parent;
-       __le32                  pad;
-       bch_le128               otime;
-};
-
-LE32_BITMASK(BCH_SUBVOLUME_RO,         struct bch_subvolume, flags,  0,  1)
-/*
- * We need to know whether a subvolume is a snapshot so we can know whether we
- * can delete it (or whether it should just be rm -rf'd)
- */
-LE32_BITMASK(BCH_SUBVOLUME_SNAP,       struct bch_subvolume, flags,  1,  2)
-LE32_BITMASK(BCH_SUBVOLUME_UNLINKED,   struct bch_subvolume, flags,  2,  3)
-
-/* Snapshots */
-
-struct bch_snapshot {
-       struct bch_val          v;
-       __le32                  flags;
-       __le32                  parent;
-       __le32                  children[2];
-       __le32                  subvol;
-       /* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
-       __le32                  tree;
-       __le32                  depth;
-       __le32                  skip[3];
-};
-
-LE32_BITMASK(BCH_SNAPSHOT_DELETED,     struct bch_snapshot, flags,  0,  1)
-
-/* True if a subvolume points to this snapshot node: */
-LE32_BITMASK(BCH_SNAPSHOT_SUBVOL,      struct bch_snapshot, flags,  1,  2)
-
-/*
- * Snapshot trees:
- *
- * The snapshot_trees btree gives us persistent indentifier for each tree of
- * bch_snapshot nodes, and allow us to record and easily find the root/master
- * subvolume that other snapshots were created from:
- */
-struct bch_snapshot_tree {
-       struct bch_val          v;
-       __le32                  master_subvol;
-       __le32                  root_snapshot;
-};
-
 /* LRU btree: */
 
 struct bch_lru {
@@ -1178,33 +442,6 @@ struct bch_lru {
 
 #define LRU_ID_STRIPES         (1U << 16)
 
-/* Logged operations btree: */
-
-struct bch_logged_op_truncate {
-       struct bch_val          v;
-       __le32                  subvol;
-       __le32                  pad;
-       __le64                  inum;
-       __le64                  new_i_size;
-};
-
-enum logged_op_finsert_state {
-       LOGGED_OP_FINSERT_start,
-       LOGGED_OP_FINSERT_shift_extents,
-       LOGGED_OP_FINSERT_finish,
-};
-
-struct bch_logged_op_finsert {
-       struct bch_val          v;
-       __u8                    state;
-       __u8                    pad[3];
-       __le32                  subvol;
-       __le64                  inum;
-       __le64                  dst_offset;
-       __le64                  src_offset;
-       __le64                  pos;
-};
-
 /* Optional/variable size superblock sections: */
 
 struct bch_sb_field {
@@ -1230,6 +467,19 @@ struct bch_sb_field {
        x(ext,                          13)     \
        x(downgrade,                    14)
 
+#include "alloc_background_format.h"
+#include "extents_format.h"
+#include "reflink_format.h"
+#include "ec_format.h"
+#include "inode_format.h"
+#include "dirent_format.h"
+#include "xattr_format.h"
+#include "quota_format.h"
+#include "logged_ops_format.h"
+#include "snapshot_format.h"
+#include "subvolume_format.h"
+#include "sb-counters_format.h"
+
 enum bch_sb_field_type {
 #define x(f, nr)       BCH_SB_FIELD_##f = nr,
        BCH_SB_FIELDS()
@@ -1465,23 +715,6 @@ struct bch_sb_field_replicas {
        struct bch_replicas_entry_v1 entries[];
 } __packed __aligned(8);
 
-/* BCH_SB_FIELD_quota: */
-
-struct bch_sb_quota_counter {
-       __le32                          timelimit;
-       __le32                          warnlimit;
-};
-
-struct bch_sb_quota_type {
-       __le64                          flags;
-       struct bch_sb_quota_counter     c[Q_COUNTERS];
-};
-
-struct bch_sb_field_quota {
-       struct bch_sb_field             field;
-       struct bch_sb_quota_type        q[QTYP_NR];
-} __packed __aligned(8);
-
 /* BCH_SB_FIELD_disk_groups: */
 
 #define BCH_SB_LABEL_SIZE              32
@@ -1500,101 +733,6 @@ struct bch_sb_field_disk_groups {
        struct bch_disk_group   entries[];
 } __packed __aligned(8);
 
-/* BCH_SB_FIELD_counters */
-
-#define BCH_PERSISTENT_COUNTERS()                              \
-       x(io_read,                                      0)      \
-       x(io_write,                                     1)      \
-       x(io_move,                                      2)      \
-       x(bucket_invalidate,                            3)      \
-       x(bucket_discard,                               4)      \
-       x(bucket_alloc,                                 5)      \
-       x(bucket_alloc_fail,                            6)      \
-       x(btree_cache_scan,                             7)      \
-       x(btree_cache_reap,                             8)      \
-       x(btree_cache_cannibalize,                      9)      \
-       x(btree_cache_cannibalize_lock,                 10)     \
-       x(btree_cache_cannibalize_lock_fail,            11)     \
-       x(btree_cache_cannibalize_unlock,               12)     \
-       x(btree_node_write,                             13)     \
-       x(btree_node_read,                              14)     \
-       x(btree_node_compact,                           15)     \
-       x(btree_node_merge,                             16)     \
-       x(btree_node_split,                             17)     \
-       x(btree_node_rewrite,                           18)     \
-       x(btree_node_alloc,                             19)     \
-       x(btree_node_free,                              20)     \
-       x(btree_node_set_root,                          21)     \
-       x(btree_path_relock_fail,                       22)     \
-       x(btree_path_upgrade_fail,                      23)     \
-       x(btree_reserve_get_fail,                       24)     \
-       x(journal_entry_full,                           25)     \
-       x(journal_full,                                 26)     \
-       x(journal_reclaim_finish,                       27)     \
-       x(journal_reclaim_start,                        28)     \
-       x(journal_write,                                29)     \
-       x(read_promote,                                 30)     \
-       x(read_bounce,                                  31)     \
-       x(read_split,                                   33)     \
-       x(read_retry,                                   32)     \
-       x(read_reuse_race,                              34)     \
-       x(move_extent_read,                             35)     \
-       x(move_extent_write,                            36)     \
-       x(move_extent_finish,                           37)     \
-       x(move_extent_fail,                             38)     \
-       x(move_extent_start_fail,                       39)     \
-       x(copygc,                                       40)     \
-       x(copygc_wait,                                  41)     \
-       x(gc_gens_end,                                  42)     \
-       x(gc_gens_start,                                43)     \
-       x(trans_blocked_journal_reclaim,                44)     \
-       x(trans_restart_btree_node_reused,              45)     \
-       x(trans_restart_btree_node_split,               46)     \
-       x(trans_restart_fault_inject,                   47)     \
-       x(trans_restart_iter_upgrade,                   48)     \
-       x(trans_restart_journal_preres_get,             49)     \
-       x(trans_restart_journal_reclaim,                50)     \
-       x(trans_restart_journal_res_get,                51)     \
-       x(trans_restart_key_cache_key_realloced,        52)     \
-       x(trans_restart_key_cache_raced,                53)     \
-       x(trans_restart_mark_replicas,                  54)     \
-       x(trans_restart_mem_realloced,                  55)     \
-       x(trans_restart_memory_allocation_failure,      56)     \
-       x(trans_restart_relock,                         57)     \
-       x(trans_restart_relock_after_fill,              58)     \
-       x(trans_restart_relock_key_cache_fill,          59)     \
-       x(trans_restart_relock_next_node,               60)     \
-       x(trans_restart_relock_parent_for_fill,         61)     \
-       x(trans_restart_relock_path,                    62)     \
-       x(trans_restart_relock_path_intent,             63)     \
-       x(trans_restart_too_many_iters,                 64)     \
-       x(trans_restart_traverse,                       65)     \
-       x(trans_restart_upgrade,                        66)     \
-       x(trans_restart_would_deadlock,                 67)     \
-       x(trans_restart_would_deadlock_write,           68)     \
-       x(trans_restart_injected,                       69)     \
-       x(trans_restart_key_cache_upgrade,              70)     \
-       x(trans_traverse_all,                           71)     \
-       x(transaction_commit,                           72)     \
-       x(write_super,                                  73)     \
-       x(trans_restart_would_deadlock_recursion_limit, 74)     \
-       x(trans_restart_write_buffer_flush,             75)     \
-       x(trans_restart_split_race,                     76)     \
-       x(write_buffer_flush_slowpath,                  77)     \
-       x(write_buffer_flush_sync,                      78)
-
-enum bch_persistent_counters {
-#define x(t, n, ...) BCH_COUNTER_##t,
-       BCH_PERSISTENT_COUNTERS()
-#undef x
-       BCH_COUNTER_NR
-};
-
-struct bch_sb_field_counters {
-       struct bch_sb_field     field;
-       __le64                  d[];
-};
-
 /*
  * On clean shutdown, store btree roots and current journal sequence number in
  * the superblock:
index abdb05507d162c7c06bb89ce96bf67f6484207a7..76e79a15ba08fb23ed9d0560dcd5966fe68ce92a 100644 (file)
@@ -33,7 +33,7 @@ void bch2_bkey_packed_to_binary_text(struct printbuf *out,
                        next_key_bits -= 64;
                }
 
-               bch2_prt_u64_binary(out, v, min(word_bits, nr_key_bits));
+               bch2_prt_u64_base2_nbits(out, v, min(word_bits, nr_key_bits));
 
                if (!next_key_bits)
                        break;
index 761f5e33b1e69e94ca0aaaa41a9825e496b5840f..5e52684764eb14de4d8433abd5954a829648440b 100644 (file)
@@ -63,8 +63,17 @@ static int key_type_cookie_invalid(struct bch_fs *c, struct bkey_s_c k,
        return 0;
 }
 
+static void key_type_cookie_to_text(struct printbuf *out, struct bch_fs *c,
+                                   struct bkey_s_c k)
+{
+       struct bkey_s_c_cookie ck = bkey_s_c_to_cookie(k);
+
+       prt_printf(out, "%llu", le64_to_cpu(ck.v->cookie));
+}
+
 #define bch2_bkey_ops_cookie ((struct bkey_ops) {      \
        .key_invalid    = key_type_cookie_invalid,      \
+       .val_to_text    = key_type_cookie_to_text,      \
        .min_val_size   = 8,                            \
 })
 
index ee82283722b759bbce174b2d902403c0024fe574..03efe8ee565a90672367c2146e3ff44ceb0db526 100644 (file)
@@ -83,9 +83,10 @@ enum btree_update_flags {
 
        __BTREE_TRIGGER_NORUN,
        __BTREE_TRIGGER_TRANSACTIONAL,
+       __BTREE_TRIGGER_ATOMIC,
+       __BTREE_TRIGGER_GC,
        __BTREE_TRIGGER_INSERT,
        __BTREE_TRIGGER_OVERWRITE,
-       __BTREE_TRIGGER_GC,
        __BTREE_TRIGGER_BUCKET_INVALIDATE,
 };
 
@@ -107,6 +108,10 @@ enum btree_update_flags {
  * causing us to go emergency read-only)
  */
 #define BTREE_TRIGGER_TRANSACTIONAL    (1U << __BTREE_TRIGGER_TRANSACTIONAL)
+#define BTREE_TRIGGER_ATOMIC           (1U << __BTREE_TRIGGER_ATOMIC)
+
+/* We're in gc/fsck: running triggers to recalculate e.g. disk usage */
+#define BTREE_TRIGGER_GC               (1U << __BTREE_TRIGGER_GC)
 
 /* @new is entering the btree */
 #define BTREE_TRIGGER_INSERT           (1U << __BTREE_TRIGGER_INSERT)
@@ -114,9 +119,6 @@ enum btree_update_flags {
 /* @old is leaving the btree */
 #define BTREE_TRIGGER_OVERWRITE                (1U << __BTREE_TRIGGER_OVERWRITE)
 
-/* We're in gc/fsck: running triggers to recalculate e.g. disk usage */
-#define BTREE_TRIGGER_GC               (1U << __BTREE_TRIGGER_GC)
-
 /* signal from bucket invalidate path to alloc trigger */
 #define BTREE_TRIGGER_BUCKET_INVALIDATE        (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
 
index 74bf8eb90a4c42cd24dc61024ecb448740e271a7..3fd1085b6c61ee72e7e814cf722306ebdba057c4 100644 (file)
@@ -720,7 +720,7 @@ static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
 {
        struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
        struct bkey_i min_key, max_key;
-       unsigned j, cacheline = 1;
+       unsigned cacheline = 1;
 
        t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
                      bset_ro_tree_capacity(b, t));
@@ -823,13 +823,12 @@ void bch2_bset_init_first(struct btree *b, struct bset *i)
        set_btree_bset(b, t, i);
 }
 
-void bch2_bset_init_next(struct bch_fs *c, struct btree *b,
-                        struct btree_node_entry *bne)
+void bch2_bset_init_next(struct btree *b, struct btree_node_entry *bne)
 {
        struct bset *i = &bne->keys;
        struct bset_tree *t;
 
-       BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c));
+       BUG_ON(bset_byte_offset(b, bne) >= btree_buf_bytes(b));
        BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
        BUG_ON(b->nsets >= MAX_BSETS);
 
index 632c2b8c54609b4be37f11e18868e4c41dcb736b..79c77baaa383868c99660a78a656c73d187f996f 100644 (file)
@@ -264,8 +264,7 @@ static inline struct bset *bset_next_set(struct btree *b,
 void bch2_btree_keys_init(struct btree *);
 
 void bch2_bset_init_first(struct btree *, struct bset *);
-void bch2_bset_init_next(struct bch_fs *, struct btree *,
-                        struct btree_node_entry *);
+void bch2_bset_init_next(struct btree *, struct btree_node_entry *);
 void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
 
 void bch2_bset_insert(struct btree *, struct btree_node_iter *,
index 8e2488a4b58d00a45f78a7c64a6c1e83f4b0ff59..d7c81beac14afae7ee44f11f28eb424f1b54a063 100644 (file)
@@ -60,7 +60,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 
        clear_btree_node_just_written(b);
 
-       kvpfree(b->data, btree_bytes(c));
+       kvpfree(b->data, btree_buf_bytes(b));
        b->data = NULL;
 #ifdef __KERNEL__
        kvfree(b->aux_data);
@@ -94,7 +94,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 {
        BUG_ON(b->data || b->aux_data);
 
-       b->data = kvpmalloc(btree_bytes(c), gfp);
+       b->data = kvpmalloc(btree_buf_bytes(b), gfp);
        if (!b->data)
                return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
@@ -107,7 +107,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
                b->aux_data = NULL;
 #endif
        if (!b->aux_data) {
-               kvpfree(b->data, btree_bytes(c));
+               kvpfree(b->data, btree_buf_bytes(b));
                b->data = NULL;
                return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
        }
@@ -126,7 +126,7 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
        bkey_btree_ptr_init(&b->key);
        INIT_LIST_HEAD(&b->list);
        INIT_LIST_HEAD(&b->write_blocked);
-       b->byte_order = ilog2(btree_bytes(c));
+       b->byte_order = ilog2(c->opts.btree_node_size);
        return b;
 }
 
@@ -408,7 +408,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
        if (c->verify_data)
                list_move(&c->verify_data->list, &bc->live);
 
-       kvpfree(c->verify_ondisk, btree_bytes(c));
+       kvpfree(c->verify_ondisk, c->opts.btree_node_size);
 
        for (i = 0; i < btree_id_nr_alive(c); i++) {
                struct btree_root *r = bch2_btree_id_root(c, i);
@@ -1192,7 +1192,7 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
               "    failed unpacked %zu\n",
               b->unpack_fn_len,
               b->nr.live_u64s * sizeof(u64),
-              btree_bytes(c) - sizeof(struct btree_node),
+              btree_buf_bytes(b) - sizeof(struct btree_node),
               b->nr.live_u64s * 100 / btree_max_u64s(c),
               b->sib_u64s[0],
               b->sib_u64s[1],
index 4e1af58820522fc8feec3caf9afc34d12f76c772..6d33885fdbde0d101b4c5785a1bf57bf072fe8de 100644 (file)
@@ -74,22 +74,27 @@ static inline bool btree_node_hashed(struct btree *b)
             _iter = 0; _iter < (_tbl)->size; _iter++)                  \
                rht_for_each_entry_rcu((_b), (_pos), _tbl, _iter, hash)
 
-static inline size_t btree_bytes(struct bch_fs *c)
+static inline size_t btree_buf_bytes(const struct btree *b)
 {
-       return c->opts.btree_node_size;
+       return 1UL << b->byte_order;
 }
 
-static inline size_t btree_max_u64s(struct bch_fs *c)
+static inline size_t btree_buf_max_u64s(const struct btree *b)
 {
-       return (btree_bytes(c) - sizeof(struct btree_node)) / sizeof(u64);
+       return (btree_buf_bytes(b) - sizeof(struct btree_node)) / sizeof(u64);
 }
 
-static inline size_t btree_pages(struct bch_fs *c)
+static inline size_t btree_max_u64s(const struct bch_fs *c)
 {
-       return btree_bytes(c) / PAGE_SIZE;
+       return (c->opts.btree_node_size - sizeof(struct btree_node)) / sizeof(u64);
 }
 
-static inline unsigned btree_blocks(struct bch_fs *c)
+static inline size_t btree_sectors(const struct bch_fs *c)
+{
+       return c->opts.btree_node_size >> SECTOR_SHIFT;
+}
+
+static inline unsigned btree_blocks(const struct bch_fs *c)
 {
        return btree_sectors(c) >> c->block_bits;
 }
index 49b4ade758c3623ed35557a02a00afd31b0bec52..1102995643b137c3a8a9fe5f12f0cce95edfafeb 100644 (file)
@@ -597,7 +597,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
                              "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
                              "while marking %s",
                              p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
-                             bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+                             bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
                              p.ptr.gen,
                              (printbuf_reset(&buf),
                               bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) {
@@ -615,7 +615,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
                              "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
                              "while marking %s",
                              p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
-                             bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+                             bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
                              p.ptr.gen, g->gen,
                              (printbuf_reset(&buf),
                               bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) {
@@ -637,7 +637,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
                              "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
                              "while marking %s",
                              p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
-                             bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+                             bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
                              p.ptr.gen,
                              (printbuf_reset(&buf),
                               bch2_bkey_val_to_text(&buf, c, *k), buf.buf))))
@@ -649,7 +649,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
                              "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
                              "while marking %s",
                              p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
-                             bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+                             bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
                              p.ptr.gen, g->gen,
                              (printbuf_reset(&buf),
                               bch2_bkey_val_to_text(&buf, c, *k), buf.buf))))
@@ -664,8 +664,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
                                "bucket %u:%zu different types of data in same bucket: %s, %s\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
-                               bch2_data_types[g->data_type],
-                               bch2_data_types[data_type],
+                               bch2_data_type_str(g->data_type),
+                               bch2_data_type_str(data_type),
                                (printbuf_reset(&buf),
                                 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
                        if (data_type == BCH_DATA_btree) {
@@ -1238,11 +1238,11 @@ static int bch2_gc_done(struct bch_fs *c,
 
                for (i = 0; i < BCH_DATA_NR; i++) {
                        copy_dev_field(dev_usage_buckets_wrong,
-                                      d[i].buckets,    "%s buckets", bch2_data_types[i]);
+                                      d[i].buckets,    "%s buckets", bch2_data_type_str(i));
                        copy_dev_field(dev_usage_sectors_wrong,
-                                      d[i].sectors,    "%s sectors", bch2_data_types[i]);
+                                      d[i].sectors,    "%s sectors", bch2_data_type_str(i));
                        copy_dev_field(dev_usage_fragmented_wrong,
-                                      d[i].fragmented, "%s fragmented", bch2_data_types[i]);
+                                      d[i].fragmented, "%s fragmented", bch2_data_type_str(i));
                }
        }
 
@@ -1253,19 +1253,19 @@ static int bch2_gc_done(struct bch_fs *c,
                        bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
 
                copy_fs_field(fs_usage_hidden_wrong,
-                             hidden,           "hidden");
+                             b.hidden,         "hidden");
                copy_fs_field(fs_usage_btree_wrong,
-                             btree,            "btree");
+                             b.btree,          "btree");
 
                if (!metadata_only) {
                        copy_fs_field(fs_usage_data_wrong,
-                                     data,     "data");
+                                     b.data,   "data");
                        copy_fs_field(fs_usage_cached_wrong,
-                                     cached,   "cached");
+                                     b.cached, "cached");
                        copy_fs_field(fs_usage_reserved_wrong,
-                                     reserved, "reserved");
+                                     b.reserved,       "reserved");
                        copy_fs_field(fs_usage_nr_inodes_wrong,
-                                     nr_inodes,"nr_inodes");
+                                     b.nr_inodes,"nr_inodes");
 
                        for (i = 0; i < BCH_REPLICAS_MAX; i++)
                                copy_fs_field(fs_usage_persistent_reserved_wrong,
@@ -1417,8 +1417,8 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
                        ": got %s, should be %s",
                        iter->pos.inode, iter->pos.offset,
                        gc.gen,
-                       bch2_data_types[new.data_type],
-                       bch2_data_types[gc.data_type]))
+                       bch2_data_type_str(new.data_type),
+                       bch2_data_type_str(gc.data_type)))
                new.data_type = gc.data_type;
 
 #define copy_bucket_field(_errtype, _f)                                        \
@@ -1428,7 +1428,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
                        ": got %u, should be %u",                       \
                        iter->pos.inode, iter->pos.offset,              \
                        gc.gen,                                         \
-                       bch2_data_types[gc.data_type],                  \
+                       bch2_data_type_str(gc.data_type),               \
                        new._f, gc._f))                                 \
                new._f = gc._f;                                         \
 
index 33db48e2153fef61f0c733f97278018f419c2b05..aa9b6cbe3226909626411b886731a8bb8648a558 100644 (file)
@@ -112,7 +112,7 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
        unsigned flags = memalloc_nofs_save();
        void *p;
 
-       BUG_ON(size > btree_bytes(c));
+       BUG_ON(size > c->opts.btree_node_size);
 
        *used_mempool = false;
        p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
@@ -174,8 +174,8 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 
        ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
 
-       for (k = unwritten_whiteouts_start(c, b);
-            k != unwritten_whiteouts_end(c, b);
+       for (k = unwritten_whiteouts_start(b);
+            k != unwritten_whiteouts_end(b);
             k = bkey_p_next(k))
                *--ptrs = k;
 
@@ -192,7 +192,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
        verify_no_dups(b, new_whiteouts,
                       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
 
-       memcpy_u64s(unwritten_whiteouts_start(c, b),
+       memcpy_u64s(unwritten_whiteouts_start(b),
                    new_whiteouts, b->whiteout_u64s);
 
        btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
@@ -313,7 +313,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
        }
 
        bytes = sorting_entire_node
-               ? btree_bytes(c)
+               ? btree_buf_bytes(b)
                : __vstruct_bytes(struct btree_node, u64s);
 
        out = btree_bounce_alloc(c, bytes, &used_mempool);
@@ -338,7 +338,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
        if (sorting_entire_node) {
                u64s = le16_to_cpu(out->keys.u64s);
 
-               BUG_ON(bytes != btree_bytes(c));
+               BUG_ON(bytes != btree_buf_bytes(b));
 
                /*
                 * Our temporary buffer is the same size as the btree node's
@@ -502,7 +502,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
 
        bne = want_new_bset(c, b);
        if (bne)
-               bch2_bset_init_next(c, b, bne);
+               bch2_bset_init_next(b, bne);
 
        bch2_btree_build_aux_trees(b);
 
@@ -1160,7 +1160,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                             ptr_written, b->written);
        } else {
                for (bne = write_block(b);
-                    bset_byte_offset(b, bne) < btree_bytes(c);
+                    bset_byte_offset(b, bne) < btree_buf_bytes(b);
                     bne = (void *) bne + block_bytes(c))
                        btree_err_on(bne->keys.seq == b->data->keys.seq &&
                                     !bch2_journal_seq_is_blacklisted(c,
@@ -1172,7 +1172,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                                     "found bset signature after last bset");
        }
 
-       sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
+       sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
        sorted->keys.u64s = 0;
 
        set_btree_bset(b, b->set, &b->data->keys);
@@ -1188,7 +1188,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
        BUG_ON(b->nr.live_u64s != u64s);
 
-       btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
+       btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
 
        if (updated_range)
                bch2_btree_node_drop_keys_outside_node(b);
@@ -1284,7 +1284,7 @@ static void btree_node_read_work(struct work_struct *work)
                rb->have_ioref          = bch2_dev_get_ioref(ca, READ);
                bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
                bio->bi_iter.bi_sector  = rb->pick.ptr.offset;
-               bio->bi_iter.bi_size    = btree_bytes(c);
+               bio->bi_iter.bi_size    = btree_buf_bytes(b);
 
                if (rb->have_ioref) {
                        bio_set_dev(bio, ca->disk_sb.bdev);
@@ -1512,7 +1512,7 @@ fsck_err:
        }
 
        if (best >= 0) {
-               memcpy(b->data, ra->buf[best], btree_bytes(c));
+               memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
                ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
        } else {
                ret = -1;
@@ -1578,7 +1578,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
        for (i = 0; i < ra->nr; i++) {
                ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
                ra->bio[i] = bio_alloc_bioset(NULL,
-                                             buf_pages(ra->buf[i], btree_bytes(c)),
+                                             buf_pages(ra->buf[i], btree_buf_bytes(b)),
                                              REQ_OP_READ|REQ_SYNC|REQ_META,
                                              GFP_NOFS,
                                              &c->btree_bio);
@@ -1598,7 +1598,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
                rb->pick                = pick;
                rb->bio.bi_iter.bi_sector = pick.ptr.offset;
                rb->bio.bi_end_io       = btree_node_read_all_replicas_endio;
-               bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
+               bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
 
                if (rb->have_ioref) {
                        this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
@@ -1665,7 +1665,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
        ca = bch_dev_bkey_exists(c, pick.ptr.dev);
 
        bio = bio_alloc_bioset(NULL,
-                              buf_pages(b->data, btree_bytes(c)),
+                              buf_pages(b->data, btree_buf_bytes(b)),
                               REQ_OP_READ|REQ_SYNC|REQ_META,
                               GFP_NOFS,
                               &c->btree_bio);
@@ -1679,7 +1679,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
        INIT_WORK(&rb->work, btree_node_read_work);
        bio->bi_iter.bi_sector  = pick.ptr.offset;
        bio->bi_end_io          = btree_node_read_endio;
-       bch2_bio_map(bio, b->data, btree_bytes(c));
+       bch2_bio_map(bio, b->data, btree_buf_bytes(b));
 
        if (rb->have_ioref) {
                this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
@@ -2074,8 +2074,8 @@ do_write:
        i->u64s         = 0;
 
        sort_iter_add(&sort_iter.iter,
-                     unwritten_whiteouts_start(c, b),
-                     unwritten_whiteouts_end(c, b));
+                     unwritten_whiteouts_start(b),
+                     unwritten_whiteouts_end(b));
        SET_BSET_SEPARATE_WHITEOUTS(i, false);
 
        b->whiteout_u64s = 0;
@@ -2251,7 +2251,7 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
 
        bne = want_new_bset(c, b);
        if (bne)
-               bch2_bset_init_next(c, b, bne);
+               bch2_bset_init_next(b, bne);
 
        bch2_btree_build_aux_trees(b);
 
index fa298289e01656b989db38dcf19301ae4d880bb7..5467a8635be113102c56bb6f02986209533c35ac 100644 (file)
@@ -1337,7 +1337,7 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
 
        if (path->should_be_locked &&
            !trans->restarted &&
-           (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))
+           (!dup || !bch2_btree_path_relock_norestart(trans, dup)))
                return;
 
        if (dup) {
index da2b74fa63fcece86d7d92d18dc340330180c657..24772538e4cc74ada59851bd7847dd5ece5ea122 100644 (file)
@@ -819,6 +819,11 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 #define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \
        for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
 
+/*
+ * This should not be used in a fastpath, without first trying _do in
+ * nonblocking mode - it will cause excessive transaction restarts and
+ * potentially livelocking:
+ */
 #define drop_locks_do(_trans, _do)                                     \
 ({                                                                     \
        bch2_trans_unlock(_trans);                                      \
index 2d1c95c42f240cc88b31c2728d7a970560e4865a..6843974423381029e7a8cf24fd4cd5c6c33627cd 100644 (file)
@@ -92,7 +92,7 @@ static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
                        continue;
 
                bch2_btree_trans_to_text(out, i->trans);
-               bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1);
+               bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
        }
 }
 
@@ -227,7 +227,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
                        prt_printf(&buf, "backtrace:");
                        prt_newline(&buf);
                        printbuf_indent_add(&buf, 2);
-                       bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2);
+                       bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
                        printbuf_indent_sub(&buf, 2);
                        prt_newline(&buf);
                }
@@ -631,8 +631,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *trans,
 }
 
 __flatten
-bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
-                       struct btree_path *path, unsigned long trace_ip)
+bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
 {
        struct get_locks_fail f;
 
@@ -642,7 +641,7 @@ bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
 int __bch2_btree_path_relock(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
 {
-       if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
+       if (!bch2_btree_path_relock_norestart(trans, path)) {
                trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
        }
@@ -759,12 +758,39 @@ int bch2_trans_relock(struct btree_trans *trans)
        if (unlikely(trans->restarted))
                return -((int) trans->restarted);
 
-       trans_for_each_path(trans, path, i)
+       trans_for_each_path(trans, path, i) {
+               struct get_locks_fail f;
+
                if (path->should_be_locked &&
-                   !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
-                       trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
+                   !btree_path_get_locks(trans, path, false, &f)) {
+                       if (trace_trans_restart_relock_enabled()) {
+                               struct printbuf buf = PRINTBUF;
+
+                               bch2_bpos_to_text(&buf, path->pos);
+                               prt_printf(&buf, " l=%u seq=%u node seq=",
+                                          f.l, path->l[f.l].lock_seq);
+                               if (IS_ERR_OR_NULL(f.b)) {
+                                       prt_str(&buf, bch2_err_str(PTR_ERR(f.b)));
+                               } else {
+                                       prt_printf(&buf, "%u", f.b->c.lock.seq);
+
+                                       struct six_lock_count c =
+                                               bch2_btree_node_lock_counts(trans, NULL, &f.b->c, f.l);
+                                       prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
+
+                                       c = six_lock_counts(&f.b->c.lock);
+                                       prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
+                               }
+
+                               trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
+                               printbuf_exit(&buf);
+                       }
+
+                       count_event(trans->c, trans_restart_relock);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                }
+       }
+
        return 0;
 }
 
@@ -778,7 +804,7 @@ int bch2_trans_relock_notrace(struct btree_trans *trans)
 
        trans_for_each_path(trans, path, i)
                if (path->should_be_locked &&
-                   !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
+                   !bch2_btree_path_relock_norestart(trans, path)) {
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                }
        return 0;
index cc5500a957a1b3084d005abe8b0893146e354bca..4bd72c855da1a4028106b70e10727ad07d578614 100644 (file)
@@ -312,8 +312,7 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *,
 
 /* relock: */
 
-bool bch2_btree_path_relock_norestart(struct btree_trans *,
-                                     struct btree_path *, unsigned long);
+bool bch2_btree_path_relock_norestart(struct btree_trans *, struct btree_path *);
 int __bch2_btree_path_relock(struct btree_trans *,
                             struct btree_path *, unsigned long);
 
@@ -353,12 +352,6 @@ static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
 
 /* upgrade */
 
-
-struct get_locks_fail {
-       unsigned        l;
-       struct btree    *b;
-};
-
 bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
                               struct btree_path *, unsigned,
                               struct get_locks_fail *);
index 90eb8065ff2da0224c8627987f58e9314412dcff..30d69a6d133eec77c76c7e64a5de0d896ad6b732 100644 (file)
@@ -139,8 +139,7 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
        EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
        EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
        EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
-       EBUG_ON(insert->k.u64s >
-               bch_btree_keys_u64s_remaining(trans->c, b));
+       EBUG_ON(insert->k.u64s > bch2_btree_keys_u64s_remaining(b));
        EBUG_ON(!b->c.level && !bpos_eq(insert->k.p, path->pos));
 
        k = bch2_btree_node_iter_peek_all(node_iter, b);
@@ -160,7 +159,7 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
                k->type = KEY_TYPE_deleted;
 
                if (k->needs_whiteout)
-                       push_whiteout(trans->c, b, insert->k.p);
+                       push_whiteout(b, insert->k.p);
                k->needs_whiteout = false;
 
                if (k >= btree_bset_last(b)->start) {
@@ -348,9 +347,7 @@ static noinline void journal_transaction_name(struct btree_trans *trans)
 static inline int btree_key_can_insert(struct btree_trans *trans,
                                       struct btree *b, unsigned u64s)
 {
-       struct bch_fs *c = trans->c;
-
-       if (!bch2_btree_node_insert_fits(c, b, u64s))
+       if (!bch2_btree_node_insert_fits(b, u64s))
                return -BCH_ERR_btree_insert_btree_node_full;
 
        return 0;
@@ -418,7 +415,7 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
                return 0;
 
        new_u64s        = roundup_pow_of_two(u64s);
-       new_k           = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT);
+       new_k           = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT|__GFP_NOWARN);
        if (unlikely(!new_k))
                return btree_key_can_insert_cached_slowpath(trans, flags, path, new_u64s);
 
@@ -448,9 +445,6 @@ static int run_one_mem_trigger(struct btree_trans *trans,
        if (unlikely(flags & BTREE_TRIGGER_NORUN))
                return 0;
 
-       if (!btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)))
-               return 0;
-
        if (old_ops->trigger == new_ops->trigger) {
                ret   = bch2_key_trigger(trans, i->btree_id, i->level,
                                old, bkey_i_to_s(new),
@@ -586,9 +580,6 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
 
 static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
 {
-       struct bch_fs *c = trans->c;
-       int ret = 0;
-
        trans_for_each_update(trans, i) {
                /*
                 * XXX: synchronization of cached update triggers with gc
@@ -596,14 +587,15 @@ static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
                 */
                BUG_ON(i->cached || i->level);
 
-               if (gc_visited(c, gc_pos_btree_node(insert_l(trans, i)->b))) {
-                       ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
+               if (btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)) &&
+                   gc_visited(trans->c, gc_pos_btree_node(insert_l(trans, i)->b))) {
+                       int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
                        if (ret)
-                               break;
+                               return ret;
                }
        }
 
-       return ret;
+       return 0;
 }
 
 static inline int
@@ -680,6 +672,9 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
            bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas))
                return -BCH_ERR_btree_insert_need_mark_replicas;
 
+       /* XXX: we only want to run this if deltas are nonzero */
+       bch2_trans_account_disk_usage_change(trans);
+
        h = trans->hooks;
        while (h) {
                ret = h->fn(trans, h);
@@ -689,8 +684,8 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
        }
 
        trans_for_each_update(trans, i)
-               if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type)) {
-                       ret = run_one_mem_trigger(trans, i, i->flags);
+               if (BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS & (1U << i->bkey_type)) {
+                       ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_ATOMIC|i->flags);
                        if (ret)
                                goto fatal_err;
                }
@@ -994,6 +989,8 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
            !trans->journal_entries_u64s)
                goto out_reset;
 
+       memset(&trans->fs_usage_delta, 0, sizeof(trans->fs_usage_delta));
+
        ret = bch2_trans_commit_run_triggers(trans);
        if (ret)
                goto out_reset;
index d530307046f4cf93bdb4c4063409a9fff5e705c4..4a5a64499eb76698743ae7f20b4e47eaca09b868 100644 (file)
@@ -430,6 +430,9 @@ struct btree_trans {
        struct journal_res      journal_res;
        u64                     *journal_seq;
        struct disk_reservation *disk_res;
+
+       struct bch_fs_usage_base fs_usage_delta;
+
        unsigned                journal_u64s;
        unsigned                extra_disk_res; /* XXX kill */
        struct replicas_delta_list *fs_usage_deltas;
@@ -653,7 +656,7 @@ const char *bch2_btree_node_type_str(enum btree_node_type);
         BIT_ULL(BKEY_TYPE_reflink)|                    \
         BIT_ULL(BKEY_TYPE_btree))
 
-#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS               \
+#define BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS            \
        (BIT_ULL(BKEY_TYPE_alloc)|                      \
         BIT_ULL(BKEY_TYPE_inodes)|                     \
         BIT_ULL(BKEY_TYPE_stripes)|                    \
@@ -661,7 +664,7 @@ const char *bch2_btree_node_type_str(enum btree_node_type);
 
 #define BTREE_NODE_TYPE_HAS_TRIGGERS                   \
        (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|            \
-        BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)
+        BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS)
 
 static inline bool btree_node_type_needs_gc(enum btree_node_type type)
 {
@@ -738,4 +741,9 @@ enum btree_node_sibling {
        btree_next_sib,
 };
 
+struct get_locks_fail {
+       unsigned        l;
+       struct btree    *b;
+};
+
 #endif /* _BCACHEFS_BTREE_TYPES_H */
index 44f9dfa28a09d89984150b19d3831077a18485f1..17a5938aa71a6b43b45c12383e4690df146ee2a3 100644 (file)
@@ -159,7 +159,7 @@ static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
 {
        size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f);
 
-       return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
+       return __vstruct_bytes(struct btree_node, u64s) < btree_buf_bytes(b);
 }
 
 /* Btree node freeing/allocation: */
@@ -1097,7 +1097,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
                 * Always check for space for two keys, even if we won't have to
                 * split at prior level - it might have been a merge instead:
                 */
-               if (bch2_btree_node_insert_fits(c, path->l[update_level].b,
+               if (bch2_btree_node_insert_fits(path->l[update_level].b,
                                                BKEY_BTREE_PTR_U64s_MAX * 2))
                        break;
 
@@ -1401,7 +1401,7 @@ static void __btree_split_node(struct btree_update *as,
 
                unsigned u64s = nr_keys[i].nr_keys * n[i]->data->format.key_u64s +
                        nr_keys[i].val_u64s;
-               if (__vstruct_bytes(struct btree_node, u64s) > btree_bytes(as->c))
+               if (__vstruct_bytes(struct btree_node, u64s) > btree_buf_bytes(b))
                        n[i]->data->format = b->format;
 
                btree_node_set_format(n[i], n[i]->data->format);
@@ -1703,7 +1703,7 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
 
        bch2_btree_node_prep_for_write(trans, path, b);
 
-       if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) {
+       if (!bch2_btree_node_insert_fits(b, bch2_keylist_u64s(keys))) {
                bch2_btree_node_unlock_write(trans, path, b);
                goto split;
        }
index adfc62083844cf3b93d16d25d8269564f5b022a3..c593c925d1e3b03cfae5b4e7fdf0f7bc4b99df5c 100644 (file)
@@ -184,21 +184,19 @@ static inline void btree_node_reset_sib_u64s(struct btree *b)
        b->sib_u64s[1] = b->nr.live_u64s;
 }
 
-static inline void *btree_data_end(struct bch_fs *c, struct btree *b)
+static inline void *btree_data_end(struct btree *b)
 {
-       return (void *) b->data + btree_bytes(c);
+       return (void *) b->data + btree_buf_bytes(b);
 }
 
-static inline struct bkey_packed *unwritten_whiteouts_start(struct bch_fs *c,
-                                                           struct btree *b)
+static inline struct bkey_packed *unwritten_whiteouts_start(struct btree *b)
 {
-       return (void *) ((u64 *) btree_data_end(c, b) - b->whiteout_u64s);
+       return (void *) ((u64 *) btree_data_end(b) - b->whiteout_u64s);
 }
 
-static inline struct bkey_packed *unwritten_whiteouts_end(struct bch_fs *c,
-                                                         struct btree *b)
+static inline struct bkey_packed *unwritten_whiteouts_end(struct btree *b)
 {
-       return btree_data_end(c, b);
+       return btree_data_end(b);
 }
 
 static inline void *write_block(struct btree *b)
@@ -221,13 +219,11 @@ static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
        return __btree_addr_written(b, k);
 }
 
-static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
-                                                struct btree *b,
-                                                void *end)
+static inline ssize_t __bch2_btree_u64s_remaining(struct btree *b, void *end)
 {
        ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
                b->whiteout_u64s;
-       ssize_t total = c->opts.btree_node_size >> 3;
+       ssize_t total = btree_buf_bytes(b) >> 3;
 
        /* Always leave one extra u64 for bch2_varint_decode: */
        used++;
@@ -235,10 +231,9 @@ static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
        return total - used;
 }
 
-static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
-                                                  struct btree *b)
+static inline size_t bch2_btree_keys_u64s_remaining(struct btree *b)
 {
-       ssize_t remaining = __bch_btree_u64s_remaining(c, b,
+       ssize_t remaining = __bch2_btree_u64s_remaining(b,
                                btree_bkey_last(b, bset_tree_last(b)));
 
        BUG_ON(remaining < 0);
@@ -260,14 +255,13 @@ static inline unsigned btree_write_set_buffer(struct btree *b)
        return 8 << BTREE_WRITE_SET_U64s_BITS;
 }
 
-static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
-                                                    struct btree *b)
+static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, struct btree *b)
 {
        struct bset_tree *t = bset_tree_last(b);
        struct btree_node_entry *bne = max(write_block(b),
                        (void *) btree_bkey_last(b, bset_tree_last(b)));
        ssize_t remaining_space =
-               __bch_btree_u64s_remaining(c, b, bne->keys.start);
+               __bch2_btree_u64s_remaining(b, bne->keys.start);
 
        if (unlikely(bset_written(b, bset(b, t)))) {
                if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
@@ -281,12 +275,11 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
        return NULL;
 }
 
-static inline void push_whiteout(struct bch_fs *c, struct btree *b,
-                                struct bpos pos)
+static inline void push_whiteout(struct btree *b, struct bpos pos)
 {
        struct bkey_packed k;
 
-       BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+       BUG_ON(bch2_btree_keys_u64s_remaining(b) < BKEY_U64s);
        EBUG_ON(btree_node_just_written(b));
 
        if (!bkey_pack_pos(&k, pos, b)) {
@@ -299,20 +292,19 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
        k.needs_whiteout = true;
 
        b->whiteout_u64s += k.u64s;
-       bkey_p_copy(unwritten_whiteouts_start(c, b), &k);
+       bkey_p_copy(unwritten_whiteouts_start(b), &k);
 }
 
 /*
  * write lock must be held on @b (else the dirty bset that we were going to
  * insert into could be written out from under us)
  */
-static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
-                                              struct btree *b, unsigned u64s)
+static inline bool bch2_btree_node_insert_fits(struct btree *b, unsigned u64s)
 {
        if (unlikely(btree_node_need_rewrite(b)))
                return false;
 
-       return u64s <= bch_btree_keys_u64s_remaining(c, b);
+       return u64s <= bch2_btree_keys_u64s_remaining(b);
 }
 
 void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
index 5c1169c78dafec7bf238854a74b37120f1c835cd..ac7844861966368cdce41efd9e27c898fe8ad6e7 100644 (file)
@@ -125,13 +125,12 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite
                               struct btree_write_buffered_key *wb,
                               bool *write_locked, size_t *fast)
 {
-       struct bch_fs *c = trans->c;
        struct btree_path *path;
        int ret;
 
        EBUG_ON(!wb->journal_seq);
-       EBUG_ON(!c->btree_write_buffer.flushing.pin.seq);
-       EBUG_ON(c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
+       EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
+       EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
 
        ret = bch2_btree_iter_traverse(iter);
        if (ret)
@@ -155,7 +154,7 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite
                *write_locked = true;
        }
 
-       if (unlikely(!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s))) {
+       if (unlikely(!bch2_btree_node_insert_fits(path->l[0].b, wb->k.k.u64s))) {
                *write_locked = false;
                return wb_flush_one_slowpath(trans, iter, wb);
        }
index d83ea0e53df3f36f8476cd096ca4cc6948145cc3..54f7826ac49874d46b08330678ea0b2565ecc491 100644 (file)
@@ -25,7 +25,7 @@
 
 #include <linux/preempt.h>
 
-static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
+static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
                                              enum bch_data_type data_type,
                                              s64 sectors)
 {
@@ -54,20 +54,20 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
                bch2_fs_usage_acc_to_base(c, i);
 
        for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
-               usage->reserved += usage->persistent_reserved[i];
+               usage->b.reserved += usage->persistent_reserved[i];
 
        for (unsigned i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry_v1 *e =
                        cpu_replicas_entry(&c->replicas, i);
 
-               fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
+               fs_usage_data_type_to_base(&usage->b, e->data_type, usage->replicas[i]);
        }
 
        for_each_member_device(c, ca) {
                struct bch_dev_usage dev = bch2_dev_usage_read(ca);
 
-               usage->hidden += (dev.d[BCH_DATA_sb].buckets +
-                                 dev.d[BCH_DATA_journal].buckets) *
+               usage->b.hidden += (dev.d[BCH_DATA_sb].buckets +
+                                   dev.d[BCH_DATA_journal].buckets) *
                        ca->mi.bucket_size;
        }
 
@@ -188,15 +188,15 @@ void bch2_fs_usage_to_text(struct printbuf *out,
        prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);
 
        prt_printf(out, "hidden:\t\t\t\t%llu\n",
-              fs_usage->u.hidden);
+              fs_usage->u.b.hidden);
        prt_printf(out, "data:\t\t\t\t%llu\n",
-              fs_usage->u.data);
+              fs_usage->u.b.data);
        prt_printf(out, "cached:\t\t\t\t%llu\n",
-              fs_usage->u.cached);
+              fs_usage->u.b.cached);
        prt_printf(out, "reserved:\t\t\t%llu\n",
-              fs_usage->u.reserved);
+              fs_usage->u.b.reserved);
        prt_printf(out, "nr_inodes:\t\t\t%llu\n",
-              fs_usage->u.nr_inodes);
+              fs_usage->u.b.nr_inodes);
        prt_printf(out, "online reserved:\t\t%llu\n",
               fs_usage->online_reserved);
 
@@ -225,10 +225,10 @@ static u64 reserve_factor(u64 r)
 
 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
 {
-       return min(fs_usage->u.hidden +
-                  fs_usage->u.btree +
-                  fs_usage->u.data +
-                  reserve_factor(fs_usage->u.reserved +
+       return min(fs_usage->u.b.hidden +
+                  fs_usage->u.b.btree +
+                  fs_usage->u.b.data +
+                  reserve_factor(fs_usage->u.b.reserved +
                                  fs_usage->online_reserved),
                   c->capacity);
 }
@@ -240,17 +240,17 @@ __bch2_fs_usage_read_short(struct bch_fs *c)
        u64 data, reserved;
 
        ret.capacity = c->capacity -
-               bch2_fs_usage_read_one(c, &c->usage_base->hidden);
+               bch2_fs_usage_read_one(c, &c->usage_base->b.hidden);
 
-       data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
-               bch2_fs_usage_read_one(c, &c->usage_base->btree);
-       reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
+       data            = bch2_fs_usage_read_one(c, &c->usage_base->b.data) +
+               bch2_fs_usage_read_one(c, &c->usage_base->b.btree);
+       reserved        = bch2_fs_usage_read_one(c, &c->usage_base->b.reserved) +
                percpu_u64_get(c->online_reserved);
 
        ret.used        = min(ret.capacity, data + reserve_factor(reserved));
        ret.free        = ret.capacity - ret.used;
 
-       ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
+       ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes);
 
        return ret;
 }
@@ -284,7 +284,7 @@ void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
        prt_newline(out);
 
        for (unsigned i = 0; i < BCH_DATA_NR; i++) {
-               prt_str(out, bch2_data_types[i]);
+               bch2_prt_data_type(out, i);
                prt_tab(out);
                prt_u64(out, usage->d[i].buckets);
                prt_tab_rjust(out);
@@ -308,9 +308,9 @@ void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
        fs_usage = fs_usage_ptr(c, journal_seq, gc);
 
        if (data_type_is_hidden(old->data_type))
-               fs_usage->hidden -= ca->mi.bucket_size;
+               fs_usage->b.hidden -= ca->mi.bucket_size;
        if (data_type_is_hidden(new->data_type))
-               fs_usage->hidden += ca->mi.bucket_size;
+               fs_usage->b.hidden += ca->mi.bucket_size;
 
        u = dev_usage_ptr(ca, journal_seq, gc);
 
@@ -359,7 +359,7 @@ static inline int __update_replicas(struct bch_fs *c,
        if (idx < 0)
                return -1;
 
-       fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
+       fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
        fs_usage->replicas[idx]         += sectors;
        return 0;
 }
@@ -394,7 +394,7 @@ int bch2_update_replicas(struct bch_fs *c, struct bkey_s_c k,
 
        preempt_disable();
        fs_usage = fs_usage_ptr(c, journal_seq, gc);
-       fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
+       fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
        fs_usage->replicas[idx]         += sectors;
        preempt_enable();
 err:
@@ -523,8 +523,8 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
        if (bch2_fs_inconsistent_on(g->data_type &&
                        g->data_type != data_type, c,
                        "different types of data in same bucket: %s, %s",
-                       bch2_data_types[g->data_type],
-                       bch2_data_types[data_type])) {
+                       bch2_data_type_str(g->data_type),
+                       bch2_data_type_str(data_type))) {
                ret = -EIO;
                goto err;
        }
@@ -532,7 +532,7 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
        if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
                        "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
                        ca->dev_idx, b, g->gen,
-                       bch2_data_types[g->data_type ?: data_type],
+                       bch2_data_type_str(g->data_type ?: data_type),
                        g->dirty_sectors, sectors)) {
                ret = -EIO;
                goto err;
@@ -575,7 +575,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
                        "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
-                       bch2_data_types[bucket_data_type ?: ptr_data_type],
+                       bch2_data_type_str(bucket_data_type ?: ptr_data_type),
                        ptr->gen,
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
                ret = -EIO;
@@ -588,7 +588,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
                        "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
-                       bch2_data_types[bucket_data_type ?: ptr_data_type],
+                       bch2_data_type_str(bucket_data_type ?: ptr_data_type),
                        ptr->gen,
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, k), buf.buf));
@@ -603,7 +603,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
                        *bucket_gen(ca, bucket_nr),
-                       bch2_data_types[bucket_data_type ?: ptr_data_type],
+                       bch2_data_type_str(bucket_data_type ?: ptr_data_type),
                        ptr->gen,
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, k), buf.buf));
@@ -624,8 +624,8 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
                        "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
-                       bch2_data_types[bucket_data_type],
-                       bch2_data_types[ptr_data_type],
+                       bch2_data_type_str(bucket_data_type),
+                       bch2_data_type_str(ptr_data_type),
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, k), buf.buf));
                ret = -EIO;
@@ -638,7 +638,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
                        "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
-                       bch2_data_types[bucket_data_type ?: ptr_data_type],
+                       bch2_data_type_str(bucket_data_type ?: ptr_data_type),
                        bucket_sectors, sectors,
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, k), buf.buf));
@@ -677,11 +677,11 @@ void bch2_trans_fs_usage_revert(struct btree_trans *trans,
                BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
        }
 
-       dst->nr_inodes -= deltas->nr_inodes;
+       dst->b.nr_inodes -= deltas->nr_inodes;
 
        for (i = 0; i < BCH_REPLICAS_MAX; i++) {
                added                           -= deltas->persistent_reserved[i];
-               dst->reserved                   -= deltas->persistent_reserved[i];
+               dst->b.reserved                 -= deltas->persistent_reserved[i];
                dst->persistent_reserved[i]     -= deltas->persistent_reserved[i];
        }
 
@@ -694,48 +694,25 @@ void bch2_trans_fs_usage_revert(struct btree_trans *trans,
        percpu_up_read(&c->mark_lock);
 }
 
-int bch2_trans_fs_usage_apply(struct btree_trans *trans,
-                             struct replicas_delta_list *deltas)
+void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
 {
        struct bch_fs *c = trans->c;
+       u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
        static int warned_disk_usage = 0;
        bool warn = false;
-       u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
-       struct replicas_delta *d, *d2;
-       struct replicas_delta *top = (void *) deltas->d + deltas->used;
-       struct bch_fs_usage *dst;
-       s64 added = 0, should_not_have_added;
-       unsigned i;
 
        percpu_down_read(&c->mark_lock);
        preempt_disable();
-       dst = fs_usage_ptr(c, trans->journal_res.seq, false);
-
-       for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
-               switch (d->r.data_type) {
-               case BCH_DATA_btree:
-               case BCH_DATA_user:
-               case BCH_DATA_parity:
-                       added += d->delta;
-               }
+       struct bch_fs_usage_base *dst = &fs_usage_ptr(c, trans->journal_res.seq, false)->b;
+       struct bch_fs_usage_base *src = &trans->fs_usage_delta;
 
-               if (__update_replicas(c, dst, &d->r, d->delta))
-                       goto need_mark;
-       }
-
-       dst->nr_inodes += deltas->nr_inodes;
-
-       for (i = 0; i < BCH_REPLICAS_MAX; i++) {
-               added                           += deltas->persistent_reserved[i];
-               dst->reserved                   += deltas->persistent_reserved[i];
-               dst->persistent_reserved[i]     += deltas->persistent_reserved[i];
-       }
+       s64 added = src->btree + src->data + src->reserved;
 
        /*
         * Not allowed to reduce sectors_available except by getting a
         * reservation:
         */
-       should_not_have_added = added - (s64) disk_res_sectors;
+       s64 should_not_have_added = added - (s64) disk_res_sectors;
        if (unlikely(should_not_have_added > 0)) {
                u64 old, new, v = atomic64_read(&c->sectors_available);
 
@@ -754,6 +731,13 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
                this_cpu_sub(*c->online_reserved, added);
        }
 
+       dst->hidden     += src->hidden;
+       dst->btree      += src->btree;
+       dst->data       += src->data;
+       dst->cached     += src->cached;
+       dst->reserved   += src->reserved;
+       dst->nr_inodes  += src->nr_inodes;
+
        preempt_enable();
        percpu_up_read(&c->mark_lock);
 
@@ -761,6 +745,34 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
                bch2_trans_inconsistent(trans,
                                        "disk usage increased %lli more than %llu sectors reserved)",
                                        should_not_have_added, disk_res_sectors);
+}
+
+int bch2_trans_fs_usage_apply(struct btree_trans *trans,
+                             struct replicas_delta_list *deltas)
+{
+       struct bch_fs *c = trans->c;
+       struct replicas_delta *d, *d2;
+       struct replicas_delta *top = (void *) deltas->d + deltas->used;
+       struct bch_fs_usage *dst;
+       unsigned i;
+
+       percpu_down_read(&c->mark_lock);
+       preempt_disable();
+       dst = fs_usage_ptr(c, trans->journal_res.seq, false);
+
+       for (d = deltas->d; d != top; d = replicas_delta_next(d))
+               if (__update_replicas(c, dst, &d->r, d->delta))
+                       goto need_mark;
+
+       dst->b.nr_inodes += deltas->nr_inodes;
+
+       for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+               dst->b.reserved                 += deltas->persistent_reserved[i];
+               dst->persistent_reserved[i]     += deltas->persistent_reserved[i];
+       }
+
+       preempt_enable();
+       percpu_up_read(&c->mark_lock);
        return 0;
 need_mark:
        /* revert changes: */
@@ -1084,7 +1096,7 @@ static int __trigger_reservation(struct btree_trans *trans,
                struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage_gc);
 
                replicas = min(replicas, ARRAY_SIZE(fs_usage->persistent_reserved));
-               fs_usage->reserved                              += sectors;
+               fs_usage->b.reserved                            += sectors;
                fs_usage->persistent_reserved[replicas - 1]     += sectors;
 
                preempt_enable();
@@ -1130,9 +1142,9 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
                        "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
                        "while marking %s",
                        iter.pos.inode, iter.pos.offset, a->v.gen,
-                       bch2_data_types[a->v.data_type],
-                       bch2_data_types[type],
-                       bch2_data_types[type]);
+                       bch2_data_type_str(a->v.data_type),
+                       bch2_data_type_str(type),
+                       bch2_data_type_str(type));
                ret = -EIO;
                goto err;
        }
index 2c95cc5d86be661c6d6a0783d366d5d8b8b919d7..6387e039f7897534e27c207dd3818dc4b6afb3b7 100644 (file)
@@ -356,6 +356,8 @@ int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
        ret;                                                                                    \
 })
 
+void bch2_trans_account_disk_usage_change(struct btree_trans *);
+
 void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
 int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
 
@@ -385,6 +387,21 @@ static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
        return false;
 }
 
+static inline const char *bch2_data_type_str(enum bch_data_type type)
+{
+       return type < BCH_DATA_NR
+               ? __bch2_data_types[type]
+               : "(invalid data type)";
+}
+
+static inline void bch2_prt_data_type(struct printbuf *out, enum bch_data_type type)
+{
+       if (type < BCH_DATA_NR)
+               prt_str(out, __bch2_data_types[type]);
+       else
+               prt_printf(out, "(invalid data type %u)", type);
+}
+
 /* disk reservations: */
 
 static inline void bch2_disk_reservation_put(struct bch_fs *c,
index 783f71017204cafa0277644a6d1b5564c779d366..6a31740222a7132e3f0735675ba63ed3402f00a8 100644 (file)
@@ -45,23 +45,18 @@ struct bch_dev_usage {
        }                       d[BCH_DATA_NR];
 };
 
-struct bch_fs_usage {
-       /* all fields are in units of 512 byte sectors: */
+struct bch_fs_usage_base {
        u64                     hidden;
        u64                     btree;
        u64                     data;
        u64                     cached;
        u64                     reserved;
        u64                     nr_inodes;
+};
 
-       /* XXX: add stats for compression ratio */
-#if 0
-       u64                     uncompressed;
-       u64                     compressed;
-#endif
-
-       /* broken out: */
-
+struct bch_fs_usage {
+       /* all fields are in units of 512 byte sectors: */
+       struct bch_fs_usage_base b;
        u64                     persistent_reserved[BCH_REPLICAS_MAX];
        u64                     replicas[];
 };
index f41889093a2c7eacaa1723667fc7bb2af5d0f3aa..3636444511064b51e5a004b953eacf94e7c70d12 100644 (file)
@@ -109,7 +109,7 @@ void bch2_kthread_io_clock_wait(struct io_clock *clock,
        if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
                mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);
 
-       while (1) {
+       do {
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread && kthread_should_stop())
                        break;
@@ -119,7 +119,7 @@ void bch2_kthread_io_clock_wait(struct io_clock *clock,
 
                schedule();
                try_to_freeze();
-       }
+       } while (0);
 
        __set_current_state(TASK_RUNNING);
        del_timer_sync(&wait.cpu_timer);
index 607fd5e232c902dbb39f3dac84ea2e214e6b106c..58c2eb45570ff022764720f9beb10ecfa2926367 100644 (file)
@@ -47,6 +47,14 @@ static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v)
        return __bch2_compression_opt_to_type[bch2_compression_decode(v).type];
 }
 
+static inline void bch2_prt_compression_type(struct printbuf *out, enum bch_compression_type type)
+{
+       if (type < BCH_COMPRESSION_TYPE_NR)
+               prt_str(out, __bch2_compression_types[type]);
+       else
+               prt_printf(out, "(invalid compression type %u)", type);
+}
+
 int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *,
                                struct bch_extent_crc_unpacked *);
 int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *,
index 6f13477ff652e9e0552b9fbbb49009a5651d6d76..4150feca42a2e65e63a59234a3e806ebbd09e1ac 100644 (file)
@@ -285,9 +285,7 @@ restart_drop_extra_replicas:
                                                k.k->p, bkey_start_pos(&insert->k)) ?:
                        bch2_insert_snapshot_whiteouts(trans, m->btree_id,
                                                k.k->p, insert->k.p) ?:
-                       bch2_bkey_set_needs_rebalance(c, insert,
-                                                     op->opts.background_target,
-                                                     op->opts.background_compression) ?:
+                       bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
                        bch2_trans_update(trans, &iter, insert,
                                BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
                        bch2_trans_commit(trans, &op->res,
@@ -529,7 +527,7 @@ int bch2_data_update_init(struct btree_trans *trans,
                BCH_WRITE_DATA_ENCODED|
                BCH_WRITE_MOVE|
                m->data_opts.write_flags;
-       m->op.compression_opt   = io_opts.background_compression ?: io_opts.compression;
+       m->op.compression_opt   = background_compression(io_opts);
        m->op.watermark         = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
 
        bkey_for_each_ptr(ptrs, ptr)
index d6418948495f8392898178dd9b350b1829a24aae..7bdba8507fc93cdfdecc29de3e70e5589cf8177b 100644 (file)
@@ -44,19 +44,19 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
                return false;
 
        bio = bio_alloc_bioset(ca->disk_sb.bdev,
-                              buf_pages(n_sorted, btree_bytes(c)),
+                              buf_pages(n_sorted, btree_buf_bytes(b)),
                               REQ_OP_READ|REQ_META,
                               GFP_NOFS,
                               &c->btree_bio);
        bio->bi_iter.bi_sector  = pick.ptr.offset;
-       bch2_bio_map(bio, n_sorted, btree_bytes(c));
+       bch2_bio_map(bio, n_sorted, btree_buf_bytes(b));
 
        submit_bio_wait(bio);
 
        bio_put(bio);
        percpu_ref_put(&ca->io_ref);
 
-       memcpy(n_ondisk, n_sorted, btree_bytes(c));
+       memcpy(n_ondisk, n_sorted, btree_buf_bytes(b));
 
        v->written = 0;
        if (bch2_btree_node_read_done(c, ca, v, false, &saw_error) || saw_error)
@@ -137,7 +137,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
        mutex_lock(&c->verify_lock);
 
        if (!c->verify_ondisk) {
-               c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
+               c->verify_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
                if (!c->verify_ondisk)
                        goto out;
        }
@@ -199,19 +199,19 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
                return;
        }
 
-       n_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
+       n_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
        if (!n_ondisk) {
                prt_printf(out, "memory allocation failure\n");
                goto out;
        }
 
        bio = bio_alloc_bioset(ca->disk_sb.bdev,
-                              buf_pages(n_ondisk, btree_bytes(c)),
+                              buf_pages(n_ondisk, btree_buf_bytes(b)),
                               REQ_OP_READ|REQ_META,
                               GFP_NOFS,
                               &c->btree_bio);
        bio->bi_iter.bi_sector  = pick.ptr.offset;
-       bch2_bio_map(bio, n_ondisk, btree_bytes(c));
+       bch2_bio_map(bio, n_ondisk, btree_buf_bytes(b));
 
        ret = submit_bio_wait(bio);
        if (ret) {
@@ -293,7 +293,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
 out:
        if (bio)
                bio_put(bio);
-       kvpfree(n_ondisk, btree_bytes(c));
+       kvpfree(n_ondisk, btree_buf_bytes(b));
        percpu_ref_put(&ca->io_ref);
 }
 
@@ -627,7 +627,7 @@ restart:
                prt_printf(&i->buf, "backtrace:");
                prt_newline(&i->buf);
                printbuf_indent_add(&i->buf, 2);
-               bch2_prt_task_backtrace(&i->buf, task, 0);
+               bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
                printbuf_indent_sub(&i->buf, 2);
                prt_newline(&i->buf);
 
diff --git a/fs/bcachefs/dirent_format.h b/fs/bcachefs/dirent_format.h
new file mode 100644 (file)
index 0000000..5e116b8
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DIRENT_FORMAT_H
+#define _BCACHEFS_DIRENT_FORMAT_H
+
+/*
+ * Dirents (and xattrs) have to implement string lookups; since our b-tree
+ * doesn't support arbitrary length strings for the key, we instead index by a
+ * 64 bit hash (currently truncated sha1) of the string, stored in the offset
+ * field of the key - using linear probing to resolve hash collisions. This also
+ * provides us with the readdir cookie posix requires.
+ *
+ * Linear probing requires us to use whiteouts for deletions, in the event of a
+ * collision:
+ */
+
+struct bch_dirent {
+       struct bch_val          v;
+
+       /* Target inode number: */
+       union {
+       __le64                  d_inum;
+       struct {                /* DT_SUBVOL */
+       __le32                  d_child_subvol;
+       __le32                  d_parent_subvol;
+       };
+       };
+
+       /*
+        * Copy of mode bits 12-15 from the target inode - so userspace can get
+        * the filetype without having to do a stat()
+        */
+       __u8                    d_type;
+
+       __u8                    d_name[];
+} __packed __aligned(8);
+
+#define DT_SUBVOL      16
+#define BCH_DT_MAX     17
+
+#define BCH_NAME_MAX   512
+
+#endif /* _BCACHEFS_DIRENT_FORMAT_H */
index d802bc63c8d0b4832bd8062ce827c8af180361e6..d503af2700247d8aa1257962c37df9b042ee55ec 100644 (file)
@@ -190,7 +190,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
                                               a->v.stripe_redundancy, trans,
                                "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
                                iter.pos.inode, iter.pos.offset, a->v.gen,
-                               bch2_data_types[a->v.data_type],
+                               bch2_data_type_str(a->v.data_type),
                                a->v.dirty_sectors,
                                a->v.stripe, s.k->p.offset)) {
                        ret = -EIO;
@@ -200,7 +200,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
                if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
                                "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
                                iter.pos.inode, iter.pos.offset, a->v.gen,
-                               bch2_data_types[a->v.data_type],
+                               bch2_data_type_str(a->v.data_type),
                                a->v.dirty_sectors,
                                s.k->p.offset)) {
                        ret = -EIO;
@@ -367,7 +367,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
                }
        }
 
-       if (!(flags & (BTREE_TRIGGER_TRANSACTIONAL|BTREE_TRIGGER_GC))) {
+       if (flags & BTREE_TRIGGER_ATOMIC) {
                struct stripe *m = genradix_ptr(&c->stripes, idx);
 
                if (!m) {
diff --git a/fs/bcachefs/ec_format.h b/fs/bcachefs/ec_format.h
new file mode 100644 (file)
index 0000000..44ce88b
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_EC_FORMAT_H
+#define _BCACHEFS_EC_FORMAT_H
+
+struct bch_stripe {
+       struct bch_val          v;
+       __le16                  sectors;
+       __u8                    algorithm;
+       __u8                    nr_blocks;
+       __u8                    nr_redundant;
+
+       __u8                    csum_granularity_bits;
+       __u8                    csum_type;
+       __u8                    pad;
+
+       struct bch_extent_ptr   ptrs[];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_EC_FORMAT_H */
index 82ec056f4cdbb1f4e4234fce274939b61b7a5015..61395b113df9bdad67c0da7d2a4cc4f99664bc4e 100644 (file)
@@ -8,6 +8,7 @@
 
 #include "bcachefs.h"
 #include "bkey_methods.h"
+#include "btree_cache.h"
 #include "btree_gc.h"
 #include "btree_io.h"
 #include "btree_iter.h"
@@ -1018,12 +1019,12 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                        struct bch_extent_crc_unpacked crc =
                                bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
 
-                       prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
+                       prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress ",
                               crc.compressed_size,
                               crc.uncompressed_size,
                               crc.offset, crc.nonce,
-                              bch2_csum_types[crc.csum_type],
-                              bch2_compression_types[crc.compression_type]);
+                              bch2_csum_types[crc.csum_type]);
+                       bch2_prt_compression_type(out, crc.compression_type);
                        break;
                }
                case BCH_EXTENT_ENTRY_stripe_ptr: {
@@ -1334,10 +1335,12 @@ bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
 }
 
 int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
-                                 unsigned target, unsigned compression)
+                                 struct bch_io_opts *opts)
 {
        struct bkey_s k = bkey_i_to_s(_k);
        struct bch_extent_rebalance *r;
+       unsigned target = opts->background_target;
+       unsigned compression = background_compression(*opts);
        bool needs_rebalance;
 
        if (!bkey_extent_is_direct_data(k.k))
index a855c94d43ddb4f770f69807401f6d9dd5f66cbf..6bf839d69e84e6e24ed3bf2bf611177fc04676e1 100644 (file)
@@ -708,7 +708,7 @@ unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c,
 bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c);
 
 int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *,
-                                 unsigned, unsigned);
+                                 struct bch_io_opts *);
 
 /* Generic extent code: */
 
diff --git a/fs/bcachefs/extents_format.h b/fs/bcachefs/extents_format.h
new file mode 100644 (file)
index 0000000..3bd2fdb
--- /dev/null
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_EXTENTS_FORMAT_H
+#define _BCACHEFS_EXTENTS_FORMAT_H
+
+/*
+ * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
+ * preceded by checksum/compression information (bch_extent_crc32 or
+ * bch_extent_crc64).
+ *
+ * One major determining factor in the format of extents is how we handle and
+ * represent extents that have been partially overwritten and thus trimmed:
+ *
+ * If an extent is not checksummed or compressed, when the extent is trimmed we
+ * don't have to remember the extent we originally allocated and wrote: we can
+ * merely adjust ptr->offset to point to the start of the data that is currently
+ * live. The size field in struct bkey records the current (live) size of the
+ * extent, and is also used to mean "size of region on disk that we point to" in
+ * this case.
+ *
+ * Thus an extent that is not checksummed or compressed will consist only of a
+ * list of bch_extent_ptrs, with none of the fields in
+ * bch_extent_crc32/bch_extent_crc64.
+ *
+ * When an extent is checksummed or compressed, it's not possible to read only
+ * the data that is currently live: we have to read the entire extent that was
+ * originally written, and then return only the part of the extent that is
+ * currently live.
+ *
+ * Thus, in addition to the current size of the extent in struct bkey, we need
+ * to store the size of the originally allocated space - this is the
+ * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
+ * when the extent is trimmed, instead of modifying the offset field of the
+ * pointer, we keep a second smaller offset field - "offset into the original
+ * extent of the currently live region".
+ *
+ * The other major determining factor is replication and data migration:
+ *
+ * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
+ * write, we will initially write all the replicas in the same format, with the
+ * same checksum type and compression format - however, when copygc runs later (or
+ * tiering/cache promotion, anything that moves data), it is not in general
+ * going to rewrite all the pointers at once - one of the replicas may be in a
+ * bucket on one device that has very little fragmentation while another lives
+ * in a bucket that has become heavily fragmented, and thus is being rewritten
+ * sooner than the rest.
+ *
+ * Thus it will only move a subset of the pointers (or in the case of
+ * tiering/cache promotion perhaps add a single pointer without dropping any
+ * current pointers), and if the extent has been partially overwritten it must
+ * write only the currently live portion (or copygc would not be able to reduce
+ * fragmentation!) - which necessitates a different bch_extent_crc format for
+ * the new pointer.
+ *
+ * But in the interests of space efficiency, we don't want to store one
+ * bch_extent_crc for each pointer if we don't have to.
+ *
+ * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
+ * bch_extent_ptrs appended arbitrarily one after the other. We determine the
+ * type of a given entry with a scheme similar to utf8 (except we're encoding a
+ * type, not a size), encoding the type in the position of the first set bit:
+ *
+ * bch_extent_crc32    - 0b1
+ * bch_extent_ptr      - 0b10
+ * bch_extent_crc64    - 0b100
+ *
+ * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
+ * bch_extent_crc64 is the least constrained).
+ *
+ * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
+ * until the next bch_extent_crc32/64.
+ *
+ * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
+ * is neither checksummed nor compressed.
+ */
+
+#define BCH_EXTENT_ENTRY_TYPES()               \
+       x(ptr,                  0)              \
+       x(crc32,                1)              \
+       x(crc64,                2)              \
+       x(crc128,               3)              \
+       x(stripe_ptr,           4)              \
+       x(rebalance,            5)
+#define BCH_EXTENT_ENTRY_MAX   6
+
+enum bch_extent_entry_type {
+#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
+       BCH_EXTENT_ENTRY_TYPES()
+#undef x
+};
+
+/* Compressed/uncompressed size are stored biased by 1: */
+struct bch_extent_crc32 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u32                   type:2,
+                               _compressed_size:7,
+                               _uncompressed_size:7,
+                               offset:7,
+                               _unused:1,
+                               csum_type:4,
+                               compression_type:4;
+       __u32                   csum;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u32                   csum;
+       __u32                   compression_type:4,
+                               csum_type:4,
+                               _unused:1,
+                               offset:7,
+                               _uncompressed_size:7,
+                               _compressed_size:7,
+                               type:2;
+#endif
+} __packed __aligned(8);
+
+#define CRC32_SIZE_MAX         (1U << 7)
+#define CRC32_NONCE_MAX                0
+
+struct bch_extent_crc64 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:3,
+                               _compressed_size:9,
+                               _uncompressed_size:9,
+                               offset:9,
+                               nonce:10,
+                               csum_type:4,
+                               compression_type:4,
+                               csum_hi:16;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   csum_hi:16,
+                               compression_type:4,
+                               csum_type:4,
+                               nonce:10,
+                               offset:9,
+                               _uncompressed_size:9,
+                               _compressed_size:9,
+                               type:3;
+#endif
+       __u64                   csum_lo;
+} __packed __aligned(8);
+
+#define CRC64_SIZE_MAX         (1U << 9)
+#define CRC64_NONCE_MAX                ((1U << 10) - 1)
+
+struct bch_extent_crc128 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:4,
+                               _compressed_size:13,
+                               _uncompressed_size:13,
+                               offset:13,
+                               nonce:13,
+                               csum_type:4,
+                               compression_type:4;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   compression_type:4,
+                               csum_type:4,
+                               nonce:13,
+                               offset:13,
+                               _uncompressed_size:13,
+                               _compressed_size:13,
+                               type:4;
+#endif
+       struct bch_csum         csum;
+} __packed __aligned(8);
+
+#define CRC128_SIZE_MAX                (1U << 13)
+#define CRC128_NONCE_MAX       ((1U << 13) - 1)
+
+/*
+ * @reservation - pointer hasn't been written to, just reserved
+ */
+struct bch_extent_ptr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:1,
+                               cached:1,
+                               unused:1,
+                               unwritten:1,
+                               offset:44, /* 8 petabytes */
+                               dev:8,
+                               gen:8;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   gen:8,
+                               dev:8,
+                               offset:44,
+                               unwritten:1,
+                               unused:1,
+                               cached:1,
+                               type:1;
+#endif
+} __packed __aligned(8);
+
+struct bch_extent_stripe_ptr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:5,
+                               block:8,
+                               redundancy:4,
+                               idx:47;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   idx:47,
+                               redundancy:4,
+                               block:8,
+                               type:5;
+#endif
+};
+
+struct bch_extent_rebalance {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:6,
+                               unused:34,
+                               compression:8, /* enum bch_compression_opt */
+                               target:16;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   target:16,
+                               compression:8,
+                               unused:34,
+                               type:6;
+#endif
+};
+
+union bch_extent_entry {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ||  __BITS_PER_LONG == 64
+       unsigned long                   type;
+#elif __BITS_PER_LONG == 32
+       struct {
+               unsigned long           pad;
+               unsigned long           type;
+       };
+#else
+#error edit for your odd byteorder.
+#endif
+
+#define x(f, n) struct bch_extent_##f  f;
+       BCH_EXTENT_ENTRY_TYPES()
+#undef x
+};
+
+struct bch_btree_ptr {
+       struct bch_val          v;
+
+       __u64                   _data[0];
+       struct bch_extent_ptr   start[];
+} __packed __aligned(8);
+
+struct bch_btree_ptr_v2 {
+       struct bch_val          v;
+
+       __u64                   mem_ptr;
+       __le64                  seq;
+       __le16                  sectors_written;
+       __le16                  flags;
+       struct bpos             min_key;
+       __u64                   _data[0];
+       struct bch_extent_ptr   start[];
+} __packed __aligned(8);
+
+LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,  struct bch_btree_ptr_v2, flags, 0, 1);
+
+struct bch_extent {
+       struct bch_val          v;
+
+       __u64                   _data[0];
+       union bch_extent_entry  start[];
+} __packed __aligned(8);
+
+/* Maximum size (in u64s) a single pointer could be: */
+#define BKEY_EXTENT_PTR_U64s_MAX\
+       ((sizeof(struct bch_extent_crc128) +                    \
+         sizeof(struct bch_extent_ptr)) / sizeof(__u64))
+
+/* Maximum possible size of an entire extent value: */
+#define BKEY_EXTENT_VAL_U64s_MAX                               \
+       (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
+
+/* * Maximum possible size of an entire extent, key + value: */
+#define BKEY_EXTENT_U64s_MAX           (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
+
+/* Btree pointers don't carry around checksums: */
+#define BKEY_BTREE_PTR_VAL_U64s_MAX                            \
+       ((sizeof(struct bch_btree_ptr_v2) +                     \
+         sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
+#define BKEY_BTREE_PTR_U64s_MAX                                        \
+       (BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
+
+struct bch_reservation {
+       struct bch_val          v;
+
+       __le32                  generation;
+       __u8                    nr_replicas;
+       __u8                    pad[3];
+} __packed __aligned(8);
+
+struct bch_inline_data {
+       struct bch_val          v;
+       u8                      data[];
+};
+
+#endif /* _BCACHEFS_EXTENTS_FORMAT_H */
index 9637f636e32d508571a5908c536b48b8e3ed792c..b04750dbf870bc78c95ece35d363e3a4c0936b50 100644 (file)
@@ -156,7 +156,7 @@ static inline unsigned inorder_to_eytzinger1(unsigned i, unsigned size)
 }
 
 #define eytzinger1_for_each(_i, _size)                 \
-       for ((_i) = eytzinger1_first((_size));          \
+       for (unsigned (_i) = eytzinger1_first((_size)); \
             (_i) != 0;                                 \
             (_i) = eytzinger1_next((_i), (_size)))
 
@@ -227,7 +227,7 @@ static inline unsigned inorder_to_eytzinger0(unsigned i, unsigned size)
 }
 
 #define eytzinger0_for_each(_i, _size)                 \
-       for ((_i) = eytzinger0_first((_size));          \
+       for (unsigned (_i) = eytzinger0_first((_size)); \
             (_i) != -1;                                \
             (_i) = eytzinger0_next((_i), (_size)))
 
index fdd57c5785c9cebf609959fb753ee30e55e85b92..e3b219e19e1008ccfe1ff61e966115795f9c1831 100644 (file)
@@ -77,6 +77,10 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
 
        bch2_inode_opts_get(&opts, c, &inode->ei_inode);
 
+       /* bios must be 512 byte aligned: */
+       if ((offset|iter->count) & (SECTOR_SIZE - 1))
+               return -EINVAL;
+
        ret = min_t(loff_t, iter->count,
                    max_t(loff_t, 0, i_size_read(&inode->v) - offset));
 
index ff664fd0d8ef80e8b4816d7c430e87d41759b498..d359aa9b33b828342bd466b899713f401d939b30 100644 (file)
@@ -309,39 +309,49 @@ void bch2_mark_pagecache_unallocated(struct bch_inode_info *inode,
        }
 }
 
-void bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
-                                 u64 start, u64 end)
+int bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
+                                u64 *start, u64 end,
+                                bool nonblocking)
 {
        struct bch_fs *c = inode->v.i_sb->s_fs_info;
-       pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+       pgoff_t index = *start >> PAGE_SECTORS_SHIFT;
        pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
        struct folio_batch fbatch;
        s64 i_sectors_delta = 0;
-       unsigned i, j;
+       int ret = 0;
 
-       if (end <= start)
-               return;
+       if (end <= *start)
+               return 0;
 
        folio_batch_init(&fbatch);
 
        while (filemap_get_folios(inode->v.i_mapping,
                                  &index, end_index, &fbatch)) {
-               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+               for (unsigned i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
+
+                       if (!nonblocking)
+                               folio_lock(folio);
+                       else if (!folio_trylock(folio)) {
+                               folio_batch_release(&fbatch);
+                               ret = -EAGAIN;
+                               break;
+                       }
+
                        u64 folio_start = folio_sector(folio);
                        u64 folio_end = folio_end_sector(folio);
-                       unsigned folio_offset = max(start, folio_start) - folio_start;
-                       unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
-                       struct bch_folio *s;
 
                        BUG_ON(end <= folio_start);
 
-                       folio_lock(folio);
-                       s = bch2_folio(folio);
+                       *start = min(end, folio_end);
 
+                       struct bch_folio *s = bch2_folio(folio);
                        if (s) {
+                               unsigned folio_offset = max(*start, folio_start) - folio_start;
+                               unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+
                                spin_lock(&s->lock);
-                               for (j = folio_offset; j < folio_offset + folio_len; j++) {
+                               for (unsigned j = folio_offset; j < folio_offset + folio_len; j++) {
                                        i_sectors_delta -= s->s[j].state == SECTOR_dirty;
                                        bch2_folio_sector_set(folio, s, j,
                                                folio_sector_reserve(s->s[j].state));
@@ -356,6 +366,7 @@ void bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
        }
 
        bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
+       return ret;
 }
 
 static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
index 27f712ae37a68209275cc3b2955a542314e80e68..8cbaba6565b4493695d679fe41553c197468c752 100644 (file)
@@ -143,7 +143,7 @@ int bch2_folio_set(struct bch_fs *, subvol_inum, struct folio **, unsigned);
 void bch2_bio_page_state_set(struct bio *, struct bkey_s_c);
 
 void bch2_mark_pagecache_unallocated(struct bch_inode_info *, u64, u64);
-void bch2_mark_pagecache_reserved(struct bch_inode_info *, u64, u64);
+int bch2_mark_pagecache_reserved(struct bch_inode_info *, u64 *, u64, bool);
 
 int bch2_get_folio_disk_reservation(struct bch_fs *,
                                struct bch_inode_info *,
index 98bd5babab193bec842dce20b0783e6c958ac5bf..8c70123b6a0c809b6d50040593281c2e9c115828 100644 (file)
@@ -79,7 +79,7 @@ void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
                        continue;
 
                bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
-                                                   REQ_OP_FLUSH,
+                                                   REQ_OP_WRITE|REQ_PREFLUSH,
                                                    GFP_KERNEL,
                                                    &c->nocow_flush_bioset),
                                   struct nocow_flush, bio);
@@ -675,8 +675,11 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
 
                bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
 
-               drop_locks_do(trans,
-                       (bch2_mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0));
+               if (bch2_mark_pagecache_reserved(inode, &hole_start,
+                                                iter.pos.offset, true))
+                       drop_locks_do(trans,
+                               bch2_mark_pagecache_reserved(inode, &hole_start,
+                                                            iter.pos.offset, false));
 bkey_err:
                bch2_quota_reservation_put(c, inode, &quota_res);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
index 946cc610eef5ccc020171cecae62b1d8bab2e0ae..3dc8630ff9fe139bd44317d72502ed9bf1f73751 100644 (file)
@@ -337,11 +337,12 @@ static long __bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
        if (arg.flags & BCH_SUBVOL_SNAPSHOT_RO)
                create_flags |= BCH_CREATE_SNAPSHOT_RO;
 
-       /* why do we need this lock? */
-       down_read(&c->vfs_sb->s_umount);
-
-       if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE)
+       if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) {
+               /* sync_inodes_sb enforce s_umount is locked */
+               down_read(&c->vfs_sb->s_umount);
                sync_inodes_sb(c->vfs_sb);
+               up_read(&c->vfs_sb->s_umount);
+       }
 retry:
        if (arg.src_ptr) {
                error = user_path_at(arg.dirfd,
@@ -425,8 +426,6 @@ err2:
                goto retry;
        }
 err1:
-       up_read(&c->vfs_sb->s_umount);
-
        return error;
 }
 
@@ -443,33 +442,36 @@ static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
 static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
                                struct bch_ioctl_subvolume arg)
 {
+       const char __user *name = (void __user *)(unsigned long)arg.dst_ptr;
        struct path path;
        struct inode *dir;
+       struct dentry *victim;
        int ret = 0;
 
        if (arg.flags)
                return -EINVAL;
 
-       ret = user_path_at(arg.dirfd,
-                       (const char __user *)(unsigned long)arg.dst_ptr,
-                       LOOKUP_FOLLOW, &path);
-       if (ret)
-               return ret;
+       victim = user_path_locked_at(arg.dirfd, name, &path);
+       if (IS_ERR(victim))
+               return PTR_ERR(victim);
 
-       if (path.dentry->d_sb->s_fs_info != c) {
+       dir = d_inode(path.dentry);
+       if (victim->d_sb->s_fs_info != c) {
                ret = -EXDEV;
                goto err;
        }
-
-       dir = path.dentry->d_parent->d_inode;
-
-       ret = __bch2_unlink(dir, path.dentry, true);
-       if (ret)
+       if (!d_is_positive(victim)) {
+               ret = -ENOENT;
                goto err;
-
-       fsnotify_rmdir(dir, path.dentry);
-       d_delete(path.dentry);
+       }
+       ret = __bch2_unlink(dir, victim, true);
+       if (!ret) {
+               fsnotify_rmdir(dir, victim);
+               d_delete(victim);
+       }
 err:
+       inode_unlock(dir);
+       dput(victim);
        path_put(&path);
        return ret;
 }
index 4f0ecd60567570b7364cef517225ea0e3dfa5575..6a760777bafb06d08b449ee0db4308a77b54b11e 100644 (file)
@@ -119,22 +119,19 @@ static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
        if (!ret)
                *snapshot = iter.pos.snapshot;
 err:
-       bch_err_msg(trans->c, ret, "fetching inode %llu:%u", inode_nr, *snapshot);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
 }
 
-static int __lookup_dirent(struct btree_trans *trans,
+static int lookup_dirent_in_snapshot(struct btree_trans *trans,
                           struct bch_hash_info hash_info,
                           subvol_inum dir, struct qstr *name,
-                          u64 *target, unsigned *type)
+                          u64 *target, unsigned *type, u32 snapshot)
 {
        struct btree_iter iter;
        struct bkey_s_c_dirent d;
-       int ret;
-
-       ret = bch2_hash_lookup(trans, &iter, bch2_dirent_hash_desc,
-                              &hash_info, dir, name, 0);
+       int ret = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
+                              &hash_info, dir, name, 0, snapshot);
        if (ret)
                return ret;
 
@@ -225,15 +222,16 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
 
        struct bch_inode_unpacked root_inode;
        struct bch_hash_info root_hash_info;
-       ret = lookup_inode(trans, root_inum.inum, &root_inode, &snapshot);
+       u32 root_inode_snapshot = snapshot;
+       ret = lookup_inode(trans, root_inum.inum, &root_inode, &root_inode_snapshot);
        bch_err_msg(c, ret, "looking up root inode");
        if (ret)
                return ret;
 
        root_hash_info = bch2_hash_info_init(c, &root_inode);
 
-       ret = __lookup_dirent(trans, root_hash_info, root_inum,
-                             &lostfound_str, &inum, &d_type);
+       ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum,
+                             &lostfound_str, &inum, &d_type, snapshot);
        if (bch2_err_matches(ret, ENOENT))
                goto create_lostfound;
 
@@ -250,7 +248,10 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
         * The bch2_check_dirents pass has already run, dangling dirents
         * shouldn't exist here:
         */
-       return lookup_inode(trans, inum, lostfound, &snapshot);
+       ret = lookup_inode(trans, inum, lostfound, &snapshot);
+       bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root %u)",
+                   inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot));
+       return ret;
 
 create_lostfound:
        /*
index 37dce96f48ac42d28b98d99e75a77b049e04de8f..086f0090b03a4015388dce49388ba5951940cb0a 100644 (file)
@@ -506,22 +506,33 @@ fsck_err:
 static void __bch2_inode_unpacked_to_text(struct printbuf *out,
                                          struct bch_inode_unpacked *inode)
 {
-       prt_printf(out, "mode=%o ", inode->bi_mode);
+       printbuf_indent_add(out, 2);
+       prt_printf(out, "mode=%o", inode->bi_mode);
+       prt_newline(out);
 
        prt_str(out, "flags=");
        prt_bitflags(out, bch2_inode_flag_strs, inode->bi_flags & ((1U << 20) - 1));
        prt_printf(out, " (%x)", inode->bi_flags);
+       prt_newline(out);
 
-       prt_printf(out, " journal_seq=%llu bi_size=%llu bi_sectors=%llu bi_version=%llu",
-              inode->bi_journal_seq,
-              inode->bi_size,
-              inode->bi_sectors,
-              inode->bi_version);
+       prt_printf(out, "journal_seq=%llu", inode->bi_journal_seq);
+       prt_newline(out);
+
+       prt_printf(out, "bi_size=%llu", inode->bi_size);
+       prt_newline(out);
+
+       prt_printf(out, "bi_sectors=%llu", inode->bi_sectors);
+       prt_newline(out);
+
+       prt_newline(out);
+       prt_printf(out, "bi_version=%llu", inode->bi_version);
 
 #define x(_name, _bits)                                                \
-       prt_printf(out, " "#_name "=%llu", (u64) inode->_name);
+       prt_printf(out, #_name "=%llu", (u64) inode->_name);    \
+       prt_newline(out);
        BCH_INODE_FIELDS_v3()
 #undef  x
+       printbuf_indent_sub(out, 2);
 }
 
 void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode)
@@ -587,7 +598,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
                }
        }
 
-       if (!(flags & BTREE_TRIGGER_TRANSACTIONAL) && (flags & BTREE_TRIGGER_INSERT)) {
+       if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
                BUG_ON(!trans->journal_res.seq);
 
                bkey_s_to_inode_v3(new).v->bi_journal_seq = cpu_to_le64(trans->journal_res.seq);
@@ -597,7 +608,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
                struct bch_fs *c = trans->c;
 
                percpu_down_read(&c->mark_lock);
-               this_cpu_add(c->usage_gc->nr_inodes, nr);
+               this_cpu_add(c->usage_gc->b.nr_inodes, nr);
                percpu_up_read(&c->mark_lock);
        }
 
diff --git a/fs/bcachefs/inode_format.h b/fs/bcachefs/inode_format.h
new file mode 100644 (file)
index 0000000..83d1073
--- /dev/null
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_INODE_FORMAT_H
+#define _BCACHEFS_INODE_FORMAT_H
+
+#define BLOCKDEV_INODE_MAX     4096
+#define BCACHEFS_ROOT_INO      4096
+
+struct bch_inode {
+       struct bch_val          v;
+
+       __le64                  bi_hash_seed;
+       __le32                  bi_flags;
+       __le16                  bi_mode;
+       __u8                    fields[];
+} __packed __aligned(8);
+
+struct bch_inode_v2 {
+       struct bch_val          v;
+
+       __le64                  bi_journal_seq;
+       __le64                  bi_hash_seed;
+       __le64                  bi_flags;
+       __le16                  bi_mode;
+       __u8                    fields[];
+} __packed __aligned(8);
+
+struct bch_inode_v3 {
+       struct bch_val          v;
+
+       __le64                  bi_journal_seq;
+       __le64                  bi_hash_seed;
+       __le64                  bi_flags;
+       __le64                  bi_sectors;
+       __le64                  bi_size;
+       __le64                  bi_version;
+       __u8                    fields[];
+} __packed __aligned(8);
+
+#define INODEv3_FIELDS_START_INITIAL   6
+#define INODEv3_FIELDS_START_CUR       (offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
+
+struct bch_inode_generation {
+       struct bch_val          v;
+
+       __le32                  bi_generation;
+       __le32                  pad;
+} __packed __aligned(8);
+
+/*
+ * bi_subvol and bi_parent_subvol are only set for subvolume roots:
+ */
+
+#define BCH_INODE_FIELDS_v2()                  \
+       x(bi_atime,                     96)     \
+       x(bi_ctime,                     96)     \
+       x(bi_mtime,                     96)     \
+       x(bi_otime,                     96)     \
+       x(bi_size,                      64)     \
+       x(bi_sectors,                   64)     \
+       x(bi_uid,                       32)     \
+       x(bi_gid,                       32)     \
+       x(bi_nlink,                     32)     \
+       x(bi_generation,                32)     \
+       x(bi_dev,                       32)     \
+       x(bi_data_checksum,             8)      \
+       x(bi_compression,               8)      \
+       x(bi_project,                   32)     \
+       x(bi_background_compression,    8)      \
+       x(bi_data_replicas,             8)      \
+       x(bi_promote_target,            16)     \
+       x(bi_foreground_target,         16)     \
+       x(bi_background_target,         16)     \
+       x(bi_erasure_code,              16)     \
+       x(bi_fields_set,                16)     \
+       x(bi_dir,                       64)     \
+       x(bi_dir_offset,                64)     \
+       x(bi_subvol,                    32)     \
+       x(bi_parent_subvol,             32)
+
+#define BCH_INODE_FIELDS_v3()                  \
+       x(bi_atime,                     96)     \
+       x(bi_ctime,                     96)     \
+       x(bi_mtime,                     96)     \
+       x(bi_otime,                     96)     \
+       x(bi_uid,                       32)     \
+       x(bi_gid,                       32)     \
+       x(bi_nlink,                     32)     \
+       x(bi_generation,                32)     \
+       x(bi_dev,                       32)     \
+       x(bi_data_checksum,             8)      \
+       x(bi_compression,               8)      \
+       x(bi_project,                   32)     \
+       x(bi_background_compression,    8)      \
+       x(bi_data_replicas,             8)      \
+       x(bi_promote_target,            16)     \
+       x(bi_foreground_target,         16)     \
+       x(bi_background_target,         16)     \
+       x(bi_erasure_code,              16)     \
+       x(bi_fields_set,                16)     \
+       x(bi_dir,                       64)     \
+       x(bi_dir_offset,                64)     \
+       x(bi_subvol,                    32)     \
+       x(bi_parent_subvol,             32)     \
+       x(bi_nocow,                     8)
+
+/* subset of BCH_INODE_FIELDS */
+#define BCH_INODE_OPTS()                       \
+       x(data_checksum,                8)      \
+       x(compression,                  8)      \
+       x(project,                      32)     \
+       x(background_compression,       8)      \
+       x(data_replicas,                8)      \
+       x(promote_target,               16)     \
+       x(foreground_target,            16)     \
+       x(background_target,            16)     \
+       x(erasure_code,                 16)     \
+       x(nocow,                        8)
+
+enum inode_opt_id {
+#define x(name, ...)                           \
+       Inode_opt_##name,
+       BCH_INODE_OPTS()
+#undef  x
+       Inode_opt_nr,
+};
+
+#define BCH_INODE_FLAGS()                      \
+       x(sync,                         0)      \
+       x(immutable,                    1)      \
+       x(append,                       2)      \
+       x(nodump,                       3)      \
+       x(noatime,                      4)      \
+       x(i_size_dirty,                 5)      \
+       x(i_sectors_dirty,              6)      \
+       x(unlinked,                     7)      \
+       x(backptr_untrusted,            8)
+
+/* bits 20+ reserved for packed fields below: */
+
+enum bch_inode_flags {
+#define x(t, n)        BCH_INODE_##t = 1U << n,
+       BCH_INODE_FLAGS()
+#undef x
+};
+
+enum __bch_inode_flags {
+#define x(t, n)        __BCH_INODE_##t = n,
+       BCH_INODE_FLAGS()
+#undef x
+};
+
+LE32_BITMASK(INODE_STR_HASH,   struct bch_inode, bi_flags, 20, 24);
+LE32_BITMASK(INODE_NR_FIELDS,  struct bch_inode, bi_flags, 24, 31);
+LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
+
+LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
+LE64_BITMASK(INODEv2_NR_FIELDS,        struct bch_inode_v2, bi_flags, 24, 31);
+
+LE64_BITMASK(INODEv3_STR_HASH, struct bch_inode_v3, bi_flags, 20, 24);
+LE64_BITMASK(INODEv3_NR_FIELDS,        struct bch_inode_v3, bi_flags, 24, 31);
+
+LE64_BITMASK(INODEv3_FIELDS_START,
+                               struct bch_inode_v3, bi_flags, 31, 36);
+LE64_BITMASK(INODEv3_MODE,     struct bch_inode_v3, bi_flags, 36, 52);
+
+#endif /* _BCACHEFS_INODE_FORMAT_H */
index ca6d5f516aa2be80824e7479e73d1cbfc2607117..1baf78594ccaf85d7d89fea4fc938a7f700d6dc0 100644 (file)
@@ -442,9 +442,7 @@ case LOGGED_OP_FINSERT_shift_extents:
 
                op->v.pos = cpu_to_le64(insert ? bkey_start_offset(&delete.k) : delete.k.p.offset);
 
-               ret =   bch2_bkey_set_needs_rebalance(c, copy,
-                                       opts.background_target,
-                                       opts.background_compression) ?:
+               ret =   bch2_bkey_set_needs_rebalance(c, copy, &opts) ?:
                        bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
                        bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?:
                        bch2_logged_op_update(trans, &op->k_i) ?:
index 33c0e783d54697b50c490309726b49eacb410189..ef3a53f9045af2591ab1f9e272dd9d6151250444 100644 (file)
@@ -362,9 +362,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
                                     bkey_start_pos(&sk.k->k),
                                     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
-               ret =   bch2_bkey_set_needs_rebalance(c, sk.k,
-                                       op->opts.background_target,
-                                       op->opts.background_compression) ?:
+               ret =   bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?:
                        bch2_extent_update(trans, inum, &iter, sk.k,
                                        &op->res,
                                        op->new_i_size, &op->i_sectors_delta,
@@ -1447,10 +1445,11 @@ err:
                        op->flags |= BCH_WRITE_DONE;
 
                        if (ret < 0) {
-                               bch_err_inum_offset_ratelimited(c,
-                                       op->pos.inode,
-                                       op->pos.offset << 9,
-                                       "%s(): error: %s", __func__, bch2_err_str(ret));
+                               if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT))
+                                       bch_err_inum_offset_ratelimited(c,
+                                               op->pos.inode,
+                                               op->pos.offset << 9,
+                                               "%s(): error: %s", __func__, bch2_err_str(ret));
                                op->error = ret;
                                break;
                        }
index 8538ef34f62bc54e8bc570acbe793e4771745247..bc890776eb57933a5931edd2a2f07570f52b7ab3 100644 (file)
@@ -27,6 +27,47 @@ static const char * const bch2_journal_errors[] = {
        NULL
 };
 
+static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
+{
+       union journal_res_state s = READ_ONCE(j->reservations);
+       unsigned i = seq & JOURNAL_BUF_MASK;
+       struct journal_buf *buf = j->buf + i;
+
+       prt_printf(out, "seq:");
+       prt_tab(out);
+       prt_printf(out, "%llu", seq);
+       prt_newline(out);
+       printbuf_indent_add(out, 2);
+
+       prt_printf(out, "refcount:");
+       prt_tab(out);
+       prt_printf(out, "%u", journal_state_count(s, i));
+       prt_newline(out);
+
+       prt_printf(out, "size:");
+       prt_tab(out);
+       prt_human_readable_u64(out, vstruct_bytes(buf->data));
+       prt_newline(out);
+
+       prt_printf(out, "expires");
+       prt_tab(out);
+       prt_printf(out, "%li jiffies", buf->expires - jiffies);
+       prt_newline(out);
+
+       printbuf_indent_sub(out, 2);
+}
+
+static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
+{
+       if (!out->nr_tabstops)
+               printbuf_tabstop_push(out, 24);
+
+       for (u64 seq = journal_last_unwritten_seq(j);
+            seq <= journal_cur_seq(j);
+            seq++)
+               bch2_journal_buf_to_text(out, j, seq);
+}
+
 static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
 {
        return seq > j->seq_ondisk;
@@ -156,7 +197,7 @@ void bch2_journal_buf_put_final(struct journal *j, u64 seq, bool write)
  * We don't close a journal_buf until the next journal_buf is finished writing,
  * and can be opened again - this also initializes the next journal_buf:
  */
-static void __journal_entry_close(struct journal *j, unsigned closed_val)
+static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = journal_cur_buf(j);
@@ -185,7 +226,17 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val)
        /* Close out old buffer: */
        buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);
 
-       trace_journal_entry_close(c, vstruct_bytes(buf->data));
+       if (trace_journal_entry_close_enabled() && trace) {
+               struct printbuf pbuf = PRINTBUF;
+               pbuf.atomic++;
+
+               prt_str(&pbuf, "entry size: ");
+               prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
+               prt_newline(&pbuf);
+               bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
+               trace_journal_entry_close(c, pbuf.buf);
+               printbuf_exit(&pbuf);
+       }
 
        sectors = vstruct_blocks_plus(buf->data, c->block_bits,
                                      buf->u64s_reserved) << c->block_bits;
@@ -225,7 +276,7 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val)
 void bch2_journal_halt(struct journal *j)
 {
        spin_lock(&j->lock);
-       __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
+       __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
        if (!j->err_seq)
                j->err_seq = journal_cur_seq(j);
        journal_wake(j);
@@ -239,7 +290,7 @@ static bool journal_entry_want_write(struct journal *j)
 
        /* Don't close it yet if we already have a write in flight: */
        if (ret)
-               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
        else if (nr_unwritten_journal_entries(j)) {
                struct journal_buf *buf = journal_cur_buf(j);
 
@@ -406,7 +457,7 @@ static void journal_write_work(struct work_struct *work)
        if (delta > 0)
                mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
        else
-               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
 unlock:
        spin_unlock(&j->lock);
 }
@@ -463,13 +514,21 @@ retry:
            buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
                j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
 
-       __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+       __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
        ret = journal_entry_open(j);
 
        if (ret == JOURNAL_ERR_max_in_flight) {
                track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight],
                                   &j->max_in_flight_start, true);
-               trace_and_count(c, journal_entry_full, c);
+               if (trace_journal_entry_full_enabled()) {
+                       struct printbuf buf = PRINTBUF;
+                       buf.atomic++;
+
+                       bch2_journal_bufs_to_text(&buf, j);
+                       trace_journal_entry_full(c, buf.buf);
+                       printbuf_exit(&buf);
+               }
+               count_event(c, journal_entry_full);
        }
 unlock:
        can_discard = j->can_discard;
@@ -549,7 +608,7 @@ void bch2_journal_entry_res_resize(struct journal *j,
                /*
                 * Not enough room in current journal entry, have to flush it:
                 */
-               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
        } else {
                journal_cur_buf(j)->u64s_reserved += d;
        }
@@ -606,7 +665,7 @@ recheck_need_open:
                struct journal_res res = { 0 };
 
                if (journal_entry_is_open(j))
-                       __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+                       __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
 
                spin_unlock(&j->lock);
 
@@ -786,7 +845,7 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou
 
                if (buf->need_flush_to_write_buffer) {
                        if (seq == journal_cur_seq(j))
-                               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+                               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
 
                        union journal_res_state s;
                        s.v = atomic64_read_acquire(&j->reservations.counter);
@@ -1339,35 +1398,9 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
        }
 
        prt_newline(out);
-
-       for (u64 seq = journal_cur_seq(j);
-            seq >= journal_last_unwritten_seq(j);
-            --seq) {
-               unsigned i = seq & JOURNAL_BUF_MASK;
-
-               prt_printf(out, "unwritten entry:");
-               prt_tab(out);
-               prt_printf(out, "%llu", seq);
-               prt_newline(out);
-               printbuf_indent_add(out, 2);
-
-               prt_printf(out, "refcount:");
-               prt_tab(out);
-               prt_printf(out, "%u", journal_state_count(s, i));
-               prt_newline(out);
-
-               prt_printf(out, "sectors:");
-               prt_tab(out);
-               prt_printf(out, "%u", j->buf[i].sectors);
-               prt_newline(out);
-
-               prt_printf(out, "expires");
-               prt_tab(out);
-               prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
-               prt_newline(out);
-
-               printbuf_indent_sub(out, 2);
-       }
+       prt_printf(out, "unwritten entries:");
+       prt_newline(out);
+       bch2_journal_bufs_to_text(out, j);
 
        prt_printf(out,
               "replay done:\t\t%i\n",
index b0f4dd491e1205d28c6af528fb59696cdbc4dc9c..bfd6585e746da45880da9b5ad8fb502586cbf933 100644 (file)
@@ -683,10 +683,7 @@ static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs
        prt_printf(out, "dev=%u", le32_to_cpu(u->dev));
 
        for (i = 0; i < nr_types; i++) {
-               if (i < BCH_DATA_NR)
-                       prt_printf(out, " %s", bch2_data_types[i]);
-               else
-                       prt_printf(out, " (unknown data type %u)", i);
+               bch2_prt_data_type(out, i);
                prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
                       le64_to_cpu(u->d[i].buckets),
                       le64_to_cpu(u->d[i].sectors),
@@ -1991,7 +1988,8 @@ CLOSURE_CALLBACK(bch2_journal_write)
                        percpu_ref_get(&ca->io_ref);
 
                        bio = ca->journal.bio;
-                       bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
+                       bio_reset(bio, ca->disk_sb.bdev,
+                                 REQ_OP_WRITE|REQ_PREFLUSH);
                        bio->bi_end_io          = journal_write_endio;
                        bio->bi_private         = ca;
                        closure_bio_submit(bio, cl);
diff --git a/fs/bcachefs/logged_ops_format.h b/fs/bcachefs/logged_ops_format.h
new file mode 100644 (file)
index 0000000..6a4bf71
--- /dev/null
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_LOGGED_OPS_FORMAT_H
+#define _BCACHEFS_LOGGED_OPS_FORMAT_H
+
+struct bch_logged_op_truncate {
+       struct bch_val          v;
+       __le32                  subvol;
+       __le32                  pad;
+       __le64                  inum;
+       __le64                  new_i_size;
+};
+
+enum logged_op_finsert_state {
+       LOGGED_OP_FINSERT_start,
+       LOGGED_OP_FINSERT_shift_extents,
+       LOGGED_OP_FINSERT_finish,
+};
+
+struct bch_logged_op_finsert {
+       struct bch_val          v;
+       __u8                    state;
+       __u8                    pad[3];
+       __le32                  subvol;
+       __le64                  inum;
+       __le64                  dst_offset;
+       __le64                  src_offset;
+       __le64                  pos;
+};
+
+#endif /* _BCACHEFS_LOGGED_OPS_FORMAT_H */
index b2be565bb8f214bc2ac4ebd6efac324ac20b7241..64df11ab422bf455560bad095973cc6e5a296697 100644 (file)
@@ -17,7 +17,7 @@
  * Rust and rustc has issues with u128.
  */
 
-#if defined(__SIZEOF_INT128__) && defined(__KERNEL__)
+#if defined(__SIZEOF_INT128__) && defined(__KERNEL__) && !defined(CONFIG_PARISC)
 
 typedef struct {
        unsigned __int128 v;
index 7a33319dcd168001594f6532bafe0caf92f83c22..bf68ea49447b95055a4f6a1e6e7c6a7e373aebc5 100644 (file)
@@ -6,9 +6,11 @@
 #include "backpointers.h"
 #include "bkey_buf.h"
 #include "btree_gc.h"
+#include "btree_io.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
 #include "btree_write_buffer.h"
+#include "compress.h"
 #include "disk_groups.h"
 #include "ec.h"
 #include "errcode.h"
@@ -34,12 +36,46 @@ const char * const bch2_data_ops_strs[] = {
        NULL
 };
 
-static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k)
+static void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
+                                         struct bch_io_opts *io_opts,
+                                         struct data_update_opts *data_opts)
+{
+       printbuf_tabstop_push(out, 20);
+       prt_str(out, "rewrite ptrs:");
+       prt_tab(out);
+       bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
+       prt_newline(out);
+
+       prt_str(out, "kill ptrs: ");
+       prt_tab(out);
+       bch2_prt_u64_base2(out, data_opts->kill_ptrs);
+       prt_newline(out);
+
+       prt_str(out, "target: ");
+       prt_tab(out);
+       bch2_target_to_text(out, c, data_opts->target);
+       prt_newline(out);
+
+       prt_str(out, "compression: ");
+       prt_tab(out);
+       bch2_compression_opt_to_text(out, background_compression(*io_opts));
+       prt_newline(out);
+
+       prt_str(out, "extra replicas: ");
+       prt_tab(out);
+       prt_u64(out, data_opts->extra_replicas);
+}
+
+static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k,
+                              struct bch_io_opts *io_opts,
+                              struct data_update_opts *data_opts)
 {
        if (trace_move_extent_enabled()) {
                struct printbuf buf = PRINTBUF;
 
                bch2_bkey_val_to_text(&buf, c, k);
+               prt_newline(&buf);
+               bch2_data_update_opts_to_text(&buf, c, io_opts, data_opts);
                trace_move_extent(c, buf.buf);
                printbuf_exit(&buf);
        }
@@ -111,6 +147,15 @@ static void move_write(struct moving_io *io)
                return;
        }
 
+       if (trace_move_extent_write_enabled()) {
+               struct bch_fs *c = io->write.op.c;
+               struct printbuf buf = PRINTBUF;
+
+               bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k));
+               trace_move_extent_write(c, buf.buf);
+               printbuf_exit(&buf);
+       }
+
        closure_get(&io->write.ctxt->cl);
        atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
        atomic_inc(&io->write.ctxt->write_ios);
@@ -241,9 +286,10 @@ int bch2_move_extent(struct moving_context *ctxt,
        unsigned sectors = k.k->size, pages;
        int ret = -ENOMEM;
 
+       trace_move_extent2(c, k, &io_opts, &data_opts);
+
        if (ctxt->stats)
                ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
-       trace_move_extent2(c, k);
 
        bch2_data_update_opts_normalize(k, &data_opts);
 
@@ -759,6 +805,8 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
                        if (!b)
                                goto next;
 
+                       unsigned sectors = btree_ptr_sectors_written(&b->key);
+
                        ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
                        bch2_trans_iter_exit(trans, &iter);
 
@@ -768,11 +816,10 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
                                goto err;
 
                        if (ctxt->rate)
-                               bch2_ratelimit_increment(ctxt->rate,
-                                                        c->opts.btree_node_size >> 9);
+                               bch2_ratelimit_increment(ctxt->rate, sectors);
                        if (ctxt->stats) {
-                               atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
-                               atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+                               atomic64_add(sectors, &ctxt->stats->sectors_seen);
+                               atomic64_add(sectors, &ctxt->stats->sectors_moved);
                        }
                }
 next:
@@ -1083,9 +1130,9 @@ int bch2_data_job(struct bch_fs *c,
 
 void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
 {
-       prt_printf(out, "%s: data type=%s pos=",
-                  stats->name,
-                  bch2_data_types[stats->data_type]);
+       prt_printf(out, "%s: data type==", stats->name);
+       bch2_prt_data_type(out, stats->data_type);
+       prt_str(out, " pos=");
        bch2_bbpos_to_text(out, stats->pos);
        prt_newline(out);
        printbuf_indent_add(out, 2);
index 8e6f230eac38155bf5d048367d6ebde35a4a15bd..b1ed0b9a20d35d61491ce0cff28b4bb2c7be42c3 100644 (file)
@@ -52,7 +52,7 @@ const char * const bch2_csum_opts[] = {
        NULL
 };
 
-const char * const bch2_compression_types[] = {
+const char * const __bch2_compression_types[] = {
        BCH_COMPRESSION_TYPES()
        NULL
 };
@@ -72,7 +72,7 @@ const char * const bch2_str_hash_opts[] = {
        NULL
 };
 
-const char * const bch2_data_types[] = {
+const char * const __bch2_data_types[] = {
        BCH_DATA_TYPES()
        NULL
 };
index 93a24fef42148488cdddb391cd291dd0e0168063..9a4b7faa376503993f1c2da8f8d1e5963ef6ca5a 100644 (file)
@@ -18,11 +18,11 @@ extern const char * const bch2_sb_compat[];
 extern const char * const __bch2_btree_ids[];
 extern const char * const bch2_csum_types[];
 extern const char * const bch2_csum_opts[];
-extern const char * const bch2_compression_types[];
+extern const char * const __bch2_compression_types[];
 extern const char * const bch2_compression_opts[];
 extern const char * const bch2_str_hash_types[];
 extern const char * const bch2_str_hash_opts[];
-extern const char * const bch2_data_types[];
+extern const char * const __bch2_data_types[];
 extern const char * const bch2_member_states[];
 extern const char * const bch2_jset_entry_types[];
 extern const char * const bch2_fs_usage_types[];
@@ -564,6 +564,11 @@ struct bch_io_opts {
 #undef x
 };
 
+static inline unsigned background_compression(struct bch_io_opts opts)
+{
+       return opts.background_compression ?: opts.compression;
+}
+
 struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts);
 bool bch2_opt_is_inode_opt(enum bch_opt_id);
 
diff --git a/fs/bcachefs/quota_format.h b/fs/bcachefs/quota_format.h
new file mode 100644 (file)
index 0000000..dc34347
--- /dev/null
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_QUOTA_FORMAT_H
+#define _BCACHEFS_QUOTA_FORMAT_H
+
+/* KEY_TYPE_quota: */
+
+enum quota_types {
+       QTYP_USR                = 0,
+       QTYP_GRP                = 1,
+       QTYP_PRJ                = 2,
+       QTYP_NR                 = 3,
+};
+
+enum quota_counters {
+       Q_SPC                   = 0,
+       Q_INO                   = 1,
+       Q_COUNTERS              = 2,
+};
+
+struct bch_quota_counter {
+       __le64                  hardlimit;
+       __le64                  softlimit;
+};
+
+struct bch_quota {
+       struct bch_val          v;
+       struct bch_quota_counter c[Q_COUNTERS];
+} __packed __aligned(8);
+
+/* BCH_SB_FIELD_quota: */
+
+struct bch_sb_quota_counter {
+       __le32                          timelimit;
+       __le32                          warnlimit;
+};
+
+struct bch_sb_quota_type {
+       __le64                          flags;
+       struct bch_sb_quota_counter     c[Q_COUNTERS];
+};
+
+struct bch_sb_field_quota {
+       struct bch_sb_field             field;
+       struct bch_sb_quota_type        q[QTYP_NR];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_QUOTA_FORMAT_H */
index 95f46cb3b5bdfd820e845a8cceda2b3c2fb67cf4..22d1017aa49b975756905a9a69ce8bcd82416ca3 100644 (file)
@@ -177,8 +177,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
                prt_str(&buf, "target=");
                bch2_target_to_text(&buf, c, r->target);
                prt_str(&buf, " compression=");
-               struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
-               prt_str(&buf, bch2_compression_opts[opt.type]);
+               bch2_compression_opt_to_text(&buf, r->compression);
                prt_str(&buf, " ");
                bch2_bkey_val_to_text(&buf, c, k);
 
@@ -254,13 +253,12 @@ static bool rebalance_pred(struct bch_fs *c, void *arg,
 
        if (k.k->p.inode) {
                target          = io_opts->background_target;
-               compression     = io_opts->background_compression ?: io_opts->compression;
+               compression     = background_compression(*io_opts);
        } else {
                const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
 
                target          = r ? r->target : io_opts->background_target;
-               compression     = r ? r->compression :
-                       (io_opts->background_compression ?: io_opts->compression);
+               compression     = r ? r->compression : background_compression(*io_opts);
        }
 
        data_opts->rewrite_ptrs         = bch2_bkey_ptrs_need_rebalance(c, k, target, compression);
@@ -371,6 +369,7 @@ static int do_rebalance(struct moving_context *ctxt)
            !kthread_should_stop() &&
            !atomic64_read(&r->work_stats.sectors_seen) &&
            !atomic64_read(&r->scan_stats.sectors_seen)) {
+               bch2_moving_ctxt_flush_all(ctxt);
                bch2_trans_unlock_long(trans);
                rebalance_wait(c);
        }
@@ -385,7 +384,6 @@ static int bch2_rebalance_thread(void *arg)
        struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct moving_context ctxt;
-       int ret;
 
        set_freezable();
 
@@ -393,8 +391,7 @@ static int bch2_rebalance_thread(void *arg)
                              writepoint_ptr(&c->rebalance_write_point),
                              true);
 
-       while (!kthread_should_stop() &&
-              !(ret = do_rebalance(&ctxt)))
+       while (!kthread_should_stop() && !do_rebalance(&ctxt))
                ;
 
        bch2_moving_ctxt_exit(&ctxt);
index 725214605a050996196c28a9132f8fe247e76d28..9127d0e3ca2f6a3fd44e076b42f01ee6f7736427 100644 (file)
@@ -280,7 +280,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
                                        le64_to_cpu(u->v);
                        break;
                case BCH_FS_USAGE_inodes:
-                       c->usage_base->nr_inodes = le64_to_cpu(u->v);
+                       c->usage_base->b.nr_inodes = le64_to_cpu(u->v);
                        break;
                case BCH_FS_USAGE_key_version:
                        atomic64_set(&c->key_version,
index faa5d367005874f8838128822c9584f9bdf48b33..c47c66c2b394dc8df391fa3adf8bfea03e1e447e 100644 (file)
@@ -292,10 +292,10 @@ static inline void check_indirect_extent_deleting(struct bkey_s new, unsigned *f
        }
 }
 
-int bch2_trans_mark_reflink_v(struct btree_trans *trans,
-                             enum btree_id btree_id, unsigned level,
-                             struct bkey_s_c old, struct bkey_s new,
-                             unsigned flags)
+int bch2_trigger_reflink_v(struct btree_trans *trans,
+                          enum btree_id btree_id, unsigned level,
+                          struct bkey_s_c old, struct bkey_s new,
+                          unsigned flags)
 {
        if ((flags & BTREE_TRIGGER_TRANSACTIONAL) &&
            (flags & BTREE_TRIGGER_INSERT))
@@ -324,7 +324,7 @@ void bch2_indirect_inline_data_to_text(struct printbuf *out,
               min(datalen, 32U), d.v->data);
 }
 
-int bch2_trans_mark_indirect_inline_data(struct btree_trans *trans,
+int bch2_trigger_indirect_inline_data(struct btree_trans *trans,
                              enum btree_id btree_id, unsigned level,
                              struct bkey_s_c old, struct bkey_s new,
                              unsigned flags)
@@ -486,6 +486,13 @@ s64 bch2_remap_range(struct bch_fs *c,
 
                bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);
 
+               if (dst_inum.inum < src_inum.inum) {
+                       /* Avoid some lock cycle transaction restarts */
+                       ret = bch2_btree_iter_traverse(&dst_iter);
+                       if (ret)
+                               continue;
+               }
+
                dst_done = dst_iter.pos.offset - dst_start.offset;
                src_want = POS(src_start.inode, src_start.offset + dst_done);
                bch2_btree_iter_set_pos(&src_iter, src_want);
@@ -538,9 +545,7 @@ s64 bch2_remap_range(struct bch_fs *c,
                                min(src_k.k->p.offset - src_want.offset,
                                    dst_end.offset - dst_iter.pos.offset));
 
-               ret =   bch2_bkey_set_needs_rebalance(c, new_dst.k,
-                                       opts.background_target,
-                                       opts.background_compression) ?:
+               ret =   bch2_bkey_set_needs_rebalance(c, new_dst.k, &opts) ?:
                        bch2_extent_update(trans, dst_inum, &dst_iter,
                                        new_dst.k, &disk_res,
                                        new_i_size, i_sectors_delta,
index 8ee778ec0022a327145eb91ebefbcb38cc1240bf..4d8867289717bf6cf46f05b0c58e3adcc42efae7 100644 (file)
@@ -24,14 +24,14 @@ int bch2_reflink_v_invalid(struct bch_fs *, struct bkey_s_c,
                           enum bkey_invalid_flags, struct printbuf *);
 void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);
-int bch2_trans_mark_reflink_v(struct btree_trans *, enum btree_id, unsigned,
+int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned,
                              struct bkey_s_c, struct bkey_s, unsigned);
 
 #define bch2_bkey_ops_reflink_v ((struct bkey_ops) {           \
        .key_invalid    = bch2_reflink_v_invalid,               \
        .val_to_text    = bch2_reflink_v_to_text,               \
        .swab           = bch2_ptr_swab,                        \
-       .trigger        = bch2_trans_mark_reflink_v,            \
+       .trigger        = bch2_trigger_reflink_v,               \
        .min_val_size   = 8,                                    \
 })
 
@@ -39,7 +39,7 @@ int bch2_indirect_inline_data_invalid(struct bch_fs *, struct bkey_s_c,
                                      enum bkey_invalid_flags, struct printbuf *);
 void bch2_indirect_inline_data_to_text(struct printbuf *,
                                struct bch_fs *, struct bkey_s_c);
-int bch2_trans_mark_indirect_inline_data(struct btree_trans *,
+int bch2_trigger_indirect_inline_data(struct btree_trans *,
                                         enum btree_id, unsigned,
                              struct bkey_s_c, struct bkey_s,
                              unsigned);
@@ -47,7 +47,7 @@ int bch2_trans_mark_indirect_inline_data(struct btree_trans *,
 #define bch2_bkey_ops_indirect_inline_data ((struct bkey_ops) {        \
        .key_invalid    = bch2_indirect_inline_data_invalid,    \
        .val_to_text    = bch2_indirect_inline_data_to_text,    \
-       .trigger        = bch2_trans_mark_indirect_inline_data, \
+       .trigger        = bch2_trigger_indirect_inline_data,    \
        .min_val_size   = 8,                                    \
 })
 
diff --git a/fs/bcachefs/reflink_format.h b/fs/bcachefs/reflink_format.h
new file mode 100644 (file)
index 0000000..6772eeb
--- /dev/null
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_REFLINK_FORMAT_H
+#define _BCACHEFS_REFLINK_FORMAT_H
+
+struct bch_reflink_p {
+       struct bch_val          v;
+       __le64                  idx;
+       /*
+        * A reflink pointer might point to an indirect extent which is then
+        * later split (by copygc or rebalance). If we only pointed to part of
+        * the original indirect extent, and then one of the fragments is
+        * outside the range we point to, we'd leak a refcount: so when creating
+        * reflink pointers, we need to store pad values to remember the full
+        * range we were taking a reference on.
+        */
+       __le32                  front_pad;
+       __le32                  back_pad;
+} __packed __aligned(8);
+
+struct bch_reflink_v {
+       struct bch_val          v;
+       __le64                  refcount;
+       union bch_extent_entry  start[0];
+       __u64                   _data[];
+} __packed __aligned(8);
+
+struct bch_indirect_inline_data {
+       struct bch_val          v;
+       __le64                  refcount;
+       u8                      data[];
+};
+
+#endif /* _BCACHEFS_REFLINK_FORMAT_H */
index 92ba56ef1fc89690656e9625871ecd7ee38b5f9b..cc2672c120312c39f82e9a1a9afe0ed959b15dba 100644 (file)
@@ -9,6 +9,12 @@
 static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
                                            struct bch_replicas_cpu *);
 
+/* Some (buggy!) compilers don't allow memcmp to be passed as a pointer */
+static int bch2_memcmp(const void *l, const void *r, size_t size)
+{
+       return memcmp(l, r, size);
+}
+
 /* Replicas tracking - in memory: */
 
 static void verify_replicas_entry(struct bch_replicas_entry_v1 *e)
@@ -33,21 +39,16 @@ void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *e)
 
 static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
 {
-       eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
+       eytzinger0_sort(r->entries, r->nr, r->entry_size, bch2_memcmp, NULL);
 }
 
 static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
                                           struct bch_replicas_entry_v0 *e)
 {
-       unsigned i;
-
-       if (e->data_type < BCH_DATA_NR)
-               prt_printf(out, "%s", bch2_data_types[e->data_type]);
-       else
-               prt_printf(out, "(invalid data type %u)", e->data_type);
+       bch2_prt_data_type(out, e->data_type);
 
        prt_printf(out, ": %u [", e->nr_devs);
-       for (i = 0; i < e->nr_devs; i++)
+       for (unsigned i = 0; i < e->nr_devs; i++)
                prt_printf(out, i ? " %u" : "%u", e->devs[i]);
        prt_printf(out, "]");
 }
@@ -55,15 +56,10 @@ static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
 void bch2_replicas_entry_to_text(struct printbuf *out,
                                 struct bch_replicas_entry_v1 *e)
 {
-       unsigned i;
-
-       if (e->data_type < BCH_DATA_NR)
-               prt_printf(out, "%s", bch2_data_types[e->data_type]);
-       else
-               prt_printf(out, "(invalid data type %u)", e->data_type);
+       bch2_prt_data_type(out, e->data_type);
 
        prt_printf(out, ": %u/%u [", e->nr_required, e->nr_devs);
-       for (i = 0; i < e->nr_devs; i++)
+       for (unsigned i = 0; i < e->nr_devs; i++)
                prt_printf(out, i ? " %u" : "%u", e->devs[i]);
        prt_printf(out, "]");
 }
@@ -831,7 +827,7 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
        sort_cmp_size(cpu_r->entries,
                      cpu_r->nr,
                      cpu_r->entry_size,
-                     memcmp, NULL);
+                     bch2_memcmp, NULL);
 
        for (i = 0; i < cpu_r->nr; i++) {
                struct bch_replicas_entry_v1 *e =
index 9632f36f5f318134065cfdbae613b422cce98f6a..b6bf0ebe7e84046a5d08ade7d34bae9ae0bff3a5 100644 (file)
@@ -207,7 +207,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
 
                u->entry.type   = BCH_JSET_ENTRY_usage;
                u->entry.btree_id = BCH_FS_USAGE_inodes;
-               u->v            = cpu_to_le64(c->usage_base->nr_inodes);
+               u->v            = cpu_to_le64(c->usage_base->b.nr_inodes);
        }
 
        {
similarity index 99%
rename from fs/bcachefs/counters.c
rename to fs/bcachefs/sb-counters.c
index 02a996e06a64e3d10483f7fcbffc0de66428f9ed..7dc898761bb3125a79c82a5de17de0807920d98d 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
 #include "super-io.h"
-#include "counters.h"
+#include "sb-counters.h"
 
 /* BCH_SB_FIELD_counters */
 
similarity index 77%
rename from fs/bcachefs/counters.h
rename to fs/bcachefs/sb-counters.h
index 4778aa19bf346459c5ca252e6b75279503867f43..81f8aec9fcb1cedf43143f269fdfc1b6fb39e441 100644 (file)
@@ -1,11 +1,10 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_COUNTERS_H
-#define _BCACHEFS_COUNTERS_H
+#ifndef _BCACHEFS_SB_COUNTERS_H
+#define _BCACHEFS_SB_COUNTERS_H
 
 #include "bcachefs.h"
 #include "super-io.h"
 
-
 int bch2_sb_counters_to_cpu(struct bch_fs *);
 int bch2_sb_counters_from_cpu(struct bch_fs *);
 
@@ -14,4 +13,4 @@ int bch2_fs_counters_init(struct bch_fs *);
 
 extern const struct bch_sb_field_ops bch_sb_field_ops_counters;
 
-#endif // _BCACHEFS_COUNTERS_H
+#endif // _BCACHEFS_SB_COUNTERS_H
diff --git a/fs/bcachefs/sb-counters_format.h b/fs/bcachefs/sb-counters_format.h
new file mode 100644 (file)
index 0000000..62ea478
--- /dev/null
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_COUNTERS_FORMAT_H
+#define _BCACHEFS_SB_COUNTERS_FORMAT_H
+
+#define BCH_PERSISTENT_COUNTERS()                              \
+       x(io_read,                                      0)      \
+       x(io_write,                                     1)      \
+       x(io_move,                                      2)      \
+       x(bucket_invalidate,                            3)      \
+       x(bucket_discard,                               4)      \
+       x(bucket_alloc,                                 5)      \
+       x(bucket_alloc_fail,                            6)      \
+       x(btree_cache_scan,                             7)      \
+       x(btree_cache_reap,                             8)      \
+       x(btree_cache_cannibalize,                      9)      \
+       x(btree_cache_cannibalize_lock,                 10)     \
+       x(btree_cache_cannibalize_lock_fail,            11)     \
+       x(btree_cache_cannibalize_unlock,               12)     \
+       x(btree_node_write,                             13)     \
+       x(btree_node_read,                              14)     \
+       x(btree_node_compact,                           15)     \
+       x(btree_node_merge,                             16)     \
+       x(btree_node_split,                             17)     \
+       x(btree_node_rewrite,                           18)     \
+       x(btree_node_alloc,                             19)     \
+       x(btree_node_free,                              20)     \
+       x(btree_node_set_root,                          21)     \
+       x(btree_path_relock_fail,                       22)     \
+       x(btree_path_upgrade_fail,                      23)     \
+       x(btree_reserve_get_fail,                       24)     \
+       x(journal_entry_full,                           25)     \
+       x(journal_full,                                 26)     \
+       x(journal_reclaim_finish,                       27)     \
+       x(journal_reclaim_start,                        28)     \
+       x(journal_write,                                29)     \
+       x(read_promote,                                 30)     \
+       x(read_bounce,                                  31)     \
+       x(read_split,                                   33)     \
+       x(read_retry,                                   32)     \
+       x(read_reuse_race,                              34)     \
+       x(move_extent_read,                             35)     \
+       x(move_extent_write,                            36)     \
+       x(move_extent_finish,                           37)     \
+       x(move_extent_fail,                             38)     \
+       x(move_extent_start_fail,                       39)     \
+       x(copygc,                                       40)     \
+       x(copygc_wait,                                  41)     \
+       x(gc_gens_end,                                  42)     \
+       x(gc_gens_start,                                43)     \
+       x(trans_blocked_journal_reclaim,                44)     \
+       x(trans_restart_btree_node_reused,              45)     \
+       x(trans_restart_btree_node_split,               46)     \
+       x(trans_restart_fault_inject,                   47)     \
+       x(trans_restart_iter_upgrade,                   48)     \
+       x(trans_restart_journal_preres_get,             49)     \
+       x(trans_restart_journal_reclaim,                50)     \
+       x(trans_restart_journal_res_get,                51)     \
+       x(trans_restart_key_cache_key_realloced,        52)     \
+       x(trans_restart_key_cache_raced,                53)     \
+       x(trans_restart_mark_replicas,                  54)     \
+       x(trans_restart_mem_realloced,                  55)     \
+       x(trans_restart_memory_allocation_failure,      56)     \
+       x(trans_restart_relock,                         57)     \
+       x(trans_restart_relock_after_fill,              58)     \
+       x(trans_restart_relock_key_cache_fill,          59)     \
+       x(trans_restart_relock_next_node,               60)     \
+       x(trans_restart_relock_parent_for_fill,         61)     \
+       x(trans_restart_relock_path,                    62)     \
+       x(trans_restart_relock_path_intent,             63)     \
+       x(trans_restart_too_many_iters,                 64)     \
+       x(trans_restart_traverse,                       65)     \
+       x(trans_restart_upgrade,                        66)     \
+       x(trans_restart_would_deadlock,                 67)     \
+       x(trans_restart_would_deadlock_write,           68)     \
+       x(trans_restart_injected,                       69)     \
+       x(trans_restart_key_cache_upgrade,              70)     \
+       x(trans_traverse_all,                           71)     \
+       x(transaction_commit,                           72)     \
+       x(write_super,                                  73)     \
+       x(trans_restart_would_deadlock_recursion_limit, 74)     \
+       x(trans_restart_write_buffer_flush,             75)     \
+       x(trans_restart_split_race,                     76)     \
+       x(write_buffer_flush_slowpath,                  77)     \
+       x(write_buffer_flush_sync,                      78)
+
+enum bch_persistent_counters {
+#define x(t, n, ...) BCH_COUNTER_##t,
+       BCH_PERSISTENT_COUNTERS()
+#undef x
+       BCH_COUNTER_NR
+};
+
+struct bch_sb_field_counters {
+       struct bch_sb_field     field;
+       __le64                  d[];
+};
+
+#endif /* _BCACHEFS_SB_COUNTERS_FORMAT_H */
index a44a238bf8b5550023226844734424b1211c812a..a45354d2acde9f3ad0b149247c8ff4c7c869fb15 100644 (file)
@@ -251,7 +251,7 @@ static void member_to_text(struct printbuf *out,
        prt_printf(out, "Data allowed:");
        prt_tab(out);
        if (BCH_MEMBER_DATA_ALLOWED(&m))
-               prt_bitflags(out, bch2_data_types, BCH_MEMBER_DATA_ALLOWED(&m));
+               prt_bitflags(out, __bch2_data_types, BCH_MEMBER_DATA_ALLOWED(&m));
        else
                prt_printf(out, "(none)");
        prt_newline(out);
@@ -259,7 +259,7 @@ static void member_to_text(struct printbuf *out,
        prt_printf(out, "Has data:");
        prt_tab(out);
        if (data_have)
-               prt_bitflags(out, bch2_data_types, data_have);
+               prt_bitflags(out, __bch2_data_types, data_have);
        else
                prt_printf(out, "(none)");
        prt_newline(out);
index 56af937523ff2a8deda0a5168f45a67533a57da5..45f67e8b29eb67f188e5cfb32aa39e0b1ad1d625 100644 (file)
@@ -1053,6 +1053,8 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
                n->v.subvol     = cpu_to_le32(snapshot_subvols[i]);
                n->v.tree       = cpu_to_le32(tree);
                n->v.depth      = cpu_to_le32(depth);
+               n->v.btime.lo   = cpu_to_le64(bch2_current_time(c));
+               n->v.btime.hi   = 0;
 
                for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
                        n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
@@ -1681,5 +1683,5 @@ int bch2_snapshots_read(struct bch_fs *c)
 
 void bch2_fs_snapshots_exit(struct bch_fs *c)
 {
-       kfree(rcu_dereference_protected(c->snapshots, true));
+       kvfree(rcu_dereference_protected(c->snapshots, true));
 }
diff --git a/fs/bcachefs/snapshot_format.h b/fs/bcachefs/snapshot_format.h
new file mode 100644 (file)
index 0000000..aabcd3a
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SNAPSHOT_FORMAT_H
+#define _BCACHEFS_SNAPSHOT_FORMAT_H
+
+struct bch_snapshot {
+       struct bch_val          v;
+       __le32                  flags;
+       __le32                  parent;
+       __le32                  children[2];
+       __le32                  subvol;
+       /* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
+       __le32                  tree;
+       __le32                  depth;
+       __le32                  skip[3];
+       bch_le128               btime;
+};
+
+LE32_BITMASK(BCH_SNAPSHOT_DELETED,     struct bch_snapshot, flags,  0,  1)
+
+/* True if a subvolume points to this snapshot node: */
+LE32_BITMASK(BCH_SNAPSHOT_SUBVOL,      struct bch_snapshot, flags,  1,  2)
+
+/*
+ * Snapshot trees:
+ *
+ * The snapshot_trees btree gives us persistent indentifier for each tree of
+ * bch_snapshot nodes, and allow us to record and easily find the root/master
+ * subvolume that other snapshots were created from:
+ */
+struct bch_snapshot_tree {
+       struct bch_val          v;
+       __le32                  master_subvol;
+       __le32                  root_snapshot;
+};
+
+#endif /* _BCACHEFS_SNAPSHOT_FORMAT_H */
index 89fdb7c21134ebbb6c145a88ed5b1943ab54588a..fcaa5a888744881a4f6c37dd77fbd8cf73b2f4d0 100644 (file)
@@ -160,21 +160,16 @@ static inline bool is_visible_key(struct bch_hash_desc desc, subvol_inum inum, s
 }
 
 static __always_inline int
-bch2_hash_lookup(struct btree_trans *trans,
+bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
                 struct btree_iter *iter,
                 const struct bch_hash_desc desc,
                 const struct bch_hash_info *info,
                 subvol_inum inum, const void *key,
-                unsigned flags)
+                unsigned flags, u32 snapshot)
 {
        struct bkey_s_c k;
-       u32 snapshot;
        int ret;
 
-       ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
-       if (ret)
-               return ret;
-
        for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
                           SPOS(inum.inum, desc.hash_key(info, key), snapshot),
                           POS(inum.inum, U64_MAX),
@@ -194,6 +189,19 @@ bch2_hash_lookup(struct btree_trans *trans,
        return ret ?: -BCH_ERR_ENOENT_str_hash_lookup;
 }
 
+static __always_inline int
+bch2_hash_lookup(struct btree_trans *trans,
+                struct btree_iter *iter,
+                const struct bch_hash_desc desc,
+                const struct bch_hash_info *info,
+                subvol_inum inum, const void *key,
+                unsigned flags)
+{
+       u32 snapshot;
+       return  bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?:
+               bch2_hash_lookup_in_snapshot(trans, iter, desc, info, inum, key, flags, snapshot);
+}
+
 static __always_inline int
 bch2_hash_hole(struct btree_trans *trans,
               struct btree_iter *iter,
diff --git a/fs/bcachefs/subvolume_format.h b/fs/bcachefs/subvolume_format.h
new file mode 100644 (file)
index 0000000..af79134
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SUBVOLUME_FORMAT_H
+#define _BCACHEFS_SUBVOLUME_FORMAT_H
+
+#define SUBVOL_POS_MIN         POS(0, 1)
+#define SUBVOL_POS_MAX         POS(0, S32_MAX)
+#define BCACHEFS_ROOT_SUBVOL   1
+
+struct bch_subvolume {
+       struct bch_val          v;
+       __le32                  flags;
+       __le32                  snapshot;
+       __le64                  inode;
+       /*
+        * Snapshot subvolumes form a tree, separate from the snapshot nodes
+        * tree - if this subvolume is a snapshot, this is the ID of the
+        * subvolume it was created from:
+        *
+        * This is _not_ necessarily the subvolume of the directory containing
+        * this subvolume:
+        */
+       __le32                  parent;
+       __le32                  pad;
+       bch_le128               otime;
+};
+
+LE32_BITMASK(BCH_SUBVOLUME_RO,         struct bch_subvolume, flags,  0,  1)
+/*
+ * We need to know whether a subvolume is a snapshot so we can know whether we
+ * can delete it (or whether it should just be rm -rf'd)
+ */
+LE32_BITMASK(BCH_SUBVOLUME_SNAP,       struct bch_subvolume, flags,  1,  2)
+LE32_BITMASK(BCH_SUBVOLUME_UNLINKED,   struct bch_subvolume, flags,  2,  3)
+
+#endif /* _BCACHEFS_SUBVOLUME_FORMAT_H */
index 6d3db5cce5f6ac9e315500c14fbb5e1d97ea8098..d60c7d27a0477cb0de116675671d5c888d8f1c86 100644 (file)
@@ -2,7 +2,6 @@
 
 #include "bcachefs.h"
 #include "checksum.h"
-#include "counters.h"
 #include "disk_groups.h"
 #include "ec.h"
 #include "error.h"
@@ -13,6 +12,7 @@
 #include "replicas.h"
 #include "quota.h"
 #include "sb-clean.h"
+#include "sb-counters.h"
 #include "sb-downgrade.h"
 #include "sb-errors.h"
 #include "sb-members.h"
@@ -1321,7 +1321,9 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
 
        prt_printf(out, "Superblock size:");
        prt_tab(out);
-       prt_printf(out, "%zu", vstruct_bytes(sb));
+       prt_units_u64(out, vstruct_bytes(sb));
+       prt_str(out, "/");
+       prt_units_u64(out, 512ULL << sb->layout.sb_max_size_bits);
        prt_newline(out);
 
        prt_printf(out, "Clean:");
index 9dbc35940197f1c55c1bc48746bc23a3983ac203..b9911402b1753baa986a1673339c4454eba87431 100644 (file)
@@ -23,7 +23,6 @@
 #include "checksum.h"
 #include "clock.h"
 #include "compress.h"
-#include "counters.h"
 #include "debug.h"
 #include "disk_groups.h"
 #include "ec.h"
@@ -49,6 +48,7 @@
 #include "recovery.h"
 #include "replicas.h"
 #include "sb-clean.h"
+#include "sb-counters.h"
 #include "sb-errors.h"
 #include "sb-members.h"
 #include "snapshot.h"
@@ -883,7 +883,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
            !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
            !(c->online_reserved = alloc_percpu(u64)) ||
            mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
-                                       btree_bytes(c)) ||
+                                       c->opts.btree_node_size) ||
            mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
            !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
                                              sizeof(u64), GFP_KERNEL))) {
@@ -1386,8 +1386,8 @@ static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
        prt_bdevname(&name, ca->disk_sb.bdev);
 
        if (c->sb.nr_devices == 1)
-               strlcpy(c->name, name.buf, sizeof(c->name));
-       strlcpy(ca->name, name.buf, sizeof(ca->name));
+               strscpy(c->name, name.buf, sizeof(c->name));
+       strscpy(ca->name, name.buf, sizeof(ca->name));
 
        printbuf_exit(&name);
 
@@ -1625,7 +1625,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
        if (data) {
                struct printbuf data_has = PRINTBUF;
 
-               prt_bitflags(&data_has, bch2_data_types, data);
+               prt_bitflags(&data_has, __bch2_data_types, data);
                bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
                printbuf_exit(&data_has);
                ret = -EBUSY;
index 8ed52319ff68d2b93194970b7da51218a579b0dd..cee80c47feea2b27fa7d18fc55a39228db7f0b96 100644 (file)
@@ -21,6 +21,7 @@
 #include "btree_gc.h"
 #include "buckets.h"
 #include "clock.h"
+#include "compress.h"
 #include "disk_groups.h"
 #include "ec.h"
 #include "inode.h"
@@ -247,7 +248,7 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
 
        mutex_lock(&c->btree_cache.lock);
        list_for_each_entry(b, &c->btree_cache.live, list)
-               ret += btree_bytes(c);
+               ret += btree_buf_bytes(b);
 
        mutex_unlock(&c->btree_cache.lock);
        return ret;
@@ -330,7 +331,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
        prt_newline(out);
 
        for (unsigned i = 0; i < ARRAY_SIZE(s); i++) {
-               prt_str(out, bch2_compression_types[i]);
+               bch2_prt_compression_type(out, i);
                prt_tab(out);
 
                prt_human_readable_u64(out, s[i].sectors_compressed << 9);
@@ -725,8 +726,10 @@ STORE(bch2_fs_opts_dir)
        bch2_opt_set_sb(c, opt, v);
        bch2_opt_set_by_id(&c->opts, id, v);
 
-       if ((id == Opt_background_target ||
-            id == Opt_background_compression) && v)
+       if (v &&
+           (id == Opt_background_target ||
+            id == Opt_background_compression ||
+            (id == Opt_compression && !c->opts.background_compression)))
                bch2_set_rebalance_needs_scan(c, 0);
 
        ret = size;
@@ -883,7 +886,7 @@ static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
 
                for (i = 1; i < BCH_DATA_NR; i++)
                        prt_printf(out, "%-12s:%12llu\n",
-                              bch2_data_types[i],
+                              bch2_data_type_str(i),
                               percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
        }
 }
@@ -908,7 +911,7 @@ SHOW(bch2_dev)
        }
 
        if (attr == &sysfs_has_data) {
-               prt_bitflags(out, bch2_data_types, bch2_dev_has_data(c, ca));
+               prt_bitflags(out, __bch2_data_types, bch2_dev_has_data(c, ca));
                prt_char(out, '\n');
        }
 
index b1c867aa2b58e6f097cba1e4eedc37f55a58cc93..9220d7de10db67f6cd4a36040af7fe557756230b 100644 (file)
@@ -53,9 +53,9 @@ int bch2_run_thread_with_file(struct thread_with_file *thr,
        if (ret)
                goto err;
 
-       fd_install(fd, file);
        get_task_struct(thr->task);
        wake_up_process(thr->task);
+       fd_install(fd, file);
        return fd;
 err:
        if (fd >= 0)
index c94876b3bb06e4d8bf0ba490421ead37d87e5569..293b90d704fb5b48ed39038e793c4d3cbf77b5a8 100644 (file)
@@ -46,7 +46,7 @@ DECLARE_EVENT_CLASS(fs_str,
                __assign_str(str, str);
        ),
 
-       TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
+       TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
 );
 
 DECLARE_EVENT_CLASS(trans_str,
@@ -273,28 +273,14 @@ DEFINE_EVENT(bch_fs, journal_full,
        TP_ARGS(c)
 );
 
-DEFINE_EVENT(bch_fs, journal_entry_full,
-       TP_PROTO(struct bch_fs *c),
-       TP_ARGS(c)
+DEFINE_EVENT(fs_str, journal_entry_full,
+       TP_PROTO(struct bch_fs *c, const char *str),
+       TP_ARGS(c, str)
 );
 
-TRACE_EVENT(journal_entry_close,
-       TP_PROTO(struct bch_fs *c, unsigned bytes),
-       TP_ARGS(c, bytes),
-
-       TP_STRUCT__entry(
-               __field(dev_t,          dev                     )
-               __field(u32,            bytes                   )
-       ),
-
-       TP_fast_assign(
-               __entry->dev                    = c->dev;
-               __entry->bytes                  = bytes;
-       ),
-
-       TP_printk("%d,%d entry bytes %u",
-                 MAJOR(__entry->dev), MINOR(__entry->dev),
-                 __entry->bytes)
+DEFINE_EVENT(fs_str, journal_entry_close,
+       TP_PROTO(struct bch_fs *c, const char *str),
+       TP_ARGS(c, str)
 );
 
 DEFINE_EVENT(bio, journal_write,
@@ -542,7 +528,7 @@ TRACE_EVENT(btree_path_relock_fail,
                __entry->level                  = path->level;
                TRACE_BPOS_assign(pos, path->pos);
 
-               c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
+               c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
                __entry->self_read_count        = c.n[SIX_LOCK_read];
                __entry->self_intent_count      = c.n[SIX_LOCK_intent];
 
@@ -827,40 +813,28 @@ TRACE_EVENT(bucket_evacuate,
 );
 
 DEFINE_EVENT(fs_str, move_extent,
-       TP_PROTO(struct bch_fs *c, const char *k),
-       TP_ARGS(c, k)
+       TP_PROTO(struct bch_fs *c, const char *str),
+       TP_ARGS(c, str)
 );
 
 DEFINE_EVENT(fs_str, move_extent_read,
-       TP_PROTO(struct bch_fs *c, const char *k),
-       TP_ARGS(c, k)
+       TP_PROTO(struct bch_fs *c, const char *str),
+       TP_ARGS(c, str)
 );
 
 DEFINE_EVENT(fs_str, move_extent_write,
-       TP_PROTO(struct bch_fs *c, const char *k),
-       TP_ARGS(c, k)
+       TP_PROTO(struct bch_fs *c, const char *str),
+       TP_ARGS(c, str)
 );
 
 DEFINE_EVENT(fs_str, move_extent_finish,
-       TP_PROTO(struct bch_fs *c, const char *k),
-       TP_ARGS(c, k)
+       TP_PROTO(struct bch_fs *c, const char *str),
+       TP_ARGS(c, str)
 );
 
-TRACE_EVENT(move_extent_fail,
-       TP_PROTO(struct bch_fs *c, const char *msg),
-       TP_ARGS(c, msg),
-
-       TP_STRUCT__entry(
-               __field(dev_t,          dev                     )
-               __string(msg,           msg                     )
-       ),
-
-       TP_fast_assign(
-               __entry->dev            = c->dev;
-               __assign_str(msg, msg);
-       ),
-
-       TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
+DEFINE_EVENT(fs_str, move_extent_fail,
+       TP_PROTO(struct bch_fs *c, const char *str),
+       TP_ARGS(c, str)
 );
 
 DEFINE_EVENT(fs_str, move_extent_start_fail,
@@ -1039,7 +1013,7 @@ TRACE_EVENT(trans_restart_split_race,
                __entry->level          = b->c.level;
                __entry->written        = b->written;
                __entry->blocks         = btree_blocks(trans->c);
-               __entry->u64s_remaining = bch_btree_keys_u64s_remaining(trans->c, b);
+               __entry->u64s_remaining = bch2_btree_keys_u64s_remaining(b);
        ),
 
        TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
@@ -1146,8 +1120,6 @@ DEFINE_EVENT(transaction_restart_iter,    trans_restart_btree_node_split,
        TP_ARGS(trans, caller_ip, path)
 );
 
-struct get_locks_fail;
-
 TRACE_EVENT(trans_restart_upgrade,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
@@ -1195,11 +1167,9 @@ TRACE_EVENT(trans_restart_upgrade,
                  __entry->node_seq)
 );
 
-DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
-       TP_PROTO(struct btree_trans *trans,
-                unsigned long caller_ip,
-                struct btree_path *path),
-       TP_ARGS(trans, caller_ip, path)
+DEFINE_EVENT(trans_str,        trans_restart_relock,
+       TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
+       TP_ARGS(trans, caller_ip, str)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
index c2ef7cddaa4fcb0e9de9df263aadd019cc7a4965..231003b405efc304a4cefa61a6e4e2f30b4b9466 100644 (file)
@@ -241,12 +241,17 @@ bool bch2_is_zero(const void *_p, size_t n)
        return true;
 }
 
-void bch2_prt_u64_binary(struct printbuf *out, u64 v, unsigned nr_bits)
+void bch2_prt_u64_base2_nbits(struct printbuf *out, u64 v, unsigned nr_bits)
 {
        while (nr_bits)
                prt_char(out, '0' + ((v >> --nr_bits) & 1));
 }
 
+void bch2_prt_u64_base2(struct printbuf *out, u64 v)
+{
+       bch2_prt_u64_base2_nbits(out, v, fls64(v) ?: 1);
+}
+
 void bch2_print_string_as_lines(const char *prefix, const char *lines)
 {
        const char *p;
@@ -267,14 +272,14 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines)
        console_unlock();
 }
 
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr)
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr,
+                       gfp_t gfp)
 {
 #ifdef CONFIG_STACKTRACE
        unsigned nr_entries = 0;
-       int ret = 0;
 
        stack->nr = 0;
-       ret = darray_make_room(stack, 32);
+       int ret = darray_make_room_gfp(stack, 32, gfp);
        if (ret)
                return ret;
 
@@ -303,10 +308,10 @@ void bch2_prt_backtrace(struct printbuf *out, bch_stacktrace *stack)
        }
 }
 
-int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr)
+int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr, gfp_t gfp)
 {
        bch_stacktrace stack = { 0 };
-       int ret = bch2_save_backtrace(&stack, task, skipnr + 1);
+       int ret = bch2_save_backtrace(&stack, task, skipnr + 1, gfp);
 
        bch2_prt_backtrace(out, &stack);
        darray_exit(&stack);
@@ -413,14 +418,15 @@ static inline void bch2_time_stats_update_one(struct bch2_time_stats *stats,
                bch2_quantiles_update(&stats->quantiles, duration);
        }
 
-       if (time_after64(end, stats->last_event)) {
+       if (stats->last_event && time_after64(end, stats->last_event)) {
                freq = end - stats->last_event;
                mean_and_variance_update(&stats->freq_stats, freq);
                mean_and_variance_weighted_update(&stats->freq_stats_weighted, freq);
                stats->max_freq = max(stats->max_freq, freq);
                stats->min_freq = min(stats->min_freq, freq);
-               stats->last_event = end;
        }
+
+       stats->last_event = end;
 }
 
 static void __bch2_time_stats_clear_buffer(struct bch2_time_stats *stats,
@@ -1186,7 +1192,9 @@ int bch2_split_devs(const char *_dev_name, darray_str *ret)
 {
        darray_init(ret);
 
-       char *dev_name = kstrdup(_dev_name, GFP_KERNEL), *s = dev_name;
+       char *dev_name, *s, *orig;
+
+       dev_name = orig = kstrdup(_dev_name, GFP_KERNEL);
        if (!dev_name)
                return -ENOMEM;
 
@@ -1201,10 +1209,10 @@ int bch2_split_devs(const char *_dev_name, darray_str *ret)
                }
        }
 
-       kfree(dev_name);
+       kfree(orig);
        return 0;
 err:
        bch2_darray_str_exit(ret);
-       kfree(dev_name);
+       kfree(orig);
        return -ENOMEM;
 }
index c75fc31915d3936d8c0a26949915534aac482b3a..b414736d59a5b36d1344657eaeb6de6113ec5a09 100644 (file)
@@ -342,14 +342,15 @@ bool bch2_is_zero(const void *, size_t);
 
 u64 bch2_read_flag_list(char *, const char * const[]);
 
-void bch2_prt_u64_binary(struct printbuf *, u64, unsigned);
+void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
+void bch2_prt_u64_base2(struct printbuf *, u64);
 
 void bch2_print_string_as_lines(const char *prefix, const char *lines);
 
 typedef DARRAY(unsigned long) bch_stacktrace;
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned);
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);
 void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
-int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned);
+int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned, gfp_t);
 
 static inline void prt_bdevname(struct printbuf *out, struct block_device *bdev)
 {
index 5a1858fb9879afd1c70c3d5a64883315090d6dbe..9c0d2316031b1beceda4e1b68dcda4e34184a89e 100644 (file)
@@ -590,8 +590,9 @@ err:
        mutex_unlock(&inode->ei_update_lock);
 
        if (value &&
-           (opt_id == Opt_background_compression ||
-            opt_id == Opt_background_target))
+           (opt_id == Opt_background_target ||
+            opt_id == Opt_background_compression ||
+            (opt_id == Opt_compression && !inode_opt_get(c, &inode->ei_inode, background_compression))))
                bch2_set_rebalance_needs_scan(c, inode->ei_inode.bi_inum);
 
        return bch2_err_class(ret);
diff --git a/fs/bcachefs/xattr_format.h b/fs/bcachefs/xattr_format.h
new file mode 100644 (file)
index 0000000..e9f8105
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_XATTR_FORMAT_H
+#define _BCACHEFS_XATTR_FORMAT_H
+
+#define KEY_TYPE_XATTR_INDEX_USER              0
+#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS  1
+#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
+#define KEY_TYPE_XATTR_INDEX_TRUSTED           3
+#define KEY_TYPE_XATTR_INDEX_SECURITY          4
+
+struct bch_xattr {
+       struct bch_val          v;
+       __u8                    x_type;
+       __u8                    x_name_len;
+       __le16                  x_val_len;
+       __u8                    x_name[];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_XATTR_FORMAT_H */
index a9be9ac9922225bb32801aec5834c9e9d87ffc97..378d9103a2072b1628e66d850a42b9254be72b36 100644 (file)
@@ -1455,6 +1455,7 @@ out:
  */
 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 {
+       LIST_HEAD(retry_list);
        struct btrfs_block_group *block_group;
        struct btrfs_space_info *space_info;
        struct btrfs_trans_handle *trans;
@@ -1476,6 +1477,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 
        spin_lock(&fs_info->unused_bgs_lock);
        while (!list_empty(&fs_info->unused_bgs)) {
+               u64 used;
                int trimming;
 
                block_group = list_first_entry(&fs_info->unused_bgs,
@@ -1511,9 +1513,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
                        goto next;
                }
 
+               spin_lock(&space_info->lock);
                spin_lock(&block_group->lock);
-               if (block_group->reserved || block_group->pinned ||
-                   block_group->used || block_group->ro ||
+               if (btrfs_is_block_group_used(block_group) || block_group->ro ||
                    list_is_singular(&block_group->list)) {
                        /*
                         * We want to bail if we made new allocations or have
@@ -1523,10 +1525,49 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
                         */
                        trace_btrfs_skip_unused_block_group(block_group);
                        spin_unlock(&block_group->lock);
+                       spin_unlock(&space_info->lock);
                        up_write(&space_info->groups_sem);
                        goto next;
                }
+
+               /*
+                * The block group may be unused but there may be space reserved
+                * accounting with the existence of that block group, that is,
+                * space_info->bytes_may_use was incremented by a task but no
+                * space was yet allocated from the block group by the task.
+                * That space may or may not be allocated, as we are generally
+                * pessimistic about space reservation for metadata as well as
+                * for data when using compression (as we reserve space based on
+                * the worst case, when data can't be compressed, and before
+                * actually attempting compression, before starting writeback).
+                *
+                * So check if the total space of the space_info minus the size
+                * of this block group is less than the used space of the
+                * space_info - if that's the case, then it means we have tasks
+                * that might be relying on the block group in order to allocate
+                * extents, and add back the block group to the unused list when
+                * we finish, so that we retry later in case no tasks ended up
+                * needing to allocate extents from the block group.
+                */
+               used = btrfs_space_info_used(space_info, true);
+               if (space_info->total_bytes - block_group->length < used) {
+                       /*
+                        * Add a reference for the list, compensate for the ref
+                        * drop under the "next" label for the
+                        * fs_info->unused_bgs list.
+                        */
+                       btrfs_get_block_group(block_group);
+                       list_add_tail(&block_group->bg_list, &retry_list);
+
+                       trace_btrfs_skip_unused_block_group(block_group);
+                       spin_unlock(&block_group->lock);
+                       spin_unlock(&space_info->lock);
+                       up_write(&space_info->groups_sem);
+                       goto next;
+               }
+
                spin_unlock(&block_group->lock);
+               spin_unlock(&space_info->lock);
 
                /* We don't want to force the issue, only flip if it's ok. */
                ret = inc_block_group_ro(block_group, 0);
@@ -1650,12 +1691,16 @@ next:
                btrfs_put_block_group(block_group);
                spin_lock(&fs_info->unused_bgs_lock);
        }
+       list_splice_tail(&retry_list, &fs_info->unused_bgs);
        spin_unlock(&fs_info->unused_bgs_lock);
        mutex_unlock(&fs_info->reclaim_bgs_lock);
        return;
 
 flip_async:
        btrfs_end_transaction(trans);
+       spin_lock(&fs_info->unused_bgs_lock);
+       list_splice_tail(&retry_list, &fs_info->unused_bgs);
+       spin_unlock(&fs_info->unused_bgs_lock);
        mutex_unlock(&fs_info->reclaim_bgs_lock);
        btrfs_put_block_group(block_group);
        btrfs_discard_punt_unused_bgs_list(fs_info);
@@ -2684,6 +2729,37 @@ next:
                btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
                list_del_init(&block_group->bg_list);
                clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
+
+               /*
+                * If the block group is still unused, add it to the list of
+                * unused block groups. The block group may have been created in
+                * order to satisfy a space reservation, in which case the
+                * extent allocation only happens later. But often we don't
+                * actually need to allocate space that we previously reserved,
+                * so the block group may become unused for a long time. For
+                * example for metadata we generally reserve space for a worst
+                * possible scenario, but then don't end up allocating all that
+                * space or none at all (due to no need to COW, extent buffers
+                * were already COWed in the current transaction and still
+                * unwritten, tree heights lower than the maximum possible
+                * height, etc). For data we generally reserve the exact amount
+                * of space we are going to allocate later, the exception is
+                * when using compression, as we must reserve space based on the
+                * uncompressed data size, because the compression is only done
+                * when writeback triggered and we don't know how much space we
+                * are actually going to need, so we reserve the uncompressed
+                * size because the data may be uncompressible in the worst case.
+                */
+               if (ret == 0) {
+                       bool used;
+
+                       spin_lock(&block_group->lock);
+                       used = btrfs_is_block_group_used(block_group);
+                       spin_unlock(&block_group->lock);
+
+                       if (!used)
+                               btrfs_mark_bg_unused(block_group);
+               }
        }
        btrfs_trans_release_chunk_metadata(trans);
 }
index c4a1f01cc1c240d108702fc8899de9efe00da613..962b11983901a86ae16add7962c5ea5a26796b6f 100644 (file)
@@ -257,6 +257,13 @@ static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
        return (block_group->start + block_group->length);
 }
 
+static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg)
+{
+       lockdep_assert_held(&bg->lock);
+
+       return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0);
+}
+
 static inline bool btrfs_is_block_group_data_only(
                                        struct btrfs_block_group *block_group)
 {
index 193168214eeb17fc8a8a9cff3942eb3f68958e1b..68345f73d429aa2d4537ef620a0048e61c4eb7a8 100644 (file)
@@ -141,16 +141,16 @@ static int compression_decompress_bio(struct list_head *ws,
 }
 
 static int compression_decompress(int type, struct list_head *ws,
-               const u8 *data_in, struct page *dest_page,
-               unsigned long start_byte, size_t srclen, size_t destlen)
+               const u8 *data_in, struct page *dest_page,
+               unsigned long dest_pgoff, size_t srclen, size_t destlen)
 {
        switch (type) {
        case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
-                                               start_byte, srclen, destlen);
+                                               dest_pgoff, srclen, destlen);
        case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
-                                               start_byte, srclen, destlen);
+                                               dest_pgoff, srclen, destlen);
        case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
-                                               start_byte, srclen, destlen);
+                                               dest_pgoff, srclen, destlen);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
@@ -1037,14 +1037,23 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
  * start_byte tells us the offset into the compressed data we're interested in
  */
 int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
-                    unsigned long start_byte, size_t srclen, size_t destlen)
+                    unsigned long dest_pgoff, size_t srclen, size_t destlen)
 {
+       struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
        struct list_head *workspace;
+       const u32 sectorsize = fs_info->sectorsize;
        int ret;
 
+       /*
+        * The full destination page range should not exceed the page size.
+        * And the @destlen should not exceed sectorsize, as this is only called for
+        * inline file extents, which should not exceed sectorsize.
+        */
+       ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
+
        workspace = get_workspace(type, 0);
        ret = compression_decompress(type, workspace, data_in, dest_page,
-                                    start_byte, srclen, destlen);
+                                    dest_pgoff, srclen, destlen);
        put_workspace(type, workspace);
 
        return ret;
index 93cc92974deee4cebb4fd25d38118f2c046e1840..afd7e50d073d4ac743c924b70e7e1734af2f6ffc 100644 (file)
@@ -148,7 +148,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
                unsigned long *total_in, unsigned long *total_out);
 int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
 int zlib_decompress(struct list_head *ws, const u8 *data_in,
-               struct page *dest_page, unsigned long start_byte, size_t srclen,
+               struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
                size_t destlen);
 struct list_head *zlib_alloc_workspace(unsigned int level);
 void zlib_free_workspace(struct list_head *ws);
@@ -159,7 +159,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                unsigned long *total_in, unsigned long *total_out);
 int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
 int lzo_decompress(struct list_head *ws, const u8 *data_in,
-               struct page *dest_page, unsigned long start_byte, size_t srclen,
+               struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
                size_t destlen);
 struct list_head *lzo_alloc_workspace(unsigned int level);
 void lzo_free_workspace(struct list_head *ws);
index 2833e8ef4c098f680a4883d41a1e925dc477bc2f..acf9f4b6c044025fe2ef288e99716d0373d01f31 100644 (file)
@@ -245,7 +245,6 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
        struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
        u64 reserve_size = 0;
        u64 qgroup_rsv_size = 0;
-       u64 csum_leaves;
        unsigned outstanding_extents;
 
        lockdep_assert_held(&inode->lock);
@@ -260,10 +259,12 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
                                                outstanding_extents);
                reserve_size += btrfs_calc_metadata_size(fs_info, 1);
        }
-       csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
-                                                inode->csum_bytes);
-       reserve_size += btrfs_calc_insert_metadata_size(fs_info,
-                                                       csum_leaves);
+       if (!(inode->flags & BTRFS_INODE_NODATASUM)) {
+               u64 csum_leaves;
+
+               csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
+               reserve_size += btrfs_calc_insert_metadata_size(fs_info, csum_leaves);
+       }
        /*
         * For qgroup rsv, the calculation is very simple:
         * account one nodesize for each outstanding extent
@@ -278,14 +279,20 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
        spin_unlock(&block_rsv->lock);
 }
 
-static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
+static void calc_inode_reservations(struct btrfs_inode *inode,
                                    u64 num_bytes, u64 disk_num_bytes,
                                    u64 *meta_reserve, u64 *qgroup_reserve)
 {
+       struct btrfs_fs_info *fs_info = inode->root->fs_info;
        u64 nr_extents = count_max_extents(fs_info, num_bytes);
-       u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
+       u64 csum_leaves;
        u64 inode_update = btrfs_calc_metadata_size(fs_info, 1);
 
+       if (inode->flags & BTRFS_INODE_NODATASUM)
+               csum_leaves = 0;
+       else
+               csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
+
        *meta_reserve = btrfs_calc_insert_metadata_size(fs_info,
                                                nr_extents + csum_leaves);
 
@@ -337,7 +344,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
         * everything out and try again, which is bad.  This way we just
         * over-reserve slightly, and clean up the mess when we are done.
         */
-       calc_inode_reservations(fs_info, num_bytes, disk_num_bytes,
+       calc_inode_reservations(inode, num_bytes, disk_num_bytes,
                                &meta_reserve, &qgroup_reserve);
        ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true,
                                                 noflush);
@@ -359,7 +366,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
        nr_extents = count_max_extents(fs_info, num_bytes);
        spin_lock(&inode->lock);
        btrfs_mod_outstanding_extents(inode, nr_extents);
-       inode->csum_bytes += disk_num_bytes;
+       if (!(inode->flags & BTRFS_INODE_NODATASUM))
+               inode->csum_bytes += disk_num_bytes;
        btrfs_calculate_inode_block_rsv_size(fs_info, inode);
        spin_unlock(&inode->lock);
 
@@ -393,7 +401,8 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
 
        num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
        spin_lock(&inode->lock);
-       inode->csum_bytes -= num_bytes;
+       if (!(inode->flags & BTRFS_INODE_NODATASUM))
+               inode->csum_bytes -= num_bytes;
        btrfs_calculate_inode_block_rsv_size(fs_info, inode);
        spin_unlock(&inode->lock);
 
index c6907d533fe83912576fd92283658539e0abbb81..e71ef97d0a7cabb236e8cbd073667b2e3d143dca 100644 (file)
@@ -1336,8 +1336,17 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
 again:
        root = btrfs_lookup_fs_root(fs_info, objectid);
        if (root) {
-               /* Shouldn't get preallocated anon_dev for cached roots */
-               ASSERT(!anon_dev);
+               /*
+                * Some other caller may have read out the newly inserted
+                * subvolume already (for things like backref walk etc).  Not
+                * that common but still possible.  In that case, we just need
+                * to free the anon_dev.
+                */
+               if (unlikely(anon_dev)) {
+                       free_anon_bdev(anon_dev);
+                       anon_dev = 0;
+               }
+
                if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
                        btrfs_put_root(root);
                        return ERR_PTR(-ENOENT);
index f396aba92c579641d1cce38b48e7e7cd4febc510..8e8cc11112772dfd020217e30d74fe138c3151ca 100644 (file)
@@ -1260,7 +1260,8 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
        u64 bytes_left, end;
        u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);
 
-       if (WARN_ON(start != aligned_start)) {
+       /* Adjust the range to be aligned to 512B sectors if necessary. */
+       if (start != aligned_start) {
                len -= aligned_start - start;
                len = round_down(len, 1 << SECTOR_SHIFT);
                start = aligned_start;
@@ -4298,6 +4299,42 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
        return 0;
 }
 
+static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
+                                   struct find_free_extent_ctl *ffe_ctl)
+{
+       if (ffe_ctl->for_treelog) {
+               spin_lock(&fs_info->treelog_bg_lock);
+               if (fs_info->treelog_bg)
+                       ffe_ctl->hint_byte = fs_info->treelog_bg;
+               spin_unlock(&fs_info->treelog_bg_lock);
+       } else if (ffe_ctl->for_data_reloc) {
+               spin_lock(&fs_info->relocation_bg_lock);
+               if (fs_info->data_reloc_bg)
+                       ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+               spin_unlock(&fs_info->relocation_bg_lock);
+       } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
+               struct btrfs_block_group *block_group;
+
+               spin_lock(&fs_info->zone_active_bgs_lock);
+               list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
+                       /*
+                        * No lock is OK here because avail is monotonically
+                        * decreasing, and this is just a hint.
+                        */
+                       u64 avail = block_group->zone_capacity - block_group->alloc_offset;
+
+                       if (block_group_bits(block_group, ffe_ctl->flags) &&
+                           avail >= ffe_ctl->num_bytes) {
+                               ffe_ctl->hint_byte = block_group->start;
+                               break;
+                       }
+               }
+               spin_unlock(&fs_info->zone_active_bgs_lock);
+       }
+
+       return 0;
+}
+
 static int prepare_allocation(struct btrfs_fs_info *fs_info,
                              struct find_free_extent_ctl *ffe_ctl,
                              struct btrfs_space_info *space_info,
@@ -4308,19 +4345,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
                return prepare_allocation_clustered(fs_info, ffe_ctl,
                                                    space_info, ins);
        case BTRFS_EXTENT_ALLOC_ZONED:
-               if (ffe_ctl->for_treelog) {
-                       spin_lock(&fs_info->treelog_bg_lock);
-                       if (fs_info->treelog_bg)
-                               ffe_ctl->hint_byte = fs_info->treelog_bg;
-                       spin_unlock(&fs_info->treelog_bg_lock);
-               }
-               if (ffe_ctl->for_data_reloc) {
-                       spin_lock(&fs_info->relocation_bg_lock);
-                       if (fs_info->data_reloc_bg)
-                               ffe_ctl->hint_byte = fs_info->data_reloc_bg;
-                       spin_unlock(&fs_info->relocation_bg_lock);
-               }
-               return 0;
+               return prepare_allocation_zoned(fs_info, ffe_ctl);
        default:
                BUG();
        }
index 809b11472a806c92ef9ad4454d354a9460a51b7b..f88e0ca8331d9b5448e8e07c89e5e66395c782b3 100644 (file)
@@ -3184,8 +3184,23 @@ out:
                        unwritten_start += logical_len;
                clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
 
-               /* Drop extent maps for the part of the extent we didn't write. */
-               btrfs_drop_extent_map_range(inode, unwritten_start, end, false);
+               /*
+                * Drop extent maps for the part of the extent we didn't write.
+                *
+                * We have an exception here for the free_space_inode, this is
+                * because when we do btrfs_get_extent() on the free space inode
+                * we will search the commit root.  If this is a new block group
+                * we won't find anything, and we will trip over the assert in
+                * writepage where we do ASSERT(em->block_start !=
+                * EXTENT_MAP_HOLE).
+                *
+                * Theoretically we could also skip this for any NOCOW extent as
+                * we don't mess with the extent map tree in the NOCOW case, but
+                * for now simply skip this if we are the free space inode.
+                */
+               if (!btrfs_is_free_space_inode(inode))
+                       btrfs_drop_extent_map_range(inode, unwritten_start,
+                                                   end, false);
 
                /*
                 * If the ordered extent had an IOERR or something else went
@@ -4458,6 +4473,8 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
        u64 root_flags;
        int ret;
 
+       down_write(&fs_info->subvol_sem);
+
        /*
         * Don't allow to delete a subvolume with send in progress. This is
         * inside the inode lock so the error handling that has to drop the bit
@@ -4469,25 +4486,25 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
                btrfs_warn(fs_info,
                           "attempt to delete subvolume %llu during send",
                           dest->root_key.objectid);
-               return -EPERM;
+               ret = -EPERM;
+               goto out_up_write;
        }
        if (atomic_read(&dest->nr_swapfiles)) {
                spin_unlock(&dest->root_item_lock);
                btrfs_warn(fs_info,
                           "attempt to delete subvolume %llu with active swapfile",
                           root->root_key.objectid);
-               return -EPERM;
+               ret = -EPERM;
+               goto out_up_write;
        }
        root_flags = btrfs_root_flags(&dest->root_item);
        btrfs_set_root_flags(&dest->root_item,
                             root_flags | BTRFS_ROOT_SUBVOL_DEAD);
        spin_unlock(&dest->root_item_lock);
 
-       down_write(&fs_info->subvol_sem);
-
        ret = may_destroy_subvol(dest);
        if (ret)
-               goto out_up_write;
+               goto out_undead;
 
        btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
        /*
@@ -4497,7 +4514,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
         */
        ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
        if (ret)
-               goto out_up_write;
+               goto out_undead;
 
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
@@ -4563,15 +4580,17 @@ out_end_trans:
        inode->i_flags |= S_DEAD;
 out_release:
        btrfs_subvolume_release_metadata(root, &block_rsv);
-out_up_write:
-       up_write(&fs_info->subvol_sem);
+out_undead:
        if (ret) {
                spin_lock(&dest->root_item_lock);
                root_flags = btrfs_root_flags(&dest->root_item);
                btrfs_set_root_flags(&dest->root_item,
                                root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
                spin_unlock(&dest->root_item_lock);
-       } else {
+       }
+out_up_write:
+       up_write(&fs_info->subvol_sem);
+       if (!ret) {
                d_invalidate(dentry);
                btrfs_prune_dentries(dest);
                ASSERT(dest->send_in_progress == 0);
@@ -10269,6 +10288,13 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
        if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
                return -EINVAL;
 
+       /*
+        * Compressed extents should always have checksums, so error out if we
+        * have a NOCOW file or inode was created while mounted with NODATASUM.
+        */
+       if (inode->flags & BTRFS_INODE_NODATASUM)
+               return -EINVAL;
+
        orig_count = iov_iter_count(from);
 
        /* The extent size must be sane. */
index 41b479861b3c767bb582920db56ea442c8f7f381..ac3316e0d11c3a42835dc8a2094b00a16019bd64 100644 (file)
@@ -790,6 +790,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
                return -EOPNOTSUPP;
        }
 
+       if (btrfs_root_refs(&root->root_item) == 0)
+               return -ENOENT;
+
        if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
                return -EINVAL;
 
@@ -2608,6 +2611,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
                                ret = -EFAULT;
                                goto out;
                        }
+                       if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
+                               ret = -EOPNOTSUPP;
+                               goto out;
+                       }
                        /* compression requires us to start the IO */
                        if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
                                range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
@@ -3808,6 +3815,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
                goto out;
        }
 
+       if (sa->create && is_fstree(sa->qgroupid)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
index 1131d5a29d612ee50e14c488b1812a0657c259f1..e43bc0fdc74ec9b0224568928b31e0ca10c77805 100644 (file)
@@ -425,16 +425,16 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 }
 
 int lzo_decompress(struct list_head *ws, const u8 *data_in,
-               struct page *dest_page, unsigned long start_byte, size_t srclen,
+               struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
                size_t destlen)
 {
        struct workspace *workspace = list_entry(ws, struct workspace, list);
+       struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
+       const u32 sectorsize = fs_info->sectorsize;
        size_t in_len;
        size_t out_len;
        size_t max_segment_len = WORKSPACE_BUF_LENGTH;
        int ret = 0;
-       char *kaddr;
-       unsigned long bytes;
 
        if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
                return -EUCLEAN;
@@ -451,7 +451,7 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
        }
        data_in += LZO_LEN;
 
-       out_len = PAGE_SIZE;
+       out_len = sectorsize;
        ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
        if (ret != LZO_E_OK) {
                pr_warn("BTRFS: decompress failed!\n");
@@ -459,29 +459,13 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
                goto out;
        }
 
-       if (out_len < start_byte) {
+       ASSERT(out_len <= sectorsize);
+       memcpy_to_page(dest_page, dest_pgoff, workspace->buf, out_len);
+       /* Early end, considered as an error. */
+       if (unlikely(out_len < destlen)) {
                ret = -EIO;
-               goto out;
+               memzero_page(dest_page, dest_pgoff + out_len, destlen - out_len);
        }
-
-       /*
-        * the caller is already checking against PAGE_SIZE, but lets
-        * move this check closer to the memcpy/memset
-        */
-       destlen = min_t(unsigned long, destlen, PAGE_SIZE);
-       bytes = min_t(unsigned long, destlen, out_len - start_byte);
-
-       kaddr = kmap_local_page(dest_page);
-       memcpy(kaddr, workspace->buf + start_byte, bytes);
-
-       /*
-        * btrfs_getblock is doing a zero on the tail of the page too,
-        * but this will cover anything missing from the decompressed
-        * data.
-        */
-       if (bytes < destlen)
-               memset(kaddr+bytes, 0, destlen-bytes);
-       kunmap_local(kaddr);
 out:
        return ret;
 }
index 63b426cc77989670e0f7890a1cb75a17348e96cb..5470e1cdf10c5348df676cd290bef45811a46019 100644 (file)
@@ -1736,6 +1736,15 @@ out:
        return ret;
 }
 
+static bool qgroup_has_usage(struct btrfs_qgroup *qgroup)
+{
+       return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 ||
+               qgroup->excl > 0 || qgroup->excl_cmpr > 0 ||
+               qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 ||
+               qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 ||
+               qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0);
+}
+
 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -1755,6 +1764,11 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
                goto out;
        }
 
+       if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        /* Check if there are no children of this qgroup */
        if (!list_empty(&qgroup->members)) {
                ret = -EBUSY;
index 6486f0d7e9931b4fafbc03ddc5ddca0863679d7a..8c4fc98ca9ce7de055841a06e43863eeb6b960e0 100644 (file)
@@ -889,8 +889,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
 out_unlock:
        spin_unlock(&fs_info->ref_verify_lock);
 out:
-       if (ret)
+       if (ret) {
+               btrfs_free_ref_cache(fs_info);
                btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+       }
        return ret;
 }
 
@@ -1021,8 +1023,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
                }
        }
        if (ret) {
-               btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
                btrfs_free_ref_cache(fs_info);
+               btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
        }
        btrfs_free_path(path);
        return ret;
index a01807cbd4d44e4127c798e470cef51d8bfa13e6..0123d272892373b3465c942e75e181d3bc77e681 100644 (file)
@@ -1098,12 +1098,22 @@ out:
 static void scrub_read_endio(struct btrfs_bio *bbio)
 {
        struct scrub_stripe *stripe = bbio->private;
+       struct bio_vec *bvec;
+       int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
+       int num_sectors;
+       u32 bio_size = 0;
+       int i;
+
+       ASSERT(sector_nr < stripe->nr_sectors);
+       bio_for_each_bvec_all(bvec, &bbio->bio, i)
+               bio_size += bvec->bv_len;
+       num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
 
        if (bbio->bio.bi_status) {
-               bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
-               bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
+               bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
+               bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
        } else {
-               bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
+               bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
        }
        bio_put(&bbio->bio);
        if (atomic_dec_and_test(&stripe->pending_io)) {
@@ -1636,6 +1646,9 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
 {
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct btrfs_bio *bbio = NULL;
+       unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
+                                     stripe->bg->length - stripe->logical) >>
+                                 fs_info->sectorsize_bits;
        u64 stripe_len = BTRFS_STRIPE_LEN;
        int mirror = stripe->mirror_num;
        int i;
@@ -1646,6 +1659,10 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
                struct page *page = scrub_stripe_get_page(stripe, i);
                unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
 
+               /* We're beyond the chunk boundary, no need to read anymore. */
+               if (i >= nr_sectors)
+                       break;
+
                /* The current sector cannot be merged, submit the bio. */
                if (bbio &&
                    ((i > 0 &&
@@ -1701,6 +1718,9 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
 {
        struct btrfs_fs_info *fs_info = sctx->fs_info;
        struct btrfs_bio *bbio;
+       unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
+                                     stripe->bg->length - stripe->logical) >>
+                                 fs_info->sectorsize_bits;
        int mirror = stripe->mirror_num;
 
        ASSERT(stripe->bg);
@@ -1715,14 +1735,16 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
        bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
                               scrub_read_endio, stripe);
 
-       /* Read the whole stripe. */
        bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
-       for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
+       /* Read the whole range inside the chunk boundary. */
+       for (unsigned int cur = 0; cur < nr_sectors; cur++) {
+               struct page *page = scrub_stripe_get_page(stripe, cur);
+               unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
                int ret;
 
-               ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
+               ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
                /* We should have allocated enough bio vectors. */
-               ASSERT(ret == PAGE_SIZE);
+               ASSERT(ret == fs_info->sectorsize);
        }
        atomic_inc(&stripe->pending_io);
 
index 4e36550618e580044fb0b0d573ddfee196cdca5d..7902298c1f25bbee1586a97f2223f3c079e3a8fb 100644 (file)
@@ -8111,7 +8111,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
        }
 
        if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
-               ret = -EINVAL;
+               ret = -EOPNOTSUPP;
                goto out;
        }
 
@@ -8205,8 +8205,8 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
                goto out;
        }
 
-       sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
-                                    arg->clone_sources_count + 1,
+       sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
+                                    sizeof(*sctx->clone_roots),
                                     GFP_KERNEL);
        if (!sctx->clone_roots) {
                ret = -ENOMEM;
index 93511d54abf8280bc6778a17b5fa75a28d3585c1..0e49dab8dad2480243f4d32e6ee934c0f2b35b67 100644 (file)
@@ -475,7 +475,8 @@ void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
 
        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
-       folio_start_writeback(folio);
+       if (!folio_test_writeback(folio))
+               folio_start_writeback(folio);
        spin_unlock_irqrestore(&subpage->lock, flags);
 }
 
index 896acfda17895150ff501960dd72f084c542301e..101f786963d4d7712baab28c912226fb741c0c9b 100644 (file)
@@ -1457,6 +1457,14 @@ static int btrfs_reconfigure(struct fs_context *fc)
 
        btrfs_info_to_ctx(fs_info, &old_ctx);
 
+       /*
+        * This is our "bind mount" trick, we don't want to allow the user to do
+        * anything other than mount a different ro/rw and a different subvol,
+        * all of the mount options should be maintained.
+        */
+       if (mount_reconfigure)
+               ctx->mount_opt = old_ctx.mount_opt;
+
        sync_filesystem(sb);
        set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
 
index 5b3333ceef04818dbf98270da4bb84c99e5c70f8..c52807d97efa553b0b5e4765e11606a8ce644161 100644 (file)
@@ -564,56 +564,22 @@ static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
                                        u64 num_bytes,
                                        u64 *delayed_refs_bytes)
 {
-       struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info;
-       u64 extra_delayed_refs_bytes = 0;
-       u64 bytes;
+       u64 bytes = num_bytes + *delayed_refs_bytes;
        int ret;
 
-       /*
-        * If there's a gap between the size of the delayed refs reserve and
-        * its reserved space, than some tasks have added delayed refs or bumped
-        * its size otherwise (due to block group creation or removal, or block
-        * group item update). Also try to allocate that gap in order to prevent
-        * using (and possibly abusing) the global reserve when committing the
-        * transaction.
-        */
-       if (flush == BTRFS_RESERVE_FLUSH_ALL &&
-           !btrfs_block_rsv_full(delayed_refs_rsv)) {
-               spin_lock(&delayed_refs_rsv->lock);
-               if (delayed_refs_rsv->size > delayed_refs_rsv->reserved)
-                       extra_delayed_refs_bytes = delayed_refs_rsv->size -
-                               delayed_refs_rsv->reserved;
-               spin_unlock(&delayed_refs_rsv->lock);
-       }
-
-       bytes = num_bytes + *delayed_refs_bytes + extra_delayed_refs_bytes;
-
        /*
         * We want to reserve all the bytes we may need all at once, so we only
         * do 1 enospc flushing cycle per transaction start.
         */
        ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
-       if (ret == 0) {
-               if (extra_delayed_refs_bytes > 0)
-                       btrfs_migrate_to_delayed_refs_rsv(fs_info,
-                                                         extra_delayed_refs_bytes);
-               return 0;
-       }
-
-       if (extra_delayed_refs_bytes > 0) {
-               bytes -= extra_delayed_refs_bytes;
-               ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
-               if (ret == 0)
-                       return 0;
-       }
 
        /*
         * If we are an emergency flush, which can steal from the global block
         * reserve, then attempt to not reserve space for the delayed refs, as
         * we will consume space for them from the global block reserve.
         */
-       if (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
+       if (ret && flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
                bytes -= *delayed_refs_bytes;
                *delayed_refs_bytes = 0;
                ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
index 50fdc69fdddf9d26014a65ed73c13fe694d05e4b..6eccf8496486c0630cd85c90ca813170f08e6eb5 100644 (file)
@@ -1436,7 +1436,7 @@ static int check_extent_item(struct extent_buffer *leaf,
                if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
                        extent_err(leaf, slot,
 "inline ref item overflows extent item, ptr %lu iref size %u end %lu",
-                                  ptr, inline_type, end);
+                                  ptr, btrfs_extent_inline_ref_size(inline_type), end);
                        return -EUCLEAN;
                }
 
index 4c32497311d2ff6ba28fc9ac5ba8dd5b8f835a66..d67785be2c778c6611d639dcbdcffffec4c513c2 100644 (file)
@@ -3087,7 +3087,6 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
        map = btrfs_find_chunk_map(fs_info, logical, length);
 
        if (unlikely(!map)) {
-               read_unlock(&fs_info->mapping_tree_lock);
                btrfs_crit(fs_info,
                           "unable to find chunk map for logical %llu length %llu",
                           logical, length);
@@ -3095,7 +3094,6 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
        }
 
        if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
-               read_unlock(&fs_info->mapping_tree_lock);
                btrfs_crit(fs_info,
                           "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
                           logical, logical + length, map->start,
index 36cf1f0e338e2f59d736aaeb1001e00e8eaddaa3..8da66ea699e8febfdef6cc189c5917d22628265d 100644 (file)
@@ -354,18 +354,13 @@ done:
 }
 
 int zlib_decompress(struct list_head *ws, const u8 *data_in,
-               struct page *dest_page, unsigned long start_byte, size_t srclen,
+               struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
                size_t destlen)
 {
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0;
        int wbits = MAX_WBITS;
-       unsigned long bytes_left;
-       unsigned long total_out = 0;
-       unsigned long pg_offset = 0;
-
-       destlen = min_t(unsigned long, destlen, PAGE_SIZE);
-       bytes_left = destlen;
+       unsigned long to_copy;
 
        workspace->strm.next_in = data_in;
        workspace->strm.avail_in = srclen;
@@ -390,60 +385,30 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in,
                return -EIO;
        }
 
-       while (bytes_left > 0) {
-               unsigned long buf_start;
-               unsigned long buf_offset;
-               unsigned long bytes;
-
-               ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
-               if (ret != Z_OK && ret != Z_STREAM_END)
-                       break;
-
-               buf_start = total_out;
-               total_out = workspace->strm.total_out;
-
-               if (total_out == buf_start) {
-                       ret = -EIO;
-                       break;
-               }
-
-               if (total_out <= start_byte)
-                       goto next;
-
-               if (total_out > start_byte && buf_start < start_byte)
-                       buf_offset = start_byte - buf_start;
-               else
-                       buf_offset = 0;
-
-               bytes = min(PAGE_SIZE - pg_offset,
-                           PAGE_SIZE - (buf_offset % PAGE_SIZE));
-               bytes = min(bytes, bytes_left);
+       /*
+        * Everything (in/out buf) should be at most one sector, there should
+        * be no need to switch any input/output buffer.
+        */
+       ret = zlib_inflate(&workspace->strm, Z_FINISH);
+       to_copy = min(workspace->strm.total_out, destlen);
+       if (ret != Z_STREAM_END)
+               goto out;
 
-               memcpy_to_page(dest_page, pg_offset,
-                              workspace->buf + buf_offset, bytes);
+       memcpy_to_page(dest_page, dest_pgoff, workspace->buf, to_copy);
 
-               pg_offset += bytes;
-               bytes_left -= bytes;
-next:
-               workspace->strm.next_out = workspace->buf;
-               workspace->strm.avail_out = workspace->buf_size;
-       }
-
-       if (ret != Z_STREAM_END && bytes_left != 0)
+out:
+       if (unlikely(to_copy != destlen)) {
+               pr_warn_ratelimited("BTRFS: inflate failed, decompressed=%lu expected=%zu\n",
+                                       to_copy, destlen);
                ret = -EIO;
-       else
+       } else {
                ret = 0;
+       }
 
        zlib_inflateEnd(&workspace->strm);
 
-       /*
-        * this should only happen if zlib returned fewer bytes than we
-        * expected.  btrfs_get_block is responsible for zeroing from the
-        * end of the inline extent (destlen) to the end of the page
-        */
-       if (pg_offset < destlen) {
-               memzero_page(dest_page, pg_offset, destlen - pg_offset);
-       }
+       if (unlikely(to_copy < destlen))
+               memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
        return ret;
 }
 
index 5bd76813b23f065fdf670bf8fe3fbd59ee0c88d9..3a5d69ff25fc221f20c1e37a9854021eff246bad 100644 (file)
@@ -1670,6 +1670,7 @@ out:
        }
        bitmap_free(active);
        kfree(zone_info);
+       btrfs_free_chunk_map(map);
 
        return ret;
 }
@@ -2055,6 +2056,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
        map = block_group->physical_map;
 
+       spin_lock(&fs_info->zone_active_bgs_lock);
        spin_lock(&block_group->lock);
        if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
                ret = true;
@@ -2067,7 +2069,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
                goto out_unlock;
        }
 
-       spin_lock(&fs_info->zone_active_bgs_lock);
        for (i = 0; i < map->num_stripes; i++) {
                struct btrfs_zoned_device_info *zinfo;
                int reserved = 0;
@@ -2087,20 +2088,17 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
                 */
                if (atomic_read(&zinfo->active_zones_left) <= reserved) {
                        ret = false;
-                       spin_unlock(&fs_info->zone_active_bgs_lock);
                        goto out_unlock;
                }
 
                if (!btrfs_dev_set_active_zone(device, physical)) {
                        /* Cannot activate the zone */
                        ret = false;
-                       spin_unlock(&fs_info->zone_active_bgs_lock);
                        goto out_unlock;
                }
                if (!is_data)
                        zinfo->reserved_active_zones--;
        }
-       spin_unlock(&fs_info->zone_active_bgs_lock);
 
        /* Successfully activated all the zones */
        set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
@@ -2108,8 +2106,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
        /* For the active block group list */
        btrfs_get_block_group(block_group);
-
-       spin_lock(&fs_info->zone_active_bgs_lock);
        list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
        spin_unlock(&fs_info->zone_active_bgs_lock);
 
@@ -2117,6 +2113,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
 out_unlock:
        spin_unlock(&block_group->lock);
+       spin_unlock(&fs_info->zone_active_bgs_lock);
        return ret;
 }
 
index 8df715640a48f32cae9b1e104e2bd55cc99f25fd..c5a070550ee334f69b57cb2b1ed3af7ceaac3b4f 100644 (file)
@@ -2,7 +2,7 @@
 
 config CACHEFILES
        tristate "Filesystem caching on files"
-       depends on FSCACHE && BLOCK
+       depends on NETFS_SUPPORT && FSCACHE && BLOCK
        help
          This permits use of a mounted filesystem as a cache for other
          filesystems - primarily networking filesystems - thus allowing fast
index 4a87c9d714a9498b80599d15461c48f0ea1c3f68..d33169f0018b103a7ad30ed20b258869e740e556 100644 (file)
@@ -246,7 +246,7 @@ extern bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
                                       enum fscache_want_state want_state);
 extern int __cachefiles_prepare_write(struct cachefiles_object *object,
                                      struct file *file,
-                                     loff_t *_start, size_t *_len,
+                                     loff_t *_start, size_t *_len, size_t upper_len,
                                      bool no_space_allocated_yet);
 extern int __cachefiles_write(struct cachefiles_object *object,
                              struct file *file,
index 5857241c59181674ef8dafcfc9b6216d65db75a0..1d685357e67fc71ffc2be73513b00f7efd8ee906 100644 (file)
@@ -517,18 +517,26 @@ cachefiles_prepare_ondemand_read(struct netfs_cache_resources *cres,
  */
 int __cachefiles_prepare_write(struct cachefiles_object *object,
                               struct file *file,
-                              loff_t *_start, size_t *_len,
+                              loff_t *_start, size_t *_len, size_t upper_len,
                               bool no_space_allocated_yet)
 {
        struct cachefiles_cache *cache = object->volume->cache;
        loff_t start = *_start, pos;
-       size_t len = *_len, down;
+       size_t len = *_len;
        int ret;
 
        /* Round to DIO size */
-       down = start - round_down(start, PAGE_SIZE);
-       *_start = start - down;
-       *_len = round_up(down + len, PAGE_SIZE);
+       start = round_down(*_start, PAGE_SIZE);
+       if (start != *_start || *_len > upper_len) {
+               /* Probably asked to cache a streaming write written into the
+                * pagecache when the cookie was temporarily out of service to
+                * culling.
+                */
+               fscache_count_dio_misfit();
+               return -ENOBUFS;
+       }
+
+       *_len = round_up(len, PAGE_SIZE);
 
        /* We need to work out whether there's sufficient disk space to perform
         * the write - but we can skip that check if we have space already
@@ -539,7 +547,7 @@ int __cachefiles_prepare_write(struct cachefiles_object *object,
 
        pos = cachefiles_inject_read_error();
        if (pos == 0)
-               pos = vfs_llseek(file, *_start, SEEK_DATA);
+               pos = vfs_llseek(file, start, SEEK_DATA);
        if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
                if (pos == -ENXIO)
                        goto check_space; /* Unallocated tail */
@@ -547,7 +555,7 @@ int __cachefiles_prepare_write(struct cachefiles_object *object,
                                          cachefiles_trace_seek_error);
                return pos;
        }
-       if ((u64)pos >= (u64)*_start + *_len)
+       if ((u64)pos >= (u64)start + *_len)
                goto check_space; /* Unallocated region */
 
        /* We have a block that's at least partially filled - if we're low on
@@ -560,13 +568,13 @@ int __cachefiles_prepare_write(struct cachefiles_object *object,
 
        pos = cachefiles_inject_read_error();
        if (pos == 0)
-               pos = vfs_llseek(file, *_start, SEEK_HOLE);
+               pos = vfs_llseek(file, start, SEEK_HOLE);
        if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
                trace_cachefiles_io_error(object, file_inode(file), pos,
                                          cachefiles_trace_seek_error);
                return pos;
        }
-       if ((u64)pos >= (u64)*_start + *_len)
+       if ((u64)pos >= (u64)start + *_len)
                return 0; /* Fully allocated */
 
        /* Partially allocated, but insufficient space: cull. */
@@ -574,7 +582,7 @@ int __cachefiles_prepare_write(struct cachefiles_object *object,
        ret = cachefiles_inject_remove_error();
        if (ret == 0)
                ret = vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-                                   *_start, *_len);
+                                   start, *_len);
        if (ret < 0) {
                trace_cachefiles_io_error(object, file_inode(file), ret,
                                          cachefiles_trace_fallocate_error);
@@ -591,8 +599,8 @@ check_space:
 }
 
 static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
-                                   loff_t *_start, size_t *_len, loff_t i_size,
-                                   bool no_space_allocated_yet)
+                                   loff_t *_start, size_t *_len, size_t upper_len,
+                                   loff_t i_size, bool no_space_allocated_yet)
 {
        struct cachefiles_object *object = cachefiles_cres_object(cres);
        struct cachefiles_cache *cache = object->volume->cache;
@@ -608,7 +616,7 @@ static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
 
        cachefiles_begin_secure(cache, &saved_cred);
        ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
-                                        _start, _len,
+                                        _start, _len, upper_len,
                                         no_space_allocated_yet);
        cachefiles_end_secure(cache, saved_cred);
        return ret;
index b8fbbb1961bbcefc158fd32306d3a6abd63e607c..4ba42f1fa3b4077b04735282354de250e70fe87d 100644 (file)
@@ -50,7 +50,7 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
                return -ENOBUFS;
 
        cachefiles_begin_secure(cache, &saved_cred);
-       ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
+       ret = __cachefiles_prepare_write(object, file, &pos, &len, len, true);
        cachefiles_end_secure(cache, saved_cred);
        if (ret < 0)
                return ret;
@@ -539,6 +539,9 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
        struct fscache_volume *volume = object->volume->vcookie;
        size_t volume_key_size, cookie_key_size, data_len;
 
+       if (!object->ondemand)
+               return 0;
+
        /*
         * CacheFiles will firstly check the cache file under the root cache
         * directory. If the coherency check failed, it will fallback to
index 94df854147d3597e0b6b7655e5c68e0d87334543..7249d70e1a43fade3a72728df628274d25f7e9c9 100644 (file)
@@ -7,6 +7,7 @@ config CEPH_FS
        select CRYPTO_AES
        select CRYPTO
        select NETFS_SUPPORT
+       select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
        default n
        help
          Choose Y or M here to include support for mounting the
index 13af429ab030b6232c197c53659d982ac8bdc43a..1340d77124ae4db09c3b96548acdf1cd8a6c3fb0 100644 (file)
@@ -159,27 +159,7 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
                ceph_put_snap_context(snapc);
        }
 
-       folio_wait_fscache(folio);
-}
-
-static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
-{
-       struct inode *inode = folio->mapping->host;
-       struct ceph_client *cl = ceph_inode_to_client(inode);
-
-       doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode),
-             folio->index, folio_test_dirty(folio) ? "" : "not ");
-
-       if (folio_test_private(folio))
-               return false;
-
-       if (folio_test_fscache(folio)) {
-               if (current_is_kswapd() || !(gfp & __GFP_FS))
-                       return false;
-               folio_wait_fscache(folio);
-       }
-       ceph_fscache_note_page_release(inode);
-       return true;
+       netfs_invalidate_folio(folio, offset, length);
 }
 
 static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
@@ -357,6 +337,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
        u64 len = subreq->len;
        bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
        u64 off = subreq->start;
+       int extent_cnt;
 
        if (ceph_inode_is_shutdown(inode)) {
                err = -EIO;
@@ -370,8 +351,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 
        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
                        off, &len, 0, 1, sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
-                       CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
-                       NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
+                       CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq,
+                       ci->i_truncate_size, false);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                req = NULL;
@@ -379,7 +360,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
        }
 
        if (sparse) {
-               err = ceph_alloc_sparse_ext_map(&req->r_ops[0]);
+               extent_cnt = __ceph_sparse_read_ext_count(inode, len);
+               err = ceph_alloc_sparse_ext_map(&req->r_ops[0], extent_cnt);
                if (err)
                        goto out;
        }
@@ -509,7 +491,6 @@ static void ceph_netfs_free_request(struct netfs_io_request *rreq)
 const struct netfs_request_ops ceph_netfs_ops = {
        .init_request           = ceph_init_request,
        .free_request           = ceph_netfs_free_request,
-       .begin_cache_operation  = ceph_begin_cache_operation,
        .issue_read             = ceph_netfs_issue_read,
        .expand_readahead       = ceph_netfs_expand_readahead,
        .clamp_length           = ceph_netfs_clamp_length,
@@ -1586,7 +1567,7 @@ const struct address_space_operations ceph_aops = {
        .write_end = ceph_write_end,
        .dirty_folio = ceph_dirty_folio,
        .invalidate_folio = ceph_invalidate_folio,
-       .release_folio = ceph_release_folio,
+       .release_folio = netfs_release_folio,
        .direct_IO = noop_direct_IO,
 };
 
index dc502daac49ab580380deca8f969b3f648a4c299..20efac020394eeb3608d6a2200e8a08591f6e7ba 100644 (file)
@@ -43,38 +43,19 @@ static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
        }
 }
 
-static inline void ceph_fscache_unpin_writeback(struct inode *inode,
+static inline int ceph_fscache_unpin_writeback(struct inode *inode,
                                                struct writeback_control *wbc)
 {
-       fscache_unpin_writeback(wbc, ceph_fscache_cookie(ceph_inode(inode)));
+       return netfs_unpin_writeback(inode, wbc);
 }
 
-static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
-               struct folio *folio)
-{
-       struct ceph_inode_info *ci = ceph_inode(mapping->host);
-
-       return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci));
-}
-
-static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
-{
-       struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(rreq->inode));
-
-       return fscache_begin_read_operation(&rreq->cache_resources, cookie);
-}
+#define ceph_fscache_dirty_folio netfs_dirty_folio
 
 static inline bool ceph_is_cache_enabled(struct inode *inode)
 {
        return fscache_cookie_enabled(ceph_fscache_cookie(ceph_inode(inode)));
 }
 
-static inline void ceph_fscache_note_page_release(struct inode *inode)
-{
-       struct ceph_inode_info *ci = ceph_inode(inode);
-
-       fscache_note_page_release(ceph_fscache_cookie(ci));
-}
 #else /* CONFIG_CEPH_FSCACHE */
 static inline int ceph_fscache_register_fs(struct ceph_fs_client* fsc,
                                           struct fs_context *fc)
@@ -119,30 +100,18 @@ static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
 {
 }
 
-static inline void ceph_fscache_unpin_writeback(struct inode *inode,
-                                               struct writeback_control *wbc)
+static inline int ceph_fscache_unpin_writeback(struct inode *inode,
+                                              struct writeback_control *wbc)
 {
+       return 0;
 }
 
-static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
-               struct folio *folio)
-{
-       return filemap_dirty_folio(mapping, folio);
-}
+#define ceph_fscache_dirty_folio filemap_dirty_folio
 
 static inline bool ceph_is_cache_enabled(struct inode *inode)
 {
        return false;
 }
-
-static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
-{
-       return -ENOBUFS;
-}
-
-static inline void ceph_fscache_note_page_release(struct inode *inode)
-{
-}
 #endif /* CONFIG_CEPH_FSCACHE */
 
 #endif
index 2c0b8dc3dd0d80314b04c0717501f066079b97eb..ad1f46c66fbffbee52ba280ad79e03279e56e1e5 100644 (file)
@@ -1452,7 +1452,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
        if (flushing & CEPH_CAP_XATTR_EXCL) {
                arg->old_xattr_buf = __ceph_build_xattrs_blob(ci);
                arg->xattr_version = ci->i_xattrs.version;
-               arg->xattr_buf = ci->i_xattrs.blob;
+               arg->xattr_buf = ceph_buffer_get(ci->i_xattrs.blob);
        } else {
                arg->xattr_buf = NULL;
                arg->old_xattr_buf = NULL;
@@ -1553,6 +1553,7 @@ static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
        encode_cap_msg(msg, arg);
        ceph_con_send(&arg->session->s_con, msg);
        ceph_buffer_put(arg->old_xattr_buf);
+       ceph_buffer_put(arg->xattr_buf);
        if (arg->wake)
                wake_up_all(&ci->i_cap_wq);
 }
@@ -3215,7 +3216,6 @@ static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
 
 enum put_cap_refs_mode {
        PUT_CAP_REFS_SYNC = 0,
-       PUT_CAP_REFS_NO_CHECK,
        PUT_CAP_REFS_ASYNC,
 };
 
@@ -3331,11 +3331,6 @@ void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had)
        __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_ASYNC);
 }
 
-void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had)
-{
-       __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_NO_CHECK);
-}
-
 /*
  * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
  * context.  Adjust per-snap dirty page accounting as appropriate.
@@ -4887,13 +4882,15 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
                               struct inode *dir,
                               int mds, int drop, int unless)
 {
-       struct dentry *parent = NULL;
        struct ceph_mds_request_release *rel = *p;
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        struct ceph_client *cl;
        int force = 0;
        int ret;
 
+       /* This shouldn't happen */
+       BUG_ON(!dir);
+
        /*
         * force an record for the directory caps if we have a dentry lease.
         * this is racy (can't take i_ceph_lock and d_lock together), but it
@@ -4903,14 +4900,9 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
        spin_lock(&dentry->d_lock);
        if (di->lease_session && di->lease_session->s_mds == mds)
                force = 1;
-       if (!dir) {
-               parent = dget(dentry->d_parent);
-               dir = d_inode(parent);
-       }
        spin_unlock(&dentry->d_lock);
 
        ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
-       dput(parent);
 
        cl = ceph_inode_to_client(dir);
        spin_lock(&dentry->d_lock);
index 678596684596f71d5ad730713eb7aae0431d7f4e..0e9f56eaba1e693d22142487e79b433a0213f759 100644 (file)
@@ -1593,10 +1593,12 @@ struct ceph_lease_walk_control {
        unsigned long dir_lease_ttl;
 };
 
+static int __dir_lease_check(const struct dentry *, struct ceph_lease_walk_control *);
+static int __dentry_lease_check(const struct dentry *);
+
 static unsigned long
 __dentry_leases_walk(struct ceph_mds_client *mdsc,
-                    struct ceph_lease_walk_control *lwc,
-                    int (*check)(struct dentry*, void*))
+                    struct ceph_lease_walk_control *lwc)
 {
        struct ceph_dentry_info *di, *tmp;
        struct dentry *dentry, *last = NULL;
@@ -1624,7 +1626,10 @@ __dentry_leases_walk(struct ceph_mds_client *mdsc,
                        goto next;
                }
 
-               ret = check(dentry, lwc);
+               if (lwc->dir_lease)
+                       ret = __dir_lease_check(dentry, lwc);
+               else
+                       ret = __dentry_lease_check(dentry);
                if (ret & TOUCH) {
                        /* move it into tail of dir lease list */
                        __dentry_dir_lease_touch(mdsc, di);
@@ -1681,7 +1686,7 @@ next:
        return freed;
 }
 
-static int __dentry_lease_check(struct dentry *dentry, void *arg)
+static int __dentry_lease_check(const struct dentry *dentry)
 {
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int ret;
@@ -1696,9 +1701,9 @@ static int __dentry_lease_check(struct dentry *dentry, void *arg)
        return DELETE;
 }
 
-static int __dir_lease_check(struct dentry *dentry, void *arg)
+static int __dir_lease_check(const struct dentry *dentry,
+                            struct ceph_lease_walk_control *lwc)
 {
-       struct ceph_lease_walk_control *lwc = arg;
        struct ceph_dentry_info *di = ceph_dentry(dentry);
 
        int ret = __dir_lease_try_check(dentry);
@@ -1737,7 +1742,7 @@ int ceph_trim_dentries(struct ceph_mds_client *mdsc)
 
        lwc.dir_lease = false;
        lwc.nr_to_scan  = CEPH_CAPS_PER_RELEASE * 2;
-       freed = __dentry_leases_walk(mdsc, &lwc, __dentry_lease_check);
+       freed = __dentry_leases_walk(mdsc, &lwc);
        if (!lwc.nr_to_scan) /* more invalid leases */
                return -EAGAIN;
 
@@ -1747,7 +1752,7 @@ int ceph_trim_dentries(struct ceph_mds_client *mdsc)
        lwc.dir_lease = true;
        lwc.expire_dir_lease = freed < count;
        lwc.dir_lease_ttl = mdsc->fsc->mount_options->caps_wanted_delay_max * HZ;
-       freed +=__dentry_leases_walk(mdsc, &lwc, __dir_lease_check);
+       freed +=__dentry_leases_walk(mdsc, &lwc);
        if (!lwc.nr_to_scan) /* more to check */
                return -EAGAIN;
 
index 726af69d4d62cd7341c0c1aefa46fd553f89ec47..a79f163ae4ed2ce1962289478b348c53a338a8c0 100644 (file)
@@ -286,8 +286,6 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
                doutc(cl, "%llx.%llx parent %llx hash %x err=%d", vino.ino,
                      vino.snap, sfh->parent_ino, sfh->hash, err);
        }
-       if (IS_ERR(inode))
-               return ERR_CAST(inode);
        /* see comments in ceph_get_parent() */
        return unlinked ? d_obtain_root(inode) : d_obtain_alias(inode);
 }
index d380d9dad0e018426177110f17c51942b1c8a868..abe8028d95bf4e3e99091d83cf1784f2b9a249e1 100644 (file)
@@ -1029,6 +1029,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                struct ceph_osd_req_op *op;
                u64 read_off = off;
                u64 read_len = len;
+               int extent_cnt;
 
                /* determine new offset/length if encrypted */
                ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len);
@@ -1068,7 +1069,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 
                op = &req->r_ops[0];
                if (sparse) {
-                       ret = ceph_alloc_sparse_ext_map(op);
+                       extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+                       ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
                        if (ret) {
                                ceph_osdc_put_request(req);
                                break;
@@ -1465,6 +1467,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                ssize_t len;
                struct ceph_osd_req_op *op;
                int readop = sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ;
+               int extent_cnt;
 
                if (write)
                        size = min_t(u64, size, fsc->mount_options->wsize);
@@ -1528,7 +1531,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
                op = &req->r_ops[0];
                if (sparse) {
-                       ret = ceph_alloc_sparse_ext_map(op);
+                       extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+                       ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
                        if (ret) {
                                ceph_osdc_put_request(req);
                                break;
index 0679240f06db924e9aba25052675268885c4bd04..7b2e77517f235ecd47264061c04bae2e6d0b7c83 100644 (file)
@@ -78,6 +78,8 @@ struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
+       inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
+
        if (!S_ISLNK(*mode)) {
                err = ceph_pre_init_acls(dir, mode, as_ctx);
                if (err < 0)
@@ -574,7 +576,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        doutc(fsc->client, "%p\n", &ci->netfs.inode);
 
        /* Set parameters for the netfs library */
-       netfs_inode_init(&ci->netfs, &ceph_netfs_ops);
+       netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
 
        spin_lock_init(&ci->i_ceph_lock);
 
@@ -694,7 +696,7 @@ void ceph_evict_inode(struct inode *inode)
        percpu_counter_dec(&mdsc->metric.total_inodes);
 
        truncate_inode_pages_final(&inode->i_data);
-       if (inode->i_state & I_PINNING_FSCACHE_WB)
+       if (inode->i_state & I_PINNING_NETFS_WB)
                ceph_fscache_unuse_cookie(inode, true);
        clear_inode(inode);
 
index 02ebfabfc8eef26e7753ea4b7a8e9540e64d35c3..f71bb9c9569fc754f447b50cfb8abc89083fe7fd 100644 (file)
@@ -1089,7 +1089,7 @@ void ceph_mdsc_release_request(struct kref *kref)
        struct ceph_mds_request *req = container_of(kref,
                                                    struct ceph_mds_request,
                                                    r_kref);
-       ceph_mdsc_release_dir_caps_no_check(req);
+       ceph_mdsc_release_dir_caps_async(req);
        destroy_reply_info(&req->r_reply_info);
        if (req->r_request)
                ceph_msg_put(req->r_request);
@@ -1534,7 +1534,8 @@ static int encode_metric_spec(void **p, void *end)
  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
  * to include additional client metadata fields.
  */
-static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
+static struct ceph_msg *
+create_session_full_msg(struct ceph_mds_client *mdsc, int op, u64 seq)
 {
        struct ceph_msg *msg;
        struct ceph_mds_session_head *h;
@@ -1578,6 +1579,9 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
                size = METRIC_BYTES(count);
        extra_bytes += 2 + 4 + 4 + size;
 
+       /* flags, mds auth caps and oldest_client_tid */
+       extra_bytes += 4 + 4 + 8;
+
        /* Allocate the message */
        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
                           GFP_NOFS, false);
@@ -1589,16 +1593,16 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
        end = p + msg->front.iov_len;
 
        h = p;
-       h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
+       h->op = cpu_to_le32(op);
        h->seq = cpu_to_le64(seq);
 
        /*
         * Serialize client metadata into waiting buffer space, using
         * the format that userspace expects for map<string, string>
         *
-        * ClientSession messages with metadata are v4
+        * ClientSession messages with metadata are v7
         */
-       msg->hdr.version = cpu_to_le16(4);
+       msg->hdr.version = cpu_to_le16(7);
        msg->hdr.compat_version = cpu_to_le16(1);
 
        /* The write pointer, following the session_head structure */
@@ -1634,6 +1638,15 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
                return ERR_PTR(ret);
        }
 
+       /* version == 5, flags */
+       ceph_encode_32(&p, 0);
+
+       /* version == 6, mds auth caps */
+       ceph_encode_32(&p, 0);
+
+       /* version == 7, oldest_client_tid */
+       ceph_encode_64(&p, mdsc->oldest_tid);
+
        msg->front.iov_len = p - msg->front.iov_base;
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
 
@@ -1663,7 +1676,8 @@ static int __open_session(struct ceph_mds_client *mdsc,
        session->s_renew_requested = jiffies;
 
        /* send connect message */
-       msg = create_session_open_msg(mdsc, session->s_seq);
+       msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_OPEN,
+                                     session->s_seq);
        if (IS_ERR(msg))
                return PTR_ERR(msg);
        ceph_con_send(&session->s_con, msg);
@@ -2028,10 +2042,10 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
 
        doutc(cl, "to mds%d (%s)\n", session->s_mds,
              ceph_mds_state_name(state));
-       msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
+       msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_RENEWCAPS,
                                      ++session->s_renew_seq);
-       if (!msg)
-               return -ENOMEM;
+       if (IS_ERR(msg))
+               return PTR_ERR(msg);
        ceph_con_send(&session->s_con, msg);
        return 0;
 }
@@ -4128,12 +4142,12 @@ static void handle_session(struct ceph_mds_session *session,
                        pr_info_client(cl, "mds%d reconnect success\n",
                                       session->s_mds);
 
+               session->s_features = features;
                if (session->s_state == CEPH_MDS_SESSION_OPEN) {
                        pr_notice_client(cl, "mds%d is already opened\n",
                                         session->s_mds);
                } else {
                        session->s_state = CEPH_MDS_SESSION_OPEN;
-                       session->s_features = features;
                        renewed_caps(mdsc, session, 0);
                        if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
                                     &session->s_features))
@@ -4247,7 +4261,7 @@ void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
        }
 }
 
-void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
+void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req)
 {
        struct ceph_client *cl = req->r_mdsc->fsc->client;
        int dcaps;
@@ -4255,8 +4269,7 @@ void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
        dcaps = xchg(&req->r_dir_caps, 0);
        if (dcaps) {
                doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
-               ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
-                                               dcaps);
+               ceph_put_cap_refs_async(ceph_inode(req->r_parent), dcaps);
        }
 }
 
@@ -4292,7 +4305,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
                if (req->r_session->s_mds != session->s_mds)
                        continue;
 
-               ceph_mdsc_release_dir_caps_no_check(req);
+               ceph_mdsc_release_dir_caps_async(req);
 
                __send_request(session, req, true);
        }
@@ -5870,7 +5883,8 @@ static void mds_peer_reset(struct ceph_connection *con)
 
        pr_warn_client(mdsc->fsc->client, "mds%d closed our session\n",
                       s->s_mds);
-       if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
+       if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO &&
+           ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) >= CEPH_MDS_STATE_RECONNECT)
                send_mds_reconnect(mdsc, s);
 }
 
index 2e6ddaa13d725016dc9a93c6ad1838806eac547e..40560af3882720bd4bc90c1f728cd2ebeaaf3de9 100644 (file)
@@ -552,7 +552,7 @@ extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
                                struct inode *dir,
                                struct ceph_mds_request *req);
 extern void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req);
-extern void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req);
+extern void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req);
 static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
 {
        kref_get(&req->r_kref);
index 9d36c3532de14fc41e0517f494fd6cfb7713cc28..06ee397e0c3a6172592e62dba95cd267cfff0db1 100644 (file)
@@ -197,10 +197,10 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
 }
 
 /*
- * This function walks through the snaprealm for an inode and returns the
- * ceph_snap_realm for the first snaprealm that has quotas set (max_files,
+ * This function walks through the snaprealm for an inode and set the
+ * realmp with the first snaprealm that has quotas set (max_files,
  * max_bytes, or any, depending on the 'which_quota' argument).  If the root is
- * reached, return the root ceph_snap_realm instead.
+ * reached, set the realmp with the root ceph_snap_realm instead.
  *
  * Note that the caller is responsible for calling ceph_put_snap_realm() on the
  * returned realm.
@@ -211,10 +211,9 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
  * this function will return -EAGAIN; otherwise, the snaprealms walk-through
  * will be restarted.
  */
-static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
-                                              struct inode *inode,
-                                              enum quota_get_realm which_quota,
-                                              bool retry)
+static int get_quota_realm(struct ceph_mds_client *mdsc, struct inode *inode,
+                          enum quota_get_realm which_quota,
+                          struct ceph_snap_realm **realmp, bool retry)
 {
        struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_inode_info *ci = NULL;
@@ -222,8 +221,10 @@ static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
        struct inode *in;
        bool has_quota;
 
+       if (realmp)
+               *realmp = NULL;
        if (ceph_snap(inode) != CEPH_NOSNAP)
-               return NULL;
+               return 0;
 
 restart:
        realm = ceph_inode(inode)->i_snap_realm;
@@ -250,7 +251,7 @@ restart:
                                break;
                        ceph_put_snap_realm(mdsc, realm);
                        if (!retry)
-                               return ERR_PTR(-EAGAIN);
+                               return -EAGAIN;
                        goto restart;
                }
 
@@ -259,8 +260,11 @@ restart:
                iput(in);
 
                next = realm->parent;
-               if (has_quota || !next)
-                      return realm;
+               if (has_quota || !next) {
+                       if (realmp)
+                               *realmp = realm;
+                       return 0;
+               }
 
                ceph_get_snap_realm(mdsc, next);
                ceph_put_snap_realm(mdsc, realm);
@@ -269,7 +273,7 @@ restart:
        if (realm)
                ceph_put_snap_realm(mdsc, realm);
 
-       return NULL;
+       return 0;
 }
 
 bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
@@ -277,6 +281,7 @@ bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old->i_sb);
        struct ceph_snap_realm *old_realm, *new_realm;
        bool is_same;
+       int ret;
 
 restart:
        /*
@@ -286,9 +291,9 @@ restart:
         * dropped and we can then restart the whole operation.
         */
        down_read(&mdsc->snap_rwsem);
-       old_realm = get_quota_realm(mdsc, old, QUOTA_GET_ANY, true);
-       new_realm = get_quota_realm(mdsc, new, QUOTA_GET_ANY, false);
-       if (PTR_ERR(new_realm) == -EAGAIN) {
+       get_quota_realm(mdsc, old, QUOTA_GET_ANY, &old_realm, true);
+       ret = get_quota_realm(mdsc, new, QUOTA_GET_ANY, &new_realm, false);
+       if (ret == -EAGAIN) {
                up_read(&mdsc->snap_rwsem);
                if (old_realm)
                        ceph_put_snap_realm(mdsc, old_realm);
@@ -492,8 +497,8 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
        bool is_updated = false;
 
        down_read(&mdsc->snap_rwsem);
-       realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root),
-                               QUOTA_GET_MAX_BYTES, true);
+       get_quota_realm(mdsc, d_inode(fsc->sb->s_root), QUOTA_GET_MAX_BYTES,
+                       &realm, true);
        up_read(&mdsc->snap_rwsem);
        if (!realm)
                return false;
index fe0f64a0acb27058014b188bec906e07310fad1f..b63b4cd9b5b685a930bc33673f28e7c48a93d605 100644 (file)
@@ -3,6 +3,7 @@
 #define _FS_CEPH_SUPER_H
 
 #include <linux/ceph/ceph_debug.h>
+#include <linux/ceph/osd_client.h>
 
 #include <asm/unaligned.h>
 #include <linux/backing-dev.h>
@@ -1254,8 +1255,6 @@ extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
 extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
 extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
 extern void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had);
-extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
-                                           int had);
 extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                                       struct ceph_snap_context *snapc);
 extern void __ceph_remove_capsnap(struct inode *inode,
@@ -1407,6 +1406,19 @@ static inline void __ceph_update_quota(struct ceph_inode_info *ci,
                ceph_adjust_quota_realms_count(&ci->netfs.inode, has_quota);
 }
 
+static inline int __ceph_sparse_read_ext_count(struct inode *inode, u64 len)
+{
+       int cnt = 0;
+
+       if (IS_ENCRYPTED(inode)) {
+               cnt = len >> CEPH_FSCRYPT_BLOCK_SHIFT;
+               if (cnt > CEPH_SPARSE_EXT_ARRAY_INITIAL)
+                       cnt = 0;
+       }
+
+       return cnt;
+}
+
 extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
                              struct ceph_mds_session *session,
                              struct ceph_msg *msg);
index 1d318f85232de9361714471ac973762ed2e6b0e6..fffd3919343e4553abdb1e6607c2eb4ef2bda011 100644 (file)
@@ -114,8 +114,11 @@ config EROFS_FS_ZIP_DEFLATE
 
 config EROFS_FS_ONDEMAND
        bool "EROFS fscache-based on-demand read support"
-       depends on CACHEFILES_ONDEMAND && (EROFS_FS=m && FSCACHE || EROFS_FS=y && FSCACHE=y)
-       default n
+       depends on EROFS_FS
+       select NETFS_SUPPORT
+       select FSCACHE
+       select CACHEFILES
+       select CACHEFILES_ONDEMAND
        help
          This permits EROFS to use fscache-backed data blobs with on-demand
          read support.
index 279933e007d21798549df035b4aa595597f225b6..7cc5841577b240f90f9a623e64adc87c3fb24982 100644 (file)
 struct z_erofs_decompress_req {
        struct super_block *sb;
        struct page **in, **out;
-
        unsigned short pageofs_in, pageofs_out;
        unsigned int inputsize, outputsize;
 
-       /* indicate the algorithm will be used for decompression */
-       unsigned int alg;
+       unsigned int alg;       /* the algorithm for decompression */
        bool inplace_io, partial_decoding, fillgaps;
+       gfp_t gfp;      /* allocation flags for extra temporary buffers */
 };
 
 struct z_erofs_decompressor {
index 1d65b9f60a39059c0e90ce04bbbe4ae5c69ef510..d4cee95af14c7490e85706589853059b99b7e688 100644 (file)
@@ -111,8 +111,9 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
                        victim = availables[--top];
                        get_page(victim);
                } else {
-                       victim = erofs_allocpage(pagepool,
-                                                GFP_KERNEL | __GFP_NOFAIL);
+                       victim = erofs_allocpage(pagepool, rq->gfp);
+                       if (!victim)
+                               return -ENOMEM;
                        set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
                }
                rq->out[i] = victim;
@@ -408,7 +409,7 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
        int size, ret = 0;
 
        if (!erofs_sb_has_compr_cfgs(sbi)) {
-               sbi->available_compr_algs = Z_EROFS_COMPRESSION_LZ4;
+               sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
                return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
        }
 
index 4a64a9c91dd322379d2c4be2268f6c4c24f995ee..b98872058abe82d4034b84c1c93c46645b50968b 100644 (file)
@@ -95,7 +95,7 @@ int z_erofs_load_deflate_config(struct super_block *sb,
 }
 
 int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
-                              struct page **pagepool)
+                              struct page **pgpl)
 {
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -158,8 +158,12 @@ again:
                        strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
                        outsz -= strm->z.avail_out;
                        if (!rq->out[no]) {
-                               rq->out[no] = erofs_allocpage(pagepool,
-                                               GFP_KERNEL | __GFP_NOFAIL);
+                               rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
+                               if (!rq->out[no]) {
+                                       kout = NULL;
+                                       err = -ENOMEM;
+                                       break;
+                               }
                                set_page_private(rq->out[no],
                                                 Z_EROFS_SHORTLIVED_PAGE);
                        }
@@ -211,8 +215,11 @@ again:
 
                        DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
                                                        rq->in[j]));
-                       tmppage = erofs_allocpage(pagepool,
-                                                 GFP_KERNEL | __GFP_NOFAIL);
+                       tmppage = erofs_allocpage(pgpl, rq->gfp);
+                       if (!tmppage) {
+                               err = -ENOMEM;
+                               goto failed;
+                       }
                        set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
                        copy_highpage(tmppage, rq->in[j]);
                        rq->in[j] = tmppage;
@@ -230,7 +237,7 @@ again:
                        break;
                }
        }
-
+failed:
        if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
                err = -EIO;
        if (kout)
index 2dd14f99c1dc10eeea57eedfccbb649bf184828f..6ca357d83cfa458225f20e2d6f6a45307fef2194 100644 (file)
@@ -148,7 +148,7 @@ again:
 }
 
 int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
-                           struct page **pagepool)
+                           struct page **pgpl)
 {
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -215,8 +215,11 @@ again:
                                                   PAGE_SIZE - pageofs);
                        outlen -= strm->buf.out_size;
                        if (!rq->out[no] && rq->fillgaps) {     /* deduped */
-                               rq->out[no] = erofs_allocpage(pagepool,
-                                               GFP_KERNEL | __GFP_NOFAIL);
+                               rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
+                               if (!rq->out[no]) {
+                                       err = -ENOMEM;
+                                       break;
+                               }
                                set_page_private(rq->out[no],
                                                 Z_EROFS_SHORTLIVED_PAGE);
                        }
@@ -258,8 +261,11 @@ again:
 
                        DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
                                                        rq->in[j]));
-                       tmppage = erofs_allocpage(pagepool,
-                                                 GFP_KERNEL | __GFP_NOFAIL);
+                       tmppage = erofs_allocpage(pgpl, rq->gfp);
+                       if (!tmppage) {
+                               err = -ENOMEM;
+                               goto failed;
+                       }
                        set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
                        copy_highpage(tmppage, rq->in[j]);
                        rq->in[j] = tmppage;
@@ -277,6 +283,7 @@ again:
                        break;
                }
        }
+failed:
        if (no < nrpages_out && strm->buf.out)
                kunmap(rq->out[no]);
        if (ni < nrpages_in)
index 87ff35bff8d5bb3acb8dbc4d79c07d1d018cba56..5ff90026fd43fe116e3a34178bf00b9f3303b411 100644 (file)
@@ -165,10 +165,10 @@ static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
 static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
 {
        int ret;
-       struct erofs_fscache *ctx = folio_mapping(folio)->host->i_private;
+       struct erofs_fscache *ctx = folio->mapping->host->i_private;
        struct erofs_fscache_request *req;
 
-       req = erofs_fscache_req_alloc(folio_mapping(folio),
+       req = erofs_fscache_req_alloc(folio->mapping,
                                folio_pos(folio), folio_size(folio));
        if (IS_ERR(req)) {
                folio_unlock(folio);
@@ -276,7 +276,7 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
        struct erofs_fscache_request *req;
        int ret;
 
-       req = erofs_fscache_req_alloc(folio_mapping(folio),
+       req = erofs_fscache_req_alloc(folio->mapping,
                        folio_pos(folio), folio_size(folio));
        if (IS_ERR(req)) {
                folio_unlock(folio);
@@ -459,7 +459,7 @@ static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb
 
        inode->i_size = OFFSET_MAX;
        inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
-       mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+       mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
        inode->i_blkbits = EROFS_SB(sb)->blkszbits;
        inode->i_private = ctx;
 
index 3d616dea55dc3dbccbac495988f865947b0d2a96..36e638e8b53a3d290fcb7ade23a40dc4805be9e6 100644 (file)
@@ -60,7 +60,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
                } else {
                        const unsigned int gotten = sb->s_blocksize - *ofs;
 
-                       copied = kmalloc(vi->inode_isize, GFP_NOFS);
+                       copied = kmalloc(vi->inode_isize, GFP_KERNEL);
                        if (!copied) {
                                err = -ENOMEM;
                                goto err_out;
index 5dea308764b45038f8236bf31b004067f0f297a6..e146d09151af4188efe4cb7bf2ad4a938b8596af 100644 (file)
@@ -81,7 +81,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
 repeat:
        xa_lock(&sbi->managed_pslots);
        pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
-                          NULL, grp, GFP_NOFS);
+                          NULL, grp, GFP_KERNEL);
        if (pre) {
                if (xa_is_err(pre)) {
                        pre = ERR_PTR(xa_err(pre));
index 692c0c39be638dc4b2454b63968a0467043ddc7a..ff0aa72b0db342f10ed7c1b565d2cc7bd6a540ff 100644 (file)
@@ -82,6 +82,9 @@ struct z_erofs_pcluster {
        /* L: indicate several pageofs_outs or not */
        bool multibases;
 
+       /* L: whether extra buffer allocations are best-effort */
+       bool besteffort;
+
        /* A: compressed bvecs (can be cached or inplaced pages) */
        struct z_erofs_bvec compressed_bvecs[];
 };
@@ -230,7 +233,7 @@ static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
                struct page *nextpage = *candidate_bvpage;
 
                if (!nextpage) {
-                       nextpage = erofs_allocpage(pagepool, GFP_NOFS);
+                       nextpage = erofs_allocpage(pagepool, GFP_KERNEL);
                        if (!nextpage)
                                return -ENOMEM;
                        set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
@@ -302,7 +305,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
                if (nrpages > pcs->maxpages)
                        continue;
 
-               pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
+               pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
                if (!pcl)
                        return ERR_PTR(-ENOMEM);
                pcl->pclustersize = size;
@@ -563,21 +566,19 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
                        __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
        unsigned int i;
 
-       if (i_blocksize(fe->inode) != PAGE_SIZE)
-               return;
-       if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
+       if (i_blocksize(fe->inode) != PAGE_SIZE ||
+           fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
                return;
 
        for (i = 0; i < pclusterpages; ++i) {
                struct page *page, *newpage;
                void *t;        /* mark pages just found for debugging */
 
-               /* the compressed page was loaded before */
+               /* Inaccurate check w/o locking to avoid unneeded lookups */
                if (READ_ONCE(pcl->compressed_bvecs[i].page))
                        continue;
 
                page = find_get_page(mc, pcl->obj.index + i);
-
                if (page) {
                        t = (void *)((unsigned long)page | 1);
                        newpage = NULL;
@@ -597,9 +598,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
                        set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
                        t = (void *)((unsigned long)newpage | 1);
                }
-
-               if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t))
+               spin_lock(&pcl->obj.lockref.lock);
+               if (!pcl->compressed_bvecs[i].page) {
+                       pcl->compressed_bvecs[i].page = t;
+                       spin_unlock(&pcl->obj.lockref.lock);
                        continue;
+               }
+               spin_unlock(&pcl->obj.lockref.lock);
 
                if (page)
                        put_page(page);
@@ -694,7 +699,7 @@ static void z_erofs_cache_invalidate_folio(struct folio *folio,
        DBG_BUGON(stop > folio_size(folio) || stop < length);
 
        if (offset == 0 && stop == folio_size(folio))
-               while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
+               while (!z_erofs_cache_release_folio(folio, 0))
                        cond_resched();
 }
 
@@ -713,36 +718,30 @@ int erofs_init_managed_cache(struct super_block *sb)
        set_nlink(inode, 1);
        inode->i_size = OFFSET_MAX;
        inode->i_mapping->a_ops = &z_erofs_cache_aops;
-       mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+       mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
        EROFS_SB(sb)->managed_cache = inode;
        return 0;
 }
 
-static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
-                                  struct z_erofs_bvec *bvec)
-{
-       struct z_erofs_pcluster *const pcl = fe->pcl;
-
-       while (fe->icur > 0) {
-               if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
-                            NULL, bvec->page)) {
-                       pcl->compressed_bvecs[fe->icur] = *bvec;
-                       return true;
-               }
-       }
-       return false;
-}
-
 /* callers must be with pcluster lock held */
 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
                               struct z_erofs_bvec *bvec, bool exclusive)
 {
+       struct z_erofs_pcluster *pcl = fe->pcl;
        int ret;
 
        if (exclusive) {
                /* give priority for inplaceio to use file pages first */
-               if (z_erofs_try_inplace_io(fe, bvec))
+               spin_lock(&pcl->obj.lockref.lock);
+               while (fe->icur > 0) {
+                       if (pcl->compressed_bvecs[--fe->icur].page)
+                               continue;
+                       pcl->compressed_bvecs[fe->icur] = *bvec;
+                       spin_unlock(&pcl->obj.lockref.lock);
                        return 0;
+               }
+               spin_unlock(&pcl->obj.lockref.lock);
+
                /* otherwise, check if it can be used as a bvpage */
                if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
                    !fe->candidate_bvpage)
@@ -964,7 +963,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
 }
 
 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
-                               struct page *page)
+                               struct page *page, bool ra)
 {
        struct inode *const inode = fe->inode;
        struct erofs_map_blocks *const map = &fe->map;
@@ -1014,6 +1013,7 @@ repeat:
                err = z_erofs_pcluster_begin(fe);
                if (err)
                        goto out;
+               fe->pcl->besteffort |= !ra;
        }
 
        /*
@@ -1280,6 +1280,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
                                        .inplace_io = overlapped,
                                        .partial_decoding = pcl->partial,
                                        .fillgaps = pcl->multibases,
+                                       .gfp = pcl->besteffort ?
+                                               GFP_KERNEL | __GFP_NOFAIL :
+                                               GFP_NOWAIT | __GFP_NORETRY
                                 }, be->pagepool);
 
        /* must handle all compressed pages before actual file pages */
@@ -1322,6 +1325,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
        pcl->length = 0;
        pcl->partial = true;
        pcl->multibases = false;
+       pcl->besteffort = false;
        pcl->bvset.nextpage = NULL;
        pcl->vcnt = 0;
 
@@ -1423,23 +1427,26 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 {
        gfp_t gfp = mapping_gfp_mask(mc);
        bool tocache = false;
-       struct z_erofs_bvec *zbv = pcl->compressed_bvecs + nr;
+       struct z_erofs_bvec zbv;
        struct address_space *mapping;
-       struct page *page, *oldpage;
+       struct page *page;
        int justfound, bs = i_blocksize(f->inode);
 
        /* Except for inplace pages, the entire page can be used for I/Os */
        bvec->bv_offset = 0;
        bvec->bv_len = PAGE_SIZE;
 repeat:
-       oldpage = READ_ONCE(zbv->page);
-       if (!oldpage)
+       spin_lock(&pcl->obj.lockref.lock);
+       zbv = pcl->compressed_bvecs[nr];
+       page = zbv.page;
+       justfound = (unsigned long)page & 1UL;
+       page = (struct page *)((unsigned long)page & ~1UL);
+       pcl->compressed_bvecs[nr].page = page;
+       spin_unlock(&pcl->obj.lockref.lock);
+       if (!page)
                goto out_allocpage;
 
-       justfound = (unsigned long)oldpage & 1UL;
-       page = (struct page *)((unsigned long)oldpage & ~1UL);
        bvec->bv_page = page;
-
        DBG_BUGON(z_erofs_is_shortlived_page(page));
        /*
         * Handle preallocated cached pages.  We tried to allocate such pages
@@ -1448,7 +1455,6 @@ repeat:
         */
        if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
                set_page_private(page, 0);
-               WRITE_ONCE(zbv->page, page);
                tocache = true;
                goto out_tocache;
        }
@@ -1459,9 +1465,9 @@ repeat:
         * therefore it is impossible for `mapping` to be NULL.
         */
        if (mapping && mapping != mc) {
-               if (zbv->offset < 0)
-                       bvec->bv_offset = round_up(-zbv->offset, bs);
-               bvec->bv_len = round_up(zbv->end, bs) - bvec->bv_offset;
+               if (zbv.offset < 0)
+                       bvec->bv_offset = round_up(-zbv.offset, bs);
+               bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
                return;
        }
 
@@ -1471,7 +1477,6 @@ repeat:
 
        /* the cached page is still in managed cache */
        if (page->mapping == mc) {
-               WRITE_ONCE(zbv->page, page);
                /*
                 * The cached page is still available but without a valid
                 * `->private` pcluster hint.  Let's reconnect them.
@@ -1503,11 +1508,15 @@ repeat:
        put_page(page);
 out_allocpage:
        page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
-       if (oldpage != cmpxchg(&zbv->page, oldpage, page)) {
+       spin_lock(&pcl->obj.lockref.lock);
+       if (pcl->compressed_bvecs[nr].page) {
                erofs_pagepool_add(&f->pagepool, page);
+               spin_unlock(&pcl->obj.lockref.lock);
                cond_resched();
                goto repeat;
        }
+       pcl->compressed_bvecs[nr].page = page;
+       spin_unlock(&pcl->obj.lockref.lock);
        bvec->bv_page = page;
 out_tocache:
        if (!tocache || bs != PAGE_SIZE ||
@@ -1685,6 +1694,7 @@ submit_bio_retry:
 
                        if (cur + bvec.bv_len > end)
                                bvec.bv_len = end - cur;
+                       DBG_BUGON(bvec.bv_len < sb->s_blocksize);
                        if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
                                          bvec.bv_offset))
                                goto submit_bio_retry;
@@ -1785,7 +1795,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
                        if (PageUptodate(page))
                                unlock_page(page);
                        else
-                               (void)z_erofs_do_read_page(f, page);
+                               (void)z_erofs_do_read_page(f, page, !!rac);
                        put_page(page);
                }
 
@@ -1806,7 +1816,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
        f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
 
        z_erofs_pcluster_readmore(&f, NULL, true);
-       err = z_erofs_do_read_page(&f, &folio->page);
+       err = z_erofs_do_read_page(&f, &folio->page, false);
        z_erofs_pcluster_readmore(&f, NULL, false);
        z_erofs_pcluster_end(&f);
 
@@ -1847,7 +1857,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
                folio = head;
                head = folio_get_private(folio);
 
-               err = z_erofs_do_read_page(&f, &folio->page);
+               err = z_erofs_do_read_page(&f, &folio->page, true);
                if (err && err != -EINTR)
                        erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
                                  folio->index, EROFS_I(inode)->nid);
index 9753875e41cb35a4e83468aafb885f71a4bb1547..e313c936351d51fb39685702437d22bbef16719a 100644 (file)
@@ -454,7 +454,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
                .map = map,
        };
        int err = 0;
-       unsigned int lclusterbits, endoff;
+       unsigned int lclusterbits, endoff, afmt;
        unsigned long initial_lcn;
        unsigned long long ofs, end;
 
@@ -543,17 +543,20 @@ static int z_erofs_do_map_blocks(struct inode *inode,
                        err = -EFSCORRUPTED;
                        goto unmap_out;
                }
-               if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
-                       map->m_algorithmformat =
-                               Z_EROFS_COMPRESSION_INTERLACED;
-               else
-                       map->m_algorithmformat =
-                               Z_EROFS_COMPRESSION_SHIFTED;
-       } else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
-               map->m_algorithmformat = vi->z_algorithmtype[1];
+               afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
+                       Z_EROFS_COMPRESSION_INTERLACED :
+                       Z_EROFS_COMPRESSION_SHIFTED;
        } else {
-               map->m_algorithmformat = vi->z_algorithmtype[0];
+               afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
+                       vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
+               if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
+                       erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
+                                 afmt, vi->nid);
+                       err = -EFSCORRUPTED;
+                       goto unmap_out;
+               }
        }
+       map->m_algorithmformat = afmt;
 
        if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
            ((flags & EROFS_GET_BLOCKS_READMORE) &&
index 73e4045df271d148377340a40c77271ee36b2161..af4fbb61cd53e97c788387a0d8277d1ce5495d7d 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -128,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
        struct filename *tmp = getname(library);
        int error = PTR_ERR(tmp);
        static const struct open_flags uselib_flags = {
-               .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
+               .open_flag = O_LARGEFILE | O_RDONLY,
                .acc_mode = MAY_READ | MAY_EXEC,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
@@ -904,6 +904,10 @@ EXPORT_SYMBOL(transfer_args_to_stack);
 
 #endif /* CONFIG_MMU */
 
+/*
+ * On success, caller must call do_close_execat() on the returned
+ * struct file to close it.
+ */
 static struct file *do_open_execat(int fd, struct filename *name, int flags)
 {
        struct file *file;
@@ -948,6 +952,17 @@ exit:
        return ERR_PTR(err);
 }
 
+/**
+ * open_exec - Open a path name for execution
+ *
+ * @name: path name to open with the intent of executing it.
+ *
+ * Returns ERR_PTR on failure or allocated struct file on success.
+ *
+ * As this is a wrapper for the internal do_open_execat(), callers
+ * must call allow_write_access() before fput() on release. Also see
+ * do_close_execat().
+ */
 struct file *open_exec(const char *name)
 {
        struct filename *filename = getname_kernel(name);
@@ -1409,6 +1424,9 @@ int begin_new_exec(struct linux_binprm * bprm)
 
 out_unlock:
        up_write(&me->signal->exec_update_lock);
+       if (!bprm->cred)
+               mutex_unlock(&me->signal->cred_guard_mutex);
+
 out:
        return retval;
 }
@@ -1484,6 +1502,15 @@ static int prepare_bprm_creds(struct linux_binprm *bprm)
        return -ENOMEM;
 }
 
+/* Matches do_open_execat() */
+static void do_close_execat(struct file *file)
+{
+       if (!file)
+               return;
+       allow_write_access(file);
+       fput(file);
+}
+
 static void free_bprm(struct linux_binprm *bprm)
 {
        if (bprm->mm) {
@@ -1495,10 +1522,7 @@ static void free_bprm(struct linux_binprm *bprm)
                mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
-       if (bprm->file) {
-               allow_write_access(bprm->file);
-               fput(bprm->file);
-       }
+       do_close_execat(bprm->file);
        if (bprm->executable)
                fput(bprm->executable);
        /* If a binfmt changed the interp, free it. */
@@ -1508,12 +1532,23 @@ static void free_bprm(struct linux_binprm *bprm)
        kfree(bprm);
 }
 
-static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
+static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int flags)
 {
-       struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+       struct linux_binprm *bprm;
+       struct file *file;
        int retval = -ENOMEM;
-       if (!bprm)
-               goto out;
+
+       file = do_open_execat(fd, filename, flags);
+       if (IS_ERR(file))
+               return ERR_CAST(file);
+
+       bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+       if (!bprm) {
+               do_close_execat(file);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       bprm->file = file;
 
        if (fd == AT_FDCWD || filename->name[0] == '/') {
                bprm->filename = filename->name;
@@ -1526,18 +1561,28 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
                if (!bprm->fdpath)
                        goto out_free;
 
+               /*
+                * Record that a name derived from an O_CLOEXEC fd will be
+                * inaccessible after exec.  This allows the code in exec to
+                * choose to fail when the executable is not mmaped into the
+                * interpreter and an open file descriptor is not passed to
+                * the interpreter.  This makes for a better user experience
+                * than having the interpreter start and then immediately fail
+                * when it finds the executable is inaccessible.
+                */
+               if (get_close_on_exec(fd))
+                       bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
+
                bprm->filename = bprm->fdpath;
        }
        bprm->interp = bprm->filename;
 
        retval = bprm_mm_init(bprm);
-       if (retval)
-               goto out_free;
-       return bprm;
+       if (!retval)
+               return bprm;
 
 out_free:
        free_bprm(bprm);
-out:
        return ERR_PTR(retval);
 }
 
@@ -1588,6 +1633,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
        }
        rcu_read_unlock();
 
+       /* "users" and "in_exec" locked for copy_fs() */
        if (p->fs->users > n_fs)
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        else
@@ -1804,13 +1850,8 @@ static int exec_binprm(struct linux_binprm *bprm)
        return 0;
 }
 
-/*
- * sys_execve() executes a new program.
- */
-static int bprm_execve(struct linux_binprm *bprm,
-                      int fd, struct filename *filename, int flags)
+static int bprm_execve(struct linux_binprm *bprm)
 {
-       struct file *file;
        int retval;
 
        retval = prepare_bprm_creds(bprm);
@@ -1826,26 +1867,8 @@ static int bprm_execve(struct linux_binprm *bprm,
        current->in_execve = 1;
        sched_mm_cid_before_execve(current);
 
-       file = do_open_execat(fd, filename, flags);
-       retval = PTR_ERR(file);
-       if (IS_ERR(file))
-               goto out_unmark;
-
        sched_exec();
 
-       bprm->file = file;
-       /*
-        * Record that a name derived from an O_CLOEXEC fd will be
-        * inaccessible after exec.  This allows the code in exec to
-        * choose to fail when the executable is not mmaped into the
-        * interpreter and an open file descriptor is not passed to
-        * the interpreter.  This makes for a better user experience
-        * than having the interpreter start and then immediately fail
-        * when it finds the executable is inaccessible.
-        */
-       if (bprm->fdpath && get_close_on_exec(fd))
-               bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
-
        /* Set the unchanging part of bprm->cred */
        retval = security_bprm_creds_for_exec(bprm);
        if (retval)
@@ -1875,7 +1898,6 @@ out:
        if (bprm->point_of_no_return && !fatal_signal_pending(current))
                force_fatal_sig(SIGSEGV);
 
-out_unmark:
        sched_mm_cid_after_execve(current);
        current->fs->in_exec = 0;
        current->in_execve = 0;
@@ -1910,7 +1932,7 @@ static int do_execveat_common(int fd, struct filename *filename,
         * further execve() calls fail. */
        current->flags &= ~PF_NPROC_EXCEEDED;
 
-       bprm = alloc_bprm(fd, filename);
+       bprm = alloc_bprm(fd, filename, flags);
        if (IS_ERR(bprm)) {
                retval = PTR_ERR(bprm);
                goto out_ret;
@@ -1959,7 +1981,7 @@ static int do_execveat_common(int fd, struct filename *filename,
                bprm->argc = 1;
        }
 
-       retval = bprm_execve(bprm, fd, filename, flags);
+       retval = bprm_execve(bprm);
 out_free:
        free_bprm(bprm);
 
@@ -1984,7 +2006,7 @@ int kernel_execve(const char *kernel_filename,
        if (IS_ERR(filename))
                return PTR_ERR(filename);
 
-       bprm = alloc_bprm(fd, filename);
+       bprm = alloc_bprm(fd, filename, 0);
        if (IS_ERR(bprm)) {
                retval = PTR_ERR(bprm);
                goto out_ret;
@@ -2019,7 +2041,7 @@ int kernel_execve(const char *kernel_filename,
        if (retval < 0)
                goto out_free;
 
-       retval = bprm_execve(bprm, fd, filename, 0);
+       retval = bprm_execve(bprm);
 out_free:
        free_bprm(bprm);
 out_ret:
index e918decb37358636113b9379860b75cf2af8e736..0356c88252bd34abcdb28e2d478f66f656f3ec4c 100644 (file)
@@ -5,42 +5,23 @@
 
 #include <linux/blkdev.h>
 #include <linux/slab.h>
+#include <linux/bitmap.h>
 #include <linux/buffer_head.h>
 
 #include "exfat_raw.h"
 #include "exfat_fs.h"
 
-static const unsigned char free_bit[] = {
-       0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2,/*  0 ~  19*/
-       0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3,/* 20 ~  39*/
-       0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/* 40 ~  59*/
-       0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,/* 60 ~  79*/
-       0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2,/* 80 ~  99*/
-       0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3,/*100 ~ 119*/
-       0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/*120 ~ 139*/
-       0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,/*140 ~ 159*/
-       0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2,/*160 ~ 179*/
-       0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3,/*180 ~ 199*/
-       0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,/*200 ~ 219*/
-       0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,/*220 ~ 239*/
-       0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0                /*240 ~ 254*/
-};
-
-static const unsigned char used_bit[] = {
-       0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3,/*  0 ~  19*/
-       2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4,/* 20 ~  39*/
-       2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5,/* 40 ~  59*/
-       4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,/* 60 ~  79*/
-       2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4,/* 80 ~  99*/
-       3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6,/*100 ~ 119*/
-       4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4,/*120 ~ 139*/
-       3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,/*140 ~ 159*/
-       2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5,/*160 ~ 179*/
-       4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5,/*180 ~ 199*/
-       3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6,/*200 ~ 219*/
-       5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,/*220 ~ 239*/
-       4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8             /*240 ~ 255*/
-};
+#if BITS_PER_LONG == 32
+#define __le_long __le32
+#define lel_to_cpu(A) le32_to_cpu(A)
+#define cpu_to_lel(A) cpu_to_le32(A)
+#elif BITS_PER_LONG == 64
+#define __le_long __le64
+#define lel_to_cpu(A) le64_to_cpu(A)
+#define cpu_to_lel(A) cpu_to_le64(A)
+#else
+#error "BITS_PER_LONG not 32 or 64"
+#endif
 
 /*
  *  Allocation Bitmap Management Functions
@@ -200,32 +181,35 @@ unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu)
 {
        unsigned int i, map_i, map_b, ent_idx;
        unsigned int clu_base, clu_free;
-       unsigned char k, clu_mask;
+       unsigned long clu_bits, clu_mask;
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
+       __le_long bitval;
 
        WARN_ON(clu < EXFAT_FIRST_CLUSTER);
-       ent_idx = CLUSTER_TO_BITMAP_ENT(clu);
-       clu_base = BITMAP_ENT_TO_CLUSTER(ent_idx & ~(BITS_PER_BYTE_MASK));
+       ent_idx = ALIGN_DOWN(CLUSTER_TO_BITMAP_ENT(clu), BITS_PER_LONG);
+       clu_base = BITMAP_ENT_TO_CLUSTER(ent_idx);
        clu_mask = IGNORED_BITS_REMAINED(clu, clu_base);
 
        map_i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx);
        map_b = BITMAP_OFFSET_BYTE_IN_SECTOR(sb, ent_idx);
 
        for (i = EXFAT_FIRST_CLUSTER; i < sbi->num_clusters;
-            i += BITS_PER_BYTE) {
-               k = *(sbi->vol_amap[map_i]->b_data + map_b);
+            i += BITS_PER_LONG) {
+               bitval = *(__le_long *)(sbi->vol_amap[map_i]->b_data + map_b);
                if (clu_mask > 0) {
-                       k |= clu_mask;
+                       bitval |= cpu_to_lel(clu_mask);
                        clu_mask = 0;
                }
-               if (k < 0xFF) {
-                       clu_free = clu_base + free_bit[k];
+               if (lel_to_cpu(bitval) != ULONG_MAX) {
+                       clu_bits = lel_to_cpu(bitval);
+                       clu_free = clu_base + ffz(clu_bits);
                        if (clu_free < sbi->num_clusters)
                                return clu_free;
                }
-               clu_base += BITS_PER_BYTE;
+               clu_base += BITS_PER_LONG;
+               map_b += sizeof(long);
 
-               if (++map_b >= sb->s_blocksize ||
+               if (map_b >= sb->s_blocksize ||
                    clu_base >= sbi->num_clusters) {
                        if (++map_i >= sbi->map_sectors) {
                                clu_base = EXFAT_FIRST_CLUSTER;
@@ -244,25 +228,24 @@ int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count)
        unsigned int count = 0;
        unsigned int i, map_i = 0, map_b = 0;
        unsigned int total_clus = EXFAT_DATA_CLUSTER_COUNT(sbi);
-       unsigned int last_mask = total_clus & BITS_PER_BYTE_MASK;
-       unsigned char clu_bits;
-       const unsigned char last_bit_mask[] = {0, 0b00000001, 0b00000011,
-               0b00000111, 0b00001111, 0b00011111, 0b00111111, 0b01111111};
+       unsigned int last_mask = total_clus & (BITS_PER_LONG - 1);
+       unsigned long *bitmap, clu_bits;
 
        total_clus &= ~last_mask;
-       for (i = 0; i < total_clus; i += BITS_PER_BYTE) {
-               clu_bits = *(sbi->vol_amap[map_i]->b_data + map_b);
-               count += used_bit[clu_bits];
-               if (++map_b >= (unsigned int)sb->s_blocksize) {
+       for (i = 0; i < total_clus; i += BITS_PER_LONG) {
+               bitmap = (void *)(sbi->vol_amap[map_i]->b_data + map_b);
+               count += hweight_long(*bitmap);
+               map_b += sizeof(long);
+               if (map_b >= (unsigned int)sb->s_blocksize) {
                        map_i++;
                        map_b = 0;
                }
        }
 
        if (last_mask) {
-               clu_bits = *(sbi->vol_amap[map_i]->b_data + map_b);
-               clu_bits &= last_bit_mask[last_mask];
-               count += used_bit[clu_bits];
+               bitmap = (void *)(sbi->vol_amap[map_i]->b_data + map_b);
+               clu_bits = lel_to_cpu(*(__le_long *)bitmap);
+               count += hweight_long(clu_bits & BITMAP_LAST_WORD_MASK(last_mask));
        }
 
        *ret_count = count;
index a7a2c35d74fbd70ac5980e780840cadd43385316..9474cd50da6d4fd8b9fba92f1f3d8717f19245dc 100644 (file)
@@ -135,8 +135,7 @@ enum {
 #define BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent) (ent & BITS_PER_SECTOR_MASK(sb))
 #define BITMAP_OFFSET_BYTE_IN_SECTOR(sb, ent) \
        ((ent / BITS_PER_BYTE) & ((sb)->s_blocksize - 1))
-#define BITS_PER_BYTE_MASK     0x7
-#define IGNORED_BITS_REMAINED(clu, clu_base) ((1 << ((clu) - (clu_base))) - 1)
+#define IGNORED_BITS_REMAINED(clu, clu_base) ((1UL << ((clu) - (clu_base))) - 1)
 
 #define ES_ENTRY_NUM(name_len) (ES_IDX_LAST_FILENAME(name_len) + 1)
 /* 19 entries = 1 file entry + 1 stream entry + 17 filename entries */
@@ -208,6 +207,7 @@ struct exfat_dir_entry {
        unsigned char flags;
        unsigned short attr;
        loff_t size;
+       loff_t valid_size;
        unsigned int num_subdirs;
        struct timespec64 atime;
        struct timespec64 mtime;
@@ -317,6 +317,7 @@ struct exfat_inode_info {
        loff_t i_size_aligned;
        /* on-disk position of directory entry or 0 */
        loff_t i_pos;
+       loff_t valid_size;
        /* hash by i_location */
        struct hlist_node i_hash_fat;
        /* protect bmap against truncate */
index bfdfafe0099309f59da5c19b27eee2f4ed10db31..d25a96a148af4cdb966c5d20f720aa944cab10c2 100644 (file)
 #include <linux/fsnotify.h>
 #include <linux/security.h>
 #include <linux/msdos_fs.h>
+#include <linux/writeback.h>
 
 #include "exfat_raw.h"
 #include "exfat_fs.h"
 
 static int exfat_cont_expand(struct inode *inode, loff_t size)
 {
-       struct address_space *mapping = inode->i_mapping;
-       loff_t start = i_size_read(inode), count = size - i_size_read(inode);
-       int err, err2;
+       int ret;
+       unsigned int num_clusters, new_num_clusters, last_clu;
+       struct exfat_inode_info *ei = EXFAT_I(inode);
+       struct super_block *sb = inode->i_sb;
+       struct exfat_sb_info *sbi = EXFAT_SB(sb);
+       struct exfat_chain clu;
 
-       err = generic_cont_expand_simple(inode, size);
-       if (err)
-               return err;
+       ret = inode_newsize_ok(inode, size);
+       if (ret)
+               return ret;
+
+       num_clusters = EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
+       new_num_clusters = EXFAT_B_TO_CLU_ROUND_UP(size, sbi);
+
+       if (new_num_clusters == num_clusters)
+               goto out;
+
+       exfat_chain_set(&clu, ei->start_clu, num_clusters, ei->flags);
+       ret = exfat_find_last_cluster(sb, &clu, &last_clu);
+       if (ret)
+               return ret;
+
+       clu.dir = (last_clu == EXFAT_EOF_CLUSTER) ?
+                       EXFAT_EOF_CLUSTER : last_clu + 1;
+       clu.size = 0;
+       clu.flags = ei->flags;
+
+       ret = exfat_alloc_cluster(inode, new_num_clusters - num_clusters,
+                       &clu, IS_DIRSYNC(inode));
+       if (ret)
+               return ret;
+
+       /* Append new clusters to chain */
+       if (clu.flags != ei->flags) {
+               exfat_chain_cont_cluster(sb, ei->start_clu, num_clusters);
+               ei->flags = ALLOC_FAT_CHAIN;
+       }
+       if (clu.flags == ALLOC_FAT_CHAIN)
+               if (exfat_ent_set(sb, last_clu, clu.dir))
+                       goto free_clu;
 
+       if (num_clusters == 0)
+               ei->start_clu = clu.dir;
+
+out:
        inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
-       mark_inode_dirty(inode);
+       /* Expanded range not zeroed, do not update valid_size */
+       i_size_write(inode, size);
 
-       if (!IS_SYNC(inode))
-               return 0;
+       ei->i_size_aligned = round_up(size, sb->s_blocksize);
+       ei->i_size_ondisk = ei->i_size_aligned;
+       inode->i_blocks = round_up(size, sbi->cluster_size) >> 9;
 
-       err = filemap_fdatawrite_range(mapping, start, start + count - 1);
-       err2 = sync_mapping_buffers(mapping);
-       if (!err)
-               err = err2;
-       err2 = write_inode_now(inode, 1);
-       if (!err)
-               err = err2;
-       if (err)
-               return err;
+       if (IS_DIRSYNC(inode))
+               return write_inode_now(inode, 1);
+
+       mark_inode_dirty(inode);
 
-       return filemap_fdatawait_range(mapping, start, start + count - 1);
+       return 0;
+
+free_clu:
+       exfat_free_cluster(inode, &clu);
+       return -EIO;
 }
 
 static bool exfat_allow_set_time(struct exfat_sb_info *sbi, struct inode *inode)
@@ -146,6 +185,9 @@ int __exfat_truncate(struct inode *inode)
                ei->start_clu = EXFAT_EOF_CLUSTER;
        }
 
+       if (i_size_read(inode) < ei->valid_size)
+               ei->valid_size = i_size_read(inode);
+
        if (ei->type == TYPE_FILE)
                ei->attr |= EXFAT_ATTR_ARCHIVE;
 
@@ -474,15 +516,124 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
        return blkdev_issue_flush(inode->i_sb->s_bdev);
 }
 
+static int exfat_file_zeroed_range(struct file *file, loff_t start, loff_t end)
+{
+       int err;
+       struct inode *inode = file_inode(file);
+       struct address_space *mapping = inode->i_mapping;
+       const struct address_space_operations *ops = mapping->a_ops;
+
+       while (start < end) {
+               u32 zerofrom, len;
+               struct page *page = NULL;
+
+               zerofrom = start & (PAGE_SIZE - 1);
+               len = PAGE_SIZE - zerofrom;
+               if (start + len > end)
+                       len = end - start;
+
+               err = ops->write_begin(file, mapping, start, len, &page, NULL);
+               if (err)
+                       goto out;
+
+               zero_user_segment(page, zerofrom, zerofrom + len);
+
+               err = ops->write_end(file, mapping, start, len, len, page, NULL);
+               if (err < 0)
+                       goto out;
+               start += len;
+
+               balance_dirty_pages_ratelimited(mapping);
+               cond_resched();
+       }
+
+out:
+       return err;
+}
+
+static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       ssize_t ret;
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
+       struct exfat_inode_info *ei = EXFAT_I(inode);
+       loff_t pos = iocb->ki_pos;
+       loff_t valid_size;
+
+       inode_lock(inode);
+
+       valid_size = ei->valid_size;
+
+       ret = generic_write_checks(iocb, iter);
+       if (ret < 0)
+               goto unlock;
+
+       if (pos > valid_size) {
+               ret = exfat_file_zeroed_range(file, valid_size, pos);
+               if (ret < 0 && ret != -ENOSPC) {
+                       exfat_err(inode->i_sb,
+                               "write: fail to zero from %llu to %llu(%zd)",
+                               valid_size, pos, ret);
+               }
+               if (ret < 0)
+                       goto unlock;
+       }
+
+       ret = __generic_file_write_iter(iocb, iter);
+       if (ret < 0)
+               goto unlock;
+
+       inode_unlock(inode);
+
+       if (pos > valid_size)
+               pos = valid_size;
+
+       if (iocb_is_dsync(iocb) && iocb->ki_pos > pos) {
+               ssize_t err = vfs_fsync_range(file, pos, iocb->ki_pos - 1,
+                               iocb->ki_flags & IOCB_SYNC);
+               if (err < 0)
+                       return err;
+       }
+
+       return ret;
+
+unlock:
+       inode_unlock(inode);
+
+       return ret;
+}
+
+static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       int ret;
+       struct inode *inode = file_inode(file);
+       struct exfat_inode_info *ei = EXFAT_I(inode);
+       loff_t start = ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+       loff_t end = min_t(loff_t, i_size_read(inode),
+                       start + vma->vm_end - vma->vm_start);
+
+       if ((vma->vm_flags & VM_WRITE) && ei->valid_size < end) {
+               ret = exfat_file_zeroed_range(file, ei->valid_size, end);
+               if (ret < 0) {
+                       exfat_err(inode->i_sb,
+                                 "mmap: fail to zero from %llu to %llu(%d)",
+                                 start, end, ret);
+                       return ret;
+               }
+       }
+
+       return generic_file_mmap(file, vma);
+}
+
 const struct file_operations exfat_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
-       .write_iter     = generic_file_write_iter,
+       .write_iter     = exfat_file_write_iter,
        .unlocked_ioctl = exfat_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = exfat_compat_ioctl,
 #endif
-       .mmap           = generic_file_mmap,
+       .mmap           = exfat_file_mmap,
        .fsync          = exfat_file_fsync,
        .splice_read    = filemap_splice_read,
        .splice_write   = iter_file_splice_write,
index e7ff58b8e68c78d53c01c8126cd1b6276c632a8c..0687f952956c34b6d85e785ee13f231d49679e64 100644 (file)
@@ -75,8 +75,17 @@ int __exfat_write_inode(struct inode *inode, int sync)
        if (ei->start_clu == EXFAT_EOF_CLUSTER)
                on_disk_size = 0;
 
-       ep2->dentry.stream.valid_size = cpu_to_le64(on_disk_size);
-       ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
+       ep2->dentry.stream.size = cpu_to_le64(on_disk_size);
+       /*
+        * mmap write does not use exfat_write_end(), valid_size may be
+        * extended to the sector-aligned length in exfat_get_block().
+        * So we need to fix up valid_size to the written length.
+        */
+       if (on_disk_size < ei->valid_size)
+               ep2->dentry.stream.valid_size = ep2->dentry.stream.size;
+       else
+               ep2->dentry.stream.valid_size = cpu_to_le64(ei->valid_size);
+
        if (on_disk_size) {
                ep2->dentry.stream.flags = ei->flags;
                ep2->dentry.stream.start_clu = cpu_to_le32(ei->start_clu);
@@ -278,6 +287,7 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
        unsigned int cluster, sec_offset;
        sector_t last_block;
        sector_t phys = 0;
+       sector_t valid_blks;
        loff_t pos;
 
        mutex_lock(&sbi->s_lock);
@@ -306,17 +316,32 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
        mapped_blocks = sbi->sect_per_clus - sec_offset;
        max_blocks = min(mapped_blocks, max_blocks);
 
-       /* Treat newly added block / cluster */
-       if (iblock < last_block)
-               create = 0;
-
-       if (create || buffer_delay(bh_result)) {
-               pos = EXFAT_BLK_TO_B((iblock + 1), sb);
+       pos = EXFAT_BLK_TO_B((iblock + 1), sb);
+       if ((create && iblock >= last_block) || buffer_delay(bh_result)) {
                if (ei->i_size_ondisk < pos)
                        ei->i_size_ondisk = pos;
        }
 
+       map_bh(bh_result, sb, phys);
+       if (buffer_delay(bh_result))
+               clear_buffer_delay(bh_result);
+
        if (create) {
+               valid_blks = EXFAT_B_TO_BLK_ROUND_UP(ei->valid_size, sb);
+
+               if (iblock + max_blocks < valid_blks) {
+                       /* The range has been written, map it */
+                       goto done;
+               } else if (iblock < valid_blks) {
+                       /*
+                        * The range has been partially written,
+                        * map the written part.
+                        */
+                       max_blocks = valid_blks - iblock;
+                       goto done;
+               }
+
+               /* The area has not been written, map and mark as new. */
                err = exfat_map_new_buffer(ei, bh_result, pos);
                if (err) {
                        exfat_fs_error(sb,
@@ -324,11 +349,58 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
                                        pos, ei->i_size_aligned);
                        goto unlock_ret;
                }
-       }
 
-       if (buffer_delay(bh_result))
-               clear_buffer_delay(bh_result);
-       map_bh(bh_result, sb, phys);
+               ei->valid_size = EXFAT_BLK_TO_B(iblock + max_blocks, sb);
+               mark_inode_dirty(inode);
+       } else {
+               valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb);
+
+               if (iblock + max_blocks < valid_blks) {
+                       /* The range has been written, map it */
+                       goto done;
+               } else if (iblock < valid_blks) {
+                       /*
+                        * The area has been partially written,
+                        * map the written part.
+                        */
+                       max_blocks = valid_blks - iblock;
+                       goto done;
+               } else if (iblock == valid_blks &&
+                          (ei->valid_size & (sb->s_blocksize - 1))) {
+                       /*
+                        * The block has been partially written,
+                        * zero the unwritten part and map the block.
+                        */
+                       loff_t size, off;
+
+                       max_blocks = 1;
+
+                       /*
+                        * For direct read, the unwritten part will be zeroed in
+                        * exfat_direct_IO()
+                        */
+                       if (!bh_result->b_folio)
+                               goto done;
+
+                       pos -= sb->s_blocksize;
+                       size = ei->valid_size - pos;
+                       off = pos & (PAGE_SIZE - 1);
+
+                       folio_set_bh(bh_result, bh_result->b_folio, off);
+                       err = bh_read(bh_result, 0);
+                       if (err < 0)
+                               goto unlock_ret;
+
+                       folio_zero_segment(bh_result->b_folio, off + size,
+                                       off + sb->s_blocksize);
+               } else {
+                       /*
+                        * The range has not been written, clear the mapped flag
+                        * to only zero the cache and do not read from disk.
+                        */
+                       clear_buffer_mapped(bh_result);
+               }
+       }
 done:
        bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb);
 unlock_ret:
@@ -343,6 +415,17 @@ static int exfat_read_folio(struct file *file, struct folio *folio)
 
 static void exfat_readahead(struct readahead_control *rac)
 {
+       struct address_space *mapping = rac->mapping;
+       struct inode *inode = mapping->host;
+       struct exfat_inode_info *ei = EXFAT_I(inode);
+       loff_t pos = readahead_pos(rac);
+
+       /* Range cross valid_size, read it page by page. */
+       if (ei->valid_size < i_size_read(inode) &&
+           pos <= ei->valid_size &&
+           ei->valid_size < pos + readahead_length(rac))
+               return;
+
        mpage_readahead(rac, exfat_get_block);
 }
 
@@ -370,9 +453,7 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping,
        int ret;
 
        *pagep = NULL;
-       ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
-                              exfat_get_block,
-                              &EXFAT_I(mapping->host)->i_size_ondisk);
+       ret = block_write_begin(mapping, pos, len, pagep, exfat_get_block);
 
        if (ret < 0)
                exfat_write_failed(mapping, pos+len);
@@ -400,6 +481,11 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
        if (err < len)
                exfat_write_failed(mapping, pos+len);
 
+       if (!(err < 0) && pos + err > ei->valid_size) {
+               ei->valid_size = pos + err;
+               mark_inode_dirty(inode);
+       }
+
        if (!(err < 0) && !(ei->attr & EXFAT_ATTR_ARCHIVE)) {
                inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
                ei->attr |= EXFAT_ATTR_ARCHIVE;
@@ -413,7 +499,9 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = mapping->host;
-       loff_t size = iocb->ki_pos + iov_iter_count(iter);
+       struct exfat_inode_info *ei = EXFAT_I(inode);
+       loff_t pos = iocb->ki_pos;
+       loff_t size = pos + iov_iter_count(iter);
        int rw = iov_iter_rw(iter);
        ssize_t ret;
 
@@ -436,8 +524,20 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
         * condition of exfat_get_block() and ->truncate().
         */
        ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
-       if (ret < 0 && (rw & WRITE))
-               exfat_write_failed(mapping, size);
+       if (ret < 0) {
+               if (rw == WRITE && ret != -EIOCBQUEUED)
+                       exfat_write_failed(mapping, size);
+
+               return ret;
+       } else
+               size = pos + ret;
+
+       /* zero the unwritten part in the partially written block */
+       if (rw == READ && pos < ei->valid_size && ei->valid_size < size) {
+               iov_iter_revert(iter, size - ei->valid_size);
+               iov_iter_zero(size - ei->valid_size, iter);
+       }
+
        return ret;
 }
 
@@ -537,6 +637,7 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
        ei->start_clu = info->start_clu;
        ei->flags = info->flags;
        ei->type = info->type;
+       ei->valid_size = info->valid_size;
 
        ei->version = 0;
        ei->hint_stat.eidx = 0;
index 5d737e0b639a14747ac6ba15f1cdd72163818c2a..9c549fd11fc847055a35f7a5872dc044b1576796 100644 (file)
@@ -406,6 +406,7 @@ static int exfat_find_empty_entry(struct inode *inode,
                i_size_write(inode, size);
                ei->i_size_ondisk += sbi->cluster_size;
                ei->i_size_aligned += sbi->cluster_size;
+               ei->valid_size += sbi->cluster_size;
                ei->flags = p_dir->flags;
                inode->i_blocks += sbi->cluster_size >> 9;
        }
@@ -558,6 +559,8 @@ static int exfat_add_entry(struct inode *inode, const char *path,
                info->size = clu_size;
                info->num_subdirs = EXFAT_MIN_SUBDIR;
        }
+       info->valid_size = info->size;
+
        memset(&info->crtime, 0, sizeof(info->crtime));
        memset(&info->mtime, 0, sizeof(info->mtime));
        memset(&info->atime, 0, sizeof(info->atime));
@@ -660,6 +663,8 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
        info->type = exfat_get_entry_type(ep);
        info->attr = le16_to_cpu(ep->dentry.file.attr);
        info->size = le64_to_cpu(ep2->dentry.stream.valid_size);
+       info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size);
+       info->size = le64_to_cpu(ep2->dentry.stream.size);
        if (info->size == 0) {
                info->flags = ALLOC_NO_FAT_CHAIN;
                info->start_clu = EXFAT_EOF_CLUSTER;
@@ -1288,6 +1293,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
                        }
 
                        i_size_write(new_inode, 0);
+                       new_ei->valid_size = 0;
                        new_ei->start_clu = EXFAT_EOF_CLUSTER;
                        new_ei->flags = ALLOC_NO_FAT_CHAIN;
                }
index a5d784872303ddb6731f2bf0f8579170809b36fd..023571f8dd1b43887b691c4dd61742d07ac1b356 100644 (file)
@@ -252,8 +252,10 @@ struct ext4_allocation_request {
 #define EXT4_MAP_MAPPED                BIT(BH_Mapped)
 #define EXT4_MAP_UNWRITTEN     BIT(BH_Unwritten)
 #define EXT4_MAP_BOUNDARY      BIT(BH_Boundary)
+#define EXT4_MAP_DELAYED       BIT(BH_Delay)
 #define EXT4_MAP_FLAGS         (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
-                                EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
+                                EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
+                                EXT4_MAP_DELAYED)
 
 struct ext4_map_blocks {
        ext4_fsblk_t m_pblk;
@@ -2912,10 +2914,10 @@ extern const struct seq_operations ext4_mb_seq_groups_ops;
 extern const struct seq_operations ext4_mb_seq_structs_summary_ops;
 extern int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset);
 extern int ext4_mb_init(struct super_block *);
-extern int ext4_mb_release(struct super_block *);
+extern void ext4_mb_release(struct super_block *);
 extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
                                struct ext4_allocation_request *, int *);
-extern void ext4_discard_preallocations(struct inode *, unsigned int);
+extern void ext4_discard_preallocations(struct inode *);
 extern int __init ext4_init_mballoc(void);
 extern void ext4_exit_mballoc(void);
 extern ext4_group_t ext4_mb_prefetch(struct super_block *sb,
index 01299b55a567aa41fe7147c3d625e5087b11a0fc..7669d154c05e0c1c86c725c2bab490753317d632 100644 (file)
@@ -100,7 +100,7 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
         * i_rwsem. So we can safely drop the i_data_sem here.
         */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
-       ext4_discard_preallocations(inode, 0);
+       ext4_discard_preallocations(inode);
        up_write(&EXT4_I(inode)->i_data_sem);
        *dropped = 1;
        return 0;
@@ -2229,7 +2229,7 @@ static int ext4_fill_es_cache_info(struct inode *inode,
 
 
 /*
- * ext4_ext_determine_hole - determine hole around given block
+ * ext4_ext_find_hole - find hole around given block according to the given path
  * @inode:     inode we lookup in
  * @path:      path in extent tree to @lblk
  * @lblk:      pointer to logical block around which we want to determine hole
@@ -2241,9 +2241,9 @@ static int ext4_fill_es_cache_info(struct inode *inode,
  * The function returns the length of a hole starting at @lblk. We update @lblk
  * to the beginning of the hole if we managed to find it.
  */
-static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
-                                          struct ext4_ext_path *path,
-                                          ext4_lblk_t *lblk)
+static ext4_lblk_t ext4_ext_find_hole(struct inode *inode,
+                                     struct ext4_ext_path *path,
+                                     ext4_lblk_t *lblk)
 {
        int depth = ext_depth(inode);
        struct ext4_extent *ex;
@@ -2270,30 +2270,6 @@ static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
        return len;
 }
 
-/*
- * ext4_ext_put_gap_in_cache:
- * calculate boundaries of the gap that the requested block fits into
- * and cache this gap
- */
-static void
-ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
-                         ext4_lblk_t hole_len)
-{
-       struct extent_status es;
-
-       ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
-                                 hole_start + hole_len - 1, &es);
-       if (es.es_len) {
-               /* There's delayed extent containing lblock? */
-               if (es.es_lblk <= hole_start)
-                       return;
-               hole_len = min(es.es_lblk - hole_start, hole_len);
-       }
-       ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
-       ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
-                             EXTENT_STATUS_HOLE);
-}
-
 /*
  * ext4_ext_rm_idx:
  * removes index from the index block.
@@ -4062,6 +4038,72 @@ static int get_implied_cluster_alloc(struct super_block *sb,
        return 0;
 }
 
+/*
+ * Determine hole length around the given logical block, first try to
+ * locate and expand the hole from the given @path, and then adjust it
+ * if it's partially or completely converted to delayed extents, insert
+ * it into the extent cache tree if it's indeed a hole, finally return
+ * the length of the determined extent.
+ */
+static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
+                                                 struct ext4_ext_path *path,
+                                                 ext4_lblk_t lblk)
+{
+       ext4_lblk_t hole_start, len;
+       struct extent_status es;
+
+       hole_start = lblk;
+       len = ext4_ext_find_hole(inode, path, &hole_start);
+again:
+       ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
+                                 hole_start + len - 1, &es);
+       if (!es.es_len)
+               goto insert_hole;
+
+       /*
+        * There's a delalloc extent in the hole, handle it if the delalloc
+        * extent is in front of, behind and straddle the queried range.
+        */
+       if (lblk >= es.es_lblk + es.es_len) {
+               /*
+                * The delalloc extent is in front of the queried range,
+                * find again from the queried start block.
+                */
+               len -= lblk - hole_start;
+               hole_start = lblk;
+               goto again;
+       } else if (in_range(lblk, es.es_lblk, es.es_len)) {
+               /*
+                * The delalloc extent contains lblk; it must have been
+                * added after ext4_map_blocks() checked the extent status
+                * tree so we are not holding i_rwsem and delalloc info is
+                * only stabilized by i_data_sem we are going to release
+                * soon. Don't modify the extent status tree and report
+                * extent as a hole, just adjust the length to the delalloc
+                * extent's after lblk.
+                */
+               len = es.es_lblk + es.es_len - lblk;
+               return len;
+       } else {
+               /*
+                * The delalloc extent is partially or completely behind
+                * the queried range, update hole length until the
+                * beginning of the delalloc extent.
+                */
+               len = min(es.es_lblk - hole_start, len);
+       }
+
+insert_hole:
+       /* Put just found gap into cache to speed up subsequent requests */
+       ext_debug(inode, " -> %u:%u\n", hole_start, len);
+       ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE);
+
+       /* Update hole_len to reflect hole size after lblk */
+       if (hole_start != lblk)
+               len -= lblk - hole_start;
+
+       return len;
+}
 
 /*
  * Block allocation/map/preallocation routine for extents based files
@@ -4179,22 +4221,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
         * we couldn't try to create block if create flag is zero
         */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
-               ext4_lblk_t hole_start, hole_len;
+               ext4_lblk_t len;
 
-               hole_start = map->m_lblk;
-               hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
-               /*
-                * put just found gap into cache to speed up
-                * subsequent requests
-                */
-               ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
+               len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
 
-               /* Update hole_len to reflect hole size after map->m_lblk */
-               if (hole_start != map->m_lblk)
-                       hole_len -= map->m_lblk - hole_start;
                map->m_pblk = 0;
-               map->m_len = min_t(unsigned int, map->m_len, hole_len);
-
+               map->m_len = min_t(unsigned int, map->m_len, len);
                goto out;
        }
 
@@ -4313,7 +4345,7 @@ got_allocated_blocks:
                         * not a good idea to call discard here directly,
                         * but otherwise we'd need to call it every free().
                         */
-                       ext4_discard_preallocations(inode, 0);
+                       ext4_discard_preallocations(inode);
                        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                                fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
                        ext4_free_blocks(handle, inode, NULL, newblock,
@@ -5357,7 +5389,7 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
        ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
 
        down_write(&EXT4_I(inode)->i_data_sem);
-       ext4_discard_preallocations(inode, 0);
+       ext4_discard_preallocations(inode);
        ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
 
        ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
@@ -5365,7 +5397,7 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
                up_write(&EXT4_I(inode)->i_data_sem);
                goto out_stop;
        }
-       ext4_discard_preallocations(inode, 0);
+       ext4_discard_preallocations(inode);
 
        ret = ext4_ext_shift_extents(inode, handle, punch_stop,
                                     punch_stop - punch_start, SHIFT_LEFT);
@@ -5497,7 +5529,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
                goto out_stop;
 
        down_write(&EXT4_I(inode)->i_data_sem);
-       ext4_discard_preallocations(inode, 0);
+       ext4_discard_preallocations(inode);
 
        path = ext4_find_extent(inode, offset_lblk, NULL, 0);
        if (IS_ERR(path)) {
index 6aa15dafc67786559d3b68ebfefd8f90e119b3fc..54d6ff22585cf1835e8aced5548dbac7c1b89757 100644 (file)
@@ -174,7 +174,7 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks) {
                down_write(&EXT4_I(inode)->i_data_sem);
-               ext4_discard_preallocations(inode, 0);
+               ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
index a9f3716119d37249de9cd1c12f02abf5c8db08cb..d8ca7f64f9523412a264dc5e266a72a7e6027e04 100644 (file)
@@ -714,7 +714,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
         * i_rwsem. So we can safely drop the i_data_sem here.
         */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
-       ext4_discard_preallocations(inode, 0);
+       ext4_discard_preallocations(inode);
        up_write(&EXT4_I(inode)->i_data_sem);
        *dropped = 1;
        return 0;
index 5af1b0b8680e9fa5f34f94f4a49127b554691f00..2ccf3b5e3a7c4dcb1b0c6a9d27a3c8a77a145730 100644 (file)
@@ -371,7 +371,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
         */
        if ((ei->i_reserved_data_blocks == 0) &&
            !inode_is_open_for_write(inode))
-               ext4_discard_preallocations(inode, 0);
+               ext4_discard_preallocations(inode);
 }
 
 static int __check_block_validity(struct inode *inode, const char *func,
@@ -515,6 +515,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                        map->m_len = retval;
                } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
                        map->m_pblk = 0;
+                       map->m_flags |= ext4_es_is_delayed(&es) ?
+                                       EXT4_MAP_DELAYED : 0;
                        retval = es.es_len - (map->m_lblk - es.es_lblk);
                        if (retval > map->m_len)
                                retval = map->m_len;
@@ -1703,11 +1705,8 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
 
        /* Lookup extent status tree firstly */
        if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
-               if (ext4_es_is_hole(&es)) {
-                       retval = 0;
-                       down_read(&EXT4_I(inode)->i_data_sem);
+               if (ext4_es_is_hole(&es))
                        goto add_delayed;
-               }
 
                /*
                 * Delayed extent could be allocated by fallocate.
@@ -1749,26 +1748,11 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                retval = ext4_ext_map_blocks(NULL, inode, map, 0);
        else
                retval = ext4_ind_map_blocks(NULL, inode, map, 0);
-
-add_delayed:
-       if (retval == 0) {
-               int ret;
-
-               /*
-                * XXX: __block_prepare_write() unmaps passed block,
-                * is it OK?
-                */
-
-               ret = ext4_insert_delayed_block(inode, map->m_lblk);
-               if (ret != 0) {
-                       retval = ret;
-                       goto out_unlock;
-               }
-
-               map_bh(bh, inode->i_sb, invalid_block);
-               set_buffer_new(bh);
-               set_buffer_delay(bh);
-       } else if (retval > 0) {
+       if (retval < 0) {
+               up_read(&EXT4_I(inode)->i_data_sem);
+               return retval;
+       }
+       if (retval > 0) {
                unsigned int status;
 
                if (unlikely(retval != map->m_len)) {
@@ -1783,11 +1767,21 @@ add_delayed:
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
                                      map->m_pblk, status);
+               up_read(&EXT4_I(inode)->i_data_sem);
+               return retval;
        }
+       up_read(&EXT4_I(inode)->i_data_sem);
 
-out_unlock:
-       up_read((&EXT4_I(inode)->i_data_sem));
+add_delayed:
+       down_write(&EXT4_I(inode)->i_data_sem);
+       retval = ext4_insert_delayed_block(inode, map->m_lblk);
+       up_write(&EXT4_I(inode)->i_data_sem);
+       if (retval)
+               return retval;
 
+       map_bh(bh, inode->i_sb, invalid_block);
+       set_buffer_new(bh);
+       set_buffer_delay(bh);
        return retval;
 }
 
@@ -3268,6 +3262,9 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
                iomap->addr = (u64) map->m_pblk << blkbits;
                if (flags & IOMAP_DAX)
                        iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
+       } else if (map->m_flags & EXT4_MAP_DELAYED) {
+               iomap->type = IOMAP_DELALLOC;
+               iomap->addr = IOMAP_NULL_ADDR;
        } else {
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
@@ -3430,35 +3427,11 @@ const struct iomap_ops ext4_iomap_overwrite_ops = {
        .iomap_end              = ext4_iomap_end,
 };
 
-static bool ext4_iomap_is_delalloc(struct inode *inode,
-                                  struct ext4_map_blocks *map)
-{
-       struct extent_status es;
-       ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
-
-       ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
-                                 map->m_lblk, end, &es);
-
-       if (!es.es_len || es.es_lblk > end)
-               return false;
-
-       if (es.es_lblk > map->m_lblk) {
-               map->m_len = es.es_lblk - map->m_lblk;
-               return false;
-       }
-
-       offset = map->m_lblk - es.es_lblk;
-       map->m_len = es.es_len - offset;
-
-       return true;
-}
-
 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
                                   loff_t length, unsigned int flags,
                                   struct iomap *iomap, struct iomap *srcmap)
 {
        int ret;
-       bool delalloc = false;
        struct ext4_map_blocks map;
        u8 blkbits = inode->i_blkbits;
 
@@ -3499,13 +3472,8 @@ static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
        ret = ext4_map_blocks(NULL, inode, &map, 0);
        if (ret < 0)
                return ret;
-       if (ret == 0)
-               delalloc = ext4_iomap_is_delalloc(inode, &map);
-
 set_iomap:
        ext4_set_iomap(inode, iomap, &map, offset, length, flags);
-       if (delalloc && iomap->type == IOMAP_HOLE)
-               iomap->type = IOMAP_DELALLOC;
 
        return 0;
 }
@@ -4015,12 +3983,12 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
 
        /* If there are blocks to remove, do it */
        if (stop_block > first_block) {
+               ext4_lblk_t hole_len = stop_block - first_block;
 
                down_write(&EXT4_I(inode)->i_data_sem);
-               ext4_discard_preallocations(inode, 0);
+               ext4_discard_preallocations(inode);
 
-               ext4_es_remove_extent(inode, first_block,
-                                     stop_block - first_block);
+               ext4_es_remove_extent(inode, first_block, hole_len);
 
                if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                        ret = ext4_ext_remove_space(inode, first_block,
@@ -4029,6 +3997,8 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
                        ret = ext4_ind_remove_space(handle, inode, first_block,
                                                    stop_block);
 
+               ext4_es_insert_extent(inode, first_block, hole_len, ~0,
+                                     EXTENT_STATUS_HOLE);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        ext4_fc_track_range(handle, inode, first_block, stop_block);
@@ -4170,7 +4140,7 @@ int ext4_truncate(struct inode *inode)
 
        down_write(&EXT4_I(inode)->i_data_sem);
 
-       ext4_discard_preallocations(inode, 0);
+       ext4_discard_preallocations(inode);
 
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                err = ext4_ext_truncate(handle, inode);
index aa6be510eb8f578f09faf937a3debaabb9c3b499..7160a71044c88a8fe409111ec51cd597408f98f4 100644 (file)
@@ -467,7 +467,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
        ext4_reset_inode_seed(inode);
        ext4_reset_inode_seed(inode_bl);
 
-       ext4_discard_preallocations(inode, 0);
+       ext4_discard_preallocations(inode);
 
        err = ext4_mark_inode_dirty(handle, inode);
        if (err < 0) {
index f44f668e407f2bda9fe325631c9a4ab62649b9dc..e4f7cf9d89c45a881d6c403fd50fcc499db0b708 100644 (file)
@@ -564,14 +564,14 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
 
                        blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
                        blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
+                       ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                        ext4_grp_locked_error(sb, e4b->bd_group,
                                              inode ? inode->i_ino : 0,
                                              blocknr,
                                              "freeing block already freed "
                                              "(bit %u)",
                                              first + i);
-                       ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
-                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                }
                mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
        }
@@ -677,7 +677,7 @@ do {                                                                        \
        }                                                               \
 } while (0)
 
-static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
+static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
                                const char *function, int line)
 {
        struct super_block *sb = e4b->bd_sb;
@@ -696,7 +696,7 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
        void *buddy2;
 
        if (e4b->bd_info->bb_check_counter++ % 10)
-               return 0;
+               return;
 
        while (order > 1) {
                buddy = mb_find_buddy(e4b, order, &max);
@@ -758,7 +758,7 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
 
        grp = ext4_get_group_info(sb, e4b->bd_group);
        if (!grp)
-               return NULL;
+               return;
        list_for_each(cur, &grp->bb_prealloc_list) {
                ext4_group_t groupnr;
                struct ext4_prealloc_space *pa;
@@ -768,7 +768,6 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
                for (i = 0; i < pa->pa_len; i++)
                        MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
        }
-       return 0;
 }
 #undef MB_CHECK_ASSERT
 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,      \
@@ -842,7 +841,7 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int new_order;
 
-       if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
+       if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
                return;
 
        new_order = mb_avg_fragment_size_order(sb,
@@ -871,7 +870,7 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
  * cr level needs an update.
  */
 static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
-                       enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+                       enum criteria *new_cr, ext4_group_t *group)
 {
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *iter;
@@ -945,7 +944,7 @@ ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int o
  * order. Updates *new_cr if cr level needs an update.
  */
 static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
-               enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+               enum criteria *new_cr, ext4_group_t *group)
 {
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *grp = NULL;
@@ -990,7 +989,7 @@ static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *
  * much and fall to CR_GOAL_LEN_SLOW in that case.
  */
 static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
-               enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+               enum criteria *new_cr, ext4_group_t *group)
 {
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *grp = NULL;
@@ -1125,11 +1124,11 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
        }
 
        if (*new_cr == CR_POWER2_ALIGNED) {
-               ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group, ngroups);
+               ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group);
        } else if (*new_cr == CR_GOAL_LEN_FAST) {
-               ext4_mb_choose_next_group_goal_fast(ac, new_cr, group, ngroups);
+               ext4_mb_choose_next_group_goal_fast(ac, new_cr, group);
        } else if (*new_cr == CR_BEST_AVAIL_LEN) {
-               ext4_mb_choose_next_group_best_avail(ac, new_cr, group, ngroups);
+               ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
        } else {
                /*
                 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
@@ -1233,6 +1232,24 @@ void ext4_mb_generate_buddy(struct super_block *sb,
        atomic64_add(period, &sbi->s_mb_generation_time);
 }
 
+static void mb_regenerate_buddy(struct ext4_buddy *e4b)
+{
+       int count;
+       int order = 1;
+       void *buddy;
+
+       while ((buddy = mb_find_buddy(e4b, order++, &count)))
+               mb_set_bits(buddy, 0, count);
+
+       e4b->bd_info->bb_fragments = 0;
+       memset(e4b->bd_info->bb_counters, 0,
+               sizeof(*e4b->bd_info->bb_counters) *
+               (e4b->bd_sb->s_blocksize_bits + 2));
+
+       ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
+               e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
+}
+
 /* The buddy information is attached the buddy cache inode
  * for convenience. The information regarding each group
  * is loaded via ext4_mb_load_buddy. The information involve
@@ -1891,11 +1908,6 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
        mb_check_buddy(e4b);
        mb_free_blocks_double(inode, e4b, first, count);
 
-       this_cpu_inc(discard_pa_seq);
-       e4b->bd_info->bb_free += count;
-       if (first < e4b->bd_info->bb_first_free)
-               e4b->bd_info->bb_first_free = first;
-
        /* access memory sequentially: check left neighbour,
         * clear range and then check right neighbour
         */
@@ -1909,21 +1921,31 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
                struct ext4_sb_info *sbi = EXT4_SB(sb);
                ext4_fsblk_t blocknr;
 
+               /*
+                * Fastcommit replay can free already freed blocks which
+                * corrupts allocation info. Regenerate it.
+                */
+               if (sbi->s_mount_state & EXT4_FC_REPLAY) {
+                       mb_regenerate_buddy(e4b);
+                       goto check;
+               }
+
                blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
                blocknr += EXT4_C2B(sbi, block);
-               if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
-                       ext4_grp_locked_error(sb, e4b->bd_group,
-                                             inode ? inode->i_ino : 0,
-                                             blocknr,
-                                             "freeing already freed block (bit %u); block bitmap corrupt.",
-                                             block);
-                       ext4_mark_group_bitmap_corrupted(
-                               sb, e4b->bd_group,
+               ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
                                EXT4_GROUP_INFO_BBITMAP_CORRUPT);
-               }
-               goto done;
+               ext4_grp_locked_error(sb, e4b->bd_group,
+                                     inode ? inode->i_ino : 0, blocknr,
+                                     "freeing already freed block (bit %u); block bitmap corrupt.",
+                                     block);
+               return;
        }
 
+       this_cpu_inc(discard_pa_seq);
+       e4b->bd_info->bb_free += count;
+       if (first < e4b->bd_info->bb_first_free)
+               e4b->bd_info->bb_first_free = first;
+
        /* let's maintain fragments counter */
        if (left_is_free && right_is_free)
                e4b->bd_info->bb_fragments--;
@@ -1948,9 +1970,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
        if (first <= last)
                mb_buddy_mark_free(e4b, first >> 1, last >> 1);
 
-done:
        mb_set_largest_free_order(sb, e4b->bd_info);
        mb_update_avg_fragment_size(sb, e4b->bd_info);
+check:
        mb_check_buddy(e4b);
 }
 
@@ -2276,6 +2298,9 @@ void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
                return;
 
        ext4_lock_group(ac->ac_sb, group);
+       if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+               goto out;
+
        max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
 
        if (max > 0) {
@@ -2283,6 +2308,7 @@ void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
                ext4_mb_use_best_found(ac, e4b);
        }
 
+out:
        ext4_unlock_group(ac->ac_sb, group);
        ext4_mb_unload_buddy(e4b);
 }
@@ -2309,12 +2335,10 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
        if (err)
                return err;
 
-       if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
-               ext4_mb_unload_buddy(e4b);
-               return 0;
-       }
-
        ext4_lock_group(ac->ac_sb, group);
+       if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+               goto out;
+
        max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
                             ac->ac_g_ex.fe_len, &ex);
        ex.fe_logical = 0xDEADFA11; /* debug value */
@@ -2347,6 +2371,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
                ac->ac_b_ex = ex;
                ext4_mb_use_best_found(ac, e4b);
        }
+out:
        ext4_unlock_group(ac->ac_sb, group);
        ext4_mb_unload_buddy(e4b);
 
@@ -2380,12 +2405,12 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
 
                k = mb_find_next_zero_bit(buddy, max, 0);
                if (k >= max) {
+                       ext4_mark_group_bitmap_corrupted(ac->ac_sb,
+                                       e4b->bd_group,
+                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                        ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
                                "%d free clusters of order %d. But found 0",
                                grp->bb_counters[i], i);
-                       ext4_mark_group_bitmap_corrupted(ac->ac_sb,
-                                        e4b->bd_group,
-                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                        break;
                }
                ac->ac_found++;
@@ -2436,12 +2461,12 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
                         * free blocks even though group info says we
                         * have free blocks
                         */
+                       ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                        ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
                                        "%d free clusters as per "
                                        "group info. But bitmap says 0",
                                        free);
-                       ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
-                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                        break;
                }
 
@@ -2467,12 +2492,12 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
                if (WARN_ON(ex.fe_len <= 0))
                        break;
                if (free < ex.fe_len) {
+                       ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                        ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
                                        "%d free clusters as per "
                                        "group info. But got %d blocks",
                                        free, ex.fe_len);
-                       ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
-                                       EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                        /*
                         * The number of free blocks differs. This mostly
                         * indicate that the bitmap is corrupt. So exit
@@ -3725,7 +3750,7 @@ static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
        return count;
 }
 
-int ext4_mb_release(struct super_block *sb)
+void ext4_mb_release(struct super_block *sb)
 {
        ext4_group_t ngroups = ext4_get_groups_count(sb);
        ext4_group_t i;
@@ -3801,8 +3826,6 @@ int ext4_mb_release(struct super_block *sb)
        }
 
        free_percpu(sbi->s_locality_groups);
-
-       return 0;
 }
 
 static inline int ext4_issue_discard(struct super_block *sb,
@@ -5284,7 +5307,7 @@ static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
  * the caller MUST hold group/inode locks.
  * TODO: optimize the case when there are no in-core structures yet
  */
-static noinline_for_stack int
+static noinline_for_stack void
 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
                        struct ext4_prealloc_space *pa)
 {
@@ -5334,11 +5357,9 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
                 */
        }
        atomic_add(free, &sbi->s_mb_discarded);
-
-       return 0;
 }
 
-static noinline_for_stack int
+static noinline_for_stack void
 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
                                struct ext4_prealloc_space *pa)
 {
@@ -5352,13 +5373,11 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
        if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
                ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
                             e4b->bd_group, group, pa->pa_pstart);
-               return 0;
+               return;
        }
        mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
        atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
        trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
-
-       return 0;
 }
 
 /*
@@ -5479,7 +5498,7 @@ out_dbg:
  *
  * FIXME!! Make sure it is valid at all the call sites
  */
-void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
+void ext4_discard_preallocations(struct inode *inode)
 {
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct super_block *sb = inode->i_sb;
@@ -5491,9 +5510,8 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
        struct rb_node *iter;
        int err;
 
-       if (!S_ISREG(inode->i_mode)) {
+       if (!S_ISREG(inode->i_mode))
                return;
-       }
 
        if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
                return;
@@ -5501,15 +5519,12 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
        mb_debug(sb, "discard preallocation for inode %lu\n",
                 inode->i_ino);
        trace_ext4_discard_preallocations(inode,
-                       atomic_read(&ei->i_prealloc_active), needed);
-
-       if (needed == 0)
-               needed = UINT_MAX;
+                       atomic_read(&ei->i_prealloc_active));
 
 repeat:
        /* first, collect all pa's in the inode */
        write_lock(&ei->i_prealloc_lock);
-       for (iter = rb_first(&ei->i_prealloc_node); iter && needed;
+       for (iter = rb_first(&ei->i_prealloc_node); iter;
             iter = rb_next(iter)) {
                pa = rb_entry(iter, struct ext4_prealloc_space,
                              pa_node.inode_node);
@@ -5533,7 +5548,6 @@ repeat:
                        spin_unlock(&pa->pa_lock);
                        rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
                        list_add(&pa->u.pa_tmp_list, &list);
-                       needed--;
                        continue;
                }
 
@@ -5943,7 +5957,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
 /*
  * release all resource we used in allocation
  */
-static int ext4_mb_release_context(struct ext4_allocation_context *ac)
+static void ext4_mb_release_context(struct ext4_allocation_context *ac)
 {
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_prealloc_space *pa = ac->ac_pa;
@@ -5980,7 +5994,6 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
        if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
                mutex_unlock(&ac->ac_lg->lg_mutex);
        ext4_mb_collect_stats(ac);
-       return 0;
 }
 
 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
@@ -6761,6 +6774,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
        bool set_trimmed = false;
        void *bitmap;
 
+       if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+               return 0;
+
        last = ext4_last_grp_cluster(sb, e4b->bd_group);
        bitmap = e4b->bd_bitmap;
        if (start == 0 && max >= last)
index d7aeb5da7d86768c10efdc7ef38b4a6cee38ca78..56938532b4ce258e210178d2406e187eec5ef8cc 100644 (file)
@@ -192,7 +192,6 @@ struct ext4_allocation_context {
         */
        ext4_grpblk_t   ac_orig_goal_len;
 
-       __u32 ac_groups_considered;
        __u32 ac_flags;         /* allocation hints */
        __u16 ac_groups_scanned;
        __u16 ac_groups_linear_remaining;
index 3aa57376d9c2ecbba3d272b572bf56b924b33103..7cd4afa4de1d3127a34ec02f166e90876ad5c6e4 100644 (file)
@@ -618,6 +618,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
                goto out;
        o_end = o_start + len;
 
+       *moved_len = 0;
        while (o_start < o_end) {
                struct ext4_extent *ex;
                ext4_lblk_t cur_blk, next_blk;
@@ -672,7 +673,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
                 */
                ext4_double_up_write_data_sem(orig_inode, donor_inode);
                /* Swap original branches with new branches */
-               move_extent_per_page(o_filp, donor_inode,
+               *moved_len += move_extent_per_page(o_filp, donor_inode,
                                     orig_page_index, donor_page_index,
                                     offset_in_page, cur_len,
                                     unwritten, &ret);
@@ -682,14 +683,11 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
                o_start += cur_len;
                d_start += cur_len;
        }
-       *moved_len = o_start - orig_blk;
-       if (*moved_len > len)
-               *moved_len = len;
 
 out:
        if (*moved_len) {
-               ext4_discard_preallocations(orig_inode, 0);
-               ext4_discard_preallocations(donor_inode, 0);
+               ext4_discard_preallocations(orig_inode);
+               ext4_discard_preallocations(donor_inode);
        }
 
        ext4_free_ext_path(path);
index dcba0f85dfe245ab83598d5a451783b02044be52..0f931d0c227daa8b00950667d8b8bb42a7a28a48 100644 (file)
@@ -1525,7 +1525,7 @@ void ext4_clear_inode(struct inode *inode)
        ext4_fc_del(inode);
        invalidate_inode_buffers(inode);
        clear_inode(inode);
-       ext4_discard_preallocations(inode, 0);
+       ext4_discard_preallocations(inode);
        ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
        dquot_drop(inode);
        if (EXT4_I(inode)->jinode) {
index d00d21a8b53adf8defa18deb55d438e3dd855a1a..d45ab0992ae5947e6f89628e8e8829c548645d26 100644 (file)
@@ -4880,6 +4880,7 @@ free_sbi:
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        kfree(sbi);
+       sb->s_fs_info = NULL;
 
        /* give only one another chance */
        if (retry_cnt > 0 && skip_recovery) {
index 1767493dffda73b77fe3c394967c003a426fb13d..3d84fcc471c6000e38625e2652121282e4bec3c0 100644 (file)
@@ -1675,11 +1675,11 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 
        if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                inode->i_state |= I_DIRTY_PAGES;
-       else if (unlikely(inode->i_state & I_PINNING_FSCACHE_WB)) {
+       else if (unlikely(inode->i_state & I_PINNING_NETFS_WB)) {
                if (!(inode->i_state & I_DIRTY_PAGES)) {
-                       inode->i_state &= ~I_PINNING_FSCACHE_WB;
-                       wbc->unpinned_fscache_wb = true;
-                       dirty |= I_PINNING_FSCACHE_WB; /* Cause write_inode */
+                       inode->i_state &= ~I_PINNING_NETFS_WB;
+                       wbc->unpinned_netfs_wb = true;
+                       dirty |= I_PINNING_NETFS_WB; /* Cause write_inode */
                }
        }
 
@@ -1691,7 +1691,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
                if (ret == 0)
                        ret = err;
        }
-       wbc->unpinned_fscache_wb = false;
+       wbc->unpinned_netfs_wb = false;
        trace_writeback_single_inode(inode, wbc, nr_to_write);
        return ret;
 }
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
deleted file mode 100644 (file)
index b313a97..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-config FSCACHE
-       tristate "General filesystem local caching manager"
-       select NETFS_SUPPORT
-       help
-         This option enables a generic filesystem caching manager that can be
-         used by various network and other filesystems to cache data locally.
-         Different sorts of caches can be plugged in, depending on the
-         resources available.
-
-         See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_STATS
-       bool "Gather statistical information on local caching"
-       depends on FSCACHE && PROC_FS
-       select NETFS_STATS
-       help
-         This option causes statistical information to be gathered on local
-         caching and exported through file:
-
-               /proc/fs/fscache/stats
-
-         The gathering of statistics adds a certain amount of overhead to
-         execution as there are a quite a few stats gathered, and on a
-         multi-CPU system these may be on cachelines that keep bouncing
-         between CPUs.  On the other hand, the stats are very useful for
-         debugging purposes.  Saying 'Y' here is recommended.
-
-         See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_DEBUG
-       bool "Debug FS-Cache"
-       depends on FSCACHE
-       help
-         This permits debugging to be dynamically enabled in the local caching
-         management module.  If this is set, the debugging output may be
-         enabled by setting bits in /sys/modules/fscache/parameter/debug.
-
-         See Documentation/filesystems/caching/fscache.rst for more information.
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
deleted file mode 100644 (file)
index afb090e..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for general filesystem caching code
-#
-
-fscache-y := \
-       cache.o \
-       cookie.o \
-       io.o \
-       main.o \
-       volume.o
-
-fscache-$(CONFIG_PROC_FS) += proc.o
-fscache-$(CONFIG_FSCACHE_STATS) += stats.o
-
-obj-$(CONFIG_FSCACHE) := fscache.o
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
deleted file mode 100644 (file)
index 1336f51..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Internal definitions for FS-Cache
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) "FS-Cache: " fmt
-
-#include <linux/slab.h>
-#include <linux/fscache-cache.h>
-#include <trace/events/fscache.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-
-/*
- * cache.c
- */
-#ifdef CONFIG_PROC_FS
-extern const struct seq_operations fscache_caches_seq_ops;
-#endif
-bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
-void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
-struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
-void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
-
-static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
-{
-       return smp_load_acquire(&cache->state);
-}
-
-static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
-{
-       return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
-}
-
-static inline void fscache_set_cache_state(struct fscache_cache *cache,
-                                          enum fscache_cache_state new_state)
-{
-       smp_store_release(&cache->state, new_state);
-
-}
-
-static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
-                                                enum fscache_cache_state old_state,
-                                                enum fscache_cache_state new_state)
-{
-       return try_cmpxchg_release(&cache->state, &old_state, new_state);
-}
-
-/*
- * cookie.c
- */
-extern struct kmem_cache *fscache_cookie_jar;
-#ifdef CONFIG_PROC_FS
-extern const struct seq_operations fscache_cookies_seq_ops;
-#endif
-extern struct timer_list fscache_cookie_lru_timer;
-
-extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
-extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
-                                       enum fscache_access_trace why);
-
-static inline void fscache_see_cookie(struct fscache_cookie *cookie,
-                                     enum fscache_cookie_trace where)
-{
-       trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
-                            where);
-}
-
-/*
- * main.c
- */
-extern unsigned fscache_debug;
-
-extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
-
-/*
- * proc.c
- */
-#ifdef CONFIG_PROC_FS
-extern int __init fscache_proc_init(void);
-extern void fscache_proc_cleanup(void);
-#else
-#define fscache_proc_init()    (0)
-#define fscache_proc_cleanup() do {} while (0)
-#endif
-
-/*
- * stats.c
- */
-#ifdef CONFIG_FSCACHE_STATS
-extern atomic_t fscache_n_volumes;
-extern atomic_t fscache_n_volumes_collision;
-extern atomic_t fscache_n_volumes_nomem;
-extern atomic_t fscache_n_cookies;
-extern atomic_t fscache_n_cookies_lru;
-extern atomic_t fscache_n_cookies_lru_expired;
-extern atomic_t fscache_n_cookies_lru_removed;
-extern atomic_t fscache_n_cookies_lru_dropped;
-
-extern atomic_t fscache_n_acquires;
-extern atomic_t fscache_n_acquires_ok;
-extern atomic_t fscache_n_acquires_oom;
-
-extern atomic_t fscache_n_invalidates;
-
-extern atomic_t fscache_n_relinquishes;
-extern atomic_t fscache_n_relinquishes_retire;
-extern atomic_t fscache_n_relinquishes_dropped;
-
-extern atomic_t fscache_n_resizes;
-extern atomic_t fscache_n_resizes_null;
-
-static inline void fscache_stat(atomic_t *stat)
-{
-       atomic_inc(stat);
-}
-
-static inline void fscache_stat_d(atomic_t *stat)
-{
-       atomic_dec(stat);
-}
-
-#define __fscache_stat(stat) (stat)
-
-int fscache_stats_show(struct seq_file *m, void *v);
-#else
-
-#define __fscache_stat(stat) (NULL)
-#define fscache_stat(stat) do {} while (0)
-#define fscache_stat_d(stat) do {} while (0)
-#endif
-
-/*
- * volume.c
- */
-#ifdef CONFIG_PROC_FS
-extern const struct seq_operations fscache_volumes_seq_ops;
-#endif
-
-struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
-                                         enum fscache_volume_trace where);
-void fscache_put_volume(struct fscache_volume *volume,
-                       enum fscache_volume_trace where);
-bool fscache_begin_volume_access(struct fscache_volume *volume,
-                                struct fscache_cookie *cookie,
-                                enum fscache_access_trace why);
-void fscache_create_volume(struct fscache_volume *volume, bool wait);
-
-
-/*****************************************************************************/
-/*
- * debug tracing
- */
-#define dbgprintk(FMT, ...) \
-       printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
-
-#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
-
-#define kjournal(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
-
-#ifdef __KDEBUG
-#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
-#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
-#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
-
-#elif defined(CONFIG_FSCACHE_DEBUG)
-#define _enter(FMT, ...)                       \
-do {                                           \
-       if (__do_kdebug(ENTER))                 \
-               kenter(FMT, ##__VA_ARGS__);     \
-} while (0)
-
-#define _leave(FMT, ...)                       \
-do {                                           \
-       if (__do_kdebug(LEAVE))                 \
-               kleave(FMT, ##__VA_ARGS__);     \
-} while (0)
-
-#define _debug(FMT, ...)                       \
-do {                                           \
-       if (__do_kdebug(DEBUG))                 \
-               kdebug(FMT, ##__VA_ARGS__);     \
-} while (0)
-
-#else
-#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
-#endif
-
-/*
- * determine whether a particular optional debugging point should be logged
- * - we need to go through three steps to persuade cpp to correctly join the
- *   shorthand in FSCACHE_DEBUG_LEVEL with its prefix
- */
-#define ____do_kdebug(LEVEL, POINT) \
-       unlikely((fscache_debug & \
-                 (FSCACHE_POINT_##POINT << (FSCACHE_DEBUG_ ## LEVEL * 3))))
-#define ___do_kdebug(LEVEL, POINT) \
-       ____do_kdebug(LEVEL, POINT)
-#define __do_kdebug(POINT) \
-       ___do_kdebug(FSCACHE_DEBUG_LEVEL, POINT)
-
-#define FSCACHE_DEBUG_CACHE    0
-#define FSCACHE_DEBUG_COOKIE   1
-#define FSCACHE_DEBUG_OBJECT   2
-#define FSCACHE_DEBUG_OPERATION        3
-
-#define FSCACHE_POINT_ENTER    1
-#define FSCACHE_POINT_LEAVE    2
-#define FSCACHE_POINT_DEBUG    4
-
-#ifndef FSCACHE_DEBUG_LEVEL
-#define FSCACHE_DEBUG_LEVEL CACHE
-#endif
-
-/*
- * assertions
- */
-#if 1 /* defined(__KDEBUGALL) */
-
-#define ASSERT(X)                                                      \
-do {                                                                   \
-       if (unlikely(!(X))) {                                           \
-               pr_err("\n");                                   \
-               pr_err("Assertion failed\n");   \
-               BUG();                                                  \
-       }                                                               \
-} while (0)
-
-#define ASSERTCMP(X, OP, Y)                                            \
-do {                                                                   \
-       if (unlikely(!((X) OP (Y)))) {                                  \
-               pr_err("\n");                                   \
-               pr_err("Assertion failed\n");   \
-               pr_err("%lx " #OP " %lx is false\n",            \
-                      (unsigned long)(X), (unsigned long)(Y));         \
-               BUG();                                                  \
-       }                                                               \
-} while (0)
-
-#define ASSERTIF(C, X)                                                 \
-do {                                                                   \
-       if (unlikely((C) && !(X))) {                                    \
-               pr_err("\n");                                   \
-               pr_err("Assertion failed\n");   \
-               BUG();                                                  \
-       }                                                               \
-} while (0)
-
-#define ASSERTIFCMP(C, X, OP, Y)                                       \
-do {                                                                   \
-       if (unlikely((C) && !((X) OP (Y)))) {                           \
-               pr_err("\n");                                   \
-               pr_err("Assertion failed\n");   \
-               pr_err("%lx " #OP " %lx is false\n",            \
-                      (unsigned long)(X), (unsigned long)(Y));         \
-               BUG();                                                  \
-       }                                                               \
-} while (0)
-
-#else
-
-#define ASSERT(X)                      do {} while (0)
-#define ASSERTCMP(X, OP, Y)            do {} while (0)
-#define ASSERTIF(C, X)                 do {} while (0)
-#define ASSERTIFCMP(C, X, OP, Y)       do {} while (0)
-
-#endif /* assert or not */
index 177f1f41f225458344cd000147d71079c19689ab..2e215e8c3c88e57d6ed17ba6cc5cb22420e99af6 100644 (file)
 
 static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
 {
-       struct dentry *parent = NULL;
+       struct dentry *parent;
        struct gfs2_sbd *sdp;
        struct gfs2_inode *dip;
-       struct inode *dinode, *inode;
+       struct inode *inode;
        struct gfs2_holder d_gh;
        struct gfs2_inode *ip = NULL;
        int error, valid = 0;
        int had_lock = 0;
 
-       if (flags & LOOKUP_RCU) {
-               dinode = d_inode_rcu(READ_ONCE(dentry->d_parent));
-               if (!dinode)
-                       return -ECHILD;
-       } else {
-               parent = dget_parent(dentry);
-               dinode = d_inode(parent);
-       }
-       sdp = GFS2_SB(dinode);
-       dip = GFS2_I(dinode);
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+
+       parent = dget_parent(dentry);
+       sdp = GFS2_SB(d_inode(parent));
+       dip = GFS2_I(d_inode(parent));
        inode = d_inode(dentry);
 
        if (inode) {
@@ -66,8 +62,7 @@ static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
 
        had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
        if (!had_lock) {
-               error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED,
-                                          flags & LOOKUP_RCU ? GL_NOBLOCK : 0, &d_gh);
+               error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
                if (error)
                        goto out;
        }
index 6bfc9383b7b8eca60aad0d88c341904b572681bb..1b95db2c3aac3c9a9d5d881985e70622342b52ab 100644 (file)
@@ -1882,10 +1882,10 @@ int gfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
                WARN_ON_ONCE(!may_not_block);
                return -ECHILD;
         }
-       if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
-               int noblock = may_not_block ? GL_NOBLOCK : 0;
-               error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
-                                          LM_FLAG_ANY | noblock, &i_gh);
+       if (gfs2_glock_is_locked_by_me(gl) == NULL) {
+               if (may_not_block)
+                       return -ECHILD;
+               error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
                if (error)
                        return error;
        }
index ea5b8e57d904e20b964fb5e627c4bae894370401..d746866ae3b6ba79a4ed1d8b6600c29cfc28e005 100644 (file)
@@ -100,6 +100,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);
+       vm_flags_t vm_flags;
 
        /*
         * vma address alignment (but not the pgoff alignment) has
@@ -141,10 +142,20 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        file_accessed(file);
 
        ret = -ENOMEM;
+
+       vm_flags = vma->vm_flags;
+       /*
+        * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip
+        * reserving here. Note: only for SHM hugetlbfs file, the inode
+        * flag S_PRIVATE is set.
+        */
+       if (inode->i_flags & S_PRIVATE)
+               vm_flags |= VM_NORESERVE;
+
        if (!hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
-                               vma->vm_flags))
+                               vm_flags))
                goto out;
 
        ret = 0;
@@ -340,7 +351,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
                } else {
                        folio_unlock(folio);
 
-                       if (!folio_test_has_hwpoisoned(folio))
+                       if (!folio_test_hwpoison(folio))
                                want = nr;
                        else {
                                /*
@@ -1354,6 +1365,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
 {
        struct hugetlbfs_fs_context *ctx = fc->fs_private;
        struct fs_parse_result result;
+       struct hstate *h;
        char *rest;
        unsigned long ps;
        int opt;
@@ -1398,11 +1410,12 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
 
        case Opt_pagesize:
                ps = memparse(param->string, &rest);
-               ctx->hstate = size_to_hstate(ps);
-               if (!ctx->hstate) {
+               h = size_to_hstate(ps);
+               if (!h) {
                        pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
                        return -EINVAL;
                }
+               ctx->hstate = h;
                return 0;
 
        case Opt_min_size:
index 8eec84c651bfba2da05af6a834c4ad3fe7a60f2b..cb3cda1390adb16e1ad8031783849ba59022db87 100644 (file)
@@ -2763,9 +2763,7 @@ static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl)
  *     leafno  - the number of the leaf to be updated.
  *     newval  - the new value for the leaf.
  *
- * RETURN VALUES:
- *  0          - success
- *     -EIO    - i/o error
+ * RETURN VALUES: none
  */
 static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
 {
@@ -2792,10 +2790,6 @@ static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
                 * get the buddy size (number of words covered) of
                 * the new value.
                 */
-
-               if ((newval - tp->dmt_budmin) > BUDMIN)
-                       return -EIO;
-
                budsz = BUDSIZE(newval, tp->dmt_budmin);
 
                /* try to join.
index 8b2bd65d70e7257022f139d7755d650a2f4b3ac6..bce1d7ac95caaa6ae48ba62c094d43c9da27298e 100644 (file)
@@ -54,9 +54,9 @@ static bool kernfs_lockdep(struct kernfs_node *kn)
 static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
 {
        if (!kn)
-               return strlcpy(buf, "(null)", buflen);
+               return strscpy(buf, "(null)", buflen);
 
-       return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
+       return strscpy(buf, kn->parent ? kn->name : "/", buflen);
 }
 
 /* kernfs_node_depth - compute depth from @from to @to */
@@ -127,7 +127,7 @@ static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
  *
  * [3] when @kn_to is %NULL result will be "(null)"
  *
- * Return: the length of the full path.  If the full length is equal to or
+ * Return: the length of the constructed path.  If the path would have been
  * greater than @buflen, @buf contains the truncated path with the trailing
  * '\0'.  On error, -errno is returned.
  */
@@ -138,16 +138,17 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
        struct kernfs_node *kn, *common;
        const char parent_str[] = "/..";
        size_t depth_from, depth_to, len = 0;
+       ssize_t copied;
        int i, j;
 
        if (!kn_to)
-               return strlcpy(buf, "(null)", buflen);
+               return strscpy(buf, "(null)", buflen);
 
        if (!kn_from)
                kn_from = kernfs_root(kn_to)->kn;
 
        if (kn_from == kn_to)
-               return strlcpy(buf, "/", buflen);
+               return strscpy(buf, "/", buflen);
 
        common = kernfs_common_ancestor(kn_from, kn_to);
        if (WARN_ON(!common))
@@ -158,18 +159,19 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
 
        buf[0] = '\0';
 
-       for (i = 0; i < depth_from; i++)
-               len += strlcpy(buf + len, parent_str,
-                              len < buflen ? buflen - len : 0);
+       for (i = 0; i < depth_from; i++) {
+               copied = strscpy(buf + len, parent_str, buflen - len);
+               if (copied < 0)
+                       return copied;
+               len += copied;
+       }
 
        /* Calculate how many bytes we need for the rest */
        for (i = depth_to - 1; i >= 0; i--) {
                for (kn = kn_to, j = 0; j < i; j++)
                        kn = kn->parent;
-               len += strlcpy(buf + len, "/",
-                              len < buflen ? buflen - len : 0);
-               len += strlcpy(buf + len, kn->name,
-                              len < buflen ? buflen - len : 0);
+
+               len += scnprintf(buf + len, buflen - len, "/%s", kn->name);
        }
 
        return len;
@@ -182,12 +184,12 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
  * @buflen: size of @buf
  *
  * Copies the name of @kn into @buf of @buflen bytes.  The behavior is
- * similar to strlcpy().
+ * similar to strscpy().
  *
  * Fills buffer with "(null)" if @kn is %NULL.
  *
- * Return: the length of @kn's name and if @buf isn't long enough,
- * it's filled up to @buflen-1 and nul terminated.
+ * Return: the resulting length of @buf. If @buf isn't long enough,
+ * it's filled up to @buflen-1 and nul terminated, and returns -E2BIG.
  *
  * This function can be called from any context.
  */
@@ -214,7 +216,7 @@ int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
  * path (which includes '..'s) as needed to reach from @from to @to is
  * returned.
  *
- * Return: the length of the full path.  If the full length is equal to or
+ * Return: the length of the constructed path.  If the path would have been
  * greater than @buflen, @buf contains the truncated path with the trailing
  * '\0'.  On error, -errno is returned.
  */
@@ -265,12 +267,10 @@ void pr_cont_kernfs_path(struct kernfs_node *kn)
        sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
                                   sizeof(kernfs_pr_cont_buf));
        if (sz < 0) {
-               pr_cont("(error)");
-               goto out;
-       }
-
-       if (sz >= sizeof(kernfs_pr_cont_buf)) {
-               pr_cont("(name too long)");
+               if (sz == -E2BIG)
+                       pr_cont("(name too long)");
+               else
+                       pr_cont("(error)");
                goto out;
        }
 
@@ -676,6 +676,18 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
 {
        struct kernfs_node *kn;
 
+       if (parent->mode & S_ISGID) {
+               /* this code block imitates inode_init_owner() for
+                * kernfs
+                */
+
+               if (parent->iattr)
+                       gid = parent->iattr->ia_gid;
+
+               if (flags & KERNFS_DIR)
+                       mode |= S_ISGID;
+       }
+
        kn = __kernfs_new_node(kernfs_root(parent), parent,
                               name, mode, uid, gid, flags);
        if (kn) {
@@ -850,16 +862,16 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
                                          const unsigned char *path,
                                          const void *ns)
 {
-       size_t len;
+       ssize_t len;
        char *p, *name;
 
        lockdep_assert_held_read(&kernfs_root(parent)->kernfs_rwsem);
 
        spin_lock_irq(&kernfs_pr_cont_lock);
 
-       len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
+       len = strscpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
 
-       if (len >= sizeof(kernfs_pr_cont_buf)) {
+       if (len < 0) {
                spin_unlock_irq(&kernfs_pr_cont_lock);
                return NULL;
        }
index f0cb729e9a97811dbc11cd0d250217b8a39db2ca..ffa4565c275a7a1f9dfeb5075ccc82261c971c29 100644 (file)
@@ -447,7 +447,7 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
         * warnings and we don't want to add spurious locking dependency
         * between the two.  Check whether mmap is actually implemented
         * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
-        * comment in kernfs_file_open() for more details.
+        * comment in kernfs_fop_open() for more details.
         */
        if (!(of->kn->flags & KERNFS_HAS_MMAP))
                return -ENODEV;
index 4628edde2e7e1ad7a4e59b7f1117331a83d29b38..0c93cad0f0acac80425dcdbd21b1eedaffe8c4d6 100644 (file)
@@ -125,9 +125,6 @@ static struct dentry *__kernfs_fh_to_dentry(struct super_block *sb,
 
        inode = kernfs_get_inode(sb, kn);
        kernfs_put(kn);
-       if (!inode)
-               return ERR_PTR(-ESTALE);
-
        return d_obtain_alias(inode);
 }
 
index 5c318d657503c72f0f903c652d0378577047ecf6..4e0de939fea127034c24d7badb18253a9351b52e 100644 (file)
@@ -2572,13 +2572,13 @@ static int filename_parentat(int dfd, struct filename *name,
 }
 
 /* does lookup, returns the object with parent locked */
-static struct dentry *__kern_path_locked(struct filename *name, struct path *path)
+static struct dentry *__kern_path_locked(int dfd, struct filename *name, struct path *path)
 {
        struct dentry *d;
        struct qstr last;
        int type, error;
 
-       error = filename_parentat(AT_FDCWD, name, 0, path, &last, &type);
+       error = filename_parentat(dfd, name, 0, path, &last, &type);
        if (error)
                return ERR_PTR(error);
        if (unlikely(type != LAST_NORM)) {
@@ -2597,12 +2597,22 @@ static struct dentry *__kern_path_locked(struct filename *name, struct path *pat
 struct dentry *kern_path_locked(const char *name, struct path *path)
 {
        struct filename *filename = getname_kernel(name);
-       struct dentry *res = __kern_path_locked(filename, path);
+       struct dentry *res = __kern_path_locked(AT_FDCWD, filename, path);
 
        putname(filename);
        return res;
 }
 
+struct dentry *user_path_locked_at(int dfd, const char __user *name, struct path *path)
+{
+       struct filename *filename = getname(name);
+       struct dentry *res = __kern_path_locked(dfd, filename, path);
+
+       putname(filename);
+       return res;
+}
+EXPORT_SYMBOL(user_path_locked_at);
+
 int kern_path(const char *name, unsigned int flags, struct path *path)
 {
        struct filename *filename = getname_kernel(name);
index ef1fd6829814cdf9f97ddf10968a11fc7b91a3c9..5a51315c6678145467520800ceedc3378df5e7da 100644 (file)
@@ -4472,10 +4472,15 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
        /*
         * If this is an attached mount make sure it's located in the callers
         * mount namespace. If it's not don't let the caller interact with it.
-        * If this is a detached mount make sure it has an anonymous mount
-        * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE.
+        *
+        * If this mount doesn't have a parent it's most often simply a
+        * detached mount with an anonymous mount namespace. IOW, something
+        * that's simply not attached yet. But there are apparently also users
+        * that do change mount properties on the rootfs itself. That obviously
+        * neither has a parent nor is it a detached mount so we cannot
+        * unconditionally check for detached mounts.
         */
-       if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns)))
+       if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
                goto out;
 
        /*
@@ -5042,13 +5047,12 @@ static struct mount *listmnt_next(struct mount *curr)
        return node_to_mount(rb_next(&curr->mnt_node));
 }
 
-static ssize_t do_listmount(struct mount *first, struct path *orig, u64 mnt_id,
-                           u64 __user *buf, size_t bufsize,
-                           const struct path *root)
+static ssize_t do_listmount(struct mount *first, struct path *orig,
+                           u64 mnt_parent_id, u64 __user *mnt_ids,
+                           size_t nr_mnt_ids, const struct path *root)
 {
        struct mount *r;
-       ssize_t ctr;
-       int err;
+       ssize_t ret;
 
        /*
         * Don't trigger audit denials. We just want to determine what
@@ -5058,50 +5062,57 @@ static ssize_t do_listmount(struct mount *first, struct path *orig, u64 mnt_id,
            !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
                return -EPERM;
 
-       err = security_sb_statfs(orig->dentry);
-       if (err)
-               return err;
+       ret = security_sb_statfs(orig->dentry);
+       if (ret)
+               return ret;
 
-       for (ctr = 0, r = first; r && ctr < bufsize; r = listmnt_next(r)) {
-               if (r->mnt_id_unique == mnt_id)
+       for (ret = 0, r = first; r && nr_mnt_ids; r = listmnt_next(r)) {
+               if (r->mnt_id_unique == mnt_parent_id)
                        continue;
                if (!is_path_reachable(r, r->mnt.mnt_root, orig))
                        continue;
-               ctr = array_index_nospec(ctr, bufsize);
-               if (put_user(r->mnt_id_unique, buf + ctr))
+               if (put_user(r->mnt_id_unique, mnt_ids))
                        return -EFAULT;
-               if (check_add_overflow(ctr, 1, &ctr))
-                       return -ERANGE;
+               mnt_ids++;
+               nr_mnt_ids--;
+               ret++;
        }
-       return ctr;
+       return ret;
 }
 
-SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
-               u64 __user *, buf, size_t, bufsize, unsigned int, flags)
+SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req, u64 __user *,
+               mnt_ids, size_t, nr_mnt_ids, unsigned int, flags)
 {
        struct mnt_namespace *ns = current->nsproxy->mnt_ns;
        struct mnt_id_req kreq;
        struct mount *first;
        struct path root, orig;
-       u64 mnt_id, last_mnt_id;
+       u64 mnt_parent_id, last_mnt_id;
+       const size_t maxcount = (size_t)-1 >> 3;
        ssize_t ret;
 
        if (flags)
                return -EINVAL;
 
+       if (unlikely(nr_mnt_ids > maxcount))
+               return -EFAULT;
+
+       if (!access_ok(mnt_ids, nr_mnt_ids * sizeof(*mnt_ids)))
+               return -EFAULT;
+
        ret = copy_mnt_id_req(req, &kreq);
        if (ret)
                return ret;
-       mnt_id = kreq.mnt_id;
+       mnt_parent_id = kreq.mnt_id;
        last_mnt_id = kreq.param;
 
        down_read(&namespace_sem);
        get_fs_root(current->fs, &root);
-       if (mnt_id == LSMT_ROOT) {
+       if (mnt_parent_id == LSMT_ROOT) {
                orig = root;
        } else {
                ret = -ENOENT;
-               orig.mnt  = lookup_mnt_in_ns(mnt_id, ns);
+               orig.mnt = lookup_mnt_in_ns(mnt_parent_id, ns);
                if (!orig.mnt)
                        goto err;
                orig.dentry = orig.mnt->mnt_root;
@@ -5111,7 +5122,7 @@ SYSCALL_DEFINE4(listmount, const struct mnt_id_req __user *, req,
        else
                first = mnt_find_id_at(ns, last_mnt_id + 1);
 
-       ret = do_listmount(first, &orig, mnt_id, buf, bufsize, &root);
+       ret = do_listmount(first, &orig, mnt_parent_id, mnt_ids, nr_mnt_ids, &root);
 err:
        path_put(&root);
        up_read(&namespace_sem);
index b4db21022cb43f3b371e31687a89ec309f5b3726..bec805e0c44c072190394283a598d27ee095d0b7 100644 (file)
@@ -21,3 +21,42 @@ config NETFS_STATS
          multi-CPU system these may be on cachelines that keep bouncing
          between CPUs.  On the other hand, the stats are very useful for
          debugging purposes.  Saying 'Y' here is recommended.
+
+config FSCACHE
+       bool "General filesystem local caching manager"
+       depends on NETFS_SUPPORT
+       help
+         This option enables a generic filesystem caching manager that can be
+         used by various network and other filesystems to cache data locally.
+         Different sorts of caches can be plugged in, depending on the
+         resources available.
+
+         See Documentation/filesystems/caching/fscache.rst for more information.
+
+config FSCACHE_STATS
+       bool "Gather statistical information on local caching"
+       depends on FSCACHE && PROC_FS
+       select NETFS_STATS
+       help
+         This option causes statistical information to be gathered on local
+         caching and exported through file:
+
+               /proc/fs/fscache/stats
+
+         The gathering of statistics adds a certain amount of overhead to
+         execution as there are a quite a few stats gathered, and on a
+         multi-CPU system these may be on cachelines that keep bouncing
+         between CPUs.  On the other hand, the stats are very useful for
+         debugging purposes.  Saying 'Y' here is recommended.
+
+         See Documentation/filesystems/caching/fscache.rst for more information.
+
+config FSCACHE_DEBUG
+       bool "Debug FS-Cache"
+       depends on FSCACHE
+       help
+         This permits debugging to be dynamically enabled in the local caching
+         management module.  If this is set, the debugging output may be
+         enabled by setting bits in /sys/modules/fscache/parameter/debug.
+
+         See Documentation/filesystems/caching/fscache.rst for more information.
index 386d6fb92793a5d1f4f247e9a1440bea7a1eb59d..d4d1d799819ec4c92807449aebeb38e744d43991 100644 (file)
@@ -2,11 +2,29 @@
 
 netfs-y := \
        buffered_read.o \
+       buffered_write.o \
+       direct_read.o \
+       direct_write.o \
        io.o \
        iterator.o \
+       locking.o \
        main.o \
-       objects.o
+       misc.o \
+       objects.o \
+       output.o
 
 netfs-$(CONFIG_NETFS_STATS) += stats.o
 
-obj-$(CONFIG_NETFS_SUPPORT) := netfs.o
+netfs-$(CONFIG_FSCACHE) += \
+       fscache_cache.o \
+       fscache_cookie.o \
+       fscache_io.o \
+       fscache_main.o \
+       fscache_volume.o
+
+ifeq ($(CONFIG_PROC_FS),y)
+netfs-$(CONFIG_FSCACHE) += fscache_proc.o
+endif
+netfs-$(CONFIG_FSCACHE_STATS) += fscache_stats.o
+
+obj-$(CONFIG_NETFS_SUPPORT) += netfs.o
index 2cd3ccf4c439960053e436d63792bc5bf7a914de..3298c29b5548398c0026ccf6ab30c32cb290070d 100644 (file)
@@ -16,6 +16,7 @@
 void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 {
        struct netfs_io_subrequest *subreq;
+       struct netfs_folio *finfo;
        struct folio *folio;
        pgoff_t start_page = rreq->start / PAGE_SIZE;
        pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
@@ -63,6 +64,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
                                break;
                        }
                        if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+                               trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
                                folio_start_fscache(folio);
                                folio_started = true;
                        }
@@ -86,11 +88,20 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 
                if (!pg_failed) {
                        flush_dcache_folio(folio);
+                       finfo = netfs_folio_info(folio);
+                       if (finfo) {
+                               trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
+                               if (finfo->netfs_group)
+                                       folio_change_private(folio, finfo->netfs_group);
+                               else
+                                       folio_detach_private(folio);
+                               kfree(finfo);
+                       }
                        folio_mark_uptodate(folio);
                }
 
                if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
-                       if (folio_index(folio) == rreq->no_unlock_folio &&
+                       if (folio->index == rreq->no_unlock_folio &&
                            test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
                                _debug("no unlock");
                        else
@@ -147,6 +158,15 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq,
        }
 }
 
+/*
+ * Begin an operation, and fetch the stored zero point value from the cookie if
+ * available.
+ */
+static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
+{
+       return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
+}
+
 /**
  * netfs_readahead - Helper to manage a read request
  * @ractl: The description of the readahead request
@@ -180,11 +200,9 @@ void netfs_readahead(struct readahead_control *ractl)
        if (IS_ERR(rreq))
                return;
 
-       if (ctx->ops->begin_cache_operation) {
-               ret = ctx->ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-                       goto cleanup_free;
-       }
+       ret = netfs_begin_cache_read(rreq, ctx);
+       if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+               goto cleanup_free;
 
        netfs_stat(&netfs_n_rh_readahead);
        trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
@@ -192,6 +210,10 @@ void netfs_readahead(struct readahead_control *ractl)
 
        netfs_rreq_expand(rreq, ractl);
 
+       /* Set up the output buffer */
+       iov_iter_xarray(&rreq->iter, ITER_DEST, &ractl->mapping->i_pages,
+                       rreq->start, rreq->len);
+
        /* Drop the refs on the folios here rather than in the cache or
         * filesystem.  The locks will be dropped in netfs_rreq_unlock().
         */
@@ -199,6 +221,7 @@ void netfs_readahead(struct readahead_control *ractl)
                ;
 
        netfs_begin_read(rreq, false);
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
        return;
 
 cleanup_free:
@@ -223,12 +246,13 @@ EXPORT_SYMBOL(netfs_readahead);
  */
 int netfs_read_folio(struct file *file, struct folio *folio)
 {
-       struct address_space *mapping = folio_file_mapping(folio);
+       struct address_space *mapping = folio->mapping;
        struct netfs_io_request *rreq;
        struct netfs_inode *ctx = netfs_inode(mapping->host);
+       struct folio *sink = NULL;
        int ret;
 
-       _enter("%lx", folio_index(folio));
+       _enter("%lx", folio->index);
 
        rreq = netfs_alloc_request(mapping, file,
                                   folio_file_pos(folio), folio_size(folio),
@@ -238,15 +262,64 @@ int netfs_read_folio(struct file *file, struct folio *folio)
                goto alloc_error;
        }
 
-       if (ctx->ops->begin_cache_operation) {
-               ret = ctx->ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-                       goto discard;
-       }
+       ret = netfs_begin_cache_read(rreq, ctx);
+       if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+               goto discard;
 
        netfs_stat(&netfs_n_rh_readpage);
        trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
-       return netfs_begin_read(rreq, true);
+
+       /* Set up the output buffer */
+       if (folio_test_dirty(folio)) {
+               /* Handle someone trying to read from an unflushed streaming
+                * write.  We fiddle the buffer so that a gap at the beginning
+                * and/or a gap at the end get copied to, but the middle is
+                * discarded.
+                */
+               struct netfs_folio *finfo = netfs_folio_info(folio);
+               struct bio_vec *bvec;
+               unsigned int from = finfo->dirty_offset;
+               unsigned int to = from + finfo->dirty_len;
+               unsigned int off = 0, i = 0;
+               size_t flen = folio_size(folio);
+               size_t nr_bvec = flen / PAGE_SIZE + 2;
+               size_t part;
+
+               ret = -ENOMEM;
+               bvec = kmalloc_array(nr_bvec, sizeof(*bvec), GFP_KERNEL);
+               if (!bvec)
+                       goto discard;
+
+               sink = folio_alloc(GFP_KERNEL, 0);
+               if (!sink)
+                       goto discard;
+
+               trace_netfs_folio(folio, netfs_folio_trace_read_gaps);
+
+               rreq->direct_bv = bvec;
+               rreq->direct_bv_count = nr_bvec;
+               if (from > 0) {
+                       bvec_set_folio(&bvec[i++], folio, from, 0);
+                       off = from;
+               }
+               while (off < to) {
+                       part = min_t(size_t, to - off, PAGE_SIZE);
+                       bvec_set_folio(&bvec[i++], sink, part, 0);
+                       off += part;
+               }
+               if (to < flen)
+                       bvec_set_folio(&bvec[i++], folio, flen - to, to);
+               iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len);
+       } else {
+               iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
+                               rreq->start, rreq->len);
+       }
+
+       ret = netfs_begin_read(rreq, true);
+       if (sink)
+               folio_put(sink);
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+       return ret < 0 ? ret : 0;
 
 discard:
        netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
@@ -387,14 +460,12 @@ retry:
                ret = PTR_ERR(rreq);
                goto error;
        }
-       rreq->no_unlock_folio   = folio_index(folio);
+       rreq->no_unlock_folio   = folio->index;
        __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
 
-       if (ctx->ops->begin_cache_operation) {
-               ret = ctx->ops->begin_cache_operation(rreq);
-               if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-                       goto error_put;
-       }
+       ret = netfs_begin_cache_read(rreq, ctx);
+       if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+               goto error_put;
 
        netfs_stat(&netfs_n_rh_write_begin);
        trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
@@ -405,6 +476,10 @@ retry:
        ractl._nr_pages = folio_nr_pages(folio);
        netfs_rreq_expand(rreq, &ractl);
 
+       /* Set up the output buffer */
+       iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
+                       rreq->start, rreq->len);
+
        /* We hold the folio locks, so we can drop the references */
        folio_get(folio);
        while (readahead_folio(&ractl))
@@ -413,6 +488,7 @@ retry:
        ret = netfs_begin_read(rreq, true);
        if (ret < 0)
                goto error;
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 
 have_folio:
        ret = folio_wait_fscache_killable(folio);
@@ -434,3 +510,124 @@ error:
        return ret;
 }
 EXPORT_SYMBOL(netfs_write_begin);
+
+/*
+ * Preload the data into a page we're proposing to write into.
+ */
+int netfs_prefetch_for_write(struct file *file, struct folio *folio,
+                            size_t offset, size_t len)
+{
+       struct netfs_io_request *rreq;
+       struct address_space *mapping = folio->mapping;
+       struct netfs_inode *ctx = netfs_inode(mapping->host);
+       unsigned long long start = folio_pos(folio);
+       size_t flen = folio_size(folio);
+       int ret;
+
+       _enter("%zx @%llx", flen, start);
+
+       ret = -ENOMEM;
+
+       rreq = netfs_alloc_request(mapping, file, start, flen,
+                                  NETFS_READ_FOR_WRITE);
+       if (IS_ERR(rreq)) {
+               ret = PTR_ERR(rreq);
+               goto error;
+       }
+
+       rreq->no_unlock_folio = folio->index;
+       __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
+       ret = netfs_begin_cache_read(rreq, ctx);
+       if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+               goto error_put;
+
+       netfs_stat(&netfs_n_rh_write_begin);
+       trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write);
+
+       /* Set up the output buffer */
+       iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
+                       rreq->start, rreq->len);
+
+       ret = netfs_begin_read(rreq, true);
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+       return ret;
+
+error_put:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+error:
+       _leave(" = %d", ret);
+       return ret;
+}
+
+/**
+ * netfs_buffered_read_iter - Filesystem buffered I/O read routine
+ * @iocb: kernel I/O control block
+ * @iter: destination for the data read
+ *
+ * This is the ->read_iter() routine for all filesystems that can use the page
+ * cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
+ * returned when no data can be read without waiting for I/O requests to
+ * complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
+ * shall be made for the read or for readahead.  When no data can be read,
+ * -EAGAIN shall be returned.  When readahead would be triggered, a partial,
+ * possibly empty read shall be returned.
+ *
+ * Return:
+ * * number of bytes copied, even for partial reads
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
+ */
+ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct inode *inode = file_inode(iocb->ki_filp);
+       struct netfs_inode *ictx = netfs_inode(inode);
+       ssize_t ret;
+
+       if (WARN_ON_ONCE((iocb->ki_flags & IOCB_DIRECT) ||
+                        test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags)))
+               return -EINVAL;
+
+       ret = netfs_start_io_read(inode);
+       if (ret == 0) {
+               ret = filemap_read(iocb, iter, 0);
+               netfs_end_io_read(inode);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(netfs_buffered_read_iter);
+
+/**
+ * netfs_file_read_iter - Generic filesystem read routine
+ * @iocb: kernel I/O control block
+ * @iter: destination for the data read
+ *
+ * This is the ->read_iter() routine for all filesystems that can use the page
+ * cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
+ * returned when no data can be read without waiting for I/O requests to
+ * complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
+ * shall be made for the read or for readahead.  When no data can be read,
+ * -EAGAIN shall be returned.  When readahead would be triggered, a partial,
+ * possibly empty read shall be returned.
+ *
+ * Return:
+ * * number of bytes copied, even for partial reads
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
+ */
+ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct netfs_inode *ictx = netfs_inode(iocb->ki_filp->f_mapping->host);
+
+       if ((iocb->ki_flags & IOCB_DIRECT) ||
+           test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
+               return netfs_unbuffered_read_iter(iocb, iter);
+
+       return netfs_buffered_read_iter(iocb, iter);
+}
+EXPORT_SYMBOL(netfs_file_read_iter);
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
new file mode 100644 (file)
index 0000000..a3059b3
--- /dev/null
@@ -0,0 +1,1254 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem high-level write support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/pagevec.h>
+#include "internal.h"
+
+/*
+ * Determined write method.  Adjust netfs_folio_traces if this is changed.
+ */
+enum netfs_how_to_modify {
+       NETFS_FOLIO_IS_UPTODATE,        /* Folio is uptodate already */
+       NETFS_JUST_PREFETCH,            /* We have to read the folio anyway */
+       NETFS_WHOLE_FOLIO_MODIFY,       /* We're going to overwrite the whole folio */
+       NETFS_MODIFY_AND_CLEAR,         /* We can assume there is no data to be downloaded. */
+       NETFS_STREAMING_WRITE,          /* Store incomplete data in non-uptodate page. */
+       NETFS_STREAMING_WRITE_CONT,     /* Continue streaming write. */
+       NETFS_FLUSH_CONTENT,            /* Flush incompatible content. */
+};
+
+static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq);
+
+static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
+{
+       if (netfs_group && !folio_get_private(folio))
+               folio_attach_private(folio, netfs_get_group(netfs_group));
+}
+
+#if IS_ENABLED(CONFIG_FSCACHE)
+static void netfs_folio_start_fscache(bool caching, struct folio *folio)
+{
+       if (caching)
+               folio_start_fscache(folio);
+}
+#else
+static void netfs_folio_start_fscache(bool caching, struct folio *folio)
+{
+}
+#endif
+
+/*
+ * Decide how we should modify a folio.  We might be attempting to do
+ * write-streaming, in which case we don't want to a local RMW cycle if we can
+ * avoid it.  If we're doing local caching or content crypto, we award that
+ * priority over avoiding RMW.  If the file is open readably, then we also
+ * assume that we may want to read what we wrote.
+ */
+static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
+                                                   struct file *file,
+                                                   struct folio *folio,
+                                                   void *netfs_group,
+                                                   size_t flen,
+                                                   size_t offset,
+                                                   size_t len,
+                                                   bool maybe_trouble)
+{
+       struct netfs_folio *finfo = netfs_folio_info(folio);
+       loff_t pos = folio_file_pos(folio);
+
+       _enter("");
+
+       if (netfs_folio_group(folio) != netfs_group)
+               return NETFS_FLUSH_CONTENT;
+
+       if (folio_test_uptodate(folio))
+               return NETFS_FOLIO_IS_UPTODATE;
+
+       if (pos >= ctx->zero_point)
+               return NETFS_MODIFY_AND_CLEAR;
+
+       if (!maybe_trouble && offset == 0 && len >= flen)
+               return NETFS_WHOLE_FOLIO_MODIFY;
+
+       if (file->f_mode & FMODE_READ)
+               goto no_write_streaming;
+       if (test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
+               goto no_write_streaming;
+
+       if (netfs_is_cache_enabled(ctx)) {
+               /* We don't want to get a streaming write on a file that loses
+                * caching service temporarily because the backing store got
+                * culled.
+                */
+               if (!test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
+                       set_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags);
+               goto no_write_streaming;
+       }
+
+       if (!finfo)
+               return NETFS_STREAMING_WRITE;
+
+       /* We can continue a streaming write only if it continues on from the
+        * previous.  If it overlaps, we must flush lest we suffer a partial
+        * copy and disjoint dirty regions.
+        */
+       if (offset == finfo->dirty_offset + finfo->dirty_len)
+               return NETFS_STREAMING_WRITE_CONT;
+       return NETFS_FLUSH_CONTENT;
+
+no_write_streaming:
+       if (finfo) {
+               netfs_stat(&netfs_n_wh_wstream_conflict);
+               return NETFS_FLUSH_CONTENT;
+       }
+       return NETFS_JUST_PREFETCH;
+}
+
+/*
+ * Grab a folio for writing and lock it.  Attempt to allocate as large a folio
+ * as possible to hold as much of the remaining length as possible in one go.
+ */
+static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
+                                               loff_t pos, size_t part)
+{
+       pgoff_t index = pos / PAGE_SIZE;
+       fgf_t fgp_flags = FGP_WRITEBEGIN;
+
+       if (mapping_large_folio_support(mapping))
+               fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);
+
+       return __filemap_get_folio(mapping, index, fgp_flags,
+                                  mapping_gfp_mask(mapping));
+}
+
+/**
+ * netfs_perform_write - Copy data into the pagecache.
+ * @iocb: The operation parameters
+ * @iter: The source buffer
+ * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
+ *
+ * Copy data into pagecache pages attached to the inode specified by @iocb.
+ * The caller must hold appropriate inode locks.
+ *
+ * Dirty pages are tagged with a netfs_folio struct if they're not up to date
+ * to indicate the range modified.  Dirty pages may also be tagged with a
+ * netfs-specific grouping such that data from an old group gets flushed before
+ * a new one is started.
+ */
+ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+                           struct netfs_group *netfs_group)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
+       struct address_space *mapping = inode->i_mapping;
+       struct netfs_inode *ctx = netfs_inode(inode);
+       struct writeback_control wbc = {
+               .sync_mode      = WB_SYNC_NONE,
+               .for_sync       = true,
+               .nr_to_write    = LONG_MAX,
+               .range_start    = iocb->ki_pos,
+               .range_end      = iocb->ki_pos + iter->count,
+       };
+       struct netfs_io_request *wreq = NULL;
+       struct netfs_folio *finfo;
+       struct folio *folio;
+       enum netfs_how_to_modify howto;
+       enum netfs_folio_trace trace;
+       unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
+       ssize_t written = 0, ret;
+       loff_t i_size, pos = iocb->ki_pos, from, to;
+       size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
+       bool maybe_trouble = false;
+
+       if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
+                    iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
+           ) {
+               if (pos < i_size_read(inode)) {
+                       ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
+                       if (ret < 0) {
+                               goto out;
+                       }
+               }
+
+               wbc_attach_fdatawrite_inode(&wbc, mapping->host);
+
+               wreq = netfs_begin_writethrough(iocb, iter->count);
+               if (IS_ERR(wreq)) {
+                       wbc_detach_inode(&wbc);
+                       ret = PTR_ERR(wreq);
+                       wreq = NULL;
+                       goto out;
+               }
+               if (!is_sync_kiocb(iocb))
+                       wreq->iocb = iocb;
+               wreq->cleanup = netfs_cleanup_buffered_write;
+       }
+
+       do {
+               size_t flen;
+               size_t offset;  /* Offset into pagecache folio */
+               size_t part;    /* Bytes to write to folio */
+               size_t copied;  /* Bytes copied from user */
+
+               ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
+               if (unlikely(ret < 0))
+                       break;
+
+               offset = pos & (max_chunk - 1);
+               part = min(max_chunk - offset, iov_iter_count(iter));
+
+               /* Bring in the user pages that we will copy from _first_ lest
+                * we hit a nasty deadlock on copying from the same page as
+                * we're writing to, without it being marked uptodate.
+                *
+                * Not only is this an optimisation, but it is also required to
+                * check that the address is actually valid, when atomic
+                * usercopies are used below.
+                *
+                * We rely on the page being held onto long enough by the LRU
+                * that we can grab it below if this causes it to be read.
+                */
+               ret = -EFAULT;
+               if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
+                       break;
+
+               folio = netfs_grab_folio_for_write(mapping, pos, part);
+               if (IS_ERR(folio)) {
+                       ret = PTR_ERR(folio);
+                       break;
+               }
+
+               flen = folio_size(folio);
+               offset = pos & (flen - 1);
+               part = min_t(size_t, flen - offset, part);
+
+               if (signal_pending(current)) {
+                       ret = written ? -EINTR : -ERESTARTSYS;
+                       goto error_folio_unlock;
+               }
+
+               /* See if we need to prefetch the area we're going to modify.
+                * We need to do this before we get a lock on the folio in case
+                * there's more than one writer competing for the same cache
+                * block.
+                */
+               howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
+                                           flen, offset, part, maybe_trouble);
+               _debug("howto %u", howto);
+               switch (howto) {
+               case NETFS_JUST_PREFETCH:
+                       ret = netfs_prefetch_for_write(file, folio, offset, part);
+                       if (ret < 0) {
+                               _debug("prefetch = %zd", ret);
+                               goto error_folio_unlock;
+                       }
+                       break;
+               case NETFS_FOLIO_IS_UPTODATE:
+               case NETFS_WHOLE_FOLIO_MODIFY:
+               case NETFS_STREAMING_WRITE_CONT:
+                       break;
+               case NETFS_MODIFY_AND_CLEAR:
+                       zero_user_segment(&folio->page, 0, offset);
+                       break;
+               case NETFS_STREAMING_WRITE:
+                       ret = -EIO;
+                       if (WARN_ON(folio_get_private(folio)))
+                               goto error_folio_unlock;
+                       break;
+               case NETFS_FLUSH_CONTENT:
+                       trace_netfs_folio(folio, netfs_flush_content);
+                       from = folio_pos(folio);
+                       to = from + folio_size(folio) - 1;
+                       folio_unlock(folio);
+                       folio_put(folio);
+                       ret = filemap_write_and_wait_range(mapping, from, to);
+                       if (ret < 0)
+                               goto error_folio_unlock;
+                       continue;
+               }
+
+               if (mapping_writably_mapped(mapping))
+                       flush_dcache_folio(folio);
+
+               copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+
+               flush_dcache_folio(folio);
+
+               /* Deal with a (partially) failed copy */
+               if (copied == 0) {
+                       ret = -EFAULT;
+                       goto error_folio_unlock;
+               }
+
+               trace = (enum netfs_folio_trace)howto;
+               switch (howto) {
+               case NETFS_FOLIO_IS_UPTODATE:
+               case NETFS_JUST_PREFETCH:
+                       netfs_set_group(folio, netfs_group);
+                       break;
+               case NETFS_MODIFY_AND_CLEAR:
+                       zero_user_segment(&folio->page, offset + copied, flen);
+                       netfs_set_group(folio, netfs_group);
+                       folio_mark_uptodate(folio);
+                       break;
+               case NETFS_WHOLE_FOLIO_MODIFY:
+                       if (unlikely(copied < part)) {
+                               maybe_trouble = true;
+                               iov_iter_revert(iter, copied);
+                               copied = 0;
+                               goto retry;
+                       }
+                       netfs_set_group(folio, netfs_group);
+                       folio_mark_uptodate(folio);
+                       break;
+               case NETFS_STREAMING_WRITE:
+                       if (offset == 0 && copied == flen) {
+                               netfs_set_group(folio, netfs_group);
+                               folio_mark_uptodate(folio);
+                               trace = netfs_streaming_filled_page;
+                               break;
+                       }
+                       finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
+                       if (!finfo) {
+                               iov_iter_revert(iter, copied);
+                               ret = -ENOMEM;
+                               goto error_folio_unlock;
+                       }
+                       finfo->netfs_group = netfs_get_group(netfs_group);
+                       finfo->dirty_offset = offset;
+                       finfo->dirty_len = copied;
+                       folio_attach_private(folio, (void *)((unsigned long)finfo |
+                                                            NETFS_FOLIO_INFO));
+                       break;
+               case NETFS_STREAMING_WRITE_CONT:
+                       finfo = netfs_folio_info(folio);
+                       finfo->dirty_len += copied;
+                       if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
+                               if (finfo->netfs_group)
+                                       folio_change_private(folio, finfo->netfs_group);
+                               else
+                                       folio_detach_private(folio);
+                               folio_mark_uptodate(folio);
+                               kfree(finfo);
+                               trace = netfs_streaming_cont_filled_page;
+                       }
+                       break;
+               default:
+                       WARN(true, "Unexpected modify type %u ix=%lx\n",
+                            howto, folio->index);
+                       ret = -EIO;
+                       goto error_folio_unlock;
+               }
+
+               trace_netfs_folio(folio, trace);
+
+               /* Update the inode size if we moved the EOF marker */
+               i_size = i_size_read(inode);
+               pos += copied;
+               if (pos > i_size) {
+                       if (ctx->ops->update_i_size) {
+                               ctx->ops->update_i_size(inode, pos);
+                       } else {
+                               i_size_write(inode, pos);
+#if IS_ENABLED(CONFIG_FSCACHE)
+                               fscache_update_cookie(ctx->cache, NULL, &pos);
+#endif
+                       }
+               }
+               written += copied;
+
+               if (likely(!wreq)) {
+                       folio_mark_dirty(folio);
+               } else {
+                       if (folio_test_dirty(folio))
+                               /* Sigh.  mmap. */
+                               folio_clear_dirty_for_io(folio);
+                       /* We make multiple writes to the folio... */
+                       if (!folio_test_writeback(folio)) {
+                               folio_wait_fscache(folio);
+                               folio_start_writeback(folio);
+                               folio_start_fscache(folio);
+                               if (wreq->iter.count == 0)
+                                       trace_netfs_folio(folio, netfs_folio_trace_wthru);
+                               else
+                                       trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
+                       }
+                       netfs_advance_writethrough(wreq, copied,
+                                                  offset + copied == flen);
+               }
+       retry:
+               folio_unlock(folio);
+               folio_put(folio);
+               folio = NULL;
+
+               cond_resched();
+       } while (iov_iter_count(iter));
+
+out:
+       if (unlikely(wreq)) {
+               ret = netfs_end_writethrough(wreq, iocb);
+               wbc_detach_inode(&wbc);
+               if (ret == -EIOCBQUEUED)
+                       return ret;
+       }
+
+       iocb->ki_pos += written;
+       _leave(" = %zd [%zd]", written, ret);
+       return written ? written : ret;
+
+error_folio_unlock:
+       folio_unlock(folio);
+       folio_put(folio);
+       goto out;
+}
+EXPORT_SYMBOL(netfs_perform_write);
+
+/**
+ * netfs_buffered_write_iter_locked - write data to a file
+ * @iocb:      IO state structure (file, offset, etc.)
+ * @from:      iov_iter with data to write
+ * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
+ *
+ * Carry out a buffered write to a file: strip any SUID/privilege bits, stamp
+ * the modification time and then feed the data into the pagecache write
+ * engine.
+ *
+ * The caller is expected to hold the appropriate locks around this function
+ * and to have run generic_write_checks() beforehand.  Any syncing needed
+ * afterwards (e.g. for O_SYNC) is also the caller's responsibility, as we
+ * want to avoid syncing under i_rwsem.
+ *
+ * Return:
+ * * number of bytes written, even for truncated writes
+ * * negative error code if no data has been written at all
+ */
+ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
+                                        struct netfs_group *netfs_group)
+{
+       struct file *file = iocb->ki_filp;
+       ssize_t ret;
+
+       trace_netfs_write_iter(iocb, from);
+
+       /* Strip privilege bits, then bump the file times; stop at the first
+        * failure so that no data is written after either step errors out.
+        */
+       ret = file_remove_privs(file);
+       if (ret == 0)
+               ret = file_update_time(file);
+       if (ret == 0)
+               ret = netfs_perform_write(iocb, from, netfs_group);
+
+       return ret;
+}
+EXPORT_SYMBOL(netfs_buffered_write_iter_locked);
+
+/**
+ * netfs_file_write_iter - write data to a file
+ * @iocb: IO state structure
+ * @from: iov_iter with data to write
+ *
+ * Perform a write to a file, writing into the pagecache if possible and doing
+ * an unbuffered write instead if not.
+ *
+ * Return:
+ * * Negative error code if no data has been written at all or if
+ *   vfs_fsync_range() failed for a synchronous write
+ * * Number of bytes written, even for truncated writes
+ */
+ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       struct netfs_inode *ictx = netfs_inode(inode);
+       ssize_t ret;
+
+       _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+
+       /* O_DIRECT writes and inodes marked unbuffered bypass the pagecache
+        * entirely.
+        */
+       if ((iocb->ki_flags & IOCB_DIRECT) ||
+           test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
+               return netfs_unbuffered_write_iter(iocb, from);
+
+       /* Take the netfs I/O write lock for the duration of the buffered
+        * write (presumably excludes conflicting DIO - see
+        * netfs_start_io_write() to confirm the exact exclusion).
+        */
+       ret = netfs_start_io_write(inode);
+       if (ret < 0)
+               return ret;
+
+       ret = generic_write_checks(iocb, from);
+       if (ret > 0)
+               ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
+       netfs_end_io_write(inode);
+       /* Sync outside of the I/O lock for O_SYNC/O_DSYNC writes. */
+       if (ret > 0)
+               ret = generic_write_sync(iocb, ret);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_file_write_iter);
+
+/*
+ * Notification that a previously read-only page is about to become writable.
+ * Note that the caller indicates a single page of a multipage folio.
+ *
+ * Returns VM_FAULT_LOCKED with the folio locked and marked as belonging to
+ * @netfs_group on success; VM_FAULT_RETRY/VM_FAULT_NOPAGE to make the caller
+ * retry the fault; VM_FAULT_SIGBUS/VM_FAULT_OOM on error.
+ */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
+{
+       struct folio *folio = page_folio(vmf->page);
+       struct file *file = vmf->vma->vm_file;
+       struct inode *inode = file_inode(file);
+       vm_fault_t ret = VM_FAULT_RETRY;
+       int err;
+
+       _enter("%lx", folio->index);
+
+       sb_start_pagefault(inode->i_sb);
+
+       if (folio_wait_writeback_killable(folio))
+               goto out;
+
+       if (folio_lock_killable(folio) < 0)
+               goto out;
+
+       /* The folio may have been truncated or invalidated whilst we were
+        * waiting for the lock; if so, let the caller retry the fault and
+        * look the page up again.
+        */
+       if (folio->mapping != inode->i_mapping) {
+               folio_unlock(folio);
+               ret = VM_FAULT_NOPAGE;
+               goto out;
+       }
+
+       /* Can we see a streaming write here? */
+       if (WARN_ON(!folio_test_uptodate(folio))) {
+               ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
+               goto out;
+       }
+
+       if (netfs_folio_group(folio) != netfs_group) {
+               /* The folio belongs to a different write grouping, so wait
+                * for the pending data to be flushed before it can be
+                * redirtied for this group.  Note filemap_fdatawait_range()
+                * takes an *inclusive* end byte, hence the -1.
+                *
+                * NOTE(review): this only waits for writeback that's already
+                * in flight; a dirty-but-not-under-writeback folio may need
+                * filemap_fdatawrite_range() first to avoid a retry livelock
+                * - confirm.
+                */
+               folio_unlock(folio);
+               err = filemap_fdatawait_range(inode->i_mapping,
+                                             folio_pos(folio),
+                                             folio_pos(folio) + folio_size(folio) - 1);
+               switch (err) {
+               case 0:
+                       ret = VM_FAULT_RETRY;
+                       goto out;
+               case -ENOMEM:
+                       ret = VM_FAULT_OOM;
+                       goto out;
+               default:
+                       ret = VM_FAULT_SIGBUS;
+                       goto out;
+               }
+       }
+
+       if (folio_test_dirty(folio))
+               trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
+       else
+               trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
+       netfs_set_group(folio, netfs_group);
+       file_update_time(file);
+       ret = VM_FAULT_LOCKED;
+out:
+       sb_end_pagefault(inode->i_sb);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_page_mkwrite);
+
+/*
+ * Kill all the pages in the given range
+ *
+ * Called on unrecoverable write failure: each folio covering
+ * [start, start + len) is marked not-up-to-date, taken out of the fscache
+ * and writeback states and removed from the pagecache so that stale data
+ * cannot be read back.
+ */
+static void netfs_kill_pages(struct address_space *mapping,
+                            loff_t start, loff_t len)
+{
+       struct folio *folio;
+       pgoff_t index = start / PAGE_SIZE;
+       pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
+
+       _enter("%llx-%llx", start, start + len - 1);
+
+       do {
+               _debug("kill %lx (to %lx)", index, last);
+
+               folio = filemap_get_folio(mapping, index);
+               if (IS_ERR(folio)) {
+                       /* Folio already gone from the cache; step one index. */
+                       next = index + 1;
+                       continue;
+               }
+
+               /* A multipage folio may cover several indices in one step. */
+               next = folio_next_index(folio);
+
+               trace_netfs_folio(folio, netfs_folio_trace_kill);
+               folio_clear_uptodate(folio);
+               if (folio_test_fscache(folio))
+                       folio_end_fscache(folio);
+               folio_end_writeback(folio);
+               folio_lock(folio);
+               generic_error_remove_folio(mapping, folio);
+               folio_unlock(folio);
+               folio_put(folio);
+
+       } while (index = next, index <= last);
+
+       _leave("");
+}
+
+/*
+ * Redirty all the pages in a given range.
+ *
+ * Called when a write failed with a retryable error (e.g. -ENOSPC, expired
+ * key): each folio covering [start, start + len) is put back on the dirty
+ * list and released from the fscache and writeback states so a later
+ * writeback pass can try again.
+ */
+static void netfs_redirty_pages(struct address_space *mapping,
+                               loff_t start, loff_t len)
+{
+       struct folio *folio;
+       pgoff_t index = start / PAGE_SIZE;
+       pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
+
+       _enter("%llx-%llx", start, start + len - 1);
+
+       do {
+               _debug("redirty %llx @%llx", len, start);
+
+               folio = filemap_get_folio(mapping, index);
+               if (IS_ERR(folio)) {
+                       /* Folio already gone from the cache; step one index. */
+                       next = index + 1;
+                       continue;
+               }
+
+               /* A multipage folio may cover several indices in one step. */
+               next = folio_next_index(folio);
+               trace_netfs_folio(folio, netfs_folio_trace_redirty);
+               filemap_dirty_folio(mapping, folio);
+               if (folio_test_fscache(folio))
+                       folio_end_fscache(folio);
+               folio_end_writeback(folio);
+               folio_put(folio);
+       } while (index = next, index <= last);
+
+       /* We may have redirtied a lot of memory; throttle if necessary. */
+       balance_dirty_pages_ratelimited(mapping);
+
+       _leave("");
+}
+
+/*
+ * Completion of write to server
+ *
+ * Walk the folios covered by the request, detach per-folio streaming-write
+ * and group state as appropriate, and clear the fscache/writeback flags.
+ * Group references collected on the way are dropped in one go at the end.
+ */
+static void netfs_pages_written_back(struct netfs_io_request *wreq)
+{
+       struct address_space *mapping = wreq->mapping;
+       struct netfs_folio *finfo;
+       struct netfs_group *group = NULL;
+       struct folio *folio;
+       pgoff_t last;
+       int gcount = 0;
+
+       XA_STATE(xas, &mapping->i_pages, wreq->start / PAGE_SIZE);
+
+       _enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
+
+       rcu_read_lock();
+
+       last = (wreq->start + wreq->len - 1) / PAGE_SIZE;
+       xas_for_each(&xas, folio, last) {
+               WARN(!folio_test_writeback(folio),
+                    "bad %zx @%llx page %lx %lx\n",
+                    wreq->len, wreq->start, folio->index, last);
+
+               if ((finfo = netfs_folio_info(folio))) {
+                       /* Streaming writes cannot be redirtied whilst under
+                        * writeback, so discard the streaming record.
+                        */
+                       folio_detach_private(folio);
+                       group = finfo->netfs_group;
+                       gcount++;
+                       trace_netfs_folio(folio, netfs_folio_trace_clear_s);
+                       kfree(finfo);
+               } else if ((group = netfs_folio_group(folio))) {
+                       /* Need to detach the group pointer if the page didn't
+                        * get redirtied.  If it has been redirtied, then it
+                        * must be within the same group.
+                        */
+                       if (folio_test_dirty(folio)) {
+                               trace_netfs_folio(folio, netfs_folio_trace_redirtied);
+                               goto end_wb;
+                       }
+                       /* Fast path: try to take the folio lock without
+                        * sleeping (we're under the RCU read lock here).
+                        */
+                       if (folio_trylock(folio)) {
+                               if (!folio_test_dirty(folio)) {
+                                       folio_detach_private(folio);
+                                       gcount++;
+                                       trace_netfs_folio(folio, netfs_folio_trace_clear_g);
+                               } else {
+                                       trace_netfs_folio(folio, netfs_folio_trace_redirtied);
+                               }
+                               folio_unlock(folio);
+                               goto end_wb;
+                       }
+
+                       /* Slow path: drop out of RCU so we can sleep on the
+                        * folio lock, then recheck the dirty state, which may
+                        * have changed in the meantime.
+                        */
+                       xas_pause(&xas);
+                       rcu_read_unlock();
+                       folio_lock(folio);
+                       if (!folio_test_dirty(folio)) {
+                               folio_detach_private(folio);
+                               gcount++;
+                               trace_netfs_folio(folio, netfs_folio_trace_clear_g);
+                       } else {
+                               trace_netfs_folio(folio, netfs_folio_trace_redirtied);
+                       }
+                       folio_unlock(folio);
+                       rcu_read_lock();
+               } else {
+                       trace_netfs_folio(folio, netfs_folio_trace_clear);
+               }
+       end_wb:
+               if (folio_test_fscache(folio))
+                       folio_end_fscache(folio);
+               /* Skip the remaining pages of a multipage folio. */
+               xas_advance(&xas, folio_next_index(folio) - 1);
+               folio_end_writeback(folio);
+       }
+
+       rcu_read_unlock();
+       /* Drop all the group refs we accumulated above in one call. */
+       netfs_put_group_many(group, gcount);
+       _leave("");
+}
+
+/*
+ * Deal with the disposition of the folios that are under writeback to close
+ * out the operation.
+ *
+ * On success the folios are cleaned; on retryable errors (permissions, keys,
+ * quota, space) they are redirtied for a later attempt; on unrecoverable
+ * errors they are removed from the pagecache.  Unlisted errors are reported
+ * and treated as retryable.
+ */
+static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq)
+{
+       struct address_space *mapping = wreq->mapping;
+
+       _enter("");
+
+       switch (wreq->error) {
+       case 0:
+               netfs_pages_written_back(wreq);
+               break;
+
+       default:
+               /* Unknown errors fall through into the redirty group below. */
+               pr_notice("R=%08x Unexpected error %d\n", wreq->debug_id, wreq->error);
+               fallthrough;
+       case -EACCES:
+       case -EPERM:
+       case -ENOKEY:
+       case -EKEYEXPIRED:
+       case -EKEYREJECTED:
+       case -EKEYREVOKED:
+       case -ENETRESET:
+       case -EDQUOT:
+       case -ENOSPC:
+               netfs_redirty_pages(mapping, wreq->start, wreq->len);
+               break;
+
+       case -EROFS:
+       case -EIO:
+       case -EREMOTEIO:
+       case -EFBIG:
+       case -ENOENT:
+       case -ENOMEDIUM:
+       case -ENXIO:
+               netfs_kill_pages(mapping, wreq->start, wreq->len);
+               break;
+       }
+
+       if (wreq->error)
+               mapping_set_error(mapping, wreq->error);
+       if (wreq->netfs_ops->done)
+               wreq->netfs_ops->done(wreq);
+}
+
+/*
+ * Extend the region to be written back to include subsequent contiguously
+ * dirty pages if possible, but don't sleep while doing so.
+ *
+ * If this page holds new content, then we can include filler zeros in the
+ * writeback.
+ *
+ * Folios are gathered in batches under the RCU read lock, then each batch is
+ * shifted to the writeback state outside of it.  On return, *_len and *_top
+ * have been grown by the data/folio spans added and *_count reduced by the
+ * number of pages consumed.
+ */
+static void netfs_extend_writeback(struct address_space *mapping,
+                                  struct netfs_group *group,
+                                  struct xa_state *xas,
+                                  long *_count,
+                                  loff_t start,
+                                  loff_t max_len,
+                                  bool caching,
+                                  size_t *_len,
+                                  size_t *_top)
+{
+       struct netfs_folio *finfo;
+       struct folio_batch fbatch;
+       struct folio *folio;
+       unsigned int i;
+       pgoff_t index = (start + *_len) / PAGE_SIZE;
+       size_t len;
+       void *priv;
+       bool stop = true;
+
+       folio_batch_init(&fbatch);
+
+       do {
+               /* Firstly, we gather up a batch of contiguous dirty pages
+                * under the RCU read lock - but we can't clear the dirty flags
+                * there if any of those pages are mapped.
+                */
+               rcu_read_lock();
+
+               xas_for_each(xas, folio, ULONG_MAX) {
+                       stop = true;
+                       if (xas_retry(xas, folio))
+                               continue;
+                       if (xa_is_value(folio))
+                               break;
+                       /* A gap in the index sequence ends the contiguous run. */
+                       if (folio->index != index) {
+                               xas_reset(xas);
+                               break;
+                       }
+
+                       if (!folio_try_get_rcu(folio)) {
+                               xas_reset(xas);
+                               continue;
+                       }
+
+                       /* Has the folio moved or been split? */
+                       if (unlikely(folio != xas_reload(xas))) {
+                               folio_put(folio);
+                               xas_reset(xas);
+                               break;
+                       }
+
+                       /* Non-blocking only: give up rather than sleep on the lock. */
+                       if (!folio_trylock(folio)) {
+                               folio_put(folio);
+                               xas_reset(xas);
+                               break;
+                       }
+                       if (!folio_test_dirty(folio) ||
+                           folio_test_writeback(folio) ||
+                           folio_test_fscache(folio)) {
+                               folio_unlock(folio);
+                               folio_put(folio);
+                               xas_reset(xas);
+                               break;
+                       }
+
+                       stop = false;
+                       len = folio_size(folio);
+                       priv = folio_get_private(folio);
+                       if ((const struct netfs_group *)priv != group) {
+                               /* A streaming write: only usable if it belongs
+                                * to the same group and its dirty data starts
+                                * at the folio boundary.
+                                */
+                               stop = true;
+                               finfo = netfs_folio_info(folio);
+                               if (finfo->netfs_group != group ||
+                                   finfo->dirty_offset > 0) {
+                                       folio_unlock(folio);
+                                       folio_put(folio);
+                                       xas_reset(xas);
+                                       break;
+                               }
+                               len = finfo->dirty_len;
+                       }
+
+                       *_top += folio_size(folio);
+                       index += folio_nr_pages(folio);
+                       *_count -= folio_nr_pages(folio);
+                       *_len += len;
+                       if (*_len >= max_len || *_count <= 0)
+                               stop = true;
+
+                       /* Batch full: process what we have, then come back. */
+                       if (!folio_batch_add(&fbatch, folio))
+                               break;
+                       if (stop)
+                               break;
+               }
+
+               xas_pause(xas);
+               rcu_read_unlock();
+
+               /* Now, if we obtained any folios, we can shift them to being
+                * writable and mark them for caching.
+                */
+               if (!folio_batch_count(&fbatch))
+                       break;
+
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       folio = fbatch.folios[i];
+                       trace_netfs_folio(folio, netfs_folio_trace_store_plus);
+
+                       if (!folio_clear_dirty_for_io(folio))
+                               BUG();
+                       folio_start_writeback(folio);
+                       netfs_folio_start_fscache(caching, folio);
+                       folio_unlock(folio);
+               }
+
+               folio_batch_release(&fbatch);
+               cond_resched();
+       } while (!stop);
+}
+
+/*
+ * Synchronously write back the locked page and any subsequent non-locked dirty
+ * pages.
+ *
+ * On entry @folio is locked; it is unlocked before the write is launched.
+ * Returns 1 on success (even if the dirty data lay beyond the EOF and was
+ * discarded) or a negative error code if the request could not be allocated.
+ */
+static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
+                                                 struct writeback_control *wbc,
+                                                 struct netfs_group *group,
+                                                 struct xa_state *xas,
+                                                 struct folio *folio,
+                                                 unsigned long long start,
+                                                 unsigned long long end)
+{
+       struct netfs_io_request *wreq;
+       struct netfs_folio *finfo;
+       struct netfs_inode *ctx = netfs_inode(mapping->host);
+       unsigned long long i_size = i_size_read(&ctx->inode);
+       size_t len, max_len;
+       bool caching = netfs_is_cache_enabled(ctx);
+       long count = wbc->nr_to_write;
+       int ret;
+
+       _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
+
+       wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
+                                  NETFS_WRITEBACK);
+       if (IS_ERR(wreq)) {
+               folio_unlock(folio);
+               return PTR_ERR(wreq);
+       }
+
+       if (!folio_clear_dirty_for_io(folio))
+               BUG();
+       folio_start_writeback(folio);
+       netfs_folio_start_fscache(caching, folio);
+
+       count -= folio_nr_pages(folio);
+
+       /* Find all consecutive lockable dirty pages that have contiguous
+        * written regions, stopping when we find a page that is not
+        * immediately lockable, is not dirty or is missing, or we reach the
+        * end of the range.
+        */
+       trace_netfs_folio(folio, netfs_folio_trace_store);
+
+       len = wreq->len;
+       finfo = netfs_folio_info(folio);
+       if (finfo) {
+               /* A streaming write: only the dirty subrange goes out, and
+                * the region cannot be extended unless the dirty data runs
+                * exactly to the end of the folio.
+                */
+               start += finfo->dirty_offset;
+               if (finfo->dirty_offset + finfo->dirty_len != len) {
+                       len = finfo->dirty_len;
+                       goto cant_expand;
+               }
+               len = finfo->dirty_len;
+       }
+
+       if (start < i_size) {
+               /* Trim the write to the EOF; the extra data is ignored.  Also
+                * put an upper limit on the size of a single storedata op.
+                */
+               max_len = 65536 * 4096;
+               max_len = min_t(unsigned long long, max_len, end - start + 1);
+               max_len = min_t(unsigned long long, max_len, i_size - start);
+
+               if (len < max_len)
+                       netfs_extend_writeback(mapping, group, xas, &count, start,
+                                              max_len, caching, &len, &wreq->upper_len);
+       }
+
+cant_expand:
+       /* NOTE(review): if start >= i_size, i_size - start wraps as unsigned
+        * and this min is a no-op; the start < i_size test below then routes
+        * to the discard branch - confirm that's the intended handling.
+        */
+       len = min_t(unsigned long long, len, i_size - start);
+
+       /* We now have a contiguous set of dirty pages, each with writeback
+        * set; the first page is still locked at this point, but all the rest
+        * have been unlocked.
+        */
+       folio_unlock(folio);
+       wreq->start = start;
+       wreq->len = len;
+
+       if (start < i_size) {
+               _debug("write back %zx @%llx [%llx]", len, start, i_size);
+
+               /* Speculatively write to the cache.  We have to fix this up
+                * later if the store fails.
+                */
+               wreq->cleanup = netfs_cleanup_buffered_write;
+
+               iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start,
+                               wreq->upper_len);
+               __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+               ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
+               if (ret == 0 || ret == -EIOCBQUEUED)
+                       wbc->nr_to_write -= len / PAGE_SIZE;
+       } else {
+               _debug("write discard %zx @%llx [%llx]", len, start, i_size);
+
+               /* The dirty region was entirely beyond the EOF. */
+               fscache_clear_page_bits(mapping, start, len, caching);
+               netfs_pages_written_back(wreq);
+               ret = 0;
+       }
+
+       netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+       _leave(" = 1");
+       return 1;
+}
+
+/*
+ * Write a region of pages back to the server
+ *
+ * Find the first dirty folio in the group at or after *_start, lock it and
+ * hand it to netfs_write_back_from_locked_folio().  On success (> 0 return),
+ * *_start is advanced past the written region so the caller can iterate.
+ */
+static ssize_t netfs_writepages_begin(struct address_space *mapping,
+                                     struct writeback_control *wbc,
+                                     struct netfs_group *group,
+                                     struct xa_state *xas,
+                                     unsigned long long *_start,
+                                     unsigned long long end)
+{
+       const struct netfs_folio *finfo;
+       struct folio *folio;
+       unsigned long long start = *_start;
+       ssize_t ret;
+       void *priv;
+       int skips = 0;
+
+       _enter("%llx,%llx,", start, end);
+
+search_again:
+       /* Find the first dirty page in the group. */
+       rcu_read_lock();
+
+       for (;;) {
+               folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
+               if (xas_retry(xas, folio) || xa_is_value(folio))
+                       continue;
+               if (!folio)
+                       break;
+
+               if (!folio_try_get_rcu(folio)) {
+                       xas_reset(xas);
+                       continue;
+               }
+
+               /* Has the folio moved or been split under us? */
+               if (unlikely(folio != xas_reload(xas))) {
+                       folio_put(folio);
+                       xas_reset(xas);
+                       continue;
+               }
+
+               /* Skip any dirty folio that's not in the group of interest. */
+               priv = folio_get_private(folio);
+               if ((const struct netfs_group *)priv != group) {
+                       finfo = netfs_folio_info(folio);
+                       if (finfo->netfs_group != group) {
+                               folio_put(folio);
+                               continue;
+                       }
+               }
+
+               xas_pause(xas);
+               break;
+       }
+       rcu_read_unlock();
+       if (!folio)
+               return 0;
+
+       start = folio_pos(folio); /* May regress with THPs */
+
+       _debug("wback %lx", folio->index);
+
+       /* At this point we hold neither the i_pages lock nor the page lock:
+        * the page may be truncated or invalidated (changing page->mapping to
+        * NULL), or even swizzled back from swapper_space to tmpfs file
+        * mapping
+        */
+lock_again:
+       if (wbc->sync_mode != WB_SYNC_NONE) {
+               ret = folio_lock_killable(folio);
+               if (ret < 0)
+                       return ret;
+       } else {
+               /* NOTE(review): on this path and the two search_again jumps
+                * below, the reference taken via folio_try_get_rcu() does not
+                * appear to be dropped - confirm whether a folio_put() is
+                * needed (cf. the afs equivalent).
+                */
+               if (!folio_trylock(folio))
+                       goto search_again;
+       }
+
+       if (folio->mapping != mapping ||
+           !folio_test_dirty(folio)) {
+               /* Truncated or cleaned under us; move past it. */
+               start += folio_size(folio);
+               folio_unlock(folio);
+               goto search_again;
+       }
+
+       if (folio_test_writeback(folio) ||
+           folio_test_fscache(folio)) {
+               folio_unlock(folio);
+               if (wbc->sync_mode != WB_SYNC_NONE) {
+                       /* Synchronous writeback must wait for the folio to
+                        * settle, then retake the lock and recheck.
+                        */
+                       folio_wait_writeback(folio);
+#ifdef CONFIG_FSCACHE
+                       folio_wait_fscache(folio);
+#endif
+                       goto lock_again;
+               }
+
+               /* Best-effort writeback: skip busy folios, but give up after
+                * a few to avoid spinning.
+                */
+               start += folio_size(folio);
+               if (wbc->sync_mode == WB_SYNC_NONE) {
+                       if (skips >= 5 || need_resched()) {
+                               ret = 0;
+                               goto out;
+                       }
+                       skips++;
+               }
+               goto search_again;
+       }
+
+       ret = netfs_write_back_from_locked_folio(mapping, wbc, group, xas,
+                                                folio, start, end);
+out:
+       if (ret > 0)
+               *_start = start + ret;
+       _leave(" = %zd [%llx]", ret, *_start);
+       return ret;
+}
+
+/*
+ * Write a region of pages back to the server
+ *
+ * Repeatedly kick off writeback batches from *_start towards @end until
+ * nothing more gets written or the wbc's page budget is exhausted.
+ */
+static int netfs_writepages_region(struct address_space *mapping,
+                                  struct writeback_control *wbc,
+                                  struct netfs_group *group,
+                                  unsigned long long *_start,
+                                  unsigned long long end)
+{
+       ssize_t ret;
+
+       XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);
+
+       for (;;) {
+               ret = netfs_writepages_begin(mapping, wbc, group, &xas,
+                                            _start, end);
+               if (ret <= 0 || wbc->nr_to_write <= 0)
+                       break;
+               cond_resched();
+       }
+
+       /* A positive count just means "made progress"; callers get 0. */
+       return ret > 0 ? 0 : ret;
+}
+
+/*
+ * write some of the pending data back to the server
+ *
+ * Handles the three writeback_control cases: cyclic writeback resuming from
+ * mapping->writeback_index (wrapping to the file start), a whole-file sweep,
+ * and an explicit byte range.
+ */
+int netfs_writepages(struct address_space *mapping,
+                    struct writeback_control *wbc)
+{
+       struct netfs_group *group = NULL;
+       loff_t start, end;
+       int ret;
+
+       _enter("");
+
+       /* We have to be careful as we can end up racing with setattr()
+        * truncating the pagecache since the caller doesn't take a lock here
+        * to prevent it.
+        */
+
+       if (wbc->range_cyclic && mapping->writeback_index) {
+               /* Resume from where the last cyclic pass stopped, then wrap
+                * around to cover [0, old writeback_index).
+                */
+               start = mapping->writeback_index * PAGE_SIZE;
+               ret = netfs_writepages_region(mapping, wbc, group,
+                                             &start, LLONG_MAX);
+               if (ret < 0)
+                       goto out;
+
+               if (wbc->nr_to_write <= 0) {
+                       /* Budget exhausted; remember where to resume. */
+                       mapping->writeback_index = start / PAGE_SIZE;
+                       goto out;
+               }
+
+               start = 0;
+               end = mapping->writeback_index * PAGE_SIZE;
+               mapping->writeback_index = 0;
+               ret = netfs_writepages_region(mapping, wbc, group, &start, end);
+               if (ret == 0)
+                       mapping->writeback_index = start / PAGE_SIZE;
+       } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
+               /* Whole-file sweep. */
+               start = 0;
+               ret = netfs_writepages_region(mapping, wbc, group,
+                                             &start, LLONG_MAX);
+               if (wbc->nr_to_write > 0 && ret == 0)
+                       mapping->writeback_index = start / PAGE_SIZE;
+       } else {
+               /* Explicit byte range requested by the caller. */
+               start = wbc->range_start;
+               ret = netfs_writepages_region(mapping, wbc, group,
+                                             &start, wbc->range_end);
+       }
+
+out:
+       _leave(" = %d", ret);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_writepages);
+
+/*
+ * Deal with the disposition of a laundered folio.
+ *
+ * Nothing to do on success; on failure, log it and record the error on the
+ * mapping so a later fsync()/close() can report it.
+ */
+static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
+{
+       if (!wreq->error)
+               return;
+
+       pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
+       mapping_set_error(wreq->mapping, wreq->error);
+}
+
+/**
+ * netfs_launder_folio - Clean up a dirty folio that's being invalidated
+ * @folio: The folio to clean
+ *
+ * This is called to write back a folio that's being invalidated when an inode
+ * is getting torn down.  Ideally, writepages would be used instead.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+int netfs_launder_folio(struct folio *folio)
+{
+       struct netfs_io_request *wreq;
+       struct address_space *mapping = folio->mapping;
+       struct netfs_folio *finfo = netfs_folio_info(folio);
+       struct netfs_group *group = netfs_folio_group(folio);
+       struct bio_vec bvec;
+       unsigned long long i_size = i_size_read(mapping->host);
+       unsigned long long start = folio_pos(folio);
+       size_t offset = 0, len;
+       int ret = 0;
+
+       /* A streaming write only dirties a subrange of the folio. */
+       if (finfo) {
+               offset = finfo->dirty_offset;
+               start += offset;
+               len = finfo->dirty_len;
+       } else {
+               len = folio_size(folio);
+       }
+       /* Trim to the EOF; data beyond it need not be stored.
+        * NOTE(review): if start >= i_size, i_size - start wraps as unsigned
+        * and this min is a no-op - confirm that case cannot occur here.
+        */
+       len = min_t(unsigned long long, len, i_size - start);
+
+       wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
+       if (IS_ERR(wreq)) {
+               ret = PTR_ERR(wreq);
+               goto out;
+       }
+
+       /* Someone else may have cleaned it already; nothing to write then. */
+       if (!folio_clear_dirty_for_io(folio))
+               goto out_put;
+
+       trace_netfs_folio(folio, netfs_folio_trace_launder);
+
+       _debug("launder %llx-%llx", start, start + len - 1);
+
+       /* Speculatively write to the cache.  We have to fix this up later if
+        * the store fails.
+        */
+       wreq->cleanup = netfs_cleanup_launder_folio;
+
+       bvec_set_folio(&bvec, folio, len, offset);
+       iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
+       __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+       ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);
+
+out_put:
+       /* The folio is being invalidated, so drop its private state whether
+        * or not the write went ahead.
+        */
+       folio_detach_private(folio);
+       netfs_put_group(group);
+       kfree(finfo);
+       netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+out:
+       folio_wait_fscache(folio);
+       _leave(" = %d", ret);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_launder_folio);
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
new file mode 100644 (file)
index 0000000..ad4370b
--- /dev/null
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Direct I/O support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+/**
+ * netfs_unbuffered_read_iter_locked - Perform an unbuffered or direct I/O read
+ * @iocb: The I/O control descriptor describing the read
+ * @iter: The output buffer (also specifies read length)
+ *
+ * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
+ * output buffer.  No use is made of the pagecache.
+ *
+ * The caller must hold any appropriate locks.
+ */
+static ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct netfs_io_request *rreq;
+       ssize_t ret;
+       size_t orig_count = iov_iter_count(iter);
+       bool async = !is_sync_kiocb(iocb);
+
+       _enter("");
+
+       if (!orig_count)
+               return 0; /* Don't update atime */
+
+       ret = kiocb_write_and_wait(iocb, orig_count);
+       if (ret < 0)
+               return ret;
+       file_accessed(iocb->ki_filp);
+
+       rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
+                                  iocb->ki_pos, orig_count,
+                                  NETFS_DIO_READ);
+       if (IS_ERR(rreq))
+               return PTR_ERR(rreq);
+
+       netfs_stat(&netfs_n_rh_dio_read);
+       trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_dio_read);
+
+       /* If this is an async op, we have to keep track of the destination
+        * buffer for ourselves as the caller's iterator will be trashed when
+        * we return.
+        *
+        * In such a case, extract an iterator to represent as much of the
+        * output buffer as we can manage.  Note that the extraction might not
+        * be able to allocate a sufficiently large bvec array and may shorten
+        * the request.
+        */
+       if (user_backed_iter(iter)) {
+               ret = netfs_extract_user_iter(iter, rreq->len, &rreq->iter, 0);
+               if (ret < 0)
+                       goto out;
+               rreq->direct_bv = (struct bio_vec *)rreq->iter.bvec;
+               rreq->direct_bv_count = ret;
+               rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
+               rreq->len = iov_iter_count(&rreq->iter);
+       } else {
+               rreq->iter = *iter;
+               rreq->len = orig_count;
+               rreq->direct_bv_unpin = false;
+               iov_iter_advance(iter, orig_count);
+       }
+
+       // TODO: Set up bounce buffer if needed
+
+       if (async)
+               rreq->iocb = iocb;
+
+       ret = netfs_begin_read(rreq, is_sync_kiocb(iocb));
+       if (ret < 0)
+               goto out; /* May be -EIOCBQUEUED */
+       if (!async) {
+               // TODO: Copy from bounce buffer
+               iocb->ki_pos += rreq->transferred;
+               ret = rreq->transferred;
+       }
+
+out:
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+       if (ret > 0)
+               orig_count -= ret;
+       if (ret != -EIOCBQUEUED)
+               iov_iter_revert(iter, orig_count - iov_iter_count(iter));
+       return ret;
+}
+
+/**
+ * netfs_unbuffered_read_iter - Perform an unbuffered or direct I/O read
+ * @iocb: The I/O control descriptor describing the read
+ * @iter: The output buffer (also specifies read length)
+ *
+ * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
+ * output buffer.  No use is made of the pagecache.
+ */
+ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct inode *inode = file_inode(iocb->ki_filp);
+       ssize_t ret;
+
+       if (!iter->count)
+               return 0; /* Don't update atime */
+
+       ret = netfs_start_io_direct(inode);
+       if (ret == 0) {
+               ret = netfs_unbuffered_read_iter_locked(iocb, iter);
+               netfs_end_io_direct(inode);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(netfs_unbuffered_read_iter);
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
new file mode 100644 (file)
index 0000000..60a40d2
--- /dev/null
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Unbuffered and direct write support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/uio.h>
+#include "internal.h"
+
+static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
+{
+       struct inode *inode = wreq->inode;
+       unsigned long long end = wreq->start + wreq->len;
+
+       if (!wreq->error &&
+           i_size_read(inode) < end) {
+               if (wreq->netfs_ops->update_i_size)
+                       wreq->netfs_ops->update_i_size(inode, end);
+               else
+                       i_size_write(inode, end);
+       }
+}
+
+/*
+ * Perform an unbuffered write where we may have to do an RMW operation on an
+ * encrypted file.  This can also be used for direct I/O writes.
+ */
+static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
+                                                 struct netfs_group *netfs_group)
+{
+       struct netfs_io_request *wreq;
+       unsigned long long start = iocb->ki_pos;
+       unsigned long long end = start + iov_iter_count(iter);
+       ssize_t ret, n;
+       bool async = !is_sync_kiocb(iocb);
+
+       _enter("");
+
+       /* We're going to need a bounce buffer if what we transmit is going to
+        * be different in some way to the source buffer, e.g. because it gets
+        * encrypted/compressed or because it needs expanding to a block size.
+        */
+       // TODO
+
+       _debug("uw %llx-%llx", start, end);
+
+       wreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
+                                  start, end - start,
+                                  iocb->ki_flags & IOCB_DIRECT ?
+                                  NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
+       if (IS_ERR(wreq))
+               return PTR_ERR(wreq);
+
+       {
+               /* If this is an async op and we're not using a bounce buffer,
+                * we have to save the source buffer as the iterator is only
+                * good until we return.  In such a case, extract an iterator
+                * to represent as much of the output buffer as we can
+                * manage.  Note that the extraction might not be able to
+                * allocate a sufficiently large bvec array and may shorten the
+                * request.
+                */
+               if (async || user_backed_iter(iter)) {
+                       n = netfs_extract_user_iter(iter, wreq->len, &wreq->iter, 0);
+                       if (n < 0) {
+                               ret = n;
+                               goto out;
+                       }
+                       wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
+                       wreq->direct_bv_count = n;
+                       wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
+                       wreq->len = iov_iter_count(&wreq->iter);
+               } else {
+                       wreq->iter = *iter;
+               }
+
+               wreq->io_iter = wreq->iter;
+       }
+
+       /* Copy the data into the bounce buffer and encrypt it. */
+       // TODO
+
+       /* Dispatch the write. */
+       __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+       if (async)
+               wreq->iocb = iocb;
+       wreq->cleanup = netfs_cleanup_dio_write;
+       ret = netfs_begin_write(wreq, is_sync_kiocb(iocb),
+                               iocb->ki_flags & IOCB_DIRECT ?
+                               netfs_write_trace_dio_write :
+                               netfs_write_trace_unbuffered_write);
+       if (ret < 0) {
+               _debug("begin = %zd", ret);
+               goto out;
+       }
+
+       if (!async) {
+               trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
+               wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
+                           TASK_UNINTERRUPTIBLE);
+
+               ret = wreq->error;
+               _debug("waited = %zd", ret);
+               if (ret == 0) {
+                       ret = wreq->transferred;
+                       iocb->ki_pos += ret;
+               }
+       } else {
+               ret = -EIOCBQUEUED;
+       }
+
+out:
+       netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+       return ret;
+}
+
+/**
+ * netfs_unbuffered_write_iter - Unbuffered write to a file
+ * @iocb: IO state structure
+ * @from: iov_iter with data to write
+ *
+ * Do an unbuffered write to a file, writing the data directly to the server
+ * and not lodging the data in the pagecache.
+ *
+ * Return:
+ * * Negative error code if no data has been written at all or
+ *   vfs_fsync_range() failed for a synchronous write
+ * * Number of bytes written, even for truncated writes
+ */
+ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       struct netfs_inode *ictx = netfs_inode(inode);
+       unsigned long long end;
+       ssize_t ret;
+
+       _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+
+       trace_netfs_write_iter(iocb, from);
+       netfs_stat(&netfs_n_rh_dio_write);
+
+       ret = netfs_start_io_direct(inode);
+       if (ret < 0)
+               return ret;
+       ret = generic_write_checks(iocb, from);
+       if (ret < 0)
+               goto out;
+       ret = file_remove_privs(file);
+       if (ret < 0)
+               goto out;
+       ret = file_update_time(file);
+       if (ret < 0)
+               goto out;
+       ret = kiocb_invalidate_pages(iocb, iov_iter_count(from));
+       if (ret < 0)
+               goto out;
+       end = iocb->ki_pos + iov_iter_count(from);
+       if (end > ictx->zero_point)
+               ictx->zero_point = end;
+
+       fscache_invalidate(netfs_i_cookie(ictx), NULL, i_size_read(inode),
+                          FSCACHE_INVAL_DIO_WRITE);
+       ret = netfs_unbuffered_write_iter_locked(iocb, from, NULL);
+out:
+       netfs_end_io_direct(inode);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_unbuffered_write_iter);
similarity index 99%
rename from fs/fscache/cache.c
rename to fs/netfs/fscache_cache.c
index d645f8b302a27882c86c3c46e134dd5bcbc35cef..9397ed39b0b4ecbdd9c9b5860887162990c2f66d 100644 (file)
@@ -179,13 +179,14 @@ EXPORT_SYMBOL(fscache_acquire_cache);
 void fscache_put_cache(struct fscache_cache *cache,
                       enum fscache_cache_trace where)
 {
-       unsigned int debug_id = cache->debug_id;
+       unsigned int debug_id;
        bool zero;
        int ref;
 
        if (IS_ERR_OR_NULL(cache))
                return;
 
+       debug_id = cache->debug_id;
        zero = __refcount_dec_and_test(&cache->ref, &ref);
        trace_fscache_cache(debug_id, ref - 1, where);
 
diff --git a/fs/netfs/fscache_internal.h b/fs/netfs/fscache_internal.h
new file mode 100644 (file)
index 0000000..a09b948
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Internal definitions for FS-Cache
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include "internal.h"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "FS-Cache: " fmt
similarity index 86%
rename from fs/fscache/io.c
rename to fs/netfs/fscache_io.c
index 0d2b8dec8f82cd040391b01f814e175cc39eeae9..ad572f7ee897b9d26d2439a6a1178332d2a2e547 100644 (file)
@@ -158,46 +158,6 @@ int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
 }
 EXPORT_SYMBOL(__fscache_begin_write_operation);
 
-/**
- * fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback
- * @mapping: The mapping the folio belongs to.
- * @folio: The folio being dirtied.
- * @cookie: The cookie referring to the cache object
- *
- * Set the dirty flag on a folio and pin an in-use cache object in memory
- * so that writeback can later write to it.  This is intended
- * to be called from the filesystem's ->dirty_folio() method.
- *
- * Return: true if the dirty flag was set on the folio, false otherwise.
- */
-bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
-                               struct fscache_cookie *cookie)
-{
-       struct inode *inode = mapping->host;
-       bool need_use = false;
-
-       _enter("");
-
-       if (!filemap_dirty_folio(mapping, folio))
-               return false;
-       if (!fscache_cookie_valid(cookie))
-               return true;
-
-       if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
-               spin_lock(&inode->i_lock);
-               if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
-                       inode->i_state |= I_PINNING_FSCACHE_WB;
-                       need_use = true;
-               }
-               spin_unlock(&inode->i_lock);
-
-               if (need_use)
-                       fscache_use_cookie(cookie, true);
-       }
-       return true;
-}
-EXPORT_SYMBOL(fscache_dirty_folio);
-
 struct fscache_write_request {
        struct netfs_cache_resources cache_resources;
        struct address_space    *mapping;
@@ -277,7 +237,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
                                    fscache_access_io_write) < 0)
                goto abandon_free;
 
-       ret = cres->ops->prepare_write(cres, &start, &len, i_size, false);
+       ret = cres->ops->prepare_write(cres, &start, &len, len, i_size, false);
        if (ret < 0)
                goto abandon_end;
 
similarity index 84%
rename from fs/fscache/main.c
rename to fs/netfs/fscache_main.c
index dad85fd84f6f9f9245112b7bdcea4305313c8950..42e98bb523e369f8251146bb7a3b9802c4874b3d 100644 (file)
@@ -8,18 +8,9 @@
 #define FSCACHE_DEBUG_LEVEL CACHE
 #include <linux/module.h>
 #include <linux/init.h>
-#define CREATE_TRACE_POINTS
 #include "internal.h"
-
-MODULE_DESCRIPTION("FS Cache Manager");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
-unsigned fscache_debug;
-module_param_named(debug, fscache_debug, uint,
-                  S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(fscache_debug,
-                "FS-Cache debugging mask");
+#define CREATE_TRACE_POINTS
+#include <trace/events/fscache.h>
 
 EXPORT_TRACEPOINT_SYMBOL(fscache_access_cache);
 EXPORT_TRACEPOINT_SYMBOL(fscache_access_volume);
@@ -71,7 +62,7 @@ unsigned int fscache_hash(unsigned int salt, const void *data, size_t len)
 /*
  * initialise the fs caching module
  */
-static int __init fscache_init(void)
+int __init fscache_init(void)
 {
        int ret = -ENOMEM;
 
@@ -92,7 +83,7 @@ static int __init fscache_init(void)
                goto error_cookie_jar;
        }
 
-       pr_notice("Loaded\n");
+       pr_notice("FS-Cache loaded\n");
        return 0;
 
 error_cookie_jar:
@@ -103,19 +94,15 @@ error_wq:
        return ret;
 }
 
-fs_initcall(fscache_init);
-
 /*
  * clean up on module removal
  */
-static void __exit fscache_exit(void)
+void __exit fscache_exit(void)
 {
        _enter("");
 
        kmem_cache_destroy(fscache_cookie_jar);
        fscache_proc_cleanup();
        destroy_workqueue(fscache_wq);
-       pr_notice("Unloaded\n");
+       pr_notice("FS-Cache unloaded\n");
 }
-
-module_exit(fscache_exit);
similarity index 58%
rename from fs/fscache/proc.c
rename to fs/netfs/fscache_proc.c
index dc3b0e9c8cce848a4777a5cfbdcf621b4a3688b7..874d951bc39012d487b87e27b641c3591cf51909 100644 (file)
 #include "internal.h"
 
 /*
- * initialise the /proc/fs/fscache/ directory
+ * Add files to /proc/fs/netfs/.
  */
 int __init fscache_proc_init(void)
 {
-       if (!proc_mkdir("fs/fscache", NULL))
-               goto error_dir;
+       if (!proc_symlink("fs/fscache", NULL, "netfs"))
+               goto error_sym;
 
-       if (!proc_create_seq("fs/fscache/caches", S_IFREG | 0444, NULL,
+       if (!proc_create_seq("fs/netfs/caches", S_IFREG | 0444, NULL,
                             &fscache_caches_seq_ops))
                goto error;
 
-       if (!proc_create_seq("fs/fscache/volumes", S_IFREG | 0444, NULL,
+       if (!proc_create_seq("fs/netfs/volumes", S_IFREG | 0444, NULL,
                             &fscache_volumes_seq_ops))
                goto error;
 
-       if (!proc_create_seq("fs/fscache/cookies", S_IFREG | 0444, NULL,
+       if (!proc_create_seq("fs/netfs/cookies", S_IFREG | 0444, NULL,
                             &fscache_cookies_seq_ops))
                goto error;
-
-#ifdef CONFIG_FSCACHE_STATS
-       if (!proc_create_single("fs/fscache/stats", S_IFREG | 0444, NULL,
-                               fscache_stats_show))
-               goto error;
-#endif
-
        return 0;
 
 error:
        remove_proc_entry("fs/fscache", NULL);
-error_dir:
+error_sym:
        return -ENOMEM;
 }
 
 /*
- * clean up the /proc/fs/fscache/ directory
+ * Clean up the /proc/fs/fscache symlink.
  */
 void fscache_proc_cleanup(void)
 {
similarity index 90%
rename from fs/fscache/stats.c
rename to fs/netfs/fscache_stats.c
index fc94e5e79f1c6d456bd6e48ac2fe8a5141f70ef6..add21abdf7134983c30a497644c20b14a7a9a8ac 100644 (file)
@@ -48,13 +48,15 @@ atomic_t fscache_n_no_create_space;
 EXPORT_SYMBOL(fscache_n_no_create_space);
 atomic_t fscache_n_culled;
 EXPORT_SYMBOL(fscache_n_culled);
+atomic_t fscache_n_dio_misfit;
+EXPORT_SYMBOL(fscache_n_dio_misfit);
 
 /*
  * display the general statistics
  */
-int fscache_stats_show(struct seq_file *m, void *v)
+int fscache_stats_show(struct seq_file *m)
 {
-       seq_puts(m, "FS-Cache statistics\n");
+       seq_puts(m, "-- FS-Cache statistics --\n");
        seq_printf(m, "Cookies: n=%d v=%d vcol=%u voom=%u\n",
                   atomic_read(&fscache_n_cookies),
                   atomic_read(&fscache_n_volumes),
@@ -93,10 +95,9 @@ int fscache_stats_show(struct seq_file *m, void *v)
                   atomic_read(&fscache_n_no_create_space),
                   atomic_read(&fscache_n_culled));
 
-       seq_printf(m, "IO     : rd=%u wr=%u\n",
+       seq_printf(m, "IO     : rd=%u wr=%u mis=%u\n",
                   atomic_read(&fscache_n_read),
-                  atomic_read(&fscache_n_write));
-
-       netfs_stats_show(m);
+                  atomic_read(&fscache_n_write),
+                  atomic_read(&fscache_n_dio_misfit));
        return 0;
 }
index 43fac1b14e40cd1351cbac875d1886b0a9256835..ec7045d24400df09bd5a933401a7fbb2d36a3d24 100644 (file)
@@ -5,9 +5,13 @@
  * Written by David Howells (dhowells@redhat.com)
  */
 
+#include <linux/slab.h>
+#include <linux/seq_file.h>
 #include <linux/netfs.h>
 #include <linux/fscache.h>
+#include <linux/fscache-cache.h>
 #include <trace/events/netfs.h>
+#include <trace/events/fscache.h>
 
 #ifdef pr_fmt
 #undef pr_fmt
@@ -19,6 +23,8 @@
  * buffered_read.c
  */
 void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
+int netfs_prefetch_for_write(struct file *file, struct folio *folio,
+                            size_t offset, size_t len);
 
 /*
  * io.c
@@ -29,6 +35,41 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
  * main.c
  */
 extern unsigned int netfs_debug;
+extern struct list_head netfs_io_requests;
+extern spinlock_t netfs_proc_lock;
+
+#ifdef CONFIG_PROC_FS
+static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
+{
+       spin_lock(&netfs_proc_lock);
+       list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
+       spin_unlock(&netfs_proc_lock);
+}
+static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
+{
+       if (!list_empty(&rreq->proc_link)) {
+               spin_lock(&netfs_proc_lock);
+               list_del_rcu(&rreq->proc_link);
+               spin_unlock(&netfs_proc_lock);
+       }
+}
+#else
+static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
+static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
+#endif
+
+/*
+ * misc.c
+ */
+#define NETFS_FLAG_PUT_MARK            BIT(0)
+#define NETFS_FLAG_PAGECACHE_MARK      BIT(1)
+int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
+                           struct folio *folio, unsigned int flags,
+                           gfp_t gfp_mask);
+int netfs_add_folios_to_buffer(struct xarray *buffer,
+                              struct address_space *mapping,
+                              pgoff_t index, pgoff_t to, gfp_t gfp_mask);
+void netfs_clear_buffer(struct xarray *buffer);
 
 /*
  * objects.c
@@ -49,10 +90,21 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
        trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
 }
 
+/*
+ * output.c
+ */
+int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
+                     enum netfs_write_trace what);
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
+int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end);
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb);
+
 /*
  * stats.c
  */
 #ifdef CONFIG_NETFS_STATS
+extern atomic_t netfs_n_rh_dio_read;
+extern atomic_t netfs_n_rh_dio_write;
 extern atomic_t netfs_n_rh_readahead;
 extern atomic_t netfs_n_rh_readpage;
 extern atomic_t netfs_n_rh_rreq;
@@ -71,7 +123,15 @@ extern atomic_t netfs_n_rh_write_begin;
 extern atomic_t netfs_n_rh_write_done;
 extern atomic_t netfs_n_rh_write_failed;
 extern atomic_t netfs_n_rh_write_zskip;
+extern atomic_t netfs_n_wh_wstream_conflict;
+extern atomic_t netfs_n_wh_upload;
+extern atomic_t netfs_n_wh_upload_done;
+extern atomic_t netfs_n_wh_upload_failed;
+extern atomic_t netfs_n_wh_write;
+extern atomic_t netfs_n_wh_write_done;
+extern atomic_t netfs_n_wh_write_failed;
 
+int netfs_stats_show(struct seq_file *m, void *v);
 
 static inline void netfs_stat(atomic_t *stat)
 {
@@ -103,6 +163,176 @@ static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
 #endif
 }
 
+/*
+ * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
+ */
+static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
+{
+       if (netfs_group)
+               refcount_inc(&netfs_group->ref);
+       return netfs_group;
+}
+
+/*
+ * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
+ */
+static inline void netfs_put_group(struct netfs_group *netfs_group)
+{
+       if (netfs_group && refcount_dec_and_test(&netfs_group->ref))
+               netfs_group->free(netfs_group);
+}
+
+/*
+ * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
+ */
+static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
+{
+       if (netfs_group && refcount_sub_and_test(nr, &netfs_group->ref))
+               netfs_group->free(netfs_group);
+}
+
+/*
+ * fscache-cache.c
+ */
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_caches_seq_ops;
+#endif
+bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
+void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
+struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
+void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
+
+static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
+{
+       return smp_load_acquire(&cache->state);
+}
+
+static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
+{
+       return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
+}
+
+static inline void fscache_set_cache_state(struct fscache_cache *cache,
+                                          enum fscache_cache_state new_state)
+{
+       smp_store_release(&cache->state, new_state);
+
+}
+
+static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
+                                                enum fscache_cache_state old_state,
+                                                enum fscache_cache_state new_state)
+{
+       return try_cmpxchg_release(&cache->state, &old_state, new_state);
+}
+
+/*
+ * fscache-cookie.c
+ */
+extern struct kmem_cache *fscache_cookie_jar;
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_cookies_seq_ops;
+#endif
+extern struct timer_list fscache_cookie_lru_timer;
+
+extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
+extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
+                                       enum fscache_access_trace why);
+
+static inline void fscache_see_cookie(struct fscache_cookie *cookie,
+                                     enum fscache_cookie_trace where)
+{
+       trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
+                            where);
+}
+
+/*
+ * fscache-main.c
+ */
+extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
+#ifdef CONFIG_FSCACHE
+int __init fscache_init(void);
+void __exit fscache_exit(void);
+#else
+static inline int fscache_init(void) { return 0; }
+static inline void fscache_exit(void) {}
+#endif
+
+/*
+ * fscache-proc.c
+ */
+#ifdef CONFIG_PROC_FS
+extern int __init fscache_proc_init(void);
+extern void fscache_proc_cleanup(void);
+#else
+#define fscache_proc_init()    (0)
+#define fscache_proc_cleanup() do {} while (0)
+#endif
+
+/*
+ * fscache-stats.c
+ */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_volumes;
+extern atomic_t fscache_n_volumes_collision;
+extern atomic_t fscache_n_volumes_nomem;
+extern atomic_t fscache_n_cookies;
+extern atomic_t fscache_n_cookies_lru;
+extern atomic_t fscache_n_cookies_lru_expired;
+extern atomic_t fscache_n_cookies_lru_removed;
+extern atomic_t fscache_n_cookies_lru_dropped;
+
+extern atomic_t fscache_n_acquires;
+extern atomic_t fscache_n_acquires_ok;
+extern atomic_t fscache_n_acquires_oom;
+
+extern atomic_t fscache_n_invalidates;
+
+extern atomic_t fscache_n_relinquishes;
+extern atomic_t fscache_n_relinquishes_retire;
+extern atomic_t fscache_n_relinquishes_dropped;
+
+extern atomic_t fscache_n_resizes;
+extern atomic_t fscache_n_resizes_null;
+
+static inline void fscache_stat(atomic_t *stat)
+{
+       atomic_inc(stat);
+}
+
+static inline void fscache_stat_d(atomic_t *stat)
+{
+       atomic_dec(stat);
+}
+
+#define __fscache_stat(stat) (stat)
+
+int fscache_stats_show(struct seq_file *m);
+#else
+
+#define __fscache_stat(stat) (NULL)
+#define fscache_stat(stat) do {} while (0)
+#define fscache_stat_d(stat) do {} while (0)
+
+static inline int fscache_stats_show(struct seq_file *m) { return 0; }
+#endif
+
+/*
+ * fscache-volume.c
+ */
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_volumes_seq_ops;
+#endif
+
+struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+                                         enum fscache_volume_trace where);
+void fscache_put_volume(struct fscache_volume *volume,
+                       enum fscache_volume_trace where);
+bool fscache_begin_volume_access(struct fscache_volume *volume,
+                                struct fscache_cookie *cookie,
+                                enum fscache_access_trace why);
+void fscache_create_volume(struct fscache_volume *volume, bool wait);
+
 /*****************************************************************************/
 /*
  * debug tracing
@@ -143,3 +373,57 @@ do {                                               \
 #define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
 #define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
 #endif
+
+/*
+ * assertions
+ */
+#if 1 /* defined(__KDEBUGALL) */
+
+#define ASSERT(X)                                                      \
+do {                                                                   \
+       if (unlikely(!(X))) {                                           \
+               pr_err("\n");                                   \
+               pr_err("Assertion failed\n");   \
+               BUG();                                                  \
+       }                                                               \
+} while (0)
+
+#define ASSERTCMP(X, OP, Y)                                            \
+do {                                                                   \
+       if (unlikely(!((X) OP (Y)))) {                                  \
+               pr_err("\n");                                   \
+               pr_err("Assertion failed\n");   \
+               pr_err("%lx " #OP " %lx is false\n",            \
+                      (unsigned long)(X), (unsigned long)(Y));         \
+               BUG();                                                  \
+       }                                                               \
+} while (0)
+
+#define ASSERTIF(C, X)                                                 \
+do {                                                                   \
+       if (unlikely((C) && !(X))) {                                    \
+               pr_err("\n");                                   \
+               pr_err("Assertion failed\n");   \
+               BUG();                                                  \
+       }                                                               \
+} while (0)
+
+#define ASSERTIFCMP(C, X, OP, Y)                                       \
+do {                                                                   \
+       if (unlikely((C) && !((X) OP (Y)))) {                           \
+               pr_err("\n");                                   \
+               pr_err("Assertion failed\n");   \
+               pr_err("%lx " #OP " %lx is false\n",            \
+                      (unsigned long)(X), (unsigned long)(Y));         \
+               BUG();                                                  \
+       }                                                               \
+} while (0)
+
+#else
+
+#define ASSERT(X)                      do {} while (0)
+#define ASSERTCMP(X, OP, Y)            do {} while (0)
+#define ASSERTIF(C, X)                 do {} while (0)
+#define ASSERTIFCMP(C, X, OP, Y)       do {} while (0)
+
+#endif /* assert or not */
index 7f753380e047ab5102f9908bc2f4a5f99bbdda6e..e8ff1e61ce79b7f67e1252f4b66aa461bfe1d4b8 100644 (file)
  */
 static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
 {
-       struct iov_iter iter;
-
-       iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages,
-                       subreq->start + subreq->transferred,
-                       subreq->len   - subreq->transferred);
-       iov_iter_zero(iov_iter_count(&iter), &iter);
+       iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
 }
 
 static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
@@ -46,14 +41,9 @@ static void netfs_read_from_cache(struct netfs_io_request *rreq,
                                  enum netfs_read_from_hole read_hole)
 {
        struct netfs_cache_resources *cres = &rreq->cache_resources;
-       struct iov_iter iter;
 
        netfs_stat(&netfs_n_rh_read);
-       iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
-                       subreq->start + subreq->transferred,
-                       subreq->len   - subreq->transferred);
-
-       cres->ops->read(cres, subreq->start, &iter, read_hole,
+       cres->ops->read(cres, subreq->start, &subreq->io_iter, read_hole,
                        netfs_cache_read_terminated, subreq);
 }
 
@@ -88,6 +78,13 @@ static void netfs_read_from_server(struct netfs_io_request *rreq,
                                   struct netfs_io_subrequest *subreq)
 {
        netfs_stat(&netfs_n_rh_download);
+
+       if (rreq->origin != NETFS_DIO_READ &&
+           iov_iter_count(&subreq->io_iter) != subreq->len - subreq->transferred)
+               pr_warn("R=%08x[%u] ITER PRE-MISMATCH %zx != %zx-%zx %lx\n",
+                       rreq->debug_id, subreq->debug_index,
+                       iov_iter_count(&subreq->io_iter), subreq->len,
+                       subreq->transferred, subreq->flags);
        rreq->netfs_ops->issue_read(subreq);
 }
 
@@ -127,9 +124,10 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
                        /* We might have multiple writes from the same huge
                         * folio, but we mustn't unlock a folio more than once.
                         */
-                       if (have_unlocked && folio_index(folio) <= unlocked)
+                       if (have_unlocked && folio->index <= unlocked)
                                continue;
-                       unlocked = folio_index(folio);
+                       unlocked = folio_next_index(folio) - 1;
+                       trace_netfs_folio(folio, netfs_folio_trace_end_copy);
                        folio_end_fscache(folio);
                        have_unlocked = true;
                }
@@ -201,7 +199,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
                }
 
                ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
-                                              rreq->i_size, true);
+                                              subreq->len, rreq->i_size, true);
                if (ret < 0) {
                        trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
                        trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
@@ -259,6 +257,30 @@ static void netfs_rreq_short_read(struct netfs_io_request *rreq,
                netfs_read_from_server(rreq, subreq);
 }
 
+/*
+ * Reset the subrequest iterator prior to resubmission.
+ */
+static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
+                                   struct netfs_io_subrequest *subreq)
+{
+       size_t remaining = subreq->len - subreq->transferred;
+       size_t count = iov_iter_count(&subreq->io_iter);
+
+       if (count == remaining)
+               return;
+
+       _debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
+              rreq->debug_id, subreq->debug_index,
+              iov_iter_count(&subreq->io_iter), subreq->transferred,
+              subreq->len, rreq->i_size,
+              subreq->io_iter.iter_type);
+
+       if (count < remaining)
+               iov_iter_revert(&subreq->io_iter, remaining - count);
+       else
+               iov_iter_advance(&subreq->io_iter, count - remaining);
+}
+
 /*
  * Resubmit any short or failed operations.  Returns true if we got the rreq
  * ref back.
@@ -287,6 +309,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
                        trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
                        netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
                        atomic_inc(&rreq->nr_outstanding);
+                       netfs_reset_subreq_iter(rreq, subreq);
                        netfs_read_from_server(rreq, subreq);
                } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
                        netfs_rreq_short_read(rreq, subreq);
@@ -320,6 +343,43 @@ static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
        }
 }
 
+/*
+ * Determine how much we can admit to having read from a DIO read.
+ */
+static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
+{
+       struct netfs_io_subrequest *subreq;
+       unsigned int i;
+       size_t transferred = 0;
+
+       for (i = 0; i < rreq->direct_bv_count; i++)
+               flush_dcache_page(rreq->direct_bv[i].bv_page);
+
+       list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+               if (subreq->error || subreq->transferred == 0)
+                       break;
+               transferred += subreq->transferred;
+               if (subreq->transferred < subreq->len)
+                       break;
+       }
+
+       for (i = 0; i < rreq->direct_bv_count; i++)
+               flush_dcache_page(rreq->direct_bv[i].bv_page);
+
+       rreq->transferred = transferred;
+       task_io_account_read(transferred);
+
+       if (rreq->iocb) {
+               rreq->iocb->ki_pos += transferred;
+               if (rreq->iocb->ki_complete)
+                       rreq->iocb->ki_complete(
+                               rreq->iocb, rreq->error ? rreq->error : transferred);
+       }
+       if (rreq->netfs_ops->done)
+               rreq->netfs_ops->done(rreq);
+       inode_dio_end(rreq->inode);
+}
+
 /*
  * Assess the state of a read request and decide what to do next.
  *
@@ -340,8 +400,12 @@ again:
                return;
        }
 
-       netfs_rreq_unlock_folios(rreq);
+       if (rreq->origin != NETFS_DIO_READ)
+               netfs_rreq_unlock_folios(rreq);
+       else
+               netfs_rreq_assess_dio(rreq);
 
+       trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
        clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
@@ -399,9 +463,9 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
        struct netfs_io_request *rreq = subreq->rreq;
        int u;
 
-       _enter("[%u]{%llx,%lx},%zd",
-              subreq->debug_index, subreq->start, subreq->flags,
-              transferred_or_error);
+       _enter("R=%x[%x]{%llx,%lx},%zd",
+              rreq->debug_id, subreq->debug_index,
+              subreq->start, subreq->flags, transferred_or_error);
 
        switch (subreq->source) {
        case NETFS_READ_FROM_CACHE:
@@ -501,15 +565,20 @@ static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest
  */
 static enum netfs_io_source
 netfs_rreq_prepare_read(struct netfs_io_request *rreq,
-                       struct netfs_io_subrequest *subreq)
+                       struct netfs_io_subrequest *subreq,
+                       struct iov_iter *io_iter)
 {
-       enum netfs_io_source source;
+       enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
+       struct netfs_inode *ictx = netfs_inode(rreq->inode);
+       size_t lsize;
 
        _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
 
-       source = netfs_cache_prepare_read(subreq, rreq->i_size);
-       if (source == NETFS_INVALID_READ)
-               goto out;
+       if (rreq->origin != NETFS_DIO_READ) {
+               source = netfs_cache_prepare_read(subreq, rreq->i_size);
+               if (source == NETFS_INVALID_READ)
+                       goto out;
+       }
 
        if (source == NETFS_DOWNLOAD_FROM_SERVER) {
                /* Call out to the netfs to let it shrink the request to fit
@@ -518,19 +587,52 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
                 * to make serial calls, it can indicate a short read and then
                 * we will call it again.
                 */
+               if (rreq->origin != NETFS_DIO_READ) {
+                       if (subreq->start >= ictx->zero_point) {
+                               source = NETFS_FILL_WITH_ZEROES;
+                               goto set;
+                       }
+                       if (subreq->len > ictx->zero_point - subreq->start)
+                               subreq->len = ictx->zero_point - subreq->start;
+               }
                if (subreq->len > rreq->i_size - subreq->start)
                        subreq->len = rreq->i_size - subreq->start;
+               if (rreq->rsize && subreq->len > rreq->rsize)
+                       subreq->len = rreq->rsize;
 
                if (rreq->netfs_ops->clamp_length &&
                    !rreq->netfs_ops->clamp_length(subreq)) {
                        source = NETFS_INVALID_READ;
                        goto out;
                }
+
+               if (subreq->max_nr_segs) {
+                       lsize = netfs_limit_iter(io_iter, 0, subreq->len,
+                                                subreq->max_nr_segs);
+                       if (subreq->len > lsize) {
+                               subreq->len = lsize;
+                               trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
+                       }
+               }
        }
 
-       if (WARN_ON(subreq->len == 0))
+set:
+       if (subreq->len > rreq->len)
+               pr_warn("R=%08x[%u] SREQ>RREQ %zx > %zx\n",
+                       rreq->debug_id, subreq->debug_index,
+                       subreq->len, rreq->len);
+
+       if (WARN_ON(subreq->len == 0)) {
                source = NETFS_INVALID_READ;
+               goto out;
+       }
 
+       subreq->source = source;
+       trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+
+       subreq->io_iter = *io_iter;
+       iov_iter_truncate(&subreq->io_iter, subreq->len);
+       iov_iter_advance(io_iter, subreq->len);
 out:
        subreq->source = source;
        trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
@@ -541,6 +643,7 @@ out:
  * Slice off a piece of a read request and submit an I/O request for it.
  */
 static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
+                                   struct iov_iter *io_iter,
                                    unsigned int *_debug_index)
 {
        struct netfs_io_subrequest *subreq;
@@ -552,7 +655,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
 
        subreq->debug_index     = (*_debug_index)++;
        subreq->start           = rreq->start + rreq->submitted;
-       subreq->len             = rreq->len   - rreq->submitted;
+       subreq->len             = io_iter->count;
 
        _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
        list_add_tail(&subreq->rreq_link, &rreq->subrequests);
@@ -565,7 +668,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
         * (the starts must coincide), in which case, we go around the loop
         * again and ask it to download the next piece.
         */
-       source = netfs_rreq_prepare_read(rreq, subreq);
+       source = netfs_rreq_prepare_read(rreq, subreq, io_iter);
        if (source == NETFS_INVALID_READ)
                goto subreq_failed;
 
@@ -603,6 +706,7 @@ subreq_failed:
  */
 int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 {
+       struct iov_iter io_iter;
        unsigned int debug_index = 0;
        int ret;
 
@@ -611,50 +715,71 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 
        if (rreq->len == 0) {
                pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
-               netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
                return -EIO;
        }
 
-       INIT_WORK(&rreq->work, netfs_rreq_work);
+       if (rreq->origin == NETFS_DIO_READ)
+               inode_dio_begin(rreq->inode);
 
-       if (sync)
-               netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+       // TODO: Use bounce buffer if requested
+       rreq->io_iter = rreq->iter;
+
+       INIT_WORK(&rreq->work, netfs_rreq_work);
 
        /* Chop the read into slices according to what the cache and the netfs
         * want and submit each one.
         */
+       netfs_get_request(rreq, netfs_rreq_trace_get_for_outstanding);
        atomic_set(&rreq->nr_outstanding, 1);
+       io_iter = rreq->io_iter;
        do {
-               if (!netfs_rreq_submit_slice(rreq, &debug_index))
+               _debug("submit %llx + %zx >= %llx",
+                      rreq->start, rreq->submitted, rreq->i_size);
+               if (rreq->origin == NETFS_DIO_READ &&
+                   rreq->start + rreq->submitted >= rreq->i_size)
+                       break;
+               if (!netfs_rreq_submit_slice(rreq, &io_iter, &debug_index))
+                       break;
+               if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
+                   test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
                        break;
 
        } while (rreq->submitted < rreq->len);
 
+       if (!rreq->submitted) {
+               netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
+               ret = 0;
+               goto out;
+       }
+
        if (sync) {
-               /* Keep nr_outstanding incremented so that the ref always belongs to
-                * us, and the service code isn't punted off to a random thread pool to
-                * process.
+               /* Keep nr_outstanding incremented so that the ref always
+                * belongs to us, and the service code isn't punted off to a
+                * random thread pool to process.  Note that this might start
+                * further work, such as writing to the cache.
                 */
-               for (;;) {
-                       wait_var_event(&rreq->nr_outstanding,
-                                      atomic_read(&rreq->nr_outstanding) == 1);
+               wait_var_event(&rreq->nr_outstanding,
+                              atomic_read(&rreq->nr_outstanding) == 1);
+               if (atomic_dec_and_test(&rreq->nr_outstanding))
                        netfs_rreq_assess(rreq, false);
-                       if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-                               break;
-                       cond_resched();
-               }
+
+               trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
+               wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
+                           TASK_UNINTERRUPTIBLE);
 
                ret = rreq->error;
-               if (ret == 0 && rreq->submitted < rreq->len) {
+               if (ret == 0 && rreq->submitted < rreq->len &&
+                   rreq->origin != NETFS_DIO_READ) {
                        trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
                        ret = -EIO;
                }
-               netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
        } else {
                /* If we decrement nr_outstanding to 0, the ref belongs to us. */
                if (atomic_dec_and_test(&rreq->nr_outstanding))
                        netfs_rreq_assess(rreq, false);
-               ret = 0;
+               ret = -EIOCBQUEUED;
        }
+
+out:
        return ret;
 }
index 2ff07ba655a072b3c0e31c6bf473a7a80ea0c24f..b781bbbf1d8d643727e4710358e4211face70bd1 100644 (file)
@@ -101,3 +101,100 @@ ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
        return npages;
 }
 EXPORT_SYMBOL_GPL(netfs_extract_user_iter);
+
+/*
+ * Select the span of a bvec iterator we're going to use.  Limit it by both maximum
+ * size and maximum number of segments.  Returns the size of the span in bytes.
+ */
+static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
+                              size_t max_size, size_t max_segs)
+{
+       const struct bio_vec *bvecs = iter->bvec;
+       unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
+       size_t len, span = 0, n = iter->count;
+       size_t skip = iter->iov_offset + start_offset;
+
+       if (WARN_ON(!iov_iter_is_bvec(iter)) ||
+           WARN_ON(start_offset > n) ||
+           n == 0)
+               return 0;
+
+       while (n && ix < nbv && skip) {
+               len = bvecs[ix].bv_len;
+               if (skip < len)
+                       break;
+               skip -= len;
+               n -= len;
+               ix++;
+       }
+
+       while (n && ix < nbv) {
+               len = min3(n, bvecs[ix].bv_len - skip, max_size);
+               span += len;
+               nsegs++;
+               ix++;
+               if (span >= max_size || nsegs >= max_segs)
+                       break;
+               skip = 0;
+               n -= len;
+       }
+
+       return min(span, max_size);
+}
+
+/*
+ * Select the span of an xarray iterator we're going to use.  Limit it by both
+ * maximum size and maximum number of segments.  It is assumed that segments
+ * can be larger than a page in size, provided they're physically contiguous.
+ * Returns the size of the span in bytes.
+ */
+static size_t netfs_limit_xarray(const struct iov_iter *iter, size_t start_offset,
+                                size_t max_size, size_t max_segs)
+{
+       struct folio *folio;
+       unsigned int nsegs = 0;
+       loff_t pos = iter->xarray_start + iter->iov_offset;
+       pgoff_t index = pos / PAGE_SIZE;
+       size_t span = 0, n = iter->count;
+
+       XA_STATE(xas, iter->xarray, index);
+
+       if (WARN_ON(!iov_iter_is_xarray(iter)) ||
+           WARN_ON(start_offset > n) ||
+           n == 0)
+               return 0;
+       max_size = min(max_size, n - start_offset);
+
+       rcu_read_lock();
+       xas_for_each(&xas, folio, ULONG_MAX) {
+               size_t offset, flen, len;
+               if (xas_retry(&xas, folio))
+                       continue;
+               if (WARN_ON(xa_is_value(folio)))
+                       break;
+               if (WARN_ON(folio_test_hugetlb(folio)))
+                       break;
+
+               flen = folio_size(folio);
+               offset = offset_in_folio(folio, pos);
+               len = min(max_size, flen - offset);
+               span += len;
+               nsegs++;
+               if (span >= max_size || nsegs >= max_segs)
+                       break;
+       }
+
+       rcu_read_unlock();
+       return min(span, max_size);
+}
+
+size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+                       size_t max_size, size_t max_segs)
+{
+       if (iov_iter_is_bvec(iter))
+               return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
+       if (iov_iter_is_xarray(iter))
+               return netfs_limit_xarray(iter, start_offset, max_size, max_segs);
+       BUG();
+}
+EXPORT_SYMBOL(netfs_limit_iter);
diff --git a/fs/netfs/locking.c b/fs/netfs/locking.c
new file mode 100644 (file)
index 0000000..75dc52a
--- /dev/null
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I/O and data path helper functionality.
+ *
+ * Borrowed from NFS Copyright (c) 2016 Trond Myklebust
+ */
+
+#include <linux/kernel.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+/*
+ * inode_dio_wait_interruptible - wait for outstanding DIO requests to finish
+ * @inode: inode to wait for
+ *
+ * Waits for all pending direct I/O requests to finish so that we can
+ * proceed with a truncate or equivalent operation.
+ *
+ * Must be called under a lock that serializes taking new references
+ * to i_dio_count, usually by inode->i_mutex.
+ */
+static int inode_dio_wait_interruptible(struct inode *inode)
+{
+       if (!atomic_read(&inode->i_dio_count))
+               return 0;
+
+       wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
+       DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
+
+       for (;;) {
+               prepare_to_wait(wq, &q.wq_entry, TASK_INTERRUPTIBLE);
+               if (!atomic_read(&inode->i_dio_count))
+                       break;
+               if (signal_pending(current))
+                       break;
+               schedule();
+       }
+       finish_wait(wq, &q.wq_entry);
+
+       return atomic_read(&inode->i_dio_count) ? -ERESTARTSYS : 0;
+}
+
+/* Call with exclusively locked inode->i_rwsem */
+static int netfs_block_o_direct(struct netfs_inode *ictx)
+{
+       if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags))
+               return 0;
+       clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
+       return inode_dio_wait_interruptible(&ictx->inode);
+}
+
+/**
+ * netfs_start_io_read - declare the file is being used for buffered reads
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is about to start, and ensure
+ * that we block all direct I/O.
+ * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is unset,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that buffered read operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas direct I/O
+ * operations need to wait to grab an exclusive lock in order to set
+ * NETFS_ICTX_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
+ */
+int netfs_start_io_read(struct inode *inode)
+       __acquires(inode->i_rwsem)
+{
+       struct netfs_inode *ictx = netfs_inode(inode);
+
+       /* Be an optimist! */
+       if (down_read_interruptible(&inode->i_rwsem) < 0)
+               return -ERESTARTSYS;
+       if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) == 0)
+               return 0;
+       up_read(&inode->i_rwsem);
+
+       /* Slow path.... */
+       if (down_write_killable(&inode->i_rwsem) < 0)
+               return -ERESTARTSYS;
+       if (netfs_block_o_direct(ictx) < 0) {
+               up_write(&inode->i_rwsem);
+               return -ERESTARTSYS;
+       }
+       downgrade_write(&inode->i_rwsem);
+       return 0;
+}
+EXPORT_SYMBOL(netfs_start_io_read);
+
+/**
+ * netfs_end_io_read - declare that the buffered read operation is done
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void netfs_end_io_read(struct inode *inode)
+       __releases(inode->i_rwsem)
+{
+       up_read(&inode->i_rwsem);
+}
+EXPORT_SYMBOL(netfs_end_io_read);
+
+/**
+ * netfs_start_io_write - declare the file is being used for buffered writes
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is about to start, and ensure
+ * that we block all direct I/O.
+ */
+int netfs_start_io_write(struct inode *inode)
+       __acquires(inode->i_rwsem)
+{
+       struct netfs_inode *ictx = netfs_inode(inode);
+
+       if (down_write_killable(&inode->i_rwsem) < 0)
+               return -ERESTARTSYS;
+       if (netfs_block_o_direct(ictx) < 0) {
+               up_write(&inode->i_rwsem);
+               return -ERESTARTSYS;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(netfs_start_io_write);
+
+/**
+ * netfs_end_io_write - declare that the buffered write operation is done
+ * @inode: file inode
+ *
+ * Declare that a buffered write operation is done, and release the
+ * lock on inode->i_rwsem.
+ */
+void netfs_end_io_write(struct inode *inode)
+       __releases(inode->i_rwsem)
+{
+       up_write(&inode->i_rwsem);
+}
+EXPORT_SYMBOL(netfs_end_io_write);
+
+/* Call with exclusively locked inode->i_rwsem */
+static int netfs_block_buffered(struct inode *inode)
+{
+       struct netfs_inode *ictx = netfs_inode(inode);
+       int ret;
+
+       if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags)) {
+               set_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
+               if (inode->i_mapping->nrpages != 0) {
+                       unmap_mapping_range(inode->i_mapping, 0, 0, 0);
+                       ret = filemap_fdatawait(inode->i_mapping);
+                       if (ret < 0) {
+                               clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
+                               return ret;
+                       }
+               }
+       }
+       return 0;
+}
+
+/**
+ * netfs_start_io_direct - declare the file is being used for direct i/o
+ * @inode: file inode
+ *
+ * Declare that a direct I/O operation is about to start, and ensure
+ * that we block all buffered I/O.
+ * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is set,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that direct I/O operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas buffered I/O
+ * operations need to wait to grab an exclusive lock in order to clear
+ * NETFS_ICTX_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
+ */
+int netfs_start_io_direct(struct inode *inode)
+       __acquires(inode->i_rwsem)
+{
+       struct netfs_inode *ictx = netfs_inode(inode);
+       int ret;
+
+       /* Be an optimist! */
+       if (down_read_interruptible(&inode->i_rwsem) < 0)
+               return -ERESTARTSYS;
+       if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) != 0)
+               return 0;
+       up_read(&inode->i_rwsem);
+
+       /* Slow path.... */
+       if (down_write_killable(&inode->i_rwsem) < 0)
+               return -ERESTARTSYS;
+       ret = netfs_block_buffered(inode);
+       if (ret < 0) {
+               up_write(&inode->i_rwsem);
+               return ret;
+       }
+       downgrade_write(&inode->i_rwsem);
+       return 0;
+}
+EXPORT_SYMBOL(netfs_start_io_direct);
+
+/**
+ * netfs_end_io_direct - declare that the direct i/o operation is done
+ * @inode: file inode
+ *
+ * Declare that a direct I/O operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void netfs_end_io_direct(struct inode *inode)
+       __releases(inode->i_rwsem)
+{
+       up_read(&inode->i_rwsem);
+}
+EXPORT_SYMBOL(netfs_end_io_direct);
index 068568702957e867d539b210e136aa2e66ad3746..5e77618a79409c253ab21aa51c186a07f691f356 100644 (file)
@@ -7,6 +7,8 @@
 
 #include <linux/module.h>
 #include <linux/export.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include "internal.h"
 #define CREATE_TRACE_POINTS
 #include <trace/events/netfs.h>
@@ -15,6 +17,113 @@ MODULE_DESCRIPTION("Network fs support");
 MODULE_AUTHOR("Red Hat, Inc.");
 MODULE_LICENSE("GPL");
 
+EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);
+
 unsigned netfs_debug;
 module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
+
+#ifdef CONFIG_PROC_FS
+LIST_HEAD(netfs_io_requests);
+DEFINE_SPINLOCK(netfs_proc_lock);
+
+static const char *netfs_origins[nr__netfs_io_origin] = {
+       [NETFS_READAHEAD]               = "RA",
+       [NETFS_READPAGE]                = "RP",
+       [NETFS_READ_FOR_WRITE]          = "RW",
+       [NETFS_WRITEBACK]               = "WB",
+       [NETFS_WRITETHROUGH]            = "WT",
+       [NETFS_LAUNDER_WRITE]           = "LW",
+       [NETFS_UNBUFFERED_WRITE]        = "UW",
+       [NETFS_DIO_READ]                = "DR",
+       [NETFS_DIO_WRITE]               = "DW",
+};
+
+/*
+ * Generate a list of I/O requests in /proc/fs/netfs/requests
+ */
+static int netfs_requests_seq_show(struct seq_file *m, void *v)
+{
+       struct netfs_io_request *rreq;
+
+       if (v == &netfs_io_requests) {
+               seq_puts(m,
+                        "REQUEST  OR REF FL ERR  OPS COVERAGE\n"
+                        "======== == === == ==== === =========\n"
+                        );
+               return 0;
+       }
+
+       rreq = list_entry(v, struct netfs_io_request, proc_link);
+       seq_printf(m,
+                  "%08x %s %3d %2lx %4d %3d @%04llx %zx/%zx",
+                  rreq->debug_id,
+                  netfs_origins[rreq->origin],
+                  refcount_read(&rreq->ref),
+                  rreq->flags,
+                  rreq->error,
+                  atomic_read(&rreq->nr_outstanding),
+                  rreq->start, rreq->submitted, rreq->len);
+       seq_putc(m, '\n');
+       return 0;
+}
+
+static void *netfs_requests_seq_start(struct seq_file *m, loff_t *_pos)
+       __acquires(rcu)
+{
+       rcu_read_lock();
+       return seq_list_start_head(&netfs_io_requests, *_pos);
+}
+
+static void *netfs_requests_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+       return seq_list_next(v, &netfs_io_requests, _pos);
+}
+
+static void netfs_requests_seq_stop(struct seq_file *m, void *v)
+       __releases(rcu)
+{
+       rcu_read_unlock();
+}
+
+static const struct seq_operations netfs_requests_seq_ops = {
+       .start  = netfs_requests_seq_start,
+       .next   = netfs_requests_seq_next,
+       .stop   = netfs_requests_seq_stop,
+       .show   = netfs_requests_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
+
+static int __init netfs_init(void)
+{
+       int ret = -ENOMEM;
+
+       if (!proc_mkdir("fs/netfs", NULL))
+               goto error;
+       if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
+                            &netfs_requests_seq_ops))
+               goto error_proc;
+#ifdef CONFIG_FSCACHE_STATS
+       if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
+                               netfs_stats_show))
+               goto error_proc;
+#endif
+
+       ret = fscache_init();
+       if (ret < 0)
+               goto error_proc;
+       return 0;
+
+error_proc:
+       remove_proc_entry("fs/netfs", NULL);
+error:
+       return ret;
+}
+fs_initcall(netfs_init);
+
+static void __exit netfs_exit(void)
+{
+       fscache_exit();
+       remove_proc_entry("fs/netfs", NULL);
+}
+module_exit(netfs_exit);
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
new file mode 100644 (file)
index 0000000..90051ce
--- /dev/null
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Miscellaneous routines.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/swap.h>
+#include "internal.h"
+
+/*
+ * Attach a folio to the buffer and maybe set marks on it to say that we need
+ * to put the folio later and twiddle the pagecache flags.
+ */
+int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
+                           struct folio *folio, unsigned int flags,
+                           gfp_t gfp_mask)
+{
+       XA_STATE_ORDER(xas, xa, index, folio_order(folio));
+
+retry:
+       xas_lock(&xas);
+       for (;;) {
+               xas_store(&xas, folio);
+               if (!xas_error(&xas))
+                       break;
+               xas_unlock(&xas);
+               if (!xas_nomem(&xas, gfp_mask))
+                       return xas_error(&xas);
+               goto retry;
+       }
+
+       if (flags & NETFS_FLAG_PUT_MARK)
+               xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
+       if (flags & NETFS_FLAG_PAGECACHE_MARK)
+               xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
+       xas_unlock(&xas);
+       return xas_error(&xas);
+}
+
+/*
+ * Create the specified range of folios in the buffer attached to the read
+ * request.  The folios are marked with NETFS_BUF_PUT_MARK so that we know that
+ * these need freeing later.
+ */
+int netfs_add_folios_to_buffer(struct xarray *buffer,
+                              struct address_space *mapping,
+                              pgoff_t index, pgoff_t to, gfp_t gfp_mask)
+{
+       struct folio *folio;
+       int ret;
+
+       if (to + 1 == index) /* Page range is inclusive */
+               return 0;
+
+       do {
+               /* TODO: Figure out what order folio can be allocated here */
+               folio = filemap_alloc_folio(readahead_gfp_mask(mapping), 0);
+               if (!folio)
+                       return -ENOMEM;
+               folio->index = index;
+               ret = netfs_xa_store_and_mark(buffer, index, folio,
+                                             NETFS_FLAG_PUT_MARK, gfp_mask);
+               if (ret < 0) {
+                       folio_put(folio);
+                       return ret;
+               }
+
+               index += folio_nr_pages(folio);
+       } while (index <= to && index != 0);
+
+       return 0;
+}
+
+/*
+ * Clear an xarray buffer, putting a ref on the folios that have
+ * NETFS_BUF_PUT_MARK set.
+ */
+void netfs_clear_buffer(struct xarray *buffer)
+{
+       struct folio *folio;
+       XA_STATE(xas, buffer, 0);
+
+       rcu_read_lock();
+       xas_for_each_marked(&xas, folio, ULONG_MAX, NETFS_BUF_PUT_MARK) {
+               folio_put(folio);
+       }
+       rcu_read_unlock();
+       xa_destroy(buffer);
+}
+
+/**
+ * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
+ * @mapping: The mapping the folio belongs to.
+ * @folio: The folio being dirtied.
+ *
+ * Set the dirty flag on a folio and pin an in-use cache object in memory so
+ * that writeback can later write to it.  This is intended to be called from
+ * the filesystem's ->dirty_folio() method.
+ *
+ * Return: true if the dirty flag was set on the folio, false otherwise.
+ */
+bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
+{
+       struct inode *inode = mapping->host;
+       struct netfs_inode *ictx = netfs_inode(inode);
+       struct fscache_cookie *cookie = netfs_i_cookie(ictx);
+       bool need_use = false;
+
+       _enter("");
+
+       if (!filemap_dirty_folio(mapping, folio))
+               return false;
+       if (!fscache_cookie_valid(cookie))
+               return true;
+
+       if (!(inode->i_state & I_PINNING_NETFS_WB)) {
+               spin_lock(&inode->i_lock);
+               if (!(inode->i_state & I_PINNING_NETFS_WB)) {
+                       inode->i_state |= I_PINNING_NETFS_WB;
+                       need_use = true;
+               }
+               spin_unlock(&inode->i_lock);
+
+               if (need_use)
+                       fscache_use_cookie(cookie, true);
+       }
+       return true;
+}
+EXPORT_SYMBOL(netfs_dirty_folio);
+
+/**
+ * netfs_unpin_writeback - Unpin writeback resources
+ * @inode: The inode on which the cookie resides
+ * @wbc: The writeback control
+ *
+ * Unpin the writeback resources pinned by netfs_dirty_folio().  This is
+ * intended to be called as/by the netfs's ->write_inode() method.
+ */
+int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
+{
+       struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
+
+       if (wbc->unpinned_netfs_wb)
+               fscache_unuse_cookie(cookie, NULL, NULL);
+       return 0;
+}
+EXPORT_SYMBOL(netfs_unpin_writeback);
+
+/**
+ * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
+ * @inode: The inode to clean up
+ * @aux: Auxiliary data to apply to the inode
+ *
+ * Clear any writeback resources held by an inode when the inode is evicted.
+ * This must be called before clear_inode() is called.
+ */
+void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
+{
+       struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
+
+       if (inode->i_state & I_PINNING_NETFS_WB) {
+               loff_t i_size = i_size_read(inode);
+               fscache_unuse_cookie(cookie, aux, &i_size);
+       }
+}
+EXPORT_SYMBOL(netfs_clear_inode_writeback);
+
+/**
+ * netfs_invalidate_folio - Invalidate or partially invalidate a folio
+ * @folio: The folio being invalidated
+ * @offset: Offset of the invalidated region
+ * @length: Length of the invalidated region
+ *
+ * Invalidate part or all of a folio for a network filesystem.  The folio will
+ * be removed afterwards if the invalidated region covers the entire folio.
+ */
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+{
+       struct netfs_folio *finfo = NULL;
+       size_t flen = folio_size(folio);
+
+       _enter("{%lx},%zx,%zx", folio->index, offset, length);
+
+       folio_wait_fscache(folio);
+
+       if (!folio_test_private(folio))
+               return;
+
+       finfo = netfs_folio_info(folio);
+
+       if (offset == 0 && length >= flen)
+               goto erase_completely;
+
+       if (finfo) {
+               /* We have a partially uptodate page from a streaming write. */
+               unsigned int fstart = finfo->dirty_offset;
+               unsigned int fend = fstart + finfo->dirty_len;
+               unsigned int end = offset + length;
+
+               if (offset >= fend)
+                       return;
+               if (end <= fstart)
+                       return;
+               if (offset <= fstart && end >= fend)
+                       goto erase_completely;
+               if (offset <= fstart && end > fstart)
+                       goto reduce_len;
+               if (offset > fstart && end >= fend)
+                       goto move_start;
+               /* A partial write was split.  The caller has already zeroed
+                * it, so just absorb the hole.
+                */
+       }
+       return;
+
+erase_completely:
+       netfs_put_group(netfs_folio_group(folio));
+       folio_detach_private(folio);
+       folio_clear_uptodate(folio);
+       kfree(finfo);
+       return;
+reduce_len:
+       finfo->dirty_len = offset + length - finfo->dirty_offset;
+       return;
+move_start:
+       finfo->dirty_len -= offset - finfo->dirty_offset;
+       finfo->dirty_offset = offset;
+}
+EXPORT_SYMBOL(netfs_invalidate_folio);
+
+/**
+ * netfs_release_folio - Try to release a folio
+ * @folio: Folio proposed for release
+ * @gfp: Flags qualifying the release
+ *
+ * Request release of a folio and clean up its private state if it's not busy.
+ * Returns true if the folio can now be released, false if not
+ */
+bool netfs_release_folio(struct folio *folio, gfp_t gfp)
+{
+       struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
+       unsigned long long end;
+
+       end = folio_pos(folio) + folio_size(folio);
+       if (end > ctx->zero_point)
+               ctx->zero_point = end;
+
+       if (folio_test_private(folio))
+               return false;
+       if (folio_test_fscache(folio)) {
+               if (current_is_kswapd() || !(gfp & __GFP_FS))
+                       return false;
+               folio_wait_fscache(folio);
+       }
+
+       fscache_note_page_release(netfs_i_cookie(ctx));
+       return true;
+}
+EXPORT_SYMBOL(netfs_release_folio);
index e17cdf53f6a7883a3459c47d5695554e516f4c51..610ceb5bd86c08ba7c61905d07d19092940f44ae 100644 (file)
@@ -20,14 +20,20 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
        struct inode *inode = file ? file_inode(file) : mapping->host;
        struct netfs_inode *ctx = netfs_inode(inode);
        struct netfs_io_request *rreq;
+       bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
+                             origin == NETFS_DIO_READ ||
+                             origin == NETFS_DIO_WRITE);
+       bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
        int ret;
 
-       rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
+       rreq = kzalloc(ctx->ops->io_request_size ?: sizeof(struct netfs_io_request),
+                      GFP_KERNEL);
        if (!rreq)
                return ERR_PTR(-ENOMEM);
 
        rreq->start     = start;
        rreq->len       = len;
+       rreq->upper_len = len;
        rreq->origin    = origin;
        rreq->netfs_ops = ctx->ops;
        rreq->mapping   = mapping;
@@ -35,8 +41,14 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
        rreq->i_size    = i_size_read(inode);
        rreq->debug_id  = atomic_inc_return(&debug_ids);
        INIT_LIST_HEAD(&rreq->subrequests);
+       INIT_WORK(&rreq->work, NULL);
        refcount_set(&rreq->ref, 1);
+
        __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+       if (cached)
+               __set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
+       if (file && file->f_flags & O_NONBLOCK)
+               __set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
        if (rreq->netfs_ops->init_request) {
                ret = rreq->netfs_ops->init_request(rreq, file);
                if (ret < 0) {
@@ -45,6 +57,8 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
                }
        }
 
+       trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
+       netfs_proc_add_rreq(rreq);
        netfs_stat(&netfs_n_rh_rreq);
        return rreq;
 }
@@ -74,33 +88,47 @@ static void netfs_free_request(struct work_struct *work)
 {
        struct netfs_io_request *rreq =
                container_of(work, struct netfs_io_request, work);
+       unsigned int i;
 
        trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+       netfs_proc_del_rreq(rreq);
        netfs_clear_subrequests(rreq, false);
        if (rreq->netfs_ops->free_request)
                rreq->netfs_ops->free_request(rreq);
        if (rreq->cache_resources.ops)
                rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
-       kfree(rreq);
+       if (rreq->direct_bv) {
+               for (i = 0; i < rreq->direct_bv_count; i++) {
+                       if (rreq->direct_bv[i].bv_page) {
+                               if (rreq->direct_bv_unpin)
+                                       unpin_user_page(rreq->direct_bv[i].bv_page);
+                       }
+               }
+               kvfree(rreq->direct_bv);
+       }
+       kfree_rcu(rreq, rcu);
        netfs_stat_d(&netfs_n_rh_rreq);
 }
 
 void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
                       enum netfs_rreq_ref_trace what)
 {
-       unsigned int debug_id = rreq->debug_id;
+       unsigned int debug_id;
        bool dead;
        int r;
 
-       dead = __refcount_dec_and_test(&rreq->ref, &r);
-       trace_netfs_rreq_ref(debug_id, r - 1, what);
-       if (dead) {
-               if (was_async) {
-                       rreq->work.func = netfs_free_request;
-                       if (!queue_work(system_unbound_wq, &rreq->work))
-                               BUG();
-               } else {
-                       netfs_free_request(&rreq->work);
+       if (rreq) {
+               debug_id = rreq->debug_id;
+               dead = __refcount_dec_and_test(&rreq->ref, &r);
+               trace_netfs_rreq_ref(debug_id, r - 1, what);
+               if (dead) {
+                       if (was_async) {
+                               rreq->work.func = netfs_free_request;
+                               if (!queue_work(system_unbound_wq, &rreq->work))
+                                       BUG();
+                       } else {
+                               netfs_free_request(&rreq->work);
+                       }
                }
        }
 }
@@ -112,8 +140,11 @@ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq
 {
        struct netfs_io_subrequest *subreq;
 
-       subreq = kzalloc(sizeof(struct netfs_io_subrequest), GFP_KERNEL);
+       subreq = kzalloc(rreq->netfs_ops->io_subrequest_size ?:
+                        sizeof(struct netfs_io_subrequest),
+                        GFP_KERNEL);
        if (subreq) {
+               INIT_WORK(&subreq->work, NULL);
                INIT_LIST_HEAD(&subreq->rreq_link);
                refcount_set(&subreq->ref, 2);
                subreq->rreq = rreq;
@@ -140,6 +171,8 @@ static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
        struct netfs_io_request *rreq = subreq->rreq;
 
        trace_netfs_sreq(subreq, netfs_sreq_trace_free);
+       if (rreq->netfs_ops->free_subrequest)
+               rreq->netfs_ops->free_subrequest(subreq);
        kfree(subreq);
        netfs_stat_d(&netfs_n_rh_sreq);
        netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
diff --git a/fs/netfs/output.c b/fs/netfs/output.c
new file mode 100644 (file)
index 0000000..625eb68
--- /dev/null
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem high-level write support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/writeback.h>
+#include <linux/pagevec.h>
+#include "internal.h"
+
+/**
+ * netfs_create_write_request - Create a write operation.
+ * @wreq: The write request this is storing from.
+ * @dest: The destination type
+ * @start: Start of the region this write will modify
+ * @len: Length of the modification
+ * @worker: The worker function to handle the write(s)
+ *
+ * Allocate a write operation, set it up and add it to the list on a write
+ * request.
+ */
+struct netfs_io_subrequest *netfs_create_write_request(struct netfs_io_request *wreq,
+                                                      enum netfs_io_source dest,
+                                                      loff_t start, size_t len,
+                                                      work_func_t worker)
+{
+       struct netfs_io_subrequest *subreq;
+
+       subreq = netfs_alloc_subrequest(wreq);
+       if (subreq) {
+               INIT_WORK(&subreq->work, worker);
+               subreq->source  = dest;
+               subreq->start   = start;
+               subreq->len     = len;
+               subreq->debug_index = wreq->subreq_counter++;
+
+               switch (subreq->source) {
+               case NETFS_UPLOAD_TO_SERVER:
+                       netfs_stat(&netfs_n_wh_upload);
+                       break;
+               case NETFS_WRITE_TO_CACHE:
+                       netfs_stat(&netfs_n_wh_write);
+                       break;
+               default:
+                       BUG();
+               }
+
+               subreq->io_iter = wreq->io_iter;
+               iov_iter_advance(&subreq->io_iter, subreq->start - wreq->start);
+               iov_iter_truncate(&subreq->io_iter, subreq->len);
+
+               trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
+                                    refcount_read(&subreq->ref),
+                                    netfs_sreq_trace_new);
+               atomic_inc(&wreq->nr_outstanding);
+               list_add_tail(&subreq->rreq_link, &wreq->subrequests);
+               trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+       }
+
+       return subreq;
+}
+EXPORT_SYMBOL(netfs_create_write_request);
+
+/*
+ * Process a completed write request once all the component operations have
+ * been completed.
+ */
+static void netfs_write_terminated(struct netfs_io_request *wreq, bool was_async)
+{
+       struct netfs_io_subrequest *subreq;
+       struct netfs_inode *ctx = netfs_inode(wreq->inode);
+       size_t transferred = 0;
+
+       _enter("R=%x[]", wreq->debug_id);
+
+       trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
+
+       list_for_each_entry(subreq, &wreq->subrequests, rreq_link) {
+               if (subreq->error || subreq->transferred == 0)
+                       break;
+               transferred += subreq->transferred;
+               if (subreq->transferred < subreq->len)
+                       break;
+       }
+       wreq->transferred = transferred;
+
+       list_for_each_entry(subreq, &wreq->subrequests, rreq_link) {
+               if (!subreq->error)
+                       continue;
+               switch (subreq->source) {
+               case NETFS_UPLOAD_TO_SERVER:
+                       /* Depending on the type of failure, this may prevent
+                        * writeback completion unless we're in disconnected
+                        * mode.
+                        */
+                       if (!wreq->error)
+                               wreq->error = subreq->error;
+                       break;
+
+               case NETFS_WRITE_TO_CACHE:
+                       /* Failure doesn't prevent writeback completion unless
+                        * we're in disconnected mode.
+                        */
+                       if (subreq->error != -ENOBUFS)
+                               ctx->ops->invalidate_cache(wreq);
+                       break;
+
+               default:
+                       WARN_ON_ONCE(1);
+                       if (!wreq->error)
+                               wreq->error = -EIO;
+                       return;
+               }
+       }
+
+       wreq->cleanup(wreq);
+
+       if (wreq->origin == NETFS_DIO_WRITE &&
+           wreq->mapping->nrpages) {
+               pgoff_t first = wreq->start >> PAGE_SHIFT;
+               pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
+               invalidate_inode_pages2_range(wreq->mapping, first, last);
+       }
+
+       if (wreq->origin == NETFS_DIO_WRITE)
+               inode_dio_end(wreq->inode);
+
+       _debug("finished");
+       trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
+       clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
+       wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
+
+       if (wreq->iocb) {
+               wreq->iocb->ki_pos += transferred;
+               if (wreq->iocb->ki_complete)
+                       wreq->iocb->ki_complete(
+                               wreq->iocb, wreq->error ? wreq->error : transferred);
+       }
+
+       netfs_clear_subrequests(wreq, was_async);
+       netfs_put_request(wreq, was_async, netfs_rreq_trace_put_complete);
+}
+
+/*
+ * Deal with the completion of writing the data to the cache.
+ */
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+                                      bool was_async)
+{
+       struct netfs_io_subrequest *subreq = _op;
+       struct netfs_io_request *wreq = subreq->rreq;
+       unsigned int u;
+
+       _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
+
+       switch (subreq->source) {
+       case NETFS_UPLOAD_TO_SERVER:
+               netfs_stat(&netfs_n_wh_upload_done);
+               break;
+       case NETFS_WRITE_TO_CACHE:
+               netfs_stat(&netfs_n_wh_write_done);
+               break;
+       case NETFS_INVALID_WRITE:
+               break;
+       default:
+               BUG();
+       }
+
+       if (IS_ERR_VALUE(transferred_or_error)) {
+               subreq->error = transferred_or_error;
+               trace_netfs_failure(wreq, subreq, transferred_or_error,
+                                   netfs_fail_write);
+               goto failed;
+       }
+
+       if (WARN(transferred_or_error > subreq->len - subreq->transferred,
+                "Subreq excess write: R%x[%x] %zd > %zu - %zu",
+                wreq->debug_id, subreq->debug_index,
+                transferred_or_error, subreq->len, subreq->transferred))
+               transferred_or_error = subreq->len - subreq->transferred;
+
+       subreq->error = 0;
+       subreq->transferred += transferred_or_error;
+
+       if (iov_iter_count(&subreq->io_iter) != subreq->len - subreq->transferred)
+               pr_warn("R=%08x[%u] ITER POST-MISMATCH %zx != %zx-%zx %x\n",
+                       wreq->debug_id, subreq->debug_index,
+                       iov_iter_count(&subreq->io_iter), subreq->len,
+                       subreq->transferred, subreq->io_iter.iter_type);
+
+       if (subreq->transferred < subreq->len)
+               goto incomplete;
+
+       __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+out:
+       trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+
+       /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+       u = atomic_dec_return(&wreq->nr_outstanding);
+       if (u == 0)
+               netfs_write_terminated(wreq, was_async);
+       else if (u == 1)
+               wake_up_var(&wreq->nr_outstanding);
+
+       netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+       return;
+
+incomplete:
+       if (transferred_or_error == 0) {
+               if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
+                       subreq->error = -ENODATA;
+                       goto failed;
+               }
+       } else {
+               __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+       }
+
+       __set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
+       set_bit(NETFS_RREQ_INCOMPLETE_IO, &wreq->flags);
+       goto out;
+
+failed:
+       switch (subreq->source) {
+       case NETFS_WRITE_TO_CACHE:
+               netfs_stat(&netfs_n_wh_write_failed);
+               set_bit(NETFS_RREQ_INCOMPLETE_IO, &wreq->flags);
+               break;
+       case NETFS_UPLOAD_TO_SERVER:
+               netfs_stat(&netfs_n_wh_upload_failed);
+               set_bit(NETFS_RREQ_FAILED, &wreq->flags);
+               wreq->error = subreq->error;
+               break;
+       default:
+               break;
+       }
+       goto out;
+}
+EXPORT_SYMBOL(netfs_write_subrequest_terminated);
+
+static void netfs_write_to_cache_op(struct netfs_io_subrequest *subreq)
+{
+       struct netfs_io_request *wreq = subreq->rreq;
+       struct netfs_cache_resources *cres = &wreq->cache_resources;
+
+       trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+
+       cres->ops->write(cres, subreq->start, &subreq->io_iter,
+                        netfs_write_subrequest_terminated, subreq);
+}
+
+static void netfs_write_to_cache_op_worker(struct work_struct *work)
+{
+       struct netfs_io_subrequest *subreq =
+               container_of(work, struct netfs_io_subrequest, work);
+
+       netfs_write_to_cache_op(subreq);
+}
+
+/**
+ * netfs_queue_write_request - Queue a write request for attention
+ * @subreq: The write subrequest to be queued
+ *
+ * Queue the specified write request for processing by a worker thread.  We
+ * pass the caller's ref on the request to the worker thread.
+ */
+void netfs_queue_write_request(struct netfs_io_subrequest *subreq)
+{
+       if (!queue_work(system_unbound_wq, &subreq->work))
+               netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_wip);
+}
+EXPORT_SYMBOL(netfs_queue_write_request);
+
+/*
+ * Set up an op for writing to the cache.
+ */
+static void netfs_set_up_write_to_cache(struct netfs_io_request *wreq)
+{
+       struct netfs_cache_resources *cres = &wreq->cache_resources;
+       struct netfs_io_subrequest *subreq;
+       struct netfs_inode *ctx = netfs_inode(wreq->inode);
+       struct fscache_cookie *cookie = netfs_i_cookie(ctx);
+       loff_t start = wreq->start;
+       size_t len = wreq->len;
+       int ret;
+
+       if (!fscache_cookie_enabled(cookie)) {
+               clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags);
+               return;
+       }
+
+       _debug("write to cache");
+       ret = fscache_begin_write_operation(cres, cookie);
+       if (ret < 0)
+               return;
+
+       ret = cres->ops->prepare_write(cres, &start, &len, wreq->upper_len,
+                                      i_size_read(wreq->inode), true);
+       if (ret < 0)
+               return;
+
+       subreq = netfs_create_write_request(wreq, NETFS_WRITE_TO_CACHE, start, len,
+                                           netfs_write_to_cache_op_worker);
+       if (!subreq)
+               return;
+
+       netfs_write_to_cache_op(subreq);
+}
+
+/*
+ * Begin the process of writing out a chunk of data.
+ *
+ * We are given a write request that holds a series of dirty regions and
+ * (partially) covers a sequence of folios, all of which are present.  The
+ * pages must have been marked as writeback as appropriate.
+ *
+ * We need to perform the following steps:
+ *
+ * (1) If encrypting, create an output buffer and encrypt each block of the
+ *     data into it, otherwise the output buffer will point to the original
+ *     folios.
+ *
+ * (2) If the data is to be cached, set up a write op for the entire output
+ *     buffer to the cache, if the cache wants to accept it.
+ *
+ * (3) If the data is to be uploaded (ie. not merely cached):
+ *
+ *     (a) If the data is to be compressed, create a compression buffer and
+ *         compress the data into it.
+ *
+ *     (b) For each destination we want to upload to, set up write ops to write
+ *         to that destination.  We may need multiple writes if the data is not
+ *         contiguous or the span exceeds wsize for a server.
+ */
+int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
+                     enum netfs_write_trace what)
+{
+       struct netfs_inode *ctx = netfs_inode(wreq->inode);
+
+       _enter("R=%x %llx-%llx f=%lx",
+              wreq->debug_id, wreq->start, wreq->start + wreq->len - 1,
+              wreq->flags);
+
+       trace_netfs_write(wreq, what);
+       if (wreq->len == 0 || wreq->iter.count == 0) {
+               pr_err("Zero-sized write [R=%x]\n", wreq->debug_id);
+               return -EIO;
+       }
+
+       if (wreq->origin == NETFS_DIO_WRITE)
+               inode_dio_begin(wreq->inode);
+
+       wreq->io_iter = wreq->iter;
+
+       /* ->outstanding > 0 carries a ref */
+       netfs_get_request(wreq, netfs_rreq_trace_get_for_outstanding);
+       atomic_set(&wreq->nr_outstanding, 1);
+
+       /* Start the encryption/compression going.  We can do that in the
+        * background whilst we generate a list of write ops that we want to
+        * perform.
+        */
+       // TODO: Encrypt or compress the region as appropriate
+
+       /* We need to write all of the region to the cache */
+       if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
+               netfs_set_up_write_to_cache(wreq);
+
+       /* However, we don't necessarily write all of the region to the server.
+        * Caching of reads is being managed this way also.
+        */
+       if (test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
+               ctx->ops->create_write_requests(wreq, wreq->start, wreq->len);
+
+       if (atomic_dec_and_test(&wreq->nr_outstanding))
+               netfs_write_terminated(wreq, false);
+
+       if (!may_wait)
+               return -EIOCBQUEUED;
+
+       wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
+                   TASK_UNINTERRUPTIBLE);
+       return wreq->error;
+}
+
+/*
+ * Begin a write operation for writing through the pagecache.
+ */
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
+{
+       struct netfs_io_request *wreq;
+       struct file *file = iocb->ki_filp;
+
+       wreq = netfs_alloc_request(file->f_mapping, file, iocb->ki_pos, len,
+                                  NETFS_WRITETHROUGH);
+       if (IS_ERR(wreq))
+               return wreq;
+
+       trace_netfs_write(wreq, netfs_write_trace_writethrough);
+
+       __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+       iov_iter_xarray(&wreq->iter, ITER_SOURCE, &wreq->mapping->i_pages, wreq->start, 0);
+       wreq->io_iter = wreq->iter;
+
+       /* ->outstanding > 0 carries a ref */
+       netfs_get_request(wreq, netfs_rreq_trace_get_for_outstanding);
+       atomic_set(&wreq->nr_outstanding, 1);
+       return wreq;
+}
+
+static void netfs_submit_writethrough(struct netfs_io_request *wreq, bool final)
+{
+       struct netfs_inode *ictx = netfs_inode(wreq->inode);
+       unsigned long long start;
+       size_t len;
+
+       if (!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
+               return;
+
+       start = wreq->start + wreq->submitted;
+       len = wreq->iter.count - wreq->submitted;
+       if (!final) {
+               len /= wreq->wsize; /* Round to number of maximum packets */
+               len *= wreq->wsize;
+       }
+
+       ictx->ops->create_write_requests(wreq, start, len);
+       wreq->submitted += len;
+}
+
+/*
+ * Advance the state of the write operation used when writing through the
+ * pagecache.  Data has been copied into the pagecache that we need to append
+ * to the request.  If we've added more than wsize then we need to create a new
+ * subrequest.
+ */
+int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end)
+{
+       _enter("ic=%zu sb=%zu ws=%u cp=%zu tp=%u",
+              wreq->iter.count, wreq->submitted, wreq->wsize, copied, to_page_end);
+
+       wreq->iter.count += copied;
+       wreq->io_iter.count += copied;
+       if (to_page_end && wreq->io_iter.count - wreq->submitted >= wreq->wsize)
+               netfs_submit_writethrough(wreq, false);
+
+       return wreq->error;
+}
+
+/*
+ * End a write operation used when writing through the pagecache.
+ */
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb)
+{
+       int ret = -EIOCBQUEUED;
+
+       _enter("ic=%zu sb=%zu ws=%u",
+              wreq->iter.count, wreq->submitted, wreq->wsize);
+
+       if (wreq->submitted < wreq->io_iter.count)
+               netfs_submit_writethrough(wreq, true);
+
+       if (atomic_dec_and_test(&wreq->nr_outstanding))
+               netfs_write_terminated(wreq, false);
+
+       if (is_sync_kiocb(iocb)) {
+               wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
+                           TASK_UNINTERRUPTIBLE);
+               ret = wreq->error;
+       }
+
+       netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+       return ret;
+}
index 5510a7a14a40dda1a53d344399852d001252aaa0..deeba9f9dcf5d55f7bf0692ecdf5991334a848ea 100644 (file)
@@ -9,6 +9,8 @@
 #include <linux/seq_file.h>
 #include "internal.h"
 
+atomic_t netfs_n_rh_dio_read;
+atomic_t netfs_n_rh_dio_write;
 atomic_t netfs_n_rh_readahead;
 atomic_t netfs_n_rh_readpage;
 atomic_t netfs_n_rh_rreq;
@@ -27,32 +29,48 @@ atomic_t netfs_n_rh_write_begin;
 atomic_t netfs_n_rh_write_done;
 atomic_t netfs_n_rh_write_failed;
 atomic_t netfs_n_rh_write_zskip;
+atomic_t netfs_n_wh_wstream_conflict;
+atomic_t netfs_n_wh_upload;
+atomic_t netfs_n_wh_upload_done;
+atomic_t netfs_n_wh_upload_failed;
+atomic_t netfs_n_wh_write;
+atomic_t netfs_n_wh_write_done;
+atomic_t netfs_n_wh_write_failed;
 
-void netfs_stats_show(struct seq_file *m)
+int netfs_stats_show(struct seq_file *m, void *v)
 {
-       seq_printf(m, "RdHelp : RA=%u RP=%u WB=%u WBZ=%u rr=%u sr=%u\n",
+       seq_printf(m, "Netfs  : DR=%u DW=%u RA=%u RP=%u WB=%u WBZ=%u\n",
+                  atomic_read(&netfs_n_rh_dio_read),
+                  atomic_read(&netfs_n_rh_dio_write),
                   atomic_read(&netfs_n_rh_readahead),
                   atomic_read(&netfs_n_rh_readpage),
                   atomic_read(&netfs_n_rh_write_begin),
-                  atomic_read(&netfs_n_rh_write_zskip),
-                  atomic_read(&netfs_n_rh_rreq),
-                  atomic_read(&netfs_n_rh_sreq));
-       seq_printf(m, "RdHelp : ZR=%u sh=%u sk=%u\n",
+                  atomic_read(&netfs_n_rh_write_zskip));
+       seq_printf(m, "Netfs  : ZR=%u sh=%u sk=%u\n",
                   atomic_read(&netfs_n_rh_zero),
                   atomic_read(&netfs_n_rh_short_read),
                   atomic_read(&netfs_n_rh_write_zskip));
-       seq_printf(m, "RdHelp : DL=%u ds=%u df=%u di=%u\n",
+       seq_printf(m, "Netfs  : DL=%u ds=%u df=%u di=%u\n",
                   atomic_read(&netfs_n_rh_download),
                   atomic_read(&netfs_n_rh_download_done),
                   atomic_read(&netfs_n_rh_download_failed),
                   atomic_read(&netfs_n_rh_download_instead));
-       seq_printf(m, "RdHelp : RD=%u rs=%u rf=%u\n",
+       seq_printf(m, "Netfs  : RD=%u rs=%u rf=%u\n",
                   atomic_read(&netfs_n_rh_read),
                   atomic_read(&netfs_n_rh_read_done),
                   atomic_read(&netfs_n_rh_read_failed));
-       seq_printf(m, "RdHelp : WR=%u ws=%u wf=%u\n",
-                  atomic_read(&netfs_n_rh_write),
-                  atomic_read(&netfs_n_rh_write_done),
-                  atomic_read(&netfs_n_rh_write_failed));
+       seq_printf(m, "Netfs  : UL=%u us=%u uf=%u\n",
+                  atomic_read(&netfs_n_wh_upload),
+                  atomic_read(&netfs_n_wh_upload_done),
+                  atomic_read(&netfs_n_wh_upload_failed));
+       seq_printf(m, "Netfs  : WR=%u ws=%u wf=%u\n",
+                  atomic_read(&netfs_n_wh_write),
+                  atomic_read(&netfs_n_wh_write_done),
+                  atomic_read(&netfs_n_wh_write_failed));
+       seq_printf(m, "Netfs  : rr=%u sr=%u wsc=%u\n",
+                  atomic_read(&netfs_n_rh_rreq),
+                  atomic_read(&netfs_n_rh_sreq),
+                  atomic_read(&netfs_n_wh_wstream_conflict));
+       return fscache_stats_show(m);
 }
 EXPORT_SYMBOL(netfs_stats_show);
index 01ac733a63203a459a994a0ec9df8d6006fcb875..f7e32d76e34d74b76aba8f6d31bcdb95a310f2f1 100644 (file)
@@ -169,8 +169,8 @@ config ROOT_NFS
 
 config NFS_FSCACHE
        bool "Provide NFS client caching support"
-       depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y
-       select NETFS_SUPPORT
+       depends on NFS_FS=m && NETFS_SUPPORT || NFS_FS=y && NETFS_SUPPORT=y
+       select FSCACHE
        help
          Say Y here if you want NFS data to be cached locally on disc through
          the general filesystem cache manager
index b05717fe0d4e4f5b505f98e52e25273fe216b325..2d1bfee225c3693d4443c62463944ecf04439bca 100644 (file)
@@ -274,12 +274,6 @@ static void nfs_netfs_free_request(struct netfs_io_request *rreq)
        put_nfs_open_context(rreq->netfs_priv);
 }
 
-static inline int nfs_netfs_begin_cache_operation(struct netfs_io_request *rreq)
-{
-       return fscache_begin_read_operation(&rreq->cache_resources,
-                                           netfs_i_cookie(netfs_inode(rreq->inode)));
-}
-
 static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
 {
        struct nfs_netfs_io_data *netfs;
@@ -387,7 +381,6 @@ void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
 const struct netfs_request_ops nfs_netfs_ops = {
        .init_request           = nfs_netfs_init_request,
        .free_request           = nfs_netfs_free_request,
-       .begin_cache_operation  = nfs_netfs_begin_cache_operation,
        .issue_read             = nfs_netfs_issue_read,
        .clamp_length           = nfs_netfs_clamp_length
 };
index 5407ab8c8783574da8c83e0e752f8d3640bfb8d7..e3cb4923316b2cc044fd87ef6399c867ba19663c 100644 (file)
@@ -80,7 +80,7 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
 }
 static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
 {
-       netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops);
+       netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false);
 }
 extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
 extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
index 2fa54cfd4882307e87e9c109070ecdc25a3db401..7d6c657e0409ddc62567554304e4a51779dd2934 100644 (file)
@@ -4945,10 +4945,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
         */
        fl->fl_break_time = 0;
 
-       spin_lock(&fp->fi_lock);
        fp->fi_had_conflict = true;
        nfsd_break_one_deleg(dp);
-       spin_unlock(&fp->fi_lock);
        return false;
 }
 
@@ -5557,12 +5555,13 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
        if (status)
                goto out_unlock;
 
+       status = -EAGAIN;
+       if (fp->fi_had_conflict)
+               goto out_unlock;
+
        spin_lock(&state_lock);
        spin_lock(&fp->fi_lock);
-       if (fp->fi_had_conflict)
-               status = -EAGAIN;
-       else
-               status = hash_delegation_locked(dp, fp);
+       status = hash_delegation_locked(dp, fp);
        spin_unlock(&fp->fi_lock);
        spin_unlock(&state_lock);
 
@@ -7911,14 +7910,16 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
 {
        struct file_lock *fl;
        int status = false;
-       struct nfsd_file *nf = find_any_file(fp);
+       struct nfsd_file *nf;
        struct inode *inode;
        struct file_lock_context *flctx;
 
+       spin_lock(&fp->fi_lock);
+       nf = find_any_file_locked(fp);
        if (!nf) {
                /* Any valid lock stateid should have some sort of access */
                WARN_ON_ONCE(1);
-               return status;
+               goto out;
        }
 
        inode = file_inode(nf->nf_file);
@@ -7934,7 +7935,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
                }
                spin_unlock(&flctx->flc_lock);
        }
-       nfsd_file_put(nf);
+out:
+       spin_unlock(&fp->fi_lock);
        return status;
 }
 
@@ -7944,10 +7946,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
  * @cstate: NFSv4 COMPOUND state
  * @u: RELEASE_LOCKOWNER arguments
  *
- * The lockowner's so_count is bumped when a lock record is added
- * or when copying a conflicting lock. The latter case is brief,
- * but can lead to fleeting false positives when looking for
- * locks-in-use.
+ * Check if there are any locks still held and if not - free the lockowner
+ * and any lock state that is owned.
  *
  * Return values:
  *   %nfs_ok: lockowner released or not found
@@ -7983,10 +7983,13 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
                spin_unlock(&clp->cl_lock);
                return nfs_ok;
        }
-       if (atomic_read(&lo->lo_owner.so_count) != 2) {
-               spin_unlock(&clp->cl_lock);
-               nfs4_put_stateowner(&lo->lo_owner);
-               return nfserr_locks_held;
+
+       list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
+               if (check_for_locks(stp->st_stid.sc_file, lo)) {
+                       spin_unlock(&clp->cl_lock);
+                       nfs4_put_stateowner(&lo->lo_owner);
+                       return nfserr_locks_held;
+               }
        }
        unhash_lockowner_locked(lo);
        while (!list_empty(&lo->lo_owner.so_stateids)) {
index bec33b89a075858ebf289a95fa4c83dbf6e86103..0e3fc5ba33c73d7f22deefc1cb68ee8395a1efa4 100644 (file)
@@ -107,7 +107,13 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
        nilfs_transaction_commit(inode->i_sb);
 
  mapped:
-       folio_wait_stable(folio);
+       /*
+        * Since checksumming including data blocks is performed to determine
+        * the validity of the log to be written and used for recovery, it is
+        * necessary to wait for writeback to finish here, regardless of the
+        * stable write requirement of the backing device.
+        */
+       folio_wait_writeback(folio);
  out:
        sb_end_pagefault(inode->i_sb);
        return vmf_fs_error(ret);
index 0955b657938ff2ce993d92e7d8f81322ce71c2e1..a9b8d77c8c1d55b551582b826dafdcdcd047d13a 100644 (file)
@@ -472,9 +472,10 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
 
 static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
                                     struct nilfs_recovery_block *rb,
-                                    struct page *page)
+                                    loff_t pos, struct page *page)
 {
        struct buffer_head *bh_org;
+       size_t from = pos & ~PAGE_MASK;
        void *kaddr;
 
        bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
@@ -482,7 +483,7 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
                return -EIO;
 
        kaddr = kmap_atomic(page);
-       memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
+       memcpy(kaddr + from, bh_org->b_data, bh_org->b_size);
        kunmap_atomic(kaddr);
        brelse(bh_org);
        return 0;
@@ -521,7 +522,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
                        goto failed_inode;
                }
 
-               err = nilfs_recovery_copy_block(nilfs, rb, page);
+               err = nilfs_recovery_copy_block(nilfs, rb, pos, page);
                if (unlikely(err))
                        goto failed_page;
 
index 2590a0860eab022ba68a18b9db8fe181faab5069..2bfb08052d399972dee9fd49583b77b95104ac83 100644 (file)
@@ -1703,7 +1703,6 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
-                       set_buffer_async_write(bh);
                        if (bh == segbuf->sb_super_root) {
                                if (bh->b_folio != bd_folio) {
                                        folio_lock(bd_folio);
@@ -1714,6 +1713,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
                                }
                                break;
                        }
+                       set_buffer_async_write(bh);
                        if (bh->b_folio != fs_folio) {
                                nilfs_begin_folio_io(fs_folio);
                                fs_folio = bh->b_folio;
@@ -1800,7 +1800,6 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
 
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
-                       clear_buffer_async_write(bh);
                        if (bh == segbuf->sb_super_root) {
                                clear_buffer_uptodate(bh);
                                if (bh->b_folio != bd_folio) {
@@ -1809,6 +1808,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
                                }
                                break;
                        }
+                       clear_buffer_async_write(bh);
                        if (bh->b_folio != fs_folio) {
                                nilfs_end_folio_io(fs_folio, err);
                                fs_folio = bh->b_folio;
@@ -1896,8 +1896,9 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
                                 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
                                 BIT(BH_NILFS_Redirected));
 
-                       set_mask_bits(&bh->b_state, clear_bits, set_bits);
                        if (bh == segbuf->sb_super_root) {
+                               set_buffer_uptodate(bh);
+                               clear_buffer_dirty(bh);
                                if (bh->b_folio != bd_folio) {
                                        folio_end_writeback(bd_folio);
                                        bd_folio = bh->b_folio;
@@ -1905,6 +1906,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
                                update_sr = true;
                                break;
                        }
+                       set_mask_bits(&bh->b_state, clear_bits, set_bits);
                        if (bh->b_folio != fs_folio) {
                                nilfs_end_folio_io(fs_folio, 0);
                                fs_folio = bh->b_folio;
index 63f70259edc0d44d6a52c946c9a90663fe16722a..7aadf5010999455e4d16e20581e2334034451e57 100644 (file)
@@ -886,7 +886,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
        struct runs_tree *run = &ni->file.run;
        struct ntfs_sb_info *sbi;
        u8 cluster_bits;
-       struct ATTRIB *attr = NULL, *attr_b;
+       struct ATTRIB *attr, *attr_b;
        struct ATTR_LIST_ENTRY *le, *le_b;
        struct mft_inode *mi, *mi_b;
        CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
@@ -904,12 +904,8 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
                *len = 0;
        up_read(&ni->file.run_lock);
 
-       if (*len) {
-               if (*lcn != SPARSE_LCN || !new)
-                       return 0; /* Fast normal way without allocation. */
-               else if (clen > *len)
-                       clen = *len;
-       }
+       if (*len && (*lcn != SPARSE_LCN || !new))
+               return 0; /* Fast normal way without allocation. */
 
        /* No cluster in cache or we need to allocate cluster in hole. */
        sbi = ni->mi.sbi;
@@ -918,6 +914,17 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
        ni_lock(ni);
        down_write(&ni->file.run_lock);
 
+       /* Repeat the code above (under write lock). */
+       if (!run_lookup_entry(run, vcn, lcn, len, NULL))
+               *len = 0;
+
+       if (*len) {
+               if (*lcn != SPARSE_LCN || !new)
+                       goto out; /* normal way without allocation. */
+               if (clen > *len)
+                       clen = *len;
+       }
+
        le_b = NULL;
        attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
        if (!attr_b) {
@@ -1736,8 +1743,10 @@ repack:
                        le_b = NULL;
                        attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
                                              0, NULL, &mi_b);
-                       if (!attr_b)
-                               return -ENOENT;
+                       if (!attr_b) {
+                               err = -ENOENT;
+                               goto out;
+                       }
 
                        attr = attr_b;
                        le = le_b;
@@ -1818,13 +1827,15 @@ ins_ext:
 ok:
        run_truncate_around(run, vcn);
 out:
-       if (new_valid > data_size)
-               new_valid = data_size;
+       if (attr_b) {
+               if (new_valid > data_size)
+                       new_valid = data_size;
 
-       valid_size = le64_to_cpu(attr_b->nres.valid_size);
-       if (new_valid != valid_size) {
-               attr_b->nres.valid_size = cpu_to_le64(valid_size);
-               mi_b->dirty = true;
+               valid_size = le64_to_cpu(attr_b->nres.valid_size);
+               if (new_valid != valid_size) {
+                       attr_b->nres.valid_size = cpu_to_le64(valid_size);
+                       mi_b->dirty = true;
+               }
        }
 
        return err;
@@ -2073,7 +2084,7 @@ next_attr:
 
        /* Update inode size. */
        ni->i_valid = valid_size;
-       ni->vfs_inode.i_size = data_size;
+       i_size_write(&ni->vfs_inode, data_size);
        inode_set_bytes(&ni->vfs_inode, total_size);
        ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
        mark_inode_dirty(&ni->vfs_inode);
@@ -2488,7 +2499,7 @@ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
        mi_b->dirty = true;
 
 done:
-       ni->vfs_inode.i_size += bytes;
+       i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
        ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
        mark_inode_dirty(&ni->vfs_inode);
 
index 7c01735d1219d858b46809147fa06dbcc6cafe4c..9f4bd8d260901ca4fd4db97aea3687459e4ebc1a 100644 (file)
@@ -29,7 +29,7 @@ static inline bool al_is_valid_le(const struct ntfs_inode *ni,
 void al_destroy(struct ntfs_inode *ni)
 {
        run_close(&ni->attr_list.run);
-       kfree(ni->attr_list.le);
+       kvfree(ni->attr_list.le);
        ni->attr_list.le = NULL;
        ni->attr_list.size = 0;
        ni->attr_list.dirty = false;
@@ -127,12 +127,13 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
 {
        size_t off;
        u16 sz;
+       const unsigned le_min_size = le_size(0);
 
        if (!le) {
                le = ni->attr_list.le;
        } else {
                sz = le16_to_cpu(le->size);
-               if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
+               if (sz < le_min_size) {
                        /* Impossible 'cause we should not return such le. */
                        return NULL;
                }
@@ -141,7 +142,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
 
        /* Check boundary. */
        off = PtrOffset(ni->attr_list.le, le);
-       if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
+       if (off + le_min_size > ni->attr_list.size) {
                /* The regular end of list. */
                return NULL;
        }
@@ -149,8 +150,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
        sz = le16_to_cpu(le->size);
 
        /* Check le for errors. */
-       if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
-           off + sz > ni->attr_list.size ||
+       if (sz < le_min_size || off + sz > ni->attr_list.size ||
            sz < le->name_off + le->name_len * sizeof(short)) {
                return NULL;
        }
@@ -318,7 +318,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
                memcpy(ptr, al->le, off);
                memcpy(Add2Ptr(ptr, off + sz), le, old_size - off);
                le = Add2Ptr(ptr, off);
-               kfree(al->le);
+               kvfree(al->le);
                al->le = ptr;
        } else {
                memmove(Add2Ptr(le, sz), le, old_size - off);
index 63f14a0232f6a0e0672c5373748bf77b72931bf2..845f9b22deef0f42cabfb4156d4d8fe05c31fce9 100644 (file)
@@ -124,7 +124,7 @@ void wnd_close(struct wnd_bitmap *wnd)
 {
        struct rb_node *node, *next;
 
-       kfree(wnd->free_bits);
+       kvfree(wnd->free_bits);
        wnd->free_bits = NULL;
        run_close(&wnd->run);
 
@@ -1360,7 +1360,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
                memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
                memset(new_free + wnd->nwnd, 0,
                       (new_wnd - wnd->nwnd) * sizeof(short));
-               kfree(wnd->free_bits);
+               kvfree(wnd->free_bits);
                wnd->free_bits = new_free;
        }
 
index ec0566b322d5d0b4b36533a3a218671bb0ff7b02..5cf3d9decf646b1935517e8b564d807626e60e0f 100644 (file)
@@ -309,11 +309,31 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
                return 0;
        }
 
-       /* NTFS: symlinks are "dir + reparse" or "file + reparse" */
-       if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
-               dt_type = DT_LNK;
-       else
-               dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+       /*
+        * NTFS: symlinks are "dir + reparse" or "file + reparse"
+        * Unfortunately reparse attribute is used for many purposes (several dozens).
+        * It is not possible here to know whether this name is a symlink or not.
+        * To get the exact type of the name we should open the inode (read mft).
+        * getattr for opened file (fstat) correctly returns symlink.
+        */
+       dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+
+       /*
+        * It is not reliable to detect the type of name using duplicated information
+        * stored in parent directory.
+        * The only correct way to get the type of a name is to read the MFT record and find ATTR_STD.
+        * The code below is not a good idea.
+        * It does additional locks/reads just to get the type of name.
+        * Should we use additional mount option to enable branch below?
+        */
+       if ((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) &&
+           ino != ni->mi.rno) {
+               struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
+               if (!IS_ERR_OR_NULL(inode)) {
+                       dt_type = fs_umode_to_dtype(inode->i_mode);
+                       iput(inode);
+               }
+       }
 
        return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
 }
@@ -495,11 +515,9 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
        struct INDEX_HDR *hdr;
        const struct ATTR_FILE_NAME *fname;
        u32 e_size, off, end;
-       u64 vbo = 0;
        size_t drs = 0, fles = 0, bit = 0;
-       loff_t i_size = ni->vfs_inode.i_size;
        struct indx_node *node = NULL;
-       u8 index_bits = ni->dir.index_bits;
+       size_t max_indx = i_size_read(&ni->vfs_inode) >> ni->dir.index_bits;
 
        if (is_empty)
                *is_empty = true;
@@ -518,8 +536,10 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
                        e = Add2Ptr(hdr, off);
                        e_size = le16_to_cpu(e->size);
                        if (e_size < sizeof(struct NTFS_DE) ||
-                           off + e_size > end)
+                           off + e_size > end) {
+                               /* Looks like corruption. */
                                break;
+                       }
 
                        if (de_is_last(e))
                                break;
@@ -543,7 +563,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
                                fles += 1;
                }
 
-               if (vbo >= i_size)
+               if (bit >= max_indx)
                        goto out;
 
                err = indx_used_bit(&ni->dir, ni, &bit);
@@ -553,8 +573,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
                if (bit == MINUS_ONE_T)
                        goto out;
 
-               vbo = (u64)bit << index_bits;
-               if (vbo >= i_size)
+               if (bit >= max_indx)
                        goto out;
 
                err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
@@ -564,7 +583,6 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
 
                hdr = &node->index->ihdr;
                bit += 1;
-               vbo = (u64)bit << ni->dir.idx2vbn_bits;
        }
 
 out:
@@ -593,5 +611,9 @@ const struct file_operations ntfs_dir_operations = {
        .iterate_shared = ntfs_readdir,
        .fsync          = generic_file_fsync,
        .open           = ntfs_file_open,
+       .unlocked_ioctl = ntfs_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = ntfs_compat_ioctl,
+#endif
 };
 // clang-format on
index a5a30a24ce5dfa70d670826d1b5ac16a668d06be..5418662c80d8878afe72a8b8e8ffc43cc834b176 100644 (file)
@@ -48,7 +48,7 @@ static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
        return 0;
 }
 
-static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
+long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
        struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
@@ -61,7 +61,7 @@ static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
 }
 
 #ifdef CONFIG_COMPAT
-static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
+long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
 
 {
        return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
@@ -188,6 +188,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
        u32 bh_next, bh_off, to;
        sector_t iblock;
        struct folio *folio;
+       bool dirty = false;
 
        for (; idx < idx_end; idx += 1, from = 0) {
                page_off = (loff_t)idx << PAGE_SHIFT;
@@ -223,29 +224,27 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
                        /* Ok, it's mapped. Make sure it's up-to-date. */
                        if (folio_test_uptodate(folio))
                                set_buffer_uptodate(bh);
-
-                       if (!buffer_uptodate(bh)) {
-                               err = bh_read(bh, 0);
-                               if (err < 0) {
-                                       folio_unlock(folio);
-                                       folio_put(folio);
-                                       goto out;
-                               }
+                       else if (bh_read(bh, 0) < 0) {
+                               err = -EIO;
+                               folio_unlock(folio);
+                               folio_put(folio);
+                               goto out;
                        }
 
                        mark_buffer_dirty(bh);
-
                } while (bh_off = bh_next, iblock += 1,
                         head != (bh = bh->b_this_page));
 
                folio_zero_segment(folio, from, to);
+               dirty = true;
 
                folio_unlock(folio);
                folio_put(folio);
                cond_resched();
        }
 out:
-       mark_inode_dirty(inode);
+       if (dirty)
+               mark_inode_dirty(inode);
        return err;
 }
 
@@ -261,6 +260,9 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        bool rw = vma->vm_flags & VM_WRITE;
        int err;
 
+       if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+               return -EIO;
+
        if (is_encrypted(ni)) {
                ntfs_inode_warn(inode, "mmap encrypted not supported");
                return -EOPNOTSUPP;
@@ -499,10 +501,14 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
                ni_lock(ni);
                err = attr_punch_hole(ni, vbo, len, &frame_size);
                ni_unlock(ni);
+               if (!err)
+                       goto ok;
+
                if (err != E_NTFS_NOTALIGNED)
                        goto out;
 
                /* Process not aligned punch. */
+               err = 0;
                mask = frame_size - 1;
                vbo_a = (vbo + mask) & ~mask;
                end_a = end & ~mask;
@@ -525,6 +531,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
                        ni_lock(ni);
                        err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
                        ni_unlock(ni);
+                       if (err)
+                               goto out;
                }
        } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
                /*
@@ -564,6 +572,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
                ni_lock(ni);
                err = attr_insert_range(ni, vbo, len);
                ni_unlock(ni);
+               if (err)
+                       goto out;
        } else {
                /* Check new size. */
                u8 cluster_bits = sbi->cluster_bits;
@@ -633,11 +643,18 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
                                            &ni->file.run, i_size, &ni->i_valid,
                                            true, NULL);
                        ni_unlock(ni);
+                       if (err)
+                               goto out;
                } else if (new_size > i_size) {
-                       inode->i_size = new_size;
+                       i_size_write(inode, new_size);
                }
        }
 
+ok:
+       err = file_modified(file);
+       if (err)
+               goto out;
+
 out:
        if (map_locked)
                filemap_invalidate_unlock(mapping);
@@ -663,6 +680,9 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
        umode_t mode = inode->i_mode;
        int err;
 
+       if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+               return -EIO;
+
        err = setattr_prepare(idmap, dentry, attr);
        if (err)
                goto out;
@@ -676,7 +696,7 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                        goto out;
                }
                inode_dio_wait(inode);
-               oldsize = inode->i_size;
+               oldsize = i_size_read(inode);
                newsize = attr->ia_size;
 
                if (newsize <= oldsize)
@@ -688,7 +708,7 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                        goto out;
 
                ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
-               inode->i_size = newsize;
+               i_size_write(inode, newsize);
        }
 
        setattr_copy(idmap, inode, attr);
@@ -718,6 +738,9 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        struct inode *inode = file->f_mapping->host;
        struct ntfs_inode *ni = ntfs_i(inode);
 
+       if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+               return -EIO;
+
        if (is_encrypted(ni)) {
                ntfs_inode_warn(inode, "encrypted i/o not supported");
                return -EOPNOTSUPP;
@@ -752,6 +775,9 @@ static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
        struct inode *inode = in->f_mapping->host;
        struct ntfs_inode *ni = ntfs_i(inode);
 
+       if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+               return -EIO;
+
        if (is_encrypted(ni)) {
                ntfs_inode_warn(inode, "encrypted i/o not supported");
                return -EOPNOTSUPP;
@@ -821,7 +847,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
        size_t count = iov_iter_count(from);
        loff_t pos = iocb->ki_pos;
        struct inode *inode = file_inode(file);
-       loff_t i_size = inode->i_size;
+       loff_t i_size = i_size_read(inode);
        struct address_space *mapping = inode->i_mapping;
        struct ntfs_inode *ni = ntfs_i(inode);
        u64 valid = ni->i_valid;
@@ -1028,6 +1054,8 @@ out:
        iocb->ki_pos += written;
        if (iocb->ki_pos > ni->i_valid)
                ni->i_valid = iocb->ki_pos;
+       if (iocb->ki_pos > i_size)
+               i_size_write(inode, iocb->ki_pos);
 
        return written;
 }
@@ -1041,8 +1069,12 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;
+       int err;
        struct ntfs_inode *ni = ntfs_i(inode);
 
+       if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+               return -EIO;
+
        if (is_encrypted(ni)) {
                ntfs_inode_warn(inode, "encrypted i/o not supported");
                return -EOPNOTSUPP;
@@ -1068,6 +1100,12 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (ret <= 0)
                goto out;
 
+       err = file_modified(iocb->ki_filp);
+       if (err) {
+               ret = err;
+               goto out;
+       }
+
        if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
                /* Should never be here, see ntfs_file_open(). */
                ret = -EOPNOTSUPP;
@@ -1097,6 +1135,9 @@ int ntfs_file_open(struct inode *inode, struct file *file)
 {
        struct ntfs_inode *ni = ntfs_i(inode);
 
+       if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+               return -EIO;
+
        if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
                     (file->f_flags & O_DIRECT))) {
                return -EOPNOTSUPP;
@@ -1138,7 +1179,8 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
                down_write(&ni->file.run_lock);
 
                err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
-                                   inode->i_size, &ni->i_valid, false, NULL);
+                                   i_size_read(inode), &ni->i_valid, false,
+                                   NULL);
 
                up_write(&ni->file.run_lock);
                ni_unlock(ni);
index 3df2d9e34b9144f4b039ccc6197c9aa249b7ac64..3b42938a9d3b229b0bbf1eef9a80f83da5111113 100644 (file)
@@ -778,7 +778,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
        run_deallocate(sbi, &ni->attr_list.run, true);
        run_close(&ni->attr_list.run);
        ni->attr_list.size = 0;
-       kfree(ni->attr_list.le);
+       kvfree(ni->attr_list.le);
        ni->attr_list.le = NULL;
        ni->attr_list.dirty = false;
 
@@ -927,7 +927,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
        return 0;
 
 out:
-       kfree(ni->attr_list.le);
+       kvfree(ni->attr_list.le);
        ni->attr_list.le = NULL;
        ni->attr_list.size = 0;
        return err;
@@ -2099,7 +2099,7 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
        gfp_t gfp_mask;
        struct page *pg;
 
-       if (vbo >= ni->vfs_inode.i_size) {
+       if (vbo >= i_size_read(&ni->vfs_inode)) {
                SetPageUptodate(page);
                err = 0;
                goto out;
@@ -2173,7 +2173,7 @@ int ni_decompress_file(struct ntfs_inode *ni)
 {
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        struct inode *inode = &ni->vfs_inode;
-       loff_t i_size = inode->i_size;
+       loff_t i_size = i_size_read(inode);
        struct address_space *mapping = inode->i_mapping;
        gfp_t gfp_mask = mapping_gfp_mask(mapping);
        struct page **pages = NULL;
@@ -2457,6 +2457,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
        struct ATTR_LIST_ENTRY *le = NULL;
        struct runs_tree *run = &ni->file.run;
        u64 valid_size = ni->i_valid;
+       loff_t i_size = i_size_read(&ni->vfs_inode);
        u64 vbo_disk;
        size_t unc_size;
        u32 frame_size, i, npages_disk, ondisk_size;
@@ -2548,7 +2549,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
                        }
                }
 
-               frames = (ni->vfs_inode.i_size - 1) >> frame_bits;
+               frames = (i_size - 1) >> frame_bits;
 
                err = attr_wof_frame_info(ni, attr, run, frame64, frames,
                                          frame_bits, &ondisk_size, &vbo_data);
@@ -2556,8 +2557,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
                        goto out2;
 
                if (frame64 == frames) {
-                       unc_size = 1 + ((ni->vfs_inode.i_size - 1) &
-                                       (frame_size - 1));
+                       unc_size = 1 + ((i_size - 1) & (frame_size - 1));
                        ondisk_size = attr_size(attr) - vbo_data;
                } else {
                        unc_size = frame_size;
@@ -3259,6 +3259,9 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
        if (is_bad_inode(inode) || sb_rdonly(sb))
                return 0;
 
+       if (unlikely(ntfs3_forced_shutdown(sb)))
+               return -EIO;
+
        if (!ni_trylock(ni)) {
                /* 'ni' is under modification, skip for now. */
                mark_inode_dirty_sync(inode);
@@ -3288,7 +3291,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
                        modified = true;
                }
 
-               ts = inode_get_mtime(inode);
+               ts = inode_get_ctime(inode);
                dup.c_time = kernel2nt(&ts);
                if (std->c_time != dup.c_time) {
                        std->c_time = dup.c_time;
index 98ccb66508583138ed7f5c273b2f4450be88159b..855519713bf79074ed336ca7094cca5d5cdbc009 100644 (file)
@@ -465,7 +465,7 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
 {
        const struct RESTART_AREA *ra;
        u16 cl, fl, ul;
-       u32 off, l_size, file_dat_bits, file_size_round;
+       u32 off, l_size, seq_bits;
        u16 ro = le16_to_cpu(rhdr->ra_off);
        u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
 
@@ -511,13 +511,15 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
        /* Make sure the sequence number bits match the log file size. */
        l_size = le64_to_cpu(ra->l_size);
 
-       file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
-       file_size_round = 1u << (file_dat_bits + 3);
-       if (file_size_round != l_size &&
-           (file_size_round < l_size || (file_size_round / 2) > l_size)) {
-               return false;
+       seq_bits = sizeof(u64) * 8 + 3;
+       while (l_size) {
+               l_size >>= 1;
+               seq_bits -= 1;
        }
 
+       if (seq_bits != ra->seq_num_bits)
+               return false;
+
        /* The log page data offset and record header length must be quad-aligned. */
        if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
            !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
@@ -974,6 +976,16 @@ skip_looking:
        return e;
 }
 
+struct restart_info {
+       u64 last_lsn;
+       struct RESTART_HDR *r_page;
+       u32 vbo;
+       bool chkdsk_was_run;
+       bool valid_page;
+       bool initialized;
+       bool restart;
+};
+
 #define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
 
 #define NTFSLOG_WRAPPED 0x00000001
@@ -987,6 +999,7 @@ struct ntfs_log {
        struct ntfs_inode *ni;
 
        u32 l_size;
+       u32 orig_file_size;
        u32 sys_page_size;
        u32 sys_page_mask;
        u32 page_size;
@@ -1040,6 +1053,8 @@ struct ntfs_log {
 
        struct CLIENT_ID client_id;
        u32 client_undo_commit;
+
+       struct restart_info rst_info, rst_info2;
 };
 
 static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
@@ -1105,16 +1120,6 @@ static inline bool verify_client_lsn(struct ntfs_log *log,
               lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
 }
 
-struct restart_info {
-       u64 last_lsn;
-       struct RESTART_HDR *r_page;
-       u32 vbo;
-       bool chkdsk_was_run;
-       bool valid_page;
-       bool initialized;
-       bool restart;
-};
-
 static int read_log_page(struct ntfs_log *log, u32 vbo,
                         struct RECORD_PAGE_HDR **buffer, bool *usa_error)
 {
@@ -1176,7 +1181,7 @@ out:
  * restart page header. It will stop the first time we find a
  * valid page header.
  */
-static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+static int log_read_rst(struct ntfs_log *log, bool first,
                        struct restart_info *info)
 {
        u32 skip, vbo;
@@ -1192,7 +1197,7 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
        }
 
        /* Loop continuously until we succeed. */
-       for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
+       for (; vbo < log->l_size; vbo = 2 * vbo + skip, skip = 0) {
                bool usa_error;
                bool brst, bchk;
                struct RESTART_AREA *ra;
@@ -1285,22 +1290,17 @@ check_result:
 /*
  * Ilog_init_pg_hdr - Init @log from restart page header.
  */
-static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
-                           u32 page_size, u16 major_ver, u16 minor_ver)
+static void log_init_pg_hdr(struct ntfs_log *log, u16 major_ver, u16 minor_ver)
 {
-       log->sys_page_size = sys_page_size;
-       log->sys_page_mask = sys_page_size - 1;
-       log->page_size = page_size;
-       log->page_mask = page_size - 1;
-       log->page_bits = blksize_bits(page_size);
+       log->sys_page_size = log->page_size;
+       log->sys_page_mask = log->page_mask;
 
        log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
        if (!log->clst_per_page)
                log->clst_per_page = 1;
 
-       log->first_page = major_ver >= 2 ?
-                                 0x22 * page_size :
-                                 ((sys_page_size << 1) + (page_size << 1));
+       log->first_page = major_ver >= 2 ? 0x22 * log->page_size :
+                                          4 * log->page_size;
        log->major_ver = major_ver;
        log->minor_ver = minor_ver;
 }
@@ -1308,12 +1308,11 @@ static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
 /*
  * log_create - Init @log in cases when we don't have a restart area to use.
  */
-static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
+static void log_create(struct ntfs_log *log, const u64 last_lsn,
                       u32 open_log_count, bool wrapped, bool use_multi_page)
 {
-       log->l_size = l_size;
        /* All file offsets must be quadword aligned. */
-       log->file_data_bits = blksize_bits(l_size) - 3;
+       log->file_data_bits = blksize_bits(log->l_size) - 3;
        log->seq_num_mask = (8 << log->file_data_bits) - 1;
        log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
        log->seq_num = (last_lsn >> log->file_data_bits) + 2;
@@ -3720,10 +3719,8 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        struct ntfs_log *log;
 
-       struct restart_info rst_info, rst_info2;
-       u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
+       u64 rec_lsn, checkpt_lsn = 0, rlsn = 0;
        struct ATTR_NAME_ENTRY *attr_names = NULL;
-       struct ATTR_NAME_ENTRY *ane;
        struct RESTART_TABLE *dptbl = NULL;
        struct RESTART_TABLE *trtbl = NULL;
        const struct RESTART_TABLE *rt;
@@ -3741,9 +3738,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
        struct TRANSACTION_ENTRY *tr;
        struct DIR_PAGE_ENTRY *dp;
        u32 i, bytes_per_attr_entry;
-       u32 l_size = ni->vfs_inode.i_size;
-       u32 orig_file_size = l_size;
-       u32 page_size, vbo, tail, off, dlen;
+       u32 vbo, tail, off, dlen;
        u32 saved_len, rec_len, transact_id;
        bool use_second_page;
        struct RESTART_AREA *ra2, *ra = NULL;
@@ -3758,52 +3753,50 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
        u16 t16;
        u32 t32;
 
-       /* Get the size of page. NOTE: To replay we can use default page. */
-#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
-       page_size = norm_file_page(PAGE_SIZE, &l_size, true);
-#else
-       page_size = norm_file_page(PAGE_SIZE, &l_size, false);
-#endif
-       if (!page_size)
-               return -EINVAL;
-
        log = kzalloc(sizeof(struct ntfs_log), GFP_NOFS);
        if (!log)
                return -ENOMEM;
 
        log->ni = ni;
-       log->l_size = l_size;
-       log->one_page_buf = kmalloc(page_size, GFP_NOFS);
+       log->l_size = log->orig_file_size = ni->vfs_inode.i_size;
 
+       /* Get the size of page. NOTE: To replay we can use default page. */
+#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
+       log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, true);
+#else
+       log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, false);
+#endif
+       if (!log->page_size) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       log->one_page_buf = kmalloc(log->page_size, GFP_NOFS);
        if (!log->one_page_buf) {
                err = -ENOMEM;
                goto out;
        }
 
-       log->page_size = page_size;
-       log->page_mask = page_size - 1;
-       log->page_bits = blksize_bits(page_size);
+       log->page_mask = log->page_size - 1;
+       log->page_bits = blksize_bits(log->page_size);
 
        /* Look for a restart area on the disk. */
-       memset(&rst_info, 0, sizeof(struct restart_info));
-       err = log_read_rst(log, l_size, true, &rst_info);
+       err = log_read_rst(log, true, &log->rst_info);
        if (err)
                goto out;
 
        /* remember 'initialized' */
-       *initialized = rst_info.initialized;
+       *initialized = log->rst_info.initialized;
 
-       if (!rst_info.restart) {
-               if (rst_info.initialized) {
+       if (!log->rst_info.restart) {
+               if (log->rst_info.initialized) {
                        /* No restart area but the file is not initialized. */
                        err = -EINVAL;
                        goto out;
                }
 
-               log_init_pg_hdr(log, page_size, page_size, 1, 1);
-               log_create(log, l_size, 0, get_random_u32(), false, false);
-
-               log->ra = ra;
+               log_init_pg_hdr(log, 1, 1);
+               log_create(log, 0, get_random_u32(), false, false);
 
                ra = log_create_ra(log);
                if (!ra) {
@@ -3820,25 +3813,26 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
         * If the restart offset above wasn't zero then we won't
         * look for a second restart.
         */
-       if (rst_info.vbo)
+       if (log->rst_info.vbo)
                goto check_restart_area;
 
-       memset(&rst_info2, 0, sizeof(struct restart_info));
-       err = log_read_rst(log, l_size, false, &rst_info2);
+       err = log_read_rst(log, false, &log->rst_info2);
        if (err)
                goto out;
 
        /* Determine which restart area to use. */
-       if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
+       if (!log->rst_info2.restart ||
+           log->rst_info2.last_lsn <= log->rst_info.last_lsn)
                goto use_first_page;
 
        use_second_page = true;
 
-       if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
+       if (log->rst_info.chkdsk_was_run &&
+           log->page_size != log->rst_info.vbo) {
                struct RECORD_PAGE_HDR *sp = NULL;
                bool usa_error;
 
-               if (!read_log_page(log, page_size, &sp, &usa_error) &&
+               if (!read_log_page(log, log->page_size, &sp, &usa_error) &&
                    sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
                        use_second_page = false;
                }
@@ -3846,52 +3840,43 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
        }
 
        if (use_second_page) {
-               kfree(rst_info.r_page);
-               memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
-               rst_info2.r_page = NULL;
+               kfree(log->rst_info.r_page);
+               memcpy(&log->rst_info, &log->rst_info2,
+                      sizeof(struct restart_info));
+               log->rst_info2.r_page = NULL;
        }
 
 use_first_page:
-       kfree(rst_info2.r_page);
+       kfree(log->rst_info2.r_page);
 
 check_restart_area:
        /*
         * If the restart area is at offset 0, we want
         * to write the second restart area first.
         */
-       log->init_ra = !!rst_info.vbo;
+       log->init_ra = !!log->rst_info.vbo;
 
        /* If we have a valid page then grab a pointer to the restart area. */
-       ra2 = rst_info.valid_page ?
-                     Add2Ptr(rst_info.r_page,
-                             le16_to_cpu(rst_info.r_page->ra_off)) :
+       ra2 = log->rst_info.valid_page ?
+                     Add2Ptr(log->rst_info.r_page,
+                             le16_to_cpu(log->rst_info.r_page->ra_off)) :
                      NULL;
 
-       if (rst_info.chkdsk_was_run ||
+       if (log->rst_info.chkdsk_was_run ||
            (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
                bool wrapped = false;
                bool use_multi_page = false;
                u32 open_log_count;
 
                /* Do some checks based on whether we have a valid log page. */
-               if (!rst_info.valid_page) {
-                       open_log_count = get_random_u32();
-                       goto init_log_instance;
-               }
-               open_log_count = le32_to_cpu(ra2->open_log_count);
-
-               /*
-                * If the restart page size isn't changing then we want to
-                * check how much work we need to do.
-                */
-               if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
-                       goto init_log_instance;
+               open_log_count = log->rst_info.valid_page ?
+                                        le32_to_cpu(ra2->open_log_count) :
+                                        get_random_u32();
 
-init_log_instance:
-               log_init_pg_hdr(log, page_size, page_size, 1, 1);
+               log_init_pg_hdr(log, 1, 1);
 
-               log_create(log, l_size, rst_info.last_lsn, open_log_count,
-                          wrapped, use_multi_page);
+               log_create(log, log->rst_info.last_lsn, open_log_count, wrapped,
+                          use_multi_page);
 
                ra = log_create_ra(log);
                if (!ra) {
@@ -3916,28 +3901,27 @@ init_log_instance:
         * use the log file. We must use the system page size instead of the
         * default size if there is not a clean shutdown.
         */
-       t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
-       if (page_size != t32) {
-               l_size = orig_file_size;
-               page_size =
-                       norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
+       t32 = le32_to_cpu(log->rst_info.r_page->sys_page_size);
+       if (log->page_size != t32) {
+               log->l_size = log->orig_file_size;
+               log->page_size = norm_file_page(t32, &log->l_size,
+                                               t32 == DefaultLogPageSize);
        }
 
-       if (page_size != t32 ||
-           page_size != le32_to_cpu(rst_info.r_page->page_size)) {
+       if (log->page_size != t32 ||
+           log->page_size != le32_to_cpu(log->rst_info.r_page->page_size)) {
                err = -EINVAL;
                goto out;
        }
 
        /* If the file size has shrunk then we won't mount it. */
-       if (l_size < le64_to_cpu(ra2->l_size)) {
+       if (log->l_size < le64_to_cpu(ra2->l_size)) {
                err = -EINVAL;
                goto out;
        }
 
-       log_init_pg_hdr(log, page_size, page_size,
-                       le16_to_cpu(rst_info.r_page->major_ver),
-                       le16_to_cpu(rst_info.r_page->minor_ver));
+       log_init_pg_hdr(log, le16_to_cpu(log->rst_info.r_page->major_ver),
+                       le16_to_cpu(log->rst_info.r_page->minor_ver));
 
        log->l_size = le64_to_cpu(ra2->l_size);
        log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
@@ -3945,7 +3929,7 @@ init_log_instance:
        log->seq_num_mask = (8 << log->file_data_bits) - 1;
        log->last_lsn = le64_to_cpu(ra2->current_lsn);
        log->seq_num = log->last_lsn >> log->file_data_bits;
-       log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
+       log->ra_off = le16_to_cpu(log->rst_info.r_page->ra_off);
        log->restart_size = log->sys_page_size - log->ra_off;
        log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
        log->ra_size = le16_to_cpu(ra2->ra_len);
@@ -4045,7 +4029,7 @@ find_oldest:
        log->current_avail = current_log_avail(log);
 
        /* Remember which restart area to write first. */
-       log->init_ra = rst_info.vbo;
+       log->init_ra = log->rst_info.vbo;
 
 process_log:
        /* 1.0, 1.1, 2.0 log->major_ver/minor_ver - short values. */
@@ -4105,7 +4089,7 @@ process_log:
        log->client_id.seq_num = cr->seq_num;
        log->client_id.client_idx = client;
 
-       err = read_rst_area(log, &rst, &ra_lsn);
+       err = read_rst_area(log, &rst, &checkpt_lsn);
        if (err)
                goto out;
 
@@ -4114,9 +4098,8 @@ process_log:
 
        bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
 
-       checkpt_lsn = le64_to_cpu(rst->check_point_start);
-       if (!checkpt_lsn)
-               checkpt_lsn = ra_lsn;
+       if (rst->check_point_start)
+               checkpt_lsn = le64_to_cpu(rst->check_point_start);
 
        /* Allocate and Read the Transaction Table. */
        if (!rst->transact_table_len)
@@ -4330,23 +4313,20 @@ check_attr_table:
        lcb = NULL;
 
 check_attribute_names2:
-       if (!rst->attr_names_len)
-               goto trace_attribute_table;
-
-       ane = attr_names;
-       if (!oatbl)
-               goto trace_attribute_table;
-       while (ane->off) {
-               /* TODO: Clear table on exit! */
-               oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
-               t16 = le16_to_cpu(ane->name_bytes);
-               oe->name_len = t16 / sizeof(short);
-               oe->ptr = ane->name;
-               oe->is_attr_name = 2;
-               ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
-       }
-
-trace_attribute_table:
+       if (rst->attr_names_len && oatbl) {
+               struct ATTR_NAME_ENTRY *ane = attr_names;
+               while (ane->off) {
+                       /* TODO: Clear table on exit! */
+                       oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
+                       t16 = le16_to_cpu(ane->name_bytes);
+                       oe->name_len = t16 / sizeof(short);
+                       oe->ptr = ane->name;
+                       oe->is_attr_name = 2;
+                       ane = Add2Ptr(ane,
+                                     sizeof(struct ATTR_NAME_ENTRY) + t16);
+               }
+       }
+
        /*
         * If the checkpt_lsn is zero, then this is a freshly
         * formatted disk and we have no work to do.
@@ -5189,7 +5169,7 @@ out:
        kfree(oatbl);
        kfree(dptbl);
        kfree(attr_names);
-       kfree(rst_info.r_page);
+       kfree(log->rst_info.r_page);
 
        kfree(ra);
        kfree(log->one_page_buf);
index fbfe21dbb42597cdb25a6d809f7b396e37508046..ae2ef5c11868c360f1285feffae734b2974d0d5c 100644 (file)
@@ -853,7 +853,8 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
        /*
         * sb can be NULL here. In this case sbi->flags should be 0 too.
         */
-       if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
+       if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
+           unlikely(ntfs3_forced_shutdown(sb)))
                return;
 
        blocksize = sb->s_blocksize;
@@ -1006,6 +1007,30 @@ static inline __le32 security_hash(const void *sd, size_t bytes)
        return cpu_to_le32(hash);
 }
 
+/*
+ * simple wrapper for sb_bread_unmovable.
+ */
+struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
+{
+       struct ntfs_sb_info *sbi = sb->s_fs_info;
+       struct buffer_head *bh;
+
+       if (unlikely(block >= sbi->volume.blocks)) {
+               /* prevent generic message "attempt to access beyond end of device" */
+               ntfs_err(sb, "try to read out of volume at offset 0x%llx",
+                        (u64)block << sb->s_blocksize_bits);
+               return NULL;
+       }
+
+       bh = sb_bread_unmovable(sb, block);
+       if (bh)
+               return bh;
+
+       ntfs_err(sb, "failed to read volume at offset 0x%llx",
+                (u64)block << sb->s_blocksize_bits);
+       return NULL;
+}
+
 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
 {
        struct block_device *bdev = sb->s_bdev;
@@ -2128,8 +2153,8 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
                        if (le32_to_cpu(d_security->size) == new_sec_size &&
                            d_security->key.hash == hash_key.hash &&
                            !memcmp(d_security + 1, sd, size_sd)) {
-                               *security_id = d_security->key.sec_id;
                                /* Such security already exists. */
+                               *security_id = d_security->key.sec_id;
                                err = 0;
                                goto out;
                        }
index cf92b2433f7a750aeb86383eb7440c730ad7dc95..daabaad63aaf64ae65b8d67bbd40de837bfe3486 100644 (file)
@@ -1462,7 +1462,7 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
                goto out2;
 
        if (in->name == I30_NAME) {
-               ni->vfs_inode.i_size = data_size;
+               i_size_write(&ni->vfs_inode, data_size);
                inode_set_bytes(&ni->vfs_inode, alloc_size);
        }
 
@@ -1544,7 +1544,7 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
        }
 
        if (in->name == I30_NAME)
-               ni->vfs_inode.i_size = data_size;
+               i_size_write(&ni->vfs_inode, data_size);
 
        *vbn = bit << indx->idx2vbn_bits;
 
@@ -2090,7 +2090,7 @@ static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
                return err;
 
        if (in->name == I30_NAME)
-               ni->vfs_inode.i_size = new_data;
+               i_size_write(&ni->vfs_inode, new_data);
 
        bpb = bitmap_size(bit);
        if (bpb * 8 == nbits)
@@ -2576,7 +2576,7 @@ int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
                err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
                                    &indx->alloc_run, 0, NULL, false, NULL);
                if (in->name == I30_NAME)
-                       ni->vfs_inode.i_size = 0;
+                       i_size_write(&ni->vfs_inode, 0);
 
                err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len,
                                     false, NULL);
index 5e3d713749185f116e145adea4b196fbd7be72d2..eb7a8c9fba0183f40096d673473be4dffaa7c4c8 100644 (file)
@@ -345,9 +345,7 @@ next_attr:
                        inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
                                                            .PrintNameLength) /
                                        sizeof(u16);
-
                        ni->i_valid = inode->i_size;
-
                        /* Clear directory bit. */
                        if (ni->ni_flags & NI_FLAG_DIR) {
                                indx_clear(&ni->dir);
@@ -412,7 +410,6 @@ end_enum:
                goto out;
 
        if (!is_match && name) {
-               /* Reuse rec as buffer for ascii name. */
                err = -ENOENT;
                goto out;
        }
@@ -427,6 +424,7 @@ end_enum:
 
        if (names != le16_to_cpu(rec->hard_links)) {
                /* Correct minor error on the fly. Do not mark inode as dirty. */
+               ntfs_inode_warn(inode, "Correct links count -> %u.", names);
                rec->hard_links = cpu_to_le16(names);
                ni->mi.dirty = true;
        }
@@ -653,9 +651,10 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
                        off = vbo & (PAGE_SIZE - 1);
                        folio_set_bh(bh, folio, off);
 
-                       err = bh_read(bh, 0);
-                       if (err < 0)
+                       if (bh_read(bh, 0) < 0) {
+                               err = -EIO;
                                goto out;
+                       }
                        folio_zero_segment(folio, off + voff, off + block_size);
                }
        }
@@ -853,9 +852,13 @@ static int ntfs_resident_writepage(struct folio *folio,
                                   struct writeback_control *wbc, void *data)
 {
        struct address_space *mapping = data;
-       struct ntfs_inode *ni = ntfs_i(mapping->host);
+       struct inode *inode = mapping->host;
+       struct ntfs_inode *ni = ntfs_i(inode);
        int ret;
 
+       if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+               return -EIO;
+
        ni_lock(ni);
        ret = attr_data_write_resident(ni, &folio->page);
        ni_unlock(ni);
@@ -869,7 +872,12 @@ static int ntfs_resident_writepage(struct folio *folio,
 static int ntfs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
 {
-       if (is_resident(ntfs_i(mapping->host)))
+       struct inode *inode = mapping->host;
+
+       if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+               return -EIO;
+
+       if (is_resident(ntfs_i(inode)))
                return write_cache_pages(mapping, wbc, ntfs_resident_writepage,
                                         mapping);
        return mpage_writepages(mapping, wbc, ntfs_get_block);
@@ -889,6 +897,9 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
        struct inode *inode = mapping->host;
        struct ntfs_inode *ni = ntfs_i(inode);
 
+       if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+               return -EIO;
+
        *pagep = NULL;
        if (is_resident(ni)) {
                struct page *page =
@@ -974,7 +985,7 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
                }
 
                if (pos + err > inode->i_size) {
-                       inode->i_size = pos + err;
+                       i_size_write(inode, pos + err);
                        dirty = true;
                }
 
@@ -1306,6 +1317,11 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
                goto out1;
        }
 
+       if (unlikely(ntfs3_forced_shutdown(sb))) {
+               err = -EIO;
+               goto out2;
+       }
+
        /* Mark rw ntfs as dirty. it will be cleared at umount. */
        ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
 
index ee3093be51701e78d1e02f6f30a7b5a4019831a0..cae41db0aaa7d13e1fb4e0132b79261156a39306 100644 (file)
@@ -181,6 +181,9 @@ static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
        struct ntfs_inode *ni = ntfs_i(dir);
        int err;
 
+       if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
+               return -EIO;
+
        ni_lock_dir(ni);
 
        err = ntfs_unlink_inode(dir, dentry);
@@ -199,6 +202,9 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
        u32 size = strlen(symname);
        struct inode *inode;
 
+       if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
+               return -EIO;
+
        inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFLNK | 0777, 0,
                                  symname, size, NULL);
 
@@ -227,6 +233,9 @@ static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
        struct ntfs_inode *ni = ntfs_i(dir);
        int err;
 
+       if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
+               return -EIO;
+
        ni_lock_dir(ni);
 
        err = ntfs_unlink_inode(dir, dentry);
@@ -264,6 +273,9 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
                      1024);
        static_assert(PATH_MAX >= 4 * 1024);
 
+       if (unlikely(ntfs3_forced_shutdown(sb)))
+               return -EIO;
+
        if (flags & ~RENAME_NOREPLACE)
                return -EINVAL;
 
index 86aecbb01a92f282ab621a26df9897b70b65df28..9c7478150a0352d4f46574b76e91ec27ce661cdb 100644 (file)
@@ -523,12 +523,10 @@ struct ATTR_LIST_ENTRY {
        __le64 vcn;             // 0x08: Starting VCN of this attribute.
        struct MFT_REF ref;     // 0x10: MFT record number with attribute.
        __le16 id;              // 0x18: struct ATTRIB ID.
-       __le16 name[3];         // 0x1A: Just to align. To get real name can use bNameOffset.
+       __le16 name[];          // 0x1A: To get real name use name_off.
 
 }; // sizeof(0x20)
 
-static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20);
-
 static inline u32 le_size(u8 name_len)
 {
        return ALIGN(offsetof(struct ATTR_LIST_ENTRY, name) +
index f6706143d14bced3c2bfdbac59d4df1a8cb9da5e..79356fd29a14141de34ed006517b153fd9e4872b 100644 (file)
@@ -61,6 +61,8 @@ enum utf16_endian;
 
 /* sbi->flags */
 #define NTFS_FLAGS_NODISCARD           0x00000001
+/* ntfs in shutdown state. */
+#define NTFS_FLAGS_SHUTDOWN_BIT                0x00000002  /* == 4*/
 /* Set when LogFile is replaying. */
 #define NTFS_FLAGS_LOG_REPLAYING       0x00000008
 /* Set when we changed first MFT's which copy must be updated in $MftMirr. */
@@ -226,7 +228,7 @@ struct ntfs_sb_info {
        u64 maxbytes; // Maximum size for normal files.
        u64 maxbytes_sparse; // Maximum size for sparse file.
 
-       u32 flags; // See NTFS_FLAGS_XXX.
+       unsigned long flags; // See NTFS_FLAGS_
 
        CLST zone_max; // Maximum MFT zone length in clusters
        CLST bad_clusters; // The count of marked bad clusters.
@@ -473,7 +475,7 @@ bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
 int al_update(struct ntfs_inode *ni, int sync);
 static inline size_t al_aligned(size_t size)
 {
-       return (size + 1023) & ~(size_t)1023;
+       return size_add(size, 1023) & ~(size_t)1023;
 }
 
 /* Globals from bitfunc.c */
@@ -500,6 +502,8 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
 int ntfs_file_open(struct inode *inode, struct file *file);
 int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len);
+long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg);
+long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg);
 extern const struct inode_operations ntfs_special_inode_operations;
 extern const struct inode_operations ntfs_file_inode_operations;
 extern const struct file_operations ntfs_file_operations;
@@ -584,6 +588,7 @@ bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes);
 int log_replay(struct ntfs_inode *ni, bool *initialized);
 
 /* Globals from fsntfs.c */
+struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block);
 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
                       bool simple);
@@ -872,7 +877,7 @@ int ntfs_init_acl(struct mnt_idmap *idmap, struct inode *inode,
 
 int ntfs_acl_chmod(struct mnt_idmap *idmap, struct dentry *dentry);
 ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
-extern const struct xattr_handler * const ntfs_xattr_handlers[];
+extern const struct xattr_handler *const ntfs_xattr_handlers[];
 
 int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size);
 void ntfs_get_wsl_perm(struct inode *inode);
@@ -999,6 +1004,11 @@ static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
        return sb->s_fs_info;
 }
 
+static inline int ntfs3_forced_shutdown(struct super_block *sb)
+{
+       return test_bit(NTFS_FLAGS_SHUTDOWN_BIT, &ntfs_sb(sb)->flags);
+}
+
 /*
  * ntfs_up_cluster - Align up on cluster boundary.
  */
@@ -1025,19 +1035,6 @@ static inline u64 bytes_to_block(const struct super_block *sb, u64 size)
        return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
 }
 
-static inline struct buffer_head *ntfs_bread(struct super_block *sb,
-                                            sector_t block)
-{
-       struct buffer_head *bh = sb_bread(sb, block);
-
-       if (bh)
-               return bh;
-
-       ntfs_err(sb, "failed to read volume at offset 0x%llx",
-                (u64)block << sb->s_blocksize_bits);
-       return NULL;
-}
-
 static inline struct ntfs_inode *ntfs_i(struct inode *inode)
 {
        return container_of(inode, struct ntfs_inode, vfs_inode);
index 53629b1f65e995978cef9b312462447305cb578d..6aa3a9d44df1bdc90f56a947caf94c902590d983 100644 (file)
@@ -279,7 +279,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
                if (t16 > asize)
                        return NULL;
 
-               if (t16 + le32_to_cpu(attr->res.data_size) > asize)
+               if (le32_to_cpu(attr->res.data_size) > asize - t16)
                        return NULL;
 
                t32 = sizeof(short) * attr->name_len;
@@ -535,8 +535,20 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
                return false;
 
        if (ni && is_attr_indexed(attr)) {
-               le16_add_cpu(&ni->mi.mrec->hard_links, -1);
-               ni->mi.dirty = true;
+               u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
+               struct ATTR_FILE_NAME *fname =
+                       attr->type != ATTR_NAME ?
+                               NULL :
+                               resident_data_ex(attr,
+                                                SIZEOF_ATTRIBUTE_FILENAME);
+               if (fname && fname->type == FILE_NAME_DOS) {
+                       /* Do not decrease links count deleting DOS name. */
+               } else if (!links) {
+                       /* minor error. Not critical. */
+               } else {
+                       ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
+                       ni->mi.dirty = true;
+               }
        }
 
        used -= asize;
index 9153dffde950c2a396291bea88e3e6d31169f568..cef5467fd92833aec6fb0bce3879826c2a627a09 100644 (file)
@@ -122,13 +122,12 @@ void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
 
        if (name) {
                struct dentry *de = d_find_alias(inode);
-               const u32 name_len = ARRAY_SIZE(s_name_buf) - 1;
 
                if (de) {
                        spin_lock(&de->d_lock);
-                       snprintf(name, name_len, " \"%s\"", de->d_name.name);
+                       snprintf(name, sizeof(s_name_buf), " \"%s\"",
+                                de->d_name.name);
                        spin_unlock(&de->d_lock);
-                       name[name_len] = 0; /* To be sure. */
                } else {
                        name[0] = 0;
                }
@@ -625,7 +624,7 @@ static void ntfs3_free_sbi(struct ntfs_sb_info *sbi)
 {
        kfree(sbi->new_rec);
        kvfree(ntfs_put_shared(sbi->upcase));
-       kfree(sbi->def_table);
+       kvfree(sbi->def_table);
        kfree(sbi->compress.lznt);
 #ifdef CONFIG_NTFS3_LZX_XPRESS
        xpress_free_decompressor(sbi->compress.xpress);
@@ -714,6 +713,14 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
+/*
+ * ntfs_shutdown - super_operations::shutdown
+ */
+static void ntfs_shutdown(struct super_block *sb)
+{
+       set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &ntfs_sb(sb)->flags);
+}
+
 /*
  * ntfs_sync_fs - super_operations::sync_fs
  */
@@ -724,6 +731,9 @@ static int ntfs_sync_fs(struct super_block *sb, int wait)
        struct ntfs_inode *ni;
        struct inode *inode;
 
+       if (unlikely(ntfs3_forced_shutdown(sb)))
+               return -EIO;
+
        ni = sbi->security.ni;
        if (ni) {
                inode = &ni->vfs_inode;
@@ -763,6 +773,7 @@ static const struct super_operations ntfs_sops = {
        .put_super = ntfs_put_super,
        .statfs = ntfs_statfs,
        .show_options = ntfs_show_options,
+       .shutdown = ntfs_shutdown,
        .sync_fs = ntfs_sync_fs,
        .write_inode = ntfs3_write_inode,
 };
@@ -866,6 +877,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        u16 fn, ao;
        u8 cluster_bits;
        u32 boot_off = 0;
+       sector_t boot_block = 0;
        const char *hint = "Primary boot";
 
        /* Save original dev_size. Used with alternative boot. */
@@ -873,11 +885,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
 
        sbi->volume.blocks = dev_size >> PAGE_SHIFT;
 
-       bh = ntfs_bread(sb, 0);
+read_boot:
+       bh = ntfs_bread(sb, boot_block);
        if (!bh)
-               return -EIO;
+               return boot_block ? -EINVAL : -EIO;
 
-check_boot:
        err = -EINVAL;
 
        /* Corrupted image; do not read OOB */
@@ -1108,26 +1120,24 @@ check_boot:
        }
 
 out:
-       if (err == -EINVAL && !bh->b_blocknr && dev_size0 > PAGE_SHIFT) {
+       brelse(bh);
+
+       if (err == -EINVAL && !boot_block && dev_size0 > PAGE_SHIFT) {
                u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
                u64 lbo = dev_size0 - sizeof(*boot);
 
-               /*
-                * Try alternative boot (last sector)
-                */
-               brelse(bh);
-
-               sb_set_blocksize(sb, block_size);
-               bh = ntfs_bread(sb, lbo >> blksize_bits(block_size));
-               if (!bh)
-                       return -EINVAL;
-
+               boot_block = lbo >> blksize_bits(block_size);
                boot_off = lbo & (block_size - 1);
-               hint = "Alternative boot";
-               dev_size = dev_size0; /* restore original size. */
-               goto check_boot;
+               if (boot_block && block_size >= boot_off + sizeof(*boot)) {
+                       /*
+                        * Try alternative boot (last sector)
+                        */
+                       sb_set_blocksize(sb, block_size);
+                       hint = "Alternative boot";
+                       dev_size = dev_size0; /* restore original size. */
+                       goto read_boot;
+               }
        }
-       brelse(bh);
 
        return err;
 }
index 4274b6f31cfa1c49fec865fe0e1d81ab152e040b..53e7d1fa036aa6e50a3ccd529d88584b1350cd74 100644 (file)
@@ -219,6 +219,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
                if (!ea->name_len)
                        break;
 
+               if (ea->name_len > ea_size)
+                       break;
+
                if (buffer) {
                        /* Check if we can use field ea->name */
                        if (off + ea_size > size)
@@ -744,6 +747,9 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
        int err;
        struct ntfs_inode *ni = ntfs_i(inode);
 
+       if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+               return -EIO;
+
        /* Dispatch request. */
        if (!strcmp(name, SYSTEM_DOS_ATTRIB)) {
                /* system.dos_attrib */
index b8e25ca51016d9df648ca58495baa9db553330ec..8586e2f5d24390c91263ea1ee48e7c3b22199cd2 100644 (file)
@@ -265,20 +265,18 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
        if (IS_ERR(old_file))
                return PTR_ERR(old_file);
 
+       /* Try to use clone_file_range to clone up within the same fs */
+       cloned = vfs_clone_file_range(old_file, 0, new_file, 0, len, 0);
+       if (cloned == len)
+               goto out_fput;
+
+       /* Couldn't clone, so now we try to copy the data */
        error = rw_verify_area(READ, old_file, &old_pos, len);
        if (!error)
                error = rw_verify_area(WRITE, new_file, &new_pos, len);
        if (error)
                goto out_fput;
 
-       /* Try to use clone_file_range to clone up within the same fs */
-       ovl_start_write(dentry);
-       cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0);
-       ovl_end_write(dentry);
-       if (cloned == len)
-               goto out_fput;
-       /* Couldn't clone, so now we try to copy the data */
-
        /* Check if lower fs supports seek operation */
        if (old_file->f_mode & FMODE_LSEEK)
                skip_hole = true;
index 984ffdaeed6ca8efcf8acb7852a481628ee3c380..5764f91d283e7027e2ca075057242968c1016455 100644 (file)
 
 struct ovl_lookup_data {
        struct super_block *sb;
-       struct vfsmount *mnt;
+       const struct ovl_layer *layer;
        struct qstr name;
        bool is_dir;
        bool opaque;
+       bool xwhiteouts;
        bool stop;
        bool last;
        char *redirect;
@@ -201,17 +202,13 @@ struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
        return real;
 }
 
-static bool ovl_is_opaquedir(struct ovl_fs *ofs, const struct path *path)
-{
-       return ovl_path_check_dir_xattr(ofs, path, OVL_XATTR_OPAQUE);
-}
-
 static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d,
                                                   const char *name,
                                                   struct dentry *base, int len,
                                                   bool drop_negative)
 {
-       struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->mnt), name, base, len);
+       struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->layer->mnt), name,
+                                                base, len);
 
        if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
                if (drop_negative && ret->d_lockref.count == 1) {
@@ -232,10 +229,13 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                             size_t prelen, const char *post,
                             struct dentry **ret, bool drop_negative)
 {
+       struct ovl_fs *ofs = OVL_FS(d->sb);
        struct dentry *this;
        struct path path;
        int err;
        bool last_element = !post[0];
+       bool is_upper = d->layer->idx == 0;
+       char val;
 
        this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative);
        if (IS_ERR(this)) {
@@ -253,8 +253,8 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
        }
 
        path.dentry = this;
-       path.mnt = d->mnt;
-       if (ovl_path_is_whiteout(OVL_FS(d->sb), &path)) {
+       path.mnt = d->layer->mnt;
+       if (ovl_path_is_whiteout(ofs, &path)) {
                d->stop = d->opaque = true;
                goto put_and_out;
        }
@@ -272,7 +272,7 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                        d->stop = true;
                        goto put_and_out;
                }
-               err = ovl_check_metacopy_xattr(OVL_FS(d->sb), &path, NULL);
+               err = ovl_check_metacopy_xattr(ofs, &path, NULL);
                if (err < 0)
                        goto out_err;
 
@@ -292,7 +292,12 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                if (d->last)
                        goto out;
 
-               if (ovl_is_opaquedir(OVL_FS(d->sb), &path)) {
+               /* overlay.opaque=x means xwhiteouts directory */
+               val = ovl_get_opaquedir_val(ofs, &path);
+               if (last_element && !is_upper && val == 'x') {
+                       d->xwhiteouts = true;
+                       ovl_layer_set_xwhiteouts(ofs, d->layer);
+               } else if (val == 'y') {
                        d->stop = true;
                        if (last_element)
                                d->opaque = true;
@@ -863,7 +868,8 @@ fail:
  * Returns next layer in stack starting from top.
  * Returns -1 if this is the last layer.
  */
-int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path,
+                 const struct ovl_layer **layer)
 {
        struct ovl_entry *oe = OVL_E(dentry);
        struct ovl_path *lowerstack = ovl_lowerstack(oe);
@@ -871,13 +877,16 @@ int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
        BUG_ON(idx < 0);
        if (idx == 0) {
                ovl_path_upper(dentry, path);
-               if (path->dentry)
+               if (path->dentry) {
+                       *layer = &OVL_FS(dentry->d_sb)->layers[0];
                        return ovl_numlower(oe) ? 1 : -1;
+               }
                idx++;
        }
        BUG_ON(idx > ovl_numlower(oe));
        path->dentry = lowerstack[idx - 1].dentry;
-       path->mnt = lowerstack[idx - 1].layer->mnt;
+       *layer = lowerstack[idx - 1].layer;
+       path->mnt = (*layer)->mnt;
 
        return (idx < ovl_numlower(oe)) ? idx + 1 : -1;
 }
@@ -1055,7 +1064,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
        old_cred = ovl_override_creds(dentry->d_sb);
        upperdir = ovl_dentry_upper(dentry->d_parent);
        if (upperdir) {
-               d.mnt = ovl_upper_mnt(ofs);
+               d.layer = &ofs->layers[0];
                err = ovl_lookup_layer(upperdir, &d, &upperdentry, true);
                if (err)
                        goto out;
@@ -1111,7 +1120,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                else if (d.is_dir || !ofs->numdatalayer)
                        d.last = lower.layer->idx == ovl_numlower(roe);
 
-               d.mnt = lower.layer->mnt;
+               d.layer = lower.layer;
                err = ovl_lookup_layer(lower.dentry, &d, &this, false);
                if (err)
                        goto out_put;
@@ -1278,6 +1287,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
        if (upperopaque)
                ovl_dentry_set_opaque(dentry);
+       if (d.xwhiteouts)
+               ovl_dentry_set_xwhiteouts(dentry);
 
        if (upperdentry)
                ovl_dentry_set_upper_alias(dentry);
index 5ba11eb4376792f3047bb683913557f79f1fa53e..ee949f3e7c77839e999cb6f5344cb167fd4e43b5 100644 (file)
@@ -50,7 +50,6 @@ enum ovl_xattr {
        OVL_XATTR_METACOPY,
        OVL_XATTR_PROTATTR,
        OVL_XATTR_XWHITEOUT,
-       OVL_XATTR_XWHITEOUTS,
 };
 
 enum ovl_inode_flag {
@@ -70,6 +69,8 @@ enum ovl_entry_flag {
        OVL_E_UPPER_ALIAS,
        OVL_E_OPAQUE,
        OVL_E_CONNECTED,
+       /* Lower stack may contain xwhiteout entries */
+       OVL_E_XWHITEOUTS,
 };
 
 enum {
@@ -477,6 +478,10 @@ bool ovl_dentry_test_flag(unsigned long flag, struct dentry *dentry);
 bool ovl_dentry_is_opaque(struct dentry *dentry);
 bool ovl_dentry_is_whiteout(struct dentry *dentry);
 void ovl_dentry_set_opaque(struct dentry *dentry);
+bool ovl_dentry_has_xwhiteouts(struct dentry *dentry);
+void ovl_dentry_set_xwhiteouts(struct dentry *dentry);
+void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs,
+                             const struct ovl_layer *layer);
 bool ovl_dentry_has_upper_alias(struct dentry *dentry);
 void ovl_dentry_set_upper_alias(struct dentry *dentry);
 bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags);
@@ -494,11 +499,10 @@ struct file *ovl_path_open(const struct path *path, int flags);
 int ovl_copy_up_start(struct dentry *dentry, int flags);
 void ovl_copy_up_end(struct dentry *dentry);
 bool ovl_already_copied_up(struct dentry *dentry, int flags);
-bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
-                             enum ovl_xattr ox);
+char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path,
+                          enum ovl_xattr ox);
 bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path);
 bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path);
-bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path);
 bool ovl_init_uuid_xattr(struct super_block *sb, struct ovl_fs *ofs,
                         const struct path *upperpath);
 
@@ -573,7 +577,13 @@ static inline bool ovl_is_impuredir(struct super_block *sb,
                .mnt = ovl_upper_mnt(ofs),
        };
 
-       return ovl_path_check_dir_xattr(ofs, &upperpath, OVL_XATTR_IMPURE);
+       return ovl_get_dir_xattr_val(ofs, &upperpath, OVL_XATTR_IMPURE) == 'y';
+}
+
+static inline char ovl_get_opaquedir_val(struct ovl_fs *ofs,
+                                        const struct path *path)
+{
+       return ovl_get_dir_xattr_val(ofs, path, OVL_XATTR_OPAQUE);
 }
 
 static inline bool ovl_redirect_follow(struct ovl_fs *ofs)
@@ -680,7 +690,8 @@ int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
 struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh);
 struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
                                struct dentry *origin, bool verify);
-int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path,
+                 const struct ovl_layer **layer);
 int ovl_verify_lowerdata(struct dentry *dentry);
 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                          unsigned int flags);
index 5fa9c58af65f2107c19524fc58808a3140820f5c..cb449ab310a7a89aafa0ee04ee7ff6c8141dd7d5 100644 (file)
@@ -40,6 +40,8 @@ struct ovl_layer {
        int idx;
        /* One fsid per unique underlying sb (upper fsid == 0) */
        int fsid;
+       /* xwhiteouts were found on this layer */
+       bool has_xwhiteouts;
 };
 
 struct ovl_path {
@@ -59,7 +61,7 @@ struct ovl_fs {
        unsigned int numfs;
        /* Number of data-only lower layers */
        unsigned int numdatalayer;
-       const struct ovl_layer *layers;
+       struct ovl_layer *layers;
        struct ovl_sb *fs;
        /* workbasedir is the path at workdir= mount option */
        struct dentry *workbasedir;
index e71156baa7bccae2d15c1830938d62116f546b74..0ca8af060b0c194e5824e59b59d9d2dc8b051355 100644 (file)
@@ -305,8 +305,6 @@ static inline int ovl_dir_read(const struct path *realpath,
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
-       rdd->in_xwhiteouts_dir = rdd->dentry &&
-               ovl_path_check_xwhiteouts_xattr(OVL_FS(rdd->dentry->d_sb), realpath);
        rdd->first_maybe_whiteout = NULL;
        rdd->ctx.pos = 0;
        do {
@@ -359,10 +357,13 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
                .is_lowest = false,
        };
        int idx, next;
+       const struct ovl_layer *layer;
 
        for (idx = 0; idx != -1; idx = next) {
-               next = ovl_path_next(idx, dentry, &realpath);
+               next = ovl_path_next(idx, dentry, &realpath, &layer);
                rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;
+               rdd.in_xwhiteouts_dir = layer->has_xwhiteouts &&
+                                       ovl_dentry_has_xwhiteouts(dentry);
 
                if (next != -1) {
                        err = ovl_dir_read(&realpath, &rdd);
index 4ab66e3d4cff9854a99bcc1505963927476bf1d5..2eef6c70b2aed54027b9ec2b1b544101ea32aefc 100644 (file)
@@ -1249,6 +1249,7 @@ static struct dentry *ovl_get_root(struct super_block *sb,
                                   struct ovl_entry *oe)
 {
        struct dentry *root;
+       struct ovl_fs *ofs = OVL_FS(sb);
        struct ovl_path *lowerpath = ovl_lowerstack(oe);
        unsigned long ino = d_inode(lowerpath->dentry)->i_ino;
        int fsid = lowerpath->layer->fsid;
@@ -1270,6 +1271,20 @@ static struct dentry *ovl_get_root(struct super_block *sb,
                        ovl_set_flag(OVL_IMPURE, d_inode(root));
        }
 
+       /* Look for xwhiteouts marker except in the lowermost layer */
+       for (int i = 0; i < ovl_numlower(oe) - 1; i++, lowerpath++) {
+               struct path path = {
+                       .mnt = lowerpath->layer->mnt,
+                       .dentry = lowerpath->dentry,
+               };
+
+               /* overlay.opaque=x means xwhiteouts directory */
+               if (ovl_get_opaquedir_val(ofs, &path) == 'x') {
+                       ovl_layer_set_xwhiteouts(ofs, lowerpath->layer);
+                       ovl_dentry_set_xwhiteouts(root);
+               }
+       }
+
        /* Root is always merge -> can have whiteouts */
        ovl_set_flag(OVL_WHITEOUTS, d_inode(root));
        ovl_dentry_set_flag(OVL_E_CONNECTED, root);
index 0217094c23ea6ae8905c7cb0c44c3ba969345200..a8e17f14d7a219aafada9e174ef50c6f67f56ff7 100644 (file)
@@ -461,6 +461,33 @@ void ovl_dentry_set_opaque(struct dentry *dentry)
        ovl_dentry_set_flag(OVL_E_OPAQUE, dentry);
 }
 
+bool ovl_dentry_has_xwhiteouts(struct dentry *dentry)
+{
+       return ovl_dentry_test_flag(OVL_E_XWHITEOUTS, dentry);
+}
+
+void ovl_dentry_set_xwhiteouts(struct dentry *dentry)
+{
+       ovl_dentry_set_flag(OVL_E_XWHITEOUTS, dentry);
+}
+
+/*
+ * ovl_layer_set_xwhiteouts() is called before adding the overlay dir
+ * dentry to dcache, while readdir of that same directory happens after
+ * the overlay dir dentry is in dcache, so if some cpu observes that
+ * ovl_dentry_is_xwhiteouts(), it will also observe layer->has_xwhiteouts
+ * for the layers where xwhiteouts marker was found in that merge dir.
+ */
+void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs,
+                             const struct ovl_layer *layer)
+{
+       if (layer->has_xwhiteouts)
+               return;
+
+       /* Write once to read-mostly layer properties */
+       ofs->layers[layer->idx].has_xwhiteouts = true;
+}
+
 /*
  * For hard links and decoded file handles, it's possible for ovl_dentry_upper()
  * to return positive, while there's no actual upper alias for the inode.
@@ -739,19 +766,6 @@ bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path)
        return res >= 0;
 }
 
-bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path)
-{
-       struct dentry *dentry = path->dentry;
-       int res;
-
-       /* xattr.whiteouts must be a directory */
-       if (!d_is_dir(dentry))
-               return false;
-
-       res = ovl_path_getxattr(ofs, path, OVL_XATTR_XWHITEOUTS, NULL, 0);
-       return res >= 0;
-}
-
 /*
  * Load persistent uuid from xattr into s_uuid if found, or store a new
  * random generated value in s_uuid and in xattr.
@@ -811,20 +825,17 @@ fail:
        return false;
 }
 
-bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
-                              enum ovl_xattr ox)
+char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path,
+                          enum ovl_xattr ox)
 {
        int res;
        char val;
 
        if (!d_is_dir(path->dentry))
-               return false;
+               return 0;
 
        res = ovl_path_getxattr(ofs, path, ox, &val, 1);
-       if (res == 1 && val == 'y')
-               return true;
-
-       return false;
+       return res == 1 ? val : 0;
 }
 
 #define OVL_XATTR_OPAQUE_POSTFIX       "opaque"
@@ -837,7 +848,6 @@ bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
 #define OVL_XATTR_METACOPY_POSTFIX     "metacopy"
 #define OVL_XATTR_PROTATTR_POSTFIX     "protattr"
 #define OVL_XATTR_XWHITEOUT_POSTFIX    "whiteout"
-#define OVL_XATTR_XWHITEOUTS_POSTFIX   "whiteouts"
 
 #define OVL_XATTR_TAB_ENTRY(x) \
        [x] = { [false] = OVL_XATTR_TRUSTED_PREFIX x ## _POSTFIX, \
@@ -854,7 +864,6 @@ const char *const ovl_xattr_table[][2] = {
        OVL_XATTR_TAB_ENTRY(OVL_XATTR_METACOPY),
        OVL_XATTR_TAB_ENTRY(OVL_XATTR_PROTATTR),
        OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUT),
-       OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUTS),
 };
 
 int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
index ff08a8957552add31a8fdf98e202f8380d519e50..34a47fb0c57f2570a4f7cb1f45373ddaf2afa883 100644 (file)
@@ -477,13 +477,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        int permitted;
        struct mm_struct *mm;
        unsigned long long start_time;
-       unsigned long cmin_flt = 0, cmaj_flt = 0;
-       unsigned long  min_flt = 0,  maj_flt = 0;
-       u64 cutime, cstime, utime, stime;
-       u64 cgtime, gtime;
+       unsigned long cmin_flt, cmaj_flt, min_flt, maj_flt;
+       u64 cutime, cstime, cgtime, utime, stime, gtime;
        unsigned long rsslim = 0;
        unsigned long flags;
        int exit_code = task->exit_code;
+       struct signal_struct *sig = task->signal;
+       unsigned int seq = 1;
 
        state = *get_task_state(task);
        vsize = eip = esp = 0;
@@ -511,12 +511,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
        sigemptyset(&sigign);
        sigemptyset(&sigcatch);
-       cutime = cstime = utime = stime = 0;
-       cgtime = gtime = 0;
 
        if (lock_task_sighand(task, &flags)) {
-               struct signal_struct *sig = task->signal;
-
                if (sig->tty) {
                        struct pid *pgrp = tty_get_pgrp(sig->tty);
                        tty_pgrp = pid_nr_ns(pgrp, ns);
@@ -527,28 +523,9 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                num_threads = get_nr_threads(task);
                collect_sigign_sigcatch(task, &sigign, &sigcatch);
 
-               cmin_flt = sig->cmin_flt;
-               cmaj_flt = sig->cmaj_flt;
-               cutime = sig->cutime;
-               cstime = sig->cstime;
-               cgtime = sig->cgtime;
                rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
 
-               /* add up live thread stats at the group level */
                if (whole) {
-                       struct task_struct *t;
-
-                       __for_each_thread(sig, t) {
-                               min_flt += t->min_flt;
-                               maj_flt += t->maj_flt;
-                               gtime += task_gtime(t);
-                       }
-
-                       min_flt += sig->min_flt;
-                       maj_flt += sig->maj_flt;
-                       thread_group_cputime_adjusted(task, &utime, &stime);
-                       gtime += sig->gtime;
-
                        if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
                                exit_code = sig->group_exit_code;
                }
@@ -562,10 +539,41 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
        if (permitted && (!whole || num_threads < 2))
                wchan = !task_is_running(task);
-       if (!whole) {
+
+       do {
+               seq++; /* 2 on the 1st/lockless path, otherwise odd */
+               flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
+
+               cmin_flt = sig->cmin_flt;
+               cmaj_flt = sig->cmaj_flt;
+               cutime = sig->cutime;
+               cstime = sig->cstime;
+               cgtime = sig->cgtime;
+
+               if (whole) {
+                       struct task_struct *t;
+
+                       min_flt = sig->min_flt;
+                       maj_flt = sig->maj_flt;
+                       gtime = sig->gtime;
+
+                       rcu_read_lock();
+                       __for_each_thread(sig, t) {
+                               min_flt += t->min_flt;
+                               maj_flt += t->maj_flt;
+                               gtime += task_gtime(t);
+                       }
+                       rcu_read_unlock();
+               }
+       } while (need_seqretry(&sig->stats_lock, seq));
+       done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
+
+       if (whole) {
+               thread_group_cputime_adjusted(task, &utime, &stime);
+       } else {
+               task_cputime_adjusted(task, &utime, &stime);
                min_flt = task->min_flt;
                maj_flt = task->maj_flt;
-               task_cputime_adjusted(task, &utime, &stime);
                gtime = task_gtime(task);
        }
 
index 62b16f42d5d2585074733e8f825fa4a928f371a8..3f78ebbb795fe237398789fb700d1b6d358859f5 100644 (file)
@@ -2432,7 +2432,6 @@ static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
 
 static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
 {
-       struct mmu_notifier_range range;
        struct pagemap_scan_private p = {0};
        unsigned long walk_start;
        size_t n_ranges_out = 0;
@@ -2448,15 +2447,9 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
        if (ret)
                return ret;
 
-       /* Protection change for the range is going to happen. */
-       if (p.arg.flags & PM_SCAN_WP_MATCHING) {
-               mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
-                                       mm, p.arg.start, p.arg.end);
-               mmu_notifier_invalidate_range_start(&range);
-       }
-
        for (walk_start = p.arg.start; walk_start < p.arg.end;
                        walk_start = p.arg.walk_end) {
+               struct mmu_notifier_range range;
                long n_out;
 
                if (fatal_signal_pending(current)) {
@@ -2467,8 +2460,20 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
                ret = mmap_read_lock_killable(mm);
                if (ret)
                        break;
+
+               /* Protection change for the range is going to happen. */
+               if (p.arg.flags & PM_SCAN_WP_MATCHING) {
+                       mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
+                                               mm, walk_start, p.arg.end);
+                       mmu_notifier_invalidate_range_start(&range);
+               }
+
                ret = walk_page_range(mm, walk_start, p.arg.end,
                                      &pagemap_scan_ops, &p);
+
+               if (p.arg.flags & PM_SCAN_WP_MATCHING)
+                       mmu_notifier_invalidate_range_end(&range);
+
                mmap_read_unlock(mm);
 
                n_out = pagemap_scan_flush_buffer(&p);
@@ -2494,9 +2499,6 @@ static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
        if (pagemap_scan_writeback_args(&p.arg, uarg))
                ret = -EFAULT;
 
-       if (p.arg.flags & PM_SCAN_WP_MATCHING)
-               mmu_notifier_invalidate_range_end(&range);
-
        kfree(p.vec_buf);
        return ret;
 }
index f8c1120b8311f62324324b911b0aa4aebe4ccb04..de07f978ce3ebe16bf42bf5315996fd074de5aac 100644 (file)
@@ -373,9 +373,9 @@ int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
 }
 EXPORT_SYMBOL(generic_remap_file_range_prep);
 
-loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
-                          struct file *file_out, loff_t pos_out,
-                          loff_t len, unsigned int remap_flags)
+loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+                           struct file *file_out, loff_t pos_out,
+                           loff_t len, unsigned int remap_flags)
 {
        loff_t ret;
 
@@ -391,23 +391,6 @@ loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
        if (!file_in->f_op->remap_file_range)
                return -EOPNOTSUPP;
 
-       ret = file_in->f_op->remap_file_range(file_in, pos_in,
-                       file_out, pos_out, len, remap_flags);
-       if (ret < 0)
-               return ret;
-
-       fsnotify_access(file_in);
-       fsnotify_modify(file_out);
-       return ret;
-}
-EXPORT_SYMBOL(do_clone_file_range);
-
-loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
-                           struct file *file_out, loff_t pos_out,
-                           loff_t len, unsigned int remap_flags)
-{
-       loff_t ret;
-
        ret = remap_verify_area(file_in, pos_in, len, false);
        if (ret)
                return ret;
@@ -417,10 +400,14 @@ loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
                return ret;
 
        file_start_write(file_out);
-       ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
-                                 remap_flags);
+       ret = file_in->f_op->remap_file_range(file_in, pos_in,
+                       file_out, pos_out, len, remap_flags);
        file_end_write(file_out);
+       if (ret < 0)
+               return ret;
 
+       fsnotify_access(file_in);
+       fsnotify_modify(file_out);
        return ret;
 }
 EXPORT_SYMBOL(vfs_clone_file_range);
index d64a306a414be0580e910842b19f150bf43863a9..1daeb5714faad14c24c49a5efd5d118aaf04b54c 100644 (file)
@@ -145,21 +145,27 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
        struct cached_fid *cfid;
        struct cached_fids *cfids;
        const char *npath;
+       int retries = 0, cur_sleep = 1;
 
        if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
            is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
                return -EOPNOTSUPP;
 
        ses = tcon->ses;
-       server = ses->server;
        cfids = tcon->cfids;
 
-       if (!server->ops->new_lease_key)
-               return -EIO;
-
        if (cifs_sb->root == NULL)
                return -ENOENT;
 
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       oplock = SMB2_OPLOCK_LEVEL_II;
+       server = cifs_pick_channel(ses);
+
+       if (!server->ops->new_lease_key)
+               return -EIO;
+
        utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
        if (!utf16_path)
                return -ENOMEM;
@@ -268,6 +274,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
         */
        cfid->has_lease = true;
 
+       if (retries) {
+               smb2_set_replay(server, &rqst[0]);
+               smb2_set_replay(server, &rqst[1]);
+       }
+
        rc = compound_send_recv(xid, ses, server,
                                flags, 2, rqst,
                                resp_buftype, rsp_iov);
@@ -367,6 +378,11 @@ out:
                atomic_inc(&tcon->num_remote_opens);
        }
        kfree(utf16_path);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
index 60027f5aebe87f2050584994ee68699ae7ed6e5b..3e4209f41c18f854a190c523b67e3c105689ca2f 100644 (file)
@@ -659,6 +659,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
                                        spin_lock(&tcon->stat_lock);
                                        tcon->bytes_read = 0;
                                        tcon->bytes_written = 0;
+                                       tcon->stats_from_time = ktime_get_real_seconds();
                                        spin_unlock(&tcon->stat_lock);
                                        if (server->ops->clear_stats)
                                                server->ops->clear_stats(tcon);
@@ -737,8 +738,9 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
                                seq_printf(m, "\n%d) %s", i, tcon->tree_name);
                                if (tcon->need_reconnect)
                                        seq_puts(m, "\tDISCONNECTED ");
-                               seq_printf(m, "\nSMBs: %d",
-                                          atomic_read(&tcon->num_smbs_sent));
+                               seq_printf(m, "\nSMBs: %d since %ptTs UTC",
+                                          atomic_read(&tcon->num_smbs_sent),
+                                          &tcon->stats_from_time);
                                if (server->ops->print_stats)
                                        server->ops->print_stats(m, tcon);
                        }
index ef4c2e3c9fa6130b129be94d4a15c4724b952ed9..6322f0f68a176b177c943b074fe414c4905bf9bb 100644 (file)
@@ -572,7 +572,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
                len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
                UniStrupr(user);
        } else {
-               memset(user, '\0', 2);
+               *(u16 *)user = 0;
        }
 
        rc = crypto_shash_update(ses->server->secmech.hmacmd5,
index 99b0ade833aa3c5469405da758709a8cc36a18f9..2a4a4e3a8751f2ce8f0409ce79dc5024e02bb883 100644 (file)
@@ -396,7 +396,7 @@ cifs_alloc_inode(struct super_block *sb)
        spin_lock_init(&cifs_inode->writers_lock);
        cifs_inode->writers = 0;
        cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
-       cifs_inode->server_eof = 0;
+       cifs_inode->netfs.remote_i_size = 0;
        cifs_inode->uniqueid = 0;
        cifs_inode->createtime = 0;
        cifs_inode->epoch = 0;
@@ -430,7 +430,7 @@ static void
 cifs_evict_inode(struct inode *inode)
 {
        truncate_inode_pages_final(&inode->i_data);
-       if (inode->i_state & I_PINNING_FSCACHE_WB)
+       if (inode->i_state & I_PINNING_NETFS_WB)
                cifs_fscache_unuse_inode_cookie(inode, true);
        cifs_fscache_release_inode_cookie(inode);
        clear_inode(inode);
@@ -681,6 +681,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
        if (tcon->ses->server->min_offload)
                seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
+       if (tcon->ses->server->retrans)
+               seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
        seq_printf(s, ",echo_interval=%lu",
                        tcon->ses->server->echo_interval / HZ);
 
@@ -793,8 +795,7 @@ static int cifs_show_stats(struct seq_file *s, struct dentry *root)
 
 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-       fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
-       return 0;
+       return netfs_unpin_writeback(inode, wbc);
 }
 
 static int cifs_drop_inode(struct inode *inode)
@@ -1222,7 +1223,7 @@ static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *s
        if (rc < 0)
                goto set_failed;
 
-       netfs_resize_file(&src_cifsi->netfs, src_end);
+       netfs_resize_file(&src_cifsi->netfs, src_end, true);
        fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
        return 0;
 
@@ -1353,7 +1354,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
                        smb_file_src, smb_file_target, off, len, destoff);
                if (rc == 0 && new_size > i_size_read(target_inode)) {
                        truncate_setsize(target_inode, new_size);
-                       netfs_resize_file(&target_cifsi->netfs, new_size);
+                       netfs_resize_file(&target_cifsi->netfs, new_size, true);
                        fscache_resize_cookie(cifs_inode_cookie(target_inode),
                                              new_size);
                }
@@ -1379,6 +1380,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
        struct inode *src_inode = file_inode(src_file);
        struct inode *target_inode = file_inode(dst_file);
        struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
+       struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
        struct cifsFileInfo *smb_file_src;
        struct cifsFileInfo *smb_file_target;
        struct cifs_tcon *src_tcon;
@@ -1427,7 +1429,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
         * Advance the EOF marker after the flush above to the end of the range
         * if it's short of that.
         */
-       if (src_cifsi->server_eof < off + len) {
+       if (src_cifsi->netfs.remote_i_size < off + len) {
                rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
                if (rc < 0)
                        goto unlock;
@@ -1451,12 +1453,22 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
        /* Discard all the folios that overlap the destination region. */
        truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
 
+       fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
+                          i_size_read(target_inode), 0);
+
        rc = file_modified(dst_file);
        if (!rc) {
                rc = target_tcon->ses->server->ops->copychunk_range(xid,
                        smb_file_src, smb_file_target, off, len, destoff);
-               if (rc > 0 && destoff + rc > i_size_read(target_inode))
+               if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
                        truncate_setsize(target_inode, destoff + rc);
+                       netfs_resize_file(&target_cifsi->netfs,
+                                         i_size_read(target_inode), true);
+                       fscache_resize_cookie(cifs_inode_cookie(target_inode),
+                                             i_size_read(target_inode));
+               }
+               if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
+                       target_cifsi->netfs.zero_point = destoff + rc;
        }
 
        file_accessed(src_file);
index 879d5ef8a66eda8bd3c0aeb8dcea6556ce7acef8..c86a72c9d9ecd4268481a4caa29e58004cb7d6b9 100644 (file)
  */
 #define CIFS_DEF_ACTIMEO (1 * HZ)
 
+/*
+ * max sleep time before retry to server
+ */
+#define CIFS_MAX_SLEEP 2000
+
 /*
  * max attribute cache timeout (jiffies) - 2^30
  */
@@ -82,7 +87,7 @@
 #define SMB_INTERFACE_POLL_INTERVAL    600
 
 /* maximum number of PDUs in one compound */
-#define MAX_COMPOUND 5
+#define MAX_COMPOUND 7
 
 /*
  * Default number of credits to keep available for SMB3.
@@ -204,6 +209,8 @@ struct cifs_open_info_data {
                };
        } reparse;
        char *symlink_target;
+       struct cifs_sid posix_owner;
+       struct cifs_sid posix_group;
        union {
                struct smb2_file_all_info fi;
                struct smb311_posix_qinfo posix_fi;
@@ -751,6 +758,7 @@ struct TCP_Server_Info {
        unsigned int    max_read;
        unsigned int    max_write;
        unsigned int    min_offload;
+       unsigned int    retrans;
        __le16  compress_algorithm;
        __u16   signing_algorithm;
        __le16  cipher_type;
@@ -1024,6 +1032,8 @@ struct cifs_chan {
        __u8 signkey[SMB3_SIGN_KEY_SIZE];
 };
 
+#define CIFS_SES_FLAG_SCALE_CHANNELS (0x1)
+
 /*
  * Session structure.  One of these for each uid session with a particular host
  */
@@ -1056,6 +1066,7 @@ struct cifs_ses {
        enum securityEnum sectype; /* what security flavor was specified? */
        bool sign;              /* is signing required? */
        bool domainAuto:1;
+       unsigned int flags;
        __u16 session_flags;
        __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
        __u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
@@ -1207,6 +1218,7 @@ struct cifs_tcon {
        __u64    bytes_read;
        __u64    bytes_written;
        spinlock_t stat_lock;  /* protects the two fields above */
+       time64_t stats_from_time;
        FILE_SYSTEM_DEVICE_INFO fsDevInfo;
        FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
        FILE_SYSTEM_UNIX_INFO fsUnixInfo;
@@ -1497,6 +1509,7 @@ struct cifs_writedata {
        struct smbd_mr                  *mr;
 #endif
        struct cifs_credits             credits;
+       bool                            replay;
 };
 
 /*
@@ -1557,7 +1570,6 @@ struct cifsInodeInfo {
        spinlock_t writers_lock;
        unsigned int writers;           /* Number of writers on this inode */
        unsigned long time;             /* jiffies of last update of inode */
-       u64  server_eof;                /* current file size on server -- protected by i_lock */
        u64  uniqueid;                  /* server inode number */
        u64  createtime;                /* creation time on server */
        __u8 lease_key[SMB2_LEASE_KEY_SIZE];    /* lease key for this inode */
@@ -1827,6 +1839,13 @@ static inline bool is_retryable_error(int error)
        return false;
 }
 
+static inline bool is_replayable_error(int error)
+{
+       if (error == -EAGAIN || error == -ECONNABORTED)
+               return true;
+       return false;
+}
+
 
 /* cifs_get_writable_file() flags */
 #define FIND_WR_ANY         0
index 3052a208c6ca05aa52c7e297c1c2f6eed0af7b40..d03253f8f14552074dc79726a8d6521412eba6c4 100644 (file)
@@ -233,6 +233,12 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
        list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
                /* check if iface is still active */
                spin_lock(&ses->chan_lock);
+               if (cifs_ses_get_chan_index(ses, server) ==
+                   CIFS_INVAL_CHAN_INDEX) {
+                       spin_unlock(&ses->chan_lock);
+                       continue;
+               }
+
                if (!cifs_chan_is_iface_active(ses, server)) {
                        spin_unlock(&ses->chan_lock);
                        cifs_chan_update_iface(ses, server);
@@ -1574,6 +1580,9 @@ static int match_server(struct TCP_Server_Info *server,
        if (server->min_offload != ctx->min_offload)
                return 0;
 
+       if (server->retrans != ctx->retrans)
+               return 0;
+
        return 1;
 }
 
@@ -1798,6 +1807,7 @@ smbd_connected:
                goto out_err_crypto_release;
        }
        tcp_ses->min_offload = ctx->min_offload;
+       tcp_ses->retrans = ctx->retrans;
        /*
         * at this point we are the only ones with the pointer
         * to the struct since the kernel thread not created yet
@@ -4224,6 +4234,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
 
        /* only send once per connect */
        spin_lock(&tcon->tc_lock);
+
+       /* if tcon is marked for needing reconnect, update state */
+       if (tcon->need_reconnect)
+               tcon->status = TID_NEED_TCON;
+
        if (tcon->status == TID_GOOD) {
                spin_unlock(&tcon->tc_lock);
                return 0;
index a8a1d386da6566a2dec94099ae08a80199462bae..449c59830039bc04897e5031dba2dbc9c6649bad 100644 (file)
@@ -565,6 +565,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
 
        /* only send once per connect */
        spin_lock(&tcon->tc_lock);
+
+       /* if tcon is marked for needing reconnect, update state */
+       if (tcon->need_reconnect)
+               tcon->status = TID_NEED_TCON;
+
        if (tcon->status == TID_GOOD) {
                spin_unlock(&tcon->tc_lock);
                return 0;
@@ -625,8 +630,8 @@ out:
                spin_lock(&tcon->tc_lock);
                if (tcon->status == TID_IN_TCON)
                        tcon->status = TID_GOOD;
-               spin_unlock(&tcon->tc_lock);
                tcon->need_reconnect = false;
+               spin_unlock(&tcon->tc_lock);
        }
 
        return rc;
index 1b4262aff8fab0d66d885dcae7134fb87ba19f85..f391c9b803d84f9549b50a57f860abdaf58e43b2 100644 (file)
@@ -87,7 +87,7 @@ void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len
                        continue;
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
-                                 len, start, folio_index(folio), end);
+                                 len, start, folio->index, end);
                        continue;
                }
 
@@ -120,7 +120,7 @@ void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len
                        continue;
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
-                                 len, start, folio_index(folio), end);
+                                 len, start, folio->index, end);
                        continue;
                }
 
@@ -151,7 +151,7 @@ void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int le
        xas_for_each(&xas, folio, end) {
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
-                                 len, start, folio_index(folio), end);
+                                 len, start, folio->index, end);
                        continue;
                }
 
@@ -175,6 +175,9 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
 
        /* only send once per connect */
        spin_lock(&tcon->tc_lock);
+       if (tcon->need_reconnect)
+               tcon->status = TID_NEED_RECON;
+
        if (tcon->status != TID_NEED_RECON) {
                spin_unlock(&tcon->tc_lock);
                return;
@@ -2120,8 +2123,8 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 {
        loff_t end_of_write = offset + bytes_written;
 
-       if (end_of_write > cifsi->server_eof)
-               cifsi->server_eof = end_of_write;
+       if (end_of_write > cifsi->netfs.remote_i_size)
+               netfs_resize_file(&cifsi->netfs, end_of_write, true);
 }
 
 static ssize_t
@@ -2651,7 +2654,7 @@ static void cifs_extend_writeback(struct address_space *mapping,
                                continue;
                        if (xa_is_value(folio))
                                break;
-                       if (folio_index(folio) != index)
+                       if (folio->index != index)
                                break;
                        if (!folio_try_get_rcu(folio)) {
                                xas_reset(&xas);
@@ -2899,7 +2902,7 @@ redo_folio:
                                        goto skip_write;
                        }
 
-                       if (folio_mapping(folio) != mapping ||
+                       if (folio->mapping != mapping ||
                            !folio_test_dirty(folio)) {
                                start += folio_size(folio);
                                folio_unlock(folio);
@@ -3247,8 +3250,8 @@ cifs_uncached_writev_complete(struct work_struct *work)
 
        spin_lock(&inode->i_lock);
        cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
-       if (cifsi->server_eof > inode->i_size)
-               i_size_write(inode, cifsi->server_eof);
+       if (cifsi->netfs.remote_i_size > inode->i_size)
+               i_size_write(inode, cifsi->netfs.remote_i_size);
        spin_unlock(&inode->i_lock);
 
        complete(&wdata->done);
@@ -3300,6 +3303,7 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
                        if (wdata->cfile->invalidHandle)
                                rc = -EAGAIN;
                        else {
+                               wdata->replay = true;
 #ifdef CONFIG_CIFS_SMB_DIRECT
                                if (wdata->mr) {
                                        wdata->mr->need_invalidate = true;
@@ -5043,27 +5047,13 @@ static void cifs_swap_deactivate(struct file *file)
        /* do we need to unpin (or unlock) the file */
 }
 
-/*
- * Mark a page as having been made dirty and thus needing writeback.  We also
- * need to pin the cache object to write back to.
- */
-#ifdef CONFIG_CIFS_FSCACHE
-static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
-{
-       return fscache_dirty_folio(mapping, folio,
-                                       cifs_inode_cookie(mapping->host));
-}
-#else
-#define cifs_dirty_folio filemap_dirty_folio
-#endif
-
 const struct address_space_operations cifs_addr_ops = {
        .read_folio = cifs_read_folio,
        .readahead = cifs_readahead,
        .writepages = cifs_writepages,
        .write_begin = cifs_write_begin,
        .write_end = cifs_write_end,
-       .dirty_folio = cifs_dirty_folio,
+       .dirty_folio = netfs_dirty_folio,
        .release_folio = cifs_release_folio,
        .direct_IO = cifs_direct_io,
        .invalidate_folio = cifs_invalidate_folio,
@@ -5087,7 +5077,7 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
        .writepages = cifs_writepages,
        .write_begin = cifs_write_begin,
        .write_end = cifs_write_end,
-       .dirty_folio = cifs_dirty_folio,
+       .dirty_folio = netfs_dirty_folio,
        .release_folio = cifs_release_folio,
        .invalidate_folio = cifs_invalidate_folio,
        .launder_folio = cifs_launder_folio,
index a3493da12ad1e6cbac7249f3e8464cf7eeff542e..aec8dbd1f9dbd2b685219850c65b0695ba657aba 100644 (file)
@@ -139,6 +139,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
        fsparam_u32("dir_mode", Opt_dirmode),
        fsparam_u32("port", Opt_port),
        fsparam_u32("min_enc_offload", Opt_min_enc_offload),
+       fsparam_u32("retrans", Opt_retrans),
        fsparam_u32("esize", Opt_min_enc_offload),
        fsparam_u32("bsize", Opt_blocksize),
        fsparam_u32("rasize", Opt_rasize),
@@ -210,7 +211,7 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c
 
        switch (match_token(value, cifs_secflavor_tokens, args)) {
        case Opt_sec_krb5p:
-               cifs_errorf(fc, "sec=krb5p is not supported!\n");
+               cifs_errorf(fc, "sec=krb5p is not supported. Use sec=krb5,seal instead\n");
                return 1;
        case Opt_sec_krb5i:
                ctx->sign = true;
@@ -1064,6 +1065,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
        case Opt_min_enc_offload:
                ctx->min_offload = result.uint_32;
                break;
+       case Opt_retrans:
+               ctx->retrans = result.uint_32;
+               break;
        case Opt_blocksize:
                /*
                 * inode blocksize realistically should never need to be
@@ -1619,6 +1623,8 @@ int smb3_init_fs_context(struct fs_context *fc)
        ctx->backupuid_specified = false; /* no backup intent for a user */
        ctx->backupgid_specified = false; /* no backup intent for a group */
 
+       ctx->retrans = 1;
+
 /*
  *     short int override_uid = -1;
  *     short int override_gid = -1;
index cf46916286d029a9bd36ea4980786456bd11449f..182ce11cbe9362eccf73eebadcdcfc2ee7ac7988 100644 (file)
@@ -118,6 +118,7 @@ enum cifs_param {
        Opt_file_mode,
        Opt_dirmode,
        Opt_min_enc_offload,
+       Opt_retrans,
        Opt_blocksize,
        Opt_rasize,
        Opt_rsize,
@@ -245,6 +246,7 @@ struct smb3_fs_context {
        unsigned int rsize;
        unsigned int wsize;
        unsigned int min_offload;
+       unsigned int retrans;
        bool sockopt_tcp_nodelay:1;
        /* attribute cache timemout for files and directories in jiffies */
        unsigned long acregmax;
index e5cad149f5a2d7d3f12d53ef61f78332a828c367..c4a3cb736881ae73fe2e002fcb2f5cadbe6cd731 100644 (file)
@@ -180,7 +180,7 @@ static int fscache_fallback_write_pages(struct inode *inode, loff_t start, size_
        if (ret < 0)
                return ret;
 
-       ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
+       ret = cres.ops->prepare_write(&cres, &start, &len, len, i_size_read(inode),
                                      no_space_allocated_yet);
        if (ret == 0)
                ret = fscache_write(&cres, start, &iter, NULL, NULL);
index 9f37c1758f732cb310a0dc958b288b225e851412..d02f8ba29cb5bf22f1dcdcc3932f20afc3094f22 100644 (file)
@@ -104,7 +104,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
        fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
        mtime = inode_get_mtime(inode);
        if (timespec64_equal(&mtime, &fattr->cf_mtime) &&
-           cifs_i->server_eof == fattr->cf_eof) {
+           cifs_i->netfs.remote_i_size == fattr->cf_eof) {
                cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
                         __func__, cifs_i->uniqueid);
                return;
@@ -194,7 +194,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
        else
                clear_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags);
 
-       cifs_i->server_eof = fattr->cf_eof;
+       cifs_i->netfs.remote_i_size = fattr->cf_eof;
        /*
         * Can't safely change the file size here if the client is writing to
         * it due to potential races.
@@ -665,8 +665,6 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
 /* Fill a cifs_fattr struct with info from POSIX info struct */
 static void smb311_posix_info_to_fattr(struct cifs_fattr *fattr,
                                       struct cifs_open_info_data *data,
-                                      struct cifs_sid *owner,
-                                      struct cifs_sid *group,
                                       struct super_block *sb)
 {
        struct smb311_posix_qinfo *info = &data->posix_fi;
@@ -722,8 +720,8 @@ out_reparse:
                fattr->cf_symlink_target = data->symlink_target;
                data->symlink_target = NULL;
        }
-       sid_to_id(cifs_sb, owner, fattr, SIDOWNER);
-       sid_to_id(cifs_sb, group, fattr, SIDGROUP);
+       sid_to_id(cifs_sb, &data->posix_owner, fattr, SIDOWNER);
+       sid_to_id(cifs_sb, &data->posix_group, fattr, SIDGROUP);
 
        cifs_dbg(FYI, "POSIX query info: mode 0x%x uniqueid 0x%llx nlink %d\n",
                fattr->cf_mode, fattr->cf_uniqueid, fattr->cf_nlink);
@@ -1070,9 +1068,7 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
                                 const unsigned int xid,
                                 struct cifs_tcon *tcon,
                                 const char *full_path,
-                                struct cifs_fattr *fattr,
-                                struct cifs_sid *owner,
-                                struct cifs_sid *group)
+                                struct cifs_fattr *fattr)
 {
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -1117,7 +1113,7 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
        }
 
        if (tcon->posix_extensions)
-               smb311_posix_info_to_fattr(fattr, data, owner, group, sb);
+               smb311_posix_info_to_fattr(fattr, data, sb);
        else
                cifs_open_info_to_fattr(fattr, data, sb);
 out:
@@ -1171,8 +1167,7 @@ static int cifs_get_fattr(struct cifs_open_info_data *data,
                 */
                if (cifs_open_data_reparse(data)) {
                        rc = reparse_info_to_fattr(data, sb, xid, tcon,
-                                                  full_path, fattr,
-                                                  NULL, NULL);
+                                                  full_path, fattr);
                } else {
                        cifs_open_info_to_fattr(fattr, data, sb);
                }
@@ -1317,10 +1312,10 @@ static int smb311_posix_get_fattr(struct cifs_open_info_data *data,
                                  const unsigned int xid)
 {
        struct cifs_open_info_data tmp_data = {};
+       struct TCP_Server_Info *server;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
-       struct cifs_sid owner, group;
        int tmprc;
        int rc = 0;
 
@@ -1328,14 +1323,14 @@ static int smb311_posix_get_fattr(struct cifs_open_info_data *data,
        if (IS_ERR(tlink))
                return PTR_ERR(tlink);
        tcon = tlink_tcon(tlink);
+       server = tcon->ses->server;
 
        /*
         * 1. Fetch file metadata if not provided (data)
         */
        if (!data) {
-               rc = smb311_posix_query_path_info(xid, tcon, cifs_sb,
-                                                 full_path, &tmp_data,
-                                                 &owner, &group);
+               rc = server->ops->query_path_info(xid, tcon, cifs_sb,
+                                                 full_path, &tmp_data);
                data = &tmp_data;
        }
 
@@ -1347,11 +1342,9 @@ static int smb311_posix_get_fattr(struct cifs_open_info_data *data,
        case 0:
                if (cifs_open_data_reparse(data)) {
                        rc = reparse_info_to_fattr(data, sb, xid, tcon,
-                                                  full_path, fattr,
-                                                  &owner, &group);
+                                                  full_path, fattr);
                } else {
-                       smb311_posix_info_to_fattr(fattr, data,
-                                                  &owner, &group, sb);
+                       smb311_posix_info_to_fattr(fattr, data, sb);
                }
                break;
        case -EREMOTE:
@@ -2865,7 +2858,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
 
 set_size_out:
        if (rc == 0) {
-               cifsInode->server_eof = attrs->ia_size;
+               netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true);
                cifs_setsize(inode, attrs->ia_size);
                /*
                 * i_blocks is not related to (i_size / i_blksize), but instead
@@ -3018,6 +3011,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
        if ((attrs->ia_valid & ATTR_SIZE) &&
            attrs->ia_size != i_size_read(inode)) {
                truncate_setsize(inode, attrs->ia_size);
+               netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true);
                fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
        }
 
@@ -3217,6 +3211,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
        if ((attrs->ia_valid & ATTR_SIZE) &&
            attrs->ia_size != i_size_read(inode)) {
                truncate_setsize(inode, attrs->ia_size);
+               netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true);
                fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
        }
 
index c2137ea3c2538937665056619d3ad17a0089eb29..0748d7b757b95a88abcab10418d5f4d8dc78642d 100644 (file)
@@ -140,6 +140,7 @@ tcon_info_alloc(bool dir_leases_enabled)
        spin_lock_init(&ret_buf->stat_lock);
        atomic_set(&ret_buf->num_local_opens, 0);
        atomic_set(&ret_buf->num_remote_opens, 0);
+       ret_buf->stats_from_time = ktime_get_real_seconds();
 #ifdef CONFIG_CIFS_DFS_UPCALL
        INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
 #endif
index 056cae1ddccef274010b09e64ddbe5231a485f40..b520eea7bfce83b2fd8e9a1d9e1685c39516840e 100644 (file)
@@ -133,15 +133,15 @@ retry:
                                 * Query dir responses don't provide enough
                                 * information about reparse points other than
                                 * their reparse tags.  Save an invalidation by
-                                * not clobbering the existing mode, size and
-                                * symlink target (if any) when reparse tag and
-                                * ctime haven't changed.
+                                * not clobbering some existing attributes when
+                                * reparse tag and ctime haven't changed.
                                 */
                                rc = 0;
                                if (fattr->cf_cifsattrs & ATTR_REPARSE) {
                                        if (likely(reparse_inode_match(inode, fattr))) {
                                                fattr->cf_mode = inode->i_mode;
-                                               fattr->cf_eof = CIFS_I(inode)->server_eof;
+                                               fattr->cf_rdev = inode->i_rdev;
+                                               fattr->cf_eof = CIFS_I(inode)->netfs.remote_i_size;
                                                fattr->cf_symlink_target = NULL;
                                        } else {
                                                CIFS_I(inode)->time = 0;
@@ -307,14 +307,16 @@ cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
 }
 
 static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
-                                      SEARCH_ID_FULL_DIR_INFO *info,
+                                      const void *info,
                                       struct cifs_sb_info *cifs_sb)
 {
+       const FILE_FULL_DIRECTORY_INFO *di = info;
+
        __dir_info_to_fattr(fattr, info);
 
-       /* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */
+       /* See MS-FSCC 2.4.14, 2.4.19 */
        if (fattr->cf_cifsattrs & ATTR_REPARSE)
-               fattr->cf_cifstag = le32_to_cpu(info->EaSize);
+               fattr->cf_cifstag = le32_to_cpu(di->EaSize);
        cifs_fill_common_info(fattr, cifs_sb);
 }
 
@@ -396,7 +398,7 @@ ffirst_retry:
        } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
        } else /* not srvinos - BB fixme add check for backlevel? */ {
-               cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+               cifsFile->srch_inf.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
        }
 
        search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
@@ -645,10 +647,10 @@ static int cifs_entry_is_dot(struct cifs_dirent *de, bool is_unicode)
 static int is_dir_changed(struct file *file)
 {
        struct inode *inode = file_inode(file);
-       struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
+       struct cifsInodeInfo *cifs_inode_info = CIFS_I(inode);
 
-       if (cifsInfo->time == 0)
-               return 1; /* directory was changed, perhaps due to unlink */
+       if (cifs_inode_info->time == 0)
+               return 1; /* directory was changed, e.g. unlink or new file */
        else
                return 0;
 
@@ -987,10 +989,9 @@ static int cifs_filldir(char *find_entry, struct file *file,
                                       (FIND_FILE_STANDARD_INFO *)find_entry,
                                       cifs_sb);
                break;
+       case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
        case SMB_FIND_FILE_ID_FULL_DIR_INFO:
-               cifs_fulldir_info_to_fattr(&fattr,
-                                          (SEARCH_ID_FULL_DIR_INFO *)find_entry,
-                                          cifs_sb);
+               cifs_fulldir_info_to_fattr(&fattr, find_entry, cifs_sb);
                break;
        default:
                cifs_dir_info_to_fattr(&fattr,
index cde81042bebda6b8f3a454f46eb8b055af8d2f3c..8f37373fd33344bacbf4d492f6115d9396573379 100644 (file)
@@ -75,6 +75,10 @@ cifs_ses_get_chan_index(struct cifs_ses *ses,
 {
        unsigned int i;
 
+       /* if the channel is waiting for termination */
+       if (server && server->terminate)
+               return CIFS_INVAL_CHAN_INDEX;
+
        for (i = 0; i < ses->chan_count; i++) {
                if (ses->chans[i].server == server)
                        return i;
@@ -84,7 +88,6 @@ cifs_ses_get_chan_index(struct cifs_ses *ses,
        if (server)
                cifs_dbg(VFS, "unable to get chan index for server: 0x%llx",
                         server->conn_id);
-       WARN_ON(1);
        return CIFS_INVAL_CHAN_INDEX;
 }
 
@@ -269,6 +272,8 @@ int cifs_try_adding_channels(struct cifs_ses *ses)
                                         &iface->sockaddr,
                                         rc);
                                kref_put(&iface->refcount, release_iface);
+                               /* failure to add chan should increase weight */
+                               iface->weight_fulfilled++;
                                continue;
                        }
 
index 5053a5550abeda064234e82c037fbbd89df6f746..05818cd6d932e91792ecc65d764eba0a942cb28d 100644 (file)
@@ -56,6 +56,35 @@ static inline __u32 file_create_options(struct dentry *dentry)
        return 0;
 }
 
+/* Parse owner and group from SMB3.1.1 POSIX query info */
+static int parse_posix_sids(struct cifs_open_info_data *data,
+                           struct kvec *rsp_iov)
+{
+       struct smb2_query_info_rsp *qi = rsp_iov->iov_base;
+       unsigned int out_len = le32_to_cpu(qi->OutputBufferLength);
+       unsigned int qi_len = sizeof(data->posix_fi);
+       int owner_len, group_len;
+       u8 *sidsbuf, *sidsbuf_end;
+
+       if (out_len <= qi_len)
+               return -EINVAL;
+
+       sidsbuf = (u8 *)qi + le16_to_cpu(qi->OutputBufferOffset) + qi_len;
+       sidsbuf_end = sidsbuf + out_len - qi_len;
+
+       owner_len = posix_info_sid_size(sidsbuf, sidsbuf_end);
+       if (owner_len == -1)
+               return -EINVAL;
+
+       memcpy(&data->posix_owner, sidsbuf, owner_len);
+       group_len = posix_info_sid_size(sidsbuf + owner_len, sidsbuf_end);
+       if (group_len == -1)
+               return -EINVAL;
+
+       memcpy(&data->posix_group, sidsbuf + owner_len, group_len);
+       return 0;
+}
+
 /*
  * note: If cfile is passed, the reference to it is dropped here.
  * So make sure that you do not reuse cfile after return from this func.
@@ -69,7 +98,6 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
                            __u32 desired_access, __u32 create_disposition,
                            __u32 create_options, umode_t mode, struct kvec *in_iov,
                            int *cmds, int num_cmds, struct cifsFileInfo *cfile,
-                           __u8 **extbuf, size_t *extbuflen,
                            struct kvec *out_iov, int *out_buftype)
 {
 
@@ -92,6 +120,14 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
        unsigned int size[2];
        void *data[2];
        int len;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       oplock = SMB2_OPLOCK_LEVEL_NONE;
+       num_rqst = 0;
+       server = cifs_pick_channel(ses);
 
        vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
        if (vars == NULL)
@@ -99,8 +135,6 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
        rqst = &vars->rqst[0];
        rsp_iov = &vars->rsp_iov[0];
 
-       server = cifs_pick_channel(ses);
-
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
@@ -435,15 +469,24 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
        num_rqst++;
 
        if (cfile) {
+               if (retries)
+                       for (i = 1; i < num_rqst - 2; i++)
+                               smb2_set_replay(server, &rqst[i]);
+
                rc = compound_send_recv(xid, ses, server,
                                        flags, num_rqst - 2,
                                        &rqst[1], &resp_buftype[1],
                                        &rsp_iov[1]);
-       } else
+       } else {
+               if (retries)
+                       for (i = 0; i < num_rqst; i++)
+                               smb2_set_replay(server, &rqst[i]);
+
                rc = compound_send_recv(xid, ses, server,
                                        flags, num_rqst,
                                        rqst, resp_buftype,
                                        rsp_iov);
+       }
 
 finished:
        num_rqst = 0;
@@ -494,21 +537,9 @@ finished:
                                        &rsp_iov[i + 1], sizeof(idata->posix_fi) /* add SIDs */,
                                        (char *)&idata->posix_fi);
                        }
-                       if (rc == 0) {
-                               unsigned int length = le32_to_cpu(qi_rsp->OutputBufferLength);
-
-                               if (length > sizeof(idata->posix_fi)) {
-                                       char *base = (char *)rsp_iov[i + 1].iov_base +
-                                               le16_to_cpu(qi_rsp->OutputBufferOffset) +
-                                               sizeof(idata->posix_fi);
-                                       *extbuflen = length - sizeof(idata->posix_fi);
-                                       *extbuf = kmemdup(base, *extbuflen, GFP_KERNEL);
-                                       if (!*extbuf)
-                                               rc = -ENOMEM;
-                               } else {
-                                       rc = -EINVAL;
-                               }
-                       }
+                       if (rc == 0)
+                               rc = parse_posix_sids(idata, &rsp_iov[i + 1]);
+
                        SMB2_query_info_free(&rqst[num_rqst++]);
                        if (rc)
                                trace_smb3_posix_query_info_compound_err(xid,  ses->Suid,
@@ -604,9 +635,6 @@ finished:
        }
        SMB2_close_free(&rqst[num_rqst]);
 
-       if (cfile)
-               cifsFileInfo_put(cfile);
-
        num_cmds += 2;
        if (out_iov && out_buftype) {
                memcpy(out_iov, rsp_iov, num_cmds * sizeof(*out_iov));
@@ -616,7 +644,16 @@ finished:
                for (i = 0; i < num_cmds; i++)
                        free_rsp_buf(resp_buftype[i], rsp_iov[i].iov_base);
        }
+       num_cmds -= 2; /* correct num_cmds as there could be a retry */
        kfree(vars);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
+       if (cfile)
+               cifsFileInfo_put(cfile);
+
        return rc;
 }
 
@@ -662,7 +699,7 @@ int smb2_query_path_info(const unsigned int xid,
        struct smb2_hdr *hdr;
        struct kvec in_iov[2], out_iov[3] = {};
        int out_buftype[3] = {};
-       int cmds[2] = { SMB2_OP_QUERY_INFO,  };
+       int cmds[2];
        bool islink;
        int i, num_cmds;
        int rc, rc2;
@@ -670,20 +707,36 @@ int smb2_query_path_info(const unsigned int xid,
        data->adjust_tz = false;
        data->reparse_point = false;
 
-       if (strcmp(full_path, ""))
-               rc = -ENOENT;
-       else
-               rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
-       /* If it is a root and its handle is cached then use it */
-       if (!rc) {
-               if (cfid->file_all_info_is_valid) {
-                       memcpy(&data->fi, &cfid->file_all_info, sizeof(data->fi));
+       /*
+        * BB TODO: Add support for using cached root handle in SMB3.1.1 POSIX.
+        * Create SMB2_query_posix_info worker function to do non-compounded
+        * query when we already have an open file handle for this. For now this
+        * is fast enough (always using the compounded version).
+        */
+       if (!tcon->posix_extensions) {
+               if (*full_path) {
+                       rc = -ENOENT;
                } else {
-                       rc = SMB2_query_info(xid, tcon, cfid->fid.persistent_fid,
-                                            cfid->fid.volatile_fid, &data->fi);
+                       rc = open_cached_dir(xid, tcon, full_path,
+                                            cifs_sb, false, &cfid);
+               }
+               /* If it is a root and its handle is cached then use it */
+               if (!rc) {
+                       if (cfid->file_all_info_is_valid) {
+                               memcpy(&data->fi, &cfid->file_all_info,
+                                      sizeof(data->fi));
+                       } else {
+                               rc = SMB2_query_info(xid, tcon,
+                                                    cfid->fid.persistent_fid,
+                                                    cfid->fid.volatile_fid,
+                                                    &data->fi);
+                       }
+                       close_cached_dir(cfid);
+                       return rc;
                }
-               close_cached_dir(cfid);
-               return rc;
+               cmds[0] = SMB2_OP_QUERY_INFO;
+       } else {
+               cmds[0] = SMB2_OP_POSIX_QUERY_INFO;
        }
 
        in_iov[0].iov_base = data;
@@ -693,9 +746,8 @@ int smb2_query_path_info(const unsigned int xid,
        cifs_get_readable_path(tcon, full_path, &cfile);
        rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
                              FILE_READ_ATTRIBUTES, FILE_OPEN,
-                             create_options, ACL_NO_MODE,
-                             in_iov, cmds, 1, cfile,
-                             NULL, NULL, out_iov, out_buftype);
+                             create_options, ACL_NO_MODE, in_iov,
+                             cmds, 1, cfile, out_iov, out_buftype);
        hdr = out_iov[0].iov_base;
        /*
         * If first iov is unset, then SMB session was dropped or we've got a
@@ -707,6 +759,10 @@ int smb2_query_path_info(const unsigned int xid,
        switch (rc) {
        case 0:
        case -EOPNOTSUPP:
+               /*
+                * BB TODO: When support for special files added to Samba
+                * re-verify this path.
+                */
                rc = parse_create_response(data, cifs_sb, &out_iov[0]);
                if (rc || !data->reparse_point)
                        goto out;
@@ -722,8 +778,8 @@ int smb2_query_path_info(const unsigned int xid,
                cifs_get_readable_path(tcon, full_path, &cfile);
                rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
                                      FILE_READ_ATTRIBUTES, FILE_OPEN,
-                                     create_options, ACL_NO_MODE, in_iov, cmds,
-                                     num_cmds, cfile, NULL, NULL, NULL, NULL);
+                                     create_options, ACL_NO_MODE, in_iov,
+                                     cmds, num_cmds, cfile, NULL, NULL);
                break;
        case -EREMOTE:
                break;
@@ -746,101 +802,6 @@ out:
        return rc;
 }
 
-int smb311_posix_query_path_info(const unsigned int xid,
-                                struct cifs_tcon *tcon,
-                                struct cifs_sb_info *cifs_sb,
-                                const char *full_path,
-                                struct cifs_open_info_data *data,
-                                struct cifs_sid *owner,
-                                struct cifs_sid *group)
-{
-       int rc;
-       __u32 create_options = 0;
-       struct cifsFileInfo *cfile;
-       struct kvec in_iov[2], out_iov[3] = {};
-       int out_buftype[3] = {};
-       __u8 *sidsbuf = NULL;
-       __u8 *sidsbuf_end = NULL;
-       size_t sidsbuflen = 0;
-       size_t owner_len, group_len;
-       int cmds[2] = { SMB2_OP_POSIX_QUERY_INFO,  };
-       int i, num_cmds;
-
-       data->adjust_tz = false;
-       data->reparse_point = false;
-
-       /*
-        * BB TODO: Add support for using the cached root handle.
-        * Create SMB2_query_posix_info worker function to do non-compounded query
-        * when we already have an open file handle for this. For now this is fast enough
-        * (always using the compounded version).
-        */
-       in_iov[0].iov_base = data;
-       in_iov[0].iov_len = sizeof(*data);
-       in_iov[1] = in_iov[0];
-
-       cifs_get_readable_path(tcon, full_path, &cfile);
-       rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
-                             FILE_READ_ATTRIBUTES, FILE_OPEN,
-                             create_options, ACL_NO_MODE, in_iov, cmds, 1,
-                             cfile, &sidsbuf, &sidsbuflen, out_iov, out_buftype);
-       /*
-        * If first iov is unset, then SMB session was dropped or we've got a
-        * cached open file (@cfile).
-        */
-       if (!out_iov[0].iov_base || out_buftype[0] == CIFS_NO_BUFFER)
-               goto out;
-
-       switch (rc) {
-       case 0:
-       case -EOPNOTSUPP:
-               /* BB TODO: When support for special files added to Samba re-verify this path */
-               rc = parse_create_response(data, cifs_sb, &out_iov[0]);
-               if (rc || !data->reparse_point)
-                       goto out;
-
-               if (data->reparse.tag == IO_REPARSE_TAG_SYMLINK) {
-                       /* symlink already parsed in create response */
-                       num_cmds = 1;
-               } else {
-                       cmds[1] = SMB2_OP_GET_REPARSE;
-                       num_cmds = 2;
-               }
-               create_options |= OPEN_REPARSE_POINT;
-               cifs_get_readable_path(tcon, full_path, &cfile);
-               rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
-                                     FILE_READ_ATTRIBUTES, FILE_OPEN,
-                                     create_options, ACL_NO_MODE, in_iov, cmds,
-                                     num_cmds, cfile, &sidsbuf, &sidsbuflen, NULL, NULL);
-               break;
-       }
-
-out:
-       if (rc == 0) {
-               sidsbuf_end = sidsbuf + sidsbuflen;
-
-               owner_len = posix_info_sid_size(sidsbuf, sidsbuf_end);
-               if (owner_len == -1) {
-                       rc = -EINVAL;
-                       goto out;
-               }
-               memcpy(owner, sidsbuf, owner_len);
-
-               group_len = posix_info_sid_size(
-                       sidsbuf + owner_len, sidsbuf_end);
-               if (group_len == -1) {
-                       rc = -EINVAL;
-                       goto out;
-               }
-               memcpy(group, sidsbuf + owner_len, group_len);
-       }
-
-       kfree(sidsbuf);
-       for (i = 0; i < ARRAY_SIZE(out_buftype); i++)
-               free_rsp_buf(out_buftype[i], out_iov[i].iov_base);
-       return rc;
-}
-
 int
 smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
           struct cifs_tcon *tcon, const char *name,
@@ -848,9 +809,9 @@ smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
 {
        return smb2_compound_op(xid, tcon, cifs_sb, name,
                                FILE_WRITE_ATTRIBUTES, FILE_CREATE,
-                               CREATE_NOT_FILE, mode, NULL,
-                               &(int){SMB2_OP_MKDIR}, 1,
-                               NULL, NULL, NULL, NULL, NULL);
+                               CREATE_NOT_FILE, mode,
+                               NULL, &(int){SMB2_OP_MKDIR}, 1,
+                               NULL, NULL, NULL);
 }
 
 void
@@ -875,7 +836,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
                                 FILE_WRITE_ATTRIBUTES, FILE_CREATE,
                                 CREATE_NOT_FILE, ACL_NO_MODE, &in_iov,
                                 &(int){SMB2_OP_SET_INFO}, 1,
-                                cfile, NULL, NULL, NULL, NULL);
+                                cfile, NULL, NULL);
        if (tmprc == 0)
                cifs_i->cifsAttrs = dosattrs;
 }
@@ -887,8 +848,9 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
        drop_cached_dir_by_name(xid, tcon, name, cifs_sb);
        return smb2_compound_op(xid, tcon, cifs_sb, name,
                                DELETE, FILE_OPEN, CREATE_NOT_FILE,
-                               ACL_NO_MODE, NULL, &(int){SMB2_OP_RMDIR}, 1,
-                               NULL, NULL, NULL, NULL, NULL);
+                               ACL_NO_MODE, NULL,
+                               &(int){SMB2_OP_RMDIR}, 1,
+                               NULL, NULL, NULL);
 }
 
 int
@@ -897,8 +859,9 @@ smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
 {
        return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
                                CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
-                               ACL_NO_MODE, NULL, &(int){SMB2_OP_DELETE}, 1,
-                               NULL, NULL, NULL, NULL, NULL);
+                               ACL_NO_MODE, NULL,
+                               &(int){SMB2_OP_DELETE}, 1,
+                               NULL, NULL, NULL);
 }
 
 static int smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
@@ -919,8 +882,8 @@ static int smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
        in_iov.iov_base = smb2_to_name;
        in_iov.iov_len = 2 * UniStrnlen((wchar_t *)smb2_to_name, PATH_MAX);
        rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
-                             FILE_OPEN, create_options, ACL_NO_MODE, &in_iov,
-                             &command, 1, cfile, NULL, NULL, NULL, NULL);
+                             FILE_OPEN, create_options, ACL_NO_MODE,
+                             &in_iov, &command, 1, cfile, NULL, NULL);
 smb2_rename_path:
        kfree(smb2_to_name);
        return rc;
@@ -971,7 +934,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
                                FILE_WRITE_DATA, FILE_OPEN,
                                0, ACL_NO_MODE, &in_iov,
                                &(int){SMB2_OP_SET_EOF}, 1,
-                               cfile, NULL, NULL, NULL, NULL);
+                               cfile, NULL, NULL);
 }
 
 int
@@ -999,8 +962,8 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
        rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
                              FILE_WRITE_ATTRIBUTES, FILE_OPEN,
                              0, ACL_NO_MODE, &in_iov,
-                             &(int){SMB2_OP_SET_INFO}, 1, cfile,
-                             NULL, NULL, NULL, NULL);
+                             &(int){SMB2_OP_SET_INFO}, 1,
+                             cfile, NULL, NULL);
        cifs_put_tlink(tlink);
        return rc;
 }
@@ -1035,7 +998,7 @@ struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
                cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
                rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
                                      da, cd, co, ACL_NO_MODE, in_iov,
-                                     cmds, 2, cfile, NULL, NULL, NULL, NULL);
+                                     cmds, 2, cfile, NULL, NULL);
                if (!rc) {
                        rc = smb311_posix_get_inode_info(&new, full_path,
                                                         data, sb, xid);
@@ -1045,7 +1008,7 @@ struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
                cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
                rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
                                      da, cd, co, ACL_NO_MODE, in_iov,
-                                     cmds, 2, cfile, NULL, NULL, NULL, NULL);
+                                     cmds, 2, cfile, NULL, NULL);
                if (!rc) {
                        rc = cifs_get_inode_info(&new, full_path,
                                                 data, sb, xid, NULL);
@@ -1072,8 +1035,8 @@ int smb2_query_reparse_point(const unsigned int xid,
        rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
                              FILE_READ_ATTRIBUTES, FILE_OPEN,
                              OPEN_REPARSE_POINT, ACL_NO_MODE, &in_iov,
-                             &(int){SMB2_OP_GET_REPARSE}, 1, cfile,
-                             NULL, NULL, NULL, NULL);
+                             &(int){SMB2_OP_GET_REPARSE}, 1,
+                             cfile, NULL, NULL);
        if (rc)
                goto out;
 
index 1a90dd78b238f0de191421bd0d1838164bff9778..ac1895358908abff42e51059644aed30be670373 100644 (file)
@@ -1210,6 +1210,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_INVALID_TASK_INDEX, -EIO, "STATUS_INVALID_TASK_INDEX"},
        {STATUS_THREAD_ALREADY_IN_TASK, -EIO, "STATUS_THREAD_ALREADY_IN_TASK"},
        {STATUS_CALLBACK_BYPASS, -EIO, "STATUS_CALLBACK_BYPASS"},
+       {STATUS_SERVER_UNAVAILABLE, -EAGAIN, "STATUS_SERVER_UNAVAILABLE"},
+       {STATUS_FILE_NOT_AVAILABLE, -EAGAIN, "STATUS_FILE_NOT_AVAILABLE"},
        {STATUS_PORT_CLOSED, -EIO, "STATUS_PORT_CLOSED"},
        {STATUS_MESSAGE_LOST, -EIO, "STATUS_MESSAGE_LOST"},
        {STATUS_INVALID_MESSAGE, -EIO, "STATUS_INVALID_MESSAGE"},
index 01a5bd7e6a307f1d20619001e2a1e5da7f8e2e87..83c898afc8354bf04c7a86ee57e4343ad3618319 100644 (file)
@@ -614,7 +614,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
                                 "multichannel not available\n"
                                 "Empty network interface list returned by server %s\n",
                                 ses->server->hostname);
-               rc = -EINVAL;
+               rc = -EOPNOTSUPP;
+               ses->iface_last_update = jiffies;
                goto out;
        }
 
@@ -712,7 +713,6 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
 
                ses->iface_count++;
                spin_unlock(&ses->iface_lock);
-               ses->iface_last_update = jiffies;
 next_iface:
                nb_iface++;
                next = le32_to_cpu(p->Next);
@@ -734,11 +734,7 @@ next_iface:
        if ((bytes_left > 8) || p->Next)
                cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
 
-
-       if (!ses->iface_count) {
-               rc = -EINVAL;
-               goto out;
-       }
+       ses->iface_last_update = jiffies;
 
 out:
        /*
@@ -1112,7 +1108,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
 {
        struct smb2_compound_vars *vars;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        struct smb_rqst *rqst;
        struct kvec *rsp_iov;
        __le16 *utf16_path = NULL;
@@ -1128,6 +1124,13 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
        struct smb2_file_full_ea_info *ea = NULL;
        struct smb2_query_info_rsp *rsp;
        int rc, used_len = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = CIFS_CP_CREATE_CLOSE_OP;
+       oplock = SMB2_OPLOCK_LEVEL_NONE;
+       server = cifs_pick_channel(ses);
 
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
@@ -1248,6 +1251,12 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
                goto sea_exit;
        smb2_set_related(&rqst[2]);
 
+       if (retries) {
+               smb2_set_replay(server, &rqst[0]);
+               smb2_set_replay(server, &rqst[1]);
+               smb2_set_replay(server, &rqst[2]);
+       }
+
        rc = compound_send_recv(xid, ses, server,
                                flags, 3, rqst,
                                resp_buftype, rsp_iov);
@@ -1264,6 +1273,11 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
        kfree(vars);
 out_free_path:
        kfree(utf16_path);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 #endif
@@ -1488,7 +1502,7 @@ smb2_ioctl_query_info(const unsigned int xid,
        struct smb_rqst *rqst;
        struct kvec *rsp_iov;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        char __user *arg = (char __user *)p;
        struct smb_query_info qi;
        struct smb_query_info __user *pqi;
@@ -1505,6 +1519,13 @@ smb2_ioctl_query_info(const unsigned int xid,
        void *data[2];
        int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
        void (*free_req1_func)(struct smb_rqst *r);
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = CIFS_CP_CREATE_CLOSE_OP;
+       oplock = SMB2_OPLOCK_LEVEL_NONE;
+       server = cifs_pick_channel(ses);
 
        vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
        if (vars == NULL)
@@ -1645,6 +1666,12 @@ smb2_ioctl_query_info(const unsigned int xid,
                goto free_req_1;
        smb2_set_related(&rqst[2]);
 
+       if (retries) {
+               smb2_set_replay(server, &rqst[0]);
+               smb2_set_replay(server, &rqst[1]);
+               smb2_set_replay(server, &rqst[2]);
+       }
+
        rc = compound_send_recv(xid, ses, server,
                                flags, 3, rqst,
                                resp_buftype, rsp_iov);
@@ -1705,6 +1732,11 @@ free_output_buffer:
        kfree(buffer);
 free_vars:
        kfree(vars);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -2231,8 +2263,14 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        struct cifs_open_parms oparms;
        struct smb2_query_directory_rsp *qd_rsp = NULL;
        struct smb2_create_rsp *op_rsp = NULL;
-       struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
-       int retry_count = 0;
+       struct TCP_Server_Info *server;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       oplock = SMB2_OPLOCK_LEVEL_NONE;
+       server = cifs_pick_channel(tcon->ses);
 
        utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
        if (!utf16_path)
@@ -2282,14 +2320,15 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 
        smb2_set_related(&rqst[1]);
 
-again:
+       if (retries) {
+               smb2_set_replay(server, &rqst[0]);
+               smb2_set_replay(server, &rqst[1]);
+       }
+
        rc = compound_send_recv(xid, tcon->ses, server,
                                flags, 2, rqst,
                                resp_buftype, rsp_iov);
 
-       if (rc == -EAGAIN && retry_count++ < 10)
-               goto again;
-
        /* If the open failed there is nothing to do */
        op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
        if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
@@ -2337,6 +2376,11 @@ again:
        SMB2_query_directory_free(&rqst[1]);
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -2461,6 +2505,22 @@ smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
                                 CIFS_CACHE_READ(cinode) ? 1 : 0);
 }
 
+void
+smb2_set_replay(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+{
+       struct smb2_hdr *shdr;
+
+       if (server->dialect < SMB30_PROT_ID)
+               return;
+
+       shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
+       if (shdr == NULL) {
+               cifs_dbg(FYI, "shdr NULL in smb2_set_replay\n");
+               return;
+       }
+       shdr->Flags |= SMB2_FLAGS_REPLAY_OPERATION;
+}
+
 void
 smb2_set_related(struct smb_rqst *rqst)
 {
@@ -2533,6 +2593,27 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
        shdr->NextCommand = cpu_to_le32(len);
 }
 
+/*
+ * helper function for exponential backoff and check if replayable
+ */
+bool smb2_should_replay(struct cifs_tcon *tcon,
+                               int *pretries,
+                               int *pcur_sleep)
+{
+       if (!pretries || !pcur_sleep)
+               return false;
+
+       if (tcon->retry || (*pretries)++ < tcon->ses->server->retrans) {
+               msleep(*pcur_sleep);
+               (*pcur_sleep) = ((*pcur_sleep) << 1);
+               if ((*pcur_sleep) > CIFS_MAX_SLEEP)
+                       (*pcur_sleep) = CIFS_MAX_SLEEP;
+               return true;
+       }
+
+       return false;
+}
+
 /*
  * Passes the query info response back to the caller on success.
  * Caller need to free this with free_rsp_buf().
@@ -2546,7 +2627,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
 {
        struct smb2_compound_vars *vars;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        int flags = CIFS_CP_CREATE_CLOSE_OP;
        struct smb_rqst *rqst;
        int resp_buftype[3];
@@ -2557,6 +2638,13 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        int rc;
        __le16 *utf16_path;
        struct cached_fid *cfid = NULL;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = CIFS_CP_CREATE_CLOSE_OP;
+       oplock = SMB2_OPLOCK_LEVEL_NONE;
+       server = cifs_pick_channel(ses);
 
        if (!path)
                path = "";
@@ -2637,6 +2725,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
                goto qic_exit;
        smb2_set_related(&rqst[2]);
 
+       if (retries) {
+               if (!cfid) {
+                       smb2_set_replay(server, &rqst[0]);
+                       smb2_set_replay(server, &rqst[2]);
+               }
+               smb2_set_replay(server, &rqst[1]);
+       }
+
        if (cfid) {
                rc = compound_send_recv(xid, ses, server,
                                        flags, 1, &rqst[1],
@@ -2669,6 +2765,11 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        kfree(vars);
 out_free_path:
        kfree(utf16_path);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3217,6 +3318,9 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
                                  cfile->fid.volatile_fid, cfile->pid, new_size);
                if (rc >= 0) {
                        truncate_setsize(inode, new_size);
+                       netfs_resize_file(&cifsi->netfs, new_size, true);
+                       if (offset < cifsi->netfs.zero_point)
+                               cifsi->netfs.zero_point = offset;
                        fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
                }
        }
@@ -3440,7 +3544,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
                rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
                                  cfile->fid.volatile_fid, cfile->pid, new_eof);
                if (rc == 0) {
-                       cifsi->server_eof = new_eof;
+                       netfs_resize_file(&cifsi->netfs, new_eof, true);
                        cifs_setsize(inode, new_eof);
                        cifs_truncate_page(inode->i_mapping, inode->i_size);
                        truncate_setsize(inode, new_eof);
@@ -3532,8 +3636,9 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
        int rc;
        unsigned int xid;
        struct inode *inode = file_inode(file);
-       struct cifsFileInfo *cfile = file->private_data;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
+       struct cifsFileInfo *cfile = file->private_data;
+       struct netfs_inode *ictx = &cifsi->netfs;
        loff_t old_eof, new_eof;
 
        xid = get_xid();
@@ -3553,6 +3658,7 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
                goto out_2;
 
        truncate_pagecache_range(inode, off, old_eof);
+       ictx->zero_point = old_eof;
 
        rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
                                  old_eof - off - len, off);
@@ -3567,9 +3673,10 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
 
        rc = 0;
 
-       cifsi->server_eof = i_size_read(inode) - len;
-       truncate_setsize(inode, cifsi->server_eof);
-       fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof);
+       truncate_setsize(inode, new_eof);
+       netfs_resize_file(&cifsi->netfs, new_eof, true);
+       ictx->zero_point = new_eof;
+       fscache_resize_cookie(cifs_inode_cookie(inode), new_eof);
 out_2:
        filemap_invalidate_unlock(inode->i_mapping);
  out:
@@ -3585,6 +3692,7 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct inode *inode = file_inode(file);
+       struct cifsInodeInfo *cifsi = CIFS_I(inode);
        __u64 count, old_eof, new_eof;
 
        xid = get_xid();
@@ -3612,6 +3720,7 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
                goto out_2;
 
        truncate_setsize(inode, new_eof);
+       netfs_resize_file(&cifsi->netfs, i_size_read(inode), true);
        fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
 
        rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
index bd25c34dc398b6460c37e3c1ea778fdc0cca6b80..4085ce27fd388c7eab9a8402e095e84551158cb1 100644 (file)
@@ -156,6 +156,56 @@ out:
        return;
 }
 
+/* helper function for code reuse */
+static int
+cifs_chan_skip_or_disable(struct cifs_ses *ses,
+                         struct TCP_Server_Info *server,
+                         bool from_reconnect)
+{
+       struct TCP_Server_Info *pserver;
+       unsigned int chan_index;
+
+       if (SERVER_IS_CHAN(server)) {
+               cifs_dbg(VFS,
+                       "server %s does not support multichannel anymore. Skip secondary channel\n",
+                        ses->server->hostname);
+
+               spin_lock(&ses->chan_lock);
+               chan_index = cifs_ses_get_chan_index(ses, server);
+               if (chan_index == CIFS_INVAL_CHAN_INDEX) {
+                       spin_unlock(&ses->chan_lock);
+                       goto skip_terminate;
+               }
+
+               ses->chans[chan_index].server = NULL;
+               server->terminate = true;
+               spin_unlock(&ses->chan_lock);
+
+               /*
+                * the above reference of server by channel
+                * needs to be dropped without holding chan_lock
+                * as cifs_put_tcp_session takes a higher lock
+                * i.e. cifs_tcp_ses_lock
+                */
+               cifs_put_tcp_session(server, from_reconnect);
+
+               cifs_signal_cifsd_for_reconnect(server, false);
+
+               /* mark primary server as needing reconnect */
+               pserver = server->primary_server;
+               cifs_signal_cifsd_for_reconnect(pserver, false);
+skip_terminate:
+               return -EHOSTDOWN;
+       }
+
+       cifs_server_dbg(VFS,
+               "server does not support multichannel anymore. Disable all other channels\n");
+       cifs_disable_secondary_channels(ses);
+
+
+       return 0;
+}
+
 static int
 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
               struct TCP_Server_Info *server, bool from_reconnect)
@@ -164,8 +214,6 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
        struct nls_table *nls_codepage = NULL;
        struct cifs_ses *ses;
        int xid;
-       struct TCP_Server_Info *pserver;
-       unsigned int chan_index;
 
        /*
         * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
@@ -310,44 +358,11 @@ again:
                 */
                if (ses->chan_count > 1 &&
                    !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
-                       if (SERVER_IS_CHAN(server)) {
-                               cifs_dbg(VFS, "server %s does not support " \
-                                        "multichannel anymore. skipping secondary channel\n",
-                                        ses->server->hostname);
-
-                               spin_lock(&ses->chan_lock);
-                               chan_index = cifs_ses_get_chan_index(ses, server);
-                               if (chan_index == CIFS_INVAL_CHAN_INDEX) {
-                                       spin_unlock(&ses->chan_lock);
-                                       goto skip_terminate;
-                               }
-
-                               ses->chans[chan_index].server = NULL;
-                               spin_unlock(&ses->chan_lock);
-
-                               /*
-                                * the above reference of server by channel
-                                * needs to be dropped without holding chan_lock
-                                * as cifs_put_tcp_session takes a higher lock
-                                * i.e. cifs_tcp_ses_lock
-                                */
-                               cifs_put_tcp_session(server, from_reconnect);
-
-                               server->terminate = true;
-                               cifs_signal_cifsd_for_reconnect(server, false);
-
-                               /* mark primary server as needing reconnect */
-                               pserver = server->primary_server;
-                               cifs_signal_cifsd_for_reconnect(pserver, false);
-
-skip_terminate:
+                       rc = cifs_chan_skip_or_disable(ses, server,
+                                                      from_reconnect);
+                       if (rc) {
                                mutex_unlock(&ses->session_mutex);
-                               rc = -EHOSTDOWN;
                                goto out;
-                       } else {
-                               cifs_server_dbg(VFS, "does not support " \
-                                        "multichannel anymore. disabling all other channels\n");
-                               cifs_disable_secondary_channels(ses);
                        }
                }
 
@@ -384,6 +399,15 @@ skip_sess_setup:
                goto out;
        }
 
+       spin_lock(&ses->ses_lock);
+       if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
+               spin_unlock(&ses->ses_lock);
+               mutex_unlock(&ses->session_mutex);
+               goto skip_add_channels;
+       }
+       ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
+       spin_unlock(&ses->ses_lock);
+
        if (!rc &&
            (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
                mutex_unlock(&ses->session_mutex);
@@ -395,14 +419,29 @@ skip_sess_setup:
                rc = SMB3_request_interfaces(xid, tcon, false);
                free_xid(xid);
 
-               if (rc)
+               if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
+                       /*
+                        * some servers like Azure SMB server do not advertise
+                        * that multichannel has been disabled with server
+                        * capabilities, rather return STATUS_NOT_IMPLEMENTED.
+                        * treat this as server not supporting multichannel
+                        */
+
+                       rc = cifs_chan_skip_or_disable(ses, server,
+                                                      from_reconnect);
+                       goto skip_add_channels;
+               } else if (rc)
                        cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
                                 __func__, rc);
 
                if (ses->chan_max > ses->chan_count &&
+                   ses->iface_count &&
                    !SERVER_IS_CHAN(server)) {
-                       if (ses->chan_count == 1)
+                       if (ses->chan_count == 1) {
                                cifs_server_dbg(VFS, "supports multichannel now\n");
+                               queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+                                                (SMB_INTERFACE_POLL_INTERVAL * HZ));
+                       }
 
                        cifs_try_adding_channels(ses);
                }
@@ -410,6 +449,11 @@ skip_sess_setup:
                mutex_unlock(&ses->session_mutex);
        }
 
+skip_add_channels:
+       spin_lock(&ses->ses_lock);
+       ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
+       spin_unlock(&ses->ses_lock);
+
        if (smb2_command != SMB2_INTERNAL_CMD)
                mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
 
@@ -1958,10 +2002,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        __le16 *unc_path = NULL;
        int flags = 0;
        unsigned int total_len;
-       struct TCP_Server_Info *server;
-
-       /* always use master channel */
-       server = ses->server;
+       struct TCP_Server_Info *server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "TCON\n");
 
@@ -2094,6 +2135,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
        struct smb2_tree_disconnect_req *req; /* response is trivial */
        int rc = 0;
        struct cifs_ses *ses = tcon->ses;
+       struct TCP_Server_Info *server = cifs_pick_channel(ses);
        int flags = 0;
        unsigned int total_len;
        struct kvec iov[1];
@@ -2116,7 +2158,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
 
        invalidate_all_cached_dirs(tcon);
 
-       rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
+       rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server,
                                 (void **) &req,
                                 &total_len);
        if (rc)
@@ -2134,7 +2176,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
        rqst.rq_iov = iov;
        rqst.rq_nvec = 1;
 
-       rc = cifs_send_recv(xid, ses, ses->server,
+       rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        if (rc) {
@@ -2279,7 +2321,7 @@ int smb2_parse_contexts(struct TCP_Server_Info *server,
 
                noff = le16_to_cpu(cc->NameOffset);
                nlen = le16_to_cpu(cc->NameLength);
-               if (noff + nlen >= doff)
+               if (noff + nlen > doff)
                        return -EINVAL;
 
                name = (char *)cc + noff;
@@ -2736,7 +2778,14 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
        int flags = 0;
        unsigned int total_len;
        __le16 *utf16_path = NULL;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       n_iov = 2;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "mkdir\n");
 
@@ -2840,6 +2889,10 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
        /* no need to inc num_remote_opens because we close it just below */
        trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
                                    FILE_WRITE_ATTRIBUTES);
+
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        /* resource #4: response buffer */
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
@@ -2877,6 +2930,11 @@ err_free_req:
        cifs_small_buf_release(req);
 err_free_path:
        kfree(utf16_path);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3072,12 +3130,18 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        struct smb2_create_rsp *rsp = NULL;
        struct cifs_tcon *tcon = oparms->tcon;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        struct kvec iov[SMB2_CREATE_IOV_SIZE];
        struct kvec rsp_iov = {NULL, 0};
        int resp_buftype = CIFS_NO_BUFFER;
        int rc = 0;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "create/open\n");
        if (!ses || !server)
@@ -3099,6 +3163,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
                oparms->create_options, oparms->desired_access);
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags,
                            &rsp_iov);
@@ -3152,6 +3219,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
 creat_exit:
        SMB2_open_free(&rqst);
        free_rsp_buf(resp_buftype, rsp);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3276,15 +3348,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        int resp_buftype = CIFS_NO_BUFFER;
        int rc = 0;
        int flags = 0;
-
-       cifs_dbg(FYI, "SMB2 IOCTL\n");
-
-       if (out_data != NULL)
-               *out_data = NULL;
-
-       /* zero out returned data len, in case of error */
-       if (plen)
-               *plen = 0;
+       int retries = 0, cur_sleep = 1;
 
        if (!tcon)
                return -EIO;
@@ -3293,10 +3357,23 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (!ses)
                return -EIO;
 
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
        server = cifs_pick_channel(ses);
+
        if (!server)
                return -EIO;
 
+       cifs_dbg(FYI, "SMB2 IOCTL\n");
+
+       if (out_data != NULL)
+               *out_data = NULL;
+
+       /* zero out returned data len, in case of error */
+       if (plen)
+               *plen = 0;
+
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
@@ -3311,6 +3388,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (rc)
                goto ioctl_exit;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags,
                            &rsp_iov);
@@ -3380,6 +3460,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 ioctl_exit:
        SMB2_ioctl_free(&rqst);
        free_rsp_buf(resp_buftype, rsp);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3451,13 +3536,20 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
        struct smb_rqst rqst;
        struct smb2_close_rsp *rsp = NULL;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buftype = CIFS_NO_BUFFER;
        int rc = 0;
        int flags = 0;
        bool query_attrs = false;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       query_attrs = false;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "Close\n");
 
@@ -3483,6 +3575,9 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
        if (rc)
                goto close_exit;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
@@ -3516,6 +3611,11 @@ close_exit:
                        cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
                                 persistent_fid, tmp_rc);
        }
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3646,12 +3746,19 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        struct TCP_Server_Info *server;
        int flags = 0;
        bool allocated = false;
+       int retries = 0, cur_sleep = 1;
 
        cifs_dbg(FYI, "Query Info\n");
 
        if (!ses)
                return -EIO;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       allocated = false;
        server = cifs_pick_channel(ses);
+
        if (!server)
                return -EIO;
 
@@ -3673,6 +3780,9 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
                                    ses->Suid, info_class, (__u32)info_type);
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
@@ -3715,6 +3825,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
 qinf_exit:
        SMB2_query_info_free(&rqst);
        free_rsp_buf(resp_buftype, rsp);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3815,7 +3930,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
                u32 *plen /* returned data len */)
 {
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        struct smb_rqst rqst;
        struct smb2_change_notify_rsp *smb_rsp;
        struct kvec iov[1];
@@ -3823,6 +3938,12 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
        int resp_buftype = CIFS_NO_BUFFER;
        int flags = 0;
        int rc = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "change notify\n");
        if (!ses || !server)
@@ -3847,6 +3968,10 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
 
        trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
                                (u8)watch_tree, completion_filter);
+
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
 
@@ -3881,6 +4006,11 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
        if (rqst.rq_iov)
                cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
        free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3918,7 +4048,7 @@ void smb2_reconnect_server(struct work_struct *work)
        struct cifs_ses *ses, *ses2;
        struct cifs_tcon *tcon, *tcon2;
        struct list_head tmp_list, tmp_ses_list;
-       bool tcon_exist = false, ses_exist = false;
+       bool ses_exist = false;
        bool tcon_selected = false;
        int rc;
        bool resched = false;
@@ -3964,7 +4094,7 @@ void smb2_reconnect_server(struct work_struct *work)
                        if (tcon->need_reconnect || tcon->need_reopen_files) {
                                tcon->tc_count++;
                                list_add_tail(&tcon->rlist, &tmp_list);
-                               tcon_selected = tcon_exist = true;
+                               tcon_selected = true;
                        }
                }
                /*
@@ -3973,7 +4103,7 @@ void smb2_reconnect_server(struct work_struct *work)
                 */
                if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
                        list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
-                       tcon_selected = tcon_exist = true;
+                       tcon_selected = true;
                        cifs_smb_ses_inc_refcount(ses);
                }
                /*
@@ -4123,10 +4253,16 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        struct smb_rqst rqst;
        struct kvec iov[1];
        struct kvec rsp_iov = {NULL, 0};
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        int resp_buftype = CIFS_NO_BUFFER;
        int flags = 0;
        int rc = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "flush\n");
        if (!ses || !(ses->server))
@@ -4146,6 +4282,10 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
                goto flush_exit;
 
        trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
+
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
 
@@ -4160,6 +4300,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
  flush_exit:
        SMB2_flush_free(&rqst);
        free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -4639,7 +4784,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
        struct cifs_io_parms *io_parms = NULL;
        int credit_request;
 
-       if (!wdata->server)
+       if (!wdata->server || wdata->replay)
                server = wdata->server = cifs_pick_channel(tcon->ses);
 
        /*
@@ -4724,6 +4869,8 @@ smb2_async_writev(struct cifs_writedata *wdata,
        rqst.rq_nvec = 1;
        rqst.rq_iter = wdata->iter;
        rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter);
+       if (wdata->replay)
+               smb2_set_replay(server, &rqst);
 #ifdef CONFIG_CIFS_SMB_DIRECT
        if (wdata->mr)
                iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
@@ -4797,18 +4944,21 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        int flags = 0;
        unsigned int total_len;
        struct TCP_Server_Info *server;
+       int retries = 0, cur_sleep = 1;
 
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
        *nbytes = 0;
-
-       if (n_vec < 1)
-               return rc;
-
        if (!io_parms->server)
                io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
        server = io_parms->server;
        if (server == NULL)
                return -ECONNABORTED;
 
+       if (n_vec < 1)
+               return rc;
+
        rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
                                 (void **) &req, &total_len);
        if (rc)
@@ -4842,6 +4992,9 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        rqst.rq_iov = iov;
        rqst.rq_nvec = n_vec + 1;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
                            &rqst,
                            &resp_buftype, flags, &rsp_iov);
@@ -4866,6 +5019,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 
        cifs_small_buf_release(req);
        free_rsp_buf(resp_buftype, rsp);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(io_parms->tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5048,6 +5206,9 @@ int SMB2_query_directory_init(const unsigned int xid,
        case SMB_FIND_FILE_POSIX_INFO:
                req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
                break;
+       case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+               req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION;
+               break;
        default:
                cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
                        info_level);
@@ -5117,6 +5278,9 @@ smb2_parse_query_directory(struct cifs_tcon *tcon,
                /* note that posix payload are variable size */
                info_buf_size = sizeof(struct smb2_posix_info);
                break;
+       case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+               info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO);
+               break;
        default:
                cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
                         srch_inf->info_level);
@@ -5177,8 +5341,14 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        struct kvec rsp_iov;
        int rc = 0;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        if (!ses || !(ses->server))
                return -EIO;
@@ -5198,6 +5368,9 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        if (rc)
                goto qdir_exit;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
@@ -5232,6 +5405,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
 qdir_exit:
        SMB2_query_directory_free(&rqst);
        free_rsp_buf(resp_buftype, rsp);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5298,8 +5476,14 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc = 0;
        int resp_buftype;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        if (!ses || !server)
                return -EIO;
@@ -5327,6 +5511,8 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
                return rc;
        }
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
 
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags,
@@ -5342,6 +5528,11 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
 
        free_rsp_buf(resp_buftype, rsp);
        kfree(iov);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5394,12 +5585,18 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
        int rc;
        struct smb2_oplock_break *req = NULL;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        int flags = CIFS_OBREAK_OP;
        unsigned int total_len;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = CIFS_OBREAK_OP;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "SMB2_oplock_break\n");
        rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
@@ -5424,15 +5621,21 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
        rqst.rq_iov = iov;
        rqst.rq_nvec = 1;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
-
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
                cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
        }
 
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5518,9 +5721,15 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc = 0;
        int resp_buftype;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        FILE_SYSTEM_POSIX_INFO *info = NULL;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        rc = build_qfs_info_req(&iov, tcon, server,
                                FS_POSIX_INFORMATION,
@@ -5536,6 +5745,9 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
        rqst.rq_iov = &iov;
        rqst.rq_nvec = 1;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        free_qfs_info_req(&iov);
@@ -5555,6 +5767,11 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
 
 posix_qfsinf_exit:
        free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5569,9 +5786,15 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc = 0;
        int resp_buftype;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        struct smb2_fs_full_size_info *info = NULL;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        rc = build_qfs_info_req(&iov, tcon, server,
                                FS_FULL_SIZE_INFORMATION,
@@ -5587,6 +5810,9 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        rqst.rq_iov = &iov;
        rqst.rq_nvec = 1;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        free_qfs_info_req(&iov);
@@ -5606,6 +5832,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
 
 qfsinf_exit:
        free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5620,9 +5851,15 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
        int rc = 0;
        int resp_buftype, max_len, min_len;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        unsigned int rsp_len, offset;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        if (level == FS_DEVICE_INFORMATION) {
                max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
@@ -5654,6 +5891,9 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
        rqst.rq_iov = &iov;
        rqst.rq_nvec = 1;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        free_qfs_info_req(&iov);
@@ -5691,6 +5931,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
 
 qfsattr_exit:
        free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5708,7 +5953,13 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
        unsigned int count;
        int flags = CIFS_NO_RSP_BUF;
        unsigned int total_len;
-       struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+       struct TCP_Server_Info *server;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = CIFS_NO_RSP_BUF;
+       server = cifs_pick_channel(tcon->ses);
 
        cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
 
@@ -5739,6 +5990,9 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
        rqst.rq_iov = iov;
        rqst.rq_nvec = 2;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, tcon->ses, server,
                            &rqst, &resp_buf_type, flags,
                            &rsp_iov);
@@ -5750,6 +6004,10 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
                                    tcon->ses->Suid, rc);
        }
 
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
index 343ada691e763bfce3eb64f1c7871922f7eb7a76..b3069911e9dd8f51ea38ea54da740049696d18e6 100644 (file)
@@ -122,6 +122,11 @@ extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
 extern void smb2_set_next_command(struct cifs_tcon *tcon,
                                  struct smb_rqst *rqst);
 extern void smb2_set_related(struct smb_rqst *rqst);
+extern void smb2_set_replay(struct TCP_Server_Info *server,
+                           struct smb_rqst *rqst);
+extern bool smb2_should_replay(struct cifs_tcon *tcon,
+                         int *pretries,
+                         int *pcur_sleep);
 
 /*
  * SMB2 Worker functions - most of protocol specific implementation details
@@ -299,9 +304,7 @@ int smb311_posix_query_path_info(const unsigned int xid,
                                 struct cifs_tcon *tcon,
                                 struct cifs_sb_info *cifs_sb,
                                 const char *full_path,
-                                struct cifs_open_info_data *data,
-                                struct cifs_sid *owner,
-                                struct cifs_sid *group);
+                                struct cifs_open_info_data *data);
 int posix_info_parse(const void *beg, const void *end,
                     struct smb2_posix_info_parsed *out);
 int posix_info_sid_size(const void *beg, const void *end);
index a9e958166fc53a3c4b5d7a23efb8d326deec3543..9c6d79b0bd4978cea9e33bcfd17432219a9a5232 100644 (file)
@@ -982,6 +982,8 @@ struct ntstatus {
 #define STATUS_INVALID_TASK_INDEX cpu_to_le32(0xC0000501)
 #define STATUS_THREAD_ALREADY_IN_TASK cpu_to_le32(0xC0000502)
 #define STATUS_CALLBACK_BYPASS cpu_to_le32(0xC0000503)
+#define STATUS_SERVER_UNAVAILABLE cpu_to_le32(0xC0000466)
+#define STATUS_FILE_NOT_AVAILABLE cpu_to_le32(0xC0000467)
 #define STATUS_PORT_CLOSED cpu_to_le32(0xC0000700)
 #define STATUS_MESSAGE_LOST cpu_to_le32(0xC0000701)
 #define STATUS_INVALID_MESSAGE cpu_to_le32(0xC0000702)
index f0ce26414f17377365ed0201f21dd4e9cdf06b59..1d1ee9f18f373501f781447f82b494857dd8e9f3 100644 (file)
 #include "cifsproto.h"
 #include "../common/md4.h"
 
-#ifndef false
-#define false 0
-#endif
-#ifndef true
-#define true 1
-#endif
-
 /* following came from the other byteorder.h to avoid include conflicts */
 #define CVAL(buf,pos) (((unsigned char *)(buf))[pos])
 #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
index 4f717ad7c21b424d45f785fdbb94be941c1d7f14..994d70193432978de213a19a0f9933bd90e63671 100644 (file)
@@ -400,10 +400,17 @@ unmask:
                                                  server->conn_id, server->hostname);
        }
 smbd_done:
-       if (rc < 0 && rc != -EINTR)
+       /*
+        * there's hardly any use for the layers above to know the
+        * actual error code here. All they should do at this point is
+        * to retry the connection and hope it goes away.
+        */
+       if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
-       else if (rc > 0)
+               rc = -ECONNABORTED;
+               cifs_signal_cifsd_for_reconnect(server, false);
+       } else if (rc > 0)
                rc = 0;
 out:
        cifs_in_send_dec(server);
@@ -428,8 +435,8 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);
 
-       if (num_rqst > MAX_COMPOUND - 1)
-               return -ENOMEM;
+       if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
+               return -EIO;
 
        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
@@ -1026,6 +1033,9 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
                if (!server || server->terminate)
                        continue;
 
+               if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
+                       continue;
+
                /*
                 * strictly speaking, we should pick up req_lock to read
                 * server->in_flight. But it shouldn't matter much here if we
index 4a4b2b03ff33df060c4c7b16112b98e7a8a3a7c4..b931a99ab9c85e016319244070a21bbf54028ef7 100644 (file)
@@ -214,10 +214,15 @@ static int ksmbd_neg_token_alloc(void *context, size_t hdrlen,
 {
        struct ksmbd_conn *conn = context;
 
+       if (!vlen)
+               return -EINVAL;
+
        conn->mechToken = kmemdup_nul(value, vlen, GFP_KERNEL);
        if (!conn->mechToken)
                return -ENOMEM;
 
+       conn->mechTokenLen = (unsigned int)vlen;
+
        return 0;
 }
 
index d311c2ee10bd7f82172342dd53e58f07a36d1702..09e1e7771592f522e44e309505078e04b6853cad 100644 (file)
@@ -416,13 +416,7 @@ static void stop_sessions(void)
 again:
        down_read(&conn_list_lock);
        list_for_each_entry(conn, &conn_list, conns_list) {
-               struct task_struct *task;
-
                t = conn->transport;
-               task = t->handler;
-               if (task)
-                       ksmbd_debug(CONN, "Stop session handler %s/%d\n",
-                                   task->comm, task_pid_nr(task));
                ksmbd_conn_set_exiting(conn);
                if (t->ops->shutdown) {
                        up_read(&conn_list_lock);
index 3c005246a32e8d2c38bde51b3ea8994e319c9c6b..0e04cf8b1d896ab346834b94dd912c53c86c2b0f 100644 (file)
@@ -88,6 +88,7 @@ struct ksmbd_conn {
        __u16                           dialect;
 
        char                            *mechToken;
+       unsigned int                    mechTokenLen;
 
        struct ksmbd_conn_ops   *conn_ops;
 
@@ -134,7 +135,6 @@ struct ksmbd_transport_ops {
 struct ksmbd_transport {
        struct ksmbd_conn               *conn;
        struct ksmbd_transport_ops      *ops;
-       struct task_struct              *handler;
 };
 
 #define KSMBD_TCP_RECV_TIMEOUT (7 * HZ)
index b7521e41402e003a3fd7e121c6003cf7edf23539..0ebf91ffa2361c0940aba0fc301d1a65bf1612e5 100644 (file)
@@ -304,7 +304,8 @@ enum ksmbd_event {
        KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
        KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE      = 15,
 
-       KSMBD_EVENT_MAX
+       __KSMBD_EVENT_MAX,
+       KSMBD_EVENT_MAX = __KSMBD_EVENT_MAX - 1
 };
 
 /*
index 9e8afaa686e3aa8c12e908348aa38b34168ec367..1a5faa6f6e7bc3ddb96bdaa1ce953ba06f3bf5a2 100644 (file)
@@ -261,6 +261,7 @@ out_ascii:
 
 /**
  * ksmbd_extract_sharename() - get share name from tree connect request
+ * @um: pointer to a unicode_map structure for character encoding handling
  * @treename:  buffer containing tree name and share name
  *
  * Return:      share name on success, otherwise error
index 001926d3b348c88ff98bc1766a6a8b280415e39e..53dfaac425c68dc5f2192924b546b4f9fb71f6c8 100644 (file)
@@ -1197,6 +1197,12 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
        bool prev_op_has_lease;
        __le32 prev_op_state = 0;
 
+       /* Only v2 leases handle the directory */
+       if (S_ISDIR(file_inode(fp->filp)->i_mode)) {
+               if (!lctx || lctx->version != 2)
+                       return 0;
+       }
+
        opinfo = alloc_opinfo(work, pid, tid);
        if (!opinfo)
                return -ENOMEM;
index 3143819935dca1a90fbc5355f11786afd61aae1a..0c97d3c860726a303081eb25927d38439bfaf4ea 100644 (file)
@@ -1414,7 +1414,10 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
        char *name;
        unsigned int name_off, name_len, secbuf_len;
 
-       secbuf_len = le16_to_cpu(req->SecurityBufferLength);
+       if (conn->use_spnego && conn->mechToken)
+               secbuf_len = conn->mechTokenLen;
+       else
+               secbuf_len = le16_to_cpu(req->SecurityBufferLength);
        if (secbuf_len < sizeof(struct authenticate_message)) {
                ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
                return NULL;
@@ -1505,7 +1508,10 @@ static int ntlm_authenticate(struct ksmbd_work *work,
                struct authenticate_message *authblob;
 
                authblob = user_authblob(conn, req);
-               sz = le16_to_cpu(req->SecurityBufferLength);
+               if (conn->use_spnego && conn->mechToken)
+                       sz = conn->mechTokenLen;
+               else
+                       sz = le16_to_cpu(req->SecurityBufferLength);
                rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, conn, sess);
                if (rc) {
                        set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD);
@@ -1778,8 +1784,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
 
        negblob_off = le16_to_cpu(req->SecurityBufferOffset);
        negblob_len = le16_to_cpu(req->SecurityBufferLength);
-       if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
-           negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+       if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer)) {
                rc = -EINVAL;
                goto out_err;
        }
@@ -1788,8 +1793,15 @@ int smb2_sess_setup(struct ksmbd_work *work)
                        negblob_off);
 
        if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
-               if (conn->mechToken)
+               if (conn->mechToken) {
                        negblob = (struct negotiate_message *)conn->mechToken;
+                       negblob_len = conn->mechTokenLen;
+               }
+       }
+
+       if (negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+               rc = -EINVAL;
+               goto out_err;
        }
 
        if (server_conf.auth_mechs & conn->auth_mechs) {
@@ -6161,8 +6173,10 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
                err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
                                             offsetof(struct smb2_read_rsp, Buffer),
                                             aux_payload_buf, nbytes);
-               if (err)
+               if (err) {
+                       kvfree(aux_payload_buf);
                        goto out;
+               }
                kvfree(rpc_resp);
        } else {
                err = ksmbd_iov_pin_rsp(work, (void *)rsp,
@@ -6372,8 +6386,10 @@ int smb2_read(struct ksmbd_work *work)
        err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
                                     offsetof(struct smb2_read_rsp, Buffer),
                                     aux_payload_buf, nbytes);
-       if (err)
+       if (err) {
+               kvfree(aux_payload_buf);
                goto out;
+       }
        ksmbd_fd_put(work, fp);
        return 0;
 
index b49d47bdafc945e31bdfa8d7b9f9931752c4d17c..f29bb03f0dc47bfcb0fe3fc5c5acff16d5a314a8 100644 (file)
@@ -74,7 +74,7 @@ static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info)
 static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
 static int ksmbd_ipc_heartbeat_request(void);
 
-static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX] = {
+static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX + 1] = {
        [KSMBD_EVENT_UNSPEC] = {
                .len = 0,
        },
@@ -403,7 +403,7 @@ static int handle_generic_event(struct sk_buff *skb, struct genl_info *info)
                return -EPERM;
 #endif
 
-       if (type >= KSMBD_EVENT_MAX) {
+       if (type > KSMBD_EVENT_MAX) {
                WARN_ON(1);
                return -EINVAL;
        }
index c5629a68c8b73ecf4f3cbc6fe728aac33ce434aa..8faa25c6e129b5ef7f38721ef398b942e56b0bc2 100644 (file)
@@ -2039,6 +2039,7 @@ static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
 static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
 {
        struct smb_direct_transport *t;
+       struct task_struct *handler;
        int ret;
 
        if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
@@ -2056,11 +2057,11 @@ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
        if (ret)
                goto out_err;
 
-       KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
-                                             KSMBD_TRANS(t)->conn, "ksmbd:r%u",
-                                             smb_direct_port);
-       if (IS_ERR(KSMBD_TRANS(t)->handler)) {
-               ret = PTR_ERR(KSMBD_TRANS(t)->handler);
+       handler = kthread_run(ksmbd_conn_handler_loop,
+                             KSMBD_TRANS(t)->conn, "ksmbd:r%u",
+                             smb_direct_port);
+       if (IS_ERR(handler)) {
+               ret = PTR_ERR(handler);
                pr_err("Can't start thread\n");
                goto out_err;
        }
index eff7a1d793f00382078f3132f818a2dd4fe62cda..002a3f0dc7c5880b61045cf7f10f7e078b85d6a9 100644 (file)
@@ -185,6 +185,7 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
        struct sockaddr *csin;
        int rc = 0;
        struct tcp_transport *t;
+       struct task_struct *handler;
 
        t = alloc_transport(client_sk);
        if (!t) {
@@ -199,13 +200,13 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
                goto out_error;
        }
 
-       KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
-                                             KSMBD_TRANS(t)->conn,
-                                             "ksmbd:%u",
-                                             ksmbd_tcp_get_port(csin));
-       if (IS_ERR(KSMBD_TRANS(t)->handler)) {
+       handler = kthread_run(ksmbd_conn_handler_loop,
+                             KSMBD_TRANS(t)->conn,
+                             "ksmbd:%u",
+                             ksmbd_tcp_get_port(csin));
+       if (IS_ERR(handler)) {
                pr_err("cannot start conn thread\n");
-               rc = PTR_ERR(KSMBD_TRANS(t)->handler);
+               rc = PTR_ERR(handler);
                free_transport(t);
        }
        return rc;
@@ -364,6 +365,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
  * @t:         TCP transport instance
  * @buf:       buffer to store read data from socket
  * @to_read:   number of bytes to read from socket
+ * @max_retries: number of retries if reading from socket fails
  *
  * Return:     on success return number of bytes read from socket,
  *             otherwise return error number
@@ -415,6 +417,7 @@ static void tcp_destroy_socket(struct socket *ksmbd_socket)
 
 /**
  * create_socket - create socket for ksmbd/0
+ * @iface:      interface to bind the created socket to
  *
  * Return:     0 on success, error number otherwise
  */
index b6b6796e16160dbdc92e1f63d370acba93b82760..4df2afa551dc6f8791a98d5e1ac394becf819d8f 100644 (file)
@@ -81,7 +81,7 @@ void sysfs_remove_dir(struct kobject *kobj)
        struct kernfs_node *kn = kobj->sd;
 
        /*
-        * In general, kboject owner is responsible for ensuring removal
+        * In general, kobject owner is responsible for ensuring removal
         * doesn't race with other operations and sysfs doesn't provide any
         * protection; however, when @kobj is used as a symlink target, the
         * symlinking entity usually doesn't own @kobj and thus has no
index f0677ea0ec24e7156ae749c1990ec43263c1097b..110e8a27218900756f3af6cd515d6e8cf33e9514 100644 (file)
  */
 static DEFINE_MUTEX(eventfs_mutex);
 
+/* Choose something "unique" ;-) */
+#define EVENTFS_FILE_INODE_INO         0x12c4e37
+
+/* Just try to make something consistent and unique */
+static int eventfs_dir_ino(struct eventfs_inode *ei)
+{
+       if (!ei->ino)
+               ei->ino = get_next_ino();
+
+       return ei->ino;
+}
+
 /*
  * The eventfs_inode (ei) itself is protected by SRCU. It is released from
  * its parent's list and will have is_freed set (under eventfs_mutex).
@@ -45,16 +57,55 @@ enum {
        EVENTFS_SAVE_MODE       = BIT(16),
        EVENTFS_SAVE_UID        = BIT(17),
        EVENTFS_SAVE_GID        = BIT(18),
+       EVENTFS_TOPLEVEL        = BIT(19),
 };
 
 #define EVENTFS_MODE_MASK      (EVENTFS_SAVE_MODE - 1)
 
+/*
+ * eventfs_inode reference count management.
+ *
+ * NOTE! We count only references from dentries, in the
+ * form 'dentry->d_fsdata'. There are also references from
+ * directory inodes ('ti->private'), but the dentry reference
+ * count is always a superset of the inode reference count.
+ */
+static void release_ei(struct kref *ref)
+{
+       struct eventfs_inode *ei = container_of(ref, struct eventfs_inode, kref);
+
+       WARN_ON_ONCE(!ei->is_freed);
+
+       kfree(ei->entry_attrs);
+       kfree_const(ei->name);
+       kfree_rcu(ei, rcu);
+}
+
+static inline void put_ei(struct eventfs_inode *ei)
+{
+       if (ei)
+               kref_put(&ei->kref, release_ei);
+}
+
+static inline void free_ei(struct eventfs_inode *ei)
+{
+       if (ei) {
+               ei->is_freed = 1;
+               put_ei(ei);
+       }
+}
+
+static inline struct eventfs_inode *get_ei(struct eventfs_inode *ei)
+{
+       if (ei)
+               kref_get(&ei->kref);
+       return ei;
+}
+
 static struct dentry *eventfs_root_lookup(struct inode *dir,
                                          struct dentry *dentry,
                                          unsigned int flags);
-static int dcache_dir_open_wrapper(struct inode *inode, struct file *file);
-static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx);
-static int eventfs_release(struct inode *inode, struct file *file);
+static int eventfs_iterate(struct file *file, struct dir_context *ctx);
 
 static void update_attr(struct eventfs_attr *attr, struct iattr *iattr)
 {
@@ -94,7 +145,7 @@ static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
        /* Preallocate the children mode array if necessary */
        if (!(dentry->d_inode->i_mode & S_IFDIR)) {
                if (!ei->entry_attrs) {
-                       ei->entry_attrs = kzalloc(sizeof(*ei->entry_attrs) * ei->nr_entries,
+                       ei->entry_attrs = kcalloc(ei->nr_entries, sizeof(*ei->entry_attrs),
                                                  GFP_NOFS);
                        if (!ei->entry_attrs) {
                                ret = -ENOMEM;
@@ -117,10 +168,17 @@ static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
                 * The events directory dentry is never freed, unless its
                 * part of an instance that is deleted. It's attr is the
                 * default for its child files and directories.
-                * Do not update it. It's not used for its own mode or ownership
+                * Do not update it. It's not used for its own mode or ownership.
                 */
-               if (!ei->is_events)
+               if (ei->is_events) {
+                       /* But it still needs to know if it was modified */
+                       if (iattr->ia_valid & ATTR_UID)
+                               ei->attr.mode |= EVENTFS_SAVE_UID;
+                       if (iattr->ia_valid & ATTR_GID)
+                               ei->attr.mode |= EVENTFS_SAVE_GID;
+               } else {
                        update_attr(&ei->attr, iattr);
+               }
 
        } else {
                name = dentry->d_name.name;
@@ -138,9 +196,63 @@ static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
        return ret;
 }
 
+static void update_top_events_attr(struct eventfs_inode *ei, struct super_block *sb)
+{
+       struct inode *root;
+
+       /* Only update if the "events" was on the top level */
+       if (!ei || !(ei->attr.mode & EVENTFS_TOPLEVEL))
+               return;
+
+       /* Get the tracefs root inode. */
+       root = d_inode(sb->s_root);
+       ei->attr.uid = root->i_uid;
+       ei->attr.gid = root->i_gid;
+}
+
+static void set_top_events_ownership(struct inode *inode)
+{
+       struct tracefs_inode *ti = get_tracefs(inode);
+       struct eventfs_inode *ei = ti->private;
+
+       /* The top events directory doesn't get automatically updated */
+       if (!ei || !ei->is_events || !(ei->attr.mode & EVENTFS_TOPLEVEL))
+               return;
+
+       update_top_events_attr(ei, inode->i_sb);
+
+       if (!(ei->attr.mode & EVENTFS_SAVE_UID))
+               inode->i_uid = ei->attr.uid;
+
+       if (!(ei->attr.mode & EVENTFS_SAVE_GID))
+               inode->i_gid = ei->attr.gid;
+}
+
+static int eventfs_get_attr(struct mnt_idmap *idmap,
+                           const struct path *path, struct kstat *stat,
+                           u32 request_mask, unsigned int flags)
+{
+       struct dentry *dentry = path->dentry;
+       struct inode *inode = d_backing_inode(dentry);
+
+       set_top_events_ownership(inode);
+
+       generic_fillattr(idmap, request_mask, inode, stat);
+       return 0;
+}
+
+static int eventfs_permission(struct mnt_idmap *idmap,
+                             struct inode *inode, int mask)
+{
+       set_top_events_ownership(inode);
+       return generic_permission(idmap, inode, mask);
+}
+
 static const struct inode_operations eventfs_root_dir_inode_operations = {
        .lookup         = eventfs_root_lookup,
        .setattr        = eventfs_set_attr,
+       .getattr        = eventfs_get_attr,
+       .permission     = eventfs_permission,
 };
 
 static const struct inode_operations eventfs_file_inode_operations = {
@@ -148,11 +260,9 @@ static const struct inode_operations eventfs_file_inode_operations = {
 };
 
 static const struct file_operations eventfs_file_operations = {
-       .open           = dcache_dir_open_wrapper,
        .read           = generic_read_dir,
-       .iterate_shared = dcache_readdir_wrapper,
+       .iterate_shared = eventfs_iterate,
        .llseek         = generic_file_llseek,
-       .release        = eventfs_release,
 };
 
 /* Return the evenfs_inode of the "events" directory */
@@ -160,10 +270,11 @@ static struct eventfs_inode *eventfs_find_events(struct dentry *dentry)
 {
        struct eventfs_inode *ei;
 
-       mutex_lock(&eventfs_mutex);
        do {
-               /* The parent always has an ei, except for events itself */
-               ei = dentry->d_parent->d_fsdata;
+               // The parent is stable because we do not do renames
+               dentry = dentry->d_parent;
+               // ... and directories always have d_fsdata
+               ei = dentry->d_fsdata;
 
                /*
                 * If the ei is being freed, the ownership of the children
@@ -173,10 +284,10 @@ static struct eventfs_inode *eventfs_find_events(struct dentry *dentry)
                        ei = NULL;
                        break;
                }
-
-               dentry = ei->dentry;
+               // Walk upwards until you find the events inode
        } while (!ei->is_events);
-       mutex_unlock(&eventfs_mutex);
+
+       update_top_events_attr(ei, dentry->d_sb);
 
        return ei;
 }
@@ -206,50 +317,11 @@ static void update_inode_attr(struct dentry *dentry, struct inode *inode,
                inode->i_gid = attr->gid;
 }
 
-static void update_gid(struct eventfs_inode *ei, kgid_t gid, int level)
-{
-       struct eventfs_inode *ei_child;
-
-       /* at most we have events/system/event */
-       if (WARN_ON_ONCE(level > 3))
-               return;
-
-       ei->attr.gid = gid;
-
-       if (ei->entry_attrs) {
-               for (int i = 0; i < ei->nr_entries; i++) {
-                       ei->entry_attrs[i].gid = gid;
-               }
-       }
-
-       /*
-        * Only eventfs_inode with dentries are updated, make sure
-        * all eventfs_inodes are updated. If one of the children
-        * do not have a dentry, this function must traverse it.
-        */
-       list_for_each_entry_srcu(ei_child, &ei->children, list,
-                                srcu_read_lock_held(&eventfs_srcu)) {
-               if (!ei_child->dentry)
-                       update_gid(ei_child, gid, level + 1);
-       }
-}
-
-void eventfs_update_gid(struct dentry *dentry, kgid_t gid)
-{
-       struct eventfs_inode *ei = dentry->d_fsdata;
-       int idx;
-
-       idx = srcu_read_lock(&eventfs_srcu);
-       update_gid(ei, gid, 0);
-       srcu_read_unlock(&eventfs_srcu, idx);
-}
-
 /**
- * create_file - create a file in the tracefs filesystem
- * @name: the name of the file to create.
+ * lookup_file - look up a file in the tracefs filesystem
+ * @dentry: the dentry to look up
  * @mode: the permission that the file should have.
  * @attr: saved attributes changed by user
- * @parent: parent dentry for this file.
  * @data: something that the caller will want to get to later on.
  * @fop: struct file_operations that should be used for this file.
  *
@@ -257,30 +329,25 @@ void eventfs_update_gid(struct dentry *dentry, kgid_t gid)
  * directory. The inode.i_private pointer will point to @data in the open()
  * call.
  */
-static struct dentry *create_file(const char *name, umode_t mode,
+static struct dentry *lookup_file(struct eventfs_inode *parent_ei,
+                                 struct dentry *dentry,
+                                 umode_t mode,
                                  struct eventfs_attr *attr,
-                                 struct dentry *parent, void *data,
+                                 void *data,
                                  const struct file_operations *fop)
 {
        struct tracefs_inode *ti;
-       struct dentry *dentry;
        struct inode *inode;
 
        if (!(mode & S_IFMT))
                mode |= S_IFREG;
 
        if (WARN_ON_ONCE(!S_ISREG(mode)))
-               return NULL;
-
-       WARN_ON_ONCE(!parent);
-       dentry = eventfs_start_creating(name, parent);
-
-       if (IS_ERR(dentry))
-               return dentry;
+               return ERR_PTR(-EIO);
 
        inode = tracefs_get_inode(dentry->d_sb);
        if (unlikely(!inode))
-               return eventfs_failed_creating(dentry);
+               return ERR_PTR(-ENOMEM);
 
        /* If the user updated the directory's attributes, use them */
        update_inode_attr(dentry, inode, attr, mode);
@@ -289,34 +356,36 @@ static struct dentry *create_file(const char *name, umode_t mode,
        inode->i_fop = fop;
        inode->i_private = data;
 
+       /* All files will have the same inode number */
+       inode->i_ino = EVENTFS_FILE_INODE_INO;
+
        ti = get_tracefs(inode);
        ti->flags |= TRACEFS_EVENT_INODE;
-       d_instantiate(dentry, inode);
-       fsnotify_create(dentry->d_parent->d_inode, dentry);
-       return eventfs_end_creating(dentry);
+
+       // Files have their parent's ei as their fsdata
+       dentry->d_fsdata = get_ei(parent_ei);
+
+       d_add(dentry, inode);
+       return NULL;
 };
 
 /**
- * create_dir - create a dir in the tracefs filesystem
+ * lookup_dir_entry - look up a dir in the tracefs filesystem
+ * @dentry: the directory to look up
  * @ei: the eventfs_inode that represents the directory to create
- * @parent: parent dentry for this file.
  *
- * This function will create a dentry for a directory represented by
+ * This function will look up a dentry for a directory represented by
  * a eventfs_inode.
  */
-static struct dentry *create_dir(struct eventfs_inode *ei, struct dentry *parent)
+static struct dentry *lookup_dir_entry(struct dentry *dentry,
+       struct eventfs_inode *pei, struct eventfs_inode *ei)
 {
        struct tracefs_inode *ti;
-       struct dentry *dentry;
        struct inode *inode;
 
-       dentry = eventfs_start_creating(ei->name, parent);
-       if (IS_ERR(dentry))
-               return dentry;
-
        inode = tracefs_get_inode(dentry->d_sb);
        if (unlikely(!inode))
-               return eventfs_failed_creating(dentry);
+               return ERR_PTR(-ENOMEM);
 
        /* If the user updated the directory's attributes, use them */
        update_inode_attr(dentry, inode, &ei->attr,
@@ -325,247 +394,72 @@ static struct dentry *create_dir(struct eventfs_inode *ei, struct dentry *parent
        inode->i_op = &eventfs_root_dir_inode_operations;
        inode->i_fop = &eventfs_file_operations;
 
+       /* Directory inode numbers come from eventfs_dir_ino(ei) */
+       inode->i_ino = eventfs_dir_ino(ei);
+
        ti = get_tracefs(inode);
        ti->flags |= TRACEFS_EVENT_INODE;
+       /* Only directories have ti->private set to an ei, not files */
+       ti->private = ei;
 
-       inc_nlink(inode);
-       d_instantiate(dentry, inode);
-       inc_nlink(dentry->d_parent->d_inode);
-       fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
-       return eventfs_end_creating(dentry);
+       dentry->d_fsdata = get_ei(ei);
+
+       d_add(dentry, inode);
+       return NULL;
 }
 
-static void free_ei(struct eventfs_inode *ei)
+static inline struct eventfs_inode *alloc_ei(const char *name)
 {
-       kfree_const(ei->name);
-       kfree(ei->d_children);
-       kfree(ei->entry_attrs);
-       kfree(ei);
+       struct eventfs_inode *ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+
+       if (!ei)
+               return NULL;
+
+       ei->name = kstrdup_const(name, GFP_KERNEL);
+       if (!ei->name) {
+               kfree(ei);
+               return NULL;
+       }
+       kref_init(&ei->kref);
+       return ei;
 }
 
 /**
- * eventfs_set_ei_status_free - remove the dentry reference from an eventfs_inode
- * @ti: the tracefs_inode of the dentry
+ * eventfs_d_release - dentry is going away
  * @dentry: dentry which has the reference to remove.
  *
  * Remove the association between a dentry from an eventfs_inode.
  */
-void eventfs_set_ei_status_free(struct tracefs_inode *ti, struct dentry *dentry)
+void eventfs_d_release(struct dentry *dentry)
 {
-       struct eventfs_inode *ei;
-       int i;
-
-       mutex_lock(&eventfs_mutex);
-
-       ei = dentry->d_fsdata;
-       if (!ei)
-               goto out;
-
-       /* This could belong to one of the files of the ei */
-       if (ei->dentry != dentry) {
-               for (i = 0; i < ei->nr_entries; i++) {
-                       if (ei->d_children[i] == dentry)
-                               break;
-               }
-               if (WARN_ON_ONCE(i == ei->nr_entries))
-                       goto out;
-               ei->d_children[i] = NULL;
-       } else if (ei->is_freed) {
-               free_ei(ei);
-       } else {
-               ei->dentry = NULL;
-       }
-
-       dentry->d_fsdata = NULL;
- out:
-       mutex_unlock(&eventfs_mutex);
+       put_ei(dentry->d_fsdata);
 }
 
 /**
- * create_file_dentry - create a dentry for a file of an eventfs_inode
+ * lookup_file_dentry - create a dentry for a file of an eventfs_inode
  * @ei: the eventfs_inode that the file will be created under
- * @idx: the index into the d_children[] of the @ei
+ * @idx: the index into the entry_attrs[] of the @ei
  * @parent: The parent dentry of the created file.
  * @name: The name of the file to create
  * @mode: The mode of the file.
  * @data: The data to use to set the inode of the file with on open()
  * @fops: The fops of the file to be created.
- * @lookup: If called by the lookup routine, in which case, dput() the created dentry.
  *
  * Create a dentry for a file of an eventfs_inode @ei and place it into the
- * address located at @e_dentry. If the @e_dentry already has a dentry, then
- * just do a dget() on it and return. Otherwise create the dentry and attach it.
+ * dentry provided by the VFS lookup.
  */
 static struct dentry *
-create_file_dentry(struct eventfs_inode *ei, int idx,
-                  struct dentry *parent, const char *name, umode_t mode, void *data,
-                  const struct file_operations *fops, bool lookup)
+lookup_file_dentry(struct dentry *dentry,
+                  struct eventfs_inode *ei, int idx,
+                  umode_t mode, void *data,
+                  const struct file_operations *fops)
 {
        struct eventfs_attr *attr = NULL;
-       struct dentry **e_dentry = &ei->d_children[idx];
-       struct dentry *dentry;
-
-       WARN_ON_ONCE(!inode_is_locked(parent->d_inode));
 
-       mutex_lock(&eventfs_mutex);
-       if (ei->is_freed) {
-               mutex_unlock(&eventfs_mutex);
-               return NULL;
-       }
-       /* If the e_dentry already has a dentry, use it */
-       if (*e_dentry) {
-               /* lookup does not need to up the ref count */
-               if (!lookup)
-                       dget(*e_dentry);
-               mutex_unlock(&eventfs_mutex);
-               return *e_dentry;
-       }
-
-       /* ei->entry_attrs are protected by SRCU */
        if (ei->entry_attrs)
                attr = &ei->entry_attrs[idx];
 
-       mutex_unlock(&eventfs_mutex);
-
-       dentry = create_file(name, mode, attr, parent, data, fops);
-
-       mutex_lock(&eventfs_mutex);
-
-       if (IS_ERR_OR_NULL(dentry)) {
-               /*
-                * When the mutex was released, something else could have
-                * created the dentry for this e_dentry. In which case
-                * use that one.
-                *
-                * If ei->is_freed is set, the e_dentry is currently on its
-                * way to being freed, don't return it. If e_dentry is NULL
-                * it means it was already freed.
-                */
-               if (ei->is_freed)
-                       dentry = NULL;
-               else
-                       dentry = *e_dentry;
-               /* The lookup does not need to up the dentry refcount */
-               if (dentry && !lookup)
-                       dget(dentry);
-               mutex_unlock(&eventfs_mutex);
-               return dentry;
-       }
-
-       if (!*e_dentry && !ei->is_freed) {
-               *e_dentry = dentry;
-               dentry->d_fsdata = ei;
-       } else {
-               /*
-                * Should never happen unless we get here due to being freed.
-                * Otherwise it means two dentries exist with the same name.
-                */
-               WARN_ON_ONCE(!ei->is_freed);
-               dentry = NULL;
-       }
-       mutex_unlock(&eventfs_mutex);
-
-       if (lookup)
-               dput(dentry);
-
-       return dentry;
-}
-
-/**
- * eventfs_post_create_dir - post create dir routine
- * @ei: eventfs_inode of recently created dir
- *
- * Map the meta-data of files within an eventfs dir to their parent dentry
- */
-static void eventfs_post_create_dir(struct eventfs_inode *ei)
-{
-       struct eventfs_inode *ei_child;
-       struct tracefs_inode *ti;
-
-       lockdep_assert_held(&eventfs_mutex);
-
-       /* srcu lock already held */
-       /* fill parent-child relation */
-       list_for_each_entry_srcu(ei_child, &ei->children, list,
-                                srcu_read_lock_held(&eventfs_srcu)) {
-               ei_child->d_parent = ei->dentry;
-       }
-
-       ti = get_tracefs(ei->dentry->d_inode);
-       ti->private = ei;
-}
-
-/**
- * create_dir_dentry - Create a directory dentry for the eventfs_inode
- * @pei: The eventfs_inode parent of ei.
- * @ei: The eventfs_inode to create the directory for
- * @parent: The dentry of the parent of this directory
- * @lookup: True if this is called by the lookup code
- *
- * This creates and attaches a directory dentry to the eventfs_inode @ei.
- */
-static struct dentry *
-create_dir_dentry(struct eventfs_inode *pei, struct eventfs_inode *ei,
-                 struct dentry *parent, bool lookup)
-{
-       struct dentry *dentry = NULL;
-
-       WARN_ON_ONCE(!inode_is_locked(parent->d_inode));
-
-       mutex_lock(&eventfs_mutex);
-       if (pei->is_freed || ei->is_freed) {
-               mutex_unlock(&eventfs_mutex);
-               return NULL;
-       }
-       if (ei->dentry) {
-               /* If the dentry already has a dentry, use it */
-               dentry = ei->dentry;
-               /* lookup does not need to up the ref count */
-               if (!lookup)
-                       dget(dentry);
-               mutex_unlock(&eventfs_mutex);
-               return dentry;
-       }
-       mutex_unlock(&eventfs_mutex);
-
-       dentry = create_dir(ei, parent);
-
-       mutex_lock(&eventfs_mutex);
-
-       if (IS_ERR_OR_NULL(dentry) && !ei->is_freed) {
-               /*
-                * When the mutex was released, something else could have
-                * created the dentry for this e_dentry. In which case
-                * use that one.
-                *
-                * If ei->is_freed is set, the e_dentry is currently on its
-                * way to being freed.
-                */
-               dentry = ei->dentry;
-               if (dentry && !lookup)
-                       dget(dentry);
-               mutex_unlock(&eventfs_mutex);
-               return dentry;
-       }
-
-       if (!ei->dentry && !ei->is_freed) {
-               ei->dentry = dentry;
-               eventfs_post_create_dir(ei);
-               dentry->d_fsdata = ei;
-       } else {
-               /*
-                * Should never happen unless we get here due to being freed.
-                * Otherwise it means two dentries exist with the same name.
-                */
-               WARN_ON_ONCE(!ei->is_freed);
-               dentry = NULL;
-       }
-       mutex_unlock(&eventfs_mutex);
-
-       if (lookup)
-               dput(dentry);
-
-       return dentry;
+       return lookup_file(ei, dentry, mode, attr, data, fops);
 }
 
 /**
@@ -582,250 +476,153 @@ static struct dentry *eventfs_root_lookup(struct inode *dir,
                                          struct dentry *dentry,
                                          unsigned int flags)
 {
-       const struct file_operations *fops;
-       const struct eventfs_entry *entry;
        struct eventfs_inode *ei_child;
        struct tracefs_inode *ti;
        struct eventfs_inode *ei;
-       struct dentry *ei_dentry = NULL;
-       struct dentry *ret = NULL;
        const char *name = dentry->d_name.name;
-       bool created = false;
-       umode_t mode;
-       void *data;
-       int idx;
-       int i;
-       int r;
+       struct dentry *result = NULL;
 
        ti = get_tracefs(dir);
        if (!(ti->flags & TRACEFS_EVENT_INODE))
-               return NULL;
-
-       /* Grab srcu to prevent the ei from going away */
-       idx = srcu_read_lock(&eventfs_srcu);
+               return ERR_PTR(-EIO);
 
-       /*
-        * Grab the eventfs_mutex to consistent value from ti->private.
-        * This s
-        */
        mutex_lock(&eventfs_mutex);
-       ei = READ_ONCE(ti->private);
-       if (ei && !ei->is_freed)
-               ei_dentry = READ_ONCE(ei->dentry);
-       mutex_unlock(&eventfs_mutex);
 
-       if (!ei || !ei_dentry)
+       ei = ti->private;
+       if (!ei || ei->is_freed)
                goto out;
 
-       data = ei->data;
-
-       list_for_each_entry_srcu(ei_child, &ei->children, list,
-                                srcu_read_lock_held(&eventfs_srcu)) {
+       list_for_each_entry(ei_child, &ei->children, list) {
                if (strcmp(ei_child->name, name) != 0)
                        continue;
-               ret = simple_lookup(dir, dentry, flags);
-               if (IS_ERR(ret))
+               if (ei_child->is_freed)
                        goto out;
-               create_dir_dentry(ei, ei_child, ei_dentry, true);
-               created = true;
-               break;
-       }
-
-       if (created)
+               result = lookup_dir_entry(dentry, ei, ei_child);
                goto out;
-
-       for (i = 0; i < ei->nr_entries; i++) {
-               entry = &ei->entries[i];
-               if (strcmp(name, entry->name) == 0) {
-                       void *cdata = data;
-                       mutex_lock(&eventfs_mutex);
-                       /* If ei->is_freed, then the event itself may be too */
-                       if (!ei->is_freed)
-                               r = entry->callback(name, &mode, &cdata, &fops);
-                       else
-                               r = -1;
-                       mutex_unlock(&eventfs_mutex);
-                       if (r <= 0)
-                               continue;
-                       ret = simple_lookup(dir, dentry, flags);
-                       if (IS_ERR(ret))
-                               goto out;
-                       create_file_dentry(ei, i, ei_dentry, name, mode, cdata,
-                                          fops, true);
-                       break;
-               }
        }
- out:
-       srcu_read_unlock(&eventfs_srcu, idx);
-       return ret;
-}
-
-struct dentry_list {
-       void                    *cursor;
-       struct dentry           **dentries;
-};
 
-/**
- * eventfs_release - called to release eventfs file/dir
- * @inode: inode to be released
- * @file: file to be released (not used)
- */
-static int eventfs_release(struct inode *inode, struct file *file)
-{
-       struct tracefs_inode *ti;
-       struct dentry_list *dlist = file->private_data;
-       void *cursor;
-       int i;
+       for (int i = 0; i < ei->nr_entries; i++) {
+               void *data;
+               umode_t mode;
+               const struct file_operations *fops;
+               const struct eventfs_entry *entry = &ei->entries[i];
 
-       ti = get_tracefs(inode);
-       if (!(ti->flags & TRACEFS_EVENT_INODE))
-               return -EINVAL;
+               if (strcmp(name, entry->name) != 0)
+                       continue;
 
-       if (WARN_ON_ONCE(!dlist))
-               return -EINVAL;
+               data = ei->data;
+               if (entry->callback(name, &mode, &data, &fops) <= 0)
+                       goto out;
 
-       for (i = 0; dlist->dentries && dlist->dentries[i]; i++) {
-               dput(dlist->dentries[i]);
+               result = lookup_file_dentry(dentry, ei, i, mode, data, fops);
+               goto out;
        }
-
-       cursor = dlist->cursor;
-       kfree(dlist->dentries);
-       kfree(dlist);
-       file->private_data = cursor;
-       return dcache_dir_close(inode, file);
-}
-
-static int add_dentries(struct dentry ***dentries, struct dentry *d, int cnt)
-{
-       struct dentry **tmp;
-
-       tmp = krealloc(*dentries, sizeof(d) * (cnt + 2), GFP_NOFS);
-       if (!tmp)
-               return -1;
-       tmp[cnt] = d;
-       tmp[cnt + 1] = NULL;
-       *dentries = tmp;
-       return 0;
+ out:
+       mutex_unlock(&eventfs_mutex);
+       return result;
 }
 
-/**
- * dcache_dir_open_wrapper - eventfs open wrapper
- * @inode: not used
- * @file: dir to be opened (to create it's children)
- *
- * Used to dynamic create file/dir with-in @file, all the
- * file/dir will be created. If already created then references
- * will be increased
+/*
+ * Walk the children of an eventfs_inode to fill in getdents().
  */
-static int dcache_dir_open_wrapper(struct inode *inode, struct file *file)
+static int eventfs_iterate(struct file *file, struct dir_context *ctx)
 {
        const struct file_operations *fops;
+       struct inode *f_inode = file_inode(file);
        const struct eventfs_entry *entry;
        struct eventfs_inode *ei_child;
        struct tracefs_inode *ti;
        struct eventfs_inode *ei;
-       struct dentry_list *dlist;
-       struct dentry **dentries = NULL;
-       struct dentry *parent = file_dentry(file);
-       struct dentry *d;
-       struct inode *f_inode = file_inode(file);
-       const char *name = parent->d_name.name;
+       const char *name;
        umode_t mode;
-       void *data;
-       int cnt = 0;
        int idx;
-       int ret;
-       int i;
-       int r;
+       int ret = -EINVAL;
+       int ino;
+       int i, r, c;
+
+       if (!dir_emit_dots(file, ctx))
+               return 0;
 
        ti = get_tracefs(f_inode);
        if (!(ti->flags & TRACEFS_EVENT_INODE))
                return -EINVAL;
 
-       if (WARN_ON_ONCE(file->private_data))
-               return -EINVAL;
+       c = ctx->pos - 2;
 
        idx = srcu_read_lock(&eventfs_srcu);
 
        mutex_lock(&eventfs_mutex);
        ei = READ_ONCE(ti->private);
+       if (ei && ei->is_freed)
+               ei = NULL;
        mutex_unlock(&eventfs_mutex);
 
-       if (!ei) {
-               srcu_read_unlock(&eventfs_srcu, idx);
-               return -EINVAL;
-       }
-
+       if (!ei)
+               goto out;
 
-       data = ei->data;
+       /*
+        * Need to create the dentries and inodes to have a consistent
+        * inode number.
+        */
+       ret = 0;
 
-       dlist = kmalloc(sizeof(*dlist), GFP_KERNEL);
-       if (!dlist) {
-               srcu_read_unlock(&eventfs_srcu, idx);
-               return -ENOMEM;
-       }
+       /* Start at 'c' to jump over already read entries */
+       for (i = c; i < ei->nr_entries; i++, ctx->pos++) {
+               void *cdata = ei->data;
 
-       inode_lock(parent->d_inode);
-       list_for_each_entry_srcu(ei_child, &ei->children, list,
-                                srcu_read_lock_held(&eventfs_srcu)) {
-               d = create_dir_dentry(ei, ei_child, parent, false);
-               if (d) {
-                       ret = add_dentries(&dentries, d, cnt);
-                       if (ret < 0)
-                               break;
-                       cnt++;
-               }
-       }
-
-       for (i = 0; i < ei->nr_entries; i++) {
-               void *cdata = data;
                entry = &ei->entries[i];
                name = entry->name;
+
                mutex_lock(&eventfs_mutex);
-               /* If ei->is_freed, then the event itself may be too */
-               if (!ei->is_freed)
-                       r = entry->callback(name, &mode, &cdata, &fops);
-               else
-                       r = -1;
+               /* If ei->is_freed then just bail here, nothing more to do */
+               if (ei->is_freed) {
+                       mutex_unlock(&eventfs_mutex);
+                       goto out;
+               }
+               r = entry->callback(name, &mode, &cdata, &fops);
                mutex_unlock(&eventfs_mutex);
                if (r <= 0)
                        continue;
-               d = create_file_dentry(ei, i, parent, name, mode, cdata, fops, false);
-               if (d) {
-                       ret = add_dentries(&dentries, d, cnt);
-                       if (ret < 0)
-                               break;
-                       cnt++;
+
+               ino = EVENTFS_FILE_INODE_INO;
+
+               if (!dir_emit(ctx, name, strlen(name), ino, DT_REG))
+                       goto out;
+       }
+
+       /* Subtract the skipped entries above */
+       c -= min((unsigned int)c, (unsigned int)ei->nr_entries);
+
+       list_for_each_entry_srcu(ei_child, &ei->children, list,
+                                srcu_read_lock_held(&eventfs_srcu)) {
+
+               if (c > 0) {
+                       c--;
+                       continue;
                }
+
+               ctx->pos++;
+
+               if (ei_child->is_freed)
+                       continue;
+
+               name = ei_child->name;
+
+               ino = eventfs_dir_ino(ei_child);
+
+               if (!dir_emit(ctx, name, strlen(name), ino, DT_DIR))
+                       goto out_dec;
        }
-       inode_unlock(parent->d_inode);
+       ret = 1;
+ out:
        srcu_read_unlock(&eventfs_srcu, idx);
-       ret = dcache_dir_open(inode, file);
 
-       /*
-        * dcache_dir_open() sets file->private_data to a dentry cursor.
-        * Need to save that but also save all the dentries that were
-        * opened by this function.
-        */
-       dlist->cursor = file->private_data;
-       dlist->dentries = dentries;
-       file->private_data = dlist;
        return ret;
-}
-
-/*
- * This just sets the file->private_data back to the cursor and back.
- */
-static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx)
-{
-       struct dentry_list *dlist = file->private_data;
-       int ret;
 
-       file->private_data = dlist->cursor;
-       ret = dcache_readdir(file, ctx);
-       dlist->cursor = file->private_data;
-       file->private_data = dlist;
-       return ret;
+ out_dec:
+       /* Incremented ctx->pos without adding something, reset it */
+       ctx->pos--;
+       goto out;
 }
 
 /**
@@ -872,25 +669,10 @@ struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode
        if (!parent)
                return ERR_PTR(-EINVAL);
 
-       ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+       ei = alloc_ei(name);
        if (!ei)
                return ERR_PTR(-ENOMEM);
 
-       ei->name = kstrdup_const(name, GFP_KERNEL);
-       if (!ei->name) {
-               kfree(ei);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       if (size) {
-               ei->d_children = kzalloc(sizeof(*ei->d_children) * size, GFP_KERNEL);
-               if (!ei->d_children) {
-                       kfree_const(ei->name);
-                       kfree(ei);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-
        ei->entries = entries;
        ei->nr_entries = size;
        ei->data = data;
@@ -898,10 +680,8 @@ struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode
        INIT_LIST_HEAD(&ei->list);
 
        mutex_lock(&eventfs_mutex);
-       if (!parent->is_freed) {
+       if (!parent->is_freed)
                list_add_tail(&ei->list, &parent->children);
-               ei->d_parent = parent->dentry;
-       }
        mutex_unlock(&eventfs_mutex);
 
        /* Was the parent freed? */
@@ -941,33 +721,33 @@ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry
        if (IS_ERR(dentry))
                return ERR_CAST(dentry);
 
-       ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+       ei = alloc_ei(name);
        if (!ei)
-               goto fail_ei;
+               goto fail;
 
        inode = tracefs_get_inode(dentry->d_sb);
        if (unlikely(!inode))
                goto fail;
 
-       if (size) {
-               ei->d_children = kzalloc(sizeof(*ei->d_children) * size, GFP_KERNEL);
-               if (!ei->d_children)
-                       goto fail;
-       }
-
-       ei->dentry = dentry;
+       // Note: we have a ref to the dentry from tracefs_start_creating()
+       ei->events_dir = dentry;
        ei->entries = entries;
        ei->nr_entries = size;
        ei->is_events = 1;
        ei->data = data;
-       ei->name = kstrdup_const(name, GFP_KERNEL);
-       if (!ei->name)
-               goto fail;
 
        /* Save the ownership of this directory */
        uid = d_inode(dentry->d_parent)->i_uid;
        gid = d_inode(dentry->d_parent)->i_gid;
 
+       /*
+        * If the events directory belongs to the top-level instance, then
+        * parent is NULL. Set attr.mode to reflect this so that its
+        * permissions default to those of the tracefs root dentry.
+        */
+       if (!parent)
+               ei->attr.mode = EVENTFS_TOPLEVEL;
+
        /* This is used as the default ownership of the files and directories */
        ei->attr.uid = uid;
        ei->attr.gid = gid;
@@ -985,11 +765,19 @@ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry
        inode->i_op = &eventfs_root_dir_inode_operations;
        inode->i_fop = &eventfs_file_operations;
 
-       dentry->d_fsdata = ei;
+       dentry->d_fsdata = get_ei(ei);
 
-       /* directory inodes start off with i_nlink == 2 (for "." entry) */
-       inc_nlink(inode);
+       /*
+        * Keep all eventfs directories with i_nlink == 1.
+        * Due to the dynamic nature of the dentry creations and not
+        * wanting to add a pointer to the parent eventfs_inode in the
+        * eventfs_inode structure, keeping the i_nlink in sync with the
+        * number of directories would cause too much complexity for
+        * something not worth much. Keeping directory links at 1
+        * tells userspace not to trust the link number.
+        */
        d_instantiate(dentry, inode);
+       /* The dentry of the "events" parent does keep track though */
        inc_nlink(dentry->d_parent->d_inode);
        fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
        tracefs_end_creating(dentry);
@@ -997,72 +785,11 @@ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry
        return ei;
 
  fail:
-       kfree(ei->d_children);
-       kfree(ei);
- fail_ei:
+       free_ei(ei);
        tracefs_failed_creating(dentry);
        return ERR_PTR(-ENOMEM);
 }
 
-static LLIST_HEAD(free_list);
-
-static void eventfs_workfn(struct work_struct *work)
-{
-        struct eventfs_inode *ei, *tmp;
-        struct llist_node *llnode;
-
-       llnode = llist_del_all(&free_list);
-        llist_for_each_entry_safe(ei, tmp, llnode, llist) {
-               /* This dput() matches the dget() from unhook_dentry() */
-               for (int i = 0; i < ei->nr_entries; i++) {
-                       if (ei->d_children[i])
-                               dput(ei->d_children[i]);
-               }
-               /* This should only get here if it had a dentry */
-               if (!WARN_ON_ONCE(!ei->dentry))
-                       dput(ei->dentry);
-        }
-}
-
-static DECLARE_WORK(eventfs_work, eventfs_workfn);
-
-static void free_rcu_ei(struct rcu_head *head)
-{
-       struct eventfs_inode *ei = container_of(head, struct eventfs_inode, rcu);
-
-       if (ei->dentry) {
-               /* Do not free the ei until all references of dentry are gone */
-               if (llist_add(&ei->llist, &free_list))
-                       queue_work(system_unbound_wq, &eventfs_work);
-               return;
-       }
-
-       /* If the ei doesn't have a dentry, neither should its children */
-       for (int i = 0; i < ei->nr_entries; i++) {
-               WARN_ON_ONCE(ei->d_children[i]);
-       }
-
-       free_ei(ei);
-}
-
-static void unhook_dentry(struct dentry *dentry)
-{
-       if (!dentry)
-               return;
-       /*
-        * Need to add a reference to the dentry that is expected by
-        * simple_recursive_removal(), which will include a dput().
-        */
-       dget(dentry);
-
-       /*
-        * Also add a reference for the dput() in eventfs_workfn().
-        * That is required as that dput() will free the ei after
-        * the SRCU grace period is over.
-        */
-       dget(dentry);
-}
-
 /**
  * eventfs_remove_rec - remove eventfs dir or file from list
  * @ei: eventfs_inode to be removed.
@@ -1075,8 +802,6 @@ static void eventfs_remove_rec(struct eventfs_inode *ei, int level)
 {
        struct eventfs_inode *ei_child;
 
-       if (!ei)
-               return;
        /*
         * Check recursion depth. It should never be greater than 3:
         * 0 - events/
@@ -1088,28 +813,11 @@ static void eventfs_remove_rec(struct eventfs_inode *ei, int level)
                return;
 
        /* search for nested folders or files */
-       list_for_each_entry_srcu(ei_child, &ei->children, list,
-                                lockdep_is_held(&eventfs_mutex)) {
-               /* Children only have dentry if parent does */
-               WARN_ON_ONCE(ei_child->dentry && !ei->dentry);
+       list_for_each_entry(ei_child, &ei->children, list)
                eventfs_remove_rec(ei_child, level + 1);
-       }
-
-
-       ei->is_freed = 1;
 
-       for (int i = 0; i < ei->nr_entries; i++) {
-               if (ei->d_children[i]) {
-                       /* Children only have dentry if parent does */
-                       WARN_ON_ONCE(!ei->dentry);
-                       unhook_dentry(ei->d_children[i]);
-               }
-       }
-
-       unhook_dentry(ei->dentry);
-
-       list_del_rcu(&ei->list);
-       call_srcu(&eventfs_srcu, &ei->rcu, free_rcu_ei);
+       list_del(&ei->list);
+       free_ei(ei);
 }
 
 /**
@@ -1120,22 +828,12 @@ static void eventfs_remove_rec(struct eventfs_inode *ei, int level)
  */
 void eventfs_remove_dir(struct eventfs_inode *ei)
 {
-       struct dentry *dentry;
-
        if (!ei)
                return;
 
        mutex_lock(&eventfs_mutex);
-       dentry = ei->dentry;
        eventfs_remove_rec(ei, 0);
        mutex_unlock(&eventfs_mutex);
-
-       /*
-        * If any of the ei children has a dentry, then the ei itself
-        * must have a dentry.
-        */
-       if (dentry)
-               simple_recursive_removal(dentry, NULL);
 }
 
 /**
@@ -1148,7 +846,11 @@ void eventfs_remove_events_dir(struct eventfs_inode *ei)
 {
        struct dentry *dentry;
 
-       dentry = ei->dentry;
+       dentry = ei->events_dir;
+       if (!dentry)
+               return;
+
+       ei->events_dir = NULL;
        eventfs_remove_dir(ei);
 
        /*
@@ -1158,5 +860,6 @@ void eventfs_remove_events_dir(struct eventfs_inode *ei)
         * sticks around while the other ei->dentry are created
         * and destroyed dynamically.
         */
+       d_invalidate(dentry);
        dput(dentry);
 }
index ad20e6af938d9b68df7b27e08d44a5351f0d977e..d65ffad4c327ca11a98a8d2073d8e5c77ac138c3 100644 (file)
@@ -38,8 +38,6 @@ static struct inode *tracefs_alloc_inode(struct super_block *sb)
        if (!ti)
                return NULL;
 
-       ti->flags = 0;
-
        return &ti->vfs_inode;
 }
 
@@ -91,6 +89,7 @@ static int tracefs_syscall_mkdir(struct mnt_idmap *idmap,
                                 struct inode *inode, struct dentry *dentry,
                                 umode_t mode)
 {
+       struct tracefs_inode *ti;
        char *name;
        int ret;
 
@@ -98,6 +97,15 @@ static int tracefs_syscall_mkdir(struct mnt_idmap *idmap,
        if (!name)
                return -ENOMEM;
 
+       /*
+        * This is a new directory that does not take the default of
+        * the rootfs. It becomes the default permissions for all the
+        * files and directories underneath it.
+        */
+       ti = get_tracefs(inode);
+       ti->flags |= TRACEFS_INSTANCE_INODE;
+       ti->private = inode;
+
        /*
         * The mkdir call can call the generic functions that create
         * the files within the tracefs system. It is up to the individual
@@ -141,10 +149,76 @@ static int tracefs_syscall_rmdir(struct inode *inode, struct dentry *dentry)
        return ret;
 }
 
-static const struct inode_operations tracefs_dir_inode_operations = {
+static void set_tracefs_inode_owner(struct inode *inode)
+{
+       struct tracefs_inode *ti = get_tracefs(inode);
+       struct inode *root_inode = ti->private;
+
+       /*
+        * If this inode has never been referenced, then update
+        * the permissions to the superblock.
+        */
+       if (!(ti->flags & TRACEFS_UID_PERM_SET))
+               inode->i_uid = root_inode->i_uid;
+
+       if (!(ti->flags & TRACEFS_GID_PERM_SET))
+               inode->i_gid = root_inode->i_gid;
+}
+
+static int tracefs_permission(struct mnt_idmap *idmap,
+                             struct inode *inode, int mask)
+{
+       set_tracefs_inode_owner(inode);
+       return generic_permission(idmap, inode, mask);
+}
+
+static int tracefs_getattr(struct mnt_idmap *idmap,
+                          const struct path *path, struct kstat *stat,
+                          u32 request_mask, unsigned int flags)
+{
+       struct inode *inode = d_backing_inode(path->dentry);
+
+       set_tracefs_inode_owner(inode);
+       generic_fillattr(idmap, request_mask, inode, stat);
+       return 0;
+}
+
+static int tracefs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+                          struct iattr *attr)
+{
+       unsigned int ia_valid = attr->ia_valid;
+       struct inode *inode = d_inode(dentry);
+       struct tracefs_inode *ti = get_tracefs(inode);
+
+       if (ia_valid & ATTR_UID)
+               ti->flags |= TRACEFS_UID_PERM_SET;
+
+       if (ia_valid & ATTR_GID)
+               ti->flags |= TRACEFS_GID_PERM_SET;
+
+       return simple_setattr(idmap, dentry, attr);
+}
+
+static const struct inode_operations tracefs_instance_dir_inode_operations = {
        .lookup         = simple_lookup,
        .mkdir          = tracefs_syscall_mkdir,
        .rmdir          = tracefs_syscall_rmdir,
+       .permission     = tracefs_permission,
+       .getattr        = tracefs_getattr,
+       .setattr        = tracefs_setattr,
+};
+
+static const struct inode_operations tracefs_dir_inode_operations = {
+       .lookup         = simple_lookup,
+       .permission     = tracefs_permission,
+       .getattr        = tracefs_getattr,
+       .setattr        = tracefs_setattr,
+};
+
+static const struct inode_operations tracefs_file_inode_operations = {
+       .permission     = tracefs_permission,
+       .getattr        = tracefs_getattr,
+       .setattr        = tracefs_setattr,
 };
 
 struct inode *tracefs_get_inode(struct super_block *sb)
@@ -183,82 +257,6 @@ struct tracefs_fs_info {
        struct tracefs_mount_opts mount_opts;
 };
 
-static void change_gid(struct dentry *dentry, kgid_t gid)
-{
-       if (!dentry->d_inode)
-               return;
-       dentry->d_inode->i_gid = gid;
-}
-
-/*
- * Taken from d_walk, but without he need for handling renames.
- * Nothing can be renamed while walking the list, as tracefs
- * does not support renames. This is only called when mounting
- * or remounting the file system, to set all the files to
- * the given gid.
- */
-static void set_gid(struct dentry *parent, kgid_t gid)
-{
-       struct dentry *this_parent, *dentry;
-
-       this_parent = parent;
-       spin_lock(&this_parent->d_lock);
-
-       change_gid(this_parent, gid);
-repeat:
-       dentry = d_first_child(this_parent);
-resume:
-       hlist_for_each_entry_from(dentry, d_sib) {
-               struct tracefs_inode *ti;
-
-               /* Note, getdents() can add a cursor dentry with no inode */
-               if (!dentry->d_inode)
-                       continue;
-
-               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-
-               change_gid(dentry, gid);
-
-               /* If this is the events directory, update that too */
-               ti = get_tracefs(dentry->d_inode);
-               if (ti && (ti->flags & TRACEFS_EVENT_INODE))
-                       eventfs_update_gid(dentry, gid);
-
-               if (!hlist_empty(&dentry->d_children)) {
-                       spin_unlock(&this_parent->d_lock);
-                       spin_release(&dentry->d_lock.dep_map, _RET_IP_);
-                       this_parent = dentry;
-                       spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
-                       goto repeat;
-               }
-               spin_unlock(&dentry->d_lock);
-       }
-       /*
-        * All done at this level ... ascend and resume the search.
-        */
-       rcu_read_lock();
-ascend:
-       if (this_parent != parent) {
-               dentry = this_parent;
-               this_parent = dentry->d_parent;
-
-               spin_unlock(&dentry->d_lock);
-               spin_lock(&this_parent->d_lock);
-
-               /* go into the first sibling still alive */
-               hlist_for_each_entry_continue(dentry, d_sib) {
-                       if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) {
-                               rcu_read_unlock();
-                               goto resume;
-                       }
-               }
-               goto ascend;
-       }
-       rcu_read_unlock();
-       spin_unlock(&this_parent->d_lock);
-       return;
-}
-
 static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
 {
        substring_t args[MAX_OPT_ARGS];
@@ -331,10 +329,8 @@ static int tracefs_apply_options(struct super_block *sb, bool remount)
        if (!remount || opts->opts & BIT(Opt_uid))
                inode->i_uid = opts->uid;
 
-       if (!remount || opts->opts & BIT(Opt_gid)) {
-               /* Set all the group ids to the mount option */
-               set_gid(sb->s_root, opts->gid);
-       }
+       if (!remount || opts->opts & BIT(Opt_gid))
+               inode->i_gid = opts->gid;
 
        return 0;
 }
@@ -381,21 +377,30 @@ static const struct super_operations tracefs_super_operations = {
        .show_options   = tracefs_show_options,
 };
 
-static void tracefs_dentry_iput(struct dentry *dentry, struct inode *inode)
+/*
+ * It would be cleaner if eventfs had its own dentry ops.
+ *
+ * Note that d_revalidate is called potentially under RCU,
+ * so it can't take the eventfs mutex etc. It's fine - if
+ * we open a file just as it's marked dead, things will
+ * still work just fine, and just see the old stale case.
+ */
+static void tracefs_d_release(struct dentry *dentry)
 {
-       struct tracefs_inode *ti;
+       if (dentry->d_fsdata)
+               eventfs_d_release(dentry);
+}
 
-       if (!dentry || !inode)
-               return;
+static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       struct eventfs_inode *ei = dentry->d_fsdata;
 
-       ti = get_tracefs(inode);
-       if (ti && ti->flags & TRACEFS_EVENT_INODE)
-               eventfs_set_ei_status_free(ti, dentry);
-       iput(inode);
+       return !(ei && ei->is_freed);
 }
 
 static const struct dentry_operations tracefs_dentry_operations = {
-       .d_iput = tracefs_dentry_iput,
+       .d_revalidate = tracefs_d_revalidate,
+       .d_release = tracefs_d_release,
 };
 
 static int trace_fill_super(struct super_block *sb, void *data, int silent)
@@ -499,73 +504,24 @@ struct dentry *tracefs_end_creating(struct dentry *dentry)
        return dentry;
 }
 
-/**
- * eventfs_start_creating - start the process of creating a dentry
- * @name: Name of the file created for the dentry
- * @parent: The parent dentry where this dentry will be created
- *
- * This is a simple helper function for the dynamically created eventfs
- * files. When the directory of the eventfs files are accessed, their
- * dentries are created on the fly. This function is used to start that
- * process.
- */
-struct dentry *eventfs_start_creating(const char *name, struct dentry *parent)
+/* Find the inode that this will use for default */
+static struct inode *instance_inode(struct dentry *parent, struct inode *inode)
 {
-       struct dentry *dentry;
-       int error;
-
-       /* Must always have a parent. */
-       if (WARN_ON_ONCE(!parent))
-               return ERR_PTR(-EINVAL);
-
-       error = simple_pin_fs(&trace_fs_type, &tracefs_mount,
-                             &tracefs_mount_count);
-       if (error)
-               return ERR_PTR(error);
+       struct tracefs_inode *ti;
 
-       if (unlikely(IS_DEADDIR(parent->d_inode)))
-               dentry = ERR_PTR(-ENOENT);
-       else
-               dentry = lookup_one_len(name, parent, strlen(name));
+       /* If parent is NULL then use root inode */
+       if (!parent)
+               return d_inode(inode->i_sb->s_root);
 
-       if (!IS_ERR(dentry) && dentry->d_inode) {
-               dput(dentry);
-               dentry = ERR_PTR(-EEXIST);
+       /* Find the inode that is flagged as an instance or the root inode */
+       while (!IS_ROOT(parent)) {
+               ti = get_tracefs(d_inode(parent));
+               if (ti->flags & TRACEFS_INSTANCE_INODE)
+                       break;
+               parent = parent->d_parent;
        }
 
-       if (IS_ERR(dentry))
-               simple_release_fs(&tracefs_mount, &tracefs_mount_count);
-
-       return dentry;
-}
-
-/**
- * eventfs_failed_creating - clean up a failed eventfs dentry creation
- * @dentry: The dentry to clean up
- *
- * If after calling eventfs_start_creating(), a failure is detected, the
- * resources created by eventfs_start_creating() needs to be cleaned up. In
- * that case, this function should be called to perform that clean up.
- */
-struct dentry *eventfs_failed_creating(struct dentry *dentry)
-{
-       dput(dentry);
-       simple_release_fs(&tracefs_mount, &tracefs_mount_count);
-       return NULL;
-}
-
-/**
- * eventfs_end_creating - Finish the process of creating a eventfs dentry
- * @dentry: The dentry that has successfully been created.
- *
- * This function is currently just a place holder to match
- * eventfs_start_creating(). In case any synchronization needs to be added,
- * this function will be used to implement that without having to modify
- * the callers of eventfs_start_creating().
- */
-struct dentry *eventfs_end_creating(struct dentry *dentry)
-{
-       return dentry;
+       return d_inode(parent);
 }
 
 /**
@@ -598,6 +554,7 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
                                   struct dentry *parent, void *data,
                                   const struct file_operations *fops)
 {
+       struct tracefs_inode *ti;
        struct dentry *dentry;
        struct inode *inode;
 
@@ -616,7 +573,11 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
        if (unlikely(!inode))
                return tracefs_failed_creating(dentry);
 
+       ti = get_tracefs(inode);
+       ti->private = instance_inode(parent, inode);
+
        inode->i_mode = mode;
+       inode->i_op = &tracefs_file_inode_operations;
        inode->i_fop = fops ? fops : &tracefs_file_operations;
        inode->i_private = data;
        inode->i_uid = d_inode(dentry->d_parent)->i_uid;
@@ -629,6 +590,7 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
 static struct dentry *__create_dir(const char *name, struct dentry *parent,
                                   const struct inode_operations *ops)
 {
+       struct tracefs_inode *ti;
        struct dentry *dentry = tracefs_start_creating(name, parent);
        struct inode *inode;
 
@@ -646,6 +608,9 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
        inode->i_uid = d_inode(dentry->d_parent)->i_uid;
        inode->i_gid = d_inode(dentry->d_parent)->i_gid;
 
+       ti = get_tracefs(inode);
+       ti->private = instance_inode(parent, inode);
+
        /* directory inodes start off with i_nlink == 2 (for "." entry) */
        inc_nlink(inode);
        d_instantiate(dentry, inode);
@@ -676,7 +641,7 @@ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
        if (security_locked_down(LOCKDOWN_TRACEFS))
                return NULL;
 
-       return __create_dir(name, parent, &simple_dir_inode_operations);
+       return __create_dir(name, parent, &tracefs_dir_inode_operations);
 }
 
 /**
@@ -707,7 +672,7 @@ __init struct dentry *tracefs_create_instance_dir(const char *name,
        if (WARN_ON(tracefs_ops.mkdir || tracefs_ops.rmdir))
                return NULL;
 
-       dentry = __create_dir(name, parent, &tracefs_dir_inode_operations);
+       dentry = __create_dir(name, parent, &tracefs_instance_dir_inode_operations);
        if (!dentry)
                return NULL;
 
@@ -752,7 +717,11 @@ static void init_once(void *foo)
 {
        struct tracefs_inode *ti = (struct tracefs_inode *) foo;
 
+       /* inode_init_once() calls memset() on the vfs_inode portion */
        inode_init_once(&ti->vfs_inode);
+
+       /* Zero out the rest */
+       memset_after(ti, 0, vfs_inode);
 }
 
 static int __init tracefs_init(void)
index 42bdeb471a0720c5d96b1830ecf2ec75206e2ae4..beb3dcd0e434207c882bcfe6ec027b6ca3e43526 100644 (file)
@@ -5,12 +5,16 @@
 enum {
        TRACEFS_EVENT_INODE             = BIT(1),
        TRACEFS_EVENT_TOP_INODE         = BIT(2),
+       TRACEFS_GID_PERM_SET            = BIT(3),
+       TRACEFS_UID_PERM_SET            = BIT(4),
+       TRACEFS_INSTANCE_INODE          = BIT(5),
 };
 
 struct tracefs_inode {
+       struct inode            vfs_inode;
+       /* The below gets initialized with memset_after(ti, 0, vfs_inode) */
        unsigned long           flags;
        void                    *private;
-       struct inode            vfs_inode;
 };
 
 /*
@@ -28,42 +32,37 @@ struct eventfs_attr {
 /*
  * struct eventfs_inode - hold the properties of the eventfs directories.
  * @list:      link list into the parent directory
+ * @rcu:       Union with @list for freeing
+ * @children:  link list into the child eventfs_inode
  * @entries:   the array of entries representing the files in the directory
  * @name:      the name of the directory to create
- * @children:  link list into the child eventfs_inode
- * @dentry:     the dentry of the directory
- * @d_parent:   pointer to the parent's dentry
- * @d_children: The array of dentries to represent the files when created
+ * @events_dir: the dentry of the events directory
  * @entry_attrs: Saved mode and ownership of the @d_children
- * @attr:      Saved mode and ownership of eventfs_inode itself
  * @data:      The private data to pass to the callbacks
+ * @attr:      Saved mode and ownership of eventfs_inode itself
  * @is_freed:  Flag set if the eventfs is on its way to be freed
  *                Note if is_freed is set, then dentry is corrupted.
+ * @is_events: Flag set for only the top level "events" directory
  * @nr_entries: The number of items in @entries
+ * @ino:       The saved inode number
  */
 struct eventfs_inode {
-       struct list_head                list;
+       union {
+               struct list_head        list;
+               struct rcu_head         rcu;
+       };
+       struct list_head                children;
        const struct eventfs_entry      *entries;
        const char                      *name;
-       struct list_head                children;
-       struct dentry                   *dentry; /* Check is_freed to access */
-       struct dentry                   *d_parent;
-       struct dentry                   **d_children;
+       struct dentry                   *events_dir;
        struct eventfs_attr             *entry_attrs;
-       struct eventfs_attr             attr;
        void                            *data;
-       /*
-        * Union - used for deletion
-        * @llist:      for calling dput() if needed after RCU
-        * @rcu:        eventfs_inode to delete in RCU
-        */
-       union {
-               struct llist_node       llist;
-               struct rcu_head         rcu;
-       };
+       struct eventfs_attr             attr;
+       struct kref                     kref;
        unsigned int                    is_freed:1;
        unsigned int                    is_events:1;
        unsigned int                    nr_entries:30;
+       unsigned int                    ino;
 };
 
 static inline struct tracefs_inode *get_tracefs(const struct inode *inode)
@@ -75,10 +74,7 @@ struct dentry *tracefs_start_creating(const char *name, struct dentry *parent);
 struct dentry *tracefs_end_creating(struct dentry *dentry);
 struct dentry *tracefs_failed_creating(struct dentry *dentry);
 struct inode *tracefs_get_inode(struct super_block *sb);
-struct dentry *eventfs_start_creating(const char *name, struct dentry *parent);
-struct dentry *eventfs_failed_creating(struct dentry *dentry);
-struct dentry *eventfs_end_creating(struct dentry *dentry);
-void eventfs_update_gid(struct dentry *dentry, kgid_t gid);
-void eventfs_set_ei_status_free(struct tracefs_inode *ti, struct dentry *dentry);
+
+void eventfs_d_release(struct dentry *dentry);
 
 #endif /* _TRACEFS_INTERNAL_H */
index 0d561ecb686943e7063c167ba096030bb95b8e3d..a4a0158f712d3de2637364f006bc3c03bda15911 100644 (file)
@@ -18,7 +18,7 @@
 #include "ubifs.h"
 
 /**
- * ubifs_node_calc_hash - calculate the hash of a UBIFS node
+ * __ubifs_node_calc_hash - calculate the hash of a UBIFS node
  * @c: UBIFS file-system description object
  * @node: the node to calculate a hash for
  * @hash: the returned hash
@@ -507,28 +507,13 @@ out:
  */
 int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac)
 {
-       SHASH_DESC_ON_STACK(shash, c->hmac_tfm);
-       int err;
        const char well_known_message[] = "UBIFS";
 
        if (!ubifs_authenticated(c))
                return 0;
 
-       shash->tfm = c->hmac_tfm;
-
-       err = crypto_shash_init(shash);
-       if (err)
-               return err;
-
-       err = crypto_shash_update(shash, well_known_message,
-                                 sizeof(well_known_message) - 1);
-       if (err < 0)
-               return err;
-
-       err = crypto_shash_final(shash, hmac);
-       if (err)
-               return err;
-       return 0;
+       return crypto_shash_tfm_digest(c->hmac_tfm, well_known_message,
+                                      sizeof(well_known_message) - 1, hmac);
 }
 
 /*
index c4fc1047fc0797f492a8f0cdf596f9f2215fd907..5b3a840098b06dae0d3ca588ae9645c4f8c2504c 100644 (file)
@@ -69,6 +69,14 @@ static int nothing_to_commit(struct ubifs_info *c)
        if (c->zroot.znode && ubifs_zn_dirty(c->zroot.znode))
                return 0;
 
+       /*
+        * Increasing @c->dirty_pn_cnt/@c->dirty_nn_cnt and marking
+        * nnodes/pnodes as dirty in run_gc() could race with following
+        * checking, which leads inconsistent states between @c->nroot
+        * and @c->dirty_pn_cnt/@c->dirty_nn_cnt, holding @c->lp_mutex
+        * to avoid that.
+        */
+       mutex_lock(&c->lp_mutex);
        /*
         * Even though the TNC is clean, the LPT tree may have dirty nodes. For
         * example, this may happen if the budgeting subsystem invoked GC to
@@ -76,12 +84,15 @@ static int nothing_to_commit(struct ubifs_info *c)
         * free space. In this case GC would just change the lprops of this
         * LEB (by turning all space into free space) and unmap it.
         */
-       if (c->nroot && test_bit(DIRTY_CNODE, &c->nroot->flags))
+       if (c->nroot && test_bit(DIRTY_CNODE, &c->nroot->flags)) {
+               mutex_unlock(&c->lp_mutex);
                return 0;
+       }
 
        ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
        ubifs_assert(c, c->dirty_pn_cnt == 0);
        ubifs_assert(c, c->dirty_nn_cnt == 0);
+       mutex_unlock(&c->lp_mutex);
 
        return 1;
 }
index 3b13c648d4900efeca9f17f118f2dbd0c5e1e8be..e413a9cf8ee38b9e0f9fad3d9234653626dbf1d7 100644 (file)
@@ -1234,6 +1234,8 @@ out_cancel:
        dir_ui->ui_size = dir->i_size;
        mutex_unlock(&dir_ui->ui_mutex);
 out_inode:
+       /* Free inode->i_link before inode is marked as bad. */
+       fscrypt_free_inode(inode);
        make_bad_inode(inode);
        iput(inode);
 out_fname:
index 2d2b39f843ce9dba5caf51e6d4f9f7a335a4af0f..5029eb3390a5607892b7eabdb9a862ea10938519 100644 (file)
@@ -318,8 +318,9 @@ static int write_begin_slow(struct address_space *mapping,
  * This is a helper function for 'ubifs_write_begin()' which allocates budget
  * for the operation. The budget is allocated differently depending on whether
  * this is appending, whether the page is dirty or not, and so on. This
- * function leaves the @ui->ui_mutex locked in case of appending. Returns zero
- * in case of success and %-ENOSPC in case of failure.
+ * function leaves the @ui->ui_mutex locked in case of appending.
+ *
+ * Returns: %0 in case of success and %-ENOSPC in case of failure.
  */
 static int allocate_budget(struct ubifs_info *c, struct page *page,
                           struct ubifs_inode *ui, int appending)
@@ -600,7 +601,7 @@ out:
  * @bu: bulk-read information
  * @n: next zbranch slot
  *
- * This function returns %0 on success and a negative error code on failure.
+ * Returns: %0 on success and a negative error code on failure.
  */
 static int populate_page(struct ubifs_info *c, struct page *page,
                         struct bu_info *bu, int *n)
@@ -711,7 +712,7 @@ out_err:
  * @bu: bulk-read information
  * @page1: first page to read
  *
- * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
+ * Returns: %1 if the bulk-read is done, otherwise %0 is returned.
  */
 static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
                              struct page *page1)
@@ -821,7 +822,9 @@ out_bu_off:
  * Some flash media are capable of reading sequentially at faster rates. UBIFS
  * bulk-read facility is designed to take advantage of that, by reading in one
  * go consecutive data nodes that are also located consecutively in the same
- * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
+ * LEB.
+ *
+ * Returns: %1 if a bulk-read is done and %0 otherwise.
  */
 static int ubifs_bulk_read(struct page *page)
 {
@@ -1109,7 +1112,9 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
  * @attr: inode attribute changes description
  *
  * This function implements VFS '->setattr()' call when the inode is truncated
- * to a smaller size. Returns zero in case of success and a negative error code
+ * to a smaller size.
+ *
+ * Returns: %0 in case of success and a negative error code
  * in case of failure.
  */
 static int do_truncation(struct ubifs_info *c, struct inode *inode,
@@ -1215,7 +1220,9 @@ out_budg:
  * @attr: inode attribute changes description
  *
  * This function implements VFS '->setattr()' call for all cases except
- * truncations to smaller size. Returns zero in case of success and a negative
+ * truncations to smaller size.
+ *
+ * Returns: %0 in case of success and a negative
  * error code in case of failure.
  */
 static int do_setattr(struct ubifs_info *c, struct inode *inode,
@@ -1360,6 +1367,8 @@ out:
  * This helper function checks if the inode mtime/ctime should be updated or
  * not. If current values of the time-stamps are within the UBIFS inode time
  * granularity, they are not updated. This is an optimization.
+ *
+ * Returns: %1 if time update is needed, %0 if not
  */
 static inline int mctime_update_needed(const struct inode *inode,
                                       const struct timespec64 *now)
@@ -1375,11 +1384,12 @@ static inline int mctime_update_needed(const struct inode *inode,
 /**
  * ubifs_update_time - update time of inode.
  * @inode: inode to update
- * @time:  timespec structure to hold the current time value
  * @flags: time updating control flag determines updating
  *         which time fields of @inode
  *
  * This function updates time of the inode.
+ *
+ * Returns: %0 for success or a negative error code otherwise.
  */
 int ubifs_update_time(struct inode *inode, int flags)
 {
@@ -1413,7 +1423,9 @@ int ubifs_update_time(struct inode *inode, int flags)
  * @inode: inode to update
  *
  * This function updates mtime and ctime of the inode if it is not equivalent to
- * current time. Returns zero in case of success and a negative error code in
+ * current time.
+ *
+ * Returns: %0 in case of success and a negative error code in
  * case of failure.
  */
 static int update_mctime(struct inode *inode)
index c59d47fe79396f6f20d120516e7187b92f12196f..17da28d6247ace77b6168853a32b4771d141d6d8 100644 (file)
@@ -365,6 +365,7 @@ static void destroy_replay_list(struct ubifs_info *c)
  * @lnum: node logical eraseblock number
  * @offs: node offset
  * @len: node length
+ * @hash: node hash
  * @key: node key
  * @sqnum: sequence number
  * @deletion: non-zero if this is a deletion
@@ -417,6 +418,7 @@ static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
  * @lnum: node logical eraseblock number
  * @offs: node offset
  * @len: node length
+ * @hash: node hash
  * @key: node key
  * @name: directory entry name
  * @nlen: directory entry name length
index 4fcefe5ef7cb5265cc56088157f8ba1418659be1..959551ff9a951474844efbd7029aec1b65faf94e 100644 (file)
@@ -1032,7 +1032,7 @@ static int resolve_userfault_fork(struct userfaultfd_ctx *new,
 {
        int fd;
 
-       fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
+       fd = anon_inode_create_getfd("[userfaultfd]", &userfaultfd_fops, new,
                        O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
        if (fd < 0)
                return fd;
@@ -2260,7 +2260,8 @@ static int new_userfaultfd(int flags)
        /* prevent the mm struct to be freed */
        mmgrab(ctx->mm);
 
-       fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
+       /* Create a new inode so that the LSM can block the creation.  */
+       fd = anon_inode_create_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
                        O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
        if (fd < 0) {
                mmdrop(ctx->mm);
index 9976a00a73f99c46fc27bf6a8e93a390af7b941e..e965a48e7db96f89b782038e0aa363d526c2a42e 100644 (file)
@@ -421,10 +421,10 @@ xfs_attr_complete_op(
        bool                    do_replace = args->op_flags & XFS_DA_OP_REPLACE;
 
        args->op_flags &= ~XFS_DA_OP_REPLACE;
-       if (do_replace) {
-               args->attr_filter &= ~XFS_ATTR_INCOMPLETE;
+       args->attr_filter &= ~XFS_ATTR_INCOMPLETE;
+       if (do_replace)
                return replace_state;
-       }
+
        return XFS_DAS_DONE;
 }
 
index 98aaca933bddb76f62b7927576f5d560ebd61c84..f362345467facd57cc314547142e1093b4e54983 100644 (file)
@@ -3277,7 +3277,7 @@ xfs_bmap_alloc_account(
        struct xfs_bmalloca     *ap)
 {
        bool                    isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
-                                       (ap->flags & XFS_BMAPI_ATTRFORK);
+                                       !(ap->flags & XFS_BMAPI_ATTRFORK);
        uint                    fld;
 
        if (ap->flags & XFS_BMAPI_COWFORK) {
index 31100120b2c586bbcfb5ee3d1d413926400e215a..e31663cb7b4349e173c2b19ac33eb6b10cd59a33 100644 (file)
@@ -1118,20 +1118,6 @@ xfs_rtbitmap_blockcount(
        return howmany_64(rtextents, NBBY * mp->m_sb.sb_blocksize);
 }
 
-/*
- * Compute the maximum level number of the realtime summary file, as defined by
- * mkfs.  The historic use of highbit32 on a 64-bit quantity prohibited correct
- * use of rt volumes with more than 2^32 extents.
- */
-uint8_t
-xfs_compute_rextslog(
-       xfs_rtbxlen_t           rtextents)
-{
-       if (!rtextents)
-               return 0;
-       return xfs_highbit64(rtextents);
-}
-
 /*
  * Compute the number of rtbitmap words needed to populate every block of a
  * bitmap that is large enough to track the given number of rt extents.
index 274dc7dae1faf836217bcac95a859fb9cf510d93..152a66750af554d91a0641ae9a3ed1011ffb7386 100644 (file)
@@ -351,20 +351,6 @@ xfs_rtfree_extent(
 int xfs_rtfree_blocks(struct xfs_trans *tp, xfs_fsblock_t rtbno,
                xfs_filblks_t rtlen);
 
-uint8_t xfs_compute_rextslog(xfs_rtbxlen_t rtextents);
-
-/* Do we support an rt volume having this number of rtextents? */
-static inline bool
-xfs_validate_rtextents(
-       xfs_rtbxlen_t           rtextents)
-{
-       /* No runt rt volumes */
-       if (rtextents == 0)
-               return false;
-
-       return true;
-}
-
 xfs_filblks_t xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t
                rtextents);
 unsigned long long xfs_rtbitmap_wordcount(struct xfs_mount *mp,
@@ -383,8 +369,6 @@ unsigned long long xfs_rtsummary_wordcount(struct xfs_mount *mp,
 # define xfs_rtsummary_read_buf(a,b)                   (-ENOSYS)
 # define xfs_rtbuf_cache_relse(a)                      (0)
 # define xfs_rtalloc_extent_is_free(m,t,s,l,i)         (-ENOSYS)
-# define xfs_compute_rextslog(rtx)                     (0)
-# define xfs_validate_rtextents(rtx)                   (false)
 static inline xfs_filblks_t
 xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t rtextents)
 {
index 4a9e8588f4c98c3647a85682d56fe26620a59ffc..5bb6e2bd6deeed152414cbc8fae5db927f90bdd4 100644 (file)
@@ -1377,3 +1377,17 @@ xfs_validate_stripe_geometry(
        }
        return true;
 }
+
+/*
+ * Compute the maximum level number of the realtime summary file, as defined by
+ * mkfs.  The historic use of highbit32 on a 64-bit quantity prohibited correct
+ * use of rt volumes with more than 2^32 extents.
+ */
+uint8_t
+xfs_compute_rextslog(
+       xfs_rtbxlen_t           rtextents)
+{
+       if (!rtextents)
+               return 0;
+       return xfs_highbit64(rtextents);
+}
index 19134b23c10be3824de6a7949d6ccf9ebdfa8de0..2e8e8d63d4eb2249d148b8f6d50f2a71726911f5 100644 (file)
@@ -38,4 +38,6 @@ extern int    xfs_sb_get_secondary(struct xfs_mount *mp,
 extern bool    xfs_validate_stripe_geometry(struct xfs_mount *mp,
                __s64 sunit, __s64 swidth, int sectorsize, bool silent);
 
+uint8_t xfs_compute_rextslog(xfs_rtbxlen_t rtextents);
+
 #endif /* __XFS_SB_H__ */
index 20b5375f2d9c9ec466ab2cfc0a6482d20e23965b..62e02d5380ad3b47d6dc403a3b1ffba0d202ce43 100644 (file)
@@ -251,4 +251,16 @@ bool xfs_verify_fileoff(struct xfs_mount *mp, xfs_fileoff_t off);
 bool xfs_verify_fileext(struct xfs_mount *mp, xfs_fileoff_t off,
                xfs_fileoff_t len);
 
+/* Do we support an rt volume having this number of rtextents? */
+static inline bool
+xfs_validate_rtextents(
+       xfs_rtbxlen_t           rtextents)
+{
+       /* No runt rt volumes */
+       if (rtextents == 0)
+               return false;
+
+       return true;
+}
+
 #endif /* __XFS_TYPES_H__ */
index 441ca99776527453a19b2e709f2482a758fe5af0..46583517377ffadd57e557897bc050428e84bd4c 100644 (file)
@@ -15,6 +15,7 @@
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_bit.h"
+#include "xfs_sb.h"
 #include "scrub/scrub.h"
 #include "scrub/common.h"
 #include "scrub/repair.h"
index fabd0ed9dfa67637686768dff5d27ef0ee78674d..b1ff4f33324a7481ae9818173187e5a767348c76 100644 (file)
@@ -16,6 +16,7 @@
 #include "xfs_rtbitmap.h"
 #include "xfs_bit.h"
 #include "xfs_bmap.h"
+#include "xfs_sb.h"
 #include "scrub/scrub.h"
 #include "scrub/common.h"
 #include "scrub/trace.h"
index aff20ddd4a9f9cdeeeca1f54f210d19462773a5b..5a2512d20bd07473a872592911ede7246b8c11b7 100644 (file)
@@ -1496,6 +1496,18 @@ xfs_fs_fill_super(
 
        mp->m_super = sb;
 
+       /*
+        * Copy VFS mount flags from the context now that all parameter parsing
+        * is guaranteed to have been completed by either the old mount API or
+        * the newer fsopen/fsconfig API.
+        */
+       if (fc->sb_flags & SB_RDONLY)
+               set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+       if (fc->sb_flags & SB_DIRSYNC)
+               mp->m_features |= XFS_FEAT_DIRSYNC;
+       if (fc->sb_flags & SB_SYNCHRONOUS)
+               mp->m_features |= XFS_FEAT_WSYNC;
+
        error = xfs_fs_validate_params(mp);
        if (error)
                return error;
@@ -1965,6 +1977,11 @@ static const struct fs_context_operations xfs_context_ops = {
        .free        = xfs_fs_free,
 };
 
+/*
+ * WARNING: do not initialise any parameters in this function that depend on
+ * mount option parsing having already been performed as this can be called from
+ * fsopen() before any parameters have been set.
+ */
 static int xfs_init_fs_context(
        struct fs_context       *fc)
 {
@@ -1996,16 +2013,6 @@ static int xfs_init_fs_context(
        mp->m_logbsize = -1;
        mp->m_allocsize_log = 16; /* 64k */
 
-       /*
-        * Copy binary VFS mount flags we are interested in.
-        */
-       if (fc->sb_flags & SB_RDONLY)
-               set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
-       if (fc->sb_flags & SB_DIRSYNC)
-               mp->m_features |= XFS_FEAT_DIRSYNC;
-       if (fc->sb_flags & SB_SYNCHRONOUS)
-               mp->m_features |= XFS_FEAT_WSYNC;
-
        fc->s_fs_info = mp;
        fc->ops = &xfs_context_ops;
 
index 2b3ae51f950df0639f35a17450a3d9b2a7ce9be6..e4d24d3f9abb5e20fcf24f3b6bf88c8d6e632108 100644 (file)
@@ -719,6 +719,8 @@ struct acpi_pci_root {
 
 /* helper */
 
+struct iommu_ops;
+
 bool acpi_dma_supported(const struct acpi_device *adev);
 enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
 int acpi_iommu_fwspec_init(struct device *dev, u32 id,
index 3751ae69432f1285533a61db8e0c31d42ed5fa31..9775384d61c693648c09cd1d99d1f16db964b13f 100644 (file)
@@ -1046,6 +1046,8 @@ struct acpi_madt_generic_interrupt {
 /* ACPI_MADT_ENABLED                    (1)      Processor is usable if set */
 #define ACPI_MADT_PERFORMANCE_IRQ_MODE  (1<<1) /* 01: Performance Interrupt Mode */
 #define ACPI_MADT_VGIC_IRQ_MODE         (1<<2) /* 02: VGIC Maintenance Interrupt mode */
+#define ACPI_MADT_GICC_ONLINE_CAPABLE   (1<<3) /* 03: Processor is online capable  */
+#define ACPI_MADT_GICC_NON_COHERENT     (1<<4) /* 04: GIC redistributor is not coherent */
 
 /* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
 
@@ -1090,21 +1092,27 @@ struct acpi_madt_generic_msi_frame {
 
 struct acpi_madt_generic_redistributor {
        struct acpi_subtable_header header;
-       u16 reserved;           /* reserved - must be zero */
+       u8 flags;
+       u8 reserved;            /* reserved - must be zero */
        u64 base_address;
        u32 length;
 };
 
+#define ACPI_MADT_GICR_NON_COHERENT     (1)
+
 /* 15: Generic Translator (ACPI 6.0) */
 
 struct acpi_madt_generic_translator {
        struct acpi_subtable_header header;
-       u16 reserved;           /* reserved - must be zero */
+       u8 flags;
+       u8 reserved;            /* reserved - must be zero */
        u32 translation_id;
        u64 base_address;
        u32 reserved2;
 };
 
+#define ACPI_MADT_ITS_NON_COHERENT      (1)
+
 /* 16: Multiprocessor wakeup (ACPI 6.4) */
 
 struct acpi_madt_multiproc_wakeup {
index 84ec53ccc450296f264cad5952b4cc26e73dad40..7ee8a179d1036e1d8010b8b18a8f3022e41c1695 100644 (file)
@@ -91,6 +91,12 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 }
 #endif
 
+#ifndef flush_cache_vmap_early
+static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
+{
+}
+#endif
+
 #ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
index 43e18db89c1439fc3aed1ed6a29a003fa66b899d..ad928cce268b40bcf09566c708e16a432f7c7c6d 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef __ASM_GENERIC_CHECKSUM_H
 #define __ASM_GENERIC_CHECKSUM_H
 
+#include <linux/bitops.h>
+
 /*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
@@ -31,9 +33,7 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 static inline __sum16 csum_fold(__wsum csum)
 {
        u32 sum = (__force u32)csum;
-       sum = (sum & 0xffff) + (sum >> 16);
-       sum = (sum & 0xffff) + (sum >> 16);
-       return (__force __sum16)~sum;
+       return (__force __sum16)((~sum - ror32(sum, 16)) >> 16);
 }
 #endif
 
diff --git a/include/dt-bindings/dma/fsl-edma.h b/include/dt-bindings/dma/fsl-edma.h
new file mode 100644 (file)
index 0000000..fd11478
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef _FSL_EDMA_DT_BINDING_H_
+#define _FSL_EDMA_DT_BINDING_H_
+
+/* Receive Channel */
+#define FSL_EDMA_RX            0x1
+
+/* iMX8 audio remote DMA */
+#define FSL_EDMA_REMOTE                0x2
+
+/* FIFO is continue memory region */
+#define FSL_EDMA_MULTI_FIFO    0x4
+
+/* Channel need stick to even channel */
+#define FSL_EDMA_EVEN_CH       0x8
+
+/* Channel need stick to odd channel */
+#define FSL_EDMA_ODD_CH                0x10
+
+#endif
index 118a18b7ff844a357cba99eeb5272bbdc2165f7d..b7165e52b3c687bde0d295e7094cff66f5aebd48 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/property.h>
 #include <linux/uuid.h>
+#include <linux/node.h>
 
 struct irq_domain;
 struct irq_domain_ops;
@@ -431,6 +432,16 @@ int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
 int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
 #endif
 
+#ifdef CONFIG_ACPI_HMAT
+int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord);
+#else
+static inline int acpi_get_genport_coordinates(u32 uid,
+                                              struct access_coordinate *coord)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
 #ifdef CONFIG_ACPI_NUMA
 int acpi_map_pxm_to_node(int pxm);
 int acpi_get_node(acpi_handle handle);
index f6ea2f57d8089688870aed9a962cc742ff44da58..ae0fae70d4bd2c15a7109ba89dcfd5974177a930 100644 (file)
 struct pci_dev;
 
 struct aer_header_log_regs {
-       unsigned int dw0;
-       unsigned int dw1;
-       unsigned int dw2;
-       unsigned int dw3;
+       u32 dw0;
+       u32 dw1;
+       u32 dw2;
+       u32 dw3;
 };
 
 struct aer_capability_regs {
index a1307b58cc2c6d4fbfcbe5c00635935c47a5fd1b..9120de05ead08d61a5a8ee55808c6b049999916c 100644 (file)
 #ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
 #define ASM_ARM_HARDWARE_SERIAL_AMBA_H
 
+#ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#endif
+
 #include <linux/types.h>
 
 /* -------------------------------------------------------------------------------
 #define ZX_UART011_ICR         0x4c
 #define ZX_UART011_DMACR       0x50
 
-#define UART011_DR_OE          (1 << 11)
-#define UART011_DR_BE          (1 << 10)
-#define UART011_DR_PE          (1 << 9)
-#define UART011_DR_FE          (1 << 8)
-
-#define UART01x_RSR_OE                 0x08
-#define UART01x_RSR_BE                 0x04
-#define UART01x_RSR_PE                 0x02
-#define UART01x_RSR_FE                 0x01
-
-#define UART011_FR_RI          0x100
-#define UART011_FR_TXFE                0x080
-#define UART011_FR_RXFF                0x040
-#define UART01x_FR_TXFF                0x020
-#define UART01x_FR_RXFE                0x010
-#define UART01x_FR_BUSY                0x008
-#define UART01x_FR_DCD                 0x004
-#define UART01x_FR_DSR                 0x002
-#define UART01x_FR_CTS                 0x001
+#define UART011_DR_OE          BIT(11)
+#define UART011_DR_BE          BIT(10)
+#define UART011_DR_PE          BIT(9)
+#define UART011_DR_FE          BIT(8)
+
+#define UART01x_RSR_OE         BIT(3)
+#define UART01x_RSR_BE         BIT(2)
+#define UART01x_RSR_PE         BIT(1)
+#define UART01x_RSR_FE         BIT(0)
+
+#define UART011_FR_RI          BIT(8)
+#define UART011_FR_TXFE                BIT(7)
+#define UART011_FR_RXFF                BIT(6)
+#define UART01x_FR_TXFF                (1 << 5)        /* used in ASM */
+#define UART01x_FR_RXFE                BIT(4)
+#define UART01x_FR_BUSY                (1 << 3)        /* used in ASM */
+#define UART01x_FR_DCD         BIT(2)
+#define UART01x_FR_DSR         BIT(1)
+#define UART01x_FR_CTS         BIT(0)
 #define UART01x_FR_TMSK                (UART01x_FR_TXFF + UART01x_FR_BUSY)
 
 /*
  * Some bits of Flag Register on ZTE device have different position from
  * standard ones.
  */
-#define ZX_UART01x_FR_BUSY     0x100
-#define ZX_UART01x_FR_DSR      0x008
-#define ZX_UART01x_FR_CTS      0x002
-#define ZX_UART011_FR_RI       0x001
-
-#define UART011_CR_CTSEN       0x8000  /* CTS hardware flow control */
-#define UART011_CR_RTSEN       0x4000  /* RTS hardware flow control */
-#define UART011_CR_OUT2                0x2000  /* OUT2 */
-#define UART011_CR_OUT1                0x1000  /* OUT1 */
-#define UART011_CR_RTS         0x0800  /* RTS */
-#define UART011_CR_DTR         0x0400  /* DTR */
-#define UART011_CR_RXE         0x0200  /* receive enable */
-#define UART011_CR_TXE         0x0100  /* transmit enable */
-#define UART011_CR_LBE         0x0080  /* loopback enable */
-#define UART010_CR_RTIE                0x0040
-#define UART010_CR_TIE                 0x0020
-#define UART010_CR_RIE                 0x0010
-#define UART010_CR_MSIE                0x0008
-#define ST_UART011_CR_OVSFACT  0x0008  /* Oversampling factor */
-#define UART01x_CR_IIRLP       0x0004  /* SIR low power mode */
-#define UART01x_CR_SIREN       0x0002  /* SIR enable */
-#define UART01x_CR_UARTEN      0x0001  /* UART enable */
-#define UART011_LCRH_SPS       0x80
+#define ZX_UART01x_FR_BUSY     BIT(8)
+#define ZX_UART01x_FR_DSR      BIT(3)
+#define ZX_UART01x_FR_CTS      BIT(1)
+#define ZX_UART011_FR_RI       BIT(0)
+
+#define UART011_CR_CTSEN       BIT(15) /* CTS hardware flow control */
+#define UART011_CR_RTSEN       BIT(14) /* RTS hardware flow control */
+#define UART011_CR_OUT2                BIT(13) /* OUT2 */
+#define UART011_CR_OUT1                BIT(12) /* OUT1 */
+#define UART011_CR_RTS         BIT(11) /* RTS */
+#define UART011_CR_DTR         BIT(10) /* DTR */
+#define UART011_CR_RXE         BIT(9)  /* receive enable */
+#define UART011_CR_TXE         BIT(8)  /* transmit enable */
+#define UART011_CR_LBE         BIT(7)  /* loopback enable */
+#define UART010_CR_RTIE                BIT(6)
+#define UART010_CR_TIE         BIT(5)
+#define UART010_CR_RIE         BIT(4)
+#define UART010_CR_MSIE                BIT(3)
+#define ST_UART011_CR_OVSFACT  BIT(3)  /* Oversampling factor */
+#define UART01x_CR_IIRLP       BIT(2)  /* SIR low power mode */
+#define UART01x_CR_SIREN       BIT(1)  /* SIR enable */
+#define UART01x_CR_UARTEN      BIT(0)  /* UART enable */
+
+#define UART011_LCRH_SPS       BIT(7)
 #define UART01x_LCRH_WLEN_8    0x60
 #define UART01x_LCRH_WLEN_7    0x40
 #define UART01x_LCRH_WLEN_6    0x20
 #define UART01x_LCRH_WLEN_5    0x00
-#define UART01x_LCRH_FEN       0x10
-#define UART01x_LCRH_STP2      0x08
-#define UART01x_LCRH_EPS       0x04
-#define UART01x_LCRH_PEN       0x02
-#define UART01x_LCRH_BRK       0x01
-
-#define ST_UART011_DMAWM_RX_1  (0 << 3)
-#define ST_UART011_DMAWM_RX_2  (1 << 3)
-#define ST_UART011_DMAWM_RX_4  (2 << 3)
-#define ST_UART011_DMAWM_RX_8  (3 << 3)
-#define ST_UART011_DMAWM_RX_16 (4 << 3)
-#define ST_UART011_DMAWM_RX_32 (5 << 3)
-#define ST_UART011_DMAWM_RX_48 (6 << 3)
-#define ST_UART011_DMAWM_TX_1  0
-#define ST_UART011_DMAWM_TX_2  1
-#define ST_UART011_DMAWM_TX_4  2
-#define ST_UART011_DMAWM_TX_8  3
-#define ST_UART011_DMAWM_TX_16 4
-#define ST_UART011_DMAWM_TX_32 5
-#define ST_UART011_DMAWM_TX_48 6
-
-#define UART010_IIR_RTIS       0x08
-#define UART010_IIR_TIS                0x04
-#define UART010_IIR_RIS                0x02
-#define UART010_IIR_MIS                0x01
-
-#define UART011_IFLS_RX1_8     (0 << 3)
-#define UART011_IFLS_RX2_8     (1 << 3)
-#define UART011_IFLS_RX4_8     (2 << 3)
-#define UART011_IFLS_RX6_8     (3 << 3)
-#define UART011_IFLS_RX7_8     (4 << 3)
-#define UART011_IFLS_TX1_8     (0 << 0)
-#define UART011_IFLS_TX2_8     (1 << 0)
-#define UART011_IFLS_TX4_8     (2 << 0)
-#define UART011_IFLS_TX6_8     (3 << 0)
-#define UART011_IFLS_TX7_8     (4 << 0)
+#define UART01x_LCRH_FEN       BIT(4)
+#define UART01x_LCRH_STP2      BIT(3)
+#define UART01x_LCRH_EPS       BIT(2)
+#define UART01x_LCRH_PEN       BIT(1)
+#define UART01x_LCRH_BRK       BIT(0)
+
+#define ST_UART011_DMAWM_RX    GENMASK(5, 3)
+#define ST_UART011_DMAWM_RX_1  FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 0)
+#define ST_UART011_DMAWM_RX_2  FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 1)
+#define ST_UART011_DMAWM_RX_4  FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 2)
+#define ST_UART011_DMAWM_RX_8  FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 3)
+#define ST_UART011_DMAWM_RX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 4)
+#define ST_UART011_DMAWM_RX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 5)
+#define ST_UART011_DMAWM_RX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 6)
+#define ST_UART011_DMAWM_TX    GENMASK(2, 0)
+#define ST_UART011_DMAWM_TX_1  FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 0)
+#define ST_UART011_DMAWM_TX_2  FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 1)
+#define ST_UART011_DMAWM_TX_4  FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 2)
+#define ST_UART011_DMAWM_TX_8  FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 3)
+#define ST_UART011_DMAWM_TX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 4)
+#define ST_UART011_DMAWM_TX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 5)
+#define ST_UART011_DMAWM_TX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 6)
+
+#define UART010_IIR_RTIS       BIT(3)
+#define UART010_IIR_TIS                BIT(2)
+#define UART010_IIR_RIS                BIT(1)
+#define UART010_IIR_MIS                BIT(0)
+
+#define UART011_IFLS_RXIFLSEL  GENMASK(5, 3)
+#define UART011_IFLS_RX1_8     FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 0)
+#define UART011_IFLS_RX2_8     FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 1)
+#define UART011_IFLS_RX4_8     FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 2)
+#define UART011_IFLS_RX6_8     FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 3)
+#define UART011_IFLS_RX7_8     FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 4)
+#define UART011_IFLS_TXIFLSEL  GENMASK(2, 0)
+#define UART011_IFLS_TX1_8     FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 0)
+#define UART011_IFLS_TX2_8     FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 1)
+#define UART011_IFLS_TX4_8     FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 2)
+#define UART011_IFLS_TX6_8     FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 3)
+#define UART011_IFLS_TX7_8     FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 4)
 /* special values for ST vendor with deeper fifo */
-#define UART011_IFLS_RX_HALF   (5 << 3)
-#define UART011_IFLS_TX_HALF   (5 << 0)
-
-#define UART011_OEIM           (1 << 10)       /* overrun error interrupt mask */
-#define UART011_BEIM           (1 << 9)        /* break error interrupt mask */
-#define UART011_PEIM           (1 << 8)        /* parity error interrupt mask */
-#define UART011_FEIM           (1 << 7)        /* framing error interrupt mask */
-#define UART011_RTIM           (1 << 6)        /* receive timeout interrupt mask */
-#define UART011_TXIM           (1 << 5)        /* transmit interrupt mask */
-#define UART011_RXIM           (1 << 4)        /* receive interrupt mask */
-#define UART011_DSRMIM         (1 << 3)        /* DSR interrupt mask */
-#define UART011_DCDMIM         (1 << 2)        /* DCD interrupt mask */
-#define UART011_CTSMIM         (1 << 1)        /* CTS interrupt mask */
-#define UART011_RIMIM          (1 << 0)        /* RI interrupt mask */
-
-#define UART011_OEIS           (1 << 10)       /* overrun error interrupt status */
-#define UART011_BEIS           (1 << 9)        /* break error interrupt status */
-#define UART011_PEIS           (1 << 8)        /* parity error interrupt status */
-#define UART011_FEIS           (1 << 7)        /* framing error interrupt status */
-#define UART011_RTIS           (1 << 6)        /* receive timeout interrupt status */
-#define UART011_TXIS           (1 << 5)        /* transmit interrupt status */
-#define UART011_RXIS           (1 << 4)        /* receive interrupt status */
-#define UART011_DSRMIS         (1 << 3)        /* DSR interrupt status */
-#define UART011_DCDMIS         (1 << 2)        /* DCD interrupt status */
-#define UART011_CTSMIS         (1 << 1)        /* CTS interrupt status */
-#define UART011_RIMIS          (1 << 0)        /* RI interrupt status */
-
-#define UART011_OEIC           (1 << 10)       /* overrun error interrupt clear */
-#define UART011_BEIC           (1 << 9)        /* break error interrupt clear */
-#define UART011_PEIC           (1 << 8)        /* parity error interrupt clear */
-#define UART011_FEIC           (1 << 7)        /* framing error interrupt clear */
-#define UART011_RTIC           (1 << 6)        /* receive timeout interrupt clear */
-#define UART011_TXIC           (1 << 5)        /* transmit interrupt clear */
-#define UART011_RXIC           (1 << 4)        /* receive interrupt clear */
-#define UART011_DSRMIC         (1 << 3)        /* DSR interrupt clear */
-#define UART011_DCDMIC         (1 << 2)        /* DCD interrupt clear */
-#define UART011_CTSMIC         (1 << 1)        /* CTS interrupt clear */
-#define UART011_RIMIC          (1 << 0)        /* RI interrupt clear */
-
-#define UART011_DMAONERR       (1 << 2)        /* disable dma on error */
-#define UART011_TXDMAE         (1 << 1)        /* enable transmit dma */
-#define UART011_RXDMAE         (1 << 0)        /* enable receive dma */
-
-#define UART01x_RSR_ANY                (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE)
-#define UART01x_FR_MODEM_ANY   (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
+#define UART011_IFLS_RX_HALF   FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 5)
+#define UART011_IFLS_TX_HALF   FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 5)
+
+#define UART011_OEIM           BIT(10) /* overrun error interrupt mask */
+#define UART011_BEIM           BIT(9)  /* break error interrupt mask */
+#define UART011_PEIM           BIT(8)  /* parity error interrupt mask */
+#define UART011_FEIM           BIT(7)  /* framing error interrupt mask */
+#define UART011_RTIM           BIT(6)  /* receive timeout interrupt mask */
+#define UART011_TXIM           BIT(5)  /* transmit interrupt mask */
+#define UART011_RXIM           BIT(4)  /* receive interrupt mask */
+#define UART011_DSRMIM         BIT(3)  /* DSR interrupt mask */
+#define UART011_DCDMIM         BIT(2)  /* DCD interrupt mask */
+#define UART011_CTSMIM         BIT(1)  /* CTS interrupt mask */
+#define UART011_RIMIM          BIT(0)  /* RI interrupt mask */
+
+#define UART011_OEIS           BIT(10) /* overrun error interrupt status */
+#define UART011_BEIS           BIT(9)  /* break error interrupt status */
+#define UART011_PEIS           BIT(8)  /* parity error interrupt status */
+#define UART011_FEIS           BIT(7)  /* framing error interrupt status */
+#define UART011_RTIS           BIT(6)  /* receive timeout interrupt status */
+#define UART011_TXIS           BIT(5)  /* transmit interrupt status */
+#define UART011_RXIS           BIT(4)  /* receive interrupt status */
+#define UART011_DSRMIS         BIT(3)  /* DSR interrupt status */
+#define UART011_DCDMIS         BIT(2)  /* DCD interrupt status */
+#define UART011_CTSMIS         BIT(1)  /* CTS interrupt status */
+#define UART011_RIMIS          BIT(0)  /* RI interrupt status */
+
+#define UART011_OEIC           BIT(10) /* overrun error interrupt clear */
+#define UART011_BEIC           BIT(9)  /* break error interrupt clear */
+#define UART011_PEIC           BIT(8)  /* parity error interrupt clear */
+#define UART011_FEIC           BIT(7)  /* framing error interrupt clear */
+#define UART011_RTIC           BIT(6)  /* receive timeout interrupt clear */
+#define UART011_TXIC           BIT(5)  /* transmit interrupt clear */
+#define UART011_RXIC           BIT(4)  /* receive interrupt clear */
+#define UART011_DSRMIC         BIT(3)  /* DSR interrupt clear */
+#define UART011_DCDMIC         BIT(2)  /* DCD interrupt clear */
+#define UART011_CTSMIC         BIT(1)  /* CTS interrupt clear */
+#define UART011_RIMIC          BIT(0)  /* RI interrupt clear */
+
+#define UART011_DMAONERR       BIT(2)  /* disable dma on error */
+#define UART011_TXDMAE         BIT(1)  /* enable transmit dma */
+#define UART011_RXDMAE         BIT(0)  /* enable receive dma */
+
+#define UART01x_RSR_ANY                (UART01x_RSR_OE | UART01x_RSR_BE | UART01x_RSR_PE | UART01x_RSR_FE)
+#define UART01x_FR_MODEM_ANY   (UART01x_FR_DCD | UART01x_FR_DSR | UART01x_FR_CTS)
 
 #ifndef __ASSEMBLY__
 struct amba_device; /* in uncompress this is included but amba/bus.h is not */
@@ -220,8 +229,8 @@ struct amba_pl011_data {
        bool dma_rx_poll_enable;
        unsigned int dma_rx_poll_rate;
        unsigned int dma_rx_poll_timeout;
-        void (*init) (void);
-       void (*exit) (void);
+       void (*init)(void);
+       void (*exit)(void);
 };
 #endif
 
index 5deaddbd79278fb9954833da36a3c5444d4c6ead..93a5f16d03f3f38612d37f0bcef56e6948912413 100644 (file)
@@ -15,13 +15,13 @@ struct inode;
 struct file *anon_inode_getfile(const char *name,
                                const struct file_operations *fops,
                                void *priv, int flags);
-struct file *anon_inode_getfile_secure(const char *name,
+struct file *anon_inode_create_getfile(const char *name,
                                       const struct file_operations *fops,
                                       void *priv, int flags,
                                       const struct inode *context_inode);
 int anon_inode_getfd(const char *name, const struct file_operations *fops,
                     void *priv, int flags);
-int anon_inode_getfd_secure(const char *name,
+int anon_inode_create_getfd(const char *name,
                            const struct file_operations *fops,
                            void *priv, int flags,
                            const struct inode *context_inode);
index ae12696ec492c67339409904bb612e9fdc372689..2ad261082bba5f6f0049fa1c642b6ff057f32b5a 100644 (file)
@@ -141,8 +141,6 @@ struct bdi_writeback {
        struct delayed_work dwork;      /* work item used for writeback */
        struct delayed_work bw_dwork;   /* work item used for bandwidth estimate */
 
-       unsigned long dirty_sleep;      /* last wait */
-
        struct list_head bdi_node;      /* anchored at bdi->wb_list */
 
 #ifdef CONFIG_CGROUP_WRITEBACK
@@ -179,6 +177,11 @@ struct backing_dev_info {
         * any dirty wbs, which is depended upon by bdi_has_dirty().
         */
        atomic_long_t tot_write_bandwidth;
+       /*
+        * Jiffies when last process was dirty throttled on this bdi. Used by
+        * blk-wbt.
+        */
+       unsigned long last_bdp_sleep;
 
        struct bdi_writeback wb;  /* the root writeback info for this bdi */
        struct list_head wb_list; /* list of all wbs */
index ec4db73e5f4ec42409c38d228dcf3a9d9c42c184..875d792bffff827aa2f489a7aa1b631810750b10 100644 (file)
@@ -286,6 +286,11 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
 {
        struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
 
+       if (unlikely(i >= bio->bi_vcnt)) {
+               fi->folio = NULL;
+               return;
+       }
+
        fi->folio = page_folio(bvec->bv_page);
        fi->offset = bvec->bv_offset +
                        PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
@@ -303,10 +308,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
                fi->offset = 0;
                fi->length = min(folio_size(fi->folio), fi->_seg_count);
                fi->_next = folio_next(fi->folio);
-       } else if (fi->_i + 1 < bio->bi_vcnt) {
-               bio_first_folio(fi, bio, fi->_i + 1);
        } else {
-               fi->folio = NULL;
+               bio_first_folio(fi, bio, fi->_i + 1);
        }
 }
 
index a676e116085f331ed8bb03208ce7f744a8376f21..7a8150a5f051339f680b9df83fa78da48b8c8af1 100644 (file)
@@ -391,9 +391,6 @@ struct blk_mq_hw_ctx {
         */
        struct blk_mq_tags      *sched_tags;
 
-       /** @run: Number of dispatched requests. */
-       unsigned long           run;
-
        /** @numa_node: NUMA node the storage adapter has been connected to. */
        unsigned int            numa_node;
        /** @queue_num: Index of this hardware queue. */
index 59d404e22814e885ae8429a361f2f43bed3b8f30..cf5c6ff489812e1cabe102f89a69c561319f6e99 100644 (file)
@@ -512,7 +512,7 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
 int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
                                struct module *owner);
 struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
-const struct btf_member *
+const struct btf_type *
 btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
                      const struct btf_type *t, enum bpf_prog_type prog_type,
                      int arg);
index 94ad2c9017c9d839f22769fc18e9560f6b5d93dd..6355a36a3f8151f96c17d9d343903bd715dc157e 100644 (file)
@@ -113,6 +113,7 @@ struct cdx_controller {
  * @dev_num: Device number for this device
  * @res: array of MMIO region entries
  * @res_attr: resource binary attribute
+ * @debugfs_dir: debugfs directory for this device
  * @res_count: number of valid MMIO regions
  * @dma_mask: Default DMA mask
  * @flags: CDX device flags
@@ -135,6 +136,8 @@ struct cdx_device {
        u8 bus_num;
        u8 dev_num;
        struct resource res[MAX_CDX_DEV_RESOURCES];
+       struct bin_attribute *res_attr[MAX_CDX_DEV_RESOURCES];
+       struct dentry *debugfs_dir;
        u8 res_count;
        u64 dma_mask;
        u16 flags;
@@ -147,6 +150,15 @@ struct cdx_device {
 #define to_cdx_device(_dev) \
        container_of(_dev, struct cdx_device, dev)
 
+#define cdx_resource_start(dev, num)   ((dev)->res[(num)].start)
+#define cdx_resource_end(dev, num)     ((dev)->res[(num)].end)
+#define cdx_resource_flags(dev, num)   ((dev)->res[(num)].flags)
+#define cdx_resource_len(dev, num) \
+       ((cdx_resource_start((dev), (num)) == 0 &&      \
+         cdx_resource_end((dev), (num)) ==             \
+         cdx_resource_start((dev), (num))) ? 0 :       \
+        (cdx_resource_end((dev), (num)) -              \
+         cdx_resource_start((dev), (num)) + 1))
 /**
  * struct cdx_driver - CDX device driver
  * @driver: Generic device driver
index 2eaaabbe98cb64d3f64a698a6f527e244649ca4e..1717cc57cdacd3532e5de7d35f1c2d6eb4e1ef5b 100644 (file)
@@ -283,7 +283,7 @@ struct ceph_msg {
        struct kref kref;
        bool more_to_follow;
        bool needs_out_seq;
-       bool sparse_read;
+       u64 sparse_read_total;
        int front_alloc_len;
 
        struct ceph_msgpool *pool;
index b8610e9d2471f5a7928e8d1b62a418e491ea575d..f66f6aac74f6f108ffba40b62159e047a184b732 100644 (file)
@@ -45,6 +45,7 @@ enum ceph_sparse_read_state {
        CEPH_SPARSE_READ_HDR    = 0,
        CEPH_SPARSE_READ_EXTENTS,
        CEPH_SPARSE_READ_DATA_LEN,
+       CEPH_SPARSE_READ_DATA_PRE,
        CEPH_SPARSE_READ_DATA,
 };
 
@@ -64,7 +65,7 @@ struct ceph_sparse_read {
        u64                             sr_req_len;  /* orig request length */
        u64                             sr_pos;      /* current pos in buffer */
        int                             sr_index;    /* current extent index */
-       __le32                          sr_datalen;  /* length of actual data */
+       u32                             sr_datalen;  /* length of actual data */
        u32                             sr_count;    /* extent count in reply */
        int                             sr_ext_len;  /* length of extent array */
        struct ceph_sparse_extent       *sr_extent;  /* extent array */
@@ -572,9 +573,12 @@ int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt);
  */
 #define CEPH_SPARSE_EXT_ARRAY_INITIAL  16
 
-static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op)
+static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
 {
-       return __ceph_alloc_sparse_ext_map(op, CEPH_SPARSE_EXT_ARRAY_INITIAL);
+       if (!cnt)
+               cnt = CEPH_SPARSE_EXT_ARRAY_INITIAL;
+
+       return __ceph_alloc_sparse_ext_map(op, cnt);
 }
 
 extern void ceph_osdc_get_request(struct ceph_osd_request *req);
index aebb65bf95a7988dfe8cf9cb1b5ec0945640ced3..c1a963be7d289e6edafba98f1d7d0236ee4f69f1 100644 (file)
                __builtin_unreachable();        \
        } while (0)
 
+/*
+ * GCC 'asm goto' with outputs miscompiles certain code sequences:
+ *
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110420
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110422
+ *
+ * Work it around via the same compiler barrier quirk that we used
+ * to use for the old 'asm goto' workaround.
+ *
+ * Also, always mark such 'asm goto' statements as volatile: all
+ * asm goto statements are supposed to be volatile as per the
+ * documentation, but some versions of gcc didn't actually do
+ * that for asms with outputs:
+ *
+ *    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619
+ */
+#define asm_goto_output(x...) \
+       do { asm volatile goto(x); asm (""); } while (0)
+
 #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
 #define __HAVE_BUILTIN_BSWAP32__
 #define __HAVE_BUILTIN_BSWAP64__
index 6f1ca49306d2f7e7b51817fc579b82a85246736d..663d8791c871a7310467ea1b5417b65e58db604f 100644 (file)
@@ -362,8 +362,8 @@ struct ftrace_likely_data {
 #define __member_size(p)       __builtin_object_size(p, 1)
 #endif
 
-#ifndef asm_volatile_goto
-#define asm_volatile_goto(x...) asm goto(x)
+#ifndef asm_goto_output
+#define asm_goto_output(x...) asm goto(x)
 #endif
 
 #ifdef CONFIG_CC_HAS_ASM_INLINE
index 2566a1baa736a27127530e58be0f045b48084af3..dd00cc918a926e862d887869acaf9c6fc723a76b 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/device.h>
 
 /* drivers/base/power/container.c */
-extern struct bus_type container_subsys;
+extern const struct bus_type container_subsys;
 
 struct container_dev {
        struct device dev;
index a269fffaf991ceee721f00b19c2ab838615f23e7..a4cb7dd6ca2374c91d9cee002b04024bba0ada7a 100644 (file)
@@ -64,6 +64,7 @@ enum coresight_dev_subtype_source {
        CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
        CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
        CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+       CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM,
        CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS,
 };
 
index c1a7dc3251215a5ba0e982568a746ff5b04602d1..265b0f8fc0b3c876191ba94bbc2d1d9dd66dd848 100644 (file)
@@ -90,6 +90,29 @@ enum {
        GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E,   \
                  0x72, 0x2D, 0xEB, 0x41)
 
+/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CPER_SEC_CXL_GEN_MEDIA_GUID                                    \
+       GUID_INIT(0xfbcd0a77, 0xc260, 0x417f,                           \
+                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
+/*
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CPER_SEC_CXL_DRAM_GUID                                         \
+       GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,                           \
+                 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CPER_SEC_CXL_MEM_MODULE_GUID                                   \
+       GUID_INIT(0xfe927475, 0xdd59, 0x4339,                           \
+                 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
+
 /*
  * Flags bits definitions for flags in struct cper_record_header
  * If set, the error has been recovered
index fc8094419084f6b77b192d2f40fb59dcc056ae92..dcb89c9871640f0a92c85c5f16d94a8e199c3a23 100644 (file)
@@ -80,6 +80,7 @@ extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
                                 const struct attribute_group **groups,
                                 const char *fmt, ...);
+extern bool arch_cpu_is_hotpluggable(int cpu);
 extern int arch_register_cpu(int cpu);
 extern void arch_unregister_cpu(int cpu);
 #ifdef CONFIG_HOTPLUG_CPU
@@ -88,6 +89,10 @@ extern ssize_t arch_cpu_probe(const char *, size_t);
 extern ssize_t arch_cpu_release(const char *, size_t);
 #endif
 
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+DECLARE_PER_CPU(struct cpu, cpu_devices);
+#endif
+
 /*
  * These states are not related to the core CPU hotplug mechanism. They are
  * used by various (sub)architectures to track internal state
diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h
new file mode 100644 (file)
index 0000000..91125ec
--- /dev/null
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Intel Corporation. */
+#ifndef _LINUX_CXL_EVENT_H
+#define _LINUX_CXL_EVENT_H
+
+/*
+ * Common Event Record Format
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_hdr {
+       u8 length;
+       u8 flags[3];
+       __le16 handle;
+       __le16 related_handle;
+       __le64 timestamp;
+       u8 maint_op_class;
+       u8 reserved[15];
+} __packed;
+
+#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
+struct cxl_event_generic {
+       struct cxl_event_record_hdr hdr;
+       u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
+} __packed;
+
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
+struct cxl_event_gen_media {
+       struct cxl_event_record_hdr hdr;
+       __le64 phys_addr;
+       u8 descriptor;
+       u8 type;
+       u8 transaction_type;
+       u8 validity_flags[2];
+       u8 channel;
+       u8 rank;
+       u8 device[3];
+       u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+       u8 reserved[46];
+} __packed;
+
+/*
+ * DRAM Event Record - DER
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CXL_EVENT_DER_CORRECTION_MASK_SIZE     0x20
+struct cxl_event_dram {
+       struct cxl_event_record_hdr hdr;
+       __le64 phys_addr;
+       u8 descriptor;
+       u8 type;
+       u8 transaction_type;
+       u8 validity_flags[2];
+       u8 channel;
+       u8 rank;
+       u8 nibble_mask[3];
+       u8 bank_group;
+       u8 bank;
+       u8 row[3];
+       u8 column[2];
+       u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
+       u8 reserved[0x17];
+} __packed;
+
+/*
+ * Get Health Info Record
+ * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
+ */
+struct cxl_get_health_info {
+       u8 health_status;
+       u8 media_status;
+       u8 add_status;
+       u8 life_used;
+       u8 device_temp[2];
+       u8 dirty_shutdown_cnt[4];
+       u8 cor_vol_err_cnt[4];
+       u8 cor_per_err_cnt[4];
+} __packed;
+
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+struct cxl_event_mem_module {
+       struct cxl_event_record_hdr hdr;
+       u8 event_type;
+       struct cxl_get_health_info info;
+       u8 reserved[0x3d];
+} __packed;
+
+union cxl_event {
+       struct cxl_event_generic generic;
+       struct cxl_event_gen_media gen_media;
+       struct cxl_event_dram dram;
+       struct cxl_event_mem_module mem_module;
+} __packed;
+
+/*
+ * Common Event Record Format; in event logs
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_raw {
+       uuid_t id;
+       union cxl_event event;
+} __packed;
+
+enum cxl_event_type {
+       CXL_CPER_EVENT_GENERIC,
+       CXL_CPER_EVENT_GEN_MEDIA,
+       CXL_CPER_EVENT_DRAM,
+       CXL_CPER_EVENT_MEM_MODULE,
+};
+
+#define CPER_CXL_DEVICE_ID_VALID               BIT(0)
+#define CPER_CXL_DEVICE_SN_VALID               BIT(1)
+#define CPER_CXL_COMP_EVENT_LOG_VALID          BIT(2)
+struct cxl_cper_event_rec {
+       struct {
+               u32 length;
+               u64 validation_bits;
+               struct cper_cxl_event_devid {
+                       u16 vendor_id;
+                       u16 device_id;
+                       u8 func_num;
+                       u8 device_num;
+                       u8 bus_num;
+                       u16 segment_num;
+                       u16 slot_num; /* bits 2:0 reserved */
+                       u8 reserved;
+               } __packed device_id;
+               struct cper_cxl_event_sn {
+                       u32 lower_dw;
+                       u32 upper_dw;
+               } __packed dev_serial_num;
+       } __packed hdr;
+
+       union cxl_event event;
+} __packed;
+
+typedef void (*cxl_cper_callback)(enum cxl_event_type type,
+                                 struct cxl_cper_event_rec *rec);
+
+#ifdef CONFIG_ACPI_APEI_GHES
+int cxl_cper_register_callback(cxl_cper_callback callback);
+int cxl_cper_unregister_callback(cxl_cper_callback callback);
+#else
+static inline int cxl_cper_register_callback(cxl_cper_callback callback)
+{
+       return 0;
+}
+
+static inline int cxl_cper_unregister_callback(cxl_cper_callback callback)
+{
+       return 0;
+}
+#endif
+
+#endif /* _LINUX_CXL_EVENT_H */
index 6c83294395ac08a0aa4a61f5de74c225b29624e6..97c4b046c09d9464243c81f294724985dc4a292a 100644 (file)
@@ -42,7 +42,6 @@ struct class;
 struct subsys_private;
 struct device_node;
 struct fwnode_handle;
-struct iommu_ops;
 struct iommu_group;
 struct dev_pin_info;
 struct dev_iommu;
@@ -63,7 +62,7 @@ struct msi_device_data;
  */
 struct subsys_interface {
        const char *name;
-       struct bus_type *subsys;
+       const struct bus_type *subsys;
        struct list_head node;
        int (*add_dev)(struct device *dev, struct subsys_interface *sif);
        void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
@@ -72,9 +71,9 @@ struct subsys_interface {
 int subsys_interface_register(struct subsys_interface *sif);
 void subsys_interface_unregister(struct subsys_interface *sif);
 
-int subsys_system_register(struct bus_type *subsys,
+int subsys_system_register(const struct bus_type *subsys,
                           const struct attribute_group **groups);
-int subsys_virtual_register(struct bus_type *subsys,
+int subsys_virtual_register(const struct bus_type *subsys,
                            const struct attribute_group **groups);
 
 /*
@@ -662,7 +661,6 @@ struct device_physical_location {
  * @id:                device instance
  * @devres_lock: Spinlock to protect the resource of the device.
  * @devres_head: The resources list of the device.
- * @knode_class: The node used to add the device to the class list.
  * @class:     The class of the device.
  * @groups:    Optional attribute groups.
  * @release:   Callback to free the device after all references have
@@ -1073,7 +1071,6 @@ int device_rename(struct device *dev, const char *new_name);
 int device_move(struct device *dev, struct device *new_parent,
                enum dpm_order dpm_order);
 int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
-int device_is_dependent(struct device *dev, void *target);
 
 static inline bool device_supports_offline(struct device *dev)
 {
index ae10c432275437dec02f431278f976e7059446a0..5ef4ec1c36c3b9d7d8e514cf0f46ac2f14bc66f6 100644 (file)
@@ -62,9 +62,6 @@ struct fwnode_handle;
  *                     this bus.
  * @pm:                Power management operations of this bus, callback the specific
  *             device driver's pm-ops.
- * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
- *              driver implementations to a bus and allow the driver to do
- *              bus-specific setup
  * @need_parent_lock:  When probing or removing a device on this bus, the
  *                     device core should lock the device's parent.
  *
@@ -104,8 +101,6 @@ struct bus_type {
 
        const struct dev_pm_ops *pm;
 
-       const struct iommu_ops *iommu_ops;
-
        bool need_parent_lock;
 };
 
@@ -232,7 +227,7 @@ bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev)
 
 int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
                     void *data, int (*fn)(struct device_driver *, void *));
-void bus_sort_breadthfirst(struct bus_type *bus,
+void bus_sort_breadthfirst(const struct bus_type *bus,
                           int (*compare)(const struct device *a,
                                          const struct device *b));
 /*
index abf3d3bfb6fe4dc3f29e26c599a8b18a676793ba..c576b49c55c22ea0684fc480ba1e00ed64351944 100644 (file)
@@ -40,8 +40,6 @@ struct fwnode_handle;
  *             for the devices belonging to the class. Usually tied to
  *             device's namespace.
  * @pm:                The default device power management operations of this class.
- * @p:         The private data of the driver core, no one other than the
- *             driver core can touch this.
  *
  * A class is a higher-level view of a device that abstracts out low-level
  * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
index f2fc203fb8a1a253d3abad99402d9e7da148d2ea..4abc60f04209281bf8af6905c4ec3d3bb6b531b5 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/slab.h>
 
 struct cma;
+struct iommu_ops;
 
 /*
  * Values for struct dma_map_ops.flags:
@@ -426,10 +427,10 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-               const struct iommu_ops *iommu, bool coherent);
+               bool coherent);
 #else
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-               u64 size, const struct iommu_ops *iommu, bool coherent)
+               u64 size, bool coherent)
 {
 }
 #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
@@ -443,10 +444,10 @@ static inline void arch_teardown_dma_ops(struct device *dev)
 #endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
 
 #ifdef CONFIG_DMA_API_DEBUG
-void dma_debug_add_bus(struct bus_type *bus);
+void dma_debug_add_bus(const struct bus_type *bus);
 void debug_dma_dump_mappings(struct device *dev);
 #else
-static inline void dma_debug_add_bus(struct bus_type *bus)
+static inline void dma_debug_add_bus(const struct bus_type *bus)
 {
 }
 static inline void debug_dma_dump_mappings(struct device *dev)
index 3df70d6131c8fee686ffbd8ecfc7e7c432370bac..752dbde4cec1f8073e225961a41bc91435583350 100644 (file)
@@ -953,7 +953,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 
 static inline bool is_slave_direction(enum dma_transfer_direction direction)
 {
-       return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+       return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
+              (direction == DMA_DEV_TO_DEV);
 }
 
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
index 1174beb94ab6da27151cf3a410be892b6fb2d43f..b4ee8961e6236c3ed02b1c67a817c8cee9882c33 100644 (file)
@@ -30,7 +30,7 @@ struct device;
 
 extern int edac_op_state;
 
-struct bus_type *edac_get_sysfs_subsys(void);
+const struct bus_type *edac_get_sysfs_subsys(void);
 
 static inline void opstate_init(void)
 {
@@ -495,7 +495,7 @@ struct edac_raw_error_desc {
  */
 struct mem_ctl_info {
        struct device                   dev;
-       struct bus_type                 *bus;
+       const struct bus_type           *bus;
 
        struct list_head link;  /* for global list of mem_ctl_info structs */
 
index 9911508a9604fb048c2587730520331ac621fd80..0bbd02fd351db9239cfd39709e945c649582fb52 100644 (file)
@@ -6,15 +6,6 @@
 #include <linux/linkage.h>
 #include <linux/stringify.h>
 
-/*
- * Export symbols from the kernel to modules.  Forked from module.h
- * to reduce the amount of pointless cruft we feed to gcc when only
- * exporting a simple symbol or two.
- *
- * Try not to add #includes here.  It slows compilation and makes kernel
- * hackers place grumpy comments in header files.
- */
-
 /*
  * This comment block is used by fixdep. Please do not remove.
  *
  * side effect of the *.o build rule.
  */
 
-#ifndef __ASSEMBLY__
-#ifdef MODULE
-extern struct module __this_module;
-#define THIS_MODULE (&__this_module)
-#else
-#define THIS_MODULE ((struct module *)0)
-#endif
-#endif /* __ASSEMBLY__ */
-
 #ifdef CONFIG_64BIT
 #define __EXPORT_SYMBOL_REF(sym)                       \
        .balign 8                               ASM_NL  \
index d1ea3898564ca9fa9b9c06d83da16c88f7b4a478..9a7e527392512c69955ab015ac9f9f5273c4c65f 100644 (file)
@@ -32,6 +32,7 @@
 #define PM_SIP_SVC                     0xC2000000
 
 /* PM API versions */
+#define PM_API_VERSION_1       1
 #define PM_API_VERSION_2       2
 
 #define PM_PINCTRL_PARAM_SET_VERSION   2
@@ -47,6 +48,9 @@
 #define FAMILY_CODE_MASK       GENMASK(27, 21)
 #define SUB_FAMILY_CODE_MASK   GENMASK(20, 19)
 
+#define API_ID_MASK            GENMASK(7, 0)
+#define MODULE_ID_MASK         GENMASK(11, 8)
+
 /* ATF only commands */
 #define TF_A_PM_REGISTER_SGI           0xa04
 #define PM_GET_TRUSTZONE_VERSION       0xa03
 /*
  * Node IDs for the Error Events.
  */
-#define EVENT_ERROR_PMC_ERR1   (0x28100000U)
-#define EVENT_ERROR_PMC_ERR2   (0x28104000U)
-#define EVENT_ERROR_PSM_ERR1   (0x28108000U)
-#define EVENT_ERROR_PSM_ERR2   (0x2810C000U)
+#define VERSAL_EVENT_ERROR_PMC_ERR1    (0x28100000U)
+#define VERSAL_EVENT_ERROR_PMC_ERR2    (0x28104000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR1    (0x28108000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR2    (0x2810C000U)
+
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR1        (0x28100000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR2        (0x28104000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR3        (0x28108000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR1        (0x2810C000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR2        (0x28110000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR3        (0x28114000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR4        (0x28118000U)
 
 /* ZynqMP SD tap delay tuning */
 #define SD_ITAPDLY     0xFF180314
 #define XPM_EVENT_ERROR_MASK_NOC_NCR           BIT(13)
 #define XPM_EVENT_ERROR_MASK_NOC_CR            BIT(12)
 
+enum pm_module_id {
+       PM_MODULE_ID = 0x0,
+       XSEM_MODULE_ID = 0x3,
+       TF_A_MODULE_ID = 0xa,
+};
+
 enum pm_api_cb_id {
        PM_INIT_SUSPEND_CB = 30,
        PM_ACKNOWLEDGE_CB = 31,
@@ -119,6 +137,7 @@ enum pm_api_cb_id {
 };
 
 enum pm_api_id {
+       PM_API_FEATURES = 0,
        PM_GET_API_VERSION = 1,
        PM_REGISTER_NOTIFIER = 5,
        PM_FORCE_POWERDOWN = 8,
@@ -138,7 +157,6 @@ enum pm_api_id {
        PM_SECURE_SHA = 26,
        PM_PINCTRL_REQUEST = 28,
        PM_PINCTRL_RELEASE = 29,
-       PM_PINCTRL_GET_FUNCTION = 30,
        PM_PINCTRL_SET_FUNCTION = 31,
        PM_PINCTRL_CONFIG_PARAM_GET = 32,
        PM_PINCTRL_CONFIG_PARAM_SET = 33,
@@ -149,8 +167,6 @@ enum pm_api_id {
        PM_CLOCK_GETSTATE = 38,
        PM_CLOCK_SETDIVIDER = 39,
        PM_CLOCK_GETDIVIDER = 40,
-       PM_CLOCK_SETRATE = 41,
-       PM_CLOCK_GETRATE = 42,
        PM_CLOCK_SETPARENT = 43,
        PM_CLOCK_GETPARENT = 44,
        PM_FPGA_READ = 46,
@@ -161,7 +177,9 @@ enum pm_api_id {
 /* PMU-FW return status codes */
 enum pm_ret_status {
        XST_PM_SUCCESS = 0,
+       XST_PM_INVALID_VERSION = 4,
        XST_PM_NO_FEATURE = 19,
+       XST_PM_INVALID_CRC = 301,
        XST_PM_INTERNAL = 2000,
        XST_PM_CONFLICT = 2001,
        XST_PM_NO_ACCESS = 2002,
@@ -509,20 +527,18 @@ struct zynqmp_pm_query_data {
        u32 arg3;
 };
 
-int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
-                       u32 arg2, u32 arg3, u32 *ret_payload);
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
 
 #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
 int zynqmp_pm_get_api_version(u32 *version);
 int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
+int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily);
 int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
 int zynqmp_pm_clock_enable(u32 clock_id);
 int zynqmp_pm_clock_disable(u32 clock_id);
 int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state);
 int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider);
 int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider);
-int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate);
-int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate);
 int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id);
 int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id);
 int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode);
@@ -559,7 +575,6 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
 int zynqmp_pm_set_boot_health_status(u32 value);
 int zynqmp_pm_pinctrl_request(const u32 pin);
 int zynqmp_pm_pinctrl_release(const u32 pin);
-int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id);
 int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id);
 int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
                                 u32 *value);
@@ -596,6 +611,11 @@ static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
        return -ENODEV;
 }
 
+static inline int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+{
+       return -ENODEV;
+}
+
 static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
                                       u32 *out)
 {
@@ -627,16 +647,6 @@ static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
        return -ENODEV;
 }
 
-static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
-{
-       return -ENODEV;
-}
-
-static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
-{
-       return -ENODEV;
-}
-
 static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
 {
        return -ENODEV;
@@ -806,11 +816,6 @@ static inline int zynqmp_pm_pinctrl_release(const u32 pin)
        return -ENODEV;
 }
 
-static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
-{
-       return -ENODEV;
-}
-
 static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
 {
        return -ENODEV;
index 79ef6ac4c02113e92454d94e80565b06073c4722..89a6888f2f9e502d38f09e6a1c66f0697c3d7d08 100644 (file)
@@ -214,51 +214,6 @@ __kernel_size_t __fortify_strlen(const char * const POS p)
        return ret;
 }
 
-/* Defined after fortified strlen() to reuse it. */
-extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
-/**
- * strlcpy - Copy a string into another string buffer
- *
- * @p: pointer to destination of copy
- * @q: pointer to NUL-terminated source string to copy
- * @size: maximum number of bytes to write at @p
- *
- * If strlen(@q) >= @size, the copy of @q will be truncated at
- * @size - 1 bytes. @p will always be NUL-terminated.
- *
- * Do not use this function. While FORTIFY_SOURCE tries to avoid
- * over-reads when calculating strlen(@q), it is still possible.
- * Prefer strscpy(), though note its different return values for
- * detecting truncation.
- *
- * Returns total number of bytes written to @p, including terminating NUL.
- *
- */
-__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
-{
-       const size_t p_size = __member_size(p);
-       const size_t q_size = __member_size(q);
-       size_t q_len;   /* Full count of source string length. */
-       size_t len;     /* Count of characters going into destination. */
-
-       if (p_size == SIZE_MAX && q_size == SIZE_MAX)
-               return __real_strlcpy(p, q, size);
-       q_len = strlen(q);
-       len = (q_len >= size) ? size - 1 : q_len;
-       if (__builtin_constant_p(size) && __builtin_constant_p(q_len) && size) {
-               /* Write size is always larger than destination. */
-               if (len >= p_size)
-                       __write_overflow();
-       }
-       if (size) {
-               if (len >= p_size)
-                       fortify_panic(__func__);
-               __underlying_memcpy(p, q, len);
-               p[len] = '\0';
-       }
-       return q_len;
-}
-
 /* Defined after fortified strnlen() to reuse it. */
 extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
 /**
@@ -272,12 +227,6 @@ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
  * @p buffer. The behavior is undefined if the string buffers overlap. The
  * destination @p buffer is always NUL terminated, unless it's zero-sized.
  *
- * Preferred to strlcpy() since the API doesn't require reading memory
- * from the source @q string beyond the specified @size bytes, and since
- * the return value is easier to error-check than strlcpy()'s.
- * In addition, the implementation is robust to the string changing out
- * from underneath it, unlike the current strlcpy() implementation.
- *
  * Preferred to strncpy() since it always returns a valid string, and
  * doesn't unnecessarily force the tail of the destination buffer to be
  * zero padded. If padding is desired please use strscpy_pad().
index e6ba0cc6f2eeeaea1291dbf4e88c8f6af462e96a..023f37c607094a5339598ac2c7dddd09745c907e 100644 (file)
@@ -2101,9 +2101,6 @@ int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
 int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
                                  struct file *file_out, loff_t pos_out,
                                  loff_t *count, unsigned int remap_flags);
-extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
-                                 struct file *file_out, loff_t pos_out,
-                                 loff_t len, unsigned int remap_flags);
 extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
                                   struct file *file_out, loff_t pos_out,
                                   loff_t len, unsigned int remap_flags);
@@ -2371,7 +2368,7 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
 #define I_CREATING             (1 << 15)
 #define I_DONTCACHE            (1 << 16)
 #define I_SYNC_QUEUED          (1 << 17)
-#define I_PINNING_FSCACHE_WB   (1 << 18)
+#define I_PINNING_NETFS_WB     (1 << 18)
 
 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
 #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
index a174cedf4d9072ae708202693f9647d85dcbe515..bdf7f3eddf0a2fb26b9276f6dacb92228c8e6d29 100644 (file)
@@ -189,17 +189,20 @@ extern atomic_t fscache_n_write;
 extern atomic_t fscache_n_no_write_space;
 extern atomic_t fscache_n_no_create_space;
 extern atomic_t fscache_n_culled;
+extern atomic_t fscache_n_dio_misfit;
 #define fscache_count_read() atomic_inc(&fscache_n_read)
 #define fscache_count_write() atomic_inc(&fscache_n_write)
 #define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space)
 #define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space)
 #define fscache_count_culled() atomic_inc(&fscache_n_culled)
+#define fscache_count_dio_misfit() atomic_inc(&fscache_n_dio_misfit)
 #else
 #define fscache_count_read() do {} while(0)
 #define fscache_count_write() do {} while(0)
 #define fscache_count_no_write_space() do {} while(0)
 #define fscache_count_no_create_space() do {} while(0)
 #define fscache_count_culled() do {} while(0)
+#define fscache_count_dio_misfit() do {} while(0)
 #endif
 
 #endif /* _LINUX_FSCACHE_CACHE_H */
index 8e312c8323a8e5048d0780401659d2dbe7d90948..6e8562cbcc43221e50cfd2b5698a99b2f7c2cb3f 100644 (file)
@@ -437,9 +437,6 @@ const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_r
  * indicates the cache resources to which the operation state should be
  * attached; @cookie indicates the cache object that will be accessed.
  *
- * This is intended to be called from the ->begin_cache_operation() netfs lib
- * operation as implemented by the network filesystem.
- *
  * @cres->inval_counter is set from @cookie->inval_counter for comparison at
  * the end of the operation.  This allows invalidation during the operation to
  * be detected by the caller.
@@ -629,48 +626,6 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
 
 }
 
-#if __fscache_available
-bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
-               struct fscache_cookie *cookie);
-#else
-#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \
-               filemap_dirty_folio(MAPPING, FOLIO)
-#endif
-
-/**
- * fscache_unpin_writeback - Unpin writeback resources
- * @wbc: The writeback control
- * @cookie: The cookie referring to the cache object
- *
- * Unpin the writeback resources pinned by fscache_dirty_folio().  This is
- * intended to be called by the netfs's ->write_inode() method.
- */
-static inline void fscache_unpin_writeback(struct writeback_control *wbc,
-                                          struct fscache_cookie *cookie)
-{
-       if (wbc->unpinned_fscache_wb)
-               fscache_unuse_cookie(cookie, NULL, NULL);
-}
-
-/**
- * fscache_clear_inode_writeback - Clear writeback resources pinned by an inode
- * @cookie: The cookie referring to the cache object
- * @inode: The inode to clean up
- * @aux: Auxiliary data to apply to the inode
- *
- * Clear any writeback resources held by an inode when the inode is evicted.
- * This must be called before clear_inode() is called.
- */
-static inline void fscache_clear_inode_writeback(struct fscache_cookie *cookie,
-                                                struct inode *inode,
-                                                const void *aux)
-{
-       if (inode->i_state & I_PINNING_FSCACHE_WB) {
-               loff_t i_size = i_size_read(inode);
-               fscache_unuse_cookie(cookie, aux, &i_size);
-       }
-}
-
 /**
  * fscache_note_page_release - Note that a netfs page got released
  * @cookie: The cookie corresponding to the file
index 11e6434b8e714a1d03d7ea2807c795513916b58e..8300a52869887a30388a6d8bb360b008340dff97 100644 (file)
@@ -100,6 +100,7 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
        return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
 }
 
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
 /*
  * fsnotify_file_area_perm - permission hook before access to file range
  */
@@ -145,6 +146,24 @@ static inline int fsnotify_open_perm(struct file *file)
        return fsnotify_file(file, FS_OPEN_PERM);
 }
 
+#else
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+                                         const loff_t *ppos, size_t count)
+{
+       return 0;
+}
+
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+       return 0;
+}
+
+static inline int fsnotify_open_perm(struct file *file)
+{
+       return 0;
+}
+#endif
+
 /*
  * fsnotify_link_count - inode's link count changed
  */
index ca49947f0a775a16df3f086bb490c46b9f6efb17..95421860397a236d101bf4c3bc7a934a515bf847 100644 (file)
@@ -25,16 +25,35 @@ struct acpi_subtable_proc {
        int count;
 };
 
+union fw_table_header {
+       struct acpi_table_header acpi;
+       struct acpi_table_cdat cdat;
+};
+
 union acpi_subtable_headers {
        struct acpi_subtable_header common;
        struct acpi_hmat_structure hmat;
        struct acpi_prmt_module_header prmt;
        struct acpi_cedt_header cedt;
+       struct acpi_cdat_header cdat;
 };
 
 int acpi_parse_entries_array(char *id, unsigned long table_size,
-                            struct acpi_table_header *table_header,
+                            union fw_table_header *table_header,
                             struct acpi_subtable_proc *proc,
                             int proc_num, unsigned int max_entries);
 
+int cdat_table_parse(enum acpi_cdat_type type,
+                    acpi_tbl_entry_handler_arg handler_arg, void *arg,
+                    struct acpi_table_cdat *table_header);
+
+/* CXL is the only non-ACPI consumer of the FIRMWARE_TABLE library */
+#if IS_ENABLED(CONFIG_ACPI) && !IS_ENABLED(CONFIG_CXL_BUS)
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_fwtbl_lib __init_or_acpilib
+#else
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, CXL)
+#define __init_or_fwtbl_lib
+#endif
+
 #endif
index e846bd4e7559bb54bba7ffb2dcda3dc8e0099e16..9a5c6c76e6533385dbb32de98abfd330c8736585 100644 (file)
@@ -635,7 +635,7 @@ struct gpio_device *gpio_device_get(struct gpio_device *gdev);
 void gpio_device_put(struct gpio_device *gdev);
 
 DEFINE_FREE(gpio_device_put, struct gpio_device *,
-           if (IS_ERR_OR_NULL(_T)) gpio_device_put(_T));
+           if (!IS_ERR_OR_NULL(_T)) gpio_device_put(_T))
 
 struct device *gpio_device_to_device(struct gpio_device *gdev);
 
index 3f84aeb81e480b238842cc5c0e18478da854778a..80fa930b04c6795eb7c6143a79655a6f918446eb 100644 (file)
@@ -21,6 +21,7 @@ struct device;
  *                     disable button via sysfs
  * @value:             axis value for %EV_ABS
  * @irq:               Irq number in case of interrupt keys
+ * @wakeirq:           Optional dedicated wake-up interrupt
  */
 struct gpio_keys_button {
        unsigned int code;
@@ -34,6 +35,7 @@ struct gpio_keys_button {
        bool can_disable;
        int value;
        unsigned int irq;
+       unsigned int wakeirq;
 };
 
 /**
index 840cd254172d061ec445bdc845a7f1e8cbf20463..7118ac28d46879b615de35a6e3702208de4001e9 100644 (file)
@@ -77,17 +77,6 @@ enum hid_bpf_attach_flags {
 int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
 int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
 
-/* Following functions are kfunc that we export to BPF programs */
-/* available everywhere in HID-BPF */
-__u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t __sz);
-
-/* only available in syscall */
-int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags);
-int hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
-                      enum hid_report_type rtype, enum hid_class_request reqtype);
-struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id);
-void hid_bpf_release_context(struct hid_bpf_ctx *ctx);
-
 /*
  * Below is HID internal
  */
index 87e3bedf8eb00323c102787243e7dbfd045ba4e9..641c4567cfa7aee830f8ad0b52abb24bcbe353a8 100644 (file)
@@ -157,6 +157,7 @@ enum  hrtimer_base_type {
  * @max_hang_time:     Maximum time spent in hrtimer_interrupt
  * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
  *                      expired
+ * @online:            CPU is online from an hrtimers point of view
  * @timer_waiters:     A hrtimer_cancel() invocation waits for the timer
  *                     callback to finish.
  * @expires_next:      absolute time of the next event, is required for remote
@@ -179,7 +180,8 @@ struct hrtimer_cpu_base {
        unsigned int                    hres_active             : 1,
                                        in_hrtirq               : 1,
                                        hang_detected           : 1,
-                                       softirq_activated       : 1;
+                                       softirq_activated       : 1,
+                                       online                  : 1;
 #ifdef CONFIG_HIGH_RES_TIMERS
        unsigned int                    nr_events;
        unsigned short                  nr_retries;
index 0dae9db275380b16bada4327f8f02e53ff8db30f..652ecb7abedae4b5bc3c451410139168702a63f9 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/swab.h>                /* for swab16 */
 #include <uapi/linux/i2c.h>
 
-extern struct bus_type i2c_bus_type;
+extern const struct bus_type i2c_bus_type;
 extern struct device_type i2c_adapter_type;
 extern struct device_type i2c_client_type;
 
@@ -746,6 +746,8 @@ struct i2c_adapter {
 
        struct irq_domain *host_notify_domain;
        struct regulator *bus_regulator;
+
+       struct dentry *debugfs;
 };
 #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
 
@@ -850,7 +852,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
 
 /* i2c adapter classes (bitmask) */
 #define I2C_CLASS_HWMON                (1<<0)  /* lm_sensors, ... */
-#define I2C_CLASS_DDC          (1<<3)  /* DDC bus on graphics adapters */
 #define I2C_CLASS_SPD          (1<<7)  /* Memory modules */
 /* Warn users that the adapter doesn't support classes anymore */
 #define I2C_CLASS_DEPRECATED   (1<<8)
index 84ed77c049400546e968016af548bce081805cca..e119f11948efe2958e5df530647e8408f54485b2 100644 (file)
@@ -54,6 +54,7 @@ enum i3c_hdr_mode {
  * struct i3c_priv_xfer - I3C SDR private transfer
  * @rnw: encodes the transfer direction. true for a read, false for a write
  * @len: transfer length in bytes of the transfer
+ * @actual_len: actual number of bytes transferred by the controller
  * @data: input/output buffer
  * @data.in: input buffer. Must point to a DMA-able buffer
  * @data.out: output buffer. Must point to a DMA-able buffer
@@ -62,6 +63,7 @@ enum i3c_hdr_mode {
 struct i3c_priv_xfer {
        u8 rnw;
        u16 len;
+       u16 actual_len;
        union {
                void *in;
                const void *out;
index 24c1863b86e2b6940d263555515039fa0b317203..0ca27dd869561fd96c8f2a1470a0f8adae6d4d5e 100644 (file)
@@ -76,7 +76,6 @@ struct i2c_dev_boardinfo {
 /**
  * struct i2c_dev_desc - I2C device descriptor
  * @common: common part of the I2C device descriptor
- * @boardinfo: pointer to the boardinfo attached to this I2C device
  * @dev: I2C device object registered to the I2C framework
  * @addr: I2C device address
  * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
@@ -434,6 +433,8 @@ struct i3c_bus {
  *                   for a future IBI
  *                   This method is mandatory only if ->request_ibi is not
  *                   NULL.
+ * @enable_hotjoin: enable hot join event detection.
+ * @disable_hotjoin: disable hot join event detection.
  */
 struct i3c_master_controller_ops {
        int (*bus_init)(struct i3c_master_controller *master);
@@ -460,6 +461,8 @@ struct i3c_master_controller_ops {
        int (*disable_ibi)(struct i3c_dev_desc *dev);
        void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
                                 struct i3c_ibi_slot *slot);
+       int (*enable_hotjoin)(struct i3c_master_controller *master);
+       int (*disable_hotjoin)(struct i3c_master_controller *master);
 };
 
 /**
@@ -473,6 +476,7 @@ struct i3c_master_controller_ops {
  * @ops: master operations. See &struct i3c_master_controller_ops
  * @secondary: true if the master is a secondary master
  * @init_done: true when the bus initialization is done
+ * @hotjoin: true if the master supports hotjoin
  * @boardinfo.i3c: list of I3C  boardinfo objects
  * @boardinfo.i2c: list of I2C boardinfo objects
  * @boardinfo: board-level information attached to devices connected on the bus
@@ -495,6 +499,7 @@ struct i3c_master_controller {
        const struct i3c_master_controller_ops *ops;
        unsigned int secondary : 1;
        unsigned int init_done : 1;
+       unsigned int hotjoin: 1;
        struct {
                struct list_head i3c;
                struct list_head i2c;
@@ -551,6 +556,8 @@ int i3c_master_register(struct i3c_master_controller *master,
                        const struct i3c_master_controller_ops *ops,
                        bool secondary);
 void i3c_master_unregister(struct i3c_master_controller *master);
+int i3c_master_enable_hotjoin(struct i3c_master_controller *master);
+int i3c_master_disable_hotjoin(struct i3c_master_controller *master);
 
 /**
  * i3c_dev_get_master_data() - get master private data attached to an I3C
index 52620e5b80522ee75e5223092bbb4ed2e31c4b45..b7904992d56191e147035191e93160dd8bcab918 100644 (file)
@@ -41,6 +41,7 @@ struct adi_axi_adc_chip_info {
  * @reg_access         IIO debugfs_reg_access hook for the client ADC
  * @read_raw           IIO read_raw hook for the client ADC
  * @write_raw          IIO write_raw hook for the client ADC
+ * @read_avail         IIO read_avail hook for the client ADC
  */
 struct adi_axi_adc_conv {
        const struct adi_axi_adc_chip_info              *chip_info;
@@ -54,6 +55,9 @@ struct adi_axi_adc_conv {
        int (*write_raw)(struct adi_axi_adc_conv *conv,
                         struct iio_chan_spec const *chan,
                         int val, int val2, long mask);
+       int (*read_avail)(struct adi_axi_adc_conv *conv,
+                         struct iio_chan_spec const *chan,
+                         const int **val, int *type, int *length, long mask);
 };
 
 struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
index 6564bdcdac66c90530184572f3ad15f6b74025d7..18d3702fa95d13d6cd13de305f14b4fe71bc4964 100644 (file)
@@ -19,14 +19,12 @@ struct device;
 
 /**
  * enum iio_block_state - State of a struct iio_dma_buffer_block
- * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
  * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
  * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
  * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
  * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
  */
 enum iio_block_state {
-       IIO_BLOCK_STATE_DEQUEUED,
        IIO_BLOCK_STATE_QUEUED,
        IIO_BLOCK_STATE_ACTIVE,
        IIO_BLOCK_STATE_DONE,
@@ -73,12 +71,15 @@ struct iio_dma_buffer_block {
  * @active_block: Block being used in read()
  * @pos: Read offset in the active block
  * @block_size: Size of each block
+ * @next_dequeue: index of next block that will be dequeued
  */
 struct iio_dma_buffer_queue_fileio {
        struct iio_dma_buffer_block *blocks[2];
        struct iio_dma_buffer_block *active_block;
        size_t pos;
        size_t block_size;
+
+       unsigned int next_dequeue;
 };
 
 /**
@@ -93,7 +94,6 @@ struct iio_dma_buffer_queue_fileio {
  *   list and typically also a list of active blocks in the part that handles
  *   the DMA controller
  * @incoming: List of buffers on the incoming queue
- * @outgoing: List of buffers on the outgoing queue
  * @active: Whether the buffer is currently active
  * @fileio: FileIO state
  */
@@ -105,7 +105,6 @@ struct iio_dma_buffer_queue {
        struct mutex lock;
        spinlock_t list_lock;
        struct list_head incoming;
-       struct list_head outgoing;
 
        bool active;
 
index d0ce3b71106aa7116e86ed313f8be1e06795dfa4..c5b36d2c1e7359891a90d89b9b1680c736b263e1 100644 (file)
@@ -434,13 +434,7 @@ struct iio_trigger; /* forward declaration */
  * @update_scan_mode:  function to configure device and scan buffer when
  *                     channels have changed
  * @debugfs_reg_access:        function to read or write register value of device
- * @of_xlate:          function pointer to obtain channel specifier index.
- *                     When #iio-cells is greater than '0', the driver could
- *                     provide a custom of_xlate function that reads the
- *                     *args* and returns the appropriate index in registered
- *                     IIO channels array.
  * @fwnode_xlate:      fwnode based function pointer to obtain channel specifier index.
- *                     Functionally the same as @of_xlate.
  * @hwfifo_set_watermark: function pointer to set the current hardware
  *                     fifo watermark level; see hwfifo_* entries in
  *                     Documentation/ABI/testing/sysfs-bus-iio for details on
index 117bde7d6ad7915be091d2fa9e958a9efcafd4ff..d89982c98368cf72c0fc30fa66ab001e48af4e8b 100644 (file)
@@ -68,6 +68,7 @@ enum iio_chan_info_enum {
        IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
        IIO_CHAN_INFO_CALIBAMBIENT,
        IIO_CHAN_INFO_ZEROPOINT,
+       IIO_CHAN_INFO_TROUGH,
 };
 
 #endif /* _IIO_TYPES_H_ */
index 01b52c9c75268f1cdebcfaba9750304d20618002..3fa3f6241350b2a81226a58fc77e2e4d0135e78d 100644 (file)
@@ -179,6 +179,13 @@ extern void (*late_time_init)(void);
 
 extern bool initcall_debug;
 
+#ifdef MODULE
+extern struct module __this_module;
+#define THIS_MODULE (&__this_module)
+#else
+#define THIS_MODULE ((struct module *)0)
+#endif
+
 #endif
   
 #ifndef MODULE
index 5fba52a56cd61e88a319b10a0c6242fd155c3576..5705d5de3aeaeeca919eae7cdc89d617a0cb5075 100644 (file)
@@ -7,7 +7,6 @@
  */
 
 struct as5011_platform_data {
-       unsigned int button_gpio;
        unsigned int axis_irq; /* irq number */
        unsigned long axis_irqflags;
        char xp, xn; /* threshold for x axis */
index d464ffb4db52b9654074af09264754043a30beab..5192ae3f5ec1b67738fb31d34a34cd97e19f3ab3 100644 (file)
@@ -5,5 +5,4 @@
 
 struct navpoint_platform_data {
        int             port;           /* PXA SSP port for pxa_ssp_request() */
-       int             gpio;           /* GPIO for power on/off */
 };
index 25142a0e2fc2c51d4c7807a1fb87cc21b16a163b..86cf1f7ae389a40180b86dd6850102f6fe04c188 100644 (file)
@@ -100,6 +100,30 @@ struct io_pgtable_cfg {
        const struct iommu_flush_ops    *tlb;
        struct device                   *iommu_dev;
 
+       /**
+        * @alloc: Custom page allocator.
+        *
+        * Optional hook used to allocate page tables. If this function is NULL,
+        * @free must be NULL too.
+        *
+        * Memory returned should be zeroed and suitable for dma_map_single() and
+        * virt_to_phys().
+        *
+        * Not all formats support custom page allocators. Before considering
+        * passing a non-NULL value, make sure the chosen page format supports
+        * this feature.
+        */
+       void *(*alloc)(void *cookie, size_t size, gfp_t gfp);
+
+       /**
+        * @free: Custom page de-allocator.
+        *
+        * Optional hook used to free page tables allocated with the @alloc
+        * hook. Must be non-NULL if @alloc is not NULL, must be NULL
+        * otherwise.
+        */
+       void (*free)(void *cookie, void *pages, size_t size);
+
        /* Low-level data specific to the table format */
        union {
                struct {
@@ -241,16 +265,26 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop,
                iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
 }
 
+/**
+ * enum io_pgtable_caps - IO page table backend capabilities.
+ */
+enum io_pgtable_caps {
+       /** @IO_PGTABLE_CAP_CUSTOM_ALLOCATOR: Backend accepts custom page table allocators. */
+       IO_PGTABLE_CAP_CUSTOM_ALLOCATOR = BIT(0),
+};
+
 /**
  * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
  *                              particular format.
  *
  * @alloc: Allocate a set of page tables described by cfg.
  * @free:  Free the page tables associated with iop.
+ * @caps:  Combination of @io_pgtable_caps flags encoding the backend capabilities.
  */
 struct io_pgtable_init_fns {
        struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
        void (*free)(struct io_pgtable *iop);
+       u32 caps;
 };
 
 extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
index 6291aa7b079b0df321f06ede5b69180b20941521..1ea2a820e1eb035c9eea2ec97d9874c52bbd0b42 100644 (file)
@@ -106,7 +106,7 @@ struct iommu_domain {
        unsigned type;
        const struct iommu_domain_ops *ops;
        const struct iommu_dirty_ops *dirty_ops;
-
+       const struct iommu_ops *owner; /* Whose domain_alloc we came from */
        unsigned long pgsize_bitmap;    /* Bitmap of page sizes in use */
        struct iommu_domain_geometry geometry;
        struct iommu_dma_cookie *iova_cookie;
@@ -121,6 +121,11 @@ struct iommu_domain {
                struct {        /* IOMMU_DOMAIN_SVA */
                        struct mm_struct *mm;
                        int users;
+                       /*
+                        * Next iommu_domain in mm->iommu_mm->sva-domains list
+                        * protected by iommu_sva_lock.
+                        */
+                       struct list_head next;
                };
        };
 };
@@ -284,6 +289,23 @@ struct iommu_user_data {
        size_t len;
 };
 
+/**
+ * struct iommu_user_data_array - iommu driver specific user space data array
+ * @type: The data type of all the entries in the user buffer array
+ * @uptr: Pointer to the user buffer array
+ * @entry_len: The fixed-width length of an entry in the array, in bytes
+ * @entry_num: The number of total entries in the array
+ *
+ * The user buffer includes an array of requests with format defined in
+ * include/uapi/linux/iommufd.h
+ */
+struct iommu_user_data_array {
+       unsigned int type;
+       void __user *uptr;
+       size_t entry_len;
+       u32 entry_num;
+};
+
 /**
  * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
  * @dst_data: Pointer to an iommu driver specific user data that is defined in
@@ -324,6 +346,57 @@ static inline int __iommu_copy_struct_from_user(
                                      sizeof(*kdst),                      \
                                      offsetofend(typeof(*kdst), min_last))
 
+/**
+ * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ *                                       data from an iommu_user_data_array
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ *            include/uapi/linux/iommufd.h
+ * @src_array: Pointer to a struct iommu_user_data_array for a user space array
+ * @data_type: The data type of the @dst_data. Must match with @src_array.type
+ * @index: Index to the location in the array to copy user data from
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ *           This should be offsetofend using the last member in the user data
+ *           struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user_array(
+       void *dst_data, const struct iommu_user_data_array *src_array,
+       unsigned int data_type, unsigned int index, size_t data_len,
+       size_t min_len)
+{
+       struct iommu_user_data src_data;
+
+       if (WARN_ON(!src_array || index >= src_array->entry_num))
+               return -EINVAL;
+       if (!src_array->entry_num)
+               return -EINVAL;
+       src_data.uptr = src_array->uptr + src_array->entry_len * index;
+       src_data.len = src_array->entry_len;
+       src_data.type = src_array->type;
+
+       return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
+                                            data_len, min_len);
+}
+
+/**
+ * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ *                                     data from an iommu_user_data_array
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ *        include/uapi/linux/iommufd.h
+ * @user_array: Pointer to a struct iommu_user_data_array for a user space
+ *              array
+ * @data_type: The data type of the @kdst. Must match with @user_array->type
+ * @index: Index to the location in the array to copy user data from
+ * @min_last: The last member of the data structure @kdst points in the
+ *            initial version.
+ * Return: 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
+                                         min_last)                           \
+       __iommu_copy_struct_from_user_array(                                  \
+               kdst, user_array, data_type, index, sizeof(*(kdst)),          \
+               offsetofend(typeof(*(kdst)), min_last))
+
 /**
  * struct iommu_ops - iommu ops and capabilities
  * @capable: check capability
@@ -440,6 +513,13 @@ struct iommu_ops {
  * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
  * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
  *            queue
+ * @cache_invalidate_user: Flush hardware cache for user space IO page table.
+ *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
+ *                         passes in the cache invalidation requests, in form
+ *                         of a driver data structure. The driver must update
+ *                         array->entry_num to report the number of handled
+ *                         invalidation requests. The driver data structure
+ *                         must be defined in include/uapi/linux/iommufd.h
  * @iova_to_phys: translate iova to physical address
  * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
  *                           including no-snoop TLPs on PCIe or other platform
@@ -465,6 +545,8 @@ struct iommu_domain_ops {
                              size_t size);
        void (*iotlb_sync)(struct iommu_domain *domain,
                           struct iommu_iotlb_gather *iotlb_gather);
+       int (*cache_invalidate_user)(struct iommu_domain *domain,
+                                    struct iommu_user_data_array *array);
 
        phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
                                    dma_addr_t iova);
@@ -812,6 +894,11 @@ struct iommu_sva {
        struct iommu_domain             *domain;
 };
 
+struct iommu_mm_data {
+       u32                     pasid;
+       struct list_head        sva_domains;
+};
+
 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
                      const struct iommu_ops *ops);
 void iommu_fwspec_free(struct device *dev);
@@ -840,10 +927,7 @@ static inline void *dev_iommu_priv_get(struct device *dev)
                return NULL;
 }
 
-static inline void dev_iommu_priv_set(struct device *dev, void *priv)
-{
-       dev->iommu->priv = priv;
-}
+void dev_iommu_priv_set(struct device *dev, void *priv);
 
 extern struct mutex iommu_probe_device_lock;
 int iommu_probe_device(struct device *dev);
@@ -1337,15 +1421,33 @@ static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream
        return false;
 }
 
-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_IOMMU_MM_DATA
 static inline void mm_pasid_init(struct mm_struct *mm)
 {
-       mm->pasid = IOMMU_PASID_INVALID;
+       /*
+        * During dup_mm(), a new mm will be memcpy'd from an old one and that makes
+        * the new mm and the old one point to a same iommu_mm instance. When either
+        * one of the two mms gets released, the iommu_mm instance is freed, leaving
+        * the other mm running into a use-after-free/double-free problem. To avoid
+        * the problem, zeroing the iommu_mm pointer of a new mm is needed here.
+        */
+       mm->iommu_mm = NULL;
 }
+
 static inline bool mm_valid_pasid(struct mm_struct *mm)
 {
-       return mm->pasid != IOMMU_PASID_INVALID;
+       return READ_ONCE(mm->iommu_mm);
+}
+
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+       struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);
+
+       if (!iommu_mm)
+               return IOMMU_PASID_INVALID;
+       return iommu_mm->pasid;
 }
+
 void mm_pasid_drop(struct mm_struct *mm);
 struct iommu_sva *iommu_sva_bind_device(struct device *dev,
                                        struct mm_struct *mm);
@@ -1368,6 +1470,12 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
 }
 static inline void mm_pasid_init(struct mm_struct *mm) {}
 static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
+
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+       return IOMMU_PASID_INVALID;
+}
+
 static inline void mm_pasid_drop(struct mm_struct *mm) {}
 #endif /* CONFIG_IOMMU_SVA */
 
index 7578d4f6a969a419a8e39e38a8886dc9119c6973..db1249cd9692080f495c8986826d96eaf56b7995 100644 (file)
@@ -47,7 +47,30 @@ static inline int task_nice_ioclass(struct task_struct *task)
 }
 
 #ifdef CONFIG_BLOCK
-int __get_task_ioprio(struct task_struct *p);
+/*
+ * If the task has set an I/O priority, use that. Otherwise, return
+ * the default I/O priority.
+ *
+ * Expected to be called for current task or with task_lock() held to keep
+ * io_context stable.
+ */
+static inline int __get_task_ioprio(struct task_struct *p)
+{
+       struct io_context *ioc = p->io_context;
+       int prio;
+
+       if (!ioc)
+               return IOPRIO_DEFAULT;
+
+       if (p != current)
+               lockdep_assert_held(&p->alloc_lock);
+
+       prio = ioc->ioprio;
+       if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
+               prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+                                        task_nice_ioprio(p));
+       return prio;
+}
 #else
 static inline int __get_task_ioprio(struct task_struct *p)
 {
index 4944136efaa22c98931252f1d45338ecb5381b06..7e7fd25b09b3ebe3d81e30fb23f506a9ee5a6519 100644 (file)
@@ -80,8 +80,8 @@
 /* Two fragments for cross MMIO pages. */
 #define KVM_MAX_MMIO_FRAGMENTS 2
 
-#ifndef KVM_ADDRESS_SPACE_NUM
-#define KVM_ADDRESS_SPACE_NUM  1
+#ifndef KVM_MAX_NR_ADDRESS_SPACES
+#define KVM_MAX_NR_ADDRESS_SPACES      1
 #endif
 
 /*
@@ -253,9 +253,10 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
-#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
 union kvm_mmu_notifier_arg {
        pte_t pte;
+       unsigned long attributes;
 };
 
 struct kvm_gfn_range {
@@ -588,8 +589,20 @@ struct kvm_memory_slot {
        u32 flags;
        short id;
        u16 as_id;
+
+#ifdef CONFIG_KVM_PRIVATE_MEM
+       struct {
+               struct file __rcu *file;
+               pgoff_t pgoff;
+       } gmem;
+#endif
 };
 
+static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot)
+{
+       return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
+}
+
 static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
 {
        return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
@@ -677,13 +690,29 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
 #define KVM_MEM_SLOTS_NUM SHRT_MAX
 #define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
 
-#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+#if KVM_MAX_NR_ADDRESS_SPACES == 1
+static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
+{
+       return KVM_MAX_NR_ADDRESS_SPACES;
+}
+
 static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
 {
        return 0;
 }
 #endif
 
+/*
+ * Arch code must define kvm_arch_has_private_mem if support for private memory
+ * is enabled.
+ */
+#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM)
+static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
+{
+       return false;
+}
+#endif
+
 struct kvm_memslots {
        u64 generation;
        atomic_long_t last_used_slot;
@@ -721,9 +750,9 @@ struct kvm {
        struct mm_struct *mm; /* userspace tied to this vm */
        unsigned long nr_memslot_pages;
        /* The two memslot sets - active and inactive (per address space) */
-       struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
+       struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
        /* The current active memslot set for each address space */
-       struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
+       struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
        struct xarray vcpu_array;
        /*
         * Protected by slots_lock, but can be read outside if an
@@ -753,7 +782,7 @@ struct kvm {
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
-#ifdef CONFIG_HAVE_KVM_EVENTFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
        struct {
                spinlock_t        lock;
                struct list_head  items;
@@ -761,8 +790,8 @@ struct kvm {
                struct list_head  resampler_list;
                struct mutex      resampler_lock;
        } irqfds;
-       struct list_head ioeventfds;
 #endif
+       struct list_head ioeventfds;
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        refcount_t users_count;
@@ -778,17 +807,16 @@ struct kvm {
         * Update side is protected by irq_lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
-#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+
        struct hlist_head irq_ack_notifier_list;
 #endif
 
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_invalidate_seq;
        long mmu_invalidate_in_progress;
-       unsigned long mmu_invalidate_range_start;
-       unsigned long mmu_invalidate_range_end;
+       gfn_t mmu_invalidate_range_start;
+       gfn_t mmu_invalidate_range_end;
 #endif
        struct list_head devices;
        u64 manual_dirty_log_protect;
@@ -806,6 +834,10 @@ struct kvm {
 
 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
        struct notifier_block pm_notifier;
+#endif
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+       /* Protected by slots_locks (for writes) and RCU (for reads) */
+       struct xarray mem_attr_array;
 #endif
        char stats_id[KVM_STATS_NAME_SIZE];
 };
@@ -965,7 +997,7 @@ static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
 }
 #endif
 
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
 int kvm_irqfd_init(void);
 void kvm_irqfd_exit(void);
 #else
@@ -989,7 +1021,7 @@ void kvm_put_kvm_no_destroy(struct kvm *kvm);
 
 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
-       as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
+       as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                        lockdep_is_held(&kvm->slots_lock) ||
                        !refcount_read(&kvm->users_count));
@@ -1146,9 +1178,9 @@ enum kvm_mr_change {
 };
 
 int kvm_set_memory_region(struct kvm *kvm,
-                         const struct kvm_userspace_memory_region *mem);
+                         const struct kvm_userspace_memory_region2 *mem);
 int __kvm_set_memory_region(struct kvm *kvm,
-                           const struct kvm_userspace_memory_region *mem);
+                           const struct kvm_userspace_memory_region2 *mem);
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -1392,10 +1424,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 #endif
 
-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
-                             unsigned long end);
-void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
-                           unsigned long end);
+void kvm_mmu_invalidate_begin(struct kvm *kvm);
+void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
+void kvm_mmu_invalidate_end(struct kvm *kvm);
+bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 
 long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
@@ -1947,7 +1979,7 @@ extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
 extern const struct kvm_stats_header kvm_vcpu_stats_header;
 extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
 
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
 static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
 {
        if (unlikely(kvm->mmu_invalidate_in_progress))
@@ -1970,9 +2002,9 @@ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
        return 0;
 }
 
-static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
                                           unsigned long mmu_seq,
-                                          unsigned long hva)
+                                          gfn_t gfn)
 {
        lockdep_assert_held(&kvm->mmu_lock);
        /*
@@ -1981,10 +2013,20 @@ static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
         * that might be being invalidated. Note that it may include some false
         * positives, due to shortcuts when handing concurrent invalidations.
         */
-       if (unlikely(kvm->mmu_invalidate_in_progress) &&
-           hva >= kvm->mmu_invalidate_range_start &&
-           hva < kvm->mmu_invalidate_range_end)
-               return 1;
+       if (unlikely(kvm->mmu_invalidate_in_progress)) {
+               /*
+                * Dropping mmu_lock after bumping mmu_invalidate_in_progress
+                * but before updating the range is a KVM bug.
+                */
+               if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
+                                kvm->mmu_invalidate_range_end == INVALID_GPA))
+                       return 1;
+
+               if (gfn >= kvm->mmu_invalidate_range_start &&
+                   gfn < kvm->mmu_invalidate_range_end)
+                       return 1;
+       }
+
        if (kvm->mmu_invalidate_seq != mmu_seq)
                return 1;
        return 0;
@@ -2013,12 +2055,10 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 
 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
 
-#ifdef CONFIG_HAVE_KVM_EVENTFD
-
 void kvm_eventfd_init(struct kvm *kvm);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
 bool kvm_notify_irqfd_resampler(struct kvm *kvm,
@@ -2039,31 +2079,7 @@ static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
 {
        return false;
 }
-#endif
-
-#else
-
-static inline void kvm_eventfd_init(struct kvm *kvm) {}
-
-static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
-{
-       return -EINVAL;
-}
-
-static inline void kvm_irqfd_release(struct kvm *kvm) {}
-
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
-static inline void kvm_irq_routing_update(struct kvm *kvm)
-{
-}
-#endif
-
-static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
-{
-       return -ENOSYS;
-}
-
-#endif /* CONFIG_HAVE_KVM_EVENTFD */
+#endif /* CONFIG_HAVE_KVM_IRQCHIP */
 
 void kvm_arch_irq_routing_update(struct kvm *kvm);
 
@@ -2318,4 +2334,57 @@ static inline void kvm_account_pgtable_pages(void *virt, int nr)
 /* Max number of entries allowed for each kvm dirty ring */
 #define  KVM_DIRTY_RING_MAX_ENTRIES  65536
 
+static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
+                                                gpa_t gpa, gpa_t size,
+                                                bool is_write, bool is_exec,
+                                                bool is_private)
+{
+       vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
+       vcpu->run->memory_fault.gpa = gpa;
+       vcpu->run->memory_fault.size = size;
+
+       /* RWX flags are not (yet) defined or communicated to userspace. */
+       vcpu->run->memory_fault.flags = 0;
+       if (is_private)
+               vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
+}
+
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
+{
+       return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
+}
+
+bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+                                    unsigned long attrs);
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+                                       struct kvm_gfn_range *range);
+bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
+                                        struct kvm_gfn_range *range);
+
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+       return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) &&
+              kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
+}
+#else
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+       return false;
+}
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+
+#ifdef CONFIG_KVM_PRIVATE_MEM
+int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+                    gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+#else
+static inline int kvm_gmem_get_pfn(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot, gfn_t gfn,
+                                  kvm_pfn_t *pfn, int *max_order)
+{
+       KVM_BUG_ON(1, kvm);
+       return -EIO;
+}
+#endif /* CONFIG_KVM_PRIVATE_MEM */
+
 #endif
index 6f4737d5046a41181401c1be7a663f0d7eb76a95..9d1f7835d8c13917ad171297752e072c04bec1b3 100644 (file)
@@ -6,6 +6,7 @@
 struct kvm;
 struct kvm_async_pf;
 struct kvm_device_ops;
+struct kvm_gfn_range;
 struct kvm_interrupt;
 struct kvm_irq_routing_table;
 struct kvm_memory_slot;
index aa16dc2a8230fa50ebcb6c8005f8806922e0d958..4754b02d3a2c585e3c8ce65e43ca6136b823de83 100644 (file)
@@ -527,23 +527,6 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
        return led_cdev->trigger_data;
 }
 
-/**
- * led_trigger_rename_static - rename a trigger
- * @name: the new trigger name
- * @trig: the LED trigger to rename
- *
- * Change a LED trigger name by copying the string passed in
- * name into current trigger name, which MUST be large
- * enough for the new string.
- *
- * Note that name must NOT point to the same string used
- * during LED registration, as that could lead to races.
- *
- * This is meant to be used on triggers with statically
- * allocated name.
- */
-void led_trigger_rename_static(const char *name, struct led_trigger *trig);
-
 #define module_led_trigger(__led_trigger) \
        module_driver(__led_trigger, led_trigger_register, \
                      led_trigger_unregister)
@@ -588,6 +571,9 @@ enum led_trigger_netdev_modes {
        TRIGGER_NETDEV_LINK_10,
        TRIGGER_NETDEV_LINK_100,
        TRIGGER_NETDEV_LINK_1000,
+       TRIGGER_NETDEV_LINK_2500,
+       TRIGGER_NETDEV_LINK_5000,
+       TRIGGER_NETDEV_LINK_10000,
        TRIGGER_NETDEV_HALF_DUPLEX,
        TRIGGER_NETDEV_FULL_DUPLEX,
        TRIGGER_NETDEV_TX,
index 1dbb14daccfaf326af0c54c89ff61afb50e07982..26d68115afb826b65a9fd11ce329635161e39cca 100644 (file)
@@ -471,7 +471,7 @@ enum ata_completion_errors {
 
 /*
  * Link power management policy: If you alter this, you also need to
- * alter libata-scsi.c (for the ascii descriptions)
+ * alter libata-sata.c (for the ascii descriptions)
  */
 enum ata_lpm_policy {
        ATA_LPM_UNKNOWN,
index 185924c5637876a153957a3a206fe907dcc784a5..76458b6d53da7667b31fc3a80007bb1a609ec1d8 100644 (file)
@@ -315,9 +315,9 @@ LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
 LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
 LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
 LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
-LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock,
         sockptr_t optval, sockptr_t optlen, unsigned int len)
-LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock,
         struct sk_buff *skb, u32 *secid)
 LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
 LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
index 9b140272ee165b314744271ca90cf28008f8ddf9..9aae44efcfd4c988c34bd6c05e7b7d79d9aa649c 100644 (file)
@@ -5,7 +5,6 @@
 #include <mach/maple.h>
 
 struct device;
-extern struct bus_type maple_bus_type;
 
 /* Maple Bus command and response codes */
 enum maple_code {
index b0da04fe087bb8cb32de8f62a4a52abbaa4d5b80..34dfcc77f505aa39ad6b48f394578d542394dde5 100644 (file)
@@ -126,10 +126,11 @@ struct cmos_rtc_board_info {
 #endif /* ARCH_RTC_LOCATION */
 
 bool mc146818_does_rtc_work(void);
-int mc146818_get_time(struct rtc_time *time);
+int mc146818_get_time(struct rtc_time *time, int timeout);
 int mc146818_set_time(struct rtc_time *time);
 
 bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+                       int timeout,
                        void *param);
 
 #endif /* _MC146818RTC_H */
index 1e39d27bee418c1a7c0a6a966bbc249056f008fd..69e78190008271a94ebdabb9d944612c58acb7e1 100644 (file)
@@ -33,7 +33,7 @@ struct memory_dev_type {
        struct kref kref;
 };
 
-struct node_hmem_attrs;
+struct access_coordinate;
 
 #ifdef CONFIG_NUMA
 extern bool numa_demotion_enabled;
@@ -45,9 +45,9 @@ void clear_node_memory_type(int node, struct memory_dev_type *memtype);
 int register_mt_adistance_algorithm(struct notifier_block *nb);
 int unregister_mt_adistance_algorithm(struct notifier_block *nb);
 int mt_calc_adistance(int node, int *adist);
-int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
+int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
                             const char *source);
-int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist);
+int mt_perf_to_adistance(struct access_coordinate *perf, int *adist);
 #ifdef CONFIG_MIGRATION
 int next_demotion_node(int node);
 void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
@@ -126,13 +126,13 @@ static inline int mt_calc_adistance(int node, int *adist)
        return NOTIFY_DONE;
 }
 
-static inline int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
+static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
                                           const char *source)
 {
        return -EIO;
 }
 
-static inline int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist)
+static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
 {
        return -EIO;
 }
index 311f7d3d23236e3d60cfb27943cb107565838caa..54444ff2a5deacb94944c05f37bc7d1b1855de67 100644 (file)
@@ -405,7 +405,7 @@ enum max77693_haptic_reg {
        MAX77693_HAPTIC_REG_END,
 };
 
-/* max77693-pmic LSCNFG configuraton register */
+/* max77693-pmic LSCNFG configuration register */
 #define MAX77693_PMIC_LOW_SYS_MASK      0x80
 #define MAX77693_PMIC_LOW_SYS_SHIFT     7
 
index 0bc7454c4dbe31c18ab892189f09b7b4d0936c41..2fb4db67f1104a06711d19232d4b37b1196f3748 100644 (file)
@@ -198,7 +198,7 @@ enum max77843_irq_muic {
 #define MAX77843_MCONFIG_MEN_MASK              BIT(MCONFIG_MEN_SHIFT)
 #define MAX77843_MCONFIG_PDIV_MASK             (0x3 << MCONFIG_PDIV_SHIFT)
 
-/* Max77843 charger insterrupts */
+/* Max77843 charger interrupts */
 #define MAX77843_CHG_BYP_I                     BIT(0)
 #define MAX77843_CHG_BATP_I                    BIT(2)
 #define MAX77843_CHG_BAT_I                     BIT(3)
index 18363b773d070960c7db2f2141537c950f8bc5e0..cb99e16ca9473698f86aa5769a210b6b001355c3 100644 (file)
@@ -10,7 +10,7 @@
 #ifndef __SI476X_PLATFORM_H__
 #define __SI476X_PLATFORM_H__
 
-/* It is possible to select one of the four adresses using pins A0
+/* It is possible to select one of the four addresses using pins A0
  * and A1 on SI476x */
 #define SI476X_I2C_ADDR_1      0x60
 #define SI476X_I2C_ADDR_2      0x61
index 701925db75b3f6dbd3af9a20566325f57518fa4c..f67ef0a4e041c1b0e391c8135219758f087b74ce 100644 (file)
 #define VDDCTRL_ST_SHIFT                                 0
 
 
-/*Register VDDCTRL_OP  (0x28) bit definitios */
+/*Register VDDCTRL_OP  (0x28) bit definitions */
 #define VDDCTRL_OP_CMD_MASK                              0x80
 #define VDDCTRL_OP_CMD_SHIFT                             7
 #define VDDCTRL_OP_SEL_MASK                              0x7F
index 039943ec4d4e7240425c961adfe3d4b11decfc1e..d0f9b522f328bf5a4a846fd46be84a4705ab5657 100644 (file)
@@ -266,6 +266,7 @@ struct mhi_event_config {
  * struct mhi_controller_config - Root MHI controller configuration
  * @max_channels: Maximum number of channels supported
  * @timeout_ms: Timeout value for operations. 0 means use default
+ * @ready_timeout_ms: Timeout value for waiting device to be ready (optional)
  * @buf_len: Size of automatically allocated buffers. 0 means use default
  * @num_channels: Number of channels defined in @ch_cfg
  * @ch_cfg: Array of defined channels
@@ -277,6 +278,7 @@ struct mhi_event_config {
 struct mhi_controller_config {
        u32 max_channels;
        u32 timeout_ms;
+       u32 ready_timeout_ms;
        u32 buf_len;
        u32 num_channels;
        const struct mhi_channel_config *ch_cfg;
@@ -330,6 +332,7 @@ struct mhi_controller_config {
  * @pm_mutex: Mutex for suspend/resume operation
  * @pm_lock: Lock for protecting MHI power management state
  * @timeout_ms: Timeout in ms for state transitions
+ * @ready_timeout_ms: Timeout in ms for waiting device to be ready (optional)
  * @pm_state: MHI power management state
  * @db_access: DB access states
  * @ee: MHI device execution environment
@@ -419,6 +422,7 @@ struct mhi_controller {
        struct mutex pm_mutex;
        rwlock_t pm_lock;
        u32 timeout_ms;
+       u32 ready_timeout_ms;
        u32 pm_state;
        u32 db_access;
        enum mhi_ee_type ee;
index f198a8ac7ee72c71bae107d4ea9b070a85a86008..11bf3212f7822dea0979c249b09f9ae249c3bbab 100644 (file)
@@ -49,6 +49,27 @@ struct mhi_ep_db_info {
        u32 status;
 };
 
+/**
+ * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
+ * @mhi_dev: MHI device associated with this buffer
+ * @dev_addr: Address of the buffer in endpoint
+ * @host_addr: Address of the bufffer in host
+ * @size: Size of the buffer
+ * @code: Transfer completion code
+ * @cb: Callback to be executed by controller drivers after transfer completion (async)
+ * @cb_buf: Opaque buffer to be passed to the callback
+ */
+struct mhi_ep_buf_info {
+       struct mhi_ep_device *mhi_dev;
+       void *dev_addr;
+       u64 host_addr;
+       size_t size;
+       int code;
+
+       void (*cb)(struct mhi_ep_buf_info *buf_info);
+       void *cb_buf;
+};
+
 /**
  * struct mhi_ep_cntrl - MHI Endpoint controller structure
  * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
@@ -82,8 +103,10 @@ struct mhi_ep_db_info {
  * @raise_irq: CB function for raising IRQ to the host
  * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
  * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
- * @read_from_host: CB function for reading from host memory from endpoint
- * @write_to_host: CB function for writing to host memory from endpoint
+ * @read_sync: CB function for reading from host memory synchronously
+ * @write_sync: CB function for writing to host memory synchronously
+ * @read_async: CB function for reading from host memory asynchronously
+ * @write_async: CB function for writing to host memory asynchronously
  * @mhi_state: MHI Endpoint state
  * @max_chan: Maximum channels supported by the endpoint controller
  * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
@@ -128,14 +151,19 @@ struct mhi_ep_cntrl {
        struct work_struct reset_work;
        struct work_struct cmd_ring_work;
        struct work_struct ch_ring_work;
+       struct kmem_cache *ring_item_cache;
+       struct kmem_cache *ev_ring_el_cache;
+       struct kmem_cache *tre_buf_cache;
 
        void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
        int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
                         void __iomem **virt, size_t size);
        void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
                           void __iomem *virt, size_t size);
-       int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
-       int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
+       int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+       int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+       int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+       int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
 
        enum mhi_state mhi_state;
 
index 8c55ff351e5f2eed3416b0b59dd7e193f06bec02..41f03b352401e7556ddf92f0b9a53da4918a291a 100644 (file)
@@ -681,6 +681,7 @@ struct mlx5e_resources {
                struct mlx5_sq_bfreg       bfreg;
 #define MLX5_MAX_NUM_TC 8
                u32                        tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
+               bool                       tisn_valid;
        } hw_objs;
        struct net_device *uplink_netdev;
        struct mutex uplink_netdev_lock;
index 6f7725238abc2fcfeaf471e988b0035df25b9b87..3fb428ce7d1c7c0dd57969e8b82e227a4efb5d41 100644 (file)
@@ -132,6 +132,7 @@ struct mlx5_flow_handle;
 
 enum {
        FLOW_CONTEXT_HAS_TAG = BIT(0),
+       FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
 };
 
 struct mlx5_flow_context {
index 7235f3eaed8bee8ad09a1dac4225960f354862d9..c726f90ab752452cbe9726462ecedd72b21656f6 100644 (file)
@@ -1241,7 +1241,8 @@ struct mlx5_ifc_virtio_emulation_cap_bits {
 
        u8         reserved_at_c0[0x13];
        u8         desc_group_mkey_supported[0x1];
-       u8         reserved_at_d4[0xc];
+       u8         freeze_to_rdy_supported[0x1];
+       u8         reserved_at_d5[0xb];
 
        u8         reserved_at_e0[0x20];
 
@@ -3575,7 +3576,7 @@ struct mlx5_ifc_flow_context_bits {
        u8         action[0x10];
 
        u8         extended_destination[0x1];
-       u8         reserved_at_81[0x1];
+       u8         uplink_hairpin_en[0x1];
        u8         flow_source[0x2];
        u8         encrypt_decrypt_type[0x4];
        u8         destination_list_size[0x18];
@@ -4035,8 +4036,13 @@ struct mlx5_ifc_nic_vport_context_bits {
        u8         affiliation_criteria[0x4];
        u8         affiliated_vhca_id[0x10];
 
-       u8         reserved_at_60[0xd0];
+       u8         reserved_at_60[0xa0];
+
+       u8         reserved_at_100[0x1];
+       u8         sd_group[0x3];
+       u8         reserved_at_104[0x1c];
 
+       u8         reserved_at_120[0x10];
        u8         mtu[0x10];
 
        u8         system_image_guid[0x40];
@@ -10121,8 +10127,7 @@ struct mlx5_ifc_mpir_reg_bits {
        u8         reserved_at_20[0x20];
 
        u8         local_port[0x8];
-       u8         reserved_at_28[0x15];
-       u8         sd_group[0x3];
+       u8         reserved_at_28[0x18];
 
        u8         reserved_at_60[0x20];
 };
index b86d51a855f6709762e2da6862e387eaf56a99e1..40371c916cf94d13030c6b6bf83c6118d46f0caa 100644 (file)
@@ -145,6 +145,10 @@ enum {
        MLX5_VIRTQ_MODIFY_MASK_STATE                    = (u64)1 << 0,
        MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS      = (u64)1 << 3,
        MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+       MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS           = (u64)1 << 6,
+       MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX       = (u64)1 << 7,
+       MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX        = (u64)1 << 8,
+       MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY            = (u64)1 << 11,
        MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY          = (u64)1 << 14,
 };
 
index fbb9bf4478894c72e0e4a3f4d6404008611cbca0..c36cc6d829267e8b795c5c1ea7f71c1e28dcdaed 100644 (file)
@@ -72,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
 int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
                                    u16 vport, u64 node_guid);
index b2d3a88a34d127733f1e54043d5c08814a8af28d..8b611e13153e68d944126f7cf57bca8a2bc69290 100644 (file)
@@ -730,6 +730,7 @@ struct mm_cid {
 #endif
 
 struct kioctx_table;
+struct iommu_mm_data;
 struct mm_struct {
        struct {
                /*
@@ -941,8 +942,8 @@ struct mm_struct {
 #endif
                struct work_struct async_put_work;
 
-#ifdef CONFIG_IOMMU_SVA
-               u32 pasid;
+#ifdef CONFIG_IOMMU_MM_DATA
+               struct iommu_mm_data *iommu_mm;
 #endif
 #ifdef CONFIG_KSM
                /*
index 40d94411d49204e7276a6ad9554eb17335fd4577..dc7048824be81d628ca12f0874c1a7508da0d5c1 100644 (file)
@@ -156,6 +156,7 @@ calc_vm_flag_bits(unsigned long flags)
        return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
               _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
               _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      ) |
+              _calc_vm_trans(flags, MAP_STACK,      VM_NOHUGEPAGE) |
               arch_calc_vm_flag_bits(flags);
 }
 
index 4ed33b12782151632e36aa114039cb4a0916fe06..a497f189d98818bcda37458746ebb2bded7826e4 100644 (file)
@@ -2013,9 +2013,9 @@ static inline int pfn_valid(unsigned long pfn)
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        ms = __pfn_to_section(pfn);
-       rcu_read_lock();
+       rcu_read_lock_sched();
        if (!valid_section(ms)) {
-               rcu_read_unlock();
+               rcu_read_unlock_sched();
                return 0;
        }
        /*
@@ -2023,7 +2023,7 @@ static inline int pfn_valid(unsigned long pfn)
         * the entire section-sized span.
         */
        ret = early_section(ms) || pfn_section_valid(ms, pfn);
-       rcu_read_unlock();
+       rcu_read_unlock_sched();
 
        return ret;
 }
index 79184948fab4705d47e2fa47a8c7595f77fc43f3..ac577699edfdb9a4651b3bd9dd48edce2bd064f4 100644 (file)
@@ -35,8 +35,6 @@ enum turris_mox_module_id {
 
 #define MOXTET_NIRQS   16
 
-extern struct bus_type moxtet_type;
-
 struct moxtet {
        struct device                   *dev;
        struct mutex                    lock;
index 3100371b5e3218a92039b363e3508b5c69166238..74e0cc14ebf86bb9444b54d628908f976125ff3f 100644 (file)
@@ -66,6 +66,7 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne
 extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
 extern void done_path_create(struct path *, struct dentry *);
 extern struct dentry *kern_path_locked(const char *, struct path *);
+extern struct dentry *user_path_locked_at(int , const char __user *, struct path *);
 int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
                           struct path *parent, struct qstr *last, int *type,
                           const struct path *root);
index e8c350a3ade153d852bec011dbd3c72a352d319d..e9f4f845d760afafbfb6e45b220dfb6919a29779 100644 (file)
@@ -186,6 +186,8 @@ struct ip_set_type_variant {
        /* Return true if "b" set is the same as "a"
         * according to the create set parameters */
        bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+       /* Cancel ongoing garbage collectors before destroying the set*/
+       void (*cancel_gc)(struct ip_set *set);
        /* Region-locking is used */
        bool region_lock;
 };
@@ -242,6 +244,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type);
 
 /* A generic IP set */
 struct ip_set {
+       /* For call_cru in destroy */
+       struct rcu_head rcu;
        /* The name of the set */
        char name[IPSET_MAXNAMELEN];
        /* Lock protecting the set data */
index f980edfdd2783ed954c1043324928bccd6fbfa65..743475ca7e9d5132f48ee78c46b485e1018a08b3 100644 (file)
@@ -42,7 +42,7 @@ static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
        if (!nf_bridge)
                return 0;
 
-       return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
+       return nf_bridge->physinif;
 }
 
 static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
@@ -56,11 +56,11 @@ static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
 }
 
 static inline struct net_device *
-nf_bridge_get_physindev(const struct sk_buff *skb)
+nf_bridge_get_physindev(const struct sk_buff *skb, struct net *net)
 {
        const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 
-       return nf_bridge ? nf_bridge->physindev : NULL;
+       return nf_bridge ? dev_get_by_index_rcu(net, nf_bridge->physinif) : NULL;
 }
 
 static inline struct net_device *
index b11a84f6c32b79ea1efcfa692f3e0326c807f5e2..100cbb261269d1921bff6e616e86223ee9e5512c 100644 (file)
@@ -109,11 +109,18 @@ static inline int wait_on_page_fscache_killable(struct page *page)
        return folio_wait_private_2_killable(page_folio(page));
 }
 
+/* Marks used on xarray-based buffers */
+#define NETFS_BUF_PUT_MARK     XA_MARK_0       /* - Page needs putting  */
+#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1     /* - Page needs wb/dirty flag wrangling */
+
 enum netfs_io_source {
        NETFS_FILL_WITH_ZEROES,
        NETFS_DOWNLOAD_FROM_SERVER,
        NETFS_READ_FROM_CACHE,
        NETFS_INVALID_READ,
+       NETFS_UPLOAD_TO_SERVER,
+       NETFS_WRITE_TO_CACHE,
+       NETFS_INVALID_WRITE,
 } __mode(byte);
 
 typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
@@ -129,8 +136,56 @@ struct netfs_inode {
        struct fscache_cookie   *cache;
 #endif
        loff_t                  remote_i_size;  /* Size of the remote file */
+       loff_t                  zero_point;     /* Size after which we assume there's no data
+                                                * on the server */
+       unsigned long           flags;
+#define NETFS_ICTX_ODIRECT     0               /* The file has DIO in progress */
+#define NETFS_ICTX_UNBUFFERED  1               /* I/O should not use the pagecache */
+#define NETFS_ICTX_WRITETHROUGH        2               /* Write-through caching */
+#define NETFS_ICTX_NO_WRITE_STREAMING  3       /* Don't engage in write-streaming */
+};
+
+/*
+ * A netfs group - for instance a ceph snap.  This is marked on dirty pages and
+ * pages marked with a group must be flushed before they can be written under
+ * the domain of another group.
+ */
+struct netfs_group {
+       refcount_t              ref;
+       void (*free)(struct netfs_group *netfs_group);
 };
 
+/*
+ * Information about a dirty page (attached only if necessary).
+ * folio->private
+ */
+struct netfs_folio {
+       struct netfs_group      *netfs_group;   /* Filesystem's grouping marker (or NULL). */
+       unsigned int            dirty_offset;   /* Write-streaming dirty data offset */
+       unsigned int            dirty_len;      /* Write-streaming dirty data length */
+};
+#define NETFS_FOLIO_INFO       0x1UL   /* OR'd with folio->private. */
+
+static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+{
+       void *priv = folio_get_private(folio);
+
+       if ((unsigned long)priv & NETFS_FOLIO_INFO)
+               return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
+       return NULL;
+}
+
+static inline struct netfs_group *netfs_folio_group(struct folio *folio)
+{
+       struct netfs_folio *finfo;
+       void *priv = folio_get_private(folio);
+
+       finfo = netfs_folio_info(folio);
+       if (finfo)
+               return finfo->netfs_group;
+       return priv;
+}
+
 /*
  * Resources required to do operations on a cache.
  */
@@ -143,17 +198,24 @@ struct netfs_cache_resources {
 };
 
 /*
- * Descriptor for a single component subrequest.
+ * Descriptor for a single component subrequest.  Each operation represents an
+ * individual read/write from/to a server, a cache, a journal, etc..
+ *
+ * The buffer iterator is persistent for the life of the subrequest struct and
+ * the pages it points to can be relied on to exist for the duration.
  */
 struct netfs_io_subrequest {
        struct netfs_io_request *rreq;          /* Supervising I/O request */
+       struct work_struct      work;
        struct list_head        rreq_link;      /* Link in rreq->subrequests */
+       struct iov_iter         io_iter;        /* Iterator for this subrequest */
        loff_t                  start;          /* Where to start the I/O */
        size_t                  len;            /* Size of the I/O */
        size_t                  transferred;    /* Amount of data transferred */
        refcount_t              ref;
        short                   error;          /* 0 or error that occurred */
        unsigned short          debug_index;    /* Index in list (for debugging output) */
+       unsigned int            max_nr_segs;    /* 0 or max number of segments in an iterator */
        enum netfs_io_source    source;         /* Where to read from/write to */
        unsigned long           flags;
 #define NETFS_SREQ_COPY_TO_CACHE       0       /* Set if should copy the data to the cache */
@@ -168,6 +230,13 @@ enum netfs_io_origin {
        NETFS_READAHEAD,                /* This read was triggered by readahead */
        NETFS_READPAGE,                 /* This read is a synchronous read */
        NETFS_READ_FOR_WRITE,           /* This read is to prepare a write */
+       NETFS_WRITEBACK,                /* This write was triggered by writepages */
+       NETFS_WRITETHROUGH,             /* This write was made by netfs_perform_write() */
+       NETFS_LAUNDER_WRITE,            /* This is triggered by ->launder_folio() */
+       NETFS_UNBUFFERED_WRITE,         /* This is an unbuffered write */
+       NETFS_DIO_READ,                 /* This is a direct I/O read */
+       NETFS_DIO_WRITE,                /* This is a direct I/O write */
+       nr__netfs_io_origin
 } __mode(byte);
 
 /*
@@ -175,19 +244,34 @@ enum netfs_io_origin {
  * operations to a variety of data stores and then stitch the result together.
  */
 struct netfs_io_request {
-       struct work_struct      work;
+       union {
+               struct work_struct work;
+               struct rcu_head rcu;
+       };
        struct inode            *inode;         /* The file being accessed */
        struct address_space    *mapping;       /* The mapping being accessed */
+       struct kiocb            *iocb;          /* AIO completion vector */
        struct netfs_cache_resources cache_resources;
+       struct list_head        proc_link;      /* Link in netfs_iorequests */
        struct list_head        subrequests;    /* Contributory I/O operations */
+       struct iov_iter         iter;           /* Unencrypted-side iterator */
+       struct iov_iter         io_iter;        /* I/O (Encrypted-side) iterator */
        void                    *netfs_priv;    /* Private data for the netfs */
+       struct bio_vec          *direct_bv;     /* DIO buffer list (when handling iovec-iter) */
+       unsigned int            direct_bv_count; /* Number of elements in direct_bv[] */
        unsigned int            debug_id;
+       unsigned int            rsize;          /* Maximum read size (0 for none) */
+       unsigned int            wsize;          /* Maximum write size (0 for none) */
+       unsigned int            subreq_counter; /* Next subreq->debug_index */
        atomic_t                nr_outstanding; /* Number of ops in progress */
        atomic_t                nr_copy_ops;    /* Number of copy-to-cache ops in progress */
        size_t                  submitted;      /* Amount submitted for I/O so far */
        size_t                  len;            /* Length of the request */
+       size_t                  upper_len;      /* Length can be extended to here */
+       size_t                  transferred;    /* Amount to be indicated as transferred */
        short                   error;          /* 0 or error that occurred */
        enum netfs_io_origin    origin;         /* Origin of the request */
+       bool                    direct_bv_unpin; /* T if direct_bv[] must be unpinned */
        loff_t                  i_size;         /* Size of the file */
        loff_t                  start;          /* Start position */
        pgoff_t                 no_unlock_folio; /* Don't unlock this folio after read */
@@ -199,17 +283,25 @@ struct netfs_io_request {
 #define NETFS_RREQ_DONT_UNLOCK_FOLIOS  3       /* Don't unlock the folios on completion */
 #define NETFS_RREQ_FAILED              4       /* The request failed */
 #define NETFS_RREQ_IN_PROGRESS         5       /* Unlocked when the request completes */
+#define NETFS_RREQ_WRITE_TO_CACHE      7       /* Need to write to the cache */
+#define NETFS_RREQ_UPLOAD_TO_SERVER    8       /* Need to write to the server */
+#define NETFS_RREQ_NONBLOCK            9       /* Don't block if possible (O_NONBLOCK) */
+#define NETFS_RREQ_BLOCKED             10      /* We blocked */
        const struct netfs_request_ops *netfs_ops;
+       void (*cleanup)(struct netfs_io_request *req);
 };
 
 /*
  * Operations the network filesystem can/must provide to the helpers.
  */
 struct netfs_request_ops {
+       unsigned int    io_request_size;        /* Alloc size for netfs_io_request struct */
+       unsigned int    io_subrequest_size;     /* Alloc size for netfs_io_subrequest struct */
        int (*init_request)(struct netfs_io_request *rreq, struct file *file);
        void (*free_request)(struct netfs_io_request *rreq);
-       int (*begin_cache_operation)(struct netfs_io_request *rreq);
+       void (*free_subrequest)(struct netfs_io_subrequest *rreq);
 
+       /* Read request handling */
        void (*expand_readahead)(struct netfs_io_request *rreq);
        bool (*clamp_length)(struct netfs_io_subrequest *subreq);
        void (*issue_read)(struct netfs_io_subrequest *subreq);
@@ -217,6 +309,14 @@ struct netfs_request_ops {
        int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
                                 struct folio **foliop, void **_fsdata);
        void (*done)(struct netfs_io_request *rreq);
+
+       /* Modification handling */
+       void (*update_i_size)(struct inode *inode, loff_t i_size);
+
+       /* Write request handling */
+       void (*create_write_requests)(struct netfs_io_request *wreq,
+                                     loff_t start, size_t len);
+       void (*invalidate_cache)(struct netfs_io_request *wreq);
 };
 
 /*
@@ -229,8 +329,7 @@ enum netfs_read_from_hole {
 };
 
 /*
- * Table of operations for access to a cache.  This is obtained by
- * rreq->ops->begin_cache_operation().
+ * Table of operations for access to a cache.
  */
 struct netfs_cache_ops {
        /* End an operation */
@@ -265,8 +364,8 @@ struct netfs_cache_ops {
         * actually do.
         */
        int (*prepare_write)(struct netfs_cache_resources *cres,
-                            loff_t *_start, size_t *_len, loff_t i_size,
-                            bool no_space_allocated_yet);
+                            loff_t *_start, size_t *_len, size_t upper_len,
+                            loff_t i_size, bool no_space_allocated_yet);
 
        /* Prepare an on-demand read operation, shortening it to a cached/uncached
         * boundary as appropriate.
@@ -284,22 +383,62 @@ struct netfs_cache_ops {
                               loff_t *_data_start, size_t *_data_len);
 };
 
+/* High-level read API. */
+ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+
+/* High-level write API */
+ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+                           struct netfs_group *netfs_group);
+ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
+                                        struct netfs_group *netfs_group);
+ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from);
+ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+/* Address operations API */
 struct readahead_control;
 void netfs_readahead(struct readahead_control *);
 int netfs_read_folio(struct file *, struct folio *);
 int netfs_write_begin(struct netfs_inode *, struct file *,
-               struct address_space *, loff_t pos, unsigned int len,
-               struct folio **, void **fsdata);
-
+                     struct address_space *, loff_t pos, unsigned int len,
+                     struct folio **, void **fsdata);
+int netfs_writepages(struct address_space *mapping,
+                    struct writeback_control *wbc);
+bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
+int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
+void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+bool netfs_release_folio(struct folio *folio, gfp_t gfp);
+int netfs_launder_folio(struct folio *folio);
+
+/* VMA operations API. */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
+
+/* (Sub)request management API. */
 void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
                          enum netfs_sreq_ref_trace what);
 void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
                          bool was_async, enum netfs_sreq_ref_trace what);
-void netfs_stats_show(struct seq_file *);
 ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
                                struct iov_iter *new,
                                iov_iter_extraction_t extraction_flags);
+size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+                       size_t max_size, size_t max_segs);
+struct netfs_io_subrequest *netfs_create_write_request(
+       struct netfs_io_request *wreq, enum netfs_io_source dest,
+       loff_t start, size_t len, work_func_t worker);
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+                                      bool was_async);
+void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
+
+int netfs_start_io_read(struct inode *inode);
+void netfs_end_io_read(struct inode *inode);
+int netfs_start_io_write(struct inode *inode);
+void netfs_end_io_write(struct inode *inode);
+int netfs_start_io_direct(struct inode *inode);
+void netfs_end_io_direct(struct inode *inode);
 
 /**
  * netfs_inode - Get the netfs inode context from the inode
@@ -317,30 +456,44 @@ static inline struct netfs_inode *netfs_inode(struct inode *inode)
  * netfs_inode_init - Initialise a netfslib inode context
  * @ctx: The netfs inode to initialise
  * @ops: The netfs's operations list
+ * @use_zero_point: True to use the zero_point read optimisation
  *
  * Initialise the netfs library context struct.  This is expected to follow on
  * directly from the VFS inode struct.
  */
 static inline void netfs_inode_init(struct netfs_inode *ctx,
-                                   const struct netfs_request_ops *ops)
+                                   const struct netfs_request_ops *ops,
+                                   bool use_zero_point)
 {
        ctx->ops = ops;
        ctx->remote_i_size = i_size_read(&ctx->inode);
+       ctx->zero_point = LLONG_MAX;
+       ctx->flags = 0;
 #if IS_ENABLED(CONFIG_FSCACHE)
        ctx->cache = NULL;
 #endif
+       /* ->releasepage() drives zero_point */
+       if (use_zero_point) {
+               ctx->zero_point = ctx->remote_i_size;
+               mapping_set_release_always(ctx->inode.i_mapping);
+       }
 }
 
 /**
  * netfs_resize_file - Note that a file got resized
  * @ctx: The netfs inode being resized
  * @new_i_size: The new file size
+ * @changed_on_server: The change was applied to the server
  *
  * Inform the netfs lib that a file got resized so that it can adjust its state.
  */
-static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size)
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size,
+                                    bool changed_on_server)
 {
-       ctx->remote_i_size = new_i_size;
+       if (changed_on_server)
+               ctx->remote_i_size = new_i_size;
+       if (new_i_size < ctx->zero_point)
+               ctx->zero_point = new_i_size;
 }
 
 /**
index 427a5975cf405045ded741ed4c1b4b9eaca1ffec..25b66d705ee2ec754021d5ea2e2f1bce15ef7dbe 100644 (file)
 #include <linux/list.h>
 
 /**
- * struct node_hmem_attrs - heterogeneous memory performance attributes
+ * struct access_coordinate - generic performance coordinates container
  *
  * @read_bandwidth:    Read bandwidth in MB/s
  * @write_bandwidth:   Write bandwidth in MB/s
  * @read_latency:      Read latency in nanoseconds
  * @write_latency:     Write latency in nanoseconds
  */
-struct node_hmem_attrs {
+struct access_coordinate {
        unsigned int read_bandwidth;
        unsigned int write_bandwidth;
        unsigned int read_latency;
@@ -65,7 +65,7 @@ struct node_cache_attrs {
 
 #ifdef CONFIG_HMEM_REPORTING
 void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
+void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
                         unsigned access);
 #else
 static inline void node_add_cache(unsigned int nid,
@@ -74,7 +74,7 @@ static inline void node_add_cache(unsigned int nid,
 }
 
 static inline void node_set_perf_attrs(unsigned int nid,
-                                      struct node_hmem_attrs *hmem_attrs,
+                                      struct access_coordinate *coord,
                                       unsigned access)
 {
 }
index 44325c068b6a01eb81274fa65767dd9298d35643..bc605ec4a3fd06f9145242827fe310a760f00122 100644 (file)
@@ -20,7 +20,6 @@
 #define NVMF_TRSVCID_SIZE      32
 #define NVMF_TRADDR_SIZE       256
 #define NVMF_TSAS_SIZE         256
-#define NVMF_AUTH_HASH_LEN     64
 
 #define NVME_DISC_SUBSYS_NAME  "nqn.2014-08.org.nvmexpress.discovery"
 
@@ -817,12 +816,6 @@ struct nvme_reservation_status_ext {
        struct nvme_registered_ctrl_ext regctl_eds[];
 };
 
-enum nvme_async_event_type {
-       NVME_AER_TYPE_ERROR     = 0,
-       NVME_AER_TYPE_SMART     = 1,
-       NVME_AER_TYPE_NOTICE    = 2,
-};
-
 /* I/O commands */
 
 enum nvme_opcode {
@@ -1819,7 +1812,7 @@ struct nvme_command {
        };
 };
 
-static inline bool nvme_is_fabrics(struct nvme_command *cmd)
+static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
 {
        return cmd->common.opcode == nvme_fabrics_command;
 }
@@ -1838,7 +1831,7 @@ struct nvme_error_slot {
        __u8            resv2[24];
 };
 
-static inline bool nvme_is_write(struct nvme_command *cmd)
+static inline bool nvme_is_write(const struct nvme_command *cmd)
 {
        /*
         * What a mess...
index 6ec4b9743e25d4463fafe5d44d51ab89eacc316d..34c0e58dfa26636d2804fcc7e0bc4a875ee73dae 100644 (file)
@@ -81,6 +81,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
                            struct nvmem_cell_info *info, void *buf);
 
 const char *nvmem_dev_name(struct nvmem_device *nvmem);
+size_t nvmem_dev_size(struct nvmem_device *nvmem);
 
 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
                            size_t nentries);
@@ -247,7 +248,6 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
                                     const char *id);
 struct nvmem_device *of_nvmem_device_get(struct device_node *np,
                                         const char *name);
-struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem);
 #else
 static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
                                                   const char *id)
@@ -260,12 +260,6 @@ static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
 {
        return ERR_PTR(-EOPNOTSUPP);
 }
-
-static inline struct device_node *
-of_nvmem_layout_get_container(struct nvmem_device *nvmem)
-{
-       return NULL;
-}
 #endif /* CONFIG_NVMEM && CONFIG_OF */
 
 #endif  /* ifndef _LINUX_NVMEM_CONSUMER_H */
index e3930835235ba763493318985f917d52ef3861f5..f0ba0e03218f965f732a499c99aff966029c2a48 100644 (file)
@@ -9,6 +9,7 @@
 #ifndef _LINUX_NVMEM_PROVIDER_H
 #define _LINUX_NVMEM_PROVIDER_H
 
+#include <linux/device.h>
 #include <linux/device/driver.h>
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -83,6 +84,8 @@ struct nvmem_cell_info {
  * @cells:     Optional array of pre-defined NVMEM cells.
  * @ncells:    Number of elements in cells.
  * @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax.
+ * @fixup_dt_cell_info: Will be called before a cell is added. Can be
+ *             used to modify the nvmem_cell_info.
  * @keepout:   Optional array of keepout ranges (sorted ascending by start).
  * @nkeepout:  Number of elements in the keepout array.
  * @type:      Type of the nvmem storage
@@ -113,6 +116,8 @@ struct nvmem_config {
        const struct nvmem_cell_info    *cells;
        int                     ncells;
        bool                    add_legacy_fixed_of_cells;
+       void (*fixup_dt_cell_info)(struct nvmem_device *nvmem,
+                                  struct nvmem_cell_info *cell);
        const struct nvmem_keepout *keepout;
        unsigned int            nkeepout;
        enum nvmem_type         type;
@@ -154,15 +159,11 @@ struct nvmem_cell_table {
 /**
  * struct nvmem_layout - NVMEM layout definitions
  *
- * @name:              Layout name.
- * @of_match_table:    Open firmware match table.
+ * @dev:               Device-model layout device.
+ * @nvmem:             The underlying NVMEM device
  * @add_cells:         Will be called if a nvmem device is found which
  *                     has this layout. The function will add layout
  *                     specific cells with nvmem_add_one_cell().
- * @fixup_cell_info:   Will be called before a cell is added. Can be
- *                     used to modify the nvmem_cell_info.
- * @owner:             Pointer to struct module.
- * @node:              List node.
  *
  * A nvmem device can hold a well defined structure which can just be
  * evaluated during runtime. For example a TLV list, or a list of "name=val"
@@ -170,17 +171,15 @@ struct nvmem_cell_table {
  * cells.
  */
 struct nvmem_layout {
-       const char *name;
-       const struct of_device_id *of_match_table;
-       int (*add_cells)(struct device *dev, struct nvmem_device *nvmem,
-                        struct nvmem_layout *layout);
-       void (*fixup_cell_info)(struct nvmem_device *nvmem,
-                               struct nvmem_layout *layout,
-                               struct nvmem_cell_info *cell);
-
-       /* private */
-       struct module *owner;
-       struct list_head node;
+       struct device dev;
+       struct nvmem_device *nvmem;
+       int (*add_cells)(struct nvmem_layout *layout);
+};
+
+struct nvmem_layout_driver {
+       struct device_driver driver;
+       int (*probe)(struct nvmem_layout *layout);
+       void (*remove)(struct nvmem_layout *layout);
 };
 
 #if IS_ENABLED(CONFIG_NVMEM)
@@ -197,13 +196,14 @@ void nvmem_del_cell_table(struct nvmem_cell_table *table);
 int nvmem_add_one_cell(struct nvmem_device *nvmem,
                       const struct nvmem_cell_info *info);
 
-int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner);
-#define nvmem_layout_register(layout) \
-       __nvmem_layout_register(layout, THIS_MODULE)
+int nvmem_layout_register(struct nvmem_layout *layout);
 void nvmem_layout_unregister(struct nvmem_layout *layout);
 
-const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
-                                       struct nvmem_layout *layout);
+int nvmem_layout_driver_register(struct nvmem_layout_driver *drv);
+void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv);
+#define module_nvmem_layout_driver(__nvmem_layout_driver)              \
+       module_driver(__nvmem_layout_driver, nvmem_layout_driver_register, \
+                     nvmem_layout_driver_unregister)
 
 #else
 
@@ -235,17 +235,27 @@ static inline int nvmem_layout_register(struct nvmem_layout *layout)
 
 static inline void nvmem_layout_unregister(struct nvmem_layout *layout) {}
 
-static inline const void *
-nvmem_layout_get_match_data(struct nvmem_device *nvmem,
-                           struct nvmem_layout *layout)
+#endif /* CONFIG_NVMEM */
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+
+/**
+ * of_nvmem_layout_get_container() - Get OF node of layout container
+ *
+ * @nvmem: nvmem device
+ *
+ * Return: a node pointer with refcount incremented or NULL if no
+ * container exists. Use of_node_put() on it when done.
+ */
+struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem);
+
+#else  /* CONFIG_NVMEM && CONFIG_OF */
+
+static inline struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
 {
        return NULL;
 }
 
-#endif /* CONFIG_NVMEM */
-
-#define module_nvmem_layout_driver(__layout_driver)            \
-       module_driver(__layout_driver, nvmem_layout_register,   \
-                     nvmem_layout_unregister)
+#endif /* CONFIG_NVMEM && CONFIG_OF */
 
 #endif  /* ifndef _LINUX_NVMEM_PROVIDER_H */
index 2c7a3d4bc775b95f7092a181a75a2d6152e5a96d..9042bca5bb848c5fd4239565aaac679fc951e754 100644 (file)
@@ -2,10 +2,7 @@
 #ifndef _LINUX_OF_DEVICE_H
 #define _LINUX_OF_DEVICE_H
 
-#include <linux/platform_device.h>
-#include <linux/of_platform.h> /* temporary until merge */
-
-#include <linux/of.h>
+#include <linux/device/driver.h>
 
 struct device;
 struct of_device_id;
@@ -40,6 +37,9 @@ static inline int of_dma_configure(struct device *dev,
 {
        return of_dma_configure_id(dev, np, force_dma, NULL);
 }
+
+void of_device_make_bus_id(struct device *dev);
+
 #else /* CONFIG_OF */
 
 static inline int of_driver_match_device(struct device *dev,
@@ -82,6 +82,9 @@ static inline int of_dma_configure(struct device *dev,
 {
        return 0;
 }
+
+static inline void of_device_make_bus_id(struct device *dev) {}
+
 #endif /* CONFIG_OF */
 
 #endif /* _LINUX_OF_DEVICE_H */
index 9a5e6b410dd2fb154c40da4139514c0945f6e370..e61cbbe12dac6f40d739be9b77c2bc755fead974 100644 (file)
@@ -8,20 +8,19 @@ struct iommu_ops;
 
 #ifdef CONFIG_OF_IOMMU
 
-extern const struct iommu_ops *of_iommu_configure(struct device *dev,
-                                       struct device_node *master_np,
-                                       const u32 *id);
+extern int of_iommu_configure(struct device *dev, struct device_node *master_np,
+                             const u32 *id);
 
 extern void of_iommu_get_resv_regions(struct device *dev,
                                      struct list_head *list);
 
 #else
 
-static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
-                                        struct device_node *master_np,
-                                        const u32 *id)
+static inline int of_iommu_configure(struct device *dev,
+                                    struct device_node *master_np,
+                                    const u32 *id)
 {
-       return NULL;
+       return -ENODEV;
 }
 
 static inline void of_iommu_get_resv_regions(struct device *dev,
index fadfea5754852df256da1f77461f1d119a3bf418..a2ff1ad48f7f0c19f1e3403a9d2deb8ac860cb85 100644 (file)
@@ -7,11 +7,11 @@
  */
 
 #include <linux/mod_devicetable.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
 
 struct device;
+struct device_node;
 struct of_device_id;
+struct platform_device;
 
 /**
  * struct of_dev_auxdata - lookup table entry for device names & platform_data
index 06142ff7f9ce0ef0c600c3aa24f68b1bd5450bdb..2df35e65557d27c19fc3d87e865bc869b87bd18c 100644 (file)
@@ -206,6 +206,7 @@ enum mapping_flags {
        AS_RELEASE_ALWAYS,      /* Call ->release_folio(), even if no private data */
        AS_STABLE_WRITES,       /* must wait for writeback before modifying
                                   folio contents */
+       AS_UNMOVABLE,           /* The mapping cannot be moved, ever */
 };
 
 /**
@@ -306,6 +307,22 @@ static inline void mapping_clear_stable_writes(struct address_space *mapping)
        clear_bit(AS_STABLE_WRITES, &mapping->flags);
 }
 
+static inline void mapping_set_unmovable(struct address_space *mapping)
+{
+       /*
+        * It's expected unmovable mappings are also unevictable. Compaction
+        * migrate scanner (isolate_migratepages_block()) relies on this to
+        * reduce page locking.
+        */
+       set_bit(AS_UNEVICTABLE, &mapping->flags);
+       set_bit(AS_UNMOVABLE, &mapping->flags);
+}
+
+static inline bool mapping_unmovable(struct address_space *mapping)
+{
+       return test_bit(AS_UNMOVABLE, &mapping->flags);
+}
+
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
        return mapping->gfp_mask;
index 6b1301e2498e9e163c6e9a0525e49afa226bffea..3a4860bd27586587cb23e7d6f84f63efcf6f2fc6 100644 (file)
@@ -93,6 +93,6 @@ extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
 #if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
 /* for DT-based PCI controllers that support ECAM */
 int pci_host_common_probe(struct platform_device *pdev);
-int pci_host_common_remove(struct platform_device *pdev);
+void pci_host_common_remove(struct platform_device *pdev);
 #endif
 #endif
index 5cb69403107290dc452690730cf6fd55e06dd7ce..40ea18f5aa029ef5bebc3d4a56ae3e061afa4a28 100644 (file)
@@ -19,13 +19,6 @@ enum pci_epc_interface_type {
        SECONDARY_INTERFACE,
 };
 
-enum pci_epc_irq_type {
-       PCI_EPC_IRQ_UNKNOWN,
-       PCI_EPC_IRQ_LEGACY,
-       PCI_EPC_IRQ_MSI,
-       PCI_EPC_IRQ_MSIX,
-};
-
 static inline const char *
 pci_epc_interface_string(enum pci_epc_interface_type type)
 {
@@ -79,7 +72,7 @@ struct pci_epc_ops {
                            u16 interrupts, enum pci_barno, u32 offset);
        int     (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
        int     (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
-                            enum pci_epc_irq_type type, u16 interrupt_num);
+                            unsigned int type, u16 interrupt_num);
        int     (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
                               phys_addr_t phys_addr, u8 interrupt_num,
                               u32 entry_size, u32 *msi_data,
@@ -122,7 +115,7 @@ struct pci_epc_mem {
  * struct pci_epc - represents the PCI EPC device
  * @dev: PCI EPC device
  * @pci_epf: list of endpoint functions present in this EPC device
- * list_lock: Mutex for protecting pci_epf list
+ * @list_lock: Mutex for protecting pci_epf list
  * @ops: function pointers for performing endpoint operations
  * @windows: array of address space of the endpoint controller
  * @mem: first window of the endpoint controller, which corresponds to
@@ -229,7 +222,7 @@ int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
                        phys_addr_t phys_addr, u8 interrupt_num,
                        u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
 int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
-                     enum pci_epc_irq_type type, u16 interrupt_num);
+                     unsigned int type, u16 interrupt_num);
 int pci_epc_start(struct pci_epc *epc);
 void pci_epc_stop(struct pci_epc *epc);
 const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
index 3f44b6aec4770cb5ee5c925f02cca519d08688ac..77b146e0f6727e2e8b9820b90b16584714063044 100644 (file)
@@ -68,7 +68,7 @@ struct pci_epf_ops {
 };
 
 /**
- * struct pci_epf_event_ops - Callbacks for capturing the EPC events
+ * struct pci_epc_event_ops - Callbacks for capturing the EPC events
  * @core_init: Callback for the EPC initialization complete event
  * @link_up: Callback for the EPC link up event
  * @link_down: Callback for the EPC link down event
@@ -98,7 +98,7 @@ struct pci_epf_driver {
        void    (*remove)(struct pci_epf *epf);
 
        struct device_driver    driver;
-       struct pci_epf_ops      *ops;
+       const struct pci_epf_ops *ops;
        struct module           *owner;
        struct list_head        epf_group;
        const struct pci_epf_device_id  *id_table;
index 58a4c976c39bde68bb7ad522e010c3705847ed04..7ab0d13672dafa0faaeaf4cf02e7bab6fcc3130b 100644 (file)
@@ -715,6 +715,7 @@ static inline bool pci_is_bridge(struct pci_dev *dev)
 
 /**
  * pci_is_vga - check if the PCI device is a VGA device
+ * @pdev: PCI device
  *
  * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
  * VGA Base Class and Sub-Classes:
@@ -885,7 +886,6 @@ struct module;
 
 /**
  * struct pci_driver - PCI driver structure
- * @node:      List of driver structures.
  * @name:      Driver name.
  * @id_table:  Pointer to table of device IDs the driver is
  *             interested in.  Most drivers should export this
@@ -940,7 +940,6 @@ struct module;
  *             own I/O address space.
  */
 struct pci_driver {
-       struct list_head        node;
        const char              *name;
        const struct pci_device_id *id_table;   /* Must be non-NULL for probe to be called */
        int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);     /* New device inserted */
@@ -1073,11 +1072,13 @@ enum {
        PCI_SCAN_ALL_PCIE_DEVS  = 0x00000040,   /* Scan all, not just dev 0 */
 };
 
-#define PCI_IRQ_LEGACY         (1 << 0) /* Allow legacy interrupts */
+#define PCI_IRQ_INTX           (1 << 0) /* Allow INTx interrupts */
 #define PCI_IRQ_MSI            (1 << 1) /* Allow MSI interrupts */
 #define PCI_IRQ_MSIX           (1 << 2) /* Allow MSI-X interrupts */
 #define PCI_IRQ_AFFINITY       (1 << 3) /* Auto-assign affinity */
 
+#define PCI_IRQ_LEGACY         PCI_IRQ_INTX /* Deprecated! Use PCI_IRQ_INTX */
+
 /* These external functions are only available when PCI support is enabled */
 #ifdef CONFIG_PCI
 
@@ -1170,6 +1171,7 @@ int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
 struct pci_dev *pci_dev_get(struct pci_dev *dev);
 void pci_dev_put(struct pci_dev *dev);
+DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
 void pci_remove_bus(struct pci_bus *b);
 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
@@ -1366,6 +1368,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps);
 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
                             enum pci_bus_speed *speed,
                             enum pcie_link_width *width);
+int pcie_link_speed_mbps(struct pci_dev *pdev);
 void pcie_print_link_status(struct pci_dev *dev);
 int pcie_reset_flr(struct pci_dev *dev, bool probe);
 int pcie_flr(struct pci_dev *dev);
@@ -1419,6 +1422,7 @@ int pci_load_and_free_saved_state(struct pci_dev *dev,
                                  struct pci_saved_state **state);
 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
 void pci_pme_active(struct pci_dev *dev, bool enable);
@@ -1622,6 +1626,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
 
 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
                  void *userdata);
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+                        void *userdata);
 int pci_cfg_space_size(struct pci_dev *dev);
 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
 void pci_setup_bridge(struct pci_bus *bus);
@@ -1876,6 +1882,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
 void pci_dev_lock(struct pci_dev *dev);
 int pci_dev_trylock(struct pci_dev *dev);
 void pci_dev_unlock(struct pci_dev *dev);
+DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
 
 /*
  * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
@@ -2021,6 +2028,8 @@ static inline int pci_save_state(struct pci_dev *dev) { return 0; }
 static inline void pci_restore_state(struct pci_dev *dev) { }
 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 { return 0; }
+static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{ return 0; }
 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
 { return 0; }
 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
@@ -2132,14 +2141,14 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
        (pci_resource_end((dev), (bar)) ?                               \
         resource_size(pci_resource_n((dev), (bar))) : 0)
 
-#define __pci_dev_for_each_res0(dev, res, ...)                         \
-       for (unsigned int __b = 0;                                      \
-            res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES;   \
+#define __pci_dev_for_each_res0(dev, res, ...)                           \
+       for (unsigned int __b = 0;                                        \
+            __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
             __b++)
 
-#define __pci_dev_for_each_res1(dev, res, __b)                         \
-       for (__b = 0;                                                   \
-            res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES;   \
+#define __pci_dev_for_each_res1(dev, res, __b)                           \
+       for (__b = 0;                                                     \
+            __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
             __b++)
 
 #define pci_dev_for_each_resource(dev, res, ...)                       \
index 466cf477551a8c94de0a1c79a459e3ae74cde203..f6d0e3513948ac4567a54a012d2517f4dcea7efe 100644 (file)
@@ -299,6 +299,27 @@ static inline pmd_t pmdp_get(pmd_t *pmdp)
 }
 #endif
 
+#ifndef pudp_get
+static inline pud_t pudp_get(pud_t *pudp)
+{
+       return READ_ONCE(*pudp);
+}
+#endif
+
+#ifndef p4dp_get
+static inline p4d_t p4dp_get(p4d_t *p4dp)
+{
+       return READ_ONCE(*p4dp);
+}
+#endif
+
+#ifndef pgdp_get
+static inline pgd_t pgdp_get(pgd_t *pgdp)
+{
+       return READ_ONCE(*pgdp);
+}
+#endif
+
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
index ee8803f6ad07c105dfffec738358773eb92575df..673e96df453b3bf3405d4dde93364694689974c4 100644 (file)
@@ -47,7 +47,7 @@ struct pinctrl_map_mux {
 struct pinctrl_map_configs {
        const char *group_or_pin;
        unsigned long *configs;
-       unsigned num_configs;
+       unsigned int num_configs;
 };
 
 /**
@@ -154,13 +154,13 @@ struct pinctrl_map;
 #ifdef CONFIG_PINCTRL
 
 extern int pinctrl_register_mappings(const struct pinctrl_map *map,
-                                    unsigned num_maps);
+                                    unsigned int num_maps);
 extern void pinctrl_unregister_mappings(const struct pinctrl_map *map);
 extern void pinctrl_provide_dummies(void);
 #else
 
 static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
-                                           unsigned num_maps)
+                                           unsigned int num_maps)
 {
        return 0;
 }
index d74b7a4ea154dec659e7ebf7a48d8824203963a9..a65d3d078e58ba6c9b70bd349f9687b649d3f35e 100644 (file)
@@ -193,17 +193,17 @@ struct pinconf_generic_params {
 
 int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
                struct device_node *np, struct pinctrl_map **map,
-               unsigned *reserved_maps, unsigned *num_maps,
+               unsigned int *reserved_maps, unsigned int *num_maps,
                enum pinctrl_map_type type);
 int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
                struct device_node *np_config, struct pinctrl_map **map,
-               unsigned *num_maps, enum pinctrl_map_type type);
+               unsigned int *num_maps, enum pinctrl_map_type type);
 void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
-               struct pinctrl_map *map, unsigned num_maps);
+               struct pinctrl_map *map, unsigned int num_maps);
 
 static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctldev,
                struct device_node *np_config, struct pinctrl_map **map,
-               unsigned *num_maps)
+               unsigned int *num_maps)
 {
        return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
                        PIN_MAP_TYPE_CONFIGS_GROUP);
@@ -211,7 +211,7 @@ static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctld
 
 static inline int pinconf_generic_dt_node_to_map_pin(struct pinctrl_dev *pctldev,
                struct device_node *np_config, struct pinctrl_map **map,
-               unsigned *num_maps)
+               unsigned int *num_maps)
 {
        return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
                        PIN_MAP_TYPE_CONFIGS_PIN);
index f8a8215e9021e1364832e1d6441b1e4b299cc9a0..770ec2221156ceefcd57c1278ef24193aa0dcb76 100644 (file)
@@ -40,25 +40,25 @@ struct pinconf_ops {
        bool is_generic;
 #endif
        int (*pin_config_get) (struct pinctrl_dev *pctldev,
-                              unsigned pin,
+                              unsigned int pin,
                               unsigned long *config);
        int (*pin_config_set) (struct pinctrl_dev *pctldev,
-                              unsigned pin,
+                              unsigned int pin,
                               unsigned long *configs,
-                              unsigned num_configs);
+                              unsigned int num_configs);
        int (*pin_config_group_get) (struct pinctrl_dev *pctldev,
-                                    unsigned selector,
+                                    unsigned int selector,
                                     unsigned long *config);
        int (*pin_config_group_set) (struct pinctrl_dev *pctldev,
-                                    unsigned selector,
+                                    unsigned int selector,
                                     unsigned long *configs,
-                                    unsigned num_configs);
+                                    unsigned int num_configs);
        void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev,
                                     struct seq_file *s,
-                                    unsigned offset);
+                                    unsigned int offset);
        void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev,
                                           struct seq_file *s,
-                                          unsigned selector);
+                                          unsigned int selector);
        void (*pin_config_config_dbg_show) (struct pinctrl_dev *pctldev,
                                            struct seq_file *s,
                                            unsigned long config);
index 4d252ea00ed1a99a673fa0f73af976ef95d07410..9a8189ffd0f2c28c88640280deee194d17d18400 100644 (file)
@@ -54,7 +54,7 @@ struct pingroup {
  * @drv_data: driver-defined per-pin data. pinctrl core does not touch this
  */
 struct pinctrl_pin_desc {
-       unsigned number;
+       unsigned int number;
        const char *name;
        void *drv_data;
 };
@@ -82,7 +82,7 @@ struct pinctrl_gpio_range {
        unsigned int base;
        unsigned int pin_base;
        unsigned int npins;
-       unsigned const *pins;
+       unsigned int const *pins;
        struct gpio_chip *gc;
 };
 
@@ -108,18 +108,18 @@ struct pinctrl_gpio_range {
 struct pinctrl_ops {
        int (*get_groups_count) (struct pinctrl_dev *pctldev);
        const char *(*get_group_name) (struct pinctrl_dev *pctldev,
-                                      unsigned selector);
+                                      unsigned int selector);
        int (*get_group_pins) (struct pinctrl_dev *pctldev,
-                              unsigned selector,
-                              const unsigned **pins,
-                              unsigned *num_pins);
+                              unsigned int selector,
+                              const unsigned int **pins,
+                              unsigned int *num_pins);
        void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s,
-                         unsigned offset);
+                             unsigned int offset);
        int (*dt_node_to_map) (struct pinctrl_dev *pctldev,
                               struct device_node *np_config,
-                              struct pinctrl_map **map, unsigned *num_maps);
+                              struct pinctrl_map **map, unsigned int *num_maps);
        void (*dt_free_map) (struct pinctrl_dev *pctldev,
-                            struct pinctrl_map *map, unsigned num_maps);
+                            struct pinctrl_map *map, unsigned int num_maps);
 };
 
 /**
@@ -193,7 +193,7 @@ extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
                                struct pinctrl_gpio_range *range);
 extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
                                struct pinctrl_gpio_range *ranges,
-                               unsigned nranges);
+                               unsigned int nranges);
 extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
                                struct pinctrl_gpio_range *range);
 
@@ -203,8 +203,8 @@ extern struct pinctrl_gpio_range *
 pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
                                 unsigned int pin);
 extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
-                               const char *pin_group, const unsigned **pins,
-                               unsigned *num_pins);
+                                 const char *pin_group, const unsigned int **pins,
+                                 unsigned int *num_pins);
 
 /**
  * struct pinfunction - Description about a function
index a7e370965c53121392527b2dee84f55c56ed4313..d6f7b58d6ad0cce421aad80463529c9ccc65d68e 100644 (file)
@@ -57,26 +57,26 @@ struct pinctrl_gpio_range;
  *     the pin request.
  */
 struct pinmux_ops {
-       int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
-       int (*free) (struct pinctrl_dev *pctldev, unsigned offset);
+       int (*request) (struct pinctrl_dev *pctldev, unsigned int offset);
+       int (*free) (struct pinctrl_dev *pctldev, unsigned int offset);
        int (*get_functions_count) (struct pinctrl_dev *pctldev);
        const char *(*get_function_name) (struct pinctrl_dev *pctldev,
-                                         unsigned selector);
+                                         unsigned int selector);
        int (*get_function_groups) (struct pinctrl_dev *pctldev,
-                                 unsigned selector,
-                                 const char * const **groups,
-                                 unsigned *num_groups);
-       int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
-                       unsigned group_selector);
+                                   unsigned int selector,
+                                   const char * const **groups,
+                                   unsigned int *num_groups);
+       int (*set_mux) (struct pinctrl_dev *pctldev, unsigned int func_selector,
+                       unsigned int group_selector);
        int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
                                    struct pinctrl_gpio_range *range,
-                                   unsigned offset);
+                                   unsigned int offset);
        void (*gpio_disable_free) (struct pinctrl_dev *pctldev,
                                   struct pinctrl_gpio_range *range,
-                                  unsigned offset);
+                                  unsigned int offset);
        int (*gpio_set_direction) (struct pinctrl_dev *pctldev,
                                   struct pinctrl_gpio_range *range,
-                                  unsigned offset,
+                                  unsigned int offset,
                                   bool input);
        bool strict;
 };
index 2543c2a1c9aef76717fb539f4c94d2b59e0b7b33..e2e8957683116fd3c3f254639f082d3f0f26b27b 100644 (file)
@@ -17,7 +17,6 @@
  * @n_values: Number of multiplexer channels
  * @little_endian: Indicating if the register is in little endian
  * @write_only: Reading the register is not allowed by hardware
- * @classes: Optional I2C auto-detection classes
  * @idle: Value to write to mux when idle
  * @idle_in_use: indicate if idle value is in use
  * @reg: Virtual address of the register to switch channel
@@ -30,7 +29,6 @@ struct i2c_mux_reg_platform_data {
        int n_values;
        bool little_endian;
        bool write_only;
-       const unsigned int *classes;
        u32 idle;
        bool idle_in_use;
        void __iomem *reg;
index 3e7c64c854f4cd2b8817384a43681bcd8515e7de..f3f1311cdf3aa5ef9440664f38d37c1e94fff485 100644 (file)
@@ -19,9 +19,6 @@ struct omap_kp_platform_data {
        bool rep;
        unsigned long delay;
        bool dbounce;
-       /* specific to OMAP242x*/
-       unsigned int *row_gpios;
-       unsigned int *col_gpios;
 };
 
 /* Group (0..3) -- when multiple keys are pressed, only the
index 92a4f69de0e803f9b6edc6eba8a6d6d173265de5..a2f3e53a8196d847af8ec0aa88d0336989ee302b 100644 (file)
@@ -681,6 +681,7 @@ struct dev_pm_info {
        bool                    wakeup_path:1;
        bool                    syscore:1;
        bool                    no_pm_callbacks:1;      /* Owned by the PM core */
+       bool                    async_in_progress:1;    /* Owned by the PM core */
        unsigned int            must_resume:1;  /* Owned by the PM core */
        unsigned int            may_skip_resume:1;      /* Set by subsystems */
 #else
index ada3a0ab10bf2268d336e47f2698a8b5f15df788..68669ce187204af7869000988e87f8e1dc03a044 100644 (file)
@@ -91,10 +91,10 @@ static inline int devm_pm_clk_create(struct device *dev)
 #endif
 
 #ifdef CONFIG_HAVE_CLK
-extern void pm_clk_add_notifier(struct bus_type *bus,
+extern void pm_clk_add_notifier(const struct bus_type *bus,
                                        struct pm_clk_notifier_block *clknb);
 #else
-static inline void pm_clk_add_notifier(struct bus_type *bus,
+static inline void pm_clk_add_notifier(const struct bus_type *bus,
                                        struct pm_clk_notifier_block *clknb)
 {
 }
index 267fb8a4fb6e081c8627c130610acca86638be7d..ddbe7c3ca4ce2a582b48bf3af305d20135889044 100644 (file)
@@ -435,7 +435,7 @@ struct pnp_protocol {
 #define protocol_for_each_dev(protocol, dev)   \
        list_for_each_entry(dev, &(protocol)->devices, protocol_list)
 
-extern struct bus_type pnp_bus_type;
+extern const struct bus_type pnp_bus_type;
 
 #if defined(CONFIG_PNP)
 
index 7c8d65414a70ad5784badca31fedf2c95d44fc0c..7d8025fb74b701d6ac64cd3add0f7ca297fe5413 100644 (file)
@@ -83,5 +83,6 @@ struct bq27xxx_device_info {
 void bq27xxx_battery_update(struct bq27xxx_device_info *di);
 int bq27xxx_battery_setup(struct bq27xxx_device_info *di);
 void bq27xxx_battery_teardown(struct bq27xxx_device_info *di);
+extern const struct dev_pm_ops bq27xxx_battery_battery_pm_ops;
 
 #endif
index 60e35bd97398eb194477372602217de245671d31..e6516d0b7d52ad0570eb345955701be5856455c0 100644 (file)
@@ -124,6 +124,18 @@ static inline bool device_is_compatible(const struct device *dev, const char *co
        return fwnode_device_is_compatible(dev_fwnode(dev), compat);
 }
 
+int fwnode_property_match_property_string(const struct fwnode_handle *fwnode,
+                                         const char *propname,
+                                         const char * const *array, size_t n);
+
+static inline
+int device_property_match_property_string(const struct device *dev,
+                                         const char *propname,
+                                         const char * const *array, size_t n)
+{
+       return fwnode_property_match_property_string(dev_fwnode(dev), propname, array, n);
+}
+
 int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
                                       const char *prop, const char *nargs_prop,
                                       unsigned int nargs, unsigned int index,
index eaaef3ffec221b93cfbb9a2f4c20646b473754fa..90507d4afcd6debb80eef494c4c874c8a1732e49 100644 (file)
@@ -393,6 +393,10 @@ static inline void user_single_step_report(struct pt_regs *regs)
 #define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
 #endif
 
+#ifndef exception_ip
+#define exception_ip(x) instruction_pointer(x)
+#endif
+
 extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
 
 extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
index cda3597b84f2c32fbe567c04b3c4114311bed02a..fcc2c4496f7316fe298fdf3890ef854d3bb9d9d5 100644 (file)
@@ -69,7 +69,6 @@ struct pwm_state {
  * @label: name of the PWM device
  * @flags: flags associated with the PWM device
  * @hwpwm: per-chip relative index of the PWM device
- * @pwm: global index of the PWM device
  * @chip: PWM chip providing this PWM device
  * @args: PWM arguments
  * @state: last applied state
@@ -79,7 +78,6 @@ struct pwm_device {
        const char *label;
        unsigned long flags;
        unsigned int hwpwm;
-       unsigned int pwm;
        struct pwm_chip *chip;
 
        struct pwm_args args;
@@ -93,8 +91,8 @@ struct pwm_device {
  * @state: state to fill with the current PWM state
  *
  * The returned PWM state represents the state that was applied by a previous call to
- * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to
- * hardware. If pwm_apply_state() was never called, this returns either the current hardware
+ * pwm_apply_might_sleep(). Drivers may have to slightly tweak that state before programming it to
+ * hardware. If pwm_apply_might_sleep() was never called, this returns either the current hardware
  * state (if supported) or the default settings.
  */
 static inline void pwm_get_state(const struct pwm_device *pwm,
@@ -112,12 +110,6 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm)
        return state.enabled;
 }
 
-static inline void pwm_set_period(struct pwm_device *pwm, u64 period)
-{
-       if (pwm)
-               pwm->state.period = period;
-}
-
 static inline u64 pwm_get_period(const struct pwm_device *pwm)
 {
        struct pwm_state state;
@@ -127,12 +119,6 @@ static inline u64 pwm_get_period(const struct pwm_device *pwm)
        return state.period;
 }
 
-static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
-{
-       if (pwm)
-               pwm->state.duty_cycle = duty;
-}
-
 static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm)
 {
        struct pwm_state state;
@@ -158,20 +144,20 @@ static inline void pwm_get_args(const struct pwm_device *pwm,
 }
 
 /**
- * pwm_init_state() - prepare a new state to be applied with pwm_apply_state()
+ * pwm_init_state() - prepare a new state to be applied with pwm_apply_might_sleep()
  * @pwm: PWM device
  * @state: state to fill with the prepared PWM state
  *
  * This functions prepares a state that can later be tweaked and applied
- * to the PWM device with pwm_apply_state(). This is a convenient function
+ * to the PWM device with pwm_apply_might_sleep(). This is a convenient function
  * that first retrieves the current PWM state and the replaces the period
  * and polarity fields with the reference values defined in pwm->args.
  * Once the function returns, you can adjust the ->enabled and ->duty_cycle
- * fields according to your needs before calling pwm_apply_state().
+ * fields according to your needs before calling pwm_apply_might_sleep().
  *
  * ->duty_cycle is initially set to zero to avoid cases where the current
  * ->duty_cycle value exceed the pwm_args->period one, which would trigger
- * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle
+ * an error if the user calls pwm_apply_might_sleep() without adjusting ->duty_cycle
  * first.
  */
 static inline void pwm_init_state(const struct pwm_device *pwm,
@@ -227,7 +213,7 @@ pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
  *
  * pwm_init_state(pwm, &state);
  * pwm_set_relative_duty_cycle(&state, 50, 100);
- * pwm_apply_state(pwm, &state);
+ * pwm_apply_might_sleep(pwm, &state);
  *
  * This functions returns -EINVAL if @duty_cycle and/or @scale are
  * inconsistent (@scale == 0 or @duty_cycle > @scale).
@@ -282,32 +268,33 @@ struct pwm_ops {
  * @dev: device providing the PWMs
  * @ops: callbacks for this PWM controller
  * @owner: module providing this chip
- * @base: number of first PWM controlled by this chip
+ * @id: unique number of this PWM chip
  * @npwm: number of PWMs controlled by this chip
  * @of_xlate: request a PWM device given a device tree PWM specifier
  * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
- * @list: list node for internal use
+ * @atomic: can the driver's ->apply() be called in atomic context
  * @pwms: array of PWM devices allocated by the framework
  */
 struct pwm_chip {
        struct device *dev;
        const struct pwm_ops *ops;
        struct module *owner;
-       int base;
+       unsigned int id;
        unsigned int npwm;
 
        struct pwm_device * (*of_xlate)(struct pwm_chip *chip,
                                        const struct of_phandle_args *args);
        unsigned int of_pwm_n_cells;
+       bool atomic;
 
        /* only used internally by the PWM framework */
-       struct list_head list;
        struct pwm_device *pwms;
 };
 
 #if IS_ENABLED(CONFIG_PWM)
 /* PWM user APIs */
-int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state);
 int pwm_adjust_config(struct pwm_device *pwm);
 
 /**
@@ -335,7 +322,7 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
 
        state.duty_cycle = duty_ns;
        state.period = period_ns;
-       return pwm_apply_state(pwm, &state);
+       return pwm_apply_might_sleep(pwm, &state);
 }
 
 /**
@@ -356,7 +343,7 @@ static inline int pwm_enable(struct pwm_device *pwm)
                return 0;
 
        state.enabled = true;
-       return pwm_apply_state(pwm, &state);
+       return pwm_apply_might_sleep(pwm, &state);
 }
 
 /**
@@ -375,7 +362,18 @@ static inline void pwm_disable(struct pwm_device *pwm)
                return;
 
        state.enabled = false;
-       pwm_apply_state(pwm, &state);
+       pwm_apply_might_sleep(pwm, &state);
+}
+
+/**
+ * pwm_might_sleep() - is pwm_apply_atomic() supported?
+ * @pwm: PWM device
+ *
+ * Returns: false if pwm_apply_atomic() can be called from atomic context.
+ */
+static inline bool pwm_might_sleep(struct pwm_device *pwm)
+{
+       return !pwm->chip->atomic;
 }
 
 /* PWM provider APIs */
@@ -406,16 +404,27 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
                                       struct fwnode_handle *fwnode,
                                       const char *con_id);
 #else
-static inline int pwm_apply_state(struct pwm_device *pwm,
-                                 const struct pwm_state *state)
+static inline bool pwm_might_sleep(struct pwm_device *pwm)
+{
+       return true;
+}
+
+static inline int pwm_apply_might_sleep(struct pwm_device *pwm,
+                                       const struct pwm_state *state)
 {
        might_sleep();
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
+}
+
+static inline int pwm_apply_atomic(struct pwm_device *pwm,
+                                  const struct pwm_state *state)
+{
+       return -EOPNOTSUPP;
 }
 
 static inline int pwm_adjust_config(struct pwm_device *pwm)
 {
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
@@ -524,7 +533,14 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
        state.period = pwm->args.period;
        state.usage_power = false;
 
-       pwm_apply_state(pwm, &state);
+       pwm_apply_might_sleep(pwm, &state);
+}
+
+/* only for backwards-compatibility, new code should not use this */
+static inline int pwm_apply_state(struct pwm_device *pwm,
+                                 const struct pwm_state *state)
+{
+       return pwm_apply_might_sleep(pwm, state);
 }
 
 struct pwm_lookup {
index ebf371364581d540639f268191be2316c66d23bb..5640f024773b3762d75a72e03b87b1624e0e5d84 100644 (file)
@@ -13,7 +13,7 @@
 #define RCU_STALL_NOTIFY_NORM  1
 #define RCU_STALL_NOTIFY_EXP   2
 
-#ifdef CONFIG_RCU_STALL_COMMON
+#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 
 #include <linux/notifier.h>
 #include <linux/types.h>
 int rcu_stall_chain_notifier_register(struct notifier_block *n);
 int rcu_stall_chain_notifier_unregister(struct notifier_block *n);
 
-#else // #ifdef CONFIG_RCU_STALL_COMMON
+#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 
 // No RCU CPU stall warnings in Tiny RCU.
 static inline int rcu_stall_chain_notifier_register(struct notifier_block *n) { return -EEXIST; }
 static inline int rcu_stall_chain_notifier_unregister(struct notifier_block *n) { return -ENOENT; }
 
-#endif // #else // #ifdef CONFIG_RCU_STALL_COMMON
+#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 
 #endif /* __LINUX_RCU_NOTIFIER_H */
index d29740be4833e22c604298046784443a87e73338..3dc1e58865f77b957d7a626ad57ad4a1c10df42d 100644 (file)
@@ -355,7 +355,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
 })
 
 /**
- * list_next_or_null_rcu - get the first element from a list
+ * list_next_or_null_rcu - get the next element from a list
  * @head:      the head for the list.
  * @ptr:        the list head to take the next element from.
  * @type:       the type of the struct this is embedded in.
index f7206b2623c98c16ed02d73ac77774f43d5cbe37..0746b1b0b6639d9a912e2ba7503b928f40e92748 100644 (file)
@@ -34,9 +34,6 @@
 
 #define ULONG_CMP_GE(a, b)     (ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b)     (ULONG_MAX / 2 < (a) - (b))
-#define ulong2long(a)          (*(long *)(&(a)))
-#define USHORT_CMP_GE(a, b)    (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
-#define USHORT_CMP_LT(a, b)    (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
 
 /* Exported common interfaces */
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
@@ -301,6 +298,11 @@ static inline void rcu_lock_acquire(struct lockdep_map *map)
        lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
 }
 
+static inline void rcu_try_lock_acquire(struct lockdep_map *map)
+{
+       lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
+}
+
 static inline void rcu_lock_release(struct lockdep_map *map)
 {
        lock_release(map, _THIS_IP_);
@@ -315,6 +317,7 @@ int rcu_read_lock_any_held(void);
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 # define rcu_lock_acquire(a)           do { } while (0)
+# define rcu_try_lock_acquire(a)       do { } while (0)
 # define rcu_lock_release(a)           do { } while (0)
 
 static inline int rcu_read_lock_held(void)
index 782e14f62201f7c76e911886b3ef263d116a3298..fa802db216f94f8aa7349b02216f536dfa494279 100644 (file)
@@ -141,6 +141,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
 bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
 
 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);
 
 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
 void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
@@ -191,15 +192,24 @@ bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
 
-void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
+struct buffer_data_read_page;
+struct buffer_data_read_page *
+ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
+                               struct buffer_data_read_page *page);
+int ring_buffer_read_page(struct trace_buffer *buffer,
+                         struct buffer_data_read_page *data_page,
                          size_t len, int cpu, int full);
+void *ring_buffer_read_page_data(struct buffer_data_read_page *page);
 
 struct trace_seq;
 
 int ring_buffer_print_entry_header(struct trace_seq *s);
-int ring_buffer_print_page_header(struct trace_seq *s);
+int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);
+
+int ring_buffer_subbuf_order_get(struct trace_buffer *buffer);
+int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
+int ring_buffer_subbuf_size_get(struct trace_buffer *buffer);
 
 enum ring_buffer_flags {
        RB_FL_OVERWRITE         = 1 << 0,
index 534038d962e4fec9825e871e05c2f4f8f5c9e883..4612ef09a0c760bd303368a0fe4ad21c1f52e02e 100644 (file)
@@ -60,6 +60,7 @@
 #define   SD_EXIST                     (1 << 16)
 #define   DELINK_INT                   GPIO0_INT
 #define   MS_OC_INT                    (1 << 23)
+#define   SD_OVP_INT           (1 << 23)
 #define   SD_OC_INT                    (1 << 22)
 
 #define CARD_INT               (XD_INT | MS_INT | SD_INT)
@@ -80,6 +81,7 @@
 #define   OC_INT_EN                    (1 << 23)
 #define   DELINK_INT_EN                        GPIO0_INT_EN
 #define   MS_OC_INT_EN                 (1 << 23)
+#define   SD_OVP_INT_EN                        (1 << 23)
 #define   SD_OC_INT_EN                 (1 << 22)
 
 #define RTSX_DUM_REG                   0x1C
 #define   OBFF_DISABLE                 0x00
 
 #define CDRESUMECTL                    0xFE52
+#define CDGW                           0xFE53
 #define WAKE_SEL_CTL                   0xFE54
 #define PCLK_CTL                       0xFE55
 #define   PCLK_MODE_SEL                        0x20
 #define   SD_VIO_LDO_1V8               0x40
 #define   SD_VIO_LDO_3V3               0x70
 
+#define RTS5264_AUTOLOAD_CFG2          0xFF7D
+#define RTS5264_CHIP_RST_N_SEL         (1 << 6)
+
 #define RTS5260_AUTOLOAD_CFG4          0xFF7F
 #define   RTS5260_MIMO_DISABLE         0x8A
 /*RTS5261*/
@@ -1261,6 +1267,7 @@ struct rtsx_pcr {
        u8                              dma_error_count;
        u8                      ocp_stat;
        u8                      ocp_stat2;
+       u8                      ovp_stat;
        u8                      rtd3_en;
 };
 
@@ -1271,6 +1278,7 @@ struct rtsx_pcr {
 #define PID_5260       0x5260
 #define PID_5261       0x5261
 #define PID_5228       0x5228
+#define PID_5264       0x5264
 
 #define CHK_PCI_PID(pcr, pid)          ((pcr)->pci->device == (pid))
 #define PCI_VID(pcr)                   ((pcr)->pci->vendor)
index 9a66147915b2702588d33c9c443b65a27a0cc7fb..ffe8f618ab869729bd6c888a8a05e45d46a6c7c2 100644 (file)
@@ -920,7 +920,7 @@ struct task_struct {
        unsigned                        sched_rt_mutex:1;
 #endif
 
-       /* Bit to tell LSMs we're in execve(): */
+       /* Bit to tell TOMOYO we're in execve(): */
        unsigned                        in_execve:1;
        unsigned                        in_iowait:1;
 #ifndef TIF_RESTORE_SIGMASK
@@ -957,7 +957,7 @@ struct task_struct {
        /* Recursion prevention for eventfd_signal() */
        unsigned                        in_eventfd:1;
 #endif
-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
        unsigned                        pasid_activated:1;
 #endif
 #ifdef CONFIG_CPU_SUP_INTEL
index 5fb1f12c33f90232e774fa2ff5988023277c55f4..c44f4b47b945306318d8ed164c498abfe2512a10 100644 (file)
@@ -22,9 +22,8 @@ struct seq_buf {
 };
 
 #define DECLARE_SEQ_BUF(NAME, SIZE)                    \
-       char __ ## NAME ## _buffer[SIZE] = "";          \
        struct seq_buf NAME = {                         \
-               .buffer = &__ ## NAME ## _buffer,       \
+               .buffer = (char[SIZE]) { 0 },           \
                .size = SIZE,                           \
        }
 
index f5f97fa25e8ad0d3d65fa5f869c52a8982e6c6f5..3fab88ba265ee1d44ce48d9efe0ddc31d5c8e934 100644 (file)
@@ -27,7 +27,7 @@ struct serdev_device;
  *                     not sleep.
  */
 struct serdev_device_ops {
-       int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
+       ssize_t (*receive_buf)(struct serdev_device *, const u8 *, size_t);
        void (*write_wakeup)(struct serdev_device *);
 };
 
@@ -82,7 +82,7 @@ enum serdev_parity {
  * serdev controller structures
  */
 struct serdev_controller_ops {
-       int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t);
+       ssize_t (*write_buf)(struct serdev_controller *, const u8 *, size_t);
        void (*write_flush)(struct serdev_controller *);
        int (*write_room)(struct serdev_controller *);
        int (*open)(struct serdev_controller *);
@@ -99,12 +99,14 @@ struct serdev_controller_ops {
 /**
  * struct serdev_controller - interface to the serdev controller
  * @dev:       Driver model representation of the device.
+ * @host:      Serial port hardware controller device
  * @nr:                number identifier for this controller/bus.
  * @serdev:    Pointer to slave device for this controller.
  * @ops:       Controller operations.
  */
 struct serdev_controller {
        struct device           dev;
+       struct device           *host;
        unsigned int            nr;
        struct serdev_device    *serdev;
        const struct serdev_controller_ops *ops;
@@ -167,7 +169,9 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *);
 int serdev_device_add(struct serdev_device *);
 void serdev_device_remove(struct serdev_device *);
 
-struct serdev_controller *serdev_controller_alloc(struct device *, size_t);
+struct serdev_controller *serdev_controller_alloc(struct device *host,
+                                                 struct device *parent,
+                                                 size_t size);
 int serdev_controller_add(struct serdev_controller *);
 void serdev_controller_remove(struct serdev_controller *);
 
@@ -181,9 +185,9 @@ static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl
        serdev->ops->write_wakeup(serdev);
 }
 
-static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
-                                             const unsigned char *data,
-                                             size_t count)
+static inline ssize_t serdev_controller_receive_buf(struct serdev_controller *ctrl,
+                                                   const u8 *data,
+                                                   size_t count)
 {
        struct serdev_device *serdev = ctrl->serdev;
 
@@ -200,13 +204,13 @@ void serdev_device_close(struct serdev_device *);
 int devm_serdev_device_open(struct device *, struct serdev_device *);
 unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
 void serdev_device_set_flow_control(struct serdev_device *, bool);
-int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+int serdev_device_write_buf(struct serdev_device *, const u8 *, size_t);
 void serdev_device_wait_until_sent(struct serdev_device *, long);
 int serdev_device_get_tiocm(struct serdev_device *);
 int serdev_device_set_tiocm(struct serdev_device *, int, int);
 int serdev_device_break_ctl(struct serdev_device *serdev, int break_state);
 void serdev_device_write_wakeup(struct serdev_device *);
-int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long);
+ssize_t serdev_device_write(struct serdev_device *, const u8 *, size_t, long);
 void serdev_device_write_flush(struct serdev_device *);
 int serdev_device_write_room(struct serdev_device *);
 
@@ -244,7 +248,7 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
 }
 static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
 static inline int serdev_device_write_buf(struct serdev_device *serdev,
-                                         const unsigned char *buf,
+                                         const u8 *buf,
                                          size_t count)
 {
        return -ENODEV;
@@ -262,8 +266,9 @@ static inline int serdev_device_break_ctl(struct serdev_device *serdev, int brea
 {
        return -EOPNOTSUPP;
 }
-static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
-                                     size_t count, unsigned long timeout)
+static inline ssize_t serdev_device_write(struct serdev_device *sdev,
+                                         const u8 *buf, size_t count,
+                                         unsigned long timeout)
 {
        return -ENODEV;
 }
@@ -311,11 +316,13 @@ struct tty_driver;
 
 #ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT
 struct device *serdev_tty_port_register(struct tty_port *port,
+                                       struct device *host,
                                        struct device *parent,
                                        struct tty_driver *drv, int idx);
 int serdev_tty_port_unregister(struct tty_port *port);
 #else
 static inline struct device *serdev_tty_port_register(struct tty_port *port,
+                                          struct device *host,
                                           struct device *parent,
                                           struct tty_driver *drv, int idx)
 {
index 89f7b6c63598c2fdc4bbb981fb4217800f7803a7..536b2581d3e2007593323a53c050d037d6ac5dd1 100644 (file)
@@ -852,9 +852,9 @@ static inline unsigned long uart_fifo_timeout(struct uart_port *port)
 }
 
 /* Base timer interval for polling */
-static inline int uart_poll_timeout(struct uart_port *port)
+static inline unsigned long uart_poll_timeout(struct uart_port *port)
 {
-       int timeout = uart_fifo_timeout(port);
+       unsigned long timeout = uart_fifo_timeout(port);
 
        return timeout > 6 ? (timeout / 2 - 2) : 1;
 }
index a5ae952454c890c4aa95aadf2f3bc79ff782279a..2dde34c29203be8c0ead789ac93fabd23120727f 100644 (file)
@@ -295,7 +295,7 @@ struct nf_bridge_info {
        u8                      bridged_dnat:1;
        u8                      sabotage_in_done:1;
        __u16                   frag_max_size;
-       struct net_device       *physindev;
+       int                     physinif;
 
        /* always valid & non-NULL from FORWARD on, for physdev match */
        struct net_device       *physoutdev;
index 888a4b217829fd4d6baf52f784ce35e9ad6bd0ed..e65ec3fd27998a5b82fc2c4597c575125e653056 100644 (file)
@@ -505,12 +505,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
        return !!psock->saved_data_ready;
 }
 
-static inline bool sk_is_udp(const struct sock *sk)
-{
-       return sk->sk_type == SOCK_DGRAM &&
-              sk->sk_protocol == IPPROTO_UDP;
-}
-
 #if IS_ENABLED(CONFIG_NET_SOCK_MSG)
 
 #define BPF_F_STRPARSER        (1UL << 1)
index 4f3d14bb15385a2860e4e1ee54738f60eb7c1b06..66f814b63a435f65bcdf8743d7adbfde39ca7ad7 100644 (file)
@@ -886,7 +886,8 @@ struct sdw_master_ops {
  * struct sdw_bus - SoundWire bus
  * @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
  * @md: Master device
- * @link_id: Link id number, can be 0 to N, unique for each Master
+ * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used.
+ * @link_id: Link id number, can be 0 to N, unique for each Controller
  * @id: bus system-wide unique id
  * @slaves: list of Slaves on this bus
  * @assigned: Bitmap for Slave device numbers.
@@ -918,6 +919,7 @@ struct sdw_master_ops {
 struct sdw_bus {
        struct device *dev;
        struct sdw_master_device *md;
+       int controller_id;
        unsigned int link_id;
        int id;
        struct list_head slaves;
@@ -1040,7 +1042,7 @@ int sdw_compute_params(struct sdw_bus *bus);
 
 int sdw_stream_add_master(struct sdw_bus *bus,
                struct sdw_stream_config *stream_config,
-               struct sdw_port_config *port_config,
+               const struct sdw_port_config *port_config,
                unsigned int num_ports,
                struct sdw_stream_runtime *stream);
 int sdw_stream_remove_master(struct sdw_bus *bus,
@@ -1062,7 +1064,7 @@ void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id
 
 int sdw_stream_add_slave(struct sdw_slave *slave,
                         struct sdw_stream_config *stream_config,
-                        struct sdw_port_config *port_config,
+                        const struct sdw_port_config *port_config,
                         unsigned int num_ports,
                         struct sdw_stream_runtime *stream);
 int sdw_stream_remove_slave(struct sdw_slave *slave,
@@ -1084,7 +1086,7 @@ int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
 
 static inline int sdw_stream_add_slave(struct sdw_slave *slave,
                                       struct sdw_stream_config *stream_config,
-                                      struct sdw_port_config *port_config,
+                                      const struct sdw_port_config *port_config,
                                       unsigned int num_ports,
                                       struct sdw_stream_runtime *stream)
 {
index 471fe2ff9066b75e82795b92972905bbae3cc48c..600fbd5daf683d4d93536a569ef5e52248d7a851 100644 (file)
@@ -21,7 +21,7 @@
 #include <uapi/linux/spi/spi.h>
 
 /* Max no. of CS supported per spi device */
-#define SPI_CS_CNT_MAX 4
+#define SPI_CS_CNT_MAX 16
 
 struct dma_chan;
 struct software_node;
index eaac8b0da25b8aef964a311eee34d0313c549838..3fcd20de6ca88e83abedf8329a3528aacead6f6d 100644 (file)
@@ -449,6 +449,12 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
        return raw_spin_is_contended(&lock->rlock);
 }
 
+#define assert_spin_locked(lock)       assert_raw_spin_locked(&(lock)->rlock)
+
+#else  /* !CONFIG_PREEMPT_RT */
+# include <linux/spinlock_rt.h>
+#endif /* CONFIG_PREEMPT_RT */
+
 /*
  * Does a critical section need to be broken due to another
  * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
@@ -480,12 +486,6 @@ static inline int rwlock_needbreak(rwlock_t *lock)
 #endif
 }
 
-#define assert_spin_locked(lock)       assert_raw_spin_locked(&(lock)->rlock)
-
-#else  /* !CONFIG_PREEMPT_RT */
-# include <linux/spinlock_rt.h>
-#endif /* CONFIG_PREEMPT_RT */
-
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
index 2a4ce4144f9f1b62ba2abb577d543f16028869d9..28e8c8bd39441fa6451be3364006fb3b47a47dc9 100644 (file)
@@ -120,6 +120,9 @@ static inline void spmi_controller_put(struct spmi_controller *ctrl)
 int spmi_controller_add(struct spmi_controller *ctrl);
 void spmi_controller_remove(struct spmi_controller *ctrl);
 
+struct spmi_controller *devm_spmi_controller_alloc(struct device *parent, size_t size);
+int devm_spmi_controller_add(struct device *parent, struct spmi_controller *ctrl);
+
 /**
  * struct spmi_driver - SPMI slave device driver
  * @driver:    SPMI device drivers should initialize name and owner field of
index 127ef3b2e6073bc7a099aa632ff1b77f4d003387..236610e4a8fa5d704b7e4bb243b720a7c5fc1283 100644 (file)
@@ -229,7 +229,7 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
 
        srcu_check_nmi_safety(ssp, true);
        retval = __srcu_read_lock_nmisafe(ssp);
-       rcu_lock_acquire(&ssp->dep_map);
+       rcu_try_lock_acquire(&ssp->dep_map);
        return retval;
 }
 
index ce137830a0b99c1f79b100b857966443199e9777..ab148d8dbfc146d2aed178b694f33506d06bfd05 100644 (file)
@@ -66,9 +66,6 @@ extern char * strcpy(char *,const char *);
 #ifndef __HAVE_ARCH_STRNCPY
 extern char * strncpy(char *,const char *, __kernel_size_t);
 #endif
-#ifndef __HAVE_ARCH_STRLCPY
-size_t strlcpy(char *, const char *, size_t);
-#endif
 #ifndef __HAVE_ARCH_STRSCPY
 ssize_t strscpy(char *, const char *, size_t);
 #endif
index 42b249b4c24b1fb748f2584f902f86f03cf4dee0..8cd8c38cf3f3022782a0870f46d9b22b18e0b618 100644 (file)
@@ -193,7 +193,6 @@ struct ssam_device_driver {
 
 #ifdef CONFIG_SURFACE_AGGREGATOR_BUS
 
-extern struct bus_type ssam_bus_type;
 extern const struct device_type ssam_device_type;
 
 /**
index 5c0dbef55792f269b6faa1aa81c1684f35839fe7..77eb9b0e768504daa57af63b2eb7c8debd00dfda 100644 (file)
@@ -128,6 +128,7 @@ struct mnt_id_req;
 #define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
 #define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
 #define __SC_CAST(t, a)        (__force t) a
+#define __SC_TYPE(t, a)        t
 #define __SC_ARGS(t, a)        a
 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
 
@@ -414,7 +415,7 @@ asmlinkage long sys_statmount(const struct mnt_id_req __user *req,
                              struct statmount __user *buf, size_t bufsize,
                              unsigned int flags);
 asmlinkage long sys_listmount(const struct mnt_id_req __user *req,
-                             u64 __user *buf, size_t bufsize,
+                             u64 __user *mnt_ids, size_t nr_mnt_ids,
                              unsigned int flags);
 asmlinkage long sys_truncate(const char __user *path, long length);
 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
index bf84595a4e866cb7a686af685c72c5947acef26b..b7a3deb372fd42d8097648a00903ffad7148ff39 100644 (file)
@@ -32,6 +32,7 @@
 struct thermal_zone_device;
 struct thermal_cooling_device;
 struct thermal_instance;
+struct thermal_debugfs;
 struct thermal_attr;
 
 enum thermal_trend {
@@ -102,7 +103,7 @@ struct thermal_cooling_device_ops {
 
 struct thermal_cooling_device {
        int id;
-       char *type;
+       const char *type;
        unsigned long max_state;
        struct device device;
        struct device_node *np;
@@ -113,6 +114,9 @@ struct thermal_cooling_device {
        struct mutex lock; /* protect thermal_instances list */
        struct list_head thermal_instances;
        struct list_head node;
+#ifdef CONFIG_THERMAL_DEBUGFS
+       struct thermal_debugfs *debugfs;
+#endif
 };
 
 /**
@@ -189,6 +193,9 @@ struct thermal_zone_device {
        struct list_head node;
        struct delayed_work poll_queue;
        enum thermal_notify_event notify_event;
+#ifdef CONFIG_THERMAL_DEBUGFS
+       struct thermal_debugfs *debugfs;
+#endif
        bool suspended;
 };
 
index 6151c210d987d4468aa84c6bcb3584256548a1ca..2c835e5c41f6303da0d86e25619a5dbedc4743d3 100644 (file)
@@ -86,7 +86,7 @@ struct tb {
        unsigned long privdata[];
 };
 
-extern struct bus_type tb_bus_type;
+extern const struct bus_type tb_bus_type;
 extern struct device_type tb_service_type;
 extern struct device_type tb_xdomain_type;
 
index 2a70a447184c9e006ac13e3353987990b9ba21c4..fdcd76b7be83d7f11c46878c19d4dab97828a446 100644 (file)
@@ -51,7 +51,7 @@ int trace_array_printk(struct trace_array *tr, unsigned long ip,
                       const char *fmt, ...);
 int trace_array_init_printk(struct trace_array *tr);
 void trace_array_put(struct trace_array *tr);
-struct trace_array *trace_array_get_by_name(const char *name);
+struct trace_array *trace_array_get_by_name(const char *name, const char *systems);
 int trace_array_destroy(struct trace_array *tr);
 
 /* For osnoise tracer */
@@ -84,7 +84,7 @@ static inline int trace_array_init_printk(struct trace_array *tr)
 static inline void trace_array_put(struct trace_array *tr)
 {
 }
-static inline struct trace_array *trace_array_get_by_name(const char *name)
+static inline struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
 {
        return NULL;
 }
index 3691e0e76a1a209601f97fe03ca97738dafcd841..9ec229dfddaa774b9c0a4f2ae410eb273061c330 100644 (file)
@@ -8,11 +8,14 @@
 
 /*
  * Trace sequences are used to allow a function to call several other functions
- * to create a string of data to use (up to a max of PAGE_SIZE).
+ * to create a string of data to use.
  */
 
+#define TRACE_SEQ_BUFFER_SIZE  (PAGE_SIZE * 2 - \
+       (sizeof(struct seq_buf) + sizeof(size_t) + sizeof(int)))
+
 struct trace_seq {
-       char                    buffer[PAGE_SIZE];
+       char                    buffer[TRACE_SEQ_BUFFER_SIZE];
        struct seq_buf          seq;
        size_t                  readpos;
        int                     full;
@@ -21,7 +24,7 @@ struct trace_seq {
 static inline void
 trace_seq_init(struct trace_seq *s)
 {
-       seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
+       seq_buf_init(&s->seq, s->buffer, TRACE_SEQ_BUFFER_SIZE);
        s->full = 0;
        s->readpos = 0;
 }
index 4b6340ac2af28f556cde4aea6295a0cc11bc719a..8c76fd97d4adcd5cb9189dd0eceb5d33dac47b56 100644 (file)
@@ -242,7 +242,7 @@ struct tty_struct {
        void *driver_data;
        spinlock_t files_lock;
        int write_cnt;
-       unsigned char *write_buf;
+       u8 *write_buf;
 
        struct list_head tty_files;
 
@@ -393,8 +393,10 @@ extern const struct class tty_class;
  * tty_kref_get - get a tty reference
  * @tty: tty device
  *
- * Returns: a new reference to a tty object. The caller must hold sufficient
- * locks/counts to ensure that their existing reference cannot go away
+ * Returns: a new reference to a tty object
+ *
+ * Locking: The caller must hold sufficient locks/counts to ensure that their
+ * existing reference cannot go away.
  */
 static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
 {
@@ -408,8 +410,8 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout);
 void stop_tty(struct tty_struct *tty);
 void start_tty(struct tty_struct *tty);
 void tty_write_message(struct tty_struct *tty, char *msg);
-int tty_send_xchar(struct tty_struct *tty, char ch);
-int tty_put_char(struct tty_struct *tty, unsigned char c);
+int tty_send_xchar(struct tty_struct *tty, u8 ch);
+int tty_put_char(struct tty_struct *tty, u8 c);
 unsigned int tty_chars_in_buffer(struct tty_struct *tty);
 unsigned int tty_write_room(struct tty_struct *tty);
 void tty_driver_flush_buffer(struct tty_struct *tty);
@@ -419,6 +421,7 @@ bool tty_unthrottle_safe(struct tty_struct *tty);
 int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
 int tty_get_icount(struct tty_struct *tty,
                struct serial_icounter_struct *icount);
+int tty_get_tiocm(struct tty_struct *tty);
 int is_current_pgrp_orphaned(void);
 void tty_hangup(struct tty_struct *tty);
 void tty_vhangup(struct tty_struct *tty);
@@ -436,12 +439,11 @@ void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud,
  * tty_get_baud_rate - get tty bit rates
  * @tty: tty to query
  *
- * Returns: the baud rate as an integer for this terminal. The termios lock
- * must be held by the caller and the terminal bit flags may be updated.
+ * Returns: the baud rate as an integer for this terminal
  *
- * Locking: none
+ * Locking: The termios lock must be held by the caller.
  */
-static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
+static inline speed_t tty_get_baud_rate(const struct tty_struct *tty)
 {
        return tty_termios_baud_rate(&tty->termios);
 }
index 18beff0cec1abb444bb1be7706785aadeb18035f..7372124fbf90b3dc724e5ea40c7f0168191cd275 100644 (file)
@@ -72,8 +72,7 @@ struct serial_struct;
  *     is closed for the last time freeing up the resources. This is
  *     actually the second part of shutdown for routines that might sleep.
  *
- * @write: ``ssize_t ()(struct tty_struct *tty, const unsigned char *buf,
- *                 size_t count)``
+ * @write: ``ssize_t ()(struct tty_struct *tty, const u8 *buf, size_t count)``
  *
  *     This routine is called by the kernel to write a series (@count) of
  *     characters (@buf) to the @tty device. The characters may come from
@@ -85,7 +84,7 @@ struct serial_struct;
  *
  *     Optional: Required for writable devices. May not sleep.
  *
- * @put_char: ``int ()(struct tty_struct *tty, unsigned char ch)``
+ * @put_char: ``int ()(struct tty_struct *tty, u8 ch)``
  *
  *     This routine is called by the kernel to write a single character @ch to
  *     the @tty device. If the kernel uses this routine, it must call the
@@ -243,7 +242,7 @@ struct serial_struct;
  *     Optional: If not provided, the device is assumed to have no FIFO.
  *     Usually correct to invoke via tty_wait_until_sent(). May sleep.
  *
- * @send_xchar: ``void ()(struct tty_struct *tty, char ch)``
+ * @send_xchar: ``void ()(struct tty_struct *tty, u8 ch)``
  *
  *     This routine is used to send a high-priority XON/XOFF character (@ch)
  *     to the @tty device.
@@ -375,7 +374,7 @@ struct tty_operations {
        void (*flush_buffer)(struct tty_struct *tty);
        void (*set_ldisc)(struct tty_struct *tty);
        void (*wait_until_sent)(struct tty_struct *tty, int timeout);
-       void (*send_xchar)(struct tty_struct *tty, char ch);
+       void (*send_xchar)(struct tty_struct *tty, u8 ch);
        int (*tiocmget)(struct tty_struct *tty);
        int (*tiocmset)(struct tty_struct *tty,
                        unsigned int set, unsigned int clear);
index 6b367eb17979a20436e856311d211859db101c1c..1b861f2100b69f120971c7dd503d6635db1fb468 100644 (file)
@@ -114,8 +114,8 @@ struct tty_port {
        unsigned char           console:1;
        struct mutex            mutex;
        struct mutex            buf_mutex;
-       unsigned char           *xmit_buf;
-       DECLARE_KFIFO_PTR(xmit_fifo, unsigned char);
+       u8                      *xmit_buf;
+       DECLARE_KFIFO_PTR(xmit_fifo, u8);
        unsigned int            close_delay;
        unsigned int            closing_wait;
        int                     drain_delay;
@@ -149,10 +149,10 @@ struct device *tty_port_register_device_attr(struct tty_port *port,
                const struct attribute_group **attr_grp);
 struct device *tty_port_register_device_serdev(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
-               struct device *device);
+               struct device *host, struct device *parent);
 struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
                struct tty_driver *driver, unsigned index,
-               struct device *device, void *drvdata,
+               struct device *host, struct device *parent, void *drvdata,
                const struct attribute_group **attr_grp);
 void tty_port_unregister_device(struct tty_port *port,
                struct tty_driver *driver, unsigned index);
index 8c61643acd4993ceca4fe4bb0382d4261ccd9c84..9e52179872a50dd3598afc606b4c524caa51ed1d 100644 (file)
@@ -632,7 +632,6 @@ struct usb3_lpm_parameters {
  * @reset_resume: needs reset instead of resume
  * @port_is_suspended: the upstream port is suspended (L2 or U3)
  * @slot_id: Slot ID assigned by xHCI
- * @removable: Device can be physically removed from this port
  * @l1_params: best effor service latency for USB2 L1 LPM state, and L1 timeout.
  * @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
  * @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout.
@@ -1144,16 +1143,6 @@ extern ssize_t usb_store_new_id(struct usb_dynids *dynids,
 
 extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf);
 
-/**
- * struct usbdrv_wrap - wrapper for driver-model structure
- * @driver: The driver-model core driver structure.
- * @for_devices: Non-zero for device drivers, 0 for interface drivers.
- */
-struct usbdrv_wrap {
-       struct device_driver driver;
-       int for_devices;
-};
-
 /**
  * struct usb_driver - identifies USB interface driver to usbcore
  * @name: The driver name should be unique among USB drivers,
@@ -1194,7 +1183,7 @@ struct usbdrv_wrap {
  *     is bound to the driver.
  * @dynids: used internally to hold the list of dynamically added device
  *     ids for this driver.
- * @drvwrap: Driver-model core structure wrapper.
+ * @driver: The driver-model core driver structure.
  * @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be
  *     added to this driver by preventing the sysfs file from being created.
  * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
@@ -1242,13 +1231,13 @@ struct usb_driver {
        const struct attribute_group **dev_groups;
 
        struct usb_dynids dynids;
-       struct usbdrv_wrap drvwrap;
+       struct device_driver driver;
        unsigned int no_dynamic_id:1;
        unsigned int supports_autosuspend:1;
        unsigned int disable_hub_initiated_lpm:1;
        unsigned int soft_unbind:1;
 };
-#define        to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver)
+#define        to_usb_driver(d) container_of(d, struct usb_driver, driver)
 
 /**
  * struct usb_device_driver - identifies USB device driver to usbcore
@@ -1264,9 +1253,12 @@ struct usb_driver {
  *     module is being unloaded.
  * @suspend: Called when the device is going to be suspended by the system.
  * @resume: Called when the device is being resumed by the system.
+ * @choose_configuration: If non-NULL, called instead of the default
+ *     usb_choose_configuration(). If this returns an error then we'll go
+ *     on to call the normal usb_choose_configuration().
  * @dev_groups: Attributes attached to the device that will be created once it
  *     is bound to the driver.
- * @drvwrap: Driver-model core structure wrapper.
+ * @driver: The driver-model core driver structure.
  * @id_table: used with @match() to select better matching driver at
  *     probe() time.
  * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
@@ -1275,7 +1267,7 @@ struct usb_driver {
  *     resume and suspend functions will be called in addition to the driver's
  *     own, so this part of the setup does not need to be replicated.
  *
- * USB drivers must provide all the fields listed above except drvwrap,
+ * USB drivers must provide all the fields listed above except driver,
  * match, and id_table.
  */
 struct usb_device_driver {
@@ -1287,14 +1279,17 @@ struct usb_device_driver {
 
        int (*suspend) (struct usb_device *udev, pm_message_t message);
        int (*resume) (struct usb_device *udev, pm_message_t message);
+
+       int (*choose_configuration) (struct usb_device *udev);
+
        const struct attribute_group **dev_groups;
-       struct usbdrv_wrap drvwrap;
+       struct device_driver driver;
        const struct usb_device_id *id_table;
        unsigned int supports_autosuspend:1;
        unsigned int generic_subclass:1;
 };
 #define        to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
-               drvwrap.driver)
+               driver)
 
 /**
  * struct usb_class_driver - identifies a USB driver that wants to use the USB major number
index 6532beb587b1978e09bc5b17dc088daf91f9f88c..a771ccc038ac949f2b4a835e28d720735e17ee22 100644 (file)
@@ -236,6 +236,7 @@ struct usb_ep {
        unsigned                max_streams:16;
        unsigned                mult:2;
        unsigned                maxburst:5;
+       unsigned                fifo_mode:1;
        u8                      address;
        const struct usb_endpoint_descriptor    *desc;
        const struct usb_ss_ep_comp_descriptor  *comp_desc;
index 00724b4f6e122bf700da6c0c81c250c7c6bc19f7..cd77fc6095a15b5f8d313179436fc8b33efbc769 100644 (file)
@@ -372,8 +372,9 @@ struct hc_driver {
                 * or bandwidth constraints.
                 */
        void    (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
-               /* Returns the hardware-chosen device address */
-       int     (*address_device)(struct usb_hcd *, struct usb_device *udev);
+               /* Set the hardware-chosen device address */
+       int     (*address_device)(struct usb_hcd *, struct usb_device *udev,
+                                 unsigned int timeout_ms);
                /* prepares the hardware to send commands to the device */
        int     (*enable_device)(struct usb_hcd *, struct usb_device *udev);
                /* Notifies the HCD after a hub descriptor is fetched.
index eeb7c2157c72fba0e54f926cec863bb9e50ff9f5..59409c1fc3dee7b20112867615f9a7a92b65fa17 100644 (file)
@@ -72,4 +72,7 @@
 /* device has endpoints that should be ignored */
 #define USB_QUIRK_ENDPOINT_IGNORE              BIT(15)
 
+/* short SET_ADDRESS request timeout */
+#define USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT        BIT(16)
+
 #endif /* __LINUX_USB_QUIRKS_H */
index 83376473ac76500d47e50f1be3de7e50816002b9..467e8045e9f8690e461df4094b6afd47346b0c17 100644 (file)
@@ -36,7 +36,9 @@
 
 #define TCPC_ALERT_MASK                        0x12
 #define TCPC_POWER_STATUS_MASK         0x14
-#define TCPC_FAULT_STATUS_MASK         0x15
+
+#define TCPC_FAULT_STATUS_MASK                 0x15
+#define TCPC_FAULT_STATUS_MASK_VCONN_OC                BIT(1)
 
 #define TCPC_EXTENDED_STATUS_MASK              0x16
 #define TCPC_EXTENDED_STATUS_MASK_VSAFE0V      BIT(0)
 
 #define TCPC_FAULT_STATUS              0x1f
 #define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7)
+#define TCPC_FAULT_STATUS_VCONN_OC     BIT(1)
 
 #define TCPC_ALERT_EXTENDED            0x21
 
index ab7ca872950bb16b73967b458bb8fce91f421d55..65fac5e1f3178c4b11650771ded16d35b792b6a2 100644 (file)
@@ -173,5 +173,6 @@ void tcpm_pd_hard_reset(struct tcpm_port *port);
 void tcpm_tcpc_reset(struct tcpm_port *port);
 void tcpm_port_clean(struct tcpm_port *port);
 bool tcpm_port_is_toggling(struct tcpm_port *port);
+void tcpm_port_error_recovery(struct tcpm_port *port);
 
 #endif /* __LINUX_USB_TCPM_H */
index a65b2513f8cdcba6b41be132e1f66734b1aeff8b..89b265bc6ec315bcadadebcc92b5ea4ab283822f 100644 (file)
@@ -69,6 +69,13 @@ struct vfio_device {
        u8 iommufd_attached:1;
 #endif
        u8 cdev_opened:1;
+#ifdef CONFIG_DEBUG_FS
+       /*
+        * debug_root is a static property of the vfio_device
+        * which must be set prior to registering the vfio_device.
+        */
+       struct dentry *debug_root;
+#endif
 };
 
 /**
index 562e8754869da6c6a5c46c2dd55b4775c27b0722..85e84b92751b67354ad6671d945cd39069afcdcc 100644 (file)
@@ -127,7 +127,27 @@ int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
 int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
 void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
 void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
+int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar);
 pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state);
 
+#define VFIO_IOWRITE_DECLATION(size) \
+int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev,     \
+                       bool test_mem, u##size val, void __iomem *io);
+
+VFIO_IOWRITE_DECLATION(8)
+VFIO_IOWRITE_DECLATION(16)
+VFIO_IOWRITE_DECLATION(32)
+#ifdef iowrite64
+VFIO_IOWRITE_DECLATION(64)
+#endif
+
+#define VFIO_IOREAD_DECLATION(size) \
+int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev,      \
+                       bool test_mem, u##size *val, void __iomem *io);
+
+VFIO_IOREAD_DECLATION(8)
+VFIO_IOREAD_DECLATION(16)
+VFIO_IOREAD_DECLATION(32)
+
 #endif /* VFIO_PCI_CORE_H */
index 4cc614a38376593787d7fd2842a7b3649979ead8..b0201747a263a9526c5d60c2c2644a8e064a8439 100644 (file)
@@ -103,6 +103,14 @@ int virtqueue_resize(struct virtqueue *vq, u32 num,
 int virtqueue_reset(struct virtqueue *vq,
                    void (*recycle)(struct virtqueue *vq, void *buf));
 
+struct virtio_admin_cmd {
+       __le16 opcode;
+       __le16 group_type;
+       __le64 group_member_id;
+       struct scatterlist *data_sg;
+       struct scatterlist *result_sg;
+};
+
 /**
  * struct virtio_device - representation of a device using virtio
  * @index: unique position on the virtio bus
index 2b3438de2c4d4e887bdfbf89f5c137f3218b00e9..da9b271b54db8a82fce123befe529c1851bc5b7a 100644 (file)
@@ -93,6 +93,8 @@ typedef void vq_callback_t(struct virtqueue *);
  *     Returns 0 on success or error status
  *     If disable_vq_and_reset is set, then enable_vq_after_reset must also be
  *     set.
+ * @create_avq: create admin virtqueue resource.
+ * @destroy_avq: destroy admin virtqueue resource.
  */
 struct virtio_config_ops {
        void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -120,6 +122,8 @@ struct virtio_config_ops {
                               struct virtio_shm_region *region, u8 id);
        int (*disable_vq_and_reset)(struct virtqueue *vq);
        int (*enable_vq_after_reset)(struct virtqueue *vq);
+       int (*create_avq)(struct virtio_device *vdev);
+       void (*destroy_avq)(struct virtio_device *vdev);
 };
 
 /* If driver didn't advertise the feature, it will never appear. */
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
deleted file mode 100644 (file)
index d2e2785..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
- * anyone can use the definitions to implement compatible drivers/servers:
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of IBM nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Copyright (C) Red Hat, Inc., 2009, 2010, 2011
- * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011
- */
-#ifndef _LINUX_VIRTIO_CONSOLE_H
-#define _LINUX_VIRTIO_CONSOLE_H
-
-#include <uapi/linux/virtio_console.h>
-
-int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
-#endif /* _LINUX_VIRTIO_CONSOLE_H */
index 27cc1d4643219a44c01a2404124cd45ef46f7f3d..4dfa9b69ca8d95d43e44831bc166eadbe5715d3c 100644 (file)
@@ -3,6 +3,8 @@
 #define _LINUX_VIRTIO_NET_H
 
 #include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/udp.h>
 #include <uapi/linux/tcp.h>
 #include <uapi/linux/virtio_net.h>
@@ -49,6 +51,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                                        const struct virtio_net_hdr *hdr,
                                        bool little_endian)
 {
+       unsigned int nh_min_len = sizeof(struct iphdr);
        unsigned int gso_type = 0;
        unsigned int thlen = 0;
        unsigned int p_off = 0;
@@ -65,6 +68,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                        gso_type = SKB_GSO_TCPV6;
                        ip_proto = IPPROTO_TCP;
                        thlen = sizeof(struct tcphdr);
+                       nh_min_len = sizeof(struct ipv6hdr);
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        gso_type = SKB_GSO_UDP;
@@ -100,7 +104,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                if (!skb_partial_csum_set(skb, start, off))
                        return -EINVAL;
 
-               p_off = skb_transport_offset(skb) + thlen;
+               nh_min_len = max_t(u32, nh_min_len, skb_transport_offset(skb));
+               p_off = nh_min_len + thlen;
                if (!pskb_may_pull(skb, p_off))
                        return -EINVAL;
        } else {
@@ -140,7 +145,7 @@ retry:
 
                        skb_set_transport_header(skb, keys.control.thoff);
                } else if (gso_type) {
-                       p_off = thlen;
+                       p_off = nh_min_len + thlen;
                        if (!pskb_may_pull(skb, p_off))
                                return -EINVAL;
                }
diff --git a/include/linux/virtio_pci_admin.h b/include/linux/virtio_pci_admin.h
new file mode 100644 (file)
index 0000000..f4a100a
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_ADMIN_H
+#define _LINUX_VIRTIO_PCI_ADMIN_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
+bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev);
+int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset,
+                                           u8 size, u8 *buf);
+int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset,
+                                          u8 size, u8 *buf);
+int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset,
+                                           u8 size, u8 *buf);
+int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset,
+                                          u8 size, u8 *buf);
+int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
+                                          u8 req_bar_flags, u8 *bar,
+                                          u64 *bar_offset);
+#endif
+
+#endif /* _LINUX_VIRTIO_PCI_ADMIN_H */
index a09e13a577a99a0f91916ad121329dde11f42b1f..c0b1b1ca1163507dddca0870a46c93194ae07e75 100644 (file)
@@ -125,4 +125,6 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev);
 void vp_modern_remove(struct virtio_pci_modern_device *mdev);
 int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
 void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
+u16 vp_modern_avq_num(struct virtio_pci_modern_device *mdev);
+u16 vp_modern_avq_index(struct virtio_pci_modern_device *mdev);
 #endif
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
deleted file mode 100644 (file)
index 3495fd0..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * w1-gpio interface to platform code
- *
- * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
- */
-#ifndef _LINUX_W1_GPIO_H
-#define _LINUX_W1_GPIO_H
-
-struct gpio_desc;
-
-/**
- * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
- */
-struct w1_gpio_platform_data {
-       struct gpio_desc *gpiod;
-       struct gpio_desc *pullup_gpiod;
-       void (*enable_external_pullup)(int enable);
-       unsigned int pullup_duration;
-};
-
-#endif /* _LINUX_W1_GPIO_H */
index 6d0a14f7019d1e7b76a1931be98ff4f7fa9f0493..453736fd1d23ce673345833cc13593ce13450ba1 100644 (file)
@@ -60,7 +60,7 @@ struct writeback_control {
        unsigned for_reclaim:1;         /* Invoked from the page allocator */
        unsigned range_cyclic:1;        /* range_start is cyclic */
        unsigned for_sync:1;            /* sync(2) WB_SYNC_ALL writeback */
-       unsigned unpinned_fscache_wb:1; /* Cleared I_PINNING_FSCACHE_WB */
+       unsigned unpinned_netfs_wb:1;   /* Cleared I_PINNING_NETFS_WB */
 
        /*
         * When writeback IOs are bounced through async layers, only the
index 49c4640027d8a6b93e903a6238d21e8541e31da4..afd40dce40f3d593f6fa0a11828aee9fd1582de3 100644 (file)
@@ -46,12 +46,6 @@ struct scm_stat {
 
 #define UNIXCB(skb)    (*(struct unix_skb_parms *)&((skb)->cb))
 
-#define unix_state_lock(s)     spin_lock(&unix_sk(s)->lock)
-#define unix_state_unlock(s)   spin_unlock(&unix_sk(s)->lock)
-#define unix_state_lock_nested(s) \
-                               spin_lock_nested(&unix_sk(s)->lock, \
-                               SINGLE_DEPTH_NESTING)
-
 /* The AF_UNIX socket */
 struct unix_sock {
        /* WARNING: sk has to be the first member */
@@ -77,6 +71,20 @@ struct unix_sock {
 #define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
 #define unix_peer(sk) (unix_sk(sk)->peer)
 
+#define unix_state_lock(s)     spin_lock(&unix_sk(s)->lock)
+#define unix_state_unlock(s)   spin_unlock(&unix_sk(s)->lock)
+enum unix_socket_lock_class {
+       U_LOCK_NORMAL,
+       U_LOCK_SECOND,  /* for double locking, see unix_state_double_lock(). */
+       U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+};
+
+static inline void unix_state_lock_nested(struct sock *sk,
+                                  enum unix_socket_lock_class subclass)
+{
+       spin_lock_nested(&unix_sk(sk)->lock, subclass);
+}
+
 #define peer_wait peer_wq.wait
 
 long unix_inq_len(struct sock *sk);
index cf79656ce09ca1f05b733bfce2393f4a044d9454..2b54fdd8ca15a8fae0f810fc5ba550a1b1a44676 100644 (file)
@@ -2910,6 +2910,8 @@ struct cfg80211_bss_ies {
  *     own the beacon_ies, but they're just pointers to the ones from the
  *     @hidden_beacon_bss struct)
  * @proberesp_ies: the information elements from the last Probe Response frame
+ * @proberesp_ecsa_stuck: ECSA element is stuck in the Probe Response frame,
+ *     cannot rely on it having valid data
  * @hidden_beacon_bss: in case this BSS struct represents a probe response from
  *     a BSS that hides the SSID in its beacon, this points to the BSS struct
  *     that holds the beacon data. @beacon_ies is still valid, of course, and
@@ -2950,6 +2952,8 @@ struct cfg80211_bss {
        u8 chains;
        s8 chain_signal[IEEE80211_MAX_CHAINS];
 
+       u8 proberesp_ecsa_stuck:1;
+
        u8 bssid_index;
        u8 max_bssid_indicator;
 
index d0a2f827d5f20f3fed3c177d9b64d9dac373a26f..9ab4bf704e864358215d2370d33d3d9668681923 100644 (file)
@@ -357,4 +357,12 @@ static inline bool inet_csk_has_ulp(const struct sock *sk)
        return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
 }
 
+static inline void inet_init_csk_locks(struct sock *sk)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+
+       spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
+       spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
+}
+
 #endif /* _INET_CONNECTION_SOCK_H */
index aa86453f6b9ba367f772570a7b783bb098be6236..d94c242eb3ed20b2c5b2e5ceea3953cf96341fb7 100644 (file)
@@ -307,11 +307,6 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
 #define inet_assign_bit(nr, sk, val)           \
        assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
 
-static inline bool sk_is_inet(struct sock *sk)
-{
-       return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
-}
-
 /**
  * sk_to_full_sk - Access to a full socket
  * @sk: pointer to a socket
index de0c69c57e3cb7485e3d8473bc0b109e4280d2f6..25cb688bdc62360292e25b0d676f135101a2118c 100644 (file)
@@ -767,7 +767,7 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
  *     Functions provided by ip_sockglue.c
  */
 
-void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst);
 void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
                         struct sk_buff *skb, int tlen, int offset);
 int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
index 7e73f8e5e4970d4d12b89bf6a1a3988f88e2b635..1d55ba7c45be16356e4144e09cdfeb7da99a7971 100644 (file)
@@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
  */
 static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
 {
-       if (skb->protocol == htons(ETH_P_802_2))
-               memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+       memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
 }
 
 /**
@@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
  */
 static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
 {
-       if (skb->protocol == htons(ETH_P_802_2))
-               memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+       memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
 }
 
 /**
index d68b0a4834315062a4b1b450dd87fb66a76218c0..8b8ed4e13d74dfaa63ede54f00c3ba5aac2f6ee8 100644 (file)
@@ -128,7 +128,7 @@ netdev_txq_completed_mb(struct netdev_queue *dev_queue,
                netdev_txq_completed_mb(txq, pkts, bytes);              \
                                                                        \
                _res = -1;                                              \
-               if (pkts && likely(get_desc > start_thrs)) {            \
+               if (pkts && likely(get_desc >= start_thrs)) {           \
                        _res = 1;                                       \
                        if (unlikely(netif_tx_queue_stopped(txq)) &&    \
                            !(down_cond)) {                             \
index b157c5cafd14cfe307f3d36ad533d528f142eea6..510244cc0f8f0e479f252598ba2aaf43b8918978 100644 (file)
@@ -205,6 +205,7 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
  *     @nla: netlink attributes
  *     @portid: netlink portID of the original message
  *     @seq: netlink sequence number
+ *     @flags: modifiers to new request
  *     @family: protocol family
  *     @level: depth of the chains
  *     @report: notify via unicast netlink message
@@ -282,6 +283,7 @@ struct nft_elem_priv { };
  *
  *     @key: element key
  *     @key_end: closing element key
+ *     @data: element data
  *     @priv: element private data and extensions
  */
 struct nft_set_elem {
@@ -325,10 +327,10 @@ struct nft_set_iter {
  *     @dtype: data type
  *     @dlen: data length
  *     @objtype: object type
- *     @flags: flags
  *     @size: number of set elements
  *     @policy: set policy
  *     @gc_int: garbage collector interval
+ *     @timeout: element timeout
  *     @field_len: length of each field in concatenation, bytes
  *     @field_count: number of concatenated fields in element
  *     @expr: set must support for expressions
@@ -351,9 +353,9 @@ struct nft_set_desc {
 /**
  *     enum nft_set_class - performance class
  *
- *     @NFT_LOOKUP_O_1: constant, O(1)
- *     @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
- *     @NFT_LOOKUP_O_N: linear, O(N)
+ *     @NFT_SET_CLASS_O_1: constant, O(1)
+ *     @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N)
+ *     @NFT_SET_CLASS_O_N: linear, O(N)
  */
 enum nft_set_class {
        NFT_SET_CLASS_O_1,
@@ -422,9 +424,13 @@ struct nft_set_ext;
  *     @remove: remove element from set
  *     @walk: iterate over all set elements
  *     @get: get set elements
+ *     @commit: commit set elements
+ *     @abort: abort set elements
  *     @privsize: function to return size of set private data
+ *     @estimate: estimate the required memory size and the lookup complexity class
  *     @init: initialize private data of new set instance
  *     @destroy: destroy private data of set instance
+ *     @gc_init: initialize garbage collection
  *     @elemsize: element private size
  *
  *     Operations lookup, update and delete have simpler interfaces, are faster
@@ -540,13 +546,16 @@ struct nft_set_elem_expr {
  *     @policy: set parameterization (see enum nft_set_policies)
  *     @udlen: user data length
  *     @udata: user data
- *     @expr: stateful expression
+ *     @pending_update: list of pending update set element
  *     @ops: set ops
  *     @flags: set flags
  *     @dead: set will be freed, never cleared
  *     @genmask: generation mask
  *     @klen: key length
  *     @dlen: data length
+ *     @num_exprs: numbers of exprs
+ *     @exprs: stateful expression
+ *     @catchall_list: list of catch-all set element
  *     @data: private set data
  */
 struct nft_set {
@@ -692,6 +701,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[];
  *
  *     @len: length of extension area
  *     @offset: offsets of individual extension types
+ *     @ext_len: length of the expected extension(used to sanity check)
  */
 struct nft_set_ext_tmpl {
        u16     len;
@@ -798,10 +808,16 @@ static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ex
        return nft_set_ext(ext, NFT_SET_EXT_EXPRESSIONS);
 }
 
-static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext,
+                                         u64 tstamp)
 {
        return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
-              time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext));
+              time_after_eq64(tstamp, *nft_set_ext_expiration(ext));
+}
+
+static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+{
+       return __nft_set_elem_expired(ext, get_jiffies_64());
 }
 
 static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
@@ -840,6 +856,7 @@ struct nft_expr_ops;
  *     @select_ops: function to select nft_expr_ops
  *     @release_ops: release nft_expr_ops
  *     @ops: default ops, used when no select_ops functions is present
+ *     @inner_ops: inner ops, used for inner packet operation
  *     @list: used internally
  *     @name: Identifier
  *     @owner: module reference
@@ -881,14 +898,22 @@ struct nft_offload_ctx;
  *     struct nft_expr_ops - nf_tables expression operations
  *
  *     @eval: Expression evaluation function
+ *     @clone: Expression clone function
  *     @size: full expression size, including private data size
  *     @init: initialization function
  *     @activate: activate expression in the next generation
  *     @deactivate: deactivate expression in next generation
  *     @destroy: destruction function, called after synchronize_rcu
+ *     @destroy_clone: destruction clone function
  *     @dump: function to dump parameters
- *     @type: expression type
  *     @validate: validate expression, called during loop detection
+ *     @reduce: reduce expression
+ *     @gc: garbage collection expression
+ *     @offload: hardware offload expression
+ *     @offload_action: function to report true/false to allocate one slot or not in the flow
+ *                      offload array
+ *     @offload_stats: function to synchronize hardware stats via updating the counter expression
+ *     @type: expression type
  *     @data: extra data to attach to this expression operation
  */
 struct nft_expr_ops {
@@ -1041,14 +1066,21 @@ struct nft_rule_blob {
 /**
  *     struct nft_chain - nf_tables chain
  *
+ *     @blob_gen_0: rule blob pointer to the current generation
+ *     @blob_gen_1: rule blob pointer to the future generation
  *     @rules: list of rules in the chain
  *     @list: used internally
  *     @rhlhead: used internally
  *     @table: table that this chain belongs to
  *     @handle: chain handle
  *     @use: number of jump references to this chain
- *     @flags: bitmask of enum nft_chain_flags
+ *     @flags: bitmask of enum NFTA_CHAIN_FLAGS
+ *     @bound: bind or not
+ *     @genmask: generation mask
  *     @name: name of the chain
+ *     @udlen: user data length
+ *     @udata: user data in the chain
+ *     @blob_next: rule blob pointer to the next in the chain
  */
 struct nft_chain {
        struct nft_rule_blob            __rcu *blob_gen_0;
@@ -1146,6 +1178,7 @@ struct nft_hook {
  *     @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
  *     @type: chain type
  *     @policy: default policy
+ *     @flags: indicate the base chain disabled or not
  *     @stats: per-cpu chain stats
  *     @chain: the chain
  *     @flow_block: flow block (for hardware offload)
@@ -1274,11 +1307,13 @@ struct nft_object_hash_key {
  *     struct nft_object - nf_tables stateful object
  *
  *     @list: table stateful object list node
- *     @key:  keys that identify this object
  *     @rhlhead: nft_objname_ht node
+ *     @key: keys that identify this object
  *     @genmask: generation mask
  *     @use: number of references to this stateful object
  *     @handle: unique object handle
+ *     @udlen: length of user data
+ *     @udata: user data
  *     @ops: object operations
  *     @data: object data, layout depends on type
  */
@@ -1322,6 +1357,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
  *     @type: stateful object numeric type
  *     @owner: module owner
  *     @maxattr: maximum netlink attribute
+ *     @family: address family for AF-specific object types
  *     @policy: netlink attribute policy
  */
 struct nft_object_type {
@@ -1331,6 +1367,7 @@ struct nft_object_type {
        struct list_head                list;
        u32                             type;
        unsigned int                    maxattr;
+       u8                              family;
        struct module                   *owner;
        const struct nla_policy         *policy;
 };
@@ -1344,6 +1381,7 @@ struct nft_object_type {
  *     @destroy: release existing stateful object
  *     @dump: netlink dump stateful object
  *     @update: update stateful object
+ *     @type: pointer to object type
  */
 struct nft_object_ops {
        void                            (*eval)(struct nft_object *obj,
@@ -1379,9 +1417,8 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
  *     @genmask: generation mask
  *     @use: number of references to this flow table
  *     @handle: unique object handle
- *     @dev_name: array of device names
+ *     @hook_list: hook list for hooks per net_device in flowtables
  *     @data: rhashtable and garbage collector
- *     @ops: array of hooks
  */
 struct nft_flowtable {
        struct list_head                list;
@@ -1748,6 +1785,7 @@ struct nftables_pernet {
        struct list_head        notify_list;
        struct mutex            commit_mutex;
        u64                     table_handle;
+       u64                     tstamp;
        unsigned int            base_seq;
        unsigned int            gc_seq;
        u8                      validate_state;
@@ -1760,6 +1798,11 @@ static inline struct nftables_pernet *nft_pernet(const struct net *net)
        return net_generic(net, nf_tables_net_id);
 }
 
+static inline u64 nft_net_tstamp(const struct net *net)
+{
+       return nft_pernet(net)->tstamp;
+}
+
 #define __NFT_REDUCE_READONLY  1UL
 #define NFT_REDUCE_READONLY    (void *)__NFT_REDUCE_READONLY
 
index ba3e1b315de838f9696ad7948ae474552c288e73..934fdb9775519ff45d9455e74a8695bf8a1e4bce 100644 (file)
@@ -375,6 +375,10 @@ struct tcf_proto_ops {
                                                struct nlattr **tca,
                                                struct netlink_ext_ack *extack);
        void                    (*tmplt_destroy)(void *tmplt_priv);
+       void                    (*tmplt_reoffload)(struct tcf_chain *chain,
+                                                  bool add,
+                                                  flow_setup_cb_t *cb,
+                                                  void *cb_priv);
        struct tcf_exts *       (*get_exts)(const struct tcf_proto *tp,
                                            u32 handle);
 
index a7f815c7cfdfdf1296be2967fd100efdb10cdd63..54ca8dcbfb4335d657b5cea323aa7d8c4316d49e 100644 (file)
@@ -2765,9 +2765,25 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
                           &skb_shinfo(skb)->tskey);
 }
 
+static inline bool sk_is_inet(const struct sock *sk)
+{
+       int family = READ_ONCE(sk->sk_family);
+
+       return family == AF_INET || family == AF_INET6;
+}
+
 static inline bool sk_is_tcp(const struct sock *sk)
 {
-       return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
+       return sk_is_inet(sk) &&
+              sk->sk_type == SOCK_STREAM &&
+              sk->sk_protocol == IPPROTO_TCP;
+}
+
+static inline bool sk_is_udp(const struct sock *sk)
+{
+       return sk_is_inet(sk) &&
+              sk->sk_type == SOCK_DGRAM &&
+              sk->sk_protocol == IPPROTO_UDP;
 }
 
 static inline bool sk_is_stream_unix(const struct sock *sk)
index 526c1e7f505e4d9633bfb6da058ea25b9f2b9cfa..c9aec9ab6191205c7c6f8d3f0f5c136cae520750 100644 (file)
@@ -159,11 +159,29 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
        return ret;
 }
 
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+       struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+       list_del(&xskb->xskb_list_node);
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+       struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
+       struct xdp_buff_xsk *frag;
+
+       frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+                              xskb_list_node);
+       return &frag->xdp;
+}
+
 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
 {
        xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
        xdp->data_meta = xdp->data;
        xdp->data_end = xdp->data + size;
+       xdp->flags = 0;
 }
 
 static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
@@ -350,6 +368,15 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
        return NULL;
 }
 
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+       return NULL;
+}
+
 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
 {
 }
index c47cc71a999ec94b22c78c9430018fcecd1fda77..17a0a8c3d656087fdb8b53a666b46c13b75fcf14 100644 (file)
@@ -31,6 +31,7 @@
 #define PM8998_SUBTYPE         0x14
 #define PMI8998_SUBTYPE                0x15
 #define PM8005_SUBTYPE         0x18
+#define PM8937_SUBTYPE         0x19
 #define PM660L_SUBTYPE         0x1a
 #define PM660_SUBTYPE          0x1b
 #define PM8150_SUBTYPE         0x1e
index 8c18e8b6d27d21b34962cc392dfeaffe48070847..b24716ab27504bdfa17f221a39053dd21dd961d9 100644 (file)
@@ -75,6 +75,7 @@
 #define CS35L56_DSP1_AHBM_WINDOW_DEBUG_0               0x25E2040
 #define CS35L56_DSP1_AHBM_WINDOW_DEBUG_1               0x25E2044
 #define CS35L56_DSP1_XMEM_UNPACKED24_0                 0x2800000
+#define CS35L56_DSP1_FW_VER                            0x2800010
 #define CS35L56_DSP1_HALO_STATE_A1                     0x2801E58
 #define CS35L56_DSP1_HALO_STATE                                0x28021E0
 #define CS35L56_DSP1_PM_CUR_STATE_A1                   0x2804000
 
 #define CS35L56_CONTROL_PORT_READY_US                  2200
 #define CS35L56_HALO_STATE_POLL_US                     1000
-#define CS35L56_HALO_STATE_TIMEOUT_US                  50000
+#define CS35L56_HALO_STATE_TIMEOUT_US                  250000
 #define CS35L56_RESET_PULSE_MIN_US                     1100
 #define CS35L56_WAKE_HOLD_TIME_US                      1000
 
@@ -272,6 +273,7 @@ extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
 extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
 
 int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
+int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base);
 int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
 int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
 int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base);
@@ -284,7 +286,10 @@ int cs35l56_is_fw_reload_needed(struct cs35l56_base *cs35l56_base);
 int cs35l56_runtime_suspend_common(struct cs35l56_base *cs35l56_base);
 int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_soundwire);
 void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
+int cs35l56_read_prot_status(struct cs35l56_base *cs35l56_base,
+                            bool *fw_missing, unsigned int *fw_version);
 int cs35l56_hw_init(struct cs35l56_base *cs35l56_base);
+int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base);
 int cs35l56_get_bclk_freq_id(unsigned int freq);
 void cs35l56_fill_supply_names(struct regulator_bulk_data *data);
 
index 0a86ab8d47b9806955b5e759d59cfa7acc76b49e..b00d65417c310a42a39aec6ce927b85083ace264 100644 (file)
@@ -1,13 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 //
-// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
 //
 // Copyright (C) 2022 - 2023 Texas Instruments Incorporated
 // https://www.ti.com
 //
-// The TAS2781 driver implements a flexible and configurable
+// The TAS2563/TAS2781 driver implements a flexible and configurable
 // algo coefficient setting for one, two, or even multiple
-// TAS2781 chips.
+// TAS2563/TAS2781 chips.
 //
 // Author: Shenghao Ding <shenghao-ding@ti.com>
 // Author: Kevin Lu <kevin-lu@ti.com>
@@ -60,7 +60,8 @@
 #define TASDEVICE_CMD_FIELD_W          0x4
 
 enum audio_device {
-       TAS2781 = 0,
+       TAS2563,
+       TAS2781,
 };
 
 enum device_catlog_id {
index 5194b7e6dc8d07170e2fcda6079ed4d9b7f1293f..08f2c93d6b1607939fb47c510153d544f496febe 100644 (file)
@@ -902,37 +902,6 @@ TRACE_EVENT(afs_dir_check_failed,
                      __entry->vnode, __entry->off, __entry->i_size)
            );
 
-TRACE_EVENT(afs_folio_dirty,
-           TP_PROTO(struct afs_vnode *vnode, const char *where, struct folio *folio),
-
-           TP_ARGS(vnode, where, folio),
-
-           TP_STRUCT__entry(
-                   __field(struct afs_vnode *,         vnode)
-                   __field(const char *,               where)
-                   __field(pgoff_t,                    index)
-                   __field(unsigned long,              from)
-                   __field(unsigned long,              to)
-                            ),
-
-           TP_fast_assign(
-                   unsigned long priv = (unsigned long)folio_get_private(folio);
-                   __entry->vnode = vnode;
-                   __entry->where = where;
-                   __entry->index = folio_index(folio);
-                   __entry->from  = afs_folio_dirty_from(folio, priv);
-                   __entry->to    = afs_folio_dirty_to(folio, priv);
-                   __entry->to   |= (afs_is_folio_dirty_mmapped(priv) ?
-                                     (1UL << (BITS_PER_LONG - 1)) : 0);
-                          ),
-
-           TP_printk("vn=%p %lx %s %lx-%lx%s",
-                     __entry->vnode, __entry->index, __entry->where,
-                     __entry->from,
-                     __entry->to & ~(1UL << (BITS_PER_LONG - 1)),
-                     __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "")
-           );
-
 TRACE_EVENT(afs_call_state,
            TP_PROTO(struct afs_call *call,
                     enum afs_call_state from,
@@ -1102,6 +1071,31 @@ TRACE_EVENT(afs_file_error,
                      __print_symbolic(__entry->where, afs_file_errors))
            );
 
+TRACE_EVENT(afs_bulkstat_error,
+           TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort),
+
+           TP_ARGS(op, fid, index, abort),
+
+           TP_STRUCT__entry(
+                   __field_struct(struct afs_fid,      fid)
+                   __field(unsigned int,               op)
+                   __field(unsigned int,               index)
+                   __field(s32,                        abort)
+                            ),
+
+           TP_fast_assign(
+                   __entry->op = op->debug_id;
+                   __entry->fid = *fid;
+                   __entry->index = index;
+                   __entry->abort = abort;
+                          ),
+
+           TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d",
+                     __entry->op, __entry->index,
+                     __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+                     __entry->abort)
+           );
+
 TRACE_EVENT(afs_cm_no_server,
            TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
 
index 65029dfb92fbc3162c30d0a85a6805afa3ab335e..a697f4b77162dd79c45c5cdb25db63332818fcc7 100644 (file)
@@ -772,15 +772,14 @@ TRACE_EVENT(ext4_mb_release_group_pa,
 );
 
 TRACE_EVENT(ext4_discard_preallocations,
-       TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
+       TP_PROTO(struct inode *inode, unsigned int len),
 
-       TP_ARGS(inode, len, needed),
+       TP_ARGS(inode, len),
 
        TP_STRUCT__entry(
                __field(        dev_t,          dev             )
                __field(        ino_t,          ino             )
                __field(        unsigned int,   len             )
-               __field(        unsigned int,   needed          )
 
        ),
 
@@ -788,13 +787,11 @@ TRACE_EVENT(ext4_discard_preallocations,
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
                __entry->len    = len;
-               __entry->needed = needed;
        ),
 
-       TP_printk("dev %d,%d ino %lu len: %u needed %u",
+       TP_printk("dev %d,%d ino %lu len: %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino, __entry->len,
-                 __entry->needed)
+                 (unsigned long) __entry->ino, __entry->len)
 );
 
 TRACE_EVENT(ext4_mb_discard_preallocations,
index 3bd31ea23fee9e294659d4da406e60d4c6e2f23b..011fba6b55522d747128d886adceb57e490ce7ef 100644 (file)
@@ -62,7 +62,7 @@ TRACE_EVENT(kvm_vcpu_wakeup,
                  __entry->valid ? "valid" : "invalid")
 );
 
-#if defined(CONFIG_HAVE_KVM_IRQFD)
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
 TRACE_EVENT(kvm_set_irq,
        TP_PROTO(unsigned int gsi, int level, int irq_source_id),
        TP_ARGS(gsi, level, irq_source_id),
@@ -82,7 +82,7 @@ TRACE_EVENT(kvm_set_irq,
        TP_printk("gsi %u level %d source %d",
                  __entry->gsi, __entry->level, __entry->irq_source_id)
 );
-#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
 
 #if defined(__KVM_HAVE_IOAPIC)
 #define kvm_deliver_mode               \
@@ -170,7 +170,7 @@ TRACE_EVENT(kvm_msi_set_irq,
 
 #endif /* defined(__KVM_HAVE_IOAPIC) */
 
-#if defined(CONFIG_HAVE_KVM_IRQFD)
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
 
 #ifdef kvm_irqchips
 #define kvm_ack_irq_string "irqchip %s pin %u"
@@ -197,7 +197,7 @@ TRACE_EVENT(kvm_ack_irq,
        TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
 );
 
-#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
 
 
 
index beec534cbaab25e6a2ce4aacaa09134068f7394a..447a8c21cf57df7d30de48efcd27c435fab2c308 100644 (file)
  * Define enums for tracing information.
  */
 #define netfs_read_traces                                      \
+       EM(netfs_read_trace_dio_read,           "DIO-READ ")    \
        EM(netfs_read_trace_expanded,           "EXPANDED ")    \
        EM(netfs_read_trace_readahead,          "READAHEAD")    \
        EM(netfs_read_trace_readpage,           "READPAGE ")    \
+       EM(netfs_read_trace_prefetch_for_write, "PREFETCHW")    \
        E_(netfs_read_trace_write_begin,        "WRITEBEGN")
 
+#define netfs_write_traces                                     \
+       EM(netfs_write_trace_dio_write,         "DIO-WRITE")    \
+       EM(netfs_write_trace_launder,           "LAUNDER  ")    \
+       EM(netfs_write_trace_unbuffered_write,  "UNB-WRITE")    \
+       EM(netfs_write_trace_writeback,         "WRITEBACK")    \
+       E_(netfs_write_trace_writethrough,      "WRITETHRU")
+
 #define netfs_rreq_origins                                     \
        EM(NETFS_READAHEAD,                     "RA")           \
        EM(NETFS_READPAGE,                      "RP")           \
-       E_(NETFS_READ_FOR_WRITE,                "RW")
+       EM(NETFS_READ_FOR_WRITE,                "RW")           \
+       EM(NETFS_WRITEBACK,                     "WB")           \
+       EM(NETFS_WRITETHROUGH,                  "WT")           \
+       EM(NETFS_LAUNDER_WRITE,                 "LW")           \
+       EM(NETFS_UNBUFFERED_WRITE,              "UW")           \
+       EM(NETFS_DIO_READ,                      "DR")           \
+       E_(NETFS_DIO_WRITE,                     "DW")
 
 #define netfs_rreq_traces                                      \
        EM(netfs_rreq_trace_assess,             "ASSESS ")      \
        EM(netfs_rreq_trace_copy,               "COPY   ")      \
        EM(netfs_rreq_trace_done,               "DONE   ")      \
        EM(netfs_rreq_trace_free,               "FREE   ")      \
+       EM(netfs_rreq_trace_redirty,            "REDIRTY")      \
        EM(netfs_rreq_trace_resubmit,           "RESUBMT")      \
        EM(netfs_rreq_trace_unlock,             "UNLOCK ")      \
-       E_(netfs_rreq_trace_unmark,             "UNMARK ")
+       EM(netfs_rreq_trace_unmark,             "UNMARK ")      \
+       EM(netfs_rreq_trace_wait_ip,            "WAIT-IP")      \
+       EM(netfs_rreq_trace_wake_ip,            "WAKE-IP")      \
+       E_(netfs_rreq_trace_write_done,         "WR-DONE")
 
 #define netfs_sreq_sources                                     \
        EM(NETFS_FILL_WITH_ZEROES,              "ZERO")         \
        EM(NETFS_DOWNLOAD_FROM_SERVER,          "DOWN")         \
        EM(NETFS_READ_FROM_CACHE,               "READ")         \
-       E_(NETFS_INVALID_READ,                  "INVL")         \
+       EM(NETFS_INVALID_READ,                  "INVL")         \
+       EM(NETFS_UPLOAD_TO_SERVER,              "UPLD")         \
+       EM(NETFS_WRITE_TO_CACHE,                "WRIT")         \
+       E_(NETFS_INVALID_WRITE,                 "INVL")
 
 #define netfs_sreq_traces                                      \
        EM(netfs_sreq_trace_download_instead,   "RDOWN")        \
        EM(netfs_sreq_trace_free,               "FREE ")        \
+       EM(netfs_sreq_trace_limited,            "LIMIT")        \
        EM(netfs_sreq_trace_prepare,            "PREP ")        \
        EM(netfs_sreq_trace_resubmit_short,     "SHORT")        \
        EM(netfs_sreq_trace_submit,             "SUBMT")        \
 #define netfs_failures                                                 \
        EM(netfs_fail_check_write_begin,        "check-write-begin")    \
        EM(netfs_fail_copy_to_cache,            "copy-to-cache")        \
+       EM(netfs_fail_dio_read_short,           "dio-read-short")       \
+       EM(netfs_fail_dio_read_zero,            "dio-read-zero")        \
        EM(netfs_fail_read,                     "read")                 \
        EM(netfs_fail_short_read,               "short-read")           \
-       E_(netfs_fail_prepare_write,            "prep-write")
+       EM(netfs_fail_prepare_write,            "prep-write")           \
+       E_(netfs_fail_write,                    "write")
 
 #define netfs_rreq_ref_traces                                  \
-       EM(netfs_rreq_trace_get_hold,           "GET HOLD   ")  \
+       EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND")  \
        EM(netfs_rreq_trace_get_subreq,         "GET SUBREQ ")  \
        EM(netfs_rreq_trace_put_complete,       "PUT COMPLT ")  \
        EM(netfs_rreq_trace_put_discard,        "PUT DISCARD")  \
        EM(netfs_rreq_trace_put_failed,         "PUT FAILED ")  \
-       EM(netfs_rreq_trace_put_hold,           "PUT HOLD   ")  \
+       EM(netfs_rreq_trace_put_no_submit,      "PUT NO-SUBM")  \
+       EM(netfs_rreq_trace_put_return,         "PUT RETURN ")  \
        EM(netfs_rreq_trace_put_subreq,         "PUT SUBREQ ")  \
-       EM(netfs_rreq_trace_put_zero_len,       "PUT ZEROLEN")  \
+       EM(netfs_rreq_trace_put_work,           "PUT WORK   ")  \
+       EM(netfs_rreq_trace_see_work,           "SEE WORK   ")  \
        E_(netfs_rreq_trace_new,                "NEW        ")
 
 #define netfs_sreq_ref_traces                                  \
        EM(netfs_sreq_trace_get_short_read,     "GET SHORTRD")  \
        EM(netfs_sreq_trace_new,                "NEW        ")  \
        EM(netfs_sreq_trace_put_clear,          "PUT CLEAR  ")  \
+       EM(netfs_sreq_trace_put_discard,        "PUT DISCARD")  \
        EM(netfs_sreq_trace_put_failed,         "PUT FAILED ")  \
        EM(netfs_sreq_trace_put_merged,         "PUT MERGED ")  \
        EM(netfs_sreq_trace_put_no_copy,        "PUT NO COPY")  \
+       EM(netfs_sreq_trace_put_wip,            "PUT WIP    ")  \
+       EM(netfs_sreq_trace_put_work,           "PUT WORK   ")  \
        E_(netfs_sreq_trace_put_terminated,     "PUT TERM   ")
 
+#define netfs_folio_traces                                     \
+       /* The first few correspond to enum netfs_how_to_modify */      \
+       EM(netfs_folio_is_uptodate,             "mod-uptodate") \
+       EM(netfs_just_prefetch,                 "mod-prefetch") \
+       EM(netfs_whole_folio_modify,            "mod-whole-f")  \
+       EM(netfs_modify_and_clear,              "mod-n-clear")  \
+       EM(netfs_streaming_write,               "mod-streamw")  \
+       EM(netfs_streaming_write_cont,          "mod-streamw+") \
+       EM(netfs_flush_content,                 "flush")        \
+       EM(netfs_streaming_filled_page,         "mod-streamw-f") \
+       EM(netfs_streaming_cont_filled_page,    "mod-streamw-f+") \
+       /* The rest are for writeback */                        \
+       EM(netfs_folio_trace_clear,             "clear")        \
+       EM(netfs_folio_trace_clear_s,           "clear-s")      \
+       EM(netfs_folio_trace_clear_g,           "clear-g")      \
+       EM(netfs_folio_trace_copy_to_cache,     "copy")         \
+       EM(netfs_folio_trace_end_copy,          "end-copy")     \
+       EM(netfs_folio_trace_filled_gaps,       "filled-gaps")  \
+       EM(netfs_folio_trace_kill,              "kill")         \
+       EM(netfs_folio_trace_launder,           "launder")      \
+       EM(netfs_folio_trace_mkwrite,           "mkwrite")      \
+       EM(netfs_folio_trace_mkwrite_plus,      "mkwrite+")     \
+       EM(netfs_folio_trace_read_gaps,         "read-gaps")    \
+       EM(netfs_folio_trace_redirty,           "redirty")      \
+       EM(netfs_folio_trace_redirtied,         "redirtied")    \
+       EM(netfs_folio_trace_store,             "store")        \
+       EM(netfs_folio_trace_store_plus,        "store+")       \
+       EM(netfs_folio_trace_wthru,             "wthru")        \
+       E_(netfs_folio_trace_wthru_plus,        "wthru+")
+
 #ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
 #define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
 
 #define E_(a, b) a
 
 enum netfs_read_trace { netfs_read_traces } __mode(byte);
+enum netfs_write_trace { netfs_write_traces } __mode(byte);
 enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
 enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
 enum netfs_failure { netfs_failures } __mode(byte);
 enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
 enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
+enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
 
 #endif
 
@@ -107,6 +170,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
 #define E_(a, b) TRACE_DEFINE_ENUM(a);
 
 netfs_read_traces;
+netfs_write_traces;
 netfs_rreq_origins;
 netfs_rreq_traces;
 netfs_sreq_sources;
@@ -114,6 +178,7 @@ netfs_sreq_traces;
 netfs_failures;
 netfs_rreq_ref_traces;
 netfs_sreq_ref_traces;
+netfs_folio_traces;
 
 /*
  * Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -314,6 +379,82 @@ TRACE_EVENT(netfs_sreq_ref,
                      __entry->ref)
            );
 
+TRACE_EVENT(netfs_folio,
+           TP_PROTO(struct folio *folio, enum netfs_folio_trace why),
+
+           TP_ARGS(folio, why),
+
+           TP_STRUCT__entry(
+                   __field(ino_t,                      ino)
+                   __field(pgoff_t,                    index)
+                   __field(unsigned int,               nr)
+                   __field(enum netfs_folio_trace,     why)
+                            ),
+
+           TP_fast_assign(
+                   __entry->ino = folio->mapping->host->i_ino;
+                   __entry->why = why;
+                   __entry->index = folio_index(folio);
+                   __entry->nr = folio_nr_pages(folio);
+                          ),
+
+           TP_printk("i=%05lx ix=%05lx-%05lx %s",
+                     __entry->ino, __entry->index, __entry->index + __entry->nr - 1,
+                     __print_symbolic(__entry->why, netfs_folio_traces))
+           );
+
+TRACE_EVENT(netfs_write_iter,
+           TP_PROTO(const struct kiocb *iocb, const struct iov_iter *from),
+
+           TP_ARGS(iocb, from),
+
+           TP_STRUCT__entry(
+                   __field(unsigned long long,         start           )
+                   __field(size_t,                     len             )
+                   __field(unsigned int,               flags           )
+                            ),
+
+           TP_fast_assign(
+                   __entry->start      = iocb->ki_pos;
+                   __entry->len        = iov_iter_count(from);
+                   __entry->flags      = iocb->ki_flags;
+                          ),
+
+           TP_printk("WRITE-ITER s=%llx l=%zx f=%x",
+                     __entry->start, __entry->len, __entry->flags)
+           );
+
+TRACE_EVENT(netfs_write,
+           TP_PROTO(const struct netfs_io_request *wreq,
+                    enum netfs_write_trace what),
+
+           TP_ARGS(wreq, what),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,               wreq            )
+                   __field(unsigned int,               cookie          )
+                   __field(enum netfs_write_trace,     what            )
+                   __field(unsigned long long,         start           )
+                   __field(size_t,                     len             )
+                            ),
+
+           TP_fast_assign(
+                   struct netfs_inode *__ctx = netfs_inode(wreq->inode);
+                   struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
+                   __entry->wreq       = wreq->debug_id;
+                   __entry->cookie     = __cookie ? __cookie->debug_id : 0;
+                   __entry->what       = what;
+                   __entry->start      = wreq->start;
+                   __entry->len        = wreq->len;
+                          ),
+
+           TP_printk("R=%08x %s c=%08x by=%llx-%llx",
+                     __entry->wreq,
+                     __print_symbolic(__entry->what, netfs_write_traces),
+                     __entry->cookie,
+                     __entry->start, __entry->start + __entry->len - 1)
+           );
+
 #undef EM
 #undef E_
 #endif /* _TRACE_NETFS_H */
index 4c1ef7b3705c26baf79c135f235c94195635bf4b..87b8de9b6c1c440ce4a8b2fe6072b4d81cbc1cf4 100644 (file)
        EM(rxrpc_skb_eaten_by_unshare_nomem,    "ETN unshar-nm") \
        EM(rxrpc_skb_get_conn_secured,          "GET conn-secd") \
        EM(rxrpc_skb_get_conn_work,             "GET conn-work") \
+       EM(rxrpc_skb_get_last_nack,             "GET last-nack") \
        EM(rxrpc_skb_get_local_work,            "GET locl-work") \
        EM(rxrpc_skb_get_reject_work,           "GET rej-work ") \
        EM(rxrpc_skb_get_to_recvmsg,            "GET to-recv  ") \
        EM(rxrpc_skb_put_error_report,          "PUT error-rep") \
        EM(rxrpc_skb_put_input,                 "PUT input    ") \
        EM(rxrpc_skb_put_jumbo_subpacket,       "PUT jumbo-sub") \
+       EM(rxrpc_skb_put_last_nack,             "PUT last-nack") \
        EM(rxrpc_skb_put_purge,                 "PUT purge    ") \
        EM(rxrpc_skb_put_rotate,                "PUT rotate   ") \
        EM(rxrpc_skb_put_unknown,               "PUT unknown  ") \
@@ -1552,7 +1554,7 @@ TRACE_EVENT(rxrpc_congest,
                    memcpy(&__entry->sum, summary, sizeof(__entry->sum));
                           ),
 
-           TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+           TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u,%u b=%u u=%u d=%u l=%x%s%s%s",
                      __entry->call,
                      __entry->ack_serial,
                      __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
@@ -1560,9 +1562,9 @@ TRACE_EVENT(rxrpc_congest,
                      __print_symbolic(__entry->sum.mode, rxrpc_congest_modes),
                      __entry->sum.cwnd,
                      __entry->sum.ssthresh,
-                     __entry->sum.nr_acks, __entry->sum.saw_nacks,
+                     __entry->sum.nr_acks, __entry->sum.nr_retained_nacks,
                      __entry->sum.nr_new_acks,
-                     __entry->sum.nr_rot_new_acks,
+                     __entry->sum.nr_new_nacks,
                      __entry->top - __entry->hard_ack,
                      __entry->sum.cumulative_acks,
                      __entry->sum.dup_acks,
index de1944e42c6556a46a8a87855189f34b86859e99..19a13468eca5e4c13cbdf05444604a54567a67ae 100644 (file)
@@ -53,7 +53,7 @@ extern "C" {
 #define DRM_IVPU_PARAM_CORE_CLOCK_RATE     3
 #define DRM_IVPU_PARAM_NUM_CONTEXTS        4
 #define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
-#define DRM_IVPU_PARAM_CONTEXT_PRIORITY            6
+#define DRM_IVPU_PARAM_CONTEXT_PRIORITY            6 /* Deprecated */
 #define DRM_IVPU_PARAM_CONTEXT_ID          7
 #define DRM_IVPU_PARAM_FW_API_VERSION      8
 #define DRM_IVPU_PARAM_ENGINE_HEARTBEAT            9
@@ -64,11 +64,18 @@ extern "C" {
 
 #define DRM_IVPU_PLATFORM_TYPE_SILICON     0
 
+/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
 #define DRM_IVPU_CONTEXT_PRIORITY_IDLE     0
 #define DRM_IVPU_CONTEXT_PRIORITY_NORMAL    1
 #define DRM_IVPU_CONTEXT_PRIORITY_FOCUS            2
 #define DRM_IVPU_CONTEXT_PRIORITY_REALTIME  3
 
+#define DRM_IVPU_JOB_PRIORITY_DEFAULT  0
+#define DRM_IVPU_JOB_PRIORITY_IDLE     1
+#define DRM_IVPU_JOB_PRIORITY_NORMAL   2
+#define DRM_IVPU_JOB_PRIORITY_FOCUS    3
+#define DRM_IVPU_JOB_PRIORITY_REALTIME 4
+
 /**
  * DRM_IVPU_CAP_METRIC_STREAMER
  *
@@ -112,10 +119,6 @@ struct drm_ivpu_param {
         * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
         * Lowest VPU virtual address available in the current context (read-only)
         *
-        * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
-        * Value of current context scheduling priority (read-write).
-        * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
-        *
         * %DRM_IVPU_PARAM_CONTEXT_ID:
         * Current context ID, always greater than 0 (read-only)
         *
@@ -286,10 +289,23 @@ struct drm_ivpu_submit {
         * to be executed. The offset has to be 8-byte aligned.
         */
        __u32 commands_offset;
+
+       /**
+        * @priority:
+        *
+        * Priority to be set for related job command queue, can be one of the following:
+        * %DRM_IVPU_JOB_PRIORITY_DEFAULT
+        * %DRM_IVPU_JOB_PRIORITY_IDLE
+        * %DRM_IVPU_JOB_PRIORITY_NORMAL
+        * %DRM_IVPU_JOB_PRIORITY_FOCUS
+        * %DRM_IVPU_JOB_PRIORITY_REALTIME
+        */
+       __u32 priority;
 };
 
 /* drm_ivpu_bo_wait job status codes */
 #define DRM_IVPU_JOB_STATUS_SUCCESS 0
+#define DRM_IVPU_JOB_STATUS_ABORTED 256
 
 /**
  * struct drm_ivpu_bo_wait - Wait for BO to become inactive
index 5f636b5afcd741398300786733453e0f2407ecd5..d44a8118b2ed6e328c8a99a13e0cecadd6f11b52 100644 (file)
@@ -251,20 +251,22 @@ struct binder_extended_error {
        __s32   param;
 };
 
-#define BINDER_WRITE_READ              _IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT                _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS         _IOW('b', 5, __u32)
-#define BINDER_SET_IDLE_PRIORITY       _IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR         _IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT             _IOW('b', 8, __s32)
-#define BINDER_VERSION                 _IOWR('b', 9, struct binder_version)
-#define BINDER_GET_NODE_DEBUG_INFO     _IOWR('b', 11, struct binder_node_debug_info)
-#define BINDER_GET_NODE_INFO_FOR_REF   _IOWR('b', 12, struct binder_node_info_for_ref)
-#define BINDER_SET_CONTEXT_MGR_EXT     _IOW('b', 13, struct flat_binder_object)
-#define BINDER_FREEZE                  _IOW('b', 14, struct binder_freeze_info)
-#define BINDER_GET_FROZEN_INFO         _IOWR('b', 15, struct binder_frozen_status_info)
-#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION    _IOW('b', 16, __u32)
-#define BINDER_GET_EXTENDED_ERROR      _IOWR('b', 17, struct binder_extended_error)
+enum {
+       BINDER_WRITE_READ               = _IOWR('b', 1, struct binder_write_read),
+       BINDER_SET_IDLE_TIMEOUT         = _IOW('b', 3, __s64),
+       BINDER_SET_MAX_THREADS          = _IOW('b', 5, __u32),
+       BINDER_SET_IDLE_PRIORITY        = _IOW('b', 6, __s32),
+       BINDER_SET_CONTEXT_MGR          = _IOW('b', 7, __s32),
+       BINDER_THREAD_EXIT              = _IOW('b', 8, __s32),
+       BINDER_VERSION                  = _IOWR('b', 9, struct binder_version),
+       BINDER_GET_NODE_DEBUG_INFO      = _IOWR('b', 11, struct binder_node_debug_info),
+       BINDER_GET_NODE_INFO_FOR_REF    = _IOWR('b', 12, struct binder_node_info_for_ref),
+       BINDER_SET_CONTEXT_MGR_EXT      = _IOW('b', 13, struct flat_binder_object),
+       BINDER_FREEZE                   = _IOW('b', 14, struct binder_freeze_info),
+       BINDER_GET_FROZEN_INFO          = _IOWR('b', 15, struct binder_frozen_status_info),
+       BINDER_ENABLE_ONEWAY_SPAM_DETECTION     = _IOW('b', 16, __u32),
+       BINDER_GET_EXTENDED_ERROR       = _IOWR('b', 17, struct binder_extended_error),
+};
 
 /*
  * NOTE: Two special error codes you should check for when calling
index 7c29d82db9ee0dcb5ce770b384149c9734a50f30..f8bc34a6bcfa2f7313f2e9eac38e2df6a25aafca 100644 (file)
@@ -614,6 +614,9 @@ struct btrfs_ioctl_clone_range_args {
  */
 #define BTRFS_DEFRAG_RANGE_COMPRESS 1
 #define BTRFS_DEFRAG_RANGE_START_IO 2
+#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP  (BTRFS_DEFRAG_RANGE_COMPRESS |          \
+                                        BTRFS_DEFRAG_RANGE_START_IO)
+
 struct btrfs_ioctl_defrag_range_args {
        /* start of the defrag operation */
        __u64 start;
index 14bc6e7421483105c7ab0fd4e664d354cc3386b1..42066f4eb890362b2c4f150245db5bd1bf399a10 100644 (file)
@@ -46,6 +46,7 @@
        ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"),         \
        ___DEPRECATED(SCAN_MEDIA, "Scan Media"),                          \
        ___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"),          \
+       ___C(GET_TIMESTAMP, "Get Timestamp"),                             \
        ___C(MAX, "invalid / last command")
 
 #define ___C(a, b) CXL_MEM_COMMAND_ID_##a
index 9c2ffdcd66230055eda531a6b25cbd2513830f7f..5060963707b1ed44d9b640454e9b0656a505d761 100644 (file)
@@ -91,6 +91,8 @@ enum iio_modifier {
        IIO_MOD_CO2,
        IIO_MOD_VOC,
        IIO_MOD_LIGHT_UV,
+       IIO_MOD_LIGHT_UVA,
+       IIO_MOD_LIGHT_UVB,
        IIO_MOD_LIGHT_DUV,
        IIO_MOD_PM1,
        IIO_MOD_PM2P5,
index 0b2bc6252e2ca2840b556ee6dd858ae123f22c9b..1dfeaa2e649ee41751162073463df20cb0a130ad 100644 (file)
@@ -49,6 +49,7 @@ enum {
        IOMMUFD_CMD_GET_HW_INFO,
        IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING,
        IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP,
+       IOMMUFD_CMD_HWPT_INVALIDATE,
 };
 
 /**
@@ -613,4 +614,82 @@ struct iommu_hwpt_get_dirty_bitmap {
 #define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
                                        IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
 
+/**
+ * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
+ *                                        Data Type
+ * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
+ */
+enum iommu_hwpt_invalidate_data_type {
+       IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+};
+
+/**
+ * enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d
+ *                                           stage-1 cache invalidation
+ * @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies
+ *                            to all-levels page structure cache or just
+ *                            the leaf PTE cache.
+ */
+enum iommu_hwpt_vtd_s1_invalidate_flags {
+       IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0,
+};
+
+/**
+ * struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation
+ *                                       (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1)
+ * @addr: The start address of the range to be invalidated. It needs to
+ *        be 4KB aligned.
+ * @npages: Number of contiguous 4K pages to be invalidated.
+ * @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags
+ * @__reserved: Must be 0
+ *
+ * The Intel VT-d specific invalidation data for user-managed stage-1 cache
+ * invalidation in nested translation. Userspace uses this structure to
+ * tell the impacted cache scope after modifying the stage-1 page table.
+ *
+ * Invalidating all the caches related to the page table by setting @addr
+ * to be 0 and @npages to be U64_MAX.
+ *
+ * The device TLB will be invalidated automatically if ATS is enabled.
+ */
+struct iommu_hwpt_vtd_s1_invalidate {
+       __aligned_u64 addr;
+       __aligned_u64 npages;
+       __u32 flags;
+       __u32 __reserved;
+};
+
+/**
+ * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
+ * @size: sizeof(struct iommu_hwpt_invalidate)
+ * @hwpt_id: ID of a nested HWPT for cache invalidation
+ * @data_uptr: User pointer to an array of driver-specific cache invalidation
+ *             data.
+ * @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data
+ *             type of all the entries in the invalidation request array. It
+ *             should be a type supported by the hwpt pointed by @hwpt_id.
+ * @entry_len: Length (in bytes) of a request entry in the request array
+ * @entry_num: Input the number of cache invalidation requests in the array.
+ *             Output the number of requests successfully handled by kernel.
+ * @__reserved: Must be 0.
+ *
+ * Invalidate the iommu cache for user-managed page table. Modifications on a
+ * user-managed page table should be followed by this operation to sync cache.
+ * Each ioctl can support one or more cache invalidation requests in the array
+ * that has a total size of @entry_len * @entry_num.
+ *
+ * An empty invalidation request array by setting @entry_num==0 is allowed, and
+ * @entry_len and @data_uptr would be ignored in this case. This can be used to
+ * check if the given @data_type is supported or not by kernel.
+ */
+struct iommu_hwpt_invalidate {
+       __u32 size;
+       __u32 hwpt_id;
+       __aligned_u64 data_uptr;
+       __u32 data_type;
+       __u32 entry_len;
+       __u32 entry_num;
+       __u32 __reserved;
+};
+#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE)
 #endif
index 211b86de35ac53f6457bbd2fae8c973ce6b3a968..c3308536482bdb2bfb1279279325faf5430a3356 100644 (file)
 
 #define KVM_API_VERSION 12
 
-/* *** Deprecated interfaces *** */
-
-#define KVM_TRC_SHIFT           16
-
-#define KVM_TRC_ENTRYEXIT       (1 << KVM_TRC_SHIFT)
-#define KVM_TRC_HANDLER         (1 << (KVM_TRC_SHIFT + 1))
-
-#define KVM_TRC_VMENTRY         (KVM_TRC_ENTRYEXIT + 0x01)
-#define KVM_TRC_VMEXIT          (KVM_TRC_ENTRYEXIT + 0x02)
-#define KVM_TRC_PAGE_FAULT      (KVM_TRC_HANDLER + 0x01)
-
-#define KVM_TRC_HEAD_SIZE       12
-#define KVM_TRC_CYCLE_SIZE      8
-#define KVM_TRC_EXTRA_MAX       7
-
-#define KVM_TRC_INJ_VIRQ         (KVM_TRC_HANDLER + 0x02)
-#define KVM_TRC_REDELIVER_EVT    (KVM_TRC_HANDLER + 0x03)
-#define KVM_TRC_PEND_INTR        (KVM_TRC_HANDLER + 0x04)
-#define KVM_TRC_IO_READ          (KVM_TRC_HANDLER + 0x05)
-#define KVM_TRC_IO_WRITE         (KVM_TRC_HANDLER + 0x06)
-#define KVM_TRC_CR_READ          (KVM_TRC_HANDLER + 0x07)
-#define KVM_TRC_CR_WRITE         (KVM_TRC_HANDLER + 0x08)
-#define KVM_TRC_DR_READ          (KVM_TRC_HANDLER + 0x09)
-#define KVM_TRC_DR_WRITE         (KVM_TRC_HANDLER + 0x0A)
-#define KVM_TRC_MSR_READ         (KVM_TRC_HANDLER + 0x0B)
-#define KVM_TRC_MSR_WRITE        (KVM_TRC_HANDLER + 0x0C)
-#define KVM_TRC_CPUID            (KVM_TRC_HANDLER + 0x0D)
-#define KVM_TRC_INTR             (KVM_TRC_HANDLER + 0x0E)
-#define KVM_TRC_NMI              (KVM_TRC_HANDLER + 0x0F)
-#define KVM_TRC_VMMCALL          (KVM_TRC_HANDLER + 0x10)
-#define KVM_TRC_HLT              (KVM_TRC_HANDLER + 0x11)
-#define KVM_TRC_CLTS             (KVM_TRC_HANDLER + 0x12)
-#define KVM_TRC_LMSW             (KVM_TRC_HANDLER + 0x13)
-#define KVM_TRC_APIC_ACCESS      (KVM_TRC_HANDLER + 0x14)
-#define KVM_TRC_TDP_FAULT        (KVM_TRC_HANDLER + 0x15)
-#define KVM_TRC_GTLB_WRITE       (KVM_TRC_HANDLER + 0x16)
-#define KVM_TRC_STLB_WRITE       (KVM_TRC_HANDLER + 0x17)
-#define KVM_TRC_STLB_INVAL       (KVM_TRC_HANDLER + 0x18)
-#define KVM_TRC_PPC_INSTR        (KVM_TRC_HANDLER + 0x19)
-
-struct kvm_user_trace_setup {
-       __u32 buf_size;
-       __u32 buf_nr;
-};
-
-#define __KVM_DEPRECATED_MAIN_W_0x06 \
-       _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
-#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
-#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
-
-#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
-
-struct kvm_breakpoint {
-       __u32 enabled;
-       __u32 padding;
-       __u64 address;
-};
-
-struct kvm_debug_guest {
-       __u32 enabled;
-       __u32 pad;
-       struct kvm_breakpoint breakpoints[4];
-       __u32 singlestep;
-};
-
-#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
-
-/* *** End of deprecated interfaces *** */
-
-
 /* for KVM_SET_USER_MEMORY_REGION */
 struct kvm_userspace_memory_region {
        __u32 slot;
@@ -95,6 +25,19 @@ struct kvm_userspace_memory_region {
        __u64 userspace_addr; /* start of the userspace allocated memory */
 };
 
+/* for KVM_SET_USER_MEMORY_REGION2 */
+struct kvm_userspace_memory_region2 {
+       __u32 slot;
+       __u32 flags;
+       __u64 guest_phys_addr;
+       __u64 memory_size;
+       __u64 userspace_addr;
+       __u64 guest_memfd_offset;
+       __u32 guest_memfd;
+       __u32 pad1;
+       __u64 pad2[14];
+};
+
 /*
  * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
  * userspace, other bits are reserved for kvm internal use which are defined
@@ -102,6 +45,7 @@ struct kvm_userspace_memory_region {
  */
 #define KVM_MEM_LOG_DIRTY_PAGES        (1UL << 0)
 #define KVM_MEM_READONLY       (1UL << 1)
+#define KVM_MEM_GUEST_MEMFD    (1UL << 2)
 
 /* for KVM_IRQ_LINE */
 struct kvm_irq_level {
@@ -265,6 +209,7 @@ struct kvm_xen_exit {
 #define KVM_EXIT_RISCV_CSR        36
 #define KVM_EXIT_NOTIFY           37
 #define KVM_EXIT_LOONGARCH_IOCSR  38
+#define KVM_EXIT_MEMORY_FAULT     39
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -518,6 +463,13 @@ struct kvm_run {
 #define KVM_NOTIFY_CONTEXT_INVALID     (1 << 0)
                        __u32 flags;
                } notify;
+               /* KVM_EXIT_MEMORY_FAULT */
+               struct {
+#define KVM_MEMORY_EXIT_FLAG_PRIVATE   (1ULL << 3)
+                       __u64 flags;
+                       __u64 gpa;
+                       __u64 size;
+               } memory_fault;
                /* Fix the size of the union. */
                char padding[256];
        };
@@ -945,9 +897,6 @@ struct kvm_ppc_resize_hpt {
  */
 #define KVM_GET_VCPU_MMAP_SIZE    _IO(KVMIO,   0x04) /* in bytes */
 #define KVM_GET_SUPPORTED_CPUID   _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-#define KVM_TRACE_ENABLE          __KVM_DEPRECATED_MAIN_W_0x06
-#define KVM_TRACE_PAUSE           __KVM_DEPRECATED_MAIN_0x07
-#define KVM_TRACE_DISABLE         __KVM_DEPRECATED_MAIN_0x08
 #define KVM_GET_EMULATED_CPUID   _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
 #define KVM_GET_MSR_FEATURE_INDEX_LIST    _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
 
@@ -1201,6 +1150,11 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
 #define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
+#define KVM_CAP_USER_MEMORY2 231
+#define KVM_CAP_MEMORY_FAULT_INFO 232
+#define KVM_CAP_MEMORY_ATTRIBUTES 233
+#define KVM_CAP_GUEST_MEMFD 234
+#define KVM_CAP_VM_TYPES 235
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1291,6 +1245,7 @@ struct kvm_x86_mce {
 #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL       (1 << 4)
 #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND         (1 << 5)
 #define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG        (1 << 6)
+#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE        (1 << 7)
 
 struct kvm_xen_hvm_config {
        __u32 flags;
@@ -1483,6 +1438,8 @@ struct kvm_vfio_spapr_tce {
                                        struct kvm_userspace_memory_region)
 #define KVM_SET_TSS_ADDR          _IO(KVMIO,   0x47)
 #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO,  0x48, __u64)
+#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \
+                                        struct kvm_userspace_memory_region2)
 
 /* enable ucontrol for s390 */
 struct kvm_s390_ucas_mapping {
@@ -1507,20 +1464,8 @@ struct kvm_s390_ucas_mapping {
                        _IOW(KVMIO,  0x67, struct kvm_coalesced_mmio_zone)
 #define KVM_UNREGISTER_COALESCED_MMIO \
                        _IOW(KVMIO,  0x68, struct kvm_coalesced_mmio_zone)
-#define KVM_ASSIGN_PCI_DEVICE     _IOR(KVMIO,  0x69, \
-                                      struct kvm_assigned_pci_dev)
 #define KVM_SET_GSI_ROUTING       _IOW(KVMIO,  0x6a, struct kvm_irq_routing)
-/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
-#define KVM_ASSIGN_IRQ            __KVM_DEPRECATED_VM_R_0x70
-#define KVM_ASSIGN_DEV_IRQ        _IOW(KVMIO,  0x70, struct kvm_assigned_irq)
 #define KVM_REINJECT_CONTROL      _IO(KVMIO,   0x71)
-#define KVM_DEASSIGN_PCI_DEVICE   _IOW(KVMIO,  0x72, \
-                                      struct kvm_assigned_pci_dev)
-#define KVM_ASSIGN_SET_MSIX_NR    _IOW(KVMIO,  0x73, \
-                                      struct kvm_assigned_msix_nr)
-#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO,  0x74, \
-                                      struct kvm_assigned_msix_entry)
-#define KVM_DEASSIGN_DEV_IRQ      _IOW(KVMIO,  0x75, struct kvm_assigned_irq)
 #define KVM_IRQFD                 _IOW(KVMIO,  0x76, struct kvm_irqfd)
 #define KVM_CREATE_PIT2                  _IOW(KVMIO,  0x77, struct kvm_pit_config)
 #define KVM_SET_BOOT_CPU_ID       _IO(KVMIO,   0x78)
@@ -1537,9 +1482,6 @@ struct kvm_s390_ucas_mapping {
 *  KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
 #define KVM_SET_TSC_KHZ           _IO(KVMIO,  0xa2)
 #define KVM_GET_TSC_KHZ           _IO(KVMIO,  0xa3)
-/* Available with KVM_CAP_PCI_2_3 */
-#define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
-                                      struct kvm_assigned_pci_dev)
 /* Available with KVM_CAP_SIGNAL_MSI */
 #define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
 /* Available with KVM_CAP_PPC_GET_SMMU_INFO */
@@ -1592,8 +1534,6 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_SREGS             _IOW(KVMIO,  0x84, struct kvm_sregs)
 #define KVM_TRANSLATE             _IOWR(KVMIO, 0x85, struct kvm_translation)
 #define KVM_INTERRUPT             _IOW(KVMIO,  0x86, struct kvm_interrupt)
-/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
-#define KVM_DEBUG_GUEST           __KVM_DEPRECATED_VCPU_W_0x87
 #define KVM_GET_MSRS              _IOWR(KVMIO, 0x88, struct kvm_msrs)
 #define KVM_SET_MSRS              _IOW(KVMIO,  0x89, struct kvm_msrs)
 #define KVM_SET_CPUID             _IOW(KVMIO,  0x8a, struct kvm_cpuid)
@@ -2267,4 +2207,24 @@ struct kvm_s390_zpci_op {
 /* flags for kvm_s390_zpci_op->u.reg_aen.flags */
 #define KVM_S390_ZPCIOP_REGAEN_HOST    (1 << 0)
 
+/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
+#define KVM_SET_MEMORY_ATTRIBUTES              _IOW(KVMIO,  0xd2, struct kvm_memory_attributes)
+
+struct kvm_memory_attributes {
+       __u64 address;
+       __u64 size;
+       __u64 attributes;
+       __u64 flags;
+};
+
+#define KVM_MEMORY_ATTRIBUTE_PRIVATE           (1ULL << 3)
+
+#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO,  0xd4, struct kvm_create_guest_memfd)
+
+struct kvm_create_guest_memfd {
+       __u64 size;
+       __u64 flags;
+       __u64 reserved[6];
+};
+
 #endif /* __LINUX_KVM_H */
index 171c5cce364119a5097cb82d0bd7b407a116a61e..68a0272e99b762c44525c80cdad16be362eba9ef 100644 (file)
@@ -100,14 +100,14 @@ struct mei_connect_client_data_vtag {
  * a FW client on a tagged channel. From this point on, every read
  * and write will communicate with the associated FW client
  * on the tagged channel.
- * Upone close() the communication is terminated.
+ * Upon close() the communication is terminated.
  *
  * The IOCTL argument is a struct with a union that contains
  * the input parameter and the output parameter for this IOCTL.
  *
  * The input parameter is UUID of the FW Client, a vtag [0,255].
  * The output parameter is the properties of the FW client
- * (FW protocool version and max message size).
+ * (FW protocol version and max message size).
  *
  * Clients that do not support tagged connection
  * will respond with -EOPNOTSUPP.
index ca30232b7bc8af49a6c3dd1c03e105628aafabf9..117c6a9b845b1a6fde23a952560c0e807a5a3d90 100644 (file)
@@ -285,9 +285,11 @@ enum nft_rule_attributes {
 /**
  * enum nft_rule_compat_flags - nf_tables rule compat flags
  *
+ * @NFT_RULE_COMPAT_F_UNUSED: unused
  * @NFT_RULE_COMPAT_F_INV: invert the check result
  */
 enum nft_rule_compat_flags {
+       NFT_RULE_COMPAT_F_UNUSED = (1 << 0),
        NFT_RULE_COMPAT_F_INV   = (1 << 1),
        NFT_RULE_COMPAT_F_MASK  = NFT_RULE_COMPAT_F_INV,
 };
diff --git a/include/uapi/linux/nsm.h b/include/uapi/linux/nsm.h
new file mode 100644 (file)
index 0000000..e529f23
--- /dev/null
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef __UAPI_LINUX_NSM_H
+#define __UAPI_LINUX_NSM_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define NSM_MAGIC              0x0A
+
+#define NSM_REQUEST_MAX_SIZE   0x1000
+#define NSM_RESPONSE_MAX_SIZE  0x3000
+
+struct nsm_iovec {
+       __u64 addr; /* Virtual address of target buffer */
+       __u64 len;  /* Length of target buffer */
+};
+
+/* Raw NSM message. Only available with CAP_SYS_ADMIN. */
+struct nsm_raw {
+       /* Request from user */
+       struct nsm_iovec request;
+       /* Response to user */
+       struct nsm_iovec response;
+};
+#define NSM_IOCTL_RAW          _IOWR(NSM_MAGIC, 0x0, struct nsm_raw)
+
+#endif /* __UAPI_LINUX_NSM_H */
index f9c1af8d141b4a52d5288841bf2a4b14718fc67a..94b46b043b536288f31c02e5b4577628d8fecaa7 100644 (file)
@@ -11,7 +11,8 @@
 #define __UAPI_LINUX_PCITEST_H
 
 #define PCITEST_BAR            _IO('P', 0x1)
-#define PCITEST_LEGACY_IRQ     _IO('P', 0x2)
+#define PCITEST_INTX_IRQ       _IO('P', 0x2)
+#define PCITEST_LEGACY_IRQ     PCITEST_INTX_IRQ
 #define PCITEST_MSI            _IOW('P', 0x3, int)
 #define PCITEST_WRITE          _IOW('P', 0x4, unsigned long)
 #define PCITEST_READ           _IOW('P', 0x5, unsigned long)
index 53bc1af67a4118527717b9a82260e2207485a775..de9b4733607e6b61b08ff7089ff90070168ff4a2 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _UAPI_LINUX_SERIAL_H
 #define _UAPI_LINUX_SERIAL_H
 
+#include <linux/const.h>
 #include <linux/types.h>
 
 #include <linux/tty_flags.h>
@@ -137,17 +138,20 @@ struct serial_icounter_struct {
  * * %SER_RS485_ADDRB          - Enable RS485 addressing mode.
  * * %SER_RS485_ADDR_RECV - Receive address filter (enables @addr_recv). Requires %SER_RS485_ADDRB.
  * * %SER_RS485_ADDR_DEST - Destination address (enables @addr_dest). Requires %SER_RS485_ADDRB.
+ * * %SER_RS485_MODE_RS422     - Enable RS422. Requires %SER_RS485_ENABLED.
  */
 struct serial_rs485 {
        __u32   flags;
-#define SER_RS485_ENABLED              (1 << 0)
-#define SER_RS485_RTS_ON_SEND          (1 << 1)
-#define SER_RS485_RTS_AFTER_SEND       (1 << 2)
-#define SER_RS485_RX_DURING_TX         (1 << 4)
-#define SER_RS485_TERMINATE_BUS                (1 << 5)
-#define SER_RS485_ADDRB                        (1 << 6)
-#define SER_RS485_ADDR_RECV            (1 << 7)
-#define SER_RS485_ADDR_DEST            (1 << 8)
+#define SER_RS485_ENABLED              _BITUL(0)
+#define SER_RS485_RTS_ON_SEND          _BITUL(1)
+#define SER_RS485_RTS_AFTER_SEND       _BITUL(2)
+/* Placeholder for bit 3: SER_RS485_RTS_BEFORE_SEND, which isn't used anymore */
+#define SER_RS485_RX_DURING_TX         _BITUL(4)
+#define SER_RS485_TERMINATE_BUS                _BITUL(5)
+#define SER_RS485_ADDRB                        _BITUL(6)
+#define SER_RS485_ADDR_RECV            _BITUL(7)
+#define SER_RS485_ADDR_DEST            _BITUL(8)
+#define SER_RS485_MODE_RS422           _BITUL(9)
 
        __u32   delay_rts_before_send;
        __u32   delay_rts_after_send;
index d77ee6b65328e876baf02a25fc04f4a3d0e72b7b..078098e73fd3e20776c8ba39a2d8c4594f10ea5f 100644 (file)
@@ -73,8 +73,10 @@ struct usb_os_desc_header {
 struct usb_ext_compat_desc {
        __u8    bFirstInterfaceNumber;
        __u8    Reserved1;
-       __u8    CompatibleID[8];
-       __u8    SubCompatibleID[8];
+       __struct_group(/* no tag */, IDs, /* no attrs */,
+               __u8    CompatibleID[8];
+               __u8    SubCompatibleID[8];
+       );
        __u8    Reserved2[6];
 };
 
index 7f5fb010226d8cb80a4e435209b3b69ba6e80e35..2b68e6cdf1902f49f8f1cc04ae5b502110a959d3 100644 (file)
@@ -1219,6 +1219,7 @@ enum vfio_device_mig_state {
        VFIO_DEVICE_STATE_RUNNING_P2P = 5,
        VFIO_DEVICE_STATE_PRE_COPY = 6,
        VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
+       VFIO_DEVICE_STATE_NR,
 };
 
 /**
index 8881aea60f6f11be1aa2fd20f813883546c8e74a..2445f365bce74b4e926c6929322b269252ab6830 100644 (file)
@@ -52,7 +52,7 @@
  * rest are per-device feature bits.
  */
 #define VIRTIO_TRANSPORT_F_START       28
-#define VIRTIO_TRANSPORT_F_END         41
+#define VIRTIO_TRANSPORT_F_END         42
 
 #ifndef VIRTIO_CONFIG_NO_LEGACY
 /* Do we get callbacks when the ring is completely used, even if we've
  * This feature indicates that the driver can reset a queue individually.
  */
 #define VIRTIO_F_RING_RESET            40
+
+/*
+ * This feature indicates that the device support administration virtqueues.
+ */
+#define VIRTIO_F_ADMIN_VQ              41
+
 #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
index 44f4dd2add188090ff3b03d859fb4d27009d5479..ef3810dee7efac5d337d55db03131cb4796f4358 100644 (file)
@@ -175,6 +175,9 @@ struct virtio_pci_modern_common_cfg {
 
        __le16 queue_notify_data;       /* read-write */
        __le16 queue_reset;             /* read-write */
+
+       __le16 admin_queue_index;       /* read-only */
+       __le16 admin_queue_num;         /* read-only */
 };
 
 /* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
@@ -215,7 +218,72 @@ struct virtio_pci_cfg_cap {
 #define VIRTIO_PCI_COMMON_Q_USEDHI     52
 #define VIRTIO_PCI_COMMON_Q_NDATA      56
 #define VIRTIO_PCI_COMMON_Q_RESET      58
+#define VIRTIO_PCI_COMMON_ADM_Q_IDX    60
+#define VIRTIO_PCI_COMMON_ADM_Q_NUM    62
 
 #endif /* VIRTIO_PCI_NO_MODERN */
 
+/* Admin command status. */
+#define VIRTIO_ADMIN_STATUS_OK         0
+
+/* Admin command opcode. */
+#define VIRTIO_ADMIN_CMD_LIST_QUERY    0x0
+#define VIRTIO_ADMIN_CMD_LIST_USE      0x1
+
+/* Admin command group type. */
+#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV  0x1
+
+/* Transitional device admin command. */
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE       0x2
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ                0x3
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE          0x4
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ           0x5
+#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO            0x6
+
+struct __packed virtio_admin_cmd_hdr {
+       __le16 opcode;
+       /*
+        * 1 - SR-IOV
+        * 2-65535 - reserved
+        */
+       __le16 group_type;
+       /* Unused, reserved for future extensions. */
+       __u8 reserved1[12];
+       __le64 group_member_id;
+};
+
+struct __packed virtio_admin_cmd_status {
+       __le16 status;
+       __le16 status_qualifier;
+       /* Unused, reserved for future extensions. */
+       __u8 reserved2[4];
+};
+
+struct __packed virtio_admin_cmd_legacy_wr_data {
+       __u8 offset; /* Starting offset of the register(s) to write. */
+       __u8 reserved[7];
+       __u8 registers[];
+};
+
+struct __packed virtio_admin_cmd_legacy_rd_data {
+       __u8 offset; /* Starting offset of the register(s) to read. */
+};
+
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END 0
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_DEV 0x1
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM 0x2
+
+#define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4
+
+struct __packed virtio_admin_cmd_notify_info_data {
+       __u8 flags; /* 0 = end of list, 1 = owner device, 2 = member device */
+       __u8 bar; /* BAR of the member or the owner device */
+       __u8 padding[6];
+       __le64 offset; /* Offset within bar. */
+};
+
+struct virtio_admin_cmd_notify_info_result {
+       struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO];
+};
+
 #endif
index d676b3620383c3cb76be7b4aa3651b1a99e5ded9..ede4f3564977dcd477e386c51c2a73a11dac43a2 100644 (file)
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 
+/* Feature bits */
+/* guest physical address range will be indicated as shared memory region 0 */
+#define VIRTIO_PMEM_F_SHMEM_REGION 0
+
+/* shmid of the shared memory region corresponding to the pmem */
+#define VIRTIO_PMEM_SHMEM_REGION_ID 0
+
 struct virtio_pmem_config {
        __le64 start;
        __le64 size;
index 18417b0178699a8cf2dc231fae254f84d352940f..60e42d3b760e0fb2d1664cdd2a0b4bfa72de012a 100644 (file)
@@ -537,7 +537,7 @@ struct xendispl_dbuf_create_req {
 
 struct xendispl_page_directory {
        grant_ref_t gref_dir_next_page;
-       grant_ref_t gref[1]; /* Variable length */
+       grant_ref_t gref[];
 };
 
 /*
index ba4c4274b7146ed496629318a100152a1bd938c1..4fef1efcdcab5903fc976896c19d0fb94a296922 100644 (file)
@@ -95,7 +95,7 @@ struct __name##_sring {                                                 \
     RING_IDX req_prod, req_event;                                       \
     RING_IDX rsp_prod, rsp_event;                                       \
     uint8_t __pad[48];                                                  \
-    union __name##_sring_entry ring[1]; /* variable-length */           \
+    union __name##_sring_entry ring[];                                  \
 };                                                                      \
                                                                         \
 /* "Front" end's private variables */                                   \
index 445657cdb1defbe2ecec6e4cfc0a89f50a26bf3c..b818517588b508ee6194be7b1532de475d88712e 100644 (file)
@@ -659,7 +659,7 @@ struct xensnd_open_req {
 
 struct xensnd_page_directory {
        grant_ref_t gref_dir_next_page;
-       grant_ref_t gref[1]; /* Variable length */
+       grant_ref_t gref[];
 };
 
 /*
index 8df18f3a974846b48e41b2a8dcbc2f2f2f90128e..deda3d14135bb9ed54469353e44f1c1e86e8a0f2 100644 (file)
@@ -876,6 +876,18 @@ config CC_NO_ARRAY_BOUNDS
        bool
        default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
 
+# Currently, disable -Wstringop-overflow for GCC globally.
+config GCC_NO_STRINGOP_OVERFLOW
+       def_bool y
+
+config CC_NO_STRINGOP_OVERFLOW
+       bool
+       default y if CC_IS_GCC && GCC_NO_STRINGOP_OVERFLOW
+
+config CC_STRINGOP_OVERFLOW
+       bool
+       default y if CC_IS_GCC && !CC_NO_STRINGOP_OVERFLOW
+
 #
 # For architectures that know their GCC __int128 support is sound
 #
index 5fdef94f086456471d8846b8b0ce3c5a23e181fb..279ad28bf4fb148e37cbd9600842a567c813039c 100644 (file)
@@ -510,7 +510,10 @@ struct file_system_type rootfs_fs_type = {
 
 void __init init_rootfs(void)
 {
-       if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
-               (!root_fs_names || strstr(root_fs_names, "tmpfs")))
-               is_tmpfs = true;
+       if (IS_ENABLED(CONFIG_TMPFS)) {
+               if (!saved_root_name[0] && !root_fs_names)
+                       is_tmpfs = true;
+               else if (root_fs_names && !!strstr(root_fs_names, "tmpfs"))
+                       is_tmpfs = true;
+       }
 }
index 8d0fd946cdd2b3fc8eae59ae6715928fa70e6f18..76deb48c38cb16dd779de7ee91b35785b6890579 100644 (file)
@@ -574,6 +574,16 @@ extern unsigned long __initramfs_size;
 #include <linux/initrd.h>
 #include <linux/kexec.h>
 
+static ssize_t raw_read(struct file *file, struct kobject *kobj,
+                       struct bin_attribute *attr, char *buf,
+                       loff_t pos, size_t count)
+{
+       memcpy(buf, attr->private + pos, count);
+       return count;
+}
+
+static BIN_ATTR(initrd, 0440, raw_read, NULL, 0);
+
 void __init reserve_initrd_mem(void)
 {
        phys_addr_t start;
@@ -715,8 +725,14 @@ done:
         * If the initrd region is overlapped with crashkernel reserved region,
         * free only memory that is not part of crashkernel region.
         */
-       if (!do_retain_initrd && initrd_start && !kexec_free_initrd())
+       if (!do_retain_initrd && initrd_start && !kexec_free_initrd()) {
                free_initrd_mem(initrd_start, initrd_end);
+       } else if (do_retain_initrd && initrd_start) {
+               bin_attr_initrd.size = initrd_end - initrd_start;
+               bin_attr_initrd.private = (void *)initrd_start;
+               if (sysfs_create_bin_file(firmware_kobj, &bin_attr_initrd))
+                       pr_err("Failed to create initrd sysfs file");
+       }
        initrd_start = 0;
        initrd_end = 0;
 
index 09b6d860deba3d96e086d5f16bf654b312e5be5e..cd9a137ad6cefbb907a177fd8f0c9753ac0c70dd 100644 (file)
@@ -137,6 +137,14 @@ struct io_defer_entry {
 #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
 #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
 
+/*
+ * No waiters. It's larger than any valid value of the tw counter
+ * so that tests against ->cq_wait_nr would fail and skip wake_up().
+ */
+#define IO_CQ_WAKE_INIT                (-1U)
+/* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
+#define IO_CQ_WAKE_FORCE       (IO_CQ_WAKE_INIT >> 1)
+
 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
                                         struct task_struct *task,
                                         bool cancel_all);
@@ -303,6 +311,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
                goto err;
 
        ctx->flags = p->flags;
+       atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
        init_waitqueue_head(&ctx->sqo_sq_wait);
        INIT_LIST_HEAD(&ctx->sqd_list);
        INIT_LIST_HEAD(&ctx->cq_overflow_list);
@@ -1304,16 +1313,23 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
        unsigned nr_wait, nr_tw, nr_tw_prev;
-       struct llist_node *first;
+       struct llist_node *head;
+
+       /* See comment above IO_CQ_WAKE_INIT */
+       BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);
 
+       /*
+        * We don't know how many reuqests is there in the link and whether
+        * they can even be queued lazily, fall back to non-lazy.
+        */
        if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
                flags &= ~IOU_F_TWQ_LAZY_WAKE;
 
-       first = READ_ONCE(ctx->work_llist.first);
+       head = READ_ONCE(ctx->work_llist.first);
        do {
                nr_tw_prev = 0;
-               if (first) {
-                       struct io_kiocb *first_req = container_of(first,
+               if (head) {
+                       struct io_kiocb *first_req = container_of(head,
                                                        struct io_kiocb,
                                                        io_task_work.node);
                        /*
@@ -1322,17 +1338,29 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
                         */
                        nr_tw_prev = READ_ONCE(first_req->nr_tw);
                }
+
+               /*
+                * Theoretically, it can overflow, but that's fine as one of
+                * previous adds should've tried to wake the task.
+                */
                nr_tw = nr_tw_prev + 1;
-               /* Large enough to fail the nr_wait comparison below */
                if (!(flags & IOU_F_TWQ_LAZY_WAKE))
-                       nr_tw = -1U;
+                       nr_tw = IO_CQ_WAKE_FORCE;
 
                req->nr_tw = nr_tw;
-               req->io_task_work.node.next = first;
-       } while (!try_cmpxchg(&ctx->work_llist.first, &first,
+               req->io_task_work.node.next = head;
+       } while (!try_cmpxchg(&ctx->work_llist.first, &head,
                              &req->io_task_work.node));
 
-       if (!first) {
+       /*
+        * cmpxchg implies a full barrier, which pairs with the barrier
+        * in set_current_state() on the io_cqring_wait() side. It's used
+        * to ensure that either we see updated ->cq_wait_nr, or waiters
+        * going to sleep will observe the work added to the list, which
+        * is similar to the wait/wawke task state sync.
+        */
+
+       if (!head) {
                if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
                        atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
                if (ctx->has_evfd)
@@ -1340,14 +1368,12 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
        }
 
        nr_wait = atomic_read(&ctx->cq_wait_nr);
-       /* no one is waiting */
-       if (!nr_wait)
+       /* not enough or no one is waiting */
+       if (nr_tw < nr_wait)
                return;
-       /* either not enough or the previous add has already woken it up */
-       if (nr_wait > nr_tw || nr_tw_prev >= nr_wait)
+       /* the previous add has already woken it up */
+       if (nr_tw_prev >= nr_wait)
                return;
-       /* pairs with set_current_state() in io_cqring_wait() */
-       smp_mb__after_atomic();
        wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
 }
 
@@ -2000,9 +2026,10 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
                goto out;
        fd = array_index_nospec(fd, ctx->nr_user_files);
        slot = io_fixed_file_slot(&ctx->file_table, fd);
-       file = io_slot_file(slot);
+       if (!req->rsrc_node)
+               __io_req_set_rsrc_node(req, ctx);
        req->flags |= io_slot_flags(slot);
-       io_req_set_rsrc_node(req, ctx, 0);
+       file = io_slot_file(slot);
 out:
        io_ring_submit_unlock(ctx, issue_flags);
        return file;
@@ -2613,7 +2640,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
                ret = io_cqring_wait_schedule(ctx, &iowq);
                __set_current_state(TASK_RUNNING);
-               atomic_set(&ctx->cq_wait_nr, 0);
+               atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
 
                /*
                 * Run task_work after scheduling and before io_should_wake().
@@ -3787,7 +3814,8 @@ static int io_uring_install_fd(struct file *file)
  */
 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
 {
-       return anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
+       /* Create a new inode so that the LSM can block the creation.  */
+       return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
                                         O_RDWR | O_CLOEXEC, NULL);
 }
 
index 04e33f25919ca78332fc3429b8c98d427b768688..d5495710c17877624c75d8fa36b71af9535336a3 100644 (file)
 #include <trace/events/io_uring.h>
 #endif
 
-
 enum {
        IOU_OK                  = 0,
        IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
 
+       /*
+        * Requeue the task_work to restart operations on this request. The
+        * actual value isn't important, should just be not an otherwise
+        * valid error code, yet less than -MAX_ERRNO and valid internally.
+        */
+       IOU_REQUEUE             = -3072,
+
        /*
         * Intended only when both IO_URING_F_MULTISHOT is passed
         * to indicate to the poll runner that multishot should be
index 75d494dad7e2c7b22a53f50fc422d807a0559000..43bc9a5f96f9d1ce48f2d6e2a5cf850c675aee0d 100644 (file)
@@ -60,6 +60,7 @@ struct io_sr_msg {
        unsigned                        len;
        unsigned                        done_io;
        unsigned                        msg_flags;
+       unsigned                        nr_multishot_loops;
        u16                             flags;
        /* initialised and used only by !msg send variants */
        u16                             addr_len;
@@ -70,6 +71,13 @@ struct io_sr_msg {
        struct io_kiocb                 *notif;
 };
 
+/*
+ * Number of times we'll try and do receives if there's more data. If we
+ * exceed this limit, then add us to the back of the queue and retry from
+ * there. This helps fairness between flooding clients.
+ */
+#define MULTISHOT_MAX_RETRY    32
+
 static inline bool io_check_multishot(struct io_kiocb *req,
                                      unsigned int issue_flags)
 {
@@ -611,6 +619,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
        sr->done_io = 0;
+       sr->nr_multishot_loops = 0;
        return 0;
 }
 
@@ -645,23 +654,35 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
                return true;
        }
 
-       if (!mshot_finished) {
-               if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
-                                       *ret, cflags | IORING_CQE_F_MORE)) {
-                       io_recv_prep_retry(req);
-                       /* Known not-empty or unknown state, retry */
-                       if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
-                           msg->msg_inq == -1)
+       if (mshot_finished)
+               goto finish;
+
+       /*
+        * Fill CQE for this receive and see if we should keep trying to
+        * receive from this socket.
+        */
+       if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+                               *ret, cflags | IORING_CQE_F_MORE)) {
+               struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+               int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
+
+               io_recv_prep_retry(req);
+               /* Known not-empty or unknown state, retry */
+               if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
+                       if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
                                return false;
-                       if (issue_flags & IO_URING_F_MULTISHOT)
-                               *ret = IOU_ISSUE_SKIP_COMPLETE;
-                       else
-                               *ret = -EAGAIN;
-                       return true;
+                       /* mshot retries exceeded, force a requeue */
+                       sr->nr_multishot_loops = 0;
+                       mshot_retry_ret = IOU_REQUEUE;
                }
-               /* Otherwise stop multishot but use the current result. */
+               if (issue_flags & IO_URING_F_MULTISHOT)
+                       *ret = mshot_retry_ret;
+               else
+                       *ret = -EAGAIN;
+               return true;
        }
-
+       /* Otherwise stop multishot but use the current result. */
+finish:
        io_req_set_res(req, *ret, cflags);
 
        if (issue_flags & IO_URING_F_MULTISHOT)
@@ -902,6 +923,7 @@ retry_multishot:
                if (!buf)
                        return -ENOBUFS;
                sr->buf = buf;
+               sr->len = len;
        }
 
        ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
index 6705634e5f52aa625b797f3c7931903c55130359..b1ee3a9c38072933dd020848b7f3586af257d912 100644 (file)
@@ -471,7 +471,6 @@ const struct io_issue_def io_issue_defs[] = {
        },
        [IORING_OP_FIXED_FD_INSTALL] = {
                .needs_file             = 1,
-               .audit_skip             = 1,
                .prep                   = io_install_fixed_fd_prep,
                .issue                  = io_install_fixed_fd,
        },
index 0fe0dd30554623edb87cd159b4ffe0c52288211a..e3357dfa14ca42dd5b25e6cf9ce4a4be8b7ee0f4 100644 (file)
@@ -277,6 +277,10 @@ int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sq
        if (flags & ~IORING_FIXED_FD_NO_CLOEXEC)
                return -EINVAL;
 
+       /* ensure the task's creds are used when installing/receiving fds */
+       if (req->flags & REQ_F_CREDS)
+               return -EPERM;
+
        /* default to O_CLOEXEC, disable if IORING_FIXED_FD_NO_CLOEXEC is set */
        ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
        ifi->o_flags = O_CLOEXEC;
index d59b74a99d4e4b444dcb2f86dc9d3594d838e1cf..7513afc7b702e4cbc727717fc59aa8eb758465df 100644 (file)
@@ -226,8 +226,29 @@ enum {
        IOU_POLL_NO_ACTION = 1,
        IOU_POLL_REMOVE_POLL_USE_RES = 2,
        IOU_POLL_REISSUE = 3,
+       IOU_POLL_REQUEUE = 4,
 };
 
+static void __io_poll_execute(struct io_kiocb *req, int mask)
+{
+       unsigned flags = 0;
+
+       io_req_set_res(req, mask, 0);
+       req->io_task_work.func = io_poll_task_func;
+
+       trace_io_uring_task_add(req, mask);
+
+       if (!(req->flags & REQ_F_POLL_NO_LAZY))
+               flags = IOU_F_TWQ_LAZY_WAKE;
+       __io_req_task_work_add(req, flags);
+}
+
+static inline void io_poll_execute(struct io_kiocb *req, int res)
+{
+       if (io_poll_get_ownership(req))
+               __io_poll_execute(req, res);
+}
+
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
@@ -309,6 +330,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
                        int ret = io_poll_issue(req, ts);
                        if (ret == IOU_STOP_MULTISHOT)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
+                       else if (ret == IOU_REQUEUE)
+                               return IOU_POLL_REQUEUE;
                        if (ret < 0)
                                return ret;
                }
@@ -331,8 +354,12 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
        int ret;
 
        ret = io_poll_check_events(req, ts);
-       if (ret == IOU_POLL_NO_ACTION)
+       if (ret == IOU_POLL_NO_ACTION) {
+               return;
+       } else if (ret == IOU_POLL_REQUEUE) {
+               __io_poll_execute(req, 0);
                return;
+       }
        io_poll_remove_entries(req);
        io_poll_tw_hash_eject(req, ts);
 
@@ -364,26 +391,6 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
        }
 }
 
-static void __io_poll_execute(struct io_kiocb *req, int mask)
-{
-       unsigned flags = 0;
-
-       io_req_set_res(req, mask, 0);
-       req->io_task_work.func = io_poll_task_func;
-
-       trace_io_uring_task_add(req, mask);
-
-       if (!(req->flags & REQ_F_POLL_NO_LAZY))
-               flags = IOU_F_TWQ_LAZY_WAKE;
-       __io_req_task_work_add(req, flags);
-}
-
-static inline void io_poll_execute(struct io_kiocb *req, int res)
-{
-       if (io_poll_get_ownership(req))
-               __io_poll_execute(req, res);
-}
-
 static void io_poll_cancel_req(struct io_kiocb *req)
 {
        io_poll_mark_cancelled(req);
index ff4d5d753387e80568ccc90734732d6cb999b39e..1dacae9e816c9269e8a1ae5bfab4d12fa9aaa9ac 100644 (file)
@@ -24,6 +24,15 @@ struct async_poll {
        struct io_poll          *double_poll;
 };
 
+/*
+ * Must only be called inside issue_flags & IO_URING_F_MULTISHOT, or
+ * potentially other cases where we already "own" this poll request.
+ */
+static inline void io_poll_multishot_retry(struct io_kiocb *req)
+{
+       atomic_inc(&req->poll_refs);
+}
+
 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
 
index 708dd1d89add4ab09ac4dd90a4a3bbaba410ea4d..5e62c1208996542537c6aedf4d57506863165e10 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/nospec.h>
+#include <linux/compat.h>
 #include <linux/io_uring.h>
 #include <linux/io_uring_types.h>
 
@@ -278,13 +279,14 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
        if (len > cpumask_size())
                len = cpumask_size();
 
-       if (in_compat_syscall()) {
+#ifdef CONFIG_COMPAT
+       if (in_compat_syscall())
                ret = compat_get_bitmap(cpumask_bits(new_mask),
                                        (const compat_ulong_t __user *)arg,
                                        len * 8 /* CHAR_BIT */);
-       } else {
+       else
+#endif
                ret = copy_from_user(new_mask, arg, len);
-       }
 
        if (ret) {
                free_cpumask_var(new_mask);
index 7238b9cfe33b60b7520905d7f632951212c9677c..c6f199bbee2843dfea2d88729b707d46a168d3d9 100644 (file)
@@ -102,17 +102,21 @@ static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx,
        node->refs++;
 }
 
+static inline void __io_req_set_rsrc_node(struct io_kiocb *req,
+                                         struct io_ring_ctx *ctx)
+{
+       lockdep_assert_held(&ctx->uring_lock);
+       req->rsrc_node = ctx->rsrc_node;
+       io_charge_rsrc_node(ctx, ctx->rsrc_node);
+}
+
 static inline void io_req_set_rsrc_node(struct io_kiocb *req,
                                        struct io_ring_ctx *ctx,
                                        unsigned int issue_flags)
 {
        if (!req->rsrc_node) {
                io_ring_submit_lock(ctx, issue_flags);
-
-               lockdep_assert_held(&ctx->uring_lock);
-
-               req->rsrc_node = ctx->rsrc_node;
-               io_charge_rsrc_node(ctx, ctx->rsrc_node);
+               __io_req_set_rsrc_node(req, ctx);
                io_ring_submit_unlock(ctx, issue_flags);
        }
 }
index 0c856726b15db330daf6470d3e8baea4d90ad17b..d5e79d9bdc717b8cb917d6e06b2cbbe6840dd762 100644 (file)
@@ -18,6 +18,7 @@
 #include "opdef.h"
 #include "kbuf.h"
 #include "rsrc.h"
+#include "poll.h"
 #include "rw.h"
 
 struct io_rw {
@@ -168,27 +169,6 @@ void io_readv_writev_cleanup(struct io_kiocb *req)
        kfree(io->free_iovec);
 }
 
-static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
-{
-       switch (ret) {
-       case -EIOCBQUEUED:
-               break;
-       case -ERESTARTSYS:
-       case -ERESTARTNOINTR:
-       case -ERESTARTNOHAND:
-       case -ERESTART_RESTARTBLOCK:
-               /*
-                * We can't just restart the syscall, since previously
-                * submitted sqes may already be in progress. Just fail this
-                * IO with EINTR.
-                */
-               ret = -EINTR;
-               fallthrough;
-       default:
-               kiocb->ki_complete(kiocb, ret);
-       }
-}
-
 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 {
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
@@ -371,6 +351,33 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
        smp_store_release(&req->iopoll_completed, 1);
 }
 
+static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
+{
+       /* IO was queued async, completion will happen later */
+       if (ret == -EIOCBQUEUED)
+               return;
+
+       /* transform internal restart error codes */
+       if (unlikely(ret < 0)) {
+               switch (ret) {
+               case -ERESTARTSYS:
+               case -ERESTARTNOINTR:
+               case -ERESTARTNOHAND:
+               case -ERESTART_RESTARTBLOCK:
+                       /*
+                        * We can't just restart the syscall, since previously
+                        * submitted sqes may already be in progress. Just fail
+                        * this IO with EINTR.
+                        */
+                       ret = -EINTR;
+                       break;
+               }
+       }
+
+       INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
+                       io_complete_rw, kiocb, ret);
+}
+
 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
                       unsigned int issue_flags)
 {
@@ -956,8 +963,15 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
                if (io_fill_cqe_req_aux(req,
                                        issue_flags & IO_URING_F_COMPLETE_DEFER,
                                        ret, cflags | IORING_CQE_F_MORE)) {
-                       if (issue_flags & IO_URING_F_MULTISHOT)
+                       if (issue_flags & IO_URING_F_MULTISHOT) {
+                               /*
+                                * Force retry, as we might have more data to
+                                * be read and otherwise it won't get retried
+                                * until (if ever) another poll is triggered.
+                                */
+                               io_poll_multishot_retry(req);
                                return IOU_ISSUE_SKIP_COMPLETE;
+                       }
                        return -EAGAIN;
                }
        }
index 51e8b4bee0c8323b42cdee3c29cdaca00bbb95aa..59647118917676c7aa3c69a20cffd0946ff7a2fb 100644 (file)
@@ -5615,21 +5615,46 @@ static u8 bpf_ctx_convert_map[] = {
 #undef BPF_MAP_TYPE
 #undef BPF_LINK_TYPE
 
-const struct btf_member *
-btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
-                     const struct btf_type *t, enum bpf_prog_type prog_type,
-                     int arg)
+static const struct btf_type *find_canonical_prog_ctx_type(enum bpf_prog_type prog_type)
 {
        const struct btf_type *conv_struct;
-       const struct btf_type *ctx_struct;
        const struct btf_member *ctx_type;
-       const char *tname, *ctx_tname;
 
        conv_struct = bpf_ctx_convert.t;
-       if (!conv_struct) {
-               bpf_log(log, "btf_vmlinux is malformed\n");
+       if (!conv_struct)
                return NULL;
-       }
+       /* prog_type is valid bpf program type. No need for bounds check. */
+       ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
+       /* ctx_type is a pointer to prog_ctx_type in vmlinux.
+        * Like 'struct __sk_buff'
+        */
+       return btf_type_by_id(btf_vmlinux, ctx_type->type);
+}
+
+static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
+{
+       const struct btf_type *conv_struct;
+       const struct btf_member *ctx_type;
+
+       conv_struct = bpf_ctx_convert.t;
+       if (!conv_struct)
+               return -EFAULT;
+       /* prog_type is valid bpf program type. No need for bounds check. */
+       ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2 + 1;
+       /* ctx_type is a pointer to prog_ctx_type in vmlinux.
+        * Like 'struct sk_buff'
+        */
+       return ctx_type->type;
+}
+
+const struct btf_type *
+btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+                     const struct btf_type *t, enum bpf_prog_type prog_type,
+                     int arg)
+{
+       const struct btf_type *ctx_type;
+       const char *tname, *ctx_tname;
+
        t = btf_type_by_id(btf, t->type);
        while (btf_type_is_modifier(t))
                t = btf_type_by_id(btf, t->type);
@@ -5646,17 +5671,15 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
                bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
                return NULL;
        }
-       /* prog_type is valid bpf program type. No need for bounds check. */
-       ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
-       /* ctx_struct is a pointer to prog_ctx_type in vmlinux.
-        * Like 'struct __sk_buff'
-        */
-       ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
-       if (!ctx_struct)
+
+       ctx_type = find_canonical_prog_ctx_type(prog_type);
+       if (!ctx_type) {
+               bpf_log(log, "btf_vmlinux is malformed\n");
                /* should not happen */
                return NULL;
+       }
 again:
-       ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
+       ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
        if (!ctx_tname) {
                /* should not happen */
                bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
@@ -5677,28 +5700,167 @@ again:
                /* bpf_user_pt_regs_t is a typedef, so resolve it to
                 * underlying struct and check name again
                 */
-               if (!btf_type_is_modifier(ctx_struct))
+               if (!btf_type_is_modifier(ctx_type))
                        return NULL;
-               while (btf_type_is_modifier(ctx_struct))
-                       ctx_struct = btf_type_by_id(btf_vmlinux, ctx_struct->type);
+               while (btf_type_is_modifier(ctx_type))
+                       ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
                goto again;
        }
        return ctx_type;
 }
 
+/* forward declarations for arch-specific underlying types of
+ * bpf_user_pt_regs_t; this avoids the need for arch-specific #ifdef
+ * compilation guards below for BPF_PROG_TYPE_PERF_EVENT checks, but still
+ * works correctly with __builtin_types_compatible_p() on respective
+ * architectures
+ */
+struct user_regs_struct;
+struct user_pt_regs;
+
+static int btf_validate_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+                                     const struct btf_type *t, int arg,
+                                     enum bpf_prog_type prog_type,
+                                     enum bpf_attach_type attach_type)
+{
+       const struct btf_type *ctx_type;
+       const char *tname, *ctx_tname;
+
+       if (!btf_is_ptr(t)) {
+               bpf_log(log, "arg#%d type isn't a pointer\n", arg);
+               return -EINVAL;
+       }
+       t = btf_type_by_id(btf, t->type);
+
+       /* KPROBE and PERF_EVENT programs allow bpf_user_pt_regs_t typedef */
+       if (prog_type == BPF_PROG_TYPE_KPROBE || prog_type == BPF_PROG_TYPE_PERF_EVENT) {
+               while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
+                       t = btf_type_by_id(btf, t->type);
+
+               if (btf_type_is_typedef(t)) {
+                       tname = btf_name_by_offset(btf, t->name_off);
+                       if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
+                               return 0;
+               }
+       }
+
+       /* all other program types don't use typedefs for context type */
+       while (btf_type_is_modifier(t))
+               t = btf_type_by_id(btf, t->type);
+
+       /* `void *ctx __arg_ctx` is always valid */
+       if (btf_type_is_void(t))
+               return 0;
+
+       tname = btf_name_by_offset(btf, t->name_off);
+       if (str_is_empty(tname)) {
+               bpf_log(log, "arg#%d type doesn't have a name\n", arg);
+               return -EINVAL;
+       }
+
+       /* special cases */
+       switch (prog_type) {
+       case BPF_PROG_TYPE_KPROBE:
+               if (__btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
+                       return 0;
+               break;
+       case BPF_PROG_TYPE_PERF_EVENT:
+               if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
+                   __btf_type_is_struct(t) && strcmp(tname, "pt_regs") == 0)
+                       return 0;
+               if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
+                   __btf_type_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
+                       return 0;
+               if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
+                   __btf_type_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
+                       return 0;
+               break;
+       case BPF_PROG_TYPE_RAW_TRACEPOINT:
+       case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
+               /* allow u64* as ctx */
+               if (btf_is_int(t) && t->size == 8)
+                       return 0;
+               break;
+       case BPF_PROG_TYPE_TRACING:
+               switch (attach_type) {
+               case BPF_TRACE_RAW_TP:
+                       /* tp_btf program is TRACING, so need special case here */
+                       if (__btf_type_is_struct(t) &&
+                           strcmp(tname, "bpf_raw_tracepoint_args") == 0)
+                               return 0;
+                       /* allow u64* as ctx */
+                       if (btf_is_int(t) && t->size == 8)
+                               return 0;
+                       break;
+               case BPF_TRACE_ITER:
+                       /* allow struct bpf_iter__xxx types only */
+                       if (__btf_type_is_struct(t) &&
+                           strncmp(tname, "bpf_iter__", sizeof("bpf_iter__") - 1) == 0)
+                               return 0;
+                       break;
+               case BPF_TRACE_FENTRY:
+               case BPF_TRACE_FEXIT:
+               case BPF_MODIFY_RETURN:
+                       /* allow u64* as ctx */
+                       if (btf_is_int(t) && t->size == 8)
+                               return 0;
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case BPF_PROG_TYPE_LSM:
+       case BPF_PROG_TYPE_STRUCT_OPS:
+               /* allow u64* as ctx */
+               if (btf_is_int(t) && t->size == 8)
+                       return 0;
+               break;
+       case BPF_PROG_TYPE_TRACEPOINT:
+       case BPF_PROG_TYPE_SYSCALL:
+       case BPF_PROG_TYPE_EXT:
+               return 0; /* anything goes */
+       default:
+               break;
+       }
+
+       ctx_type = find_canonical_prog_ctx_type(prog_type);
+       if (!ctx_type) {
+               /* should not happen */
+               bpf_log(log, "btf_vmlinux is malformed\n");
+               return -EINVAL;
+       }
+
+       /* resolve typedefs and check that underlying structs are matching as well */
+       while (btf_type_is_modifier(ctx_type))
+               ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
+
+       /* if program type doesn't have distinctly named struct type for
+        * context, then __arg_ctx argument can only be `void *`, which we
+        * already checked above
+        */
+       if (!__btf_type_is_struct(ctx_type)) {
+               bpf_log(log, "arg#%d should be void pointer\n", arg);
+               return -EINVAL;
+       }
+
+       ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
+       if (!__btf_type_is_struct(t) || strcmp(ctx_tname, tname) != 0) {
+               bpf_log(log, "arg#%d should be `struct %s *`\n", arg, ctx_tname);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
                                     struct btf *btf,
                                     const struct btf_type *t,
                                     enum bpf_prog_type prog_type,
                                     int arg)
 {
-       const struct btf_member *prog_ctx_type, *kern_ctx_type;
-
-       prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg);
-       if (!prog_ctx_type)
+       if (!btf_get_prog_ctx_type(log, btf, t, prog_type, arg))
                return -ENOENT;
-       kern_ctx_type = prog_ctx_type + 1;
-       return kern_ctx_type->type;
+       return find_kern_ctx_type_id(prog_type);
 }
 
 int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type)
@@ -6934,6 +7096,23 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
                return -EINVAL;
        }
 
+       for (i = 0; i < nargs; i++) {
+               const char *tag;
+
+               if (sub->args[i].arg_type != ARG_PTR_TO_CTX)
+                       continue;
+
+               /* check if arg has "arg:ctx" tag */
+               t = btf_type_by_id(btf, args[i].type);
+               tag = btf_find_decl_tag_value(btf, fn_t, i, "arg:");
+               if (IS_ERR_OR_NULL(tag) || strcmp(tag, "ctx") != 0)
+                       continue;
+
+               if (btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
+                                              prog->expected_attach_type))
+                       return -EINVAL;
+       }
+
        sub->arg_cnt = nargs;
        sub->args_cached = true;
 
index adbf330d364bba994488c0d835f08f8cffa67f63..65f598694d550359f2b926ef26ae30d0c80c6f69 100644 (file)
@@ -12826,6 +12826,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        }
 
        switch (base_type(ptr_reg->type)) {
+       case PTR_TO_FLOW_KEYS:
+               if (known)
+                       break;
+               fallthrough;
        case CONST_PTR_TO_MAP:
                /* smin_val represents the known value */
                if (known && smin_val == 0 && opcode == BPF_ADD)
index 04d11a7dd95f9bcd47af010b6c1c5c42ebe63ff8..520a11cb12f44cca94d3b4edd6f4caa2f8f3d805 100644 (file)
@@ -802,7 +802,7 @@ void cgroup1_release_agent(struct work_struct *work)
                goto out_free;
 
        ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
-       if (ret < 0 || ret >= PATH_MAX)
+       if (ret < 0)
                goto out_free;
 
        argv[0] = agentbuf;
index 8f3cef1a4d8a745207483ce7f0e8bf5dc01d93de..a66c088c851cfb87036ccc93317f7357769cb268 100644 (file)
@@ -1906,7 +1906,7 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
        len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
        spin_unlock_irq(&css_set_lock);
 
-       if (len >= PATH_MAX)
+       if (len == -E2BIG)
                len = -ERANGE;
        else if (len > 0) {
                seq_escape(sf, buf, " \t\n\\");
@@ -4182,20 +4182,6 @@ static struct kernfs_ops cgroup_kf_ops = {
        .seq_show               = cgroup_seqfile_show,
 };
 
-/* set uid and gid of cgroup dirs and files to that of the creator */
-static int cgroup_kn_set_ugid(struct kernfs_node *kn)
-{
-       struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
-                              .ia_uid = current_fsuid(),
-                              .ia_gid = current_fsgid(), };
-
-       if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
-           gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
-               return 0;
-
-       return kernfs_setattr(kn, &iattr);
-}
-
 static void cgroup_file_notify_timer(struct timer_list *timer)
 {
        cgroup_file_notify(container_of(timer, struct cgroup_file,
@@ -4208,25 +4194,18 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
        char name[CGROUP_FILE_NAME_MAX];
        struct kernfs_node *kn;
        struct lock_class_key *key = NULL;
-       int ret;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        key = &cft->lockdep_key;
 #endif
        kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
                                  cgroup_file_mode(cft),
-                                 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+                                 current_fsuid(), current_fsgid(),
                                  0, cft->kf_ops, cft,
                                  NULL, key);
        if (IS_ERR(kn))
                return PTR_ERR(kn);
 
-       ret = cgroup_kn_set_ugid(kn);
-       if (ret) {
-               kernfs_remove(kn);
-               return ret;
-       }
-
        if (cft->file_offset) {
                struct cgroup_file *cfile = (void *)css + cft->file_offset;
 
@@ -5629,7 +5608,9 @@ static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
                goto out_cancel_ref;
 
        /* create the directory */
-       kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
+       kn = kernfs_create_dir_ns(parent->kn, name, mode,
+                                 current_fsuid(), current_fsgid(),
+                                 cgrp, NULL);
        if (IS_ERR(kn)) {
                ret = PTR_ERR(kn);
                goto out_stat_exit;
@@ -5774,10 +5755,6 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
         */
        kernfs_get(cgrp->kn);
 
-       ret = cgroup_kn_set_ugid(cgrp->kn);
-       if (ret)
-               goto out_destroy;
-
        ret = css_populate_dir(&cgrp->self);
        if (ret)
                goto out_destroy;
@@ -6316,7 +6293,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
                        retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
                                                current->nsproxy->cgroup_ns);
-                       if (retval >= PATH_MAX)
+                       if (retval == -E2BIG)
                                retval = -ENAMETOOLONG;
                        if (retval < 0)
                                goto out_unlock;
index dfbb16aca9f4152df0ae7ab6ac764e355f022500..ba36c073304a3eee081b770b12dacb0e5b1a60cd 100644 (file)
@@ -5092,7 +5092,7 @@ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
        retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
                                current->nsproxy->cgroup_ns);
        css_put(css);
-       if (retval >= PATH_MAX)
+       if (retval == -E2BIG)
                retval = -ENAMETOOLONG;
        if (retval < 0)
                goto out_free;
index d483156677526133395bd9949abb83bbb4e94b1c..75cd6a736d030656d468e344f4035c25f6c8630e 100644 (file)
@@ -376,7 +376,6 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
 
        crashk_low_res.start = low_base;
        crashk_low_res.end   = low_base + low_size - 1;
-       insert_resource(&iomem_resource, &crashk_low_res);
 #endif
        return 0;
 }
@@ -458,8 +457,19 @@ retry:
 
        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
-       insert_resource(&iomem_resource, &crashk_res);
 }
+
+static __init int insert_crashkernel_resources(void)
+{
+       if (crashk_res.start < crashk_res.end)
+               insert_resource(&iomem_resource, &crashk_res);
+
+       if (crashk_low_res.start < crashk_low_res.end)
+               insert_resource(&iomem_resource, &crashk_low_res);
+
+       return 0;
+}
+early_initcall(insert_crashkernel_resources);
 #endif
 
 int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
@@ -867,7 +877,7 @@ subsys_initcall(crash_notes_memory_init);
  * regions are online. So mutex lock  __crash_hotplug_lock is used to
  * serialize the crash hotplug handling specifically.
  */
-DEFINE_MUTEX(__crash_hotplug_lock);
+static DEFINE_MUTEX(__crash_hotplug_lock);
 #define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock)
 #define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock)
 
index 6b213c8252d62df7a30dd2b0f861e6bb1895be36..d05066cb40b2ee504d780b5f9ea5d16e17bcbbd1 100644 (file)
@@ -1348,8 +1348,6 @@ do_full_getstr:
                /* PROMPT can only be set if we have MEM_READ permission. */
                snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
                         raw_smp_processor_id());
-               if (defcmd_in_progress)
-                       strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
 
                /*
                 * Fetch command from keyboard
index 3de494375b7b3cbe172ce00464ef9982bf1fa748..a6e3792b15f8a5ce0f870901de50b53a356df829 100644 (file)
@@ -62,7 +62,8 @@ enum map_err_types {
  * @pfn: page frame of the start address
  * @offset: offset of mapping relative to pfn
  * @map_err_type: track whether dma_mapping_error() was checked
- * @stacktrace: support backtraces when a violation is detected
+ * @stack_len: number of backtrace entries in @stack_entries
+ * @stack_entries: stack of backtrace history
  */
 struct dma_debug_entry {
        struct list_head list;
@@ -876,7 +877,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
        return 0;
 }
 
-void dma_debug_add_bus(struct bus_type *bus)
+void dma_debug_add_bus(const struct bus_type *bus)
 {
        struct notifier_block *nb;
 
index 97c298b210bc71d75eac5ce0dee78ae39f44a746..b079a9a8e08795b40fb0cc3891a68bcb90922876 100644 (file)
@@ -1136,6 +1136,9 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
        int cpu, i;
        int index;
 
+       if (alloc_size > IO_TLB_SEGSIZE * IO_TLB_SIZE)
+               return -1;
+
        cpu = raw_smp_processor_id();
        for (i = 0; i < default_nareas; ++i) {
                index = swiotlb_search_area(dev, cpu, i, orig_addr, alloc_size,
index 485bb0389b488d28a4efb23901b514d93b3834f6..929e98c629652a0fef1b71e6c002cca41936c4b4 100644 (file)
@@ -537,7 +537,7 @@ retry:
                }
        }
 
-       ret = __replace_page(vma, vaddr, old_page, new_page);
+       ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
        if (new_page)
                put_page(new_page);
 put_old:
index 3988a02efaef06444654a415ce298d378ab925ec..dfb963d2f862ada6a2f06259c6df4923272cb218 100644 (file)
@@ -1127,17 +1127,14 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                 * and nobody can change them.
                 *
                 * psig->stats_lock also protects us from our sub-threads
-                * which can reap other children at the same time. Until
-                * we change k_getrusage()-like users to rely on this lock
-                * we have to take ->siglock as well.
+                * which can reap other children at the same time.
                 *
                 * We use thread_group_cputime_adjusted() to get times for
                 * the thread group, which consolidates times for all threads
                 * in the group including the group leader.
                 */
                thread_group_cputime_adjusted(p, &tgutime, &tgstime);
-               spin_lock_irq(&current->sighand->siglock);
-               write_seqlock(&psig->stats_lock);
+               write_seqlock_irq(&psig->stats_lock);
                psig->cutime += tgutime + sig->cutime;
                psig->cstime += tgstime + sig->cstime;
                psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
@@ -1160,8 +1157,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                        psig->cmaxrss = maxrss;
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
-               write_sequnlock(&psig->stats_lock);
-               spin_unlock_irq(&current->sighand->siglock);
+               write_sequnlock_irq(&psig->stats_lock);
        }
 
        if (wo->wo_rusage)
index c981fa6171c1aebee1d4e69bc493e218b5d1bcac..0d944e92a43ffa13bdbcce6c6a28c44bab29ca19 100644 (file)
@@ -1173,7 +1173,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        tsk->use_memdelay = 0;
 #endif
 
-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
        tsk->pasid_activated = 0;
 #endif
 
@@ -1748,6 +1748,7 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
        if (clone_flags & CLONE_FS) {
                /* tsk->fs is already what we want */
                spin_lock(&fs->lock);
+               /* "users" and "in_exec" locked for check_unsafe_exec() */
                if (fs->in_exec) {
                        spin_unlock(&fs->lock);
                        return -EAGAIN;
index e0e853412c158e1277ea4c63a40576093b0bc673..1e78ef24321e82dbfaf0c07941a0c41ad3438aaa 100644 (file)
@@ -627,12 +627,21 @@ retry:
 }
 
 /*
- * PI futexes can not be requeued and must remove themselves from the
- * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
+ * PI futexes can not be requeued and must remove themselves from the hash
+ * bucket. The hash bucket lock (i.e. lock_ptr) is held.
  */
 void futex_unqueue_pi(struct futex_q *q)
 {
-       __futex_unqueue(q);
+       /*
+        * If the lock was not acquired (due to timeout or signal) then the
+        * rt_waiter is removed before futex_q is. If this is observed by
+        * an unlocker after dropping the rtmutex wait lock and before
+        * acquiring the hash bucket lock, then the unlocker dequeues the
+        * futex_q from the hash bucket list to guarantee consistent state
+        * vs. userspace. Therefore the dequeue here must be conditional.
+        */
+       if (!plist_node_empty(&q->list))
+               __futex_unqueue(q);
 
        BUG_ON(!q->pi_state);
        put_pi_state(q->pi_state);
index 90e5197f4e5696dbd5a79fd0033ed95e4bd32fac..5722467f273794ec314870fc76d0ba04a8617f7e 100644 (file)
@@ -1135,6 +1135,7 @@ retry:
 
        hb = futex_hash(&key);
        spin_lock(&hb->lock);
+retry_hb:
 
        /*
         * Check waiters first. We do not trust user space values at
@@ -1177,12 +1178,17 @@ retry:
                /*
                 * Futex vs rt_mutex waiter state -- if there are no rt_mutex
                 * waiters even though futex thinks there are, then the waiter
-                * is leaving and the uncontended path is safe to take.
+                * is leaving. The entry needs to be removed from the list so a
+                * new futex_lock_pi() is not using this stale PI-state while
+                * the futex is available in user space again.
+                * There can be more than one task on its way out so it needs
+                * to retry.
                 */
                rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
                if (!rt_waiter) {
+                       __futex_unqueue(top_waiter);
                        raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-                       goto do_uncontended;
+                       goto retry_hb;
                }
 
                get_pi_state(pi_state);
@@ -1217,7 +1223,6 @@ retry:
                return ret;
        }
 
-do_uncontended:
        /*
         * We have no kernel internal state, i.e. no waiters in the
         * kernel. Waiters which are about to queue themselves are stuck
index 27ca1c866f298bf9d8876bce68e418881702aabf..371eb1711d3467baf596c477411c1d3ac554cedd 100644 (file)
@@ -600,7 +600,7 @@ int __init early_irq_init(void)
                mutex_init(&desc[i].request_mutex);
                init_waitqueue_head(&desc[i].wait_for_threads);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
-               irq_resend_init(desc);
+               irq_resend_init(&desc[i]);
        }
        return arch_early_irq_init();
 }
index a08031b57a61f64526b09a4ea7d28c7d6092e104..d08fc7b5db97905b3728b54aa37aa1492b60b1ee 100644 (file)
@@ -1257,6 +1257,7 @@ int kernel_kexec(void)
                kexec_in_progress = true;
                kernel_restart_prepare("kexec reboot");
                migrate_to_reboot_cpu();
+               syscore_shutdown();
 
                /*
                 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
index d5a0ee40bf66c5318df14c5a49294850434e13d3..9d9095e817928658d2c6d54d5da6f4826ff7c6be 100644 (file)
@@ -1993,7 +1993,7 @@ NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);
 unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
                                      struct llist_node **cur)
 {
-       struct kretprobe_instance *ri = NULL;
+       struct kretprobe_instance *ri;
        kprobe_opcode_t *ret;
 
        if (WARN_ON_ONCE(!cur))
@@ -2802,7 +2802,7 @@ static int show_kprobe_addr(struct seq_file *pi, void *v)
 {
        struct hlist_head *head;
        struct kprobe *p, *kp;
-       const char *sym = NULL;
+       const char *sym;
        unsigned int i = *(loff_t *) v;
        unsigned long offset = 0;
        char *modname, namebuf[KSYM_NAME_LEN];
index 69d3cd2cfc3baf1062b6a785eeca2a834fca0171..415d81e6ce7072faf1edeecb0171bc486f6ca5bf 100644 (file)
@@ -124,7 +124,7 @@ struct call_rcu_chain {
        struct rcu_head crc_rh;
        bool crc_stop;
 };
-struct call_rcu_chain *call_rcu_chain;
+struct call_rcu_chain *call_rcu_chain_list;
 
 /* Forward reference. */
 static void lock_torture_cleanup(void);
@@ -1074,12 +1074,12 @@ static int call_rcu_chain_init(void)
 
        if (call_rcu_chains <= 0)
                return 0;
-       call_rcu_chain = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain), GFP_KERNEL);
-       if (!call_rcu_chain)
+       call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
+       if (!call_rcu_chain_list)
                return -ENOMEM;
        for (i = 0; i < call_rcu_chains; i++) {
-               call_rcu_chain[i].crc_stop = false;
-               call_rcu(&call_rcu_chain[i].crc_rh, call_rcu_chain_cb);
+               call_rcu_chain_list[i].crc_stop = false;
+               call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
        }
        return 0;
 }
@@ -1089,13 +1089,13 @@ static void call_rcu_chain_cleanup(void)
 {
        int i;
 
-       if (!call_rcu_chain)
+       if (!call_rcu_chain_list)
                return;
        for (i = 0; i < call_rcu_chains; i++)
-               smp_store_release(&call_rcu_chain[i].crc_stop, true);
+               smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
        rcu_barrier();
-       kfree(call_rcu_chain);
-       call_rcu_chain = NULL;
+       kfree(call_rcu_chain_list);
+       call_rcu_chain_list = NULL;
 }
 
 static void lock_torture_cleanup(void)
index 2984de629f7494d6dde1e936197eca987520c69f..9b0b52e1836fa42b60539edcd33f0c70358b74f2 100644 (file)
@@ -105,6 +105,31 @@ config RCU_CPU_STALL_CPUTIME
          The boot option rcupdate.rcu_cpu_stall_cputime has the same function
          as this one, but will override this if it exists.
 
+config RCU_CPU_STALL_NOTIFIER
+       bool "Provide RCU CPU-stall notifiers"
+       depends on RCU_STALL_COMMON
+       depends on DEBUG_KERNEL
+       depends on RCU_EXPERT
+       default n
+       help
+         WARNING:  You almost certainly do not want this!!!
+
+         Enable RCU CPU-stall notifiers, which are invoked just before
+         printing the RCU CPU stall warning.  As such, bugs in notifier
+         callbacks can prevent stall warnings from being printed.
+         And the whole reason that a stall warning is being printed is
+         that something is hung up somewhere.  Therefore, the notifier
+         callbacks must be written extremely carefully, preferably
+         containing only lockless code.  After all, it is quite possible
+         that the whole reason that the RCU CPU stall is happening in
+         the first place is that someone forgot to release whatever lock
+         that you are thinking of acquiring.  In which case, having your
+         notifier callback acquire that lock will hang, preventing the
+         RCU CPU stall warning from appearing.
+
+         Say Y here if you want RCU CPU stall notifiers (you don't want them)
+         Say N if you are unsure.
+
 config RCU_TRACE
        bool "Enable tracing for RCU"
        depends on DEBUG_KERNEL
index b531c33e9545b7957599fc67a7d9af3193f1279f..f94f65877f2b68055b4e5c7a057c22bf4fb96a18 100644 (file)
@@ -262,6 +262,8 @@ static inline bool rcu_stall_is_suppressed_at_boot(void)
        return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
 }
 
+extern int rcu_cpu_stall_notifiers;
+
 #ifdef CONFIG_RCU_STALL_COMMON
 
 extern int rcu_cpu_stall_ftrace_dump;
@@ -659,10 +661,10 @@ static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
 bool rcu_cpu_beenfullyonline(int cpu);
 #endif
 
-#ifdef CONFIG_RCU_STALL_COMMON
+#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 int rcu_stall_notifier_call_chain(unsigned long val, void *v);
-#else // #ifdef CONFIG_RCU_STALL_COMMON
+#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
-#endif // #else // #ifdef CONFIG_RCU_STALL_COMMON
+#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
 
 #endif /* __LINUX_RCU_H */
index 30fc9d34e3297f86c822b1db5face539af2ee83d..7567ca8e743ca62f92fe2dda179d1bce56aaedef 100644 (file)
@@ -2450,10 +2450,12 @@ static int rcu_torture_stall(void *args)
        unsigned long stop_at;
 
        VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
-       ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
-       if (ret)
-               pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
-                       __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
+       if (rcu_cpu_stall_notifiers) {
+               ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
+               if (ret)
+                       pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
+                               __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
+       }
        if (stall_cpu_holdoff > 0) {
                VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
                schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
@@ -2497,7 +2499,7 @@ static int rcu_torture_stall(void *args)
                cur_ops->readunlock(idx);
        }
        pr_alert("%s end.\n", __func__);
-       if (!ret) {
+       if (rcu_cpu_stall_notifiers && !ret) {
                ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
                if (ret)
                        pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
@@ -3872,7 +3874,9 @@ rcu_torture_init(void)
        }
        if (fqs_duration < 0)
                fqs_duration = 0;
-       if (fqs_duration) {
+       if (fqs_holdoff < 0)
+               fqs_holdoff = 0;
+       if (fqs_duration && fqs_holdoff) {
                /* Create the fqs thread */
                firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
                                                  fqs_task);
index 560e99ec53335c9b4b0c7688cebff33a1fac808c..0351a4e83529e322f8e96cecb6244f728478fe7b 100644 (file)
@@ -772,20 +772,10 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
  */
 static void srcu_gp_start(struct srcu_struct *ssp)
 {
-       struct srcu_data *sdp;
        int state;
 
-       if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
-               sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
-       else
-               sdp = this_cpu_ptr(ssp->sda);
        lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
        WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed));
-       spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
-       rcu_segcblist_advance(&sdp->srcu_cblist,
-                             rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
-       WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
-       spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
        WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
        WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
        smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
@@ -1271,9 +1261,11 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
         *     period (gp_num = X + 8). So acceleration fails.
         */
        s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
-       rcu_segcblist_advance(&sdp->srcu_cblist,
-                             rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
-       WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s) && rhp);
+       if (rhp) {
+               rcu_segcblist_advance(&sdp->srcu_cblist,
+                                     rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+               WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s));
+       }
        if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
                sdp->srcu_gp_seq_needed = s;
                needgp = true;
@@ -1723,6 +1715,11 @@ static void srcu_invoke_callbacks(struct work_struct *work)
        WARN_ON_ONCE(!rcu_segcblist_segempty(&sdp->srcu_cblist, RCU_NEXT_TAIL));
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
+       /*
+        * Although this function is theoretically re-entrant, concurrent
+        * callbacks invocation is disallowed to avoid executing an SRCU barrier
+        * too early.
+        */
        if (sdp->srcu_cblist_invoking ||
            !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
                spin_unlock_irq_rcu_node(sdp);
@@ -1753,6 +1750,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
        sdp->srcu_cblist_invoking = false;
        more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
        spin_unlock_irq_rcu_node(sdp);
+       /* An SRCU barrier or callbacks from previous nesting work pending */
        if (more)
                srcu_schedule_cbs_sdp(sdp, 0);
 }
index f54d5782eca0baf60cbb29af901de5019db63f3e..732ad5b39946a519bb0ba7a0de35537d4d19f38b 100644 (file)
@@ -975,7 +975,7 @@ static void check_holdout_task(struct task_struct *t,
            t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
            !rcu_tasks_is_holdout(t) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
-            !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
+            !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
                WRITE_ONCE(t->rcu_tasks_holdout, false);
                list_del_init(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
@@ -993,7 +993,7 @@ static void check_holdout_task(struct task_struct *t,
                 t, ".I"[is_idle_task(t)],
                 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
                 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
-                t->rcu_tasks_idle_cpu, cpu);
+                data_race(t->rcu_tasks_idle_cpu), cpu);
        sched_show_task(t);
 }
 
index 3ac3c846105fb4c059a001ae3cf52a3c7747aac5..b2bccfd37c383d04692fb6a7a72eb71a1f62798b 100644 (file)
@@ -1013,6 +1013,38 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
        return needmore;
 }
 
+static void swake_up_one_online_ipi(void *arg)
+{
+       struct swait_queue_head *wqh = arg;
+
+       swake_up_one(wqh);
+}
+
+static void swake_up_one_online(struct swait_queue_head *wqh)
+{
+       int cpu = get_cpu();
+
+       /*
+        * If called from rcutree_report_cpu_starting(), wake up
+        * is dangerous that late in the CPU-down hotplug process. The
+        * scheduler might queue an ignored hrtimer. Defer the wake up
+        * to an online CPU instead.
+        */
+       if (unlikely(cpu_is_offline(cpu))) {
+               int target;
+
+               target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU),
+                                        cpu_online_mask);
+
+               smp_call_function_single(target, swake_up_one_online_ipi,
+                                        wqh, 0);
+               put_cpu();
+       } else {
+               put_cpu();
+               swake_up_one(wqh);
+       }
+}
+
 /*
  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
  * interrupt or softirq handler, in which case we just might immediately
@@ -1037,7 +1069,7 @@ static void rcu_gp_kthread_wake(void)
                return;
        WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
        WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
-       swake_up_one(&rcu_state.gp_wq);
+       swake_up_one_online(&rcu_state.gp_wq);
 }
 
 /*
@@ -2338,6 +2370,8 @@ void rcu_force_quiescent_state(void)
        struct rcu_node *rnp;
        struct rcu_node *rnp_old = NULL;
 
+       if (!rcu_gp_in_progress())
+               return;
        /* Funnel through hierarchy to reduce memory contention. */
        rnp = raw_cpu_read(rcu_data.mynode);
        for (; rnp != NULL; rnp = rnp->parent) {
index 6d7cea5d591f95d823b63972da899dded9e369d1..2ac440bc7e10bc8e1248eae47a661eb017768cee 100644 (file)
@@ -173,7 +173,6 @@ static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
        return ret;
 }
 
-
 /*
  * Report the exit from RCU read-side critical section for the last task
  * that queued itself during or before the current expedited preemptible-RCU
@@ -201,7 +200,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
-                               swake_up_one(&rcu_state.expedited_wq);
+                               swake_up_one_online(&rcu_state.expedited_wq);
                        }
                        break;
                }
index ac8e86babe449a20b1e0bd03a7d87bfb5b7ff579..5d666428546b03ac9e865fbc6e1ae9b1e057100f 100644 (file)
@@ -1061,6 +1061,7 @@ static int __init rcu_sysrq_init(void)
 }
 early_initcall(rcu_sysrq_init);
 
+#ifdef CONFIG_RCU_CPU_STALL_NOTIFIER
 
 //////////////////////////////////////////////////////////////////////////////
 //
@@ -1081,7 +1082,13 @@ static ATOMIC_NOTIFIER_HEAD(rcu_cpu_stall_notifier_list);
  */
 int rcu_stall_chain_notifier_register(struct notifier_block *n)
 {
-       return atomic_notifier_chain_register(&rcu_cpu_stall_notifier_list, n);
+       int rcsn = rcu_cpu_stall_notifiers;
+
+       WARN(1, "Adding %pS() to RCU stall notifier list (%s).\n", n->notifier_call,
+            rcsn ? "possibly suppressing RCU CPU stall warnings" : "failed, so all is well");
+       if (rcsn)
+               return atomic_notifier_chain_register(&rcu_cpu_stall_notifier_list, n);
+       return -EEXIST;
 }
 EXPORT_SYMBOL_GPL(rcu_stall_chain_notifier_register);
 
@@ -1115,3 +1122,5 @@ int rcu_stall_notifier_call_chain(unsigned long val, void *v)
 {
        return atomic_notifier_call_chain(&rcu_cpu_stall_notifier_list, val, v);
 }
+
+#endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER
index c534d6806d3d5726322d5da634224cd6897293cc..46aaaa9fe33907b0eddb70e2b1c5d6a2419c13c8 100644 (file)
@@ -538,9 +538,15 @@ long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 EXPORT_SYMBOL_GPL(torture_sched_setaffinity);
 #endif
 
+int rcu_cpu_stall_notifiers __read_mostly; // !0 = provide stall notifiers (rarely useful)
+EXPORT_SYMBOL_GPL(rcu_cpu_stall_notifiers);
+
 #ifdef CONFIG_RCU_STALL_COMMON
 int rcu_cpu_stall_ftrace_dump __read_mostly;
 module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
+#ifdef CONFIG_RCU_CPU_STALL_NOTIFIER
+module_param(rcu_cpu_stall_notifiers, int, 0444);
+#endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER
 int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
 module_param(rcu_cpu_stall_suppress, int, 0644);
index 95c3c097083e505f41931168cef46f84cf4fac52..eece6244f9d2fea301f5523ef1d7d6779f0d4625 100644 (file)
@@ -133,7 +133,11 @@ unsigned long get_capacity_ref_freq(struct cpufreq_policy *policy)
        if (arch_scale_freq_invariant())
                return policy->cpuinfo.max_freq;
 
-       return policy->cur;
+       /*
+        * Apply a 25% margin so that we select a higher frequency than
+        * the current one before the CPU is fully busy:
+        */
+       return policy->cur + (policy->cur >> 2);
 }
 
 /**
index e219fcfa112d863eeef58381d04fd4bab16a1e32..f8e543f1e38a06dc3a4aa2f777c7e88d444e5565 100644 (file)
@@ -1785,21 +1785,24 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
        struct task_struct *t;
        unsigned long flags;
        u64 tgutime, tgstime, utime, stime;
-       unsigned long maxrss = 0;
+       unsigned long maxrss;
+       struct mm_struct *mm;
        struct signal_struct *sig = p->signal;
+       unsigned int seq = 0;
 
-       memset((char *)r, 0, sizeof (*r));
+retry:
+       memset(r, 0, sizeof(*r));
        utime = stime = 0;
+       maxrss = 0;
 
        if (who == RUSAGE_THREAD) {
                task_cputime_adjusted(current, &utime, &stime);
                accumulate_thread_rusage(p, r);
                maxrss = sig->maxrss;
-               goto out;
+               goto out_thread;
        }
 
-       if (!lock_task_sighand(p, &flags))
-               return;
+       flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
 
        switch (who) {
        case RUSAGE_BOTH:
@@ -1819,9 +1822,6 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
                fallthrough;
 
        case RUSAGE_SELF:
-               thread_group_cputime_adjusted(p, &tgutime, &tgstime);
-               utime += tgutime;
-               stime += tgstime;
                r->ru_nvcsw += sig->nvcsw;
                r->ru_nivcsw += sig->nivcsw;
                r->ru_minflt += sig->min_flt;
@@ -1830,28 +1830,42 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
                r->ru_oublock += sig->oublock;
                if (maxrss < sig->maxrss)
                        maxrss = sig->maxrss;
+
+               rcu_read_lock();
                __for_each_thread(sig, t)
                        accumulate_thread_rusage(t, r);
+               rcu_read_unlock();
+
                break;
 
        default:
                BUG();
        }
-       unlock_task_sighand(p, &flags);
 
-out:
-       r->ru_utime = ns_to_kernel_old_timeval(utime);
-       r->ru_stime = ns_to_kernel_old_timeval(stime);
+       if (need_seqretry(&sig->stats_lock, seq)) {
+               seq = 1;
+               goto retry;
+       }
+       done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
 
-       if (who != RUSAGE_CHILDREN) {
-               struct mm_struct *mm = get_task_mm(p);
+       if (who == RUSAGE_CHILDREN)
+               goto out_children;
 
-               if (mm) {
-                       setmax_mm_hiwater_rss(&maxrss, mm);
-                       mmput(mm);
-               }
+       thread_group_cputime_adjusted(p, &tgutime, &tgstime);
+       utime += tgutime;
+       stime += tgstime;
+
+out_thread:
+       mm = get_task_mm(p);
+       if (mm) {
+               setmax_mm_hiwater_rss(&maxrss, mm);
+               mmput(mm);
        }
+
+out_children:
        r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
+       r->ru_utime = ns_to_kernel_old_timeval(utime);
+       r->ru_stime = ns_to_kernel_old_timeval(stime);
 }
 
 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
index c108ed8a9804ada919575c97b42dd663e33c1a16..3052b1f1168e29c4432ba3b068488af11029018d 100644 (file)
@@ -99,6 +99,7 @@ static u64 suspend_start;
  * Interval: 0.5sec.
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
+#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))
 
 /*
  * Threshold: 0.0312s, when doubled: 0.0625s.
@@ -134,6 +135,7 @@ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
+static int64_t watchdog_max_interval;
 
 static inline void clocksource_watchdog_lock(unsigned long *flags)
 {
@@ -399,8 +401,8 @@ static inline void clocksource_reset_watchdog(void)
 static void clocksource_watchdog(struct timer_list *unused)
 {
        u64 csnow, wdnow, cslast, wdlast, delta;
+       int64_t wd_nsec, cs_nsec, interval;
        int next_cpu, reset_pending;
-       int64_t wd_nsec, cs_nsec;
        struct clocksource *cs;
        enum wd_read_status read_ret;
        unsigned long extra_wait = 0;
@@ -470,6 +472,27 @@ static void clocksource_watchdog(struct timer_list *unused)
                if (atomic_read(&watchdog_reset_pending))
                        continue;
 
+               /*
+                * The processing of timer softirqs can get delayed (usually
+                * on account of ksoftirqd not getting to run in a timely
+                * manner), which causes the watchdog interval to stretch.
+                * Skew detection may fail for longer watchdog intervals
+                * on account of fixed margins being used.
+                * Some clocksources, e.g. acpi_pm, cannot tolerate
+                * watchdog intervals longer than a few seconds.
+                */
+               interval = max(cs_nsec, wd_nsec);
+               if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
+                       if (system_state > SYSTEM_SCHEDULING &&
+                           interval > 2 * watchdog_max_interval) {
+                               watchdog_max_interval = interval;
+                               pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
+                                       cs_nsec, wd_nsec);
+                       }
+                       watchdog_timer.expires = jiffies;
+                       continue;
+               }
+
                /* Check the deviation from the watchdog clocksource. */
                md = cs->uncertainty_margin + watchdog->uncertainty_margin;
                if (abs(cs_nsec - wd_nsec) > md) {
index 760793998cdd703a387c64a792a7b7f7dab552d5..edb0f821dceaa1720ac94fc53f4002a1e5f7bdd3 100644 (file)
@@ -1085,6 +1085,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
                           enum hrtimer_mode mode)
 {
        debug_activate(timer, mode);
+       WARN_ON_ONCE(!base->cpu_base->online);
 
        base->cpu_base->active_bases |= 1 << base->index;
 
@@ -2183,6 +2184,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
        cpu_base->softirq_next_timer = NULL;
        cpu_base->expires_next = KTIME_MAX;
        cpu_base->softirq_expires_next = KTIME_MAX;
+       cpu_base->online = 1;
        hrtimer_cpu_base_init_expiry_lock(cpu_base);
        return 0;
 }
@@ -2250,6 +2252,7 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
        smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
 
        raw_spin_unlock(&new_base->lock);
+       old_base->online = 0;
        raw_spin_unlock(&old_base->lock);
 
        return 0;
index a17d26002831008ae657d2c063f9ff01e2ad709c..01fb50c1b17e4f1b33285ae2ce2690f0747f8ee8 100644 (file)
@@ -1576,13 +1576,23 @@ void tick_setup_sched_timer(void)
 void tick_cancel_sched_timer(int cpu)
 {
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+       ktime_t idle_sleeptime, iowait_sleeptime;
+       unsigned long idle_calls, idle_sleeps;
 
 # ifdef CONFIG_HIGH_RES_TIMERS
        if (ts->sched_timer.base)
                hrtimer_cancel(&ts->sched_timer);
 # endif
 
+       idle_sleeptime = ts->idle_sleeptime;
+       iowait_sleeptime = ts->iowait_sleeptime;
+       idle_calls = ts->idle_calls;
+       idle_sleeps = ts->idle_sleeps;
        memset(ts, 0, sizeof(*ts));
+       ts->idle_sleeptime = idle_sleeptime;
+       ts->iowait_sleeptime = iowait_sleeptime;
+       ts->idle_calls = idle_calls;
+       ts->idle_sleeps = idle_sleeps;
 }
 #endif
 
index b01ae7d36021819e6d929ce2ab1e0a5a61464309..c060d5b479102dc1a4ddfb4ae283f626fc192ada 100644 (file)
@@ -5325,7 +5325,17 @@ static LIST_HEAD(ftrace_direct_funcs);
 
 static int register_ftrace_function_nolock(struct ftrace_ops *ops);
 
+/*
+ * If there are multiple ftrace_ops, use SAVE_REGS by default, so that direct
+ * call will be jumped from ftrace_regs_caller. Only if the architecture does
+ * not support ftrace_regs_caller but direct_call, use SAVE_ARGS so that it
+ * jumps from ftrace_caller for multiple ftrace_ops.
+ */
+#ifndef HAVE_DYNAMIC_FTRACE_WITH_REGS
 #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
+#else
+#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
+#endif
 
 static int check_direct_multi(struct ftrace_ops *ops)
 {
index 9286f88fcd32ac329bdd9b113d2bcb995de7f39b..fd4bfe3ecf014f6b3c83f9a7fa043b7df44dac32 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/oom.h>
 
+#include <asm/local64.h>
 #include <asm/local.h>
 
 /*
@@ -317,6 +318,11 @@ struct buffer_data_page {
        unsigned char    data[] RB_ALIGN_DATA;  /* data of buffer page */
 };
 
+struct buffer_data_read_page {
+       unsigned                order;  /* order of the page */
+       struct buffer_data_page *data;  /* actual data, stored in this page */
+};
+
 /*
  * Note, the buffer_page list must be first. The buffer pages
  * are allocated in cache lines, which means that each buffer
@@ -331,6 +337,7 @@ struct buffer_page {
        unsigned         read;          /* index for next read */
        local_t          entries;       /* entries on this page */
        unsigned long    real_end;      /* real end of data */
+       unsigned         order;         /* order of the page */
        struct buffer_data_page *page;  /* Actual data page */
 };
 
@@ -361,7 +368,7 @@ static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
 
 static void free_buffer_page(struct buffer_page *bpage)
 {
-       free_page((unsigned long)bpage->page);
+       free_pages((unsigned long)bpage->page, bpage->order);
        kfree(bpage);
 }
 
@@ -373,41 +380,6 @@ static inline bool test_time_stamp(u64 delta)
        return !!(delta & TS_DELTA_TEST);
 }
 
-#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
-
-/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
-#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
-
-int ring_buffer_print_page_header(struct trace_seq *s)
-{
-       struct buffer_data_page field;
-
-       trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-                        "offset:0;\tsize:%u;\tsigned:%u;\n",
-                        (unsigned int)sizeof(field.time_stamp),
-                        (unsigned int)is_signed_type(u64));
-
-       trace_seq_printf(s, "\tfield: local_t commit;\t"
-                        "offset:%u;\tsize:%u;\tsigned:%u;\n",
-                        (unsigned int)offsetof(typeof(field), commit),
-                        (unsigned int)sizeof(field.commit),
-                        (unsigned int)is_signed_type(long));
-
-       trace_seq_printf(s, "\tfield: int overwrite;\t"
-                        "offset:%u;\tsize:%u;\tsigned:%u;\n",
-                        (unsigned int)offsetof(typeof(field), commit),
-                        1,
-                        (unsigned int)is_signed_type(long));
-
-       trace_seq_printf(s, "\tfield: char data;\t"
-                        "offset:%u;\tsize:%u;\tsigned:%u;\n",
-                        (unsigned int)offsetof(typeof(field), data),
-                        (unsigned int)BUF_PAGE_SIZE,
-                        (unsigned int)is_signed_type(char));
-
-       return !trace_seq_has_overflowed(s);
-}
-
 struct rb_irq_work {
        struct irq_work                 work;
        wait_queue_head_t               waiters;
@@ -463,27 +435,9 @@ enum {
        RB_CTX_MAX
 };
 
-#if BITS_PER_LONG == 32
-#define RB_TIME_32
-#endif
-
-/* To test on 64 bit machines */
-//#define RB_TIME_32
-
-#ifdef RB_TIME_32
-
-struct rb_time_struct {
-       local_t         cnt;
-       local_t         top;
-       local_t         bottom;
-       local_t         msb;
-};
-#else
-#include <asm/local64.h>
 struct rb_time_struct {
        local64_t       time;
 };
-#endif
 typedef struct rb_time_struct rb_time_t;
 
 #define MAX_NEST       5
@@ -557,6 +511,10 @@ struct trace_buffer {
 
        struct rb_irq_work              irq_work;
        bool                            time_stamp_abs;
+
+       unsigned int                    subbuf_size;
+       unsigned int                    subbuf_order;
+       unsigned int                    max_data_size;
 };
 
 struct ring_buffer_iter {
@@ -570,150 +528,48 @@ struct ring_buffer_iter {
        u64                             read_stamp;
        u64                             page_stamp;
        struct ring_buffer_event        *event;
+       size_t                          event_size;
        int                             missed_events;
 };
 
-#ifdef RB_TIME_32
-
-/*
- * On 32 bit machines, local64_t is very expensive. As the ring
- * buffer doesn't need all the features of a true 64 bit atomic,
- * on 32 bit, it uses these functions (64 still uses local64_t).
- *
- * For the ring buffer, 64 bit required operations for the time is
- * the following:
- *
- *  - Reads may fail if it interrupted a modification of the time stamp.
- *      It will succeed if it did not interrupt another write even if
- *      the read itself is interrupted by a write.
- *      It returns whether it was successful or not.
- *
- *  - Writes always succeed and will overwrite other writes and writes
- *      that were done by events interrupting the current write.
- *
- *  - A write followed by a read of the same time stamp will always succeed,
- *      but may not contain the same value.
- *
- *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
- *      Other than that, it acts like a normal cmpxchg.
- *
- * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
- *  (bottom being the least significant 30 bits of the 60 bit time stamp).
- *
- * The two most significant bits of each half holds a 2 bit counter (0-3).
- * Each update will increment this counter by one.
- * When reading the top and bottom, if the two counter bits match then the
- *  top and bottom together make a valid 60 bit number.
- */
-#define RB_TIME_SHIFT  30
-#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
-#define RB_TIME_MSB_SHIFT       60
-
-static inline int rb_time_cnt(unsigned long val)
+int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
 {
-       return (val >> RB_TIME_SHIFT) & 3;
-}
-
-static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
-{
-       u64 val;
-
-       val = top & RB_TIME_VAL_MASK;
-       val <<= RB_TIME_SHIFT;
-       val |= bottom & RB_TIME_VAL_MASK;
-
-       return val;
-}
-
-static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
-{
-       unsigned long top, bottom, msb;
-       unsigned long c;
-
-       /*
-        * If the read is interrupted by a write, then the cnt will
-        * be different. Loop until both top and bottom have been read
-        * without interruption.
-        */
-       do {
-               c = local_read(&t->cnt);
-               top = local_read(&t->top);
-               bottom = local_read(&t->bottom);
-               msb = local_read(&t->msb);
-       } while (c != local_read(&t->cnt));
-
-       *cnt = rb_time_cnt(top);
-
-       /* If top, msb or bottom counts don't match, this interrupted a write */
-       if (*cnt != rb_time_cnt(msb) || *cnt != rb_time_cnt(bottom))
-               return false;
-
-       /* The shift to msb will lose its cnt bits */
-       *ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
-       return true;
-}
-
-static bool rb_time_read(rb_time_t *t, u64 *ret)
-{
-       unsigned long cnt;
-
-       return __rb_time_read(t, ret, &cnt);
-}
-
-static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
-{
-       return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
-}
-
-static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
-                                unsigned long *msb)
-{
-       *top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
-       *bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
-       *msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
-}
+       struct buffer_data_page field;
 
-static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
-{
-       val = rb_time_val_cnt(val, cnt);
-       local_set(t, val);
-}
+       trace_seq_printf(s, "\tfield: u64 timestamp;\t"
+                        "offset:0;\tsize:%u;\tsigned:%u;\n",
+                        (unsigned int)sizeof(field.time_stamp),
+                        (unsigned int)is_signed_type(u64));
 
-static void rb_time_set(rb_time_t *t, u64 val)
-{
-       unsigned long cnt, top, bottom, msb;
+       trace_seq_printf(s, "\tfield: local_t commit;\t"
+                        "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                        (unsigned int)offsetof(typeof(field), commit),
+                        (unsigned int)sizeof(field.commit),
+                        (unsigned int)is_signed_type(long));
 
-       rb_time_split(val, &top, &bottom, &msb);
+       trace_seq_printf(s, "\tfield: int overwrite;\t"
+                        "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                        (unsigned int)offsetof(typeof(field), commit),
+                        1,
+                        (unsigned int)is_signed_type(long));
 
-       /* Writes always succeed with a valid number even if it gets interrupted. */
-       do {
-               cnt = local_inc_return(&t->cnt);
-               rb_time_val_set(&t->top, top, cnt);
-               rb_time_val_set(&t->bottom, bottom, cnt);
-               rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
-       } while (cnt != local_read(&t->cnt));
-}
+       trace_seq_printf(s, "\tfield: char data;\t"
+                        "offset:%u;\tsize:%u;\tsigned:%u;\n",
+                        (unsigned int)offsetof(typeof(field), data),
+                        (unsigned int)buffer->subbuf_size,
+                        (unsigned int)is_signed_type(char));
 
-static inline bool
-rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
-{
-       return local_try_cmpxchg(l, &expect, set);
+       return !trace_seq_has_overflowed(s);
 }
 
-#else /* 64 bits */
-
-/* local64_t always succeeds */
-
-static inline bool rb_time_read(rb_time_t *t, u64 *ret)
+static inline void rb_time_read(rb_time_t *t, u64 *ret)
 {
        *ret = local64_read(&t->time);
-       return true;
 }
 static void rb_time_set(rb_time_t *t, u64 val)
 {
        local64_set(&t->time, val);
 }
-#endif
 
 /*
  * Enable this to make sure that the event passed to
@@ -820,10 +676,7 @@ u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
        WARN_ONCE(1, "nest (%d) greater than max", nest);
 
  fail:
-       /* Can only fail on 32 bit */
-       if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
-               /* Screw it, just read the current time */
-               ts = rb_time_stamp(cpu_buffer->buffer);
+       rb_time_read(&cpu_buffer->write_stamp, &ts);
 
        return ts;
 }
@@ -1091,7 +944,7 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
                full = 0;
        } else {
                if (!cpumask_test_cpu(cpu, buffer->cpumask))
-                       return -EINVAL;
+                       return EPOLLERR;
 
                cpu_buffer = buffer->buffers[cpu];
                work = &cpu_buffer->irq_work;
@@ -1619,10 +1472,12 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
                list_add(&bpage->list, pages);
 
-               page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
+               page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags,
+                                       cpu_buffer->buffer->subbuf_order);
                if (!page)
                        goto free_pages;
                bpage->page = page_address(page);
+               bpage->order = cpu_buffer->buffer->subbuf_order;
                rb_init_page(bpage->page);
 
                if (user_thread && fatal_signal_pending(current))
@@ -1701,7 +1556,8 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
        rb_check_bpage(cpu_buffer, bpage);
 
        cpu_buffer->reader_page = bpage;
-       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+
+       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, cpu_buffer->buffer->subbuf_order);
        if (!page)
                goto fail_free_reader;
        bpage->page = page_address(page);
@@ -1784,7 +1640,14 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
        if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
                goto fail_free_buffer;
 
-       nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+       /* Default buffer page size - one system page */
+       buffer->subbuf_order = 0;
+       buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
+
+       /* Max payload is buffer page size - header (8 bytes) */
+       buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
+
+       nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
        buffer->flags = flags;
        buffer->clock = trace_clock_local;
        buffer->reader_lock_key = key;
@@ -2103,7 +1966,7 @@ static void update_pages_handler(struct work_struct *work)
  * @size: the new size.
  * @cpu_id: the cpu buffer to resize
  *
- * Minimum size is 2 * BUF_PAGE_SIZE.
+ * Minimum size is 2 * buffer->subbuf_size.
  *
  * Returns 0 on success and < 0 on failure.
  */
@@ -2125,7 +1988,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
            !cpumask_test_cpu(cpu_id, buffer->cpumask))
                return 0;
 
-       nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+       nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
 
        /* we need a minimum of two pages */
        if (nr_pages < 2)
@@ -2372,7 +2235,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
         */
        barrier();
 
-       if ((iter->head + length) > commit || length > BUF_PAGE_SIZE)
+       if ((iter->head + length) > commit || length > iter->event_size)
                /* Writer corrupted the read? */
                goto reset;
 
@@ -2412,11 +2275,13 @@ rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
 }
 
 static __always_inline unsigned
-rb_event_index(struct ring_buffer_event *event)
+rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
 {
        unsigned long addr = (unsigned long)event;
 
-       return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
+       addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
+
+       return addr - BUF_PAGE_HDR_SIZE;
 }
 
 static void rb_inc_iter(struct ring_buffer_iter *iter)
@@ -2605,6 +2470,7 @@ static inline void
 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
              unsigned long tail, struct rb_event_info *info)
 {
+       unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
        struct buffer_page *tail_page = info->tail_page;
        struct ring_buffer_event *event;
        unsigned long length = info->length;
@@ -2613,13 +2479,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
         * Only the event that crossed the page boundary
         * must fill the old tail_page with padding.
         */
-       if (tail >= BUF_PAGE_SIZE) {
+       if (tail >= bsize) {
                /*
                 * If the page was filled, then we still need
                 * to update the real_end. Reset it to zero
                 * and the reader will ignore it.
                 */
-               if (tail == BUF_PAGE_SIZE)
+               if (tail == bsize)
                        tail_page->real_end = 0;
 
                local_sub(length, &tail_page->write);
@@ -2647,7 +2513,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
         * If we are less than the minimum size, we don't need to
         * worry about it.
         */
-       if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
+       if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
                /* No room for any events */
 
                /* Mark the rest of the page with padding */
@@ -2662,19 +2528,19 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
        }
 
        /* Put in a discarded event */
-       event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
+       event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
        event->type_len = RINGBUF_TYPE_PADDING;
        /* time delta must be non zero */
        event->time_delta = 1;
 
        /* account for padding bytes */
-       local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+       local_add(bsize - tail, &cpu_buffer->entries_bytes);
 
        /* Make sure the padding is visible before the tail_page->write update */
        smp_wmb();
 
        /* Set write to end of buffer */
-       length = (tail + length) - BUF_PAGE_SIZE;
+       length = (tail + length) - bsize;
        local_sub(length, &tail_page->write);
 }
 
@@ -2788,7 +2654,8 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 
 /* Slow path */
 static struct ring_buffer_event *
-rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
+rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
+                 struct ring_buffer_event *event, u64 delta, bool abs)
 {
        if (abs)
                event->type_len = RINGBUF_TYPE_TIME_STAMP;
@@ -2796,7 +2663,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
                event->type_len = RINGBUF_TYPE_TIME_EXTEND;
 
        /* Not the first event on the page, or not delta? */
-       if (abs || rb_event_index(event)) {
+       if (abs || rb_event_index(cpu_buffer, event)) {
                event->time_delta = delta & TS_MASK;
                event->array[0] = delta >> TS_SHIFT;
        } else {
@@ -2826,7 +2693,7 @@ rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
                  (unsigned long long)info->ts,
                  (unsigned long long)info->before,
                  (unsigned long long)info->after,
-                 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0),
+                 (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
                  sched_clock_stable() ? "" :
                  "If you just came from a suspend/resume,\n"
                  "please switch to the trace global clock:\n"
@@ -2870,7 +2737,7 @@ static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
                if (!abs)
                        info->delta = 0;
        }
-       *event = rb_add_time_stamp(*event, info->delta, abs);
+       *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
        *length -= RB_LEN_TIME_EXTEND;
        *delta = 0;
 }
@@ -2954,10 +2821,10 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
        struct buffer_page *bpage;
        unsigned long addr;
 
-       new_index = rb_event_index(event);
+       new_index = rb_event_index(cpu_buffer, event);
        old_index = new_index + rb_event_ts_length(event);
        addr = (unsigned long)event;
-       addr &= PAGE_MASK;
+       addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
 
        bpage = READ_ONCE(cpu_buffer->tail_page);
 
@@ -3344,6 +3211,76 @@ EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 #define CHECK_FULL_PAGE                1L
 
 #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
+
+static const char *show_irq_str(int bits)
+{
+       const char *type[] = {
+               ".",    // 0
+               "s",    // 1
+               "h",    // 2
+               "Hs",   // 3
+               "n",    // 4
+               "Ns",   // 5
+               "Nh",   // 6
+               "NHs",  // 7
+       };
+
+       return type[bits];
+}
+
+/* Assume this is a trace event */
+static const char *show_flags(struct ring_buffer_event *event)
+{
+       struct trace_entry *entry;
+       int bits = 0;
+
+       if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
+               return "X";
+
+       entry = ring_buffer_event_data(event);
+
+       if (entry->flags & TRACE_FLAG_SOFTIRQ)
+               bits |= 1;
+
+       if (entry->flags & TRACE_FLAG_HARDIRQ)
+               bits |= 2;
+
+       if (entry->flags & TRACE_FLAG_NMI)
+               bits |= 4;
+
+       return show_irq_str(bits);
+}
+
+static const char *show_irq(struct ring_buffer_event *event)
+{
+       struct trace_entry *entry;
+
+       if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
+               return "";
+
+       entry = ring_buffer_event_data(event);
+       if (entry->flags & TRACE_FLAG_IRQS_OFF)
+               return "d";
+       return "";
+}
+
+static const char *show_interrupt_level(void)
+{
+       unsigned long pc = preempt_count();
+       unsigned char level = 0;
+
+       if (pc & SOFTIRQ_OFFSET)
+               level |= 1;
+
+       if (pc & HARDIRQ_MASK)
+               level |= 2;
+
+       if (pc & NMI_MASK)
+               level |= 4;
+
+       return show_irq_str(level);
+}
+
 static void dump_buffer_page(struct buffer_data_page *bpage,
                             struct rb_event_info *info,
                             unsigned long tail)
@@ -3364,34 +3301,57 @@ static void dump_buffer_page(struct buffer_data_page *bpage,
                case RINGBUF_TYPE_TIME_EXTEND:
                        delta = rb_event_time_stamp(event);
                        ts += delta;
-                       pr_warn("  [%lld] delta:%lld TIME EXTEND\n", ts, delta);
+                       pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
+                               e, ts, delta);
                        break;
 
                case RINGBUF_TYPE_TIME_STAMP:
                        delta = rb_event_time_stamp(event);
                        ts = rb_fix_abs_ts(delta, ts);
-                       pr_warn("  [%lld] absolute:%lld TIME STAMP\n", ts, delta);
+                       pr_warn(" 0x%x:  [%lld] absolute:%lld TIME STAMP\n",
+                               e, ts, delta);
                        break;
 
                case RINGBUF_TYPE_PADDING:
                        ts += event->time_delta;
-                       pr_warn("  [%lld] delta:%d PADDING\n", ts, event->time_delta);
+                       pr_warn(" 0x%x:  [%lld] delta:%d PADDING\n",
+                               e, ts, event->time_delta);
                        break;
 
                case RINGBUF_TYPE_DATA:
                        ts += event->time_delta;
-                       pr_warn("  [%lld] delta:%d\n", ts, event->time_delta);
+                       pr_warn(" 0x%x:  [%lld] delta:%d %s%s\n",
+                               e, ts, event->time_delta,
+                               show_flags(event), show_irq(event));
                        break;
 
                default:
                        break;
                }
        }
+       pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
 }
 
 static DEFINE_PER_CPU(atomic_t, checking);
 static atomic_t ts_dump;
 
+#define buffer_warn_return(fmt, ...)                                   \
+       do {                                                            \
+               /* If another report is happening, ignore this one */   \
+               if (atomic_inc_return(&ts_dump) != 1) {                 \
+                       atomic_dec(&ts_dump);                           \
+                       goto out;                                       \
+               }                                                       \
+               atomic_inc(&cpu_buffer->record_disabled);               \
+               pr_warn(fmt, ##__VA_ARGS__);                            \
+               dump_buffer_page(bpage, info, tail);                    \
+               atomic_dec(&ts_dump);                                   \
+               /* There are some cases in boot up that this can happen */ \
+               if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING))       \
+                       /* Do not re-enable checking */                 \
+                       return;                                         \
+       } while (0)
+
 /*
  * Check if the current event time stamp matches the deltas on
  * the buffer page.
@@ -3445,7 +3405,12 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
 
                case RINGBUF_TYPE_TIME_STAMP:
                        delta = rb_event_time_stamp(event);
-                       ts = rb_fix_abs_ts(delta, ts);
+                       delta = rb_fix_abs_ts(delta, ts);
+                       if (delta < ts) {
+                               buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
+                                                  cpu_buffer->cpu, ts, delta);
+                       }
+                       ts = delta;
                        break;
 
                case RINGBUF_TYPE_PADDING:
@@ -3462,23 +3427,11 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
        }
        if ((full && ts > info->ts) ||
            (!full && ts + info->delta != info->ts)) {
-               /* If another report is happening, ignore this one */
-               if (atomic_inc_return(&ts_dump) != 1) {
-                       atomic_dec(&ts_dump);
-                       goto out;
-               }
-               atomic_inc(&cpu_buffer->record_disabled);
-               /* There's some cases in boot up that this can happen */
-               WARN_ON_ONCE(system_state != SYSTEM_BOOTING);
-               pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n",
-                       cpu_buffer->cpu,
-                       ts + info->delta, info->ts, info->delta,
-                       info->before, info->after,
-                       full ? " (full)" : "");
-               dump_buffer_page(bpage, info, tail);
-               atomic_dec(&ts_dump);
-               /* Do not re-enable checking */
-               return;
+               buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
+                                  cpu_buffer->cpu,
+                                  ts + info->delta, info->ts, info->delta,
+                                  info->before, info->after,
+                                  full ? " (full)" : "", show_interrupt_level());
        }
 out:
        atomic_dec(this_cpu_ptr(&checking));
@@ -3498,16 +3451,14 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        struct ring_buffer_event *event;
        struct buffer_page *tail_page;
        unsigned long tail, write, w;
-       bool a_ok;
-       bool b_ok;
 
        /* Don't let the compiler play games with cpu_buffer->tail_page */
        tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
 
  /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK;
        barrier();
-       b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
-       a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+       rb_time_read(&cpu_buffer->before_stamp, &info->before);
+       rb_time_read(&cpu_buffer->write_stamp, &info->after);
        barrier();
        info->ts = rb_time_stamp(cpu_buffer->buffer);
 
@@ -3522,7 +3473,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                if (!w) {
                        /* Use the sub-buffer timestamp */
                        info->delta = 0;
-               } else if (unlikely(!a_ok || !b_ok || info->before != info->after)) {
+               } else if (unlikely(info->before != info->after)) {
                        info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
                        info->length += RB_LEN_TIME_EXTEND;
                } else {
@@ -3544,7 +3495,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
        tail = write - info->length;
 
        /* See if we shot pass the end of this buffer page */
-       if (unlikely(write > BUF_PAGE_SIZE)) {
+       if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
                check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
                return rb_move_tail(cpu_buffer, tail, info);
        }
@@ -3571,8 +3522,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                /* SLOW PATH - Interrupted between A and C */
 
                /* Save the old before_stamp */
-               a_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
-               RB_WARN_ON(cpu_buffer, !a_ok);
+               rb_time_read(&cpu_buffer->before_stamp, &info->before);
 
                /*
                 * Read a new timestamp and update the before_stamp to make
@@ -3584,9 +3534,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                rb_time_set(&cpu_buffer->before_stamp, ts);
 
                barrier();
- /*E*/         a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
-               /* Was interrupted before here, write_stamp must be valid */
-               RB_WARN_ON(cpu_buffer, !a_ok);
+ /*E*/         rb_time_read(&cpu_buffer->write_stamp, &info->after);
                barrier();
  /*F*/         if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
                    info->after == info->before && info->after < ts) {
@@ -3678,7 +3626,7 @@ rb_reserve_next_event(struct trace_buffer *buffer,
        if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
                add_ts_default = RB_ADD_STAMP_ABSOLUTE;
                info.length += RB_LEN_TIME_EXTEND;
-               if (info.length > BUF_MAX_DATA_SIZE)
+               if (info.length > cpu_buffer->buffer->max_data_size)
                        goto out_fail;
        } else {
                add_ts_default = RB_ADD_STAMP_NONE;
@@ -3753,7 +3701,7 @@ ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
        if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
                goto out;
 
-       if (unlikely(length > BUF_MAX_DATA_SIZE))
+       if (unlikely(length > buffer->max_data_size))
                goto out;
 
        if (unlikely(trace_recursive_lock(cpu_buffer)))
@@ -3787,7 +3735,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
        struct buffer_page *bpage = cpu_buffer->commit_page;
        struct buffer_page *start;
 
-       addr &= PAGE_MASK;
+       addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
 
        /* Do the likely case first */
        if (likely(bpage->page == (void *)addr)) {
@@ -3903,7 +3851,7 @@ int ring_buffer_write(struct trace_buffer *buffer,
        if (atomic_read(&cpu_buffer->record_disabled))
                goto out;
 
-       if (length > BUF_MAX_DATA_SIZE)
+       if (length > buffer->max_data_size)
                goto out;
 
        if (unlikely(trace_recursive_lock(cpu_buffer)))
@@ -4483,6 +4431,7 @@ static struct buffer_page *
 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
        struct buffer_page *reader = NULL;
+       unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
        unsigned long overwrite;
        unsigned long flags;
        int nr_loops = 0;
@@ -4618,7 +4567,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 #define USECS_WAIT     1000000
         for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
                /* If the write is past the end of page, a writer is still updating it */
-               if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
+               if (likely(!reader || rb_page_write(reader) <= bsize))
                        break;
 
                udelay(1);
@@ -5062,7 +5011,8 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
                return NULL;
 
        /* Holds the entire event: data and meta data */
-       iter->event = kmalloc(BUF_PAGE_SIZE, flags);
+       iter->event_size = buffer->subbuf_size;
+       iter->event = kmalloc(iter->event_size, flags);
        if (!iter->event) {
                kfree(iter);
                return NULL;
@@ -5178,19 +5128,28 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
  */
 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
 {
-       /*
-        * Earlier, this method returned
-        *      BUF_PAGE_SIZE * buffer->nr_pages
-        * Since the nr_pages field is now removed, we have converted this to
-        * return the per cpu buffer value.
-        */
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return 0;
 
-       return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
+       return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_size);
 
+/**
+ * ring_buffer_max_event_size - return the max data size of an event
+ * @buffer: The ring buffer.
+ *
+ * Returns the maximum size an event can be.
+ */
+unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
+{
+       /* If abs timestamp is requested, events have a timestamp too */
+       if (ring_buffer_time_stamp_abs(buffer))
+               return buffer->max_data_size - RB_LEN_TIME_EXTEND;
+       return buffer->max_data_size;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
+
 static void rb_clear_buffer_page(struct buffer_page *page)
 {
        local_set(&page->write, 0);
@@ -5461,6 +5420,9 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
        if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
                goto out;
 
+       if (buffer_a->subbuf_order != buffer_b->subbuf_order)
+               goto out;
+
        ret = -EAGAIN;
 
        if (atomic_read(&buffer_a->record_disabled))
@@ -5532,40 +5494,48 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * Returns:
  *  The page allocated, or ERR_PTR
  */
-void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
+struct buffer_data_read_page *
+ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
-       struct buffer_data_page *bpage = NULL;
+       struct buffer_data_read_page *bpage = NULL;
        unsigned long flags;
        struct page *page;
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return ERR_PTR(-ENODEV);
 
+       bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
+       if (!bpage)
+               return ERR_PTR(-ENOMEM);
+
+       bpage->order = buffer->subbuf_order;
        cpu_buffer = buffer->buffers[cpu];
        local_irq_save(flags);
        arch_spin_lock(&cpu_buffer->lock);
 
        if (cpu_buffer->free_page) {
-               bpage = cpu_buffer->free_page;
+               bpage->data = cpu_buffer->free_page;
                cpu_buffer->free_page = NULL;
        }
 
        arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
 
-       if (bpage)
+       if (bpage->data)
                goto out;
 
-       page = alloc_pages_node(cpu_to_node(cpu),
-                               GFP_KERNEL | __GFP_NORETRY, 0);
-       if (!page)
+       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY,
+                               cpu_buffer->buffer->subbuf_order);
+       if (!page) {
+               kfree(bpage);
                return ERR_PTR(-ENOMEM);
+       }
 
-       bpage = page_address(page);
+       bpage->data = page_address(page);
 
  out:
-       rb_init_page(bpage);
+       rb_init_page(bpage->data);
 
        return bpage;
 }
@@ -5575,14 +5545,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
  * ring_buffer_free_read_page - free an allocated read page
  * @buffer: the buffer the page was allocate for
  * @cpu: the cpu buffer the page came from
- * @data: the page to free
+ * @data_page: the page to free
  *
  * Free a page allocated from ring_buffer_alloc_read_page.
  */
-void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
+                               struct buffer_data_read_page *data_page)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
-       struct buffer_data_page *bpage = data;
+       struct buffer_data_page *bpage = data_page->data;
        struct page *page = virt_to_page(bpage);
        unsigned long flags;
 
@@ -5591,8 +5562,12 @@ void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data
 
        cpu_buffer = buffer->buffers[cpu];
 
-       /* If the page is still in use someplace else, we can't reuse it */
-       if (page_ref_count(page) > 1)
+       /*
+        * If the page is still in use someplace else, or order of the page
+        * is different from the subbuffer order of the buffer -
+        * we can't reuse it
+        */
+       if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
                goto out;
 
        local_irq_save(flags);
@@ -5607,7 +5582,8 @@ void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data
        local_irq_restore(flags);
 
  out:
-       free_page((unsigned long)bpage);
+       free_pages((unsigned long)bpage, data_page->order);
+       kfree(data_page);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
 
@@ -5628,9 +5604,10 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
  *     rpage = ring_buffer_alloc_read_page(buffer, cpu);
  *     if (IS_ERR(rpage))
  *             return PTR_ERR(rpage);
- *     ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
+ *     ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
  *     if (ret >= 0)
- *             process_page(rpage, ret);
+ *             process_page(ring_buffer_read_page_data(rpage), ret);
+ *     ring_buffer_free_read_page(buffer, cpu, rpage);
  *
  * When @full is set, the function will not return true unless
  * the writer is off the reader page.
@@ -5645,7 +5622,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
  *  <0 if no data has been transferred.
  */
 int ring_buffer_read_page(struct trace_buffer *buffer,
-                         void **data_page, size_t len, int cpu, int full)
+                         struct buffer_data_read_page *data_page,
+                         size_t len, int cpu, int full)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
@@ -5670,10 +5648,12 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
 
        len -= BUF_PAGE_HDR_SIZE;
 
-       if (!data_page)
+       if (!data_page || !data_page->data)
+               goto out;
+       if (data_page->order != buffer->subbuf_order)
                goto out;
 
-       bpage = *data_page;
+       bpage = data_page->data;
        if (!bpage)
                goto out;
 
@@ -5767,11 +5747,11 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
                /* swap the pages */
                rb_init_page(bpage);
                bpage = reader->page;
-               reader->page = *data_page;
+               reader->page = data_page->data;
                local_set(&reader->write, 0);
                local_set(&reader->entries, 0);
                reader->read = 0;
-               *data_page = bpage;
+               data_page->data = bpage;
 
                /*
                 * Use the real_end for the data size,
@@ -5793,7 +5773,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
                /* If there is room at the end of the page to save the
                 * missed events, then record it there.
                 */
-               if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
+               if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
                        memcpy(&bpage->data[commit], &missed_events,
                               sizeof(missed_events));
                        local_add(RB_MISSED_STORED, &bpage->commit);
@@ -5805,8 +5785,8 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
        /*
         * This page may be off to user land. Zero it out here.
         */
-       if (commit < BUF_PAGE_SIZE)
-               memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
+       if (commit < buffer->subbuf_size)
+               memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
 
  out_unlock:
        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
@@ -5816,6 +5796,209 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
+/**
+ * ring_buffer_read_page_data - get pointer to the data in the page.
+ * @page:  the page to get the data from
+ *
+ * Returns pointer to the actual data in this page.
+ */
+void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
+{
+       return page->data;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);
+
+/**
+ * ring_buffer_subbuf_size_get - get size of the sub buffer.
+ * @buffer: the buffer to get the sub buffer size from
+ *
+ * Returns size of the sub buffer, in bytes.
+ */
+int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
+{
+       return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
+
+/**
+ * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
+ * @buffer: The ring_buffer to get the system sub page order from
+ *
+ * By default, one ring buffer sub page equals one system page. This parameter
+ * is configurable, per ring buffer. The size of the ring buffer sub page can be
+ * extended, but must be an order of system page size.
+ *
+ * Returns the order of buffer sub page size, in system pages:
+ * 0 means the sub buffer size is 1 system page and so forth.
+ * In case of an error < 0 is returned.
+ */
+int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
+{
+       if (!buffer)
+               return -EINVAL;
+
+       return buffer->subbuf_order;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
+
+/**
+ * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
+ * @buffer: The ring_buffer to set the new page size.
+ * @order: Order of the system pages in one sub buffer page
+ *
+ * By default, one ring buffer page equals one system page. This API can be
+ * used to set a new size of the ring buffer page. The size must be an order of
+ * system page size, that's why the input parameter @order is the order of
+ * system pages that are allocated for one ring buffer page:
+ *  0 - 1 system page
+ *  1 - 2 system pages
+ *  2 - 4 system pages
+ *  ...
+ *
+ * Returns 0 on success or < 0 in case of an error.
+ */
+int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
+{
+       struct ring_buffer_per_cpu *cpu_buffer;
+       struct buffer_page *bpage, *tmp;
+       int old_order, old_size;
+       int nr_pages;
+       int psize;
+       int err;
+       int cpu;
+
+       if (!buffer || order < 0)
+               return -EINVAL;
+
+       if (buffer->subbuf_order == order)
+               return 0;
+
+       psize = (1 << order) * PAGE_SIZE;
+       if (psize <= BUF_PAGE_HDR_SIZE)
+               return -EINVAL;
+
+       old_order = buffer->subbuf_order;
+       old_size = buffer->subbuf_size;
+
+       /* prevent another thread from changing buffer sizes */
+       mutex_lock(&buffer->mutex);
+       atomic_inc(&buffer->record_disabled);
+
+       /* Make sure all commits have finished */
+       synchronize_rcu();
+
+       buffer->subbuf_order = order;
+       buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
+
+       /* Make sure all new buffers are allocated, before deleting the old ones */
+       for_each_buffer_cpu(buffer, cpu) {
+
+               if (!cpumask_test_cpu(cpu, buffer->cpumask))
+                       continue;
+
+               cpu_buffer = buffer->buffers[cpu];
+
+               /* Update the number of pages to match the new size */
+               nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
+               nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
+
+               /* we need a minimum of two pages */
+               if (nr_pages < 2)
+                       nr_pages = 2;
+
+               cpu_buffer->nr_pages_to_update = nr_pages;
+
+               /* Include the reader page */
+               nr_pages++;
+
+               /* Allocate the new size buffer */
+               INIT_LIST_HEAD(&cpu_buffer->new_pages);
+               if (__rb_allocate_pages(cpu_buffer, nr_pages,
+                                       &cpu_buffer->new_pages)) {
+                       /* not enough memory for new pages */
+                       err = -ENOMEM;
+                       goto error;
+               }
+       }
+
+       for_each_buffer_cpu(buffer, cpu) {
+
+               if (!cpumask_test_cpu(cpu, buffer->cpumask))
+                       continue;
+
+               cpu_buffer = buffer->buffers[cpu];
+
+               /* Clear the head bit to make the link list normal to read */
+               rb_head_page_deactivate(cpu_buffer);
+
+               /* Now walk the list and free all the old sub buffers */
+               list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
+                       list_del_init(&bpage->list);
+                       free_buffer_page(bpage);
+               }
+               /* The above loop stopped at the last page needing to be freed */
+               bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
+               free_buffer_page(bpage);
+
+               /* Free the current reader page */
+               free_buffer_page(cpu_buffer->reader_page);
+
+               /* One page was allocated for the reader page */
+               cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
+                                                    struct buffer_page, list);
+               list_del_init(&cpu_buffer->reader_page->list);
+
+               /* The cpu_buffer pages are a link list with no head */
+               cpu_buffer->pages = cpu_buffer->new_pages.next;
+               cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
+               cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
+
+               /* Clear the new_pages list */
+               INIT_LIST_HEAD(&cpu_buffer->new_pages);
+
+               cpu_buffer->head_page
+                       = list_entry(cpu_buffer->pages, struct buffer_page, list);
+               cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
+
+               cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
+               cpu_buffer->nr_pages_to_update = 0;
+
+               free_pages((unsigned long)cpu_buffer->free_page, old_order);
+               cpu_buffer->free_page = NULL;
+
+               rb_head_page_activate(cpu_buffer);
+
+               rb_check_pages(cpu_buffer);
+       }
+
+       atomic_dec(&buffer->record_disabled);
+       mutex_unlock(&buffer->mutex);
+
+       return 0;
+
+error:
+       buffer->subbuf_order = old_order;
+       buffer->subbuf_size = old_size;
+
+       atomic_dec(&buffer->record_disabled);
+       mutex_unlock(&buffer->mutex);
+
+       for_each_buffer_cpu(buffer, cpu) {
+               cpu_buffer = buffer->buffers[cpu];
+
+               if (!cpu_buffer->nr_pages_to_update)
+                       continue;
+
+               list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
+                       list_del_init(&bpage->list);
+                       free_buffer_page(bpage);
+               }
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
+
 /*
  * We only allocate new buffers, never free them if the CPU goes down.
  * If we were to free the buffer, then the user would lose any trace that was in
index aef34673d79d08bc8adcf228f212e054d813a5e5..008187ebd7fe601d6e09a431cb79d606679aad43 100644 (file)
@@ -104,10 +104,11 @@ static enum event_status read_event(int cpu)
 
 static enum event_status read_page(int cpu)
 {
+       struct buffer_data_read_page *bpage;
        struct ring_buffer_event *event;
        struct rb_page *rpage;
        unsigned long commit;
-       void *bpage;
+       int page_size;
        int *entry;
        int ret;
        int inc;
@@ -117,14 +118,15 @@ static enum event_status read_page(int cpu)
        if (IS_ERR(bpage))
                return EVENT_DROPPED;
 
-       ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
+       page_size = ring_buffer_subbuf_size_get(buffer);
+       ret = ring_buffer_read_page(buffer, bpage, page_size, cpu, 1);
        if (ret >= 0) {
-               rpage = bpage;
+               rpage = ring_buffer_read_page_data(bpage);
                /* The commit may have missed event flags set, clear them */
                commit = local_read(&rpage->commit) & 0xfffff;
                for (i = 0; i < commit && !test_error ; i += inc) {
 
-                       if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
+                       if (i >= (page_size - offsetof(struct rb_page, data))) {
                                TEST_ERROR();
                                break;
                        }
index a0defe156b57109c18b5e8a6fbcc639d9c409536..9ff8a439d6746fe16e7256e4040aaad729fff1bc 100644 (file)
@@ -1263,10 +1263,17 @@ static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
 
 int tracing_alloc_snapshot_instance(struct trace_array *tr)
 {
+       int order;
        int ret;
 
        if (!tr->allocated_snapshot) {
 
+               /* Make the snapshot buffer have the same order as main buffer */
+               order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
+               ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
+               if (ret < 0)
+                       return ret;
+
                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
@@ -1286,6 +1293,7 @@ static void free_snapshot(struct trace_array *tr)
         * The max_tr ring buffer has some state (e.g. ring->clock) and
         * we want preserve it.
         */
+       ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
@@ -2312,7 +2320,7 @@ struct saved_cmdlines_buffer {
        unsigned *map_cmdline_to_pid;
        unsigned cmdline_num;
        int cmdline_idx;
-       char *saved_cmdlines;
+       char saved_cmdlines[];
 };
 static struct saved_cmdlines_buffer *savedcmd;
 
@@ -2326,47 +2334,58 @@ static inline void set_cmdline(int idx, const char *cmdline)
        strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
 }
 
-static int allocate_cmdlines_buffer(unsigned int val,
-                                   struct saved_cmdlines_buffer *s)
+static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+{
+       int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
+
+       kfree(s->map_cmdline_to_pid);
+       free_pages((unsigned long)s, order);
+}
+
+static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val)
 {
+       struct saved_cmdlines_buffer *s;
+       struct page *page;
+       int orig_size, size;
+       int order;
+
+       /* Figure out how much is needed to hold the given number of cmdlines */
+       orig_size = sizeof(*s) + val * TASK_COMM_LEN;
+       order = get_order(orig_size);
+       size = 1 << (order + PAGE_SHIFT);
+       page = alloc_pages(GFP_KERNEL, order);
+       if (!page)
+               return NULL;
+
+       s = page_address(page);
+       memset(s, 0, sizeof(*s));
+
+       /* Round up to actual allocation */
+       val = (size - sizeof(*s)) / TASK_COMM_LEN;
+       s->cmdline_num = val;
+
        s->map_cmdline_to_pid = kmalloc_array(val,
                                              sizeof(*s->map_cmdline_to_pid),
                                              GFP_KERNEL);
-       if (!s->map_cmdline_to_pid)
-               return -ENOMEM;
-
-       s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
-       if (!s->saved_cmdlines) {
-               kfree(s->map_cmdline_to_pid);
-               return -ENOMEM;
+       if (!s->map_cmdline_to_pid) {
+               free_saved_cmdlines_buffer(s);
+               return NULL;
        }
 
        s->cmdline_idx = 0;
-       s->cmdline_num = val;
        memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
               sizeof(s->map_pid_to_cmdline));
        memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
               val * sizeof(*s->map_cmdline_to_pid));
 
-       return 0;
+       return s;
 }
 
 static int trace_create_savedcmd(void)
 {
-       int ret;
+       savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
 
-       savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
-       if (!savedcmd)
-               return -ENOMEM;
-
-       ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
-       if (ret < 0) {
-               kfree(savedcmd);
-               savedcmd = NULL;
-               return -ENOMEM;
-       }
-
-       return 0;
+       return savedcmd ? 0 : -ENOMEM;
 }
 
 int is_tracing_stopped(void)
@@ -3767,7 +3786,7 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str,
 
        /* OK if part of the temp seq buffer */
        if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
-           (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
+           (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
                return true;
 
        /* Core rodata can not be freed */
@@ -5032,7 +5051,7 @@ static int tracing_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static int tracing_release_generic_tr(struct inode *inode, struct file *file)
+int tracing_release_generic_tr(struct inode *inode, struct file *file)
 {
        struct trace_array *tr = inode->i_private;
 
@@ -6048,26 +6067,14 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
-static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
-{
-       kfree(s->saved_cmdlines);
-       kfree(s->map_cmdline_to_pid);
-       kfree(s);
-}
-
 static int tracing_resize_saved_cmdlines(unsigned int val)
 {
        struct saved_cmdlines_buffer *s, *savedcmd_temp;
 
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       s = allocate_cmdlines_buffer(val);
        if (!s)
                return -ENOMEM;
 
-       if (allocate_cmdlines_buffer(val, s) < 0) {
-               kfree(s);
-               return -ENOMEM;
-       }
-
        preempt_disable();
        arch_spin_lock(&trace_cmdline_lock);
        savedcmd_temp = savedcmd;
@@ -6946,8 +6953,8 @@ waitagain:
                goto out;
        }
 
-       if (cnt >= PAGE_SIZE)
-               cnt = PAGE_SIZE - 1;
+       if (cnt >= TRACE_SEQ_BUFFER_SIZE)
+               cnt = TRACE_SEQ_BUFFER_SIZE - 1;
 
        /* reset all but tr, trace, and overruns */
        trace_iterator_reset(iter);
@@ -7292,8 +7299,9 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        enum event_trigger_type tt = ETT_NONE;
        struct trace_buffer *buffer;
        struct print_entry *entry;
+       int meta_size;
        ssize_t written;
-       int size;
+       size_t size;
        int len;
 
 /* Used in tracing_mark_raw_write() as well */
@@ -7306,23 +7314,44 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        if (!(tr->trace_flags & TRACE_ITER_MARKERS))
                return -EINVAL;
 
-       if (cnt > TRACE_BUF_SIZE)
-               cnt = TRACE_BUF_SIZE;
-
-       BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
+       if ((ssize_t)cnt < 0)
+               return -EINVAL;
 
-       size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
+       meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
+ again:
+       size = cnt + meta_size;
 
        /* If less than "<faulted>", then make sure we can still add that */
        if (cnt < FAULTED_SIZE)
                size += FAULTED_SIZE - cnt;
 
+       if (size > TRACE_SEQ_BUFFER_SIZE) {
+               cnt -= size - TRACE_SEQ_BUFFER_SIZE;
+               goto again;
+       }
+
        buffer = tr->array_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
                                            tracing_gen_ctx());
-       if (unlikely(!event))
+       if (unlikely(!event)) {
+               /*
+                * If the size was greater than what was allowed, then
+                * make it smaller and try again.
+                */
+               if (size > ring_buffer_max_event_size(buffer)) {
+                       /* cnt < FAULTED size should never be bigger than max */
+                       if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
+                               return -EBADF;
+                       cnt = ring_buffer_max_event_size(buffer) - meta_size;
+                       /* The above should only happen once */
+                       if (WARN_ON_ONCE(cnt + meta_size == size))
+                               return -EBADF;
+                       goto again;
+               }
+
                /* Ring buffer disabled, return as if not open for write */
                return -EBADF;
+       }
 
        entry = ring_buffer_event_data(event);
        entry->ip = _THIS_IP_;
@@ -7357,9 +7386,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        return written;
 }
 
-/* Limit it for now to 3K (including tag) */
-#define RAW_DATA_MAX_SIZE (1024*3)
-
 static ssize_t
 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
                                        size_t cnt, loff_t *fpos)
@@ -7381,19 +7407,18 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
                return -EINVAL;
 
        /* The marker must at least have a tag id */
-       if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
+       if (cnt < sizeof(unsigned int))
                return -EINVAL;
 
-       if (cnt > TRACE_BUF_SIZE)
-               cnt = TRACE_BUF_SIZE;
-
-       BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
-
        size = sizeof(*entry) + cnt;
        if (cnt < FAULT_SIZE_ID)
                size += FAULT_SIZE_ID - cnt;
 
        buffer = tr->array_buffer.buffer;
+
+       if (size > ring_buffer_max_event_size(buffer))
+               return -EINVAL;
+
        event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
                                            tracing_gen_ctx());
        if (!event)
@@ -7578,6 +7603,7 @@ struct ftrace_buffer_info {
        struct trace_iterator   iter;
        void                    *spare;
        unsigned int            spare_cpu;
+       unsigned int            spare_size;
        unsigned int            read;
 };
 
@@ -8282,6 +8308,8 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 {
        struct ftrace_buffer_info *info = filp->private_data;
        struct trace_iterator *iter = &info->iter;
+       void *trace_data;
+       int page_size;
        ssize_t ret = 0;
        ssize_t size;
 
@@ -8293,6 +8321,17 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                return -EBUSY;
 #endif
 
+       page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
+
+       /* Make sure the spare matches the current sub buffer size */
+       if (info->spare) {
+               if (page_size != info->spare_size) {
+                       ring_buffer_free_read_page(iter->array_buffer->buffer,
+                                                  info->spare_cpu, info->spare);
+                       info->spare = NULL;
+               }
+       }
+
        if (!info->spare) {
                info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
                                                          iter->cpu_file);
@@ -8301,19 +8340,20 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                        info->spare = NULL;
                } else {
                        info->spare_cpu = iter->cpu_file;
+                       info->spare_size = page_size;
                }
        }
        if (!info->spare)
                return ret;
 
        /* Do we have previous read data to read? */
-       if (info->read < PAGE_SIZE)
+       if (info->read < page_size)
                goto read;
 
  again:
        trace_access_lock(iter->cpu_file);
        ret = ring_buffer_read_page(iter->array_buffer->buffer,
-                                   &info->spare,
+                                   info->spare,
                                    count,
                                    iter->cpu_file, 0);
        trace_access_unlock(iter->cpu_file);
@@ -8334,11 +8374,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
        info->read = 0;
  read:
-       size = PAGE_SIZE - info->read;
+       size = page_size - info->read;
        if (size > count)
                size = count;
-
-       ret = copy_to_user(ubuf, info->spare + info->read, size);
+       trace_data = ring_buffer_read_page_data(info->spare);
+       ret = copy_to_user(ubuf, trace_data + info->read, size);
        if (ret == size)
                return -EFAULT;
 
@@ -8449,6 +8489,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                .spd_release    = buffer_spd_release,
        };
        struct buffer_ref *ref;
+       int page_size;
        int entries, i;
        ssize_t ret = 0;
 
@@ -8457,13 +8498,14 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                return -EBUSY;
 #endif
 
-       if (*ppos & (PAGE_SIZE - 1))
+       page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
+       if (*ppos & (page_size - 1))
                return -EINVAL;
 
-       if (len & (PAGE_SIZE - 1)) {
-               if (len < PAGE_SIZE)
+       if (len & (page_size - 1)) {
+               if (len < page_size)
                        return -EINVAL;
-               len &= PAGE_MASK;
+               len &= (~(page_size - 1));
        }
 
        if (splice_grow_spd(pipe, &spd))
@@ -8473,7 +8515,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        trace_access_lock(iter->cpu_file);
        entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 
-       for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
+       for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
                struct page *page;
                int r;
 
@@ -8494,7 +8536,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                }
                ref->cpu = iter->cpu_file;
 
-               r = ring_buffer_read_page(ref->buffer, &ref->page,
+               r = ring_buffer_read_page(ref->buffer, ref->page,
                                          len, iter->cpu_file, 1);
                if (r < 0) {
                        ring_buffer_free_read_page(ref->buffer, ref->cpu,
@@ -8503,14 +8545,14 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                        break;
                }
 
-               page = virt_to_page(ref->page);
+               page = virt_to_page(ring_buffer_read_page_data(ref->page));
 
                spd.pages[i] = page;
-               spd.partial[i].len = PAGE_SIZE;
+               spd.partial[i].len = page_size;
                spd.partial[i].offset = 0;
                spd.partial[i].private = (unsigned long)ref;
                spd.nr_pages++;
-               *ppos += PAGE_SIZE;
+               *ppos += page_size;
 
                entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
        }
@@ -9354,6 +9396,103 @@ static const struct file_operations buffer_percent_fops = {
        .llseek         = default_llseek,
 };
 
+static ssize_t
+buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       struct trace_array *tr = filp->private_data;
+       size_t size;
+       char buf[64];
+       int order;
+       int r;
+
+       order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
+       size = (PAGE_SIZE << order) / 1024;
+
+       r = sprintf(buf, "%zd\n", size);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
+                        size_t cnt, loff_t *ppos)
+{
+       struct trace_array *tr = filp->private_data;
+       unsigned long val;
+       int old_order;
+       int order;
+       int pages;
+       int ret;
+
+       ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+       if (ret)
+               return ret;
+
+       val *= 1024; /* value passed in is in KB */
+
+       pages = DIV_ROUND_UP(val, PAGE_SIZE);
+       order = fls(pages - 1);
+
+       /* limit between 1 and 128 system pages */
+       if (order < 0 || order > 7)
+               return -EINVAL;
+
+       /* Do not allow tracing while changing the order of the ring buffer */
+       tracing_stop_tr(tr);
+
+       old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
+       if (old_order == order)
+               goto out;
+
+       ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
+       if (ret)
+               goto out;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+
+       if (!tr->allocated_snapshot)
+               goto out_max;
+
+       ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
+       if (ret) {
+               /* Put back the old order */
+               cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
+               if (WARN_ON_ONCE(cnt)) {
+                       /*
+                        * AARGH! We are left with different orders!
+                        * The max buffer is our "snapshot" buffer.
+                        * When a tracer needs a snapshot (one of the
+                        * latency tracers), it swaps the max buffer
+                        * with the saved snap shot. We succeeded to
+                        * update the order of the main buffer, but failed to
+                        * update the order of the max buffer. But when we tried
+                        * to reset the main buffer to the original size, we
+                        * failed there too. This is very unlikely to
+                        * happen, but if it does, warn and kill all
+                        * tracing.
+                        */
+                       tracing_disabled = 1;
+               }
+               goto out;
+       }
+ out_max:
+#endif
+       (*ppos)++;
+ out:
+       if (ret)
+               cnt = ret;
+       tracing_start_tr(tr);
+       return cnt;
+}
+
+static const struct file_operations buffer_subbuf_size_fops = {
+       .open           = tracing_open_generic_tr,
+       .read           = buffer_subbuf_size_read,
+       .write          = buffer_subbuf_size_write,
+       .release        = tracing_release_generic_tr,
+       .llseek         = default_llseek,
+};
+
 static struct dentry *trace_instance_dir;
 
 static void
@@ -9504,7 +9643,8 @@ static int trace_array_create_dir(struct trace_array *tr)
        return ret;
 }
 
-static struct trace_array *trace_array_create(const char *name)
+static struct trace_array *
+trace_array_create_systems(const char *name, const char *systems)
 {
        struct trace_array *tr;
        int ret;
@@ -9524,6 +9664,12 @@ static struct trace_array *trace_array_create(const char *name)
        if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
                goto out_free_tr;
 
+       if (systems) {
+               tr->system_names = kstrdup_const(systems, GFP_KERNEL);
+               if (!tr->system_names)
+                       goto out_free_tr;
+       }
+
        tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
 
        cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
@@ -9570,12 +9716,18 @@ static struct trace_array *trace_array_create(const char *name)
        free_trace_buffers(tr);
        free_cpumask_var(tr->pipe_cpumask);
        free_cpumask_var(tr->tracing_cpumask);
+       kfree_const(tr->system_names);
        kfree(tr->name);
        kfree(tr);
 
        return ERR_PTR(ret);
 }
 
+static struct trace_array *trace_array_create(const char *name)
+{
+       return trace_array_create_systems(name, NULL);
+}
+
 static int instance_mkdir(const char *name)
 {
        struct trace_array *tr;
@@ -9601,6 +9753,7 @@ out_unlock:
 /**
  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
  * @name: The name of the trace array to be looked up/created.
+ * @systems: A list of systems to create event directories for (NULL for all)
  *
  * Returns pointer to trace array with given name.
  * NULL, if it cannot be created.
@@ -9614,7 +9767,7 @@ out_unlock:
  * trace_array_put() is called, user space can not delete it.
  *
  */
-struct trace_array *trace_array_get_by_name(const char *name)
+struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
 {
        struct trace_array *tr;
 
@@ -9626,7 +9779,7 @@ struct trace_array *trace_array_get_by_name(const char *name)
                        goto out_unlock;
        }
 
-       tr = trace_array_create(name);
+       tr = trace_array_create_systems(name, systems);
 
        if (IS_ERR(tr))
                tr = NULL;
@@ -9673,6 +9826,7 @@ static int __remove_instance(struct trace_array *tr)
 
        free_cpumask_var(tr->pipe_cpumask);
        free_cpumask_var(tr->tracing_cpumask);
+       kfree_const(tr->system_names);
        kfree(tr->name);
        kfree(tr);
 
@@ -9805,6 +9959,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
        trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
                        tr, &buffer_percent_fops);
 
+       trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
+                         tr, &buffer_subbuf_size_fops);
+
        create_trace_options_dir(tr);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -10391,7 +10548,7 @@ __init static void enable_instances(void)
                if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
                        do_allocate_snapshot(tok);
 
-               tr = trace_array_get_by_name(tok);
+               tr = trace_array_get_by_name(tok, NULL);
                if (!tr) {
                        pr_warn("Failed to create instance buffer %s\n", curr_str);
                        continue;
index 0489e72c8169c19754159efa623057ae72a0c5db..00f873910c5d9c835b335ba2731c1062c7427057 100644 (file)
@@ -377,6 +377,7 @@ struct trace_array {
        unsigned char           trace_flags_index[TRACE_FLAGS_MAX_SIZE];
        unsigned int            flags;
        raw_spinlock_t          start_lock;
+       const char              *system_names;
        struct list_head        err_log;
        struct dentry           *dir;
        struct dentry           *options;
@@ -615,6 +616,7 @@ void tracing_reset_all_online_cpus(void);
 void tracing_reset_all_online_cpus_unlocked(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+int tracing_release_generic_tr(struct inode *inode, struct file *file);
 int tracing_open_file_tr(struct inode *inode, struct file *filp);
 int tracing_release_file_tr(struct inode *inode, struct file *filp);
 int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
index 7ccc7a8e155b9e5572eb8457f300fe411d46537b..dbe29b4c6a7a07423b6baa94e243d170c4049108 100644 (file)
@@ -633,7 +633,7 @@ trace_boot_init_instances(struct xbc_node *node)
                if (!p || *p == '\0')
                        continue;
 
-               tr = trace_array_get_by_name(p);
+               tr = trace_array_get_by_name(p, NULL);
                if (!tr) {
                        pr_err("Failed to get trace instance %s\n", p);
                        continue;
index f29e815ca5b2e9bb7146f24817cc5c5b9aa78009..7c364b87352eed92e0f76137091882231f187028 100644 (file)
@@ -1893,9 +1893,9 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 }
 
 static ssize_t
-show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+show_header_page_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-       int (*func)(struct trace_seq *s) = filp->private_data;
+       struct trace_array *tr = filp->private_data;
        struct trace_seq *s;
        int r;
 
@@ -1908,7 +1908,31 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 
        trace_seq_init(s);
 
-       func(s);
+       ring_buffer_print_page_header(tr->array_buffer.buffer, s);
+       r = simple_read_from_buffer(ubuf, cnt, ppos,
+                                   s->buffer, trace_seq_used(s));
+
+       kfree(s);
+
+       return r;
+}
+
+static ssize_t
+show_header_event_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+       struct trace_seq *s;
+       int r;
+
+       if (*ppos)
+               return 0;
+
+       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       trace_seq_init(s);
+
+       ring_buffer_print_entry_header(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, trace_seq_used(s));
 
@@ -2165,10 +2189,18 @@ static const struct file_operations ftrace_tr_enable_fops = {
        .release = subsystem_release,
 };
 
-static const struct file_operations ftrace_show_header_fops = {
-       .open = tracing_open_generic,
-       .read = show_header,
+static const struct file_operations ftrace_show_header_page_fops = {
+       .open = tracing_open_generic_tr,
+       .read = show_header_page_file,
+       .llseek = default_llseek,
+       .release = tracing_release_generic_tr,
+};
+
+static const struct file_operations ftrace_show_header_event_fops = {
+       .open = tracing_open_generic_tr,
+       .read = show_header_event_file,
        .llseek = default_llseek,
+       .release = tracing_release_generic_tr,
 };
 
 static int
@@ -2896,6 +2928,27 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
        up_write(&trace_event_sem);
 }
 
+static bool event_in_systems(struct trace_event_call *call,
+                            const char *systems)
+{
+       const char *system;
+       const char *p;
+
+       if (!systems)
+               return true;
+
+       system = call->class->system;
+       p = strstr(systems, system);
+       if (!p)
+               return false;
+
+       if (p != systems && !isspace(*(p - 1)) && *(p - 1) != ',')
+               return false;
+
+       p += strlen(system);
+       return !*p || isspace(*p) || *p == ',';
+}
+
 static struct trace_event_file *
 trace_create_new_event(struct trace_event_call *call,
                       struct trace_array *tr)
@@ -2905,9 +2958,12 @@ trace_create_new_event(struct trace_event_call *call,
        struct trace_event_file *file;
        unsigned int first;
 
+       if (!event_in_systems(call, tr->system_names))
+               return NULL;
+
        file = kmem_cache_alloc(file_cachep, GFP_TRACE);
        if (!file)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        pid_list = rcu_dereference_protected(tr->filtered_pids,
                                             lockdep_is_held(&event_mutex));
@@ -2972,8 +3028,17 @@ __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
        struct trace_event_file *file;
 
        file = trace_create_new_event(call, tr);
+       /*
+        * trace_create_new_event() returns ERR_PTR(-ENOMEM) if the
+        * allocation failed, or NULL if the event is not part of tr->system_names.
+        * When the event is not part of tr->system_names, return zero, not
+        * an error.
+        */
        if (!file)
-               return -ENOMEM;
+               return 0;
+
+       if (IS_ERR(file))
+               return PTR_ERR(file);
 
        if (eventdir_initialized)
                return event_create_dir(tr->event_dir, file);
@@ -3012,8 +3077,17 @@ __trace_early_add_new_event(struct trace_event_call *call,
        int ret;
 
        file = trace_create_new_event(call, tr);
+       /*
+        * trace_create_new_event() returns ERR_PTR(-ENOMEM) if the
+        * allocation failed, or NULL if the event is not part of tr->system_names.
+        * When the event is not part of tr->system_names, return zero, not
+        * an error.
+        */
        if (!file)
-               return -ENOMEM;
+               return 0;
+
+       if (IS_ERR(file))
+               return PTR_ERR(file);
 
        ret = event_define_fields(call);
        if (ret)
@@ -3752,17 +3826,16 @@ static int events_callback(const char *name, umode_t *mode, void **data,
                return 1;
        }
 
-       if (strcmp(name, "header_page") == 0)
-               *data = ring_buffer_print_page_header;
-
-       else if (strcmp(name, "header_event") == 0)
-               *data = ring_buffer_print_entry_header;
+       if (strcmp(name, "header_page") == 0) {
+               *mode = TRACE_MODE_READ;
+               *fops = &ftrace_show_header_page_fops;
 
-       else
+       } else if (strcmp(name, "header_event") == 0) {
+               *mode = TRACE_MODE_READ;
+               *fops = &ftrace_show_header_event_fops;
+       } else
                return 0;
 
-       *mode = TRACE_MODE_READ;
-       *fops = &ftrace_show_header_fops;
        return 1;
 }
 
index 5ecf3c8bde205f360e880b608b11abcddb689a70..6ece1308d36a02dec5af3ca3cebdb9d6b427aac7 100644 (file)
@@ -4805,36 +4805,35 @@ static int parse_actions(struct hist_trigger_data *hist_data)
        int len;
 
        for (i = 0; i < hist_data->attrs->n_actions; i++) {
+               enum handler_id hid = 0;
+               char *action_str;
+
                str = hist_data->attrs->action_str[i];
 
-               if ((len = str_has_prefix(str, "onmatch("))) {
-                       char *action_str = str + len;
+               if ((len = str_has_prefix(str, "onmatch(")))
+                       hid = HANDLER_ONMATCH;
+               else if ((len = str_has_prefix(str, "onmax(")))
+                       hid = HANDLER_ONMAX;
+               else if ((len = str_has_prefix(str, "onchange(")))
+                       hid = HANDLER_ONCHANGE;
 
-                       data = onmatch_parse(tr, action_str);
-                       if (IS_ERR(data)) {
-                               ret = PTR_ERR(data);
-                               break;
-                       }
-               } else if ((len = str_has_prefix(str, "onmax("))) {
-                       char *action_str = str + len;
+               action_str = str + len;
 
-                       data = track_data_parse(hist_data, action_str,
-                                               HANDLER_ONMAX);
-                       if (IS_ERR(data)) {
-                               ret = PTR_ERR(data);
-                               break;
-                       }
-               } else if ((len = str_has_prefix(str, "onchange("))) {
-                       char *action_str = str + len;
+               switch (hid) {
+               case HANDLER_ONMATCH:
+                       data = onmatch_parse(tr, action_str);
+                       break;
+               case HANDLER_ONMAX:
+               case HANDLER_ONCHANGE:
+                       data = track_data_parse(hist_data, action_str, hid);
+                       break;
+               default:
+                       data = ERR_PTR(-EINVAL);
+                       break;
+               }
 
-                       data = track_data_parse(hist_data, action_str,
-                                               HANDLER_ONCHANGE);
-                       if (IS_ERR(data)) {
-                               ret = PTR_ERR(data);
-                               break;
-                       }
-               } else {
-                       ret = -EINVAL;
+               if (IS_ERR(data)) {
+                       ret = PTR_ERR(data);
                        break;
                }
 
index 46439e3bcec4d20b45ae8202d7a68888778fa208..b33c3861fbbbf303e78f740a0fcc41caa2a77d77 100644 (file)
@@ -1470,8 +1470,10 @@ register_snapshot_trigger(char *glob,
                          struct event_trigger_data *data,
                          struct trace_event_file *file)
 {
-       if (tracing_alloc_snapshot_instance(file->tr) != 0)
-               return 0;
+       int ret = tracing_alloc_snapshot_instance(file->tr);
+
+       if (ret < 0)
+               return ret;
 
        return register_trigger(glob, data, file);
 }
index 52f8b537dd0a0872dc1adcbe09778eb76270aad0..c4c6e0e0068be79a966775920903266df86cd8e7 100644 (file)
@@ -487,8 +487,8 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
                return -EINVAL;
 
        if (within_notrace_func(tk)) {
-               pr_warn("Could not probe notrace function %s\n",
-                       trace_kprobe_symbol(tk));
+               pr_warn("Could not probe notrace function %ps\n",
+                       (void *)trace_kprobe_address(tk));
                return -EINVAL;
        }
 
index bd0d01d00fb9d52d0736524c3274f1382fee8462..a8e28f9b9271cf6545351f7d4f7ece1fbd9d8989 100644 (file)
@@ -2444,6 +2444,9 @@ static int timerlat_fd_open(struct inode *inode, struct file *file)
        tlat = this_cpu_tmr_var();
        tlat->count = 0;
 
+       hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
+       tlat->timer.function = timerlat_irq;
+
        migrate_enable();
        return 0;
 };
@@ -2526,9 +2529,6 @@ timerlat_fd_read(struct file *file, char __user *ubuf, size_t count,
                tlat->tracing_thread = false;
                tlat->kthread = current;
 
-               hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
-               tlat->timer.function = timerlat_irq;
-
                /* Annotate now to drift new period */
                tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
 
index 4dc74d73fc1df5af9ee297611865705d44259663..34289f9c67076b2ab81ffc67bd5a518926e59ca6 100644 (file)
@@ -1159,9 +1159,12 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
        if (!(ctx->flags & TPARG_FL_TEVENT) &&
            (strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 ||
             strncmp(arg, "\\\"", 2) == 0)) {
-               /* The type of $comm must be "string", and not an array. */
-               if (parg->count || (t && strcmp(t, "string")))
+               /* The type of $comm must be "string", and not an array type. */
+               if (parg->count || (t && strcmp(t, "string"))) {
+                       trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0),
+                                       NEED_STRING_TYPE);
                        goto out;
+               }
                parg->type = find_fetch_type("string", ctx->flags);
        } else
                parg->type = find_fetch_type(t, ctx->flags);
@@ -1169,18 +1172,6 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
                trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0), BAD_TYPE);
                goto out;
        }
-       parg->offset = *size;
-       *size += parg->type->size * (parg->count ?: 1);
-
-       ret = -ENOMEM;
-       if (parg->count) {
-               len = strlen(parg->type->fmttype) + 6;
-               parg->fmt = kmalloc(len, GFP_KERNEL);
-               if (!parg->fmt)
-                       goto out;
-               snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype,
-                        parg->count);
-       }
 
        code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL);
        if (!code)
@@ -1204,6 +1195,19 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
                                goto fail;
                }
        }
+       parg->offset = *size;
+       *size += parg->type->size * (parg->count ?: 1);
+
+       if (parg->count) {
+               len = strlen(parg->type->fmttype) + 6;
+               parg->fmt = kmalloc(len, GFP_KERNEL);
+               if (!parg->fmt) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype,
+                        parg->count);
+       }
 
        ret = -EINVAL;
        /* Store operation */
index 850d9ecb6765a8bd372b214ee6f302e3374ffa93..c1877d0182691c20eba09e60a98d80daf2dd810a 100644 (file)
@@ -515,7 +515,8 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
        C(BAD_HYPHEN,           "Failed to parse single hyphen. Forgot '>'?"),  \
        C(NO_BTF_FIELD,         "This field is not found."),    \
        C(BAD_BTF_TID,          "Failed to get BTF type info."),\
-       C(BAD_TYPE4STR,         "This type does not fit for string."),
+       C(BAD_TYPE4STR,         "This type does not fit for string."),\
+       C(NEED_STRING_TYPE,     "$comm and immediate-string only accepts string type"),
 
 #undef C
 #define C(a, b)                TP_ERR_##a
index 7be97229ddf86008a4fa91c4872f3b9eb85a5181..c158d65a8a886efc64516d7dd8a333aa27da594e 100644 (file)
@@ -13,9 +13,6 @@
  * trace_seq_init() more than once to reset the trace_seq to start
  * from scratch.
  * 
- * The buffer size is currently PAGE_SIZE, although it may become dynamic
- * in the future.
- *
  * A write to the buffer will either succeed or fail. That is, unlike
  * sprintf() there will not be a partial write (well it may write into
  * the buffer but it wont update the pointers). This allows users to
index c774e560f2f957127c7e41b825164a0d102b6fd0..a4dcf0f2435213bc2b2b91d677ec18290aa53859 100644 (file)
@@ -574,7 +574,12 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
                                }
 
                                memcpy(elt->key, key, map->key_size);
-                               entry->val = elt;
+                               /*
+                                * Ensure the initialization is visible and
+                                * publish the elt.
+                                */
+                               smp_wmb();
+                               WRITE_ONCE(entry->val, elt);
                                atomic64_inc(&map->hits);
 
                                return entry->val;
index 97ce28f4d1540482082bf9d5072c39bdd25627f1..975a07f9f1cc08838d272f83d5f04a85ff2f5cd2 100644 (file)
@@ -231,9 +231,10 @@ config DEBUG_INFO
          in the "Debug information" choice below, indicating that debug
          information will be generated for build targets.
 
-# Clang is known to generate .{s,u}leb128 with symbol deltas with DWARF5, which
-# some targets may not support: https://sourceware.org/bugzilla/show_bug.cgi?id=27215
-config AS_HAS_NON_CONST_LEB128
+# Clang generates .uleb128 with label differences for DWARF v5, a feature that
+# older binutils ports do not support when utilizing RISC-V style linker
+# relaxation: https://sourceware.org/bugzilla/show_bug.cgi?id=27215
+config AS_HAS_NON_CONST_ULEB128
        def_bool $(as-instr,.uleb128 .Lexpr_end4 - .Lexpr_start3\n.Lexpr_start3:\n.Lexpr_end4:)
 
 choice
@@ -258,7 +259,7 @@ config DEBUG_INFO_NONE
 config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
        bool "Rely on the toolchain's implicit default DWARF version"
        select DEBUG_INFO
-       depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128)
+       depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_ULEB128)
        help
          The implicit default version of DWARF debug info produced by a
          toolchain changes over time.
@@ -282,7 +283,8 @@ config DEBUG_INFO_DWARF4
 config DEBUG_INFO_DWARF5
        bool "Generate DWARF Version 5 debuginfo"
        select DEBUG_INFO
-       depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128)
+       depends on !ARCH_HAS_BROKEN_DWARF5
+       depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_ULEB128)
        help
          Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
          5.0+ accepts the -gdwarf-5 flag but only had partial support for some
@@ -378,6 +380,8 @@ config DEBUG_INFO_BTF
        depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
        depends on BPF_SYSCALL
        depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
+       # pahole uses elfutils, which does not have support for Hexagon relocations
+       depends on !HEXAGON
        help
          Generate deduplicated BTF type information from DWARF debug info.
          Turning this on expects presence of pahole tool, which will convert
index 0eed92b77ba377cf1b838f083f6472af7601ba45..225bb77014600f796e972a9c0f03638c23750a06 100644 (file)
@@ -1,15 +1,21 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
- * Test cases csum_partial and csum_fold
+ * Test cases csum_partial, csum_fold, ip_fast_csum, csum_ipv6_magic
  */
 
 #include <kunit/test.h>
 #include <asm/checksum.h>
+#include <net/ip6_checksum.h>
 
 #define MAX_LEN 512
 #define MAX_ALIGN 64
 #define TEST_BUFLEN (MAX_LEN + MAX_ALIGN)
 
+#define IPv4_MIN_WORDS 5
+#define IPv4_MAX_WORDS 15
+#define NUM_IPv6_TESTS 200
+#define NUM_IP_FAST_CSUM_TESTS 181
+
 /* Values for a little endian CPU. Byte swap each half on big endian CPU. */
 static const u32 random_init_sum = 0x2847aab;
 static const u8 random_buf[] = {
@@ -209,6 +215,237 @@ static const u32 init_sums_no_overflow[] = {
        0xffff0000, 0xfffffffb,
 };
 
+static const __sum16 expected_csum_ipv6_magic[] = {
+       0x18d4, 0x3085, 0x2e4b, 0xd9f4, 0xbdc8, 0x78f,  0x1034, 0x8422, 0x6fc0,
+       0xd2f6, 0xbeb5, 0x9d3,  0x7e2a, 0x312e, 0x778e, 0xc1bb, 0x7cf2, 0x9d1e,
+       0xca21, 0xf3ff, 0x7569, 0xb02e, 0xca86, 0x7e76, 0x4539, 0x45e3, 0xf28d,
+       0xdf81, 0x8fd5, 0x3b5d, 0x8324, 0xf471, 0x83be, 0x1daf, 0x8c46, 0xe682,
+       0xd1fb, 0x6b2e, 0xe687, 0x2a33, 0x4833, 0x2d67, 0x660f, 0x2e79, 0xd65e,
+       0x6b62, 0x6672, 0x5dbd, 0x8680, 0xbaa5, 0x2229, 0x2125, 0x2d01, 0x1cc0,
+       0x6d36, 0x33c0, 0xee36, 0xd832, 0x9820, 0x8a31, 0x53c5, 0x2e2,  0xdb0e,
+       0x49ed, 0x17a7, 0x77a0, 0xd72e, 0x3d72, 0x7dc8, 0x5b17, 0xf55d, 0xa4d9,
+       0x1446, 0x5d56, 0x6b2e, 0x69a5, 0xadb6, 0xff2a, 0x92e,  0xe044, 0x3402,
+       0xbb60, 0xec7f, 0xe7e6, 0x1986, 0x32f4, 0x8f8,  0x5e00, 0x47c6, 0x3059,
+       0x3969, 0xe957, 0x4388, 0x2854, 0x3334, 0xea71, 0xa6de, 0x33f9, 0x83fc,
+       0x37b4, 0x5531, 0x3404, 0x1010, 0xed30, 0x610a, 0xc95,  0x9aed, 0x6ff,
+       0x5136, 0x2741, 0x660e, 0x8b80, 0xf71,  0xa263, 0x88af, 0x7a73, 0x3c37,
+       0x1908, 0x6db5, 0x2e92, 0x1cd2, 0x70c8, 0xee16, 0xe80,  0xcd55, 0x6e6,
+       0x6434, 0x127,  0x655d, 0x2ea0, 0xb4f4, 0xdc20, 0x5671, 0xe462, 0xe52b,
+       0xdb44, 0x3589, 0xc48f, 0xe60b, 0xd2d2, 0x66ad, 0x498,  0x436,  0xb917,
+       0xf0ca, 0x1a6e, 0x1cb7, 0xbf61, 0x2870, 0xc7e8, 0x5b30, 0xe4a5, 0x168,
+       0xadfc, 0xd035, 0xe690, 0xe283, 0xfb27, 0xe4ad, 0xb1a5, 0xf2d5, 0xc4b6,
+       0x8a30, 0xd7d5, 0x7df9, 0x91d5, 0x63ed, 0x2d21, 0x312b, 0xab19, 0xa632,
+       0x8d2e, 0xef06, 0x57b9, 0xc373, 0xbd1f, 0xa41f, 0x8444, 0x9975, 0x90cb,
+       0xc49c, 0xe965, 0x4eff, 0x5a,   0xef6d, 0xe81a, 0xe260, 0x853a, 0xff7a,
+       0x99aa, 0xb06b, 0xee19, 0xcc2c, 0xf34c, 0x7c49, 0xdac3, 0xa71e, 0xc988,
+       0x3845, 0x1014
+};
+
+static const __sum16 expected_fast_csum[] = {
+       0xda83, 0x45da, 0x4f46, 0x4e4f, 0x34e,  0xe902, 0xa5e9, 0x87a5, 0x7187,
+       0x5671, 0xf556, 0x6df5, 0x816d, 0x8f81, 0xbb8f, 0xfbba, 0x5afb, 0xbe5a,
+       0xedbe, 0xabee, 0x6aac, 0xe6b,  0xea0d, 0x67ea, 0x7e68, 0x8a7e, 0x6f8a,
+       0x3a70, 0x9f3a, 0xe89e, 0x75e8, 0x7976, 0xfa79, 0x2cfa, 0x3c2c, 0x463c,
+       0x7146, 0x7a71, 0x547a, 0xfd53, 0x99fc, 0xb699, 0x92b6, 0xdb91, 0xe8da,
+       0x5fe9, 0x1e60, 0xae1d, 0x39ae, 0xf439, 0xa1f4, 0xdda1, 0xede,  0x790f,
+       0x579,  0x1206, 0x9012, 0x2490, 0xd224, 0x5cd2, 0xa65d, 0xca7,  0x220d,
+       0xf922, 0xbf9,  0x920b, 0x1b92, 0x361c, 0x2e36, 0x4d2e, 0x24d,  0x2,
+       0xcfff, 0x90cf, 0xa591, 0x93a5, 0x7993, 0x9579, 0xc894, 0x50c8, 0x5f50,
+       0xd55e, 0xcad5, 0xf3c9, 0x8f4,  0x4409, 0x5043, 0x5b50, 0x55b,  0x2205,
+       0x1e22, 0x801e, 0x3780, 0xe137, 0x7ee0, 0xf67d, 0x3cf6, 0xa53c, 0x2ea5,
+       0x472e, 0x5147, 0xcf51, 0x1bcf, 0x951c, 0x1e95, 0xc71e, 0xe4c7, 0xc3e4,
+       0x3dc3, 0xee3d, 0xa4ed, 0xf9a4, 0xcbf8, 0x75cb, 0xb375, 0x50b4, 0x3551,
+       0xf835, 0x19f8, 0x8c1a, 0x538c, 0xad52, 0xa3ac, 0xb0a3, 0x5cb0, 0x6c5c,
+       0x5b6c, 0xc05a, 0x92c0, 0x4792, 0xbe47, 0x53be, 0x1554, 0x5715, 0x4b57,
+       0xe54a, 0x20e5, 0x21,   0xd500, 0xa1d4, 0xa8a1, 0x57a9, 0xca57, 0x5ca,
+       0x1c06, 0x4f1c, 0xe24e, 0xd9e2, 0xf0d9, 0x4af1, 0x474b, 0x8146, 0xe81,
+       0xfd0e, 0x84fd, 0x7c85, 0xba7c, 0x17ba, 0x4a17, 0x964a, 0xf595, 0xff5,
+       0x5310, 0x3253, 0x6432, 0x4263, 0x2242, 0xe121, 0x32e1, 0xf632, 0xc5f5,
+       0x21c6, 0x7d22, 0x8e7c, 0x418e, 0x5641, 0x3156, 0x7c31, 0x737c, 0x373,
+       0x2503, 0xc22a, 0x3c2,  0x4a04, 0x8549, 0x5285, 0xa352, 0xe8a3, 0x6fe8,
+       0x1a6f, 0x211a, 0xe021, 0x38e0, 0x7638, 0xf575, 0x9df5, 0x169e, 0xf116,
+       0x23f1, 0xcd23, 0xece,  0x660f, 0x4866, 0x6a48, 0x716a, 0xee71, 0xa2ee,
+       0xb8a2, 0x61b9, 0xa361, 0xf7a2, 0x26f7, 0x1127, 0x6611, 0xe065, 0x36e0,
+       0x1837, 0x3018, 0x1c30, 0x721b, 0x3e71, 0xe43d, 0x99e4, 0x9e9a, 0xb79d,
+       0xa9b7, 0xcaa,  0xeb0c, 0x4eb,  0x1305, 0x8813, 0xb687, 0xa9b6, 0xfba9,
+       0xd7fb, 0xccd8, 0x2ecd, 0x652f, 0xae65, 0x3fae, 0x3a40, 0x563a, 0x7556,
+       0x2776, 0x1228, 0xef12, 0xf9ee, 0xcef9, 0x56cf, 0xa956, 0x24a9, 0xba24,
+       0x5fba, 0x665f, 0xf465, 0x8ff4, 0x6d8f, 0x346d, 0x5f34, 0x385f, 0xd137,
+       0xb8d0, 0xacb8, 0x55ac, 0x7455, 0xe874, 0x89e8, 0xd189, 0xa0d1, 0xb2a0,
+       0xb8b2, 0x36b8, 0x5636, 0xd355, 0x8d3,  0x1908, 0x2118, 0xc21,  0x990c,
+       0x8b99, 0x158c, 0x7815, 0x9e78, 0x6f9e, 0x4470, 0x1d44, 0x341d, 0x2634,
+       0x3f26, 0x793e, 0xc79,  0xcc0b, 0x26cc, 0xd126, 0x1fd1, 0xb41f, 0xb6b4,
+       0x22b7, 0xa122, 0xa1,   0x7f01, 0x837e, 0x3b83, 0xaf3b, 0x6fae, 0x916f,
+       0xb490, 0xffb3, 0xceff, 0x50cf, 0x7550, 0x7275, 0x1272, 0x2613, 0xaa26,
+       0xd5aa, 0x7d5,  0x9607, 0x96,   0xb100, 0xf8b0, 0x4bf8, 0xdd4c, 0xeddd,
+       0x98ed, 0x2599, 0x9325, 0xeb92, 0x8feb, 0xcc8f, 0x2acd, 0x392b, 0x3b39,
+       0xcb3b, 0x6acb, 0xd46a, 0xb8d4, 0x6ab8, 0x106a, 0x2f10, 0x892f, 0x789,
+       0xc806, 0x45c8, 0x7445, 0x3c74, 0x3a3c, 0xcf39, 0xd7ce, 0x58d8, 0x6e58,
+       0x336e, 0x1034, 0xee10, 0xe9ed, 0xc2e9, 0x3fc2, 0xd53e, 0xd2d4, 0xead2,
+       0x8fea, 0x2190, 0x1162, 0xbe11, 0x8cbe, 0x6d8c, 0xfb6c, 0x6dfb, 0xd36e,
+       0x3ad3, 0xf3a,  0x870e, 0xc287, 0x53c3, 0xc54,  0x5b0c, 0x7d5a, 0x797d,
+       0xec79, 0x5dec, 0x4d5e, 0x184e, 0xd618, 0x60d6, 0xb360, 0x98b3, 0xf298,
+       0xb1f2, 0x69b1, 0xf969, 0xef9,  0xab0e, 0x21ab, 0xe321, 0x24e3, 0x8224,
+       0x5481, 0x5954, 0x7a59, 0xff7a, 0x7dff, 0x1a7d, 0xa51a, 0x46a5, 0x6b47,
+       0xe6b,  0x830e, 0xa083, 0xff9f, 0xd0ff, 0xffd0, 0xe6ff, 0x7de7, 0xc67d,
+       0xd0c6, 0x61d1, 0x3a62, 0xc3b,  0x150c, 0x1715, 0x4517, 0x5345, 0x3954,
+       0xdd39, 0xdadd, 0x32db, 0x6a33, 0xd169, 0x86d1, 0xb687, 0x3fb6, 0x883f,
+       0xa487, 0x39a4, 0x2139, 0xbe20, 0xffbe, 0xedfe, 0x8ded, 0x368e, 0xc335,
+       0x51c3, 0x9851, 0xf297, 0xd6f2, 0xb9d6, 0x95ba, 0x2096, 0xea1f, 0x76e9,
+       0x4e76, 0xe04d, 0xd0df, 0x80d0, 0xa280, 0xfca2, 0x75fc, 0xef75, 0x32ef,
+       0x6833, 0xdf68, 0xc4df, 0x76c4, 0xb77,  0xb10a, 0xbfb1, 0x58bf, 0x5258,
+       0x4d52, 0x6c4d, 0x7e6c, 0xb67e, 0xccb5, 0x8ccc, 0xbe8c, 0xc8bd, 0x9ac8,
+       0xa99b, 0x52a9, 0x2f53, 0xc30,  0x3e0c, 0xb83d, 0x83b7, 0x5383, 0x7e53,
+       0x4f7e, 0xe24e, 0xb3e1, 0x8db3, 0x618e, 0xc861, 0xfcc8, 0x34fc, 0x9b35,
+       0xaa9b, 0xb1aa, 0x5eb1, 0x395e, 0x8639, 0xd486, 0x8bd4, 0x558b, 0x2156,
+       0xf721, 0x4ef6, 0x14f,  0x7301, 0xdd72, 0x49de, 0x894a, 0x9889, 0x8898,
+       0x7788, 0x7b77, 0x637b, 0xb963, 0xabb9, 0x7cab, 0xc87b, 0x21c8, 0xcb21,
+       0xdfca, 0xbfdf, 0xf2bf, 0x6af2, 0x626b, 0xb261, 0x3cb2, 0xc63c, 0xc9c6,
+       0xc9c9, 0xb4c9, 0xf9b4, 0x91f9, 0x4091, 0x3a40, 0xcc39, 0xd1cb, 0x7ed1,
+       0x537f, 0x6753, 0xa167, 0xba49, 0x88ba, 0x7789, 0x3877, 0xf037, 0xd3ef,
+       0xb5d4, 0x55b6, 0xa555, 0xeca4, 0xa1ec, 0xb6a2, 0x7b7,  0x9507, 0xfd94,
+       0x82fd, 0x5c83, 0x765c, 0x9676, 0x3f97, 0xda3f, 0x6fda, 0x646f, 0x3064,
+       0x5e30, 0x655e, 0x6465, 0xcb64, 0xcdca, 0x4ccd, 0x3f4c, 0x243f, 0x6f24,
+       0x656f, 0x6065, 0x3560, 0x3b36, 0xac3b, 0x4aac, 0x714a, 0x7e71, 0xda7e,
+       0x7fda, 0xda7f, 0x6fda, 0xff6f, 0xc6ff, 0xedc6, 0xd4ed, 0x70d5, 0xeb70,
+       0xa3eb, 0x80a3, 0xca80, 0x3fcb, 0x2540, 0xf825, 0x7ef8, 0xf87e, 0x73f8,
+       0xb474, 0xb4b4, 0x92b5, 0x9293, 0x93,   0x3500, 0x7134, 0x9071, 0xfa8f,
+       0x51fa, 0x1452, 0xba13, 0x7ab9, 0x957a, 0x8a95, 0x6e8a, 0x6d6e, 0x7c6d,
+       0x447c, 0x9744, 0x4597, 0x8945, 0xef88, 0x8fee, 0x3190, 0x4831, 0x8447,
+       0xa183, 0x1da1, 0xd41d, 0x2dd4, 0x4f2e, 0xc94e, 0xcbc9, 0xc9cb, 0x9ec9,
+       0x319e, 0xd531, 0x20d5, 0x4021, 0xb23f, 0x29b2, 0xd828, 0xecd8, 0x5ded,
+       0xfc5d, 0x4dfc, 0xd24d, 0x6bd2, 0x5f6b, 0xb35e, 0x7fb3, 0xee7e, 0x56ee,
+       0xa657, 0x68a6, 0x8768, 0x7787, 0xb077, 0x4cb1, 0x764c, 0xb175, 0x7b1,
+       0x3d07, 0x603d, 0x3560, 0x3e35, 0xb03d, 0xd6b0, 0xc8d6, 0xd8c8, 0x8bd8,
+       0x3e8c, 0x303f, 0xd530, 0xf1d4, 0x42f1, 0xca42, 0xddca, 0x41dd, 0x3141,
+       0x132,  0xe901, 0x8e9,  0xbe09, 0xe0bd, 0x2ce0, 0x862d, 0x3986, 0x9139,
+       0x6d91, 0x6a6d, 0x8d6a, 0x1b8d, 0xac1b, 0xedab, 0x54ed, 0xc054, 0xcebf,
+       0xc1ce, 0x5c2,  0x3805, 0x6038, 0x5960, 0xd359, 0xdd3,  0xbe0d, 0xafbd,
+       0x6daf, 0x206d, 0x2c20, 0x862c, 0x8e86, 0xec8d, 0xa2ec, 0xa3a2, 0x51a3,
+       0x8051, 0xfd7f, 0x91fd, 0xa292, 0xaf14, 0xeeae, 0x59ef, 0x535a, 0x8653,
+       0x3986, 0x9539, 0xb895, 0xa0b8, 0x26a0, 0x2227, 0xc022, 0x77c0, 0xad77,
+       0x46ad, 0xaa46, 0x60aa, 0x8560, 0x4785, 0xd747, 0x45d7, 0x2346, 0x5f23,
+       0x25f,  0x1d02, 0x71d,  0x8206, 0xc82,  0x180c, 0x3018, 0x4b30, 0x4b,
+       0x3001, 0x1230, 0x2d12, 0x8c2d, 0x148d, 0x4015, 0x5f3f, 0x3d5f, 0x6b3d,
+       0x396b, 0x473a, 0xf746, 0x44f7, 0x8945, 0x3489, 0xcb34, 0x84ca, 0xd984,
+       0xf0d9, 0xbcf0, 0x63bd, 0x3264, 0xf332, 0x45f3, 0x7346, 0x5673, 0xb056,
+       0xd3b0, 0x4ad4, 0x184b, 0x7d18, 0x6c7d, 0xbb6c, 0xfeba, 0xe0fe, 0x10e1,
+       0x5410, 0x2954, 0x9f28, 0x3a9f, 0x5a3a, 0xdb59, 0xbdc,  0xb40b, 0x1ab4,
+       0x131b, 0x5d12, 0x6d5c, 0xe16c, 0xb0e0, 0x89b0, 0xba88, 0xbb,   0x3c01,
+       0xe13b, 0x6fe1, 0x446f, 0xa344, 0x81a3, 0xfe81, 0xc7fd, 0x38c8, 0xb38,
+       0x1a0b, 0x6d19, 0xf36c, 0x47f3, 0x6d48, 0xb76d, 0xd3b7, 0xd8d2, 0x52d9,
+       0x4b53, 0xa54a, 0x34a5, 0xc534, 0x9bc4, 0xed9b, 0xbeed, 0x3ebe, 0x233e,
+       0x9f22, 0x4a9f, 0x774b, 0x4577, 0xa545, 0x64a5, 0xb65,  0x870b, 0x487,
+       0x9204, 0x5f91, 0xd55f, 0x35d5, 0x1a35, 0x71a,  0x7a07, 0x4e7a, 0xfc4e,
+       0x1efc, 0x481f, 0x7448, 0xde74, 0xa7dd, 0x1ea7, 0xaa1e, 0xcfaa, 0xfbcf,
+       0xedfb, 0x6eee, 0x386f, 0x4538, 0x6e45, 0xd96d, 0x11d9, 0x7912, 0x4b79,
+       0x494b, 0x6049, 0xac5f, 0x65ac, 0x1366, 0x5913, 0xe458, 0x7ae4, 0x387a,
+       0x3c38, 0xb03c, 0x76b0, 0x9376, 0xe193, 0x42e1, 0x7742, 0x6476, 0x3564,
+       0x3c35, 0x6a3c, 0xcc69, 0x94cc, 0x5d95, 0xe5e,  0xee0d, 0x4ced, 0xce4c,
+       0x52ce, 0xaa52, 0xdaaa, 0xe4da, 0x1de5, 0x4530, 0x5445, 0x3954, 0xb639,
+       0x81b6, 0x7381, 0x1574, 0xc215, 0x10c2, 0x3f10, 0x6b3f, 0xe76b, 0x7be7,
+       0xbc7b, 0xf7bb, 0x41f7, 0xcc41, 0x38cc, 0x4239, 0xa942, 0x4a9,  0xc504,
+       0x7cc4, 0x437c, 0x6743, 0xea67, 0x8dea, 0xe88d, 0xd8e8, 0xdcd8, 0x17dd,
+       0x5718, 0x958,  0xa609, 0x41a5, 0x5842, 0x159,  0x9f01, 0x269f, 0x5a26,
+       0x405a, 0xc340, 0xb4c3, 0xd4b4, 0xf4d3, 0xf1f4, 0x39f2, 0xe439, 0x67e4,
+       0x4168, 0xa441, 0xdda3, 0xdedd, 0x9df,  0xab0a, 0xa5ab, 0x9a6,  0xba09,
+       0x9ab9, 0xad9a, 0x5ae,  0xe205, 0xece2, 0xecec, 0x14ed, 0xd614, 0x6bd5,
+       0x916c, 0x3391, 0x6f33, 0x206f, 0x8020, 0x780,  0x7207, 0x2472, 0x8a23,
+       0xb689, 0x3ab6, 0xf739, 0x97f6, 0xb097, 0xa4b0, 0xe6a4, 0x88e6, 0x2789,
+       0xb28,  0x350b, 0x1f35, 0x431e, 0x1043, 0xc30f, 0x79c3, 0x379,  0x5703,
+       0x3256, 0x4732, 0x7247, 0x9d72, 0x489d, 0xd348, 0xa4d3, 0x7ca4, 0xbf7b,
+       0x45c0, 0x7b45, 0x337b, 0x4034, 0x843f, 0xd083, 0x35d0, 0x6335, 0x4d63,
+       0xe14c, 0xcce0, 0xfecc, 0x35ff, 0x5636, 0xf856, 0xeef8, 0x2def, 0xfc2d,
+       0x4fc,  0x6e04, 0xb66d, 0x78b6, 0xbb78, 0x3dbb, 0x9a3d, 0x839a, 0x9283,
+       0x593,  0xd504, 0x23d5, 0x5424, 0xd054, 0x61d0, 0xdb61, 0x17db, 0x1f18,
+       0x381f, 0x9e37, 0x679e, 0x1d68, 0x381d, 0x8038, 0x917f, 0x491,  0xbb04,
+       0x23bb, 0x4124, 0xd41,  0xa30c, 0x8ba3, 0x8b8b, 0xc68b, 0xd2c6, 0xebd2,
+       0x93eb, 0xbd93, 0x99bd, 0x1a99, 0xea19, 0x58ea, 0xcf58, 0x73cf, 0x1073,
+       0x9e10, 0x139e, 0xea13, 0xcde9, 0x3ecd, 0x883f, 0xf89,  0x180f, 0x2a18,
+       0x212a, 0xce20, 0x73ce, 0xf373, 0x60f3, 0xad60, 0x4093, 0x8e40, 0xb98e,
+       0xbfb9, 0xf1bf, 0x8bf1, 0x5e8c, 0xe95e, 0x14e9, 0x4e14, 0x1c4e, 0x7f1c,
+       0xe77e, 0x6fe7, 0xf26f, 0x13f2, 0x8b13, 0xda8a, 0x5fda, 0xea5f, 0x4eea,
+       0xa84f, 0x88a8, 0x1f88, 0x2820, 0x9728, 0x5a97, 0x3f5b, 0xb23f, 0x70b2,
+       0x2c70, 0x232d, 0xf623, 0x4f6,  0x905,  0x7509, 0xd675, 0x28d7, 0x9428,
+       0x3794, 0xf036, 0x2bf0, 0xba2c, 0xedb9, 0xd7ed, 0x59d8, 0xed59, 0x4ed,
+       0xe304, 0x18e3, 0x5c19, 0x3d5c, 0x753d, 0x6d75, 0x956d, 0x7f95, 0xc47f,
+       0x83c4, 0xa84,  0x2e0a, 0x5f2e, 0xb95f, 0x77b9, 0x6d78, 0xf46d, 0x1bf4,
+       0xed1b, 0xd6ed, 0xe0d6, 0x5e1,  0x3905, 0x5638, 0xa355, 0x99a2, 0xbe99,
+       0xb4bd, 0x85b4, 0x2e86, 0x542e, 0x6654, 0xd765, 0x73d7, 0x3a74, 0x383a,
+       0x2638, 0x7826, 0x7677, 0x9a76, 0x7e99, 0x2e7e, 0xea2d, 0xa6ea, 0x8a7,
+       0x109,  0x3300, 0xad32, 0x5fad, 0x465f, 0x2f46, 0xc62f, 0xd4c5, 0xad5,
+       0xcb0a, 0x4cb,  0xb004, 0x7baf, 0xe47b, 0x92e4, 0x8e92, 0x638e, 0x1763,
+       0xc17,  0xf20b, 0x1ff2, 0x8920, 0x5889, 0xcb58, 0xf8cb, 0xcaf8, 0x84cb,
+       0x9f84, 0x8a9f, 0x918a, 0x4991, 0x8249, 0xff81, 0x46ff, 0x5046, 0x5f50,
+       0x725f, 0xf772, 0x8ef7, 0xe08f, 0xc1e0, 0x1fc2, 0x9e1f, 0x8b9d, 0x108b,
+       0x411,  0x2b04, 0xb02a, 0x1fb0, 0x1020, 0x7a0f, 0x587a, 0x8958, 0xb188,
+       0xb1b1, 0x49b2, 0xb949, 0x7ab9, 0x917a, 0xfc91, 0xe6fc, 0x47e7, 0xbc47,
+       0x8fbb, 0xea8e, 0x34ea, 0x2635, 0x1726, 0x9616, 0xc196, 0xa6c1, 0xf3a6,
+       0x11f3, 0x4811, 0x3e48, 0xeb3e, 0xf7ea, 0x1bf8, 0xdb1c, 0x8adb, 0xe18a,
+       0x42e1, 0x9d42, 0x5d9c, 0x6e5d, 0x286e, 0x4928, 0x9a49, 0xb09c, 0xa6b0,
+       0x2a7,  0xe702, 0xf5e6, 0x9af5, 0xf9b,  0x810f, 0x8080, 0x180,  0x1702,
+       0x5117, 0xa650, 0x11a6, 0x1011, 0x550f, 0xd554, 0xbdd5, 0x6bbe, 0xc66b,
+       0xfc7,  0x5510, 0x5555, 0x7655, 0x177,  0x2b02, 0x6f2a, 0xb70,  0x9f0b,
+       0xcf9e, 0xf3cf, 0x3ff4, 0xcb40, 0x8ecb, 0x768e, 0x5277, 0x8652, 0x9186,
+       0x9991, 0x5099, 0xd350, 0x93d3, 0x6d94, 0xe6d,  0x530e, 0x3153, 0xa531,
+       0x64a5, 0x7964, 0x7c79, 0x467c, 0x1746, 0x3017, 0x3730, 0x538,  0x5,
+       0x1e00, 0x5b1e, 0x955a, 0xae95, 0x3eaf, 0xff3e, 0xf8ff, 0xb2f9, 0xa1b3,
+       0xb2a1, 0x5b2,  0xad05, 0x7cac, 0x2d7c, 0xd32c, 0x80d2, 0x7280, 0x8d72,
+       0x1b8e, 0x831b, 0xac82, 0xfdac, 0xa7fd, 0x15a8, 0xd614, 0xe0d5, 0x7be0,
+       0xb37b, 0x61b3, 0x9661, 0x9d95, 0xc79d, 0x83c7, 0xd883, 0xead7, 0xceb,
+       0xf60c, 0xa9f5, 0x19a9, 0xa019, 0x8f9f, 0xd48f, 0x3ad5, 0x853a, 0x985,
+       0x5309, 0x6f52, 0x1370, 0x6e13, 0xa96d, 0x98a9, 0x5198, 0x9f51, 0xb69f,
+       0xa1b6, 0x2ea1, 0x672e, 0x2067, 0x6520, 0xaf65, 0x6eaf, 0x7e6f, 0xee7e,
+       0x17ef, 0xa917, 0xcea8, 0x9ace, 0xff99, 0x5dff, 0xdf5d, 0x38df, 0xa39,
+       0x1c0b, 0xe01b, 0x46e0, 0xcb46, 0x90cb, 0xba90, 0x4bb,  0x9104, 0x9d90,
+       0xc89c, 0xf6c8, 0x6cf6, 0x886c, 0x1789, 0xbd17, 0x70bc, 0x7e71, 0x17e,
+       0x1f01, 0xa01f, 0xbaa0, 0x14bb, 0xfc14, 0x7afb, 0xa07a, 0x3da0, 0xbf3d,
+       0x48bf, 0x8c48, 0x968b, 0x9d96, 0xfd9d, 0x96fd, 0x9796, 0x6b97, 0xd16b,
+       0xf4d1, 0x3bf4, 0x253c, 0x9125, 0x6691, 0xc166, 0x34c1, 0x5735, 0x1a57,
+       0xdc19, 0x77db, 0x8577, 0x4a85, 0x824a, 0x9182, 0x7f91, 0xfd7f, 0xb4c3,
+       0xb5b4, 0xb3b5, 0x7eb3, 0x617e, 0x4e61, 0xa4f,  0x530a, 0x3f52, 0xa33e,
+       0x34a3, 0x9234, 0xf091, 0xf4f0, 0x1bf5, 0x311b, 0x9631, 0x6a96, 0x386b,
+       0x1d39, 0xe91d, 0xe8e9, 0x69e8, 0x426a, 0xee42, 0x89ee, 0x368a, 0x2837,
+       0x7428, 0x5974, 0x6159, 0x1d62, 0x7b1d, 0xf77a, 0x7bf7, 0x6b7c, 0x696c,
+       0xf969, 0x4cf9, 0x714c, 0x4e71, 0x6b4e, 0x256c, 0x6e25, 0xe96d, 0x94e9,
+       0x8f94, 0x3e8f, 0x343e, 0x4634, 0xb646, 0x97b5, 0x8997, 0xe8a,  0x900e,
+       0x8090, 0xfd80, 0xa0fd, 0x16a1, 0xf416, 0xebf4, 0x95ec, 0x1196, 0x8911,
+       0x3d89, 0xda3c, 0x9fd9, 0xd79f, 0x4bd7, 0x214c, 0x3021, 0x4f30, 0x994e,
+       0x5c99, 0x6f5d, 0x326f, 0xab31, 0x6aab, 0xe969, 0x90e9, 0x1190, 0xff10,
+       0xa2fe, 0xe0a2, 0x66e1, 0x4067, 0x9e3f, 0x2d9e, 0x712d, 0x8170, 0xd180,
+       0xffd1, 0x25ff, 0x3826, 0x2538, 0x5f24, 0xc45e, 0x1cc4, 0xdf1c, 0x93df,
+       0xc793, 0x80c7, 0x2380, 0xd223, 0x7ed2, 0xfc7e, 0x22fd, 0x7422, 0x1474,
+       0xb714, 0x7db6, 0x857d, 0xa85,  0xa60a, 0x88a6, 0x4289, 0x7842, 0xc278,
+       0xf7c2, 0xcdf7, 0x84cd, 0xae84, 0x8cae, 0xb98c, 0x1aba, 0x4d1a, 0x884c,
+       0x4688, 0xcc46, 0xd8cb, 0x2bd9, 0xbe2b, 0xa2be, 0x72a2, 0xf772, 0xd2f6,
+       0x75d2, 0xc075, 0xa3c0, 0x63a3, 0xae63, 0x8fae, 0x2a90, 0x5f2a, 0xef5f,
+       0x5cef, 0xa05c, 0x89a0, 0x5e89, 0x6b5e, 0x736b, 0x773,  0x9d07, 0xe99c,
+       0x27ea, 0x2028, 0xc20,  0x980b, 0x4797, 0x2848, 0x9828, 0xc197, 0x48c2,
+       0x2449, 0x7024, 0x570,  0x3e05, 0xd3e,  0xf60c, 0xbbf5, 0x69bb, 0x3f6a,
+       0x740,  0xf006, 0xe0ef, 0xbbe0, 0xadbb, 0x56ad, 0xcf56, 0xbfce, 0xa9bf,
+       0x205b, 0x6920, 0xae69, 0x50ae, 0x2050, 0xf01f, 0x27f0, 0x9427, 0x8993,
+       0x8689, 0x4087, 0x6e40, 0xb16e, 0xa1b1, 0xe8a1, 0x87e8, 0x6f88, 0xfe6f,
+       0x4cfe, 0xe94d, 0xd5e9, 0x47d6, 0x3148, 0x5f31, 0xc35f, 0x13c4, 0xa413,
+       0x5a5,  0x2405, 0xc223, 0x66c2, 0x3667, 0x5e37, 0x5f5e, 0x2f5f, 0x8c2f,
+       0xe48c, 0xd0e4, 0x4d1,  0xd104, 0xe4d0, 0xcee4, 0xfcf,  0x480f, 0xa447,
+       0x5ea4, 0xff5e, 0xbefe, 0x8dbe, 0x1d8e, 0x411d, 0x1841, 0x6918, 0x5469,
+       0x1155, 0xc611, 0xaac6, 0x37ab, 0x2f37, 0xca2e, 0x87ca, 0xbd87, 0xabbd,
+       0xb3ab, 0xcb4,  0xce0c, 0xfccd, 0xa5fd, 0x72a5, 0xf072, 0x83f0, 0xfe83,
+       0x97fd, 0xc997, 0xb0c9, 0xadb0, 0xe6ac, 0x88e6, 0x1088, 0xbe10, 0x16be,
+       0xa916, 0xa3a8, 0x46a3, 0x5447, 0xe953, 0x84e8, 0x2085, 0xa11f, 0xfa1,
+       0xdd0f, 0xbedc, 0x5abe, 0x805a, 0xc97f, 0x6dc9, 0x826d, 0x4a82, 0x934a,
+       0x5293, 0xd852, 0xd3d8, 0xadd3, 0xf4ad, 0xf3f4, 0xfcf3, 0xfefc, 0xcafe,
+       0xb7ca, 0x3cb8, 0xa13c, 0x18a1, 0x1418, 0xea13, 0x91ea, 0xf891, 0x53f8,
+       0xa254, 0xe9a2, 0x87ea, 0x4188, 0x1c41, 0xdc1b, 0xf5db, 0xcaf5, 0x45ca,
+       0x6d45, 0x396d, 0xde39, 0x90dd, 0x1e91, 0x1e,   0x7b00, 0x6a7b, 0xa46a,
+       0xc9a3, 0x9bc9, 0x389b, 0x1139, 0x5211, 0x1f52, 0xeb1f, 0xabeb, 0x48ab,
+       0x9348, 0xb392, 0x17b3, 0x1618, 0x5b16, 0x175b, 0xdc17, 0xdedb, 0x1cdf,
+       0xeb1c, 0xd1ea, 0x4ad2, 0xd4b,  0xc20c, 0x24c2, 0x7b25, 0x137b, 0x8b13,
+       0x618b, 0xa061, 0xff9f, 0xfffe, 0x72ff, 0xf572, 0xe2f5, 0xcfe2, 0xd2cf,
+       0x75d3, 0x6a76, 0xc469, 0x1ec4, 0xfc1d, 0x59fb, 0x455a, 0x7a45, 0xa479,
+       0xb7a4
+};
+
 static u8 tmp_buf[TEST_BUFLEN];
 
 #define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum))
@@ -338,10 +575,57 @@ static void test_csum_no_carry_inputs(struct kunit *test)
        }
 }
 
+static void test_ip_fast_csum(struct kunit *test)
+{
+       __sum16 csum_result, expected;
+
+       for (int len = IPv4_MIN_WORDS; len < IPv4_MAX_WORDS; len++) { /* len is the IPv4 header length in 32-bit words */
+               for (int index = 0; index < NUM_IP_FAST_CSUM_TESTS; index++) {
+                       csum_result = ip_fast_csum(random_buf + index, len); /* header taken at each byte offset of random_buf */
+                       expected =
+                               expected_fast_csum[(len - IPv4_MIN_WORDS) *
+                                                  NUM_IP_FAST_CSUM_TESTS +
+                                                  index]; /* precomputed reference, row per len */
+                       CHECK_EQ(expected, csum_result);
+               }
+       }
+}
+
+static void test_csum_ipv6_magic(struct kunit *test)
+{
+#if defined(CONFIG_NET)
+       const struct in6_addr *saddr;
+       const struct in6_addr *daddr;
+       unsigned int len;
+       unsigned char proto;
+       unsigned int csum;
+
+       const int daddr_offset = sizeof(struct in6_addr); /* pseudo-header fields packed back-to-back in random_buf */
+       const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr);
+       const int proto_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+                            sizeof(int);
+       const int csum_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+                           sizeof(int) + sizeof(char);
+
+       for (int i = 0; i < NUM_IPv6_TESTS; i++) {
+               saddr = (const struct in6_addr *)(random_buf + i);
+               daddr = (const struct in6_addr *)(random_buf + i +
+                                                 daddr_offset);
+               len = *(unsigned int *)(random_buf + i + len_offset);
+               proto = *(random_buf + i + proto_offset);
+               csum = *(unsigned int *)(random_buf + i + csum_offset);
+               CHECK_EQ(expected_csum_ipv6_magic[i],
+                        csum_ipv6_magic(saddr, daddr, len, proto, csum));
+       }
+#endif /* CONFIG_NET */
+}
+
 static struct kunit_case __refdata checksum_test_cases[] = {
        KUNIT_CASE(test_csum_fixed_random_inputs),
        KUNIT_CASE(test_csum_all_carry_inputs),
        KUNIT_CASE(test_csum_no_carry_inputs),
+       KUNIT_CASE(test_ip_fast_csum),
+       KUNIT_CASE(test_csum_ipv6_magic),
        {}
 };
 
index c49a09ee3853cda00d60fa73f711d44094abdfb4..c3569d2ba503f4d24282a649bba4d147f358859b 100644 (file)
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/fw_table.h>
 
 enum acpi_subtable_type {
        ACPI_SUBTABLE_COMMON,
        ACPI_SUBTABLE_HMAT,
        ACPI_SUBTABLE_PRMT,
        ACPI_SUBTABLE_CEDT,
+       CDAT_SUBTABLE,
 };
 
 struct acpi_subtable_entry {
@@ -25,7 +27,7 @@ struct acpi_subtable_entry {
        enum acpi_subtable_type type;
 };
 
-static unsigned long __init_or_acpilib
+static unsigned long __init_or_fwtbl_lib
 acpi_get_entry_type(struct acpi_subtable_entry *entry)
 {
        switch (entry->type) {
@@ -37,11 +39,13 @@ acpi_get_entry_type(struct acpi_subtable_entry *entry)
                return 0;
        case ACPI_SUBTABLE_CEDT:
                return entry->hdr->cedt.type;
+       case CDAT_SUBTABLE:
+               return entry->hdr->cdat.type;
        }
        return 0;
 }
 
-static unsigned long __init_or_acpilib
+static unsigned long __init_or_fwtbl_lib
 acpi_get_entry_length(struct acpi_subtable_entry *entry)
 {
        switch (entry->type) {
@@ -53,11 +57,16 @@ acpi_get_entry_length(struct acpi_subtable_entry *entry)
                return entry->hdr->prmt.length;
        case ACPI_SUBTABLE_CEDT:
                return entry->hdr->cedt.length;
+       case CDAT_SUBTABLE: {
+               __le16 length = (__force __le16)entry->hdr->cdat.length;
+
+               return le16_to_cpu(length);
+       }
        }
        return 0;
 }
 
-static unsigned long __init_or_acpilib
+static unsigned long __init_or_fwtbl_lib
 acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
 {
        switch (entry->type) {
@@ -69,11 +78,13 @@ acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
                return sizeof(entry->hdr->prmt);
        case ACPI_SUBTABLE_CEDT:
                return sizeof(entry->hdr->cedt);
+       case CDAT_SUBTABLE:
+               return sizeof(entry->hdr->cdat);
        }
        return 0;
 }
 
-static enum acpi_subtable_type __init_or_acpilib
+static enum acpi_subtable_type __init_or_fwtbl_lib
 acpi_get_subtable_type(char *id)
 {
        if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
@@ -82,12 +93,27 @@ acpi_get_subtable_type(char *id)
                return ACPI_SUBTABLE_PRMT;
        if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
                return ACPI_SUBTABLE_CEDT;
+       if (strncmp(id, ACPI_SIG_CDAT, 4) == 0)
+               return CDAT_SUBTABLE;
        return ACPI_SUBTABLE_COMMON;
 }
 
-static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
-                                         union acpi_subtable_headers *hdr,
-                                         unsigned long end)
+static unsigned long __init_or_fwtbl_lib
+acpi_table_get_length(enum acpi_subtable_type type,
+                     union fw_table_header *header)
+{
+       if (type == CDAT_SUBTABLE) {
+               __le32 length = (__force __le32)header->cdat.length;
+
+               return le32_to_cpu(length);
+       }
+
+       return header->acpi.length;
+}
+
+static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
+                                           union acpi_subtable_headers *hdr,
+                                           unsigned long end)
 {
        if (proc->handler)
                return proc->handler(hdr, end);
@@ -119,22 +145,25 @@ static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
  * On success returns sum of all matching entries for all proc handlers.
  * Otherwise, -ENODEV or -EINVAL is returned.
  */
-int __init_or_acpilib
+int __init_or_fwtbl_lib
 acpi_parse_entries_array(char *id, unsigned long table_size,
-                        struct acpi_table_header *table_header,
+                        union fw_table_header *table_header,
                         struct acpi_subtable_proc *proc,
                         int proc_num, unsigned int max_entries)
 {
        unsigned long table_end, subtable_len, entry_len;
        struct acpi_subtable_entry entry;
+       enum acpi_subtable_type type;
        int count = 0;
        int i;
 
-       table_end = (unsigned long)table_header + table_header->length;
+       type = acpi_get_subtable_type(id);
+       table_end = (unsigned long)table_header +
+                   acpi_table_get_length(type, table_header);
 
        /* Parse all entries looking for a match. */
 
-       entry.type = acpi_get_subtable_type(id);
+       entry.type = type;
        entry.hdr = (union acpi_subtable_headers *)
            ((unsigned long)table_header + table_size);
        subtable_len = acpi_get_subtable_header_length(&entry);
@@ -174,3 +203,25 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
 
        return count;
 }
+
+int __init_or_fwtbl_lib
+cdat_table_parse(enum acpi_cdat_type type,
+                acpi_tbl_entry_handler_arg handler_arg,
+                void *arg,
+                struct acpi_table_cdat *table_header)
+{
+       struct acpi_subtable_proc proc = {
+               .id             = type, /* CDAT subtable type to match */
+               .handler_arg    = handler_arg,
+               .arg            = arg,
+       };
+
+       if (!table_header)
+               return -EINVAL; /* no CDAT table to walk */
+
+       return acpi_parse_entries_array(ACPI_SIG_CDAT,
+                                       sizeof(struct acpi_table_cdat),
+                                       (union fw_table_header *)table_header,
+                                       &proc, 1, 0); /* one proc, max_entries == 0 */
+}
+EXPORT_SYMBOL_FWTBL_LIB(cdat_table_parse);
index 54bd558364053c2eb5437e0bf5b94da6cdfba0e1..5fcd48ff0f36a37415c67cf2223cdf7ffeaef794 100644 (file)
@@ -13,5 +13,7 @@
 
 // For internal use only -- registers the kunit_bus.
 int kunit_bus_init(void);
+// For internal use only -- unregisters the kunit_bus.
+void kunit_bus_shutdown(void);
 
 #endif //_KUNIT_DEVICE_IMPL_H
index f5371287b3750f0cefdc423748846b4257d8d14a..644a38a1f5b1cf0686a67e7df1a366a3d84e61a6 100644 (file)
@@ -45,8 +45,8 @@ int kunit_bus_init(void)
        int error;
 
        kunit_bus_device = root_device_register("kunit");
-       if (!kunit_bus_device)
-               return -ENOMEM;
+       if (IS_ERR(kunit_bus_device))
+               return PTR_ERR(kunit_bus_device);
 
        error = bus_register(&kunit_bus_type);
        if (error)
@@ -54,6 +54,20 @@ int kunit_bus_init(void)
        return error;
 }
 
+/* Unregister the 'kunit_bus' in case the KUnit module is unloaded. */
+void kunit_bus_shutdown(void)
+{
+       /* Bail out if the bus was never registered or registration failed. */
+       if (IS_ERR_OR_NULL(kunit_bus_device))
+               return;
+
+       bus_unregister(&kunit_bus_type);
+
+       root_device_unregister(kunit_bus_device);
+
+       kunit_bus_device = NULL; /* prevent a double shutdown */
+}
+
 /* Release a 'fake' KUnit device. */
 static void kunit_device_release(struct device *d)
 {
index 717b9599036ba0bccf1ffe846b7b051c591109f2..689fff2b2b106a597bbf9b2e473d37e193537b03 100644 (file)
@@ -146,6 +146,10 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set)
        kfree(suite_set.start);
 }
 
+/*
+ * Filter and reallocate test suites. Must return the filtered test suites set
+ * allocated at a valid virtual address or NULL in case of error.
+ */
 struct kunit_suite_set
 kunit_filter_suites(const struct kunit_suite_set *suite_set,
                    const char *filter_glob,
index c4259d910356ba7e8f24847cd347eb5861071cb4..f7980ef236a38bdefd8e0e7b53915f6057348617 100644 (file)
@@ -720,7 +720,7 @@ static void kunit_device_cleanup_test(struct kunit *test)
        long action_was_run = 0;
 
        test_device = kunit_device_register(test, "my_device");
-       KUNIT_ASSERT_NOT_NULL(test, test_device);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
 
        /* Add an action to verify cleanup. */
        devm_add_action(test_device, test_dev_action, &action_was_run);
index f95d2093a0aa3359c0cb08462ea62e76ab0f2ecf..1d1475578515c261fe74b454502f1a2a5ac3bb81 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/panic.h>
 #include <linux/sched/debug.h>
 #include <linux/sched.h>
+#include <linux/mm.h>
 
 #include "debugfs.h"
 #include "device-impl.h"
@@ -801,12 +802,19 @@ static void kunit_module_exit(struct module *mod)
        };
        const char *action = kunit_action();
 
+       /*
+        * Check if the start address is a valid virtual address to detect
+        * if the module load sequence has failed and the suite set has not
+        * been initialized and filtered.
+        */
+       if (!suite_set.start || !virt_addr_valid(suite_set.start))
+               return;
+
        if (!action)
                __kunit_test_suites_exit(mod->kunit_suites,
                                         mod->num_kunit_suites);
 
-       if (suite_set.start)
-               kunit_free_suite_set(suite_set);
+       kunit_free_suite_set(suite_set);
 }
 
 static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
@@ -816,12 +824,12 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
 
        switch (val) {
        case MODULE_STATE_LIVE:
+               kunit_module_init(mod);
                break;
        case MODULE_STATE_GOING:
                kunit_module_exit(mod);
                break;
        case MODULE_STATE_COMING:
-               kunit_module_init(mod);
                break;
        case MODULE_STATE_UNFORMED:
                break;
@@ -920,6 +928,9 @@ static void __exit kunit_exit(void)
 #ifdef CONFIG_MODULES
        unregister_module_notifier(&kunit_mod_nb);
 #endif
+
+       kunit_bus_shutdown();
+
        kunit_debugfs_cleanup();
 }
 module_exit(kunit_exit);
index dc15e7888fc1fec5747252f3ef1b3d7b5d7d5bd8..ed2ab43e1b22c0156e5d361c6bfa7eb745759232 100644 (file)
@@ -758,7 +758,7 @@ EXPORT_SYMBOL(nla_find);
  * @dstsize: Size of destination buffer.
  *
  * Copies at most dstsize - 1 bytes into the destination buffer.
- * Unlike strlcpy the destination buffer is always padded out.
+ * Unlike strscpy() the destination buffer is always padded out.
  *
  * Return:
  * * srclen - Returns @nla length (not including the trailing %NUL).
index d0a5081dfd122e42702748c30fea79100d84727b..92c6b1fd898938e4613d8289cf49c09fd53bb93b 100644 (file)
@@ -388,11 +388,6 @@ static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
        unsigned int shallow_depth;
 
        /*
-        * For each batch, we wake up one queue. We need to make sure that our
-        * batch size is small enough that the full depth of the bitmap,
-        * potentially limited by a shallow depth, is enough to wake up all of
-        * the queues.
-        *
         * Each full word of the bitmap has bits_per_word bits, and there might
         * be a partial word. There are depth / bits_per_word full words and
         * depth % bits_per_word bits left over. In bitwise arithmetic:
index a0be5d05c7f08187667c91c7d0886843df52225c..5caa1f566553843911ffdf2edafd32ee70277ea8 100644 (file)
@@ -14,6 +14,7 @@
 
 #define pr_fmt(fmt) "stackdepot: " fmt
 
+#include <linux/debugfs.h>
 #include <linux/gfp.h>
 #include <linux/jhash.h>
 #include <linux/kernel.h>
@@ -21,8 +22,9 @@
 #include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
-#include <linux/percpu.h>
 #include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
 #include <linux/refcount.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -67,12 +69,28 @@ union handle_parts {
 };
 
 struct stack_record {
-       struct list_head list;          /* Links in hash table or freelist */
+       struct list_head hash_list;     /* Links in the hash table */
        u32 hash;                       /* Hash in hash table */
        u32 size;                       /* Number of stored frames */
-       union handle_parts handle;
+       union handle_parts handle;      /* Constant after initialization */
        refcount_t count;
-       unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];    /* Frames */
+       union {
+               unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];    /* Frames */
+               struct {
+                       /*
+                        * An important invariant of the implementation is to
+                        * only place a stack record onto the freelist iff its
+                        * refcount is zero. Because stack records with a zero
+                        * refcount are never considered as valid, it is safe to
+                        * union @entries and freelist management state below.
+                        * Conversely, as soon as an entry is off the freelist
+                        * and its refcount becomes non-zero, the below must not
+                        * be accessed until being placed back on the freelist.
+                        */
+                       struct list_head free_list;     /* Links in the freelist */
+                       unsigned long rcu_state;        /* RCU cookie */
+               };
+       };
 };
 
 #define DEPOT_STACK_RECORD_SIZE \
@@ -112,8 +130,25 @@ static LIST_HEAD(free_stacks);
  * yet allocated or if the limit on the number of pools is reached.
  */
 static bool new_pool_required = true;
-/* Lock that protects the variables above. */
-static DEFINE_RWLOCK(pool_rwlock);
+/* The lock must be held when performing pool or freelist modifications. */
+static DEFINE_RAW_SPINLOCK(pool_lock);
+
+/* Statistics counters for debugfs. */
+enum depot_counter_id {
+       DEPOT_COUNTER_ALLOCS,
+       DEPOT_COUNTER_FREES,
+       DEPOT_COUNTER_INUSE,
+       DEPOT_COUNTER_FREELIST_SIZE,
+       DEPOT_COUNTER_COUNT,
+};
+static long counters[DEPOT_COUNTER_COUNT];
+static const char *const counter_names[] = {
+       [DEPOT_COUNTER_ALLOCS]          = "allocations",
+       [DEPOT_COUNTER_FREES]           = "frees",
+       [DEPOT_COUNTER_INUSE]           = "in_use",
+       [DEPOT_COUNTER_FREELIST_SIZE]   = "freelist_size",
+};
+static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
 
 static int __init disable_stack_depot(char *str)
 {
@@ -258,14 +293,15 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(stack_depot_init);
 
-/* Initializes a stack depol pool. */
+/*
+ * Initializes the new stack depot @pool, releases all its entries to the
+ * freelist, and updates the list of pools.
+ */
 static void depot_init_pool(void *pool)
 {
        int offset;
 
-       lockdep_assert_held_write(&pool_rwlock);
-
-       WARN_ON(!list_empty(&free_stacks));
+       lockdep_assert_held(&pool_lock);
 
        /* Initialize handles and link stack records into the freelist. */
        for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
@@ -276,18 +312,36 @@ static void depot_init_pool(void *pool)
                stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
                stack->handle.extra = 0;
 
-               list_add(&stack->list, &free_stacks);
+               /*
+                * Stack traces of size 0 are never saved, and we can simply use
+                * the size field as an indicator if this is a new unused stack
+                * record in the freelist.
+                */
+               stack->size = 0;
+
+               INIT_LIST_HEAD(&stack->hash_list);
+               /*
+                * Add to the freelist front to prioritize never-used entries:
+                * required in case there are entries in the freelist, but their
+                * RCU cookie still belongs to the current RCU grace period
+                * (there can still be concurrent readers).
+                */
+               list_add(&stack->free_list, &free_stacks);
+               counters[DEPOT_COUNTER_FREELIST_SIZE]++;
        }
 
        /* Save reference to the pool to be used by depot_fetch_stack(). */
        stack_pools[pools_num] = pool;
-       pools_num++;
+
+       /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
+       WRITE_ONCE(pools_num, pools_num + 1);
+       ASSERT_EXCLUSIVE_WRITER(pools_num);
 }
 
 /* Keeps the preallocated memory to be used for a new stack depot pool. */
 static void depot_keep_new_pool(void **prealloc)
 {
-       lockdep_assert_held_write(&pool_rwlock);
+       lockdep_assert_held(&pool_lock);
 
        /*
         * If a new pool is already saved or the maximum number of
@@ -310,17 +364,16 @@ static void depot_keep_new_pool(void **prealloc)
         * number of pools is reached. In either case, take note that
         * keeping another pool is not required.
         */
-       new_pool_required = false;
+       WRITE_ONCE(new_pool_required, false);
 }
 
-/* Updates references to the current and the next stack depot pools. */
-static bool depot_update_pools(void **prealloc)
+/*
+ * Try to initialize a new stack depot pool from either a previous or the
+ * current pre-allocation, and release all its entries to the freelist.
+ */
+static bool depot_try_init_pool(void **prealloc)
 {
-       lockdep_assert_held_write(&pool_rwlock);
-
-       /* Check if we still have objects in the freelist. */
-       if (!list_empty(&free_stacks))
-               goto out_keep_prealloc;
+       lockdep_assert_held(&pool_lock);
 
        /* Check if we have a new pool saved and use it. */
        if (new_pool) {
@@ -329,10 +382,9 @@ static bool depot_update_pools(void **prealloc)
 
                /* Take note that we might need a new new_pool. */
                if (pools_num < DEPOT_MAX_POOLS)
-                       new_pool_required = true;
+                       WRITE_ONCE(new_pool_required, true);
 
-               /* Try keeping the preallocated memory for new_pool. */
-               goto out_keep_prealloc;
+               return true;
        }
 
        /* Bail out if we reached the pool limit. */
@@ -349,12 +401,32 @@ static bool depot_update_pools(void **prealloc)
        }
 
        return false;
+}
+
+/* Try to find next free usable entry. */
+static struct stack_record *depot_pop_free(void)
+{
+       struct stack_record *stack;
+
+       lockdep_assert_held(&pool_lock);
+
+       if (list_empty(&free_stacks))
+               return NULL;
+
+       /*
+        * We maintain the invariant that the elements in front are least
+        * recently used, and are therefore more likely to be associated with an
+        * RCU grace period in the past. Consequently it is sufficient to only
+        * check the first entry.
+        */
+       stack = list_first_entry(&free_stacks, struct stack_record, free_list);
+       if (stack->size && !poll_state_synchronize_rcu(stack->rcu_state))
+               return NULL;
+
+       list_del(&stack->free_list);
+       counters[DEPOT_COUNTER_FREELIST_SIZE]--;
 
-out_keep_prealloc:
-       /* Keep the preallocated memory for a new pool if required. */
-       if (*prealloc)
-               depot_keep_new_pool(prealloc);
-       return true;
+       return stack;
 }
 
 /* Allocates a new stack in a stack depot pool. */
@@ -363,19 +435,22 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 {
        struct stack_record *stack;
 
-       lockdep_assert_held_write(&pool_rwlock);
+       lockdep_assert_held(&pool_lock);
 
-       /* Update current and new pools if required and possible. */
-       if (!depot_update_pools(prealloc))
+       /* This should already be checked by public API entry points. */
+       if (WARN_ON_ONCE(!size))
                return NULL;
 
        /* Check if we have a stack record to save the stack trace. */
-       if (list_empty(&free_stacks))
-               return NULL;
-
-       /* Get and unlink the first entry from the freelist. */
-       stack = list_first_entry(&free_stacks, struct stack_record, list);
-       list_del(&stack->list);
+       stack = depot_pop_free();
+       if (!stack) {
+               /* No usable entries on the freelist - try to refill the freelist. */
+               if (!depot_try_init_pool(prealloc))
+                       return NULL;
+               stack = depot_pop_free();
+               if (WARN_ON(!stack))
+                       return NULL;
+       }
 
        /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
        if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
@@ -394,38 +469,80 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
         */
        kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
 
+       counters[DEPOT_COUNTER_ALLOCS]++;
+       counters[DEPOT_COUNTER_INUSE]++;
        return stack;
 }
 
 static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 {
+       const int pools_num_cached = READ_ONCE(pools_num);
        union handle_parts parts = { .handle = handle };
        void *pool;
        size_t offset = parts.offset << DEPOT_STACK_ALIGN;
        struct stack_record *stack;
 
-       lockdep_assert_held(&pool_rwlock);
+       lockdep_assert_not_held(&pool_lock);
 
-       if (parts.pool_index > pools_num) {
+       if (parts.pool_index > pools_num_cached) {
                WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
-                    parts.pool_index, pools_num, handle);
+                    parts.pool_index, pools_num_cached, handle);
                return NULL;
        }
 
        pool = stack_pools[parts.pool_index];
-       if (!pool)
+       if (WARN_ON(!pool))
                return NULL;
 
        stack = pool + offset;
+       if (WARN_ON(!refcount_read(&stack->count)))
+               return NULL;
+
        return stack;
 }
 
 /* Links stack into the freelist. */
 static void depot_free_stack(struct stack_record *stack)
 {
-       lockdep_assert_held_write(&pool_rwlock);
+       unsigned long flags;
+
+       lockdep_assert_not_held(&pool_lock);
 
-       list_add(&stack->list, &free_stacks);
+       raw_spin_lock_irqsave(&pool_lock, flags);
+       printk_deferred_enter();
+
+       /*
+        * Remove the entry from the hash list. Concurrent list traversal may
+        * still observe the entry, but since the refcount is zero, this entry
+        * will no longer be considered as valid.
+        */
+       list_del_rcu(&stack->hash_list);
+
+       /*
+        * Due to being used from constrained contexts such as the allocators,
+        * NMI, or even RCU itself, stack depot cannot rely on primitives that
+        * would sleep (such as synchronize_rcu()) or recursively call into
+        * stack depot again (such as call_rcu()).
+        *
+        * Instead, get an RCU cookie, so that we can ensure this entry isn't
+        * moved onto another list until the next grace period, and concurrent
+        * RCU list traversal remains safe.
+        */
+       stack->rcu_state = get_state_synchronize_rcu();
+
+       /*
+        * Add the entry to the freelist tail, so that older entries are
+        * considered first - their RCU cookie is more likely to no longer be
+        * associated with the current grace period.
+        */
+       list_add_tail(&stack->free_list, &free_stacks);
+
+       counters[DEPOT_COUNTER_FREELIST_SIZE]++;
+       counters[DEPOT_COUNTER_FREES]++;
+       counters[DEPOT_COUNTER_INUSE]--;
+
+       printk_deferred_exit();
+       raw_spin_unlock_irqrestore(&pool_lock, flags);
 }
 
 /* Calculates the hash for a stack. */
@@ -453,22 +570,52 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
 
 /* Finds a stack in a bucket of the hash table. */
 static inline struct stack_record *find_stack(struct list_head *bucket,
-                                            unsigned long *entries, int size,
-                                            u32 hash)
+                                             unsigned long *entries, int size,
+                                             u32 hash, depot_flags_t flags)
 {
-       struct list_head *pos;
-       struct stack_record *found;
+       struct stack_record *stack, *ret = NULL;
+
+       /*
+        * Stack depot may be used from instrumentation that instruments RCU or
+        * tracing itself; use variant that does not call into RCU and cannot be
+        * traced.
+        *
+        * Note: Such use cases must take care when using refcounting to evict
+        * unused entries, because the stack record free-then-reuse code paths
+        * do call into RCU.
+        */
+       rcu_read_lock_sched_notrace();
 
-       lockdep_assert_held(&pool_rwlock);
+       list_for_each_entry_rcu(stack, bucket, hash_list) {
+               if (stack->hash != hash || stack->size != size)
+                       continue;
+
+               /*
+                * This may race with depot_free_stack() accessing the freelist
+                * management state unioned with @entries. The refcount is zero
+                * in that case and the below refcount_inc_not_zero() will fail.
+                */
+               if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
+                       continue;
+
+               /*
+                * Try to increment refcount. If this succeeds, the stack record
+                * is valid and has not yet been freed.
+                *
+                * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior
+                * to then call stack_depot_put() later, and we can assume that
+                * a stack record is never placed back on the freelist.
+                */
+               if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count))
+                       continue;
 
-       list_for_each(pos, bucket) {
-               found = list_entry(pos, struct stack_record, list);
-               if (found->hash == hash &&
-                   found->size == size &&
-                   !stackdepot_memcmp(entries, found->entries, size))
-                       return found;
+               ret = stack;
+               break;
        }
-       return NULL;
+
+       rcu_read_unlock_sched_notrace();
+
+       return ret;
 }
 
 depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
@@ -482,7 +629,6 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
        struct page *page = NULL;
        void *prealloc = NULL;
        bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
-       bool need_alloc = false;
        unsigned long flags;
        u32 hash;
 
@@ -505,31 +651,16 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
        hash = hash_stack(entries, nr_entries);
        bucket = &stack_table[hash & stack_hash_mask];
 
-       read_lock_irqsave(&pool_rwlock, flags);
-       printk_deferred_enter();
-
-       /* Fast path: look the stack trace up without full locking. */
-       found = find_stack(bucket, entries, nr_entries, hash);
-       if (found) {
-               if (depot_flags & STACK_DEPOT_FLAG_GET)
-                       refcount_inc(&found->count);
-               printk_deferred_exit();
-               read_unlock_irqrestore(&pool_rwlock, flags);
+       /* Fast path: look the stack trace up without locking. */
+       found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
+       if (found)
                goto exit;
-       }
-
-       /* Take note if another stack pool needs to be allocated. */
-       if (new_pool_required)
-               need_alloc = true;
-
-       printk_deferred_exit();
-       read_unlock_irqrestore(&pool_rwlock, flags);
 
        /*
         * Allocate memory for a new pool if required now:
         * we won't be able to do that under the lock.
         */
-       if (unlikely(can_alloc && need_alloc)) {
+       if (unlikely(can_alloc && READ_ONCE(new_pool_required))) {
                /*
                 * Zero out zone modifiers, as we don't have specific zone
                 * requirements. Keep the flags related to allocation in atomic
@@ -543,31 +674,36 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
                        prealloc = page_address(page);
        }
 
-       write_lock_irqsave(&pool_rwlock, flags);
+       raw_spin_lock_irqsave(&pool_lock, flags);
        printk_deferred_enter();
 
-       found = find_stack(bucket, entries, nr_entries, hash);
+       /* Try to find again, to avoid concurrently inserting duplicates. */
+       found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
        if (!found) {
                struct stack_record *new =
                        depot_alloc_stack(entries, nr_entries, hash, &prealloc);
 
                if (new) {
-                       list_add(&new->list, bucket);
+                       /*
+                        * This releases the stack record into the bucket and
+                        * makes it visible to readers in find_stack().
+                        */
+                       list_add_rcu(&new->hash_list, bucket);
                        found = new;
                }
-       } else {
-               if (depot_flags & STACK_DEPOT_FLAG_GET)
-                       refcount_inc(&found->count);
+       }
+
+       if (prealloc) {
                /*
-                * Stack depot already contains this stack trace, but let's
-                * keep the preallocated memory for future.
+                * Either stack depot already contains this stack trace, or
+                * depot_alloc_stack() did not consume the preallocated memory.
+                * Try to keep the preallocated memory for future.
                 */
-               if (prealloc)
-                       depot_keep_new_pool(&prealloc);
+               depot_keep_new_pool(&prealloc);
        }
 
        printk_deferred_exit();
-       write_unlock_irqrestore(&pool_rwlock, flags);
+       raw_spin_unlock_irqrestore(&pool_lock, flags);
 exit:
        if (prealloc) {
                /* Stack depot didn't use this memory, free it. */
@@ -592,7 +728,6 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
                               unsigned long **entries)
 {
        struct stack_record *stack;
-       unsigned long flags;
 
        *entries = NULL;
        /*
@@ -604,13 +739,13 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
        if (!handle || stack_depot_disabled)
                return 0;
 
-       read_lock_irqsave(&pool_rwlock, flags);
-       printk_deferred_enter();
-
        stack = depot_fetch_stack(handle);
-
-       printk_deferred_exit();
-       read_unlock_irqrestore(&pool_rwlock, flags);
+       /*
+        * Should never be NULL, otherwise this is a use-after-put (or just a
+        * corrupt handle).
+        */
+       if (WARN(!stack, "corrupt handle or use after stack_depot_put()"))
+               return 0;
 
        *entries = stack->entries;
        return stack->size;
@@ -620,29 +755,20 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
 void stack_depot_put(depot_stack_handle_t handle)
 {
        struct stack_record *stack;
-       unsigned long flags;
 
        if (!handle || stack_depot_disabled)
                return;
 
-       write_lock_irqsave(&pool_rwlock, flags);
-       printk_deferred_enter();
-
        stack = depot_fetch_stack(handle);
-       if (WARN_ON(!stack))
-               goto out;
-
-       if (refcount_dec_and_test(&stack->count)) {
-               /* Unlink stack from the hash table. */
-               list_del(&stack->list);
+       /*
+        * Should always be able to find the stack record, otherwise this is an
+        * unbalanced put attempt (or corrupt handle).
+        */
+       if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()"))
+               return;
 
-               /* Free stack. */
+       if (refcount_dec_and_test(&stack->count))
                depot_free_stack(stack);
-       }
-
-out:
-       printk_deferred_exit();
-       write_unlock_irqrestore(&pool_rwlock, flags);
 }
 EXPORT_SYMBOL_GPL(stack_depot_put);
 
@@ -690,3 +816,30 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
        return parts.extra;
 }
 EXPORT_SYMBOL(stack_depot_get_extra_bits);
+
+static int stats_show(struct seq_file *seq, void *v)
+{
+       /*
+        * data race ok: These are just statistics counters, and approximate
+        * statistics are ok for debugging.
+        */
+       seq_printf(seq, "pools: %d\n", data_race(pools_num));
+       for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
+               seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stats);
+
+static int depot_debugfs_init(void)
+{
+       struct dentry *dir;
+
+       if (stack_depot_disabled)
+               return 0;
+
+       dir = debugfs_create_dir("stackdepot", NULL);
+       debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
+       return 0;
+}
+late_initcall(depot_debugfs_init);
index be26623953d2e6ef96a41567ed65e5c99787b7fb..6891d15ce991c308f198659e980f9bc9d6522335 100644 (file)
@@ -103,21 +103,6 @@ char *strncpy(char *dest, const char *src, size_t count)
 EXPORT_SYMBOL(strncpy);
 #endif
 
-#ifndef __HAVE_ARCH_STRLCPY
-size_t strlcpy(char *dest, const char *src, size_t size)
-{
-       size_t ret = strlen(src);
-
-       if (size) {
-               size_t len = (ret >= size) ? size - 1 : ret;
-               __builtin_memcpy(dest, src, len);
-               dest[len] = '\0';
-       }
-       return ret;
-}
-EXPORT_SYMBOL(strlcpy);
-#endif
-
 #ifndef __HAVE_ARCH_STRSCPY
 ssize_t strscpy(char *dest, const char *src, size_t count)
 {
diff --git a/lib/test_fortify/write_overflow-strlcpy-src.c b/lib/test_fortify/write_overflow-strlcpy-src.c
deleted file mode 100644 (file)
index 91bf83e..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#define TEST   \
-       strlcpy(small, large_src, sizeof(small) + 1)
-
-#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strlcpy.c b/lib/test_fortify/write_overflow-strlcpy.c
deleted file mode 100644 (file)
index 1883db7..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#define TEST   \
-       strlcpy(instance.buf, large_src, sizeof(instance.buf) + 1)
-
-#include "test_fortify.h"
index 1902cfe4cc4f5075fce5ad9e6eb7c380d6087e20..ffc3a2ba3a8cd85e2e6d95606bcab1510ce0d679 100644 (file)
@@ -1258,6 +1258,9 @@ config LOCK_MM_AND_FIND_VMA
        bool
        depends on !STACK_GROWSUP
 
+config IOMMU_MM_DATA
+       bool
+
 source "mm/damon/Kconfig"
 
 endmenu
index 1e3447bccdb14d126b3c108fd27ab652b5a3a94f..e039d05304dd9ca52da735962c0ef951fb448ec5 100644 (file)
@@ -436,7 +436,6 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
        INIT_LIST_HEAD(&wb->work_list);
        INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
        INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
-       wb->dirty_sleep = jiffies;
 
        err = fprop_local_init_percpu(&wb->completions, gfp);
        if (err)
@@ -921,6 +920,7 @@ int bdi_init(struct backing_dev_info *bdi)
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->wb_list);
        init_waitqueue_head(&bdi->wb_waitq);
+       bdi->last_bdp_sleep = jiffies;
 
        return cgwb_bdi_init(bdi);
 }
index 27ada42924d59cdea7b06e0f55331280ff202398..4add68d40e8d99c72bd6af648510aadd587dddfc 100644 (file)
@@ -882,6 +882,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
        /* Time to isolate some pages for migration */
        for (; low_pfn < end_pfn; low_pfn++) {
+               bool is_dirty, is_unevictable;
 
                if (skip_on_failure && low_pfn >= next_skip_pfn) {
                        /*
@@ -1079,8 +1080,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                if (!folio_test_lru(folio))
                        goto isolate_fail_put;
 
+               is_unevictable = folio_test_unevictable(folio);
+
                /* Compaction might skip unevictable pages but CMA takes them */
-               if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio))
+               if (!(mode & ISOLATE_UNEVICTABLE) && is_unevictable)
                        goto isolate_fail_put;
 
                /*
@@ -1092,26 +1095,42 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
                        goto isolate_fail_put;
 
-               if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) {
-                       bool migrate_dirty;
+               is_dirty = folio_test_dirty(folio);
+
+               if (((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) ||
+                   (mapping && is_unevictable)) {
+                       bool migrate_dirty = true;
+                       bool is_unmovable;
 
                        /*
                         * Only folios without mappings or that have
-                        * a ->migrate_folio callback are possible to
-                        * migrate without blocking.  However, we may
-                        * be racing with truncation, which can free
-                        * the mapping.  Truncation holds the folio lock
-                        * until after the folio is removed from the page
-                        * cache so holding it ourselves is sufficient.
+                        * a ->migrate_folio callback are possible to migrate
+                        * without blocking.
+                        *
+                        * Folios from unmovable mappings are not migratable.
+                        *
+                        * However, we can be racing with truncation, which can
+                        * free the mapping that we need to check. Truncation
+                        * holds the folio lock until after the folio is removed
+                        * from the page so holding it ourselves is sufficient.
+                        *
+                        * To avoid locking the folio just to check unmovable,
+                        * assume every unmovable folio is also unevictable,
+                        * which is a cheaper test.  If our assumption goes
+                        * wrong, it's not a correctness bug, just potentially
+                        * wasted cycles.
                         */
                        if (!folio_trylock(folio))
                                goto isolate_fail_put;
 
                        mapping = folio_mapping(folio);
-                       migrate_dirty = !mapping ||
-                                       mapping->a_ops->migrate_folio;
+                       if ((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) {
+                               migrate_dirty = !mapping ||
+                                               mapping->a_ops->migrate_folio;
+                       }
+                       is_unmovable = mapping && mapping_unmovable(mapping);
                        folio_unlock(folio);
-                       if (!migrate_dirty)
+                       if (!migrate_dirty || is_unmovable)
                                goto isolate_fail_put;
                }
 
index 8dbaac6e5c2d05dc4bc7c2b7545be26d508afd9a..dd2fb512700920803b10621b82ffaa88bff30a92 100644 (file)
@@ -2194,7 +2194,7 @@ static void damos_tried_regions_init_upd_status(
                sysfs_regions->upd_timeout_jiffies = jiffies +
                        2 * usecs_to_jiffies(scheme->apply_interval_us ?
                                        scheme->apply_interval_us :
-                                       ctx->attrs.sample_interval);
+                                       ctx->attrs.aggr_interval);
        }
 }
 
index ea49677c63385af4a82981511384f63fc21e7c60..750e779c23db74730fa7743c2307d1b996729d62 100644 (file)
@@ -2688,6 +2688,7 @@ int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
 
        return filemap_write_and_wait_range(mapping, pos, end);
 }
+EXPORT_SYMBOL_GPL(kiocb_write_and_wait);
 
 int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
 {
@@ -2715,6 +2716,7 @@ int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
        return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
                                             end >> PAGE_SHIFT);
 }
+EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);
 
 /**
  * generic_file_read_iter - generic filesystem read routine
index 94ef5c02b459642f2625775bc66ca147cb2ac992..94c958f7ebb50dd925070157c0d0b2432dfc0483 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/page_owner.h>
 #include <linux/sched/sysctl.h>
 #include <linux/memory-tiers.h>
+#include <linux/compat.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -809,7 +810,10 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
 {
        loff_t off_end = off + len;
        loff_t off_align = round_up(off, size);
-       unsigned long len_pad, ret;
+       unsigned long len_pad, ret, off_sub;
+
+       if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
+               return 0;
 
        if (off_end <= off_align || (off_end - off_align) < size)
                return 0;
@@ -835,7 +839,13 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
        if (ret == addr)
                return addr;
 
-       ret += (off - ret) & (size - 1);
+       off_sub = (off - ret) & (size - 1);
+
+       if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown &&
+           !off_sub)
+               return ret + size;
+
+       ret += off_sub;
        return ret;
 }
 
@@ -2437,7 +2447,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        page = pmd_page(old_pmd);
                        folio = page_folio(page);
                        if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
-                               folio_set_dirty(folio);
+                               folio_mark_dirty(folio);
                        if (!folio_test_referenced(folio) && pmd_young(old_pmd))
                                folio_set_referenced(folio);
                        folio_remove_rmap_pmd(folio, page, vma);
@@ -3563,7 +3573,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
        }
 
        if (pmd_dirty(pmdval))
-               folio_set_dirty(folio);
+               folio_mark_dirty(folio);
        if (pmd_write(pmdval))
                entry = make_writable_migration_entry(page_to_pfn(page));
        else if (anon_exclusive)
index cfd367822cdd2ebe94181c118c1d14b73cc4b5e8..24c809379274503ac4f261fe7cfdbab3cb1ed1e7 100644 (file)
@@ -44,9 +44,6 @@ struct mm_struct init_mm = {
 #endif
        .user_ns        = &init_user_ns,
        .cpu_bitmap     = CPU_BITS_NONE,
-#ifdef CONFIG_IOMMU_SVA
-       .pasid          = IOMMU_PASID_INVALID,
-#endif
        INIT_MM_CONTEXT(init_mm)
 };
 
index 24c13dfb1e9478be88aee818321110b311b1761b..df6627f62402c01dab04e6955bf80e7fb4b4b2ae 100644 (file)
@@ -487,6 +487,7 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
                __memset(alloc_meta, 0, sizeof(*alloc_meta));
 
                /*
+                * Prepare the lock for saving auxiliary stack traces.
                 * Temporarily disable KASAN bug reporting to allow instrumented
                 * raw_spin_lock_init to access aux_lock, which resides inside
                 * of a redzone.
@@ -510,8 +511,13 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)
        stack_depot_put(meta->aux_stack[0]);
        stack_depot_put(meta->aux_stack[1]);
 
-       /* Zero out alloc meta to mark it as invalid. */
-       __memset(meta, 0, sizeof(*meta));
+       /*
+        * Zero out alloc meta to mark it as invalid but keep aux_lock
+        * initialized to avoid having to reinitialize it when another object
+        * is allocated in the same slot.
+        */
+       __memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
+       __memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
 }
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
index 912155a94ed5871c1805f33ec624c7c7c1ee28c8..cfa5e7288261189cb8242e5a0367fe6ffeebca12 100644 (file)
@@ -429,6 +429,7 @@ restart:
                if (++batch_count == SWAP_CLUSTER_MAX) {
                        batch_count = 0;
                        if (need_resched()) {
+                               arch_leave_lazy_mmu_mode();
                                pte_unmap_unlock(start_pte, ptl);
                                cond_resched();
                                goto restart;
index 8c194d8afeecbd841b326eccdedd01cac6bf4f88..4dcb2ee35eca856a43694f4402dea0c1c9bf6d8a 100644 (file)
@@ -1885,7 +1885,7 @@ int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
        int mid = memblock_search(type, PFN_PHYS(pfn));
 
        if (mid == -1)
-               return -1;
+               return NUMA_NO_NODE;
 
        *start_pfn = PFN_DOWN(type->regions[mid].base);
        *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
@@ -2176,6 +2176,9 @@ static void __init memmap_init_reserved_pages(void)
                        start = region->base;
                        end = start + region->size;
 
+                       if (nid == NUMA_NO_NODE || nid >= MAX_NUMNODES)
+                               nid = early_pfn_to_nid(PFN_DOWN(start));
+
                        reserve_bootmem_region(start, end, nid);
                }
        }
index e4c8735e7c85cf061a2ab31c9be250934c680879..1ed40f9d3a277ec8912c77326c5527a259a96c47 100644 (file)
@@ -621,6 +621,15 @@ static inline int memcg_events_index(enum vm_event_item idx)
 }
 
 struct memcg_vmstats_percpu {
+       /* Stats updates since the last flush */
+       unsigned int                    stats_updates;
+
+       /* Cached pointers for fast iteration in memcg_rstat_updated() */
+       struct memcg_vmstats_percpu     *parent;
+       struct memcg_vmstats            *vmstats;
+
+       /* The above should fit a single cacheline for memcg_rstat_updated() */
+
        /* Local (CPU and cgroup) page state & events */
        long                    state[MEMCG_NR_STAT];
        unsigned long           events[NR_MEMCG_EVENTS];
@@ -632,10 +641,7 @@ struct memcg_vmstats_percpu {
        /* Cgroup1: threshold notifications & softlimit tree updates */
        unsigned long           nr_page_events;
        unsigned long           targets[MEM_CGROUP_NTARGETS];
-
-       /* Stats updates since the last flush */
-       unsigned int            stats_updates;
-};
+} ____cacheline_aligned;
 
 struct memcg_vmstats {
        /* Aggregated (CPU and subtree) page state & events */
@@ -698,36 +704,35 @@ static void memcg_stats_unlock(void)
 }
 
 
-static bool memcg_should_flush_stats(struct mem_cgroup *memcg)
+static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 {
-       return atomic64_read(&memcg->vmstats->stats_updates) >
+       return atomic64_read(&vmstats->stats_updates) >
                MEMCG_CHARGE_BATCH * num_online_cpus();
 }
 
 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 {
+       struct memcg_vmstats_percpu *statc;
        int cpu = smp_processor_id();
-       unsigned int x;
 
        if (!val)
                return;
 
        cgroup_rstat_updated(memcg->css.cgroup, cpu);
-
-       for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-               x = __this_cpu_add_return(memcg->vmstats_percpu->stats_updates,
-                                         abs(val));
-
-               if (x < MEMCG_CHARGE_BATCH)
+       statc = this_cpu_ptr(memcg->vmstats_percpu);
+       for (; statc; statc = statc->parent) {
+               statc->stats_updates += abs(val);
+               if (statc->stats_updates < MEMCG_CHARGE_BATCH)
                        continue;
 
                /*
                 * If @memcg is already flush-able, increasing stats_updates is
                 * redundant. Avoid the overhead of the atomic update.
                 */
-               if (!memcg_should_flush_stats(memcg))
-                       atomic64_add(x, &memcg->vmstats->stats_updates);
-               __this_cpu_write(memcg->vmstats_percpu->stats_updates, 0);
+               if (!memcg_vmstats_needs_flush(statc->vmstats))
+                       atomic64_add(statc->stats_updates,
+                                    &statc->vmstats->stats_updates);
+               statc->stats_updates = 0;
        }
 }
 
@@ -756,7 +761,7 @@ void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
        if (!memcg)
                memcg = root_mem_cgroup;
 
-       if (memcg_should_flush_stats(memcg))
+       if (memcg_vmstats_needs_flush(memcg->vmstats))
                do_flush_stats(memcg);
 }
 
@@ -770,7 +775,7 @@ void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
 static void flush_memcg_stats_dwork(struct work_struct *w)
 {
        /*
-        * Deliberately ignore memcg_should_flush_stats() here so that flushing
+        * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
         * in latency-sensitive paths is as cheap as possible.
         */
        do_flush_stats(root_mem_cgroup);
@@ -2623,8 +2628,9 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
 }
 
 /*
- * Scheduled by try_charge() to be executed from the userland return path
- * and reclaims memory over the high limit.
+ * Reclaims memory over the high limit. Called directly from
+ * try_charge() (context permitting), as well as from the userland
+ * return path where reclaim is always able to block.
  */
 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
 {
@@ -2643,6 +2649,17 @@ void mem_cgroup_handle_over_high(gfp_t gfp_mask)
        current->memcg_nr_pages_over_high = 0;
 
 retry_reclaim:
+       /*
+        * Bail if the task is already exiting. Unlike memory.max,
+        * memory.high enforcement isn't as strict, and there is no
+        * OOM killer involved, which means the excess could already
+        * be much bigger (and still growing) than it could for
+        * memory.max; the dying task could get stuck in fruitless
+        * reclaim for a long time, which isn't desirable.
+        */
+       if (task_is_dying())
+               goto out;
+
        /*
         * The allocating task should reclaim at least the batch size, but for
         * subsequent retries we only want to do what's necessary to prevent oom
@@ -2693,6 +2710,9 @@ retry_reclaim:
        }
 
        /*
+        * Reclaim didn't manage to push usage below the limit, slow
+        * this allocating task down.
+        *
         * If we exit early, we're guaranteed to die (since
         * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
         * need to account for any ill-begotten jiffies to pay them off later.
@@ -2887,11 +2907,17 @@ done_restock:
                }
        } while ((memcg = parent_mem_cgroup(memcg)));
 
+       /*
+        * Reclaim is set up above to be called from the userland
+        * return path. But also attempt synchronous reclaim to avoid
+        * excessive overrun while the task is still inside the
+        * kernel. If this is successful, the return path will see it
+        * when it rechecks the overage and simply bail out.
+        */
        if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
            !(current->flags & PF_MEMALLOC) &&
-           gfpflags_allow_blocking(gfp_mask)) {
+           gfpflags_allow_blocking(gfp_mask))
                mem_cgroup_handle_over_high(gfp_mask);
-       }
        return 0;
 }
 
@@ -5456,10 +5482,11 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
        __mem_cgroup_free(memcg);
 }
 
-static struct mem_cgroup *mem_cgroup_alloc(void)
+static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 {
+       struct memcg_vmstats_percpu *statc, *pstatc;
        struct mem_cgroup *memcg;
-       int node;
+       int node, cpu;
        int __maybe_unused i;
        long error = -ENOMEM;
 
@@ -5483,6 +5510,14 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        if (!memcg->vmstats_percpu)
                goto fail;
 
+       for_each_possible_cpu(cpu) {
+               if (parent)
+                       pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
+               statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
+               statc->parent = parent ? pstatc : NULL;
+               statc->vmstats = memcg->vmstats;
+       }
+
        for_each_node(node)
                if (alloc_mem_cgroup_per_node_info(memcg, node))
                        goto fail;
@@ -5528,7 +5563,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        struct mem_cgroup *memcg, *old_memcg;
 
        old_memcg = set_active_memcg(parent);
-       memcg = mem_cgroup_alloc();
+       memcg = mem_cgroup_alloc(parent);
        set_active_memcg(old_memcg);
        if (IS_ERR(memcg))
                return ERR_CAST(memcg);
index 4f9b61f4a6682a530a202d02a6998c0b687906dd..9349948f1abfd120977706bbda23456999f057bc 100644 (file)
@@ -982,7 +982,7 @@ static bool has_extra_refcount(struct page_state *ps, struct page *p,
        int count = page_count(p) - 1;
 
        if (extra_pins)
-               count -= 1;
+               count -= folio_nr_pages(page_folio(p));
 
        if (count > 0) {
                pr_err("%#lx: %s still referenced by %d users\n",
@@ -1377,6 +1377,9 @@ void ClearPageHWPoisonTakenOff(struct page *page)
  */
 static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
 {
+       if (PageSlab(page))
+               return false;
+
        /* Soft offline could migrate non-LRU movable pages */
        if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
                return true;
index 8d5291add2bce6df6849165b725e73ab04dff557..5462d9e3c84c7d41af4e29f00c16cd00efb0ace3 100644 (file)
@@ -109,7 +109,7 @@ static struct demotion_nodes *node_demotion __read_mostly;
 static BLOCKING_NOTIFIER_HEAD(mt_adistance_algorithms);
 
 static bool default_dram_perf_error;
-static struct node_hmem_attrs default_dram_perf;
+static struct access_coordinate default_dram_perf;
 static int default_dram_perf_ref_nid = NUMA_NO_NODE;
 static const char *default_dram_perf_ref_source;
 
@@ -601,15 +601,15 @@ void clear_node_memory_type(int node, struct memory_dev_type *memtype)
 }
 EXPORT_SYMBOL_GPL(clear_node_memory_type);
 
-static void dump_hmem_attrs(struct node_hmem_attrs *attrs, const char *prefix)
+static void dump_hmem_attrs(struct access_coordinate *coord, const char *prefix)
 {
        pr_info(
 "%sread_latency: %u, write_latency: %u, read_bandwidth: %u, write_bandwidth: %u\n",
-               prefix, attrs->read_latency, attrs->write_latency,
-               attrs->read_bandwidth, attrs->write_bandwidth);
+               prefix, coord->read_latency, coord->write_latency,
+               coord->read_bandwidth, coord->write_bandwidth);
 }
 
-int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
+int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
                             const char *source)
 {
        int rc = 0;
@@ -666,7 +666,7 @@ out:
        return rc;
 }
 
-int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist)
+int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
 {
        if (default_dram_perf_error)
                return -EIO;
index 7e1f4849463aa3645a0eead97f40a90caf5e6d5f..15f8b10ea17c4f28e857009372f50bfd774001b9 100644 (file)
@@ -1464,7 +1464,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        delay_rmap = 0;
                        if (!folio_test_anon(folio)) {
                                if (pte_dirty(ptent)) {
-                                       folio_set_dirty(folio);
+                                       folio_mark_dirty(folio);
                                        if (tlb_delay_rmap(tlb)) {
                                                delay_rmap = 1;
                                                force_flush = 1;
@@ -5478,7 +5478,7 @@ static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs
                return true;
 
        if (regs && !user_mode(regs)) {
-               unsigned long ip = instruction_pointer(regs);
+               unsigned long ip = exception_ip(regs);
                if (!search_exception_tables(ip))
                        return false;
        }
@@ -5503,7 +5503,7 @@ static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_r
 {
        mmap_read_unlock(mm);
        if (regs && !user_mode(regs)) {
-               unsigned long ip = instruction_pointer(regs);
+               unsigned long ip = exception_ip(regs);
                if (!search_exception_tables(ip))
                        return false;
        }
index b3c0ff52bb72e7a92117358766fcc822dfada1e6..21890994c1d3cc6d407143218851ee48287d1114 100644 (file)
@@ -101,9 +101,11 @@ static int set_memmap_mode(const char *val, const struct kernel_param *kp)
 
 static int get_memmap_mode(char *buffer, const struct kernel_param *kp)
 {
-       if (*((int *)kp->arg) == MEMMAP_ON_MEMORY_FORCE)
-               return sprintf(buffer,  "force\n");
-       return param_get_bool(buffer, kp);
+       int mode = *((int *)kp->arg);
+
+       if (mode == MEMMAP_ON_MEMORY_FORCE)
+               return sprintf(buffer, "force\n");
+       return sprintf(buffer, "%c\n", mode ? 'Y' : 'N');
 }
 
 static const struct kernel_param_ops memmap_mode_ops = {
index bde8273cf15b16fad592255c8132a7d4a22c1d89..cc9f2bcd73b492aebacab4b812a515cf7e70b92b 100644 (file)
@@ -962,6 +962,8 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 
                if (!mapping)
                        rc = migrate_folio(mapping, dst, src, mode);
+               else if (mapping_unmovable(mapping))
+                       rc = -EOPNOTSUPP;
                else if (mapping->a_ops->migrate_folio)
                        /*
                         * Most folios have a mapping and most filesystems
index 89dc29f1e6c6fa3a29da0ff071779f0925c36dbf..2c19f5515e36c47eb3bb23f02d9c7c2c1c646393 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/pgtable.h>
 #include <linux/swap.h>
 #include <linux/cma.h>
+#include <linux/crash_dump.h>
 #include "internal.h"
 #include "slab.h"
 #include "shuffle.h"
@@ -381,6 +382,11 @@ static void __init find_zone_movable_pfns_for_nodes(void)
                        goto out;
                }
 
+               if (is_kdump_kernel()) {
+                       pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
+                       goto out;
+               }
+
                for_each_mem_region(r) {
                        if (memblock_is_mirror(r))
                                continue;
index b78e83d351d2864a6a339059ac734b6602eb5824..d89770eaab6b6111117783ca7ff532871c1d71a5 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1825,15 +1825,17 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                /*
                 * mmap_region() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge.
-                * do_mmap() will clear pgoff, so match alignment.
                 */
-               pgoff = 0;
                get_area = shmem_get_unmapped_area;
        } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                /* Ensures that larger anonymous mappings are THP aligned. */
                get_area = thp_get_unmapped_area;
        }
 
+       /* Always treat pgoff as zero for anonymous memory. */
+       if (!file)
+               pgoff = 0;
+
        addr = get_area(file, addr, len, pgoff, flags);
        if (IS_ERR_VALUE(addr))
                return addr;
index cd4e4ae77c40ae0497efeaa8fb391f6550e51a4b..3f255534986a2fda07e2d35187bb385f64749c5c 100644 (file)
@@ -1638,7 +1638,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
         */
        dtc->wb_thresh = __wb_calc_thresh(dtc);
        dtc->wb_bg_thresh = dtc->thresh ?
-               div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+               div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
 
        /*
         * In order to avoid the stacked BDI deadlock we need
@@ -1921,7 +1921,7 @@ pause:
                        break;
                }
                __set_current_state(TASK_KILLABLE);
-               wb->dirty_sleep = now;
+               bdi->last_bdp_sleep = jiffies;
                io_schedule_timeout(pause);
 
                current->dirty_paused_when = now + pause;
index 7b97d31df76766f830c1da97427d54d81cea319b..4e11fc1e6deff0b2af1ee6be6b8de6bd1e7a18e2 100644 (file)
@@ -3333,13 +3333,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t
                if (rc < 0)
                        panic("failed to map percpu area, err=%d\n", rc);
 
-               /*
-                * FIXME: Archs with virtual cache should flush local
-                * cache for the linear mapping here - something
-                * equivalent to flush_cache_vmap() on the local cpu.
-                * flush_cache_vmap() can't be used as most supporting
-                * data structures are not set up yet.
-                */
+               flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
 
                /* copy static data */
                memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
index 23620c57c1225bef9e3e1193a7163c36a916951f..2648ec4f04947b2e837377da68d7b8ae1fd48f7a 100644 (file)
@@ -469,7 +469,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
 
        if (!folio)
                return -ENOMEM;
-       mark = round_up(mark, 1UL << order);
+       mark = round_down(mark, 1UL << order);
        if (index == mark)
                folio_set_readahead(folio);
        err = filemap_add_folio(ractl->mapping, folio, index, gfp);
@@ -575,7 +575,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
         * It's the expected callback index, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
-       expected = round_up(ra->start + ra->size - ra->async_size,
+       expected = round_down(ra->start + ra->size - ra->async_size,
                        1UL << order);
        if (index == expected || index == (ra->start + ra->size)) {
                ra->start += ra->size;
index 216ab4c8621f6ba38e480ea791e7fb0283cf4944..7cf7d43842590ccd99bf37795918a7054b61a8c4 100644 (file)
@@ -357,6 +357,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
                                              unsigned long dst_start,
                                              unsigned long src_start,
                                              unsigned long len,
+                                             atomic_t *mmap_changing,
                                              uffd_flags_t flags)
 {
        struct mm_struct *dst_mm = dst_vma->vm_mm;
@@ -472,6 +473,15 @@ retry:
                                goto out;
                        }
                        mmap_read_lock(dst_mm);
+                       /*
+                        * If memory mappings are changing because of non-cooperative
+                        * operation (e.g. mremap) running in parallel, bail out and
+                        * request the user to retry later
+                        */
+                       if (mmap_changing && atomic_read(mmap_changing)) {
+                               err = -EAGAIN;
+                               break;
+                       }
 
                        dst_vma = NULL;
                        goto retry;
@@ -506,6 +516,7 @@ extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
                                    unsigned long dst_start,
                                    unsigned long src_start,
                                    unsigned long len,
+                                   atomic_t *mmap_changing,
                                    uffd_flags_t flags);
 #endif /* CONFIG_HUGETLB_PAGE */
 
@@ -622,8 +633,8 @@ retry:
         * If this is a HUGETLB vma, pass off to appropriate routine
         */
        if (is_vm_hugetlb_page(dst_vma))
-               return  mfill_atomic_hugetlb(dst_vma, dst_start,
-                                            src_start, len, flags);
+               return  mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
+                                            len, mmap_changing, flags);
 
        if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
                goto out_unlock;
@@ -891,8 +902,8 @@ static int move_present_pte(struct mm_struct *mm,
 
        double_pt_lock(dst_ptl, src_ptl);
 
-       if (!pte_same(*src_pte, orig_src_pte) ||
-           !pte_same(*dst_pte, orig_dst_pte)) {
+       if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
+           !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
                err = -EAGAIN;
                goto out;
        }
@@ -935,8 +946,8 @@ static int move_swap_pte(struct mm_struct *mm,
 
        double_pt_lock(dst_ptl, src_ptl);
 
-       if (!pte_same(*src_pte, orig_src_pte) ||
-           !pte_same(*dst_pte, orig_dst_pte)) {
+       if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
+           !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
                double_pt_unlock(dst_ptl, src_ptl);
                return -EAGAIN;
        }
@@ -1005,7 +1016,7 @@ retry:
        }
 
        spin_lock(dst_ptl);
-       orig_dst_pte = *dst_pte;
+       orig_dst_pte = ptep_get(dst_pte);
        spin_unlock(dst_ptl);
        if (!pte_none(orig_dst_pte)) {
                err = -EEXIST;
@@ -1013,7 +1024,7 @@ retry:
        }
 
        spin_lock(src_ptl);
-       orig_src_pte = *src_pte;
+       orig_src_pte = ptep_get(src_pte);
        spin_unlock(src_ptl);
        if (pte_none(orig_src_pte)) {
                if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
@@ -1043,7 +1054,7 @@ retry:
                         * page isn't freed under us
                         */
                        spin_lock(src_ptl);
-                       if (!pte_same(orig_src_pte, *src_pte)) {
+                       if (!pte_same(orig_src_pte, ptep_get(src_pte))) {
                                spin_unlock(src_ptl);
                                err = -EAGAIN;
                                goto out;
@@ -1393,6 +1404,12 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
                                err = -ENOENT;
                                break;
                        }
+                       /* Avoid moving zeropages for now */
+                       if (is_huge_zero_pmd(*src_pmd)) {
+                               spin_unlock(ptl);
+                               err = -EBUSY;
+                               break;
+                       }
 
                        /* Check if we can move the pmd without splitting it. */
                        if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
index ca25b676048ea6d0b399661e9ebca137585f8dbd..350dd2fc815994739d2012e0bcf483445350bb88 100644 (file)
@@ -536,10 +536,6 @@ static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
  */
 static void zswap_free_entry(struct zswap_entry *entry)
 {
-       if (entry->objcg) {
-               obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
-               obj_cgroup_put(entry->objcg);
-       }
        if (!entry->length)
                atomic_dec(&zswap_same_filled_pages);
        else {
@@ -548,6 +544,10 @@ static void zswap_free_entry(struct zswap_entry *entry)
                atomic_dec(&entry->pool->nr_stored);
                zswap_pool_put(entry->pool);
        }
+       if (entry->objcg) {
+               obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
+               obj_cgroup_put(entry->objcg);
+       }
        zswap_entry_cache_free(entry);
        atomic_dec(&zswap_stored_pages);
        zswap_update_total_size();
@@ -895,10 +895,8 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
                 * into the warmer region. We should terminate shrinking (if we're in the dynamic
                 * shrinker context).
                 */
-               if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
-                       ret = LRU_SKIP;
+               if (writeback_result == -EEXIST && encountered_page_in_swapcache)
                        *encountered_page_in_swapcache = true;
-               }
 
                goto put_unlock;
        }
index 214532173536b790cf032615f73fb3d868d2aae1..a3b68243fd4b18492220339f8a2151598cf6e98a 100644 (file)
@@ -118,12 +118,16 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
        }
        if (data[IFLA_VLAN_INGRESS_QOS]) {
                nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
+                       if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
+                               continue;
                        m = nla_data(attr);
                        vlan_dev_set_ingress_priority(dev, m->to, m->from);
                }
        }
        if (data[IFLA_VLAN_EGRESS_QOS]) {
                nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
+                       if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
+                               continue;
                        m = nla_data(attr);
                        err = vlan_dev_set_egress_priority(dev, m->from, m->to);
                        if (err)
index d982daea832927d38474f8d46764a82f87a09659..14088c4ff2f66f9049858598f30dc0069c27fb70 100644 (file)
@@ -2175,6 +2175,7 @@ void batadv_mcast_free(struct batadv_priv *bat_priv)
        cancel_delayed_work_sync(&bat_priv->mcast.work);
 
        batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST_TRACKER, 1);
        batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
 
        /* safely calling outside of worker, as worker was canceled above */
@@ -2198,6 +2199,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
                                      BATADV_MCAST_WANT_NO_RTR4);
        batadv_mcast_want_rtr6_update(bat_priv, orig,
                                      BATADV_MCAST_WANT_NO_RTR6);
+       batadv_mcast_have_mc_ptype_update(bat_priv, orig,
+                                         BATADV_MCAST_HAVE_MC_PTYPE_CAPA);
 
        spin_unlock_bh(&orig->mcast_handler_lock);
 }
index 94ec913dfb76e84b711dd6f3f8333c2994de9b2e..69c75c041fe10aee4e11cde6e9d88cb994cd25c7 100644 (file)
@@ -1041,7 +1041,7 @@ static void rfcomm_tty_flush_buffer(struct tty_struct *tty)
        tty_wakeup(tty);
 }
 
-static void rfcomm_tty_send_xchar(struct tty_struct *tty, char ch)
+static void rfcomm_tty_send_xchar(struct tty_struct *tty, u8 ch)
 {
        BT_DBG("tty %p ch %c", tty, ch);
 }
index d7d021af102981255ba284d396826feb71ae20be..2d7b7324295885e7a5ee70dd63b5dffd9a9a8968 100644 (file)
@@ -1762,6 +1762,10 @@ static void br_ip6_multicast_querier_expired(struct timer_list *t)
 }
 #endif
 
+static void br_multicast_query_delay_expired(struct timer_list *t)
+{
+}
+
 static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
                                            struct br_ip *ip,
                                            struct sk_buff *skb)
@@ -3198,7 +3202,7 @@ br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
                                unsigned long max_delay)
 {
        if (!timer_pending(&query->timer))
-               query->delay_time = jiffies + max_delay;
+               mod_timer(&query->delay_timer, jiffies + max_delay);
 
        mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
 }
@@ -4041,13 +4045,11 @@ void br_multicast_ctx_init(struct net_bridge *br,
        brmctx->multicast_querier_interval = 255 * HZ;
        brmctx->multicast_membership_interval = 260 * HZ;
 
-       brmctx->ip4_other_query.delay_time = 0;
        brmctx->ip4_querier.port_ifidx = 0;
        seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
        brmctx->multicast_igmp_version = 2;
 #if IS_ENABLED(CONFIG_IPV6)
        brmctx->multicast_mld_version = 1;
-       brmctx->ip6_other_query.delay_time = 0;
        brmctx->ip6_querier.port_ifidx = 0;
        seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
 #endif
@@ -4056,6 +4058,8 @@ void br_multicast_ctx_init(struct net_bridge *br,
                    br_ip4_multicast_local_router_expired, 0);
        timer_setup(&brmctx->ip4_other_query.timer,
                    br_ip4_multicast_querier_expired, 0);
+       timer_setup(&brmctx->ip4_other_query.delay_timer,
+                   br_multicast_query_delay_expired, 0);
        timer_setup(&brmctx->ip4_own_query.timer,
                    br_ip4_multicast_query_expired, 0);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -4063,6 +4067,8 @@ void br_multicast_ctx_init(struct net_bridge *br,
                    br_ip6_multicast_local_router_expired, 0);
        timer_setup(&brmctx->ip6_other_query.timer,
                    br_ip6_multicast_querier_expired, 0);
+       timer_setup(&brmctx->ip6_other_query.delay_timer,
+                   br_multicast_query_delay_expired, 0);
        timer_setup(&brmctx->ip6_own_query.timer,
                    br_ip6_multicast_query_expired, 0);
 #endif
@@ -4197,10 +4203,12 @@ static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
 {
        del_timer_sync(&brmctx->ip4_mc_router_timer);
        del_timer_sync(&brmctx->ip4_other_query.timer);
+       del_timer_sync(&brmctx->ip4_other_query.delay_timer);
        del_timer_sync(&brmctx->ip4_own_query.timer);
 #if IS_ENABLED(CONFIG_IPV6)
        del_timer_sync(&brmctx->ip6_mc_router_timer);
        del_timer_sync(&brmctx->ip6_other_query.timer);
+       del_timer_sync(&brmctx->ip6_other_query.delay_timer);
        del_timer_sync(&brmctx->ip6_own_query.timer);
 #endif
 }
@@ -4643,13 +4651,15 @@ int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
        max_delay = brmctx->multicast_query_response_interval;
 
        if (!timer_pending(&brmctx->ip4_other_query.timer))
-               brmctx->ip4_other_query.delay_time = jiffies + max_delay;
+               mod_timer(&brmctx->ip4_other_query.delay_timer,
+                         jiffies + max_delay);
 
        br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (!timer_pending(&brmctx->ip6_other_query.timer))
-               brmctx->ip6_other_query.delay_time = jiffies + max_delay;
+               mod_timer(&brmctx->ip6_other_query.delay_timer,
+                         jiffies + max_delay);
 
        br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
 #endif
index 6adcb45bca75d9426a2952ff84d0fdb9a89b4c33..ed17208907578a231d283c04bd97ce48bebdffaa 100644 (file)
@@ -279,8 +279,17 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
 
                if ((READ_ONCE(neigh->nud_state) & NUD_CONNECTED) &&
                    READ_ONCE(neigh->hh.hh_len)) {
+                       struct net_device *br_indev;
+
+                       br_indev = nf_bridge_get_physindev(skb, net);
+                       if (!br_indev) {
+                               neigh_release(neigh);
+                               goto free_skb;
+                       }
+
                        neigh_hh_bridge(&neigh->hh, skb);
-                       skb->dev = nf_bridge->physindev;
+                       skb->dev = br_indev;
+
                        ret = br_handle_frame_finish(net, sk, skb);
                } else {
                        /* the neighbour function below overwrites the complete
@@ -352,12 +361,18 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
  */
 static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       struct net_device *dev = skb->dev;
+       struct net_device *dev = skb->dev, *br_indev;
        struct iphdr *iph = ip_hdr(skb);
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
        struct rtable *rt;
        int err;
 
+       br_indev = nf_bridge_get_physindev(skb, net);
+       if (!br_indev) {
+               kfree_skb(skb);
+               return 0;
+       }
+
        nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
 
        if (nf_bridge->pkt_otherhost) {
@@ -397,7 +412,7 @@ free_skb:
                } else {
                        if (skb_dst(skb)->dev == dev) {
 bridged_dnat:
-                               skb->dev = nf_bridge->physindev;
+                               skb->dev = br_indev;
                                nf_bridge_update_protocol(skb);
                                nf_bridge_push_encap_header(skb);
                                br_nf_hook_thresh(NF_BR_PRE_ROUTING,
@@ -410,7 +425,7 @@ bridged_dnat:
                        skb->pkt_type = PACKET_HOST;
                }
        } else {
-               rt = bridge_parent_rtable(nf_bridge->physindev);
+               rt = bridge_parent_rtable(br_indev);
                if (!rt) {
                        kfree_skb(skb);
                        return 0;
@@ -419,7 +434,7 @@ bridged_dnat:
                skb_dst_set_noref(skb, &rt->dst);
        }
 
-       skb->dev = nf_bridge->physindev;
+       skb->dev = br_indev;
        nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
        br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
@@ -456,7 +471,7 @@ struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
        }
 
        nf_bridge->in_prerouting = 1;
-       nf_bridge->physindev = skb->dev;
+       nf_bridge->physinif = skb->dev->ifindex;
        skb->dev = brnf_get_logical_dev(skb, skb->dev, net);
 
        if (skb->protocol == htons(ETH_P_8021Q))
@@ -553,7 +568,11 @@ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff
                if (skb->protocol == htons(ETH_P_IPV6))
                        nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
 
-               in = nf_bridge->physindev;
+               in = nf_bridge_get_physindev(skb, net);
+               if (!in) {
+                       kfree_skb(skb);
+                       return 0;
+               }
                if (nf_bridge->pkt_otherhost) {
                        skb->pkt_type = PACKET_OTHERHOST;
                        nf_bridge->pkt_otherhost = false;
@@ -899,6 +918,13 @@ static unsigned int ip_sabotage_in(void *priv,
 static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
 {
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+       struct net_device *br_indev;
+
+       br_indev = nf_bridge_get_physindev(skb, dev_net(skb->dev));
+       if (!br_indev) {
+               kfree_skb(skb);
+               return;
+       }
 
        skb_pull(skb, ETH_HLEN);
        nf_bridge->bridged_dnat = 0;
@@ -908,7 +934,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
        skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
                                       nf_bridge->neigh_header,
                                       ETH_HLEN - ETH_ALEN);
-       skb->dev = nf_bridge->physindev;
+       skb->dev = br_indev;
 
        nf_bridge->physoutdev = NULL;
        br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
index 2e24a743f91731cad6a8791e85af7f7aeadb8352..e0421eaa3abc78b8587d551c6e91682bba28c79d 100644 (file)
@@ -102,9 +102,15 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
 {
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
        struct rtable *rt;
-       struct net_device *dev = skb->dev;
+       struct net_device *dev = skb->dev, *br_indev;
        const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
 
+       br_indev = nf_bridge_get_physindev(skb, net);
+       if (!br_indev) {
+               kfree_skb(skb);
+               return 0;
+       }
+
        nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;
 
        if (nf_bridge->pkt_otherhost) {
@@ -122,7 +128,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
                }
 
                if (skb_dst(skb)->dev == dev) {
-                       skb->dev = nf_bridge->physindev;
+                       skb->dev = br_indev;
                        nf_bridge_update_protocol(skb);
                        nf_bridge_push_encap_header(skb);
                        br_nf_hook_thresh(NF_BR_PRE_ROUTING,
@@ -133,7 +139,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
                ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
                skb->pkt_type = PACKET_HOST;
        } else {
-               rt = bridge_parent_rtable(nf_bridge->physindev);
+               rt = bridge_parent_rtable(br_indev);
                if (!rt) {
                        kfree_skb(skb);
                        return 0;
@@ -142,7 +148,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
                skb_dst_set_noref(skb, &rt->dst);
        }
 
-       skb->dev = nf_bridge->physindev;
+       skb->dev = br_indev;
        nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
        br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb,
index b0a92c344722be6bf195d571bf1a26daf759dfca..86ea5e6689b5ce49a4b71b383893d2ef5b53d110 100644 (file)
@@ -78,7 +78,7 @@ struct bridge_mcast_own_query {
 /* other querier */
 struct bridge_mcast_other_query {
        struct timer_list               timer;
-       unsigned long                   delay_time;
+       struct timer_list               delay_timer;
 };
 
 /* selected querier */
@@ -1159,7 +1159,7 @@ __br_multicast_querier_exists(struct net_bridge_mcast *brmctx,
                own_querier_enabled = false;
        }
 
-       return time_is_before_jiffies(querier->delay_time) &&
+       return !timer_pending(&querier->delay_timer) &&
               (own_querier_enabled || timer_pending(&querier->timer));
 }
 
index f9a50d7f0d204639f821835d341bb87c13a80333..0cb61c76b9b87da0746294cb371bc62defec0f81 100644 (file)
@@ -160,8 +160,9 @@ static size_t sizeof_footer(struct ceph_connection *con)
 static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
 {
        /* Initialize data cursor if it's not a sparse read */
-       if (!msg->sparse_read)
-               ceph_msg_data_cursor_init(&msg->cursor, msg, data_len);
+       u64 len = msg->sparse_read_total ? : data_len;
+
+       ceph_msg_data_cursor_init(&msg->cursor, msg, len);
 }
 
 /*
@@ -991,7 +992,7 @@ static inline int read_partial_message_section(struct ceph_connection *con,
        return read_partial_message_chunk(con, section, sec_len, crc);
 }
 
-static int read_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
+static int read_partial_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
 {
        struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
        bool do_bounce = ceph_test_opt(from_msgr(con->msgr), RXBOUNCE);
@@ -1026,7 +1027,7 @@ static int read_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
        return 1;
 }
 
-static int read_sparse_msg_data(struct ceph_connection *con)
+static int read_partial_sparse_msg_data(struct ceph_connection *con)
 {
        struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
        bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
@@ -1036,31 +1037,31 @@ static int read_sparse_msg_data(struct ceph_connection *con)
        if (do_datacrc)
                crc = con->in_data_crc;
 
-       do {
+       while (cursor->total_resid) {
                if (con->v1.in_sr_kvec.iov_base)
                        ret = read_partial_message_chunk(con,
                                                         &con->v1.in_sr_kvec,
                                                         con->v1.in_sr_len,
                                                         &crc);
                else if (cursor->sr_resid > 0)
-                       ret = read_sparse_msg_extent(con, &crc);
-
-               if (ret <= 0) {
-                       if (do_datacrc)
-                               con->in_data_crc = crc;
-                       return ret;
-               }
+                       ret = read_partial_sparse_msg_extent(con, &crc);
+               if (ret <= 0)
+                       break;
 
                memset(&con->v1.in_sr_kvec, 0, sizeof(con->v1.in_sr_kvec));
                ret = con->ops->sparse_read(con, cursor,
                                (char **)&con->v1.in_sr_kvec.iov_base);
+               if (ret <= 0) {
+                       ret = ret ? ret : 1;  /* must return > 0 to indicate success */
+                       break;
+               }
                con->v1.in_sr_len = ret;
-       } while (ret > 0);
+       }
 
        if (do_datacrc)
                con->in_data_crc = crc;
 
-       return ret < 0 ? ret : 1;  /* must return > 0 to indicate success */
+       return ret;
 }
 
 static int read_partial_msg_data(struct ceph_connection *con)
@@ -1253,8 +1254,8 @@ static int read_partial_message(struct ceph_connection *con)
                if (!m->num_data_items)
                        return -EIO;
 
-               if (m->sparse_read)
-                       ret = read_sparse_msg_data(con);
+               if (m->sparse_read_total)
+                       ret = read_partial_sparse_msg_data(con);
                else if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE))
                        ret = read_partial_msg_data_bounce(con);
                else
index f8ec60e1aba3a112aaa024c235f0117297b9bf70..a0ca5414b333df92b3aa0085a95b928cdc0609a5 100644 (file)
@@ -1128,7 +1128,7 @@ static int decrypt_tail(struct ceph_connection *con)
        struct sg_table enc_sgt = {};
        struct sg_table sgt = {};
        struct page **pages = NULL;
-       bool sparse = con->in_msg->sparse_read;
+       bool sparse = !!con->in_msg->sparse_read_total;
        int dpos = 0;
        int tail_len;
        int ret;
@@ -2060,7 +2060,7 @@ static int prepare_read_tail_plain(struct ceph_connection *con)
        }
 
        if (data_len(msg)) {
-               if (msg->sparse_read)
+               if (msg->sparse_read_total)
                        con->v2.in_state = IN_S_PREPARE_SPARSE_DATA;
                else
                        con->v2.in_state = IN_S_PREPARE_READ_DATA;
index d3a759e052c81f066710a467cc9d9baf3dbf8e20..9d078b37fe0b9b085894be86db17053227de9a18 100644 (file)
@@ -5510,7 +5510,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
        }
 
        m = ceph_msg_get(req->r_reply);
-       m->sparse_read = (bool)srlen;
+       m->sparse_read_total = srlen;
 
        dout("get_reply tid %lld %p\n", tid, m);
 
@@ -5777,11 +5777,8 @@ static int prep_next_sparse_read(struct ceph_connection *con,
        }
 
        if (o->o_sparse_op_idx < 0) {
-               u64 srlen = sparse_data_requested(req);
-
-               dout("%s: [%d] starting new sparse read req. srlen=0x%llx\n",
-                    __func__, o->o_osd, srlen);
-               ceph_msg_data_cursor_init(cursor, con->in_msg, srlen);
+               dout("%s: [%d] starting new sparse read req\n",
+                    __func__, o->o_osd);
        } else {
                u64 end;
 
@@ -5850,8 +5847,6 @@ static inline void convert_extent_map(struct ceph_sparse_read *sr)
 }
 #endif
 
-#define MAX_EXTENTS 4096
-
 static int osd_sparse_read(struct ceph_connection *con,
                           struct ceph_msg_data_cursor *cursor,
                           char **pbuf)
@@ -5859,8 +5854,8 @@ static int osd_sparse_read(struct ceph_connection *con,
        struct ceph_osd *o = con->private;
        struct ceph_sparse_read *sr = &o->o_sparse_read;
        u32 count = sr->sr_count;
-       u64 eoff, elen;
-       int ret;
+       u64 eoff, elen, len = 0;
+       int i, ret;
 
        switch (sr->sr_state) {
        case CEPH_SPARSE_READ_HDR:
@@ -5882,23 +5877,16 @@ next_op:
 
                if (count > 0) {
                        if (!sr->sr_extent || count > sr->sr_ext_len) {
-                               /*
-                                * Apply a hard cap to the number of extents.
-                                * If we have more, assume something is wrong.
-                                */
-                               if (count > MAX_EXTENTS) {
-                                       dout("%s: OSD returned 0x%x extents in a single reply!\n",
-                                            __func__, count);
-                                       return -EREMOTEIO;
-                               }
-
                                /* no extent array provided, or too short */
                                kfree(sr->sr_extent);
                                sr->sr_extent = kmalloc_array(count,
                                                              sizeof(*sr->sr_extent),
                                                              GFP_NOIO);
-                               if (!sr->sr_extent)
+                               if (!sr->sr_extent) {
+                                       pr_err("%s: failed to allocate %u extents\n",
+                                              __func__, count);
                                        return -ENOMEM;
+                               }
                                sr->sr_ext_len = count;
                        }
                        ret = count * sizeof(*sr->sr_extent);
@@ -5912,8 +5900,20 @@ next_op:
                convert_extent_map(sr);
                ret = sizeof(sr->sr_datalen);
                *pbuf = (char *)&sr->sr_datalen;
-               sr->sr_state = CEPH_SPARSE_READ_DATA;
+               sr->sr_state = CEPH_SPARSE_READ_DATA_PRE;
                break;
+       case CEPH_SPARSE_READ_DATA_PRE:
+               /* Convert sr_datalen to host-endian */
+               sr->sr_datalen = le32_to_cpu((__force __le32)sr->sr_datalen);
+               for (i = 0; i < count; i++)
+                       len += sr->sr_extent[i].len;
+               if (sr->sr_datalen != len) {
+                       pr_warn_ratelimited("data len %u != extent len %llu\n",
+                                           sr->sr_datalen, len);
+                       return -EREMOTEIO;
+               }
+               sr->sr_state = CEPH_SPARSE_READ_DATA;
+               fallthrough;
        case CEPH_SPARSE_READ_DATA:
                if (sr->sr_index >= count) {
                        sr->sr_state = CEPH_SPARSE_READ_HDR;
index 103d46fa0eeb34af20b2d74b79f38e7424e25155..a8b625abe242c657dca8cd0188c236553757c6b2 100644 (file)
@@ -751,7 +751,7 @@ size_t memcpy_to_iter_csum(void *iter_to, size_t progress,
                           size_t len, void *from, void *priv2)
 {
        __wsum *csum = priv2;
-       __wsum next = csum_partial_copy_nocheck(from, iter_to, len);
+       __wsum next = csum_partial_copy_nocheck(from + progress, iter_to, len);
 
        *csum = csum_block_add(*csum, next, progress);
        return 0;
index f01a9b858347b41e88c25632d5d9524cbabba9a1..cb2dab0feee0abe758479a7a001342bf6613df08 100644 (file)
@@ -11551,6 +11551,7 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
 
 static void __net_exit default_device_exit_net(struct net *net)
 {
+       struct netdev_name_node *name_node, *tmp;
        struct net_device *dev, *aux;
        /*
         * Push all migratable network devices back to the
@@ -11573,6 +11574,14 @@ static void __net_exit default_device_exit_net(struct net *net)
                snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
                if (netdev_name_in_use(&init_net, fb_name))
                        snprintf(fb_name, IFNAMSIZ, "dev%%d");
+
+               netdev_for_each_altname_safe(dev, name_node, tmp)
+                       if (netdev_name_in_use(&init_net, name_node->name)) {
+                               netdev_name_node_del(name_node);
+                               synchronize_rcu();
+                               __netdev_name_node_alt_destroy(name_node);
+                       }
+
                err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
                        pr_emerg("%s: failed to move %s to init_net: %d\n",
index cf93e188785ba7f0fd6e9428762bf02105eb3154..7480b4c8429808378f7c5ec499c4f479d5a4b285 100644 (file)
@@ -63,6 +63,9 @@ int dev_change_name(struct net_device *dev, const char *newname);
 
 #define netdev_for_each_altname(dev, namenode)                         \
        list_for_each_entry((namenode), &(dev)->name_node->list, list)
+#define netdev_for_each_altname_safe(dev, namenode, next)              \
+       list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
+                                list)
 
 int netdev_name_node_alt_create(struct net_device *dev, const char *name);
 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
index 24061f29c9dd25bcf2e852471d0e8e394ff4121a..ef3e78b6a39c45b9487931e0b7fa438e722aac2e 100644 (file)
@@ -83,6 +83,7 @@
 #include <net/netfilter/nf_conntrack_bpf.h>
 #include <net/netkit.h>
 #include <linux/un.h>
+#include <net/xdp_sock_drv.h>
 
 #include "dev.h"
 
@@ -4092,10 +4093,46 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
        memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset);
        skb_frag_size_add(frag, offset);
        sinfo->xdp_frags_size += offset;
+       if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+               xsk_buff_get_tail(xdp)->data_end += offset;
 
        return 0;
 }
 
+static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
+                                  struct xdp_mem_info *mem_info, bool release)
+{
+       struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp);
+
+       if (release) {
+               xsk_buff_del_tail(zc_frag);
+               __xdp_return(NULL, mem_info, false, zc_frag);
+       } else {
+               zc_frag->data_end -= shrink;
+       }
+}
+
+static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
+                               int shrink)
+{
+       struct xdp_mem_info *mem_info = &xdp->rxq->mem;
+       bool release = skb_frag_size(frag) == shrink;
+
+       if (mem_info->type == MEM_TYPE_XSK_BUFF_POOL) {
+               bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release);
+               goto out;
+       }
+
+       if (release) {
+               struct page *page = skb_frag_page(frag);
+
+               __xdp_return(page_address(page), mem_info, false, NULL);
+       }
+
+out:
+       return release;
+}
+
 static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
 {
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -4110,12 +4147,7 @@ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
 
                len_free += shrink;
                offset -= shrink;
-
-               if (skb_frag_size(frag) == shrink) {
-                       struct page *page = skb_frag_page(frag);
-
-                       __xdp_return(page_address(page), &xdp->rxq->mem,
-                                    false, NULL);
+               if (bpf_xdp_shrink_data(xdp, frag, shrink)) {
                        n_frags_free++;
                } else {
                        skb_frag_size_sub(frag, shrink);
index f35c2e9984062ba4bed637eaeace4eb9e71dadc0..63de5c635842b6f9e6d92f2a28a69009e54ec68c 100644 (file)
@@ -33,9 +33,6 @@
 
 void reqsk_queue_alloc(struct request_sock_queue *queue)
 {
-       spin_lock_init(&queue->rskq_lock);
-
-       spin_lock_init(&queue->fastopenq.lock);
        queue->fastopenq.rskq_rst_head = NULL;
        queue->fastopenq.rskq_rst_tail = NULL;
        queue->fastopenq.qlen = 0;
index 5f6ed6da3cfc0b6e7a660760a9efe242b5478f12..f6f29eb03ec277a1ea17ccc220fa7624bf6db092 100644 (file)
@@ -2899,13 +2899,6 @@ static int do_setlink(const struct sk_buff *skb,
                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
        }
 
-       if (tb[IFLA_MASTER]) {
-               err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
-               if (err)
-                       goto errout;
-               status |= DO_SETLINK_MODIFIED;
-       }
-
        if (ifm->ifi_flags || ifm->ifi_change) {
                err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
                                       extack);
@@ -2913,6 +2906,13 @@ static int do_setlink(const struct sk_buff *skb,
                        goto errout;
        }
 
+       if (tb[IFLA_MASTER]) {
+               err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
+               if (err)
+                       goto errout;
+               status |= DO_SETLINK_MODIFIED;
+       }
+
        if (tb[IFLA_CARRIER]) {
                err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
                if (err)
index 158dbdebce6a3693deb63e557e856d9cdd7500ae..0a7f46c37f0cfc169e11377107c8342c229da0de 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/poll.h>
 #include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
@@ -4144,8 +4145,14 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
        struct sock *sk = p;
 
-       return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
-              sk_busy_loop_timeout(sk, start_time);
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               return true;
+
+       if (sk_is_udp(sk) &&
+           !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
+               return true;
+
+       return sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
 #endif /* CONFIG_NET_RX_BUSY_POLL */
index 4275a2bc6d8e062052a88503b731d9599ca55d2a..6a58342752b4690d1f13d19eb94ee1d44b9cda61 100644 (file)
@@ -46,7 +46,7 @@ struct devlink_rel {
                u32 obj_index;
                devlink_rel_notify_cb_t *notify_cb;
                devlink_rel_cleanup_cb_t *cleanup_cb;
-               struct work_struct notify_work;
+               struct delayed_work notify_work;
        } nested_in;
 };
 
@@ -70,7 +70,7 @@ static void __devlink_rel_put(struct devlink_rel *rel)
 static void devlink_rel_nested_in_notify_work(struct work_struct *work)
 {
        struct devlink_rel *rel = container_of(work, struct devlink_rel,
-                                              nested_in.notify_work);
+                                              nested_in.notify_work.work);
        struct devlink *devlink;
 
        devlink = devlinks_xa_get(rel->nested_in.devlink_index);
@@ -96,13 +96,13 @@ rel_put:
        return;
 
 reschedule_work:
-       schedule_work(&rel->nested_in.notify_work);
+       schedule_delayed_work(&rel->nested_in.notify_work, 1);
 }
 
 static void devlink_rel_nested_in_notify_work_schedule(struct devlink_rel *rel)
 {
        __devlink_rel_get(rel);
-       schedule_work(&rel->nested_in.notify_work);
+       schedule_delayed_work(&rel->nested_in.notify_work, 0);
 }
 
 static struct devlink_rel *devlink_rel_alloc(void)
@@ -123,8 +123,8 @@ static struct devlink_rel *devlink_rel_alloc(void)
        }
 
        refcount_set(&rel->refcount, 1);
-       INIT_WORK(&rel->nested_in.notify_work,
-                 &devlink_rel_nested_in_notify_work);
+       INIT_DELAYED_WORK(&rel->nested_in.notify_work,
+                         &devlink_rel_nested_in_notify_work);
        return rel;
 }
 
index 62e54e152ecf1fa601cb2cd755988c9ff97670af..78592912f657c934885077c900ef95b9d79aed4c 100644 (file)
@@ -674,7 +674,7 @@ static int devlink_port_function_validate(struct devlink_port *devlink_port,
                return -EOPNOTSUPP;
        }
        if (tb[DEVLINK_PORT_FN_ATTR_STATE] && !ops->port_fn_state_set) {
-               NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR],
+               NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FN_ATTR_STATE],
                                    "Function does not support state setting");
                return -EOPNOTSUPP;
        }
index b738a466e2dccb9dce8226a1b3c047ba93414b8c..b15e71cc342c7963c9c88951d5e058314dd15575 100644 (file)
@@ -2806,13 +2806,14 @@ EXPORT_SYMBOL_GPL(dsa_user_dev_check);
 static int dsa_user_changeupper(struct net_device *dev,
                                struct netdev_notifier_changeupper_info *info)
 {
-       struct dsa_port *dp = dsa_user_to_port(dev);
        struct netlink_ext_ack *extack;
        int err = NOTIFY_DONE;
+       struct dsa_port *dp;
 
        if (!dsa_user_dev_check(dev))
                return err;
 
+       dp = dsa_user_to_port(dev);
        extack = netdev_notifier_info_to_extack(&info->info);
 
        if (netif_is_bridge_master(info->upper_dev)) {
@@ -2865,11 +2866,13 @@ static int dsa_user_changeupper(struct net_device *dev,
 static int dsa_user_prechangeupper(struct net_device *dev,
                                   struct netdev_notifier_changeupper_info *info)
 {
-       struct dsa_port *dp = dsa_user_to_port(dev);
+       struct dsa_port *dp;
 
        if (!dsa_user_dev_check(dev))
                return NOTIFY_DONE;
 
+       dp = dsa_user_to_port(dev);
+
        if (netif_is_bridge_master(info->upper_dev) && !info->linking)
                dsa_port_pre_bridge_leave(dp, info->upper_dev);
        else if (netif_is_lag_master(info->upper_dev) && !info->linking)
index a79af8c25a07121baf0e66f30f9161c9cf7cdee6..b6cb101d7f19ef5038f6f0e10139b1e5ccfa4af5 100644 (file)
@@ -234,17 +234,20 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
        dev = req_info.dev;
 
        rtnl_lock();
+       ret = ethnl_ops_begin(dev);
+       if (ret < 0)
+               goto out_rtnl;
        ethnl_features_to_bitmap(old_active, dev->features);
        ethnl_features_to_bitmap(old_wanted, dev->wanted_features);
        ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT,
                                 tb[ETHTOOL_A_FEATURES_WANTED],
                                 netdev_features_strings, info->extack);
        if (ret < 0)
-               goto out_rtnl;
+               goto out_ops;
        if (ethnl_bitmap_to_features(req_mask) & ~NETIF_F_ETHTOOL_BITS) {
                GENL_SET_ERR_MSG(info, "attempt to change non-ethtool features");
                ret = -EINVAL;
-               goto out_rtnl;
+               goto out_ops;
        }
 
        /* set req_wanted bits not in req_mask from old_wanted */
@@ -281,6 +284,8 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
        if (mod)
                netdev_features_change(dev);
 
+out_ops:
+       ethnl_ops_complete(dev);
 out_rtnl:
        rtnl_unlock();
        ethnl_parse_header_dev_put(&req_info);
index 7ceb9ac6e7309372a5931f92c9b8adcc390af5f4..9d71b66183daf94e19945d75cfb5c33df6ce346c 100644 (file)
@@ -308,7 +308,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
 
        skb = hsr_init_skb(master);
        if (!skb) {
-               WARN_ONCE(1, "HSR: Could not send supervision frame\n");
+               netdev_warn_once(master->dev, "HSR: Could not send supervision frame\n");
                return;
        }
 
@@ -355,7 +355,7 @@ static void send_prp_supervision_frame(struct hsr_port *master,
 
        skb = hsr_init_skb(master);
        if (!skb) {
-               WARN_ONCE(1, "PRP: Could not send supervision frame\n");
+               netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n");
                return;
        }
 
index b099c315015096f3637f4770ee9a6553baac2637..cb83c8feb746535fe4126f4172ffbafaac58b0d5 100644 (file)
@@ -167,4 +167,5 @@ static void __exit hsr_exit(void)
 
 module_init(hsr_init);
 module_exit(hsr_exit);
+MODULE_DESCRIPTION("High-availability Seamless Redundancy (HSR) driver");
 MODULE_LICENSE("GPL");
index 835f4f9d98d25559fb8965a7531c6863448a55c2..a5a820ee2026691afdd5ca3255962b5116fca290 100644 (file)
@@ -330,6 +330,9 @@ lookup_protocol:
        if (INET_PROTOSW_REUSE & answer_flags)
                sk->sk_reuse = SK_CAN_REUSE;
 
+       if (INET_PROTOSW_ICSK & answer_flags)
+               inet_init_csk_locks(sk);
+
        inet = inet_sk(sk);
        inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
 
@@ -1625,10 +1628,12 @@ EXPORT_SYMBOL(inet_current_timestamp);
 
 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 {
-       if (sk->sk_family == AF_INET)
+       unsigned int family = READ_ONCE(sk->sk_family);
+
+       if (family == AF_INET)
                return ip_recv_error(sk, msg, len, addr_len);
 #if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == AF_INET6)
+       if (family == AF_INET6)
                return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
 #endif
        return -EINVAL;
index 8e2eb1793685ecd72da75bc841af12b90e85fcc7..459af1f8973958611c43936b0894f6154d23b99a 100644 (file)
@@ -727,6 +727,10 @@ out:
        }
        if (req)
                reqsk_put(req);
+
+       if (newsk)
+               inet_init_csk_locks(newsk);
+
        return newsk;
 out_err:
        newsk = NULL;
index b06f678b03a19b806fd14764a4caad60caf02919..41537d18eecfd6e1163aacc35e047c22468e04e6 100644 (file)
@@ -1287,6 +1287,12 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
        if (unlikely(!rt))
                return -EFAULT;
 
+       cork->fragsize = ip_sk_use_pmtu(sk) ?
+                        dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
+
+       if (!inetdev_valid_mtu(cork->fragsize))
+               return -ENETUNREACH;
+
        /*
         * setup for corking.
         */
@@ -1303,12 +1309,6 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
                cork->addr = ipc->addr;
        }
 
-       cork->fragsize = ip_sk_use_pmtu(sk) ?
-                        dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
-
-       if (!inetdev_valid_mtu(cork->fragsize))
-               return -ENETUNREACH;
-
        cork->gso_size = ipc->gso_size;
 
        cork->dst = &rt->dst;
index 7aa9dc0e6760df6c9980252854014ab6fdd1c3f7..21d2ffa919e98b41ed325f978ae573b9f25f4d71 100644 (file)
@@ -1363,12 +1363,13 @@ e_inval:
  * ipv4_pktinfo_prepare - transfer some info from rtable to skb
  * @sk: socket
  * @skb: buffer
+ * @drop_dst: if true, drops skb dst
  *
  * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
  * destination in skb->cb[] before dst drop.
  * This way, receiver doesn't make cache line misses to read rtable.
  */
-void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst)
 {
        struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
        bool prepare = inet_test_bit(PKTINFO, sk) ||
@@ -1397,7 +1398,8 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
                pktinfo->ipi_ifindex = 0;
                pktinfo->ipi_spec_dst.s_addr = 0;
        }
-       skb_dst_drop(skb);
+       if (drop_dst)
+               skb_dst_drop(skb);
 }
 
 int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
index 586b1b3e35b805d46158531ae8e7b49122abbaa7..80ccd6661aa32f2b60a720a18deec26e9e2cc18d 100644 (file)
@@ -332,7 +332,7 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
        };
        skb_reset_network_header(skb);
 
-       csum = csum_partial(icmp6h, len, 0);
+       csum = skb_checksum(skb, skb_transport_offset(skb), len, 0);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, len,
                                              IPPROTO_ICMPV6, csum);
 
index 9d6f59531b3a0b0bc082e1f1febf4568368580b9..3622298365105d99c0277f1c1616fb5fc63cdc2d 100644 (file)
@@ -1073,7 +1073,7 @@ static int ipmr_cache_report(const struct mr_table *mrt,
                msg = (struct igmpmsg *)skb_network_header(skb);
                msg->im_vif = vifi;
                msg->im_vif_hi = vifi >> 8;
-               ipv4_pktinfo_prepare(mroute_sk, pkt);
+               ipv4_pktinfo_prepare(mroute_sk, pkt, false);
                memcpy(skb->cb, pkt->cb, sizeof(skb->cb));
                /* Add our header */
                igmp = skb_put(skb, sizeof(struct igmphdr));
index f01b038fc1cda0257fb29df9a8832310378bd1fb..04504b2b51df562c2d4be27c16fdc294e442239b 100644 (file)
@@ -239,7 +239,6 @@ static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
 void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
                   int hook)
 {
-       struct net_device *br_indev __maybe_unused;
        struct sk_buff *nskb;
        struct iphdr *niph;
        const struct tcphdr *oth;
@@ -289,9 +288,13 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
         * build the eth header using the original destination's MAC as the
         * source, and send the RST packet directly.
         */
-       br_indev = nf_bridge_get_physindev(oldskb);
-       if (br_indev) {
+       if (nf_bridge_info_exists(oldskb)) {
                struct ethhdr *oeth = eth_hdr(oldskb);
+               struct net_device *br_indev;
+
+               br_indev = nf_bridge_get_physindev(oldskb, net);
+               if (!br_indev)
+                       goto free_nskb;
 
                nskb->dev = br_indev;
                niph->tot_len = htons(nskb->len);
index 27da9d7294c0b4fb9027bb7feb704063dc6302db..aea89326c69793f94bb8489cdf0c93b7524ba3fc 100644 (file)
@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        /* Charge it to the socket. */
 
-       ipv4_pktinfo_prepare(sk, skb);
+       ipv4_pktinfo_prepare(sk, skb, true);
        if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
                kfree_skb_reason(skb, reason);
                return NET_RX_DROP;
index 1baa484d21902d2492fc2830d960100dc09683bf..7e2481b9eae1b791e1ec65f39efa41837a9fcbd3 100644 (file)
@@ -722,6 +722,7 @@ void tcp_push(struct sock *sk, int flags, int mss_now,
                if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
                        set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
+                       smp_mb__after_atomic();
                }
                /* It is possible TX completion already happened
                 * before we set TSQ_THROTTLED.
@@ -1785,7 +1786,17 @@ static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
 
 static bool can_map_frag(const skb_frag_t *frag)
 {
-       return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag);
+       struct page *page;
+
+       if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag))
+               return false;
+
+       page = skb_frag_page(frag);
+
+       if (PageCompound(page) || page->mapping)
+               return false;
+
+       return true;
 }
 
 static int find_next_mappable_frag(const skb_frag_t *frag,
index 89e5a806b82e9c83b583d454e1b58b7838068f04..f631b0a21af4c7a520212c94ed0580f86d269ed2 100644 (file)
@@ -805,7 +805,7 @@ void udp_flush_pending_frames(struct sock *sk)
 
        if (up->pending) {
                up->len = 0;
-               up->pending = 0;
+               WRITE_ONCE(up->pending, 0);
                ip_flush_pending_frames(sk);
        }
 }
@@ -993,7 +993,7 @@ int udp_push_pending_frames(struct sock *sk)
 
 out:
        up->len = 0;
-       up->pending = 0;
+       WRITE_ONCE(up->pending, 0);
        return err;
 }
 EXPORT_SYMBOL(udp_push_pending_frames);
@@ -1070,7 +1070,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
 
        fl4 = &inet->cork.fl.u.ip4;
-       if (up->pending) {
+       if (READ_ONCE(up->pending)) {
                /*
                 * There are pending frames.
                 * The socket lock must be held while it's corked.
@@ -1269,7 +1269,7 @@ back_from_confirm:
        fl4->saddr = saddr;
        fl4->fl4_dport = dport;
        fl4->fl4_sport = inet->inet_sport;
-       up->pending = AF_INET;
+       WRITE_ONCE(up->pending, AF_INET);
 
 do_append_data:
        up->len += ulen;
@@ -1281,7 +1281,7 @@ do_append_data:
        else if (!corkreq)
                err = udp_push_pending_frames(sk);
        else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
-               up->pending = 0;
+               WRITE_ONCE(up->pending, 0);
        release_sock(sk);
 
 out:
@@ -1319,7 +1319,7 @@ void udp_splice_eof(struct socket *sock)
        struct sock *sk = sock->sk;
        struct udp_sock *up = udp_sk(sk);
 
-       if (!up->pending || udp_test_bit(CORK, sk))
+       if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
                return;
 
        lock_sock(sk);
@@ -2169,7 +2169,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 
        udp_csum_pull_header(skb);
 
-       ipv4_pktinfo_prepare(sk, skb);
+       ipv4_pktinfo_prepare(sk, skb, true);
        return __udp_queue_rcv_skb(sk, skb);
 
 csum_error:
@@ -3137,16 +3137,18 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
        struct bpf_udp_iter_state *iter = seq->private;
        struct udp_iter_state *state = &iter->state;
        struct net *net = seq_file_net(seq);
+       int resume_bucket, resume_offset;
        struct udp_table *udptable;
        unsigned int batch_sks = 0;
        bool resized = false;
        struct sock *sk;
 
+       resume_bucket = state->bucket;
+       resume_offset = iter->offset;
+
        /* The current batch is done, so advance the bucket. */
-       if (iter->st_bucket_done) {
+       if (iter->st_bucket_done)
                state->bucket++;
-               iter->offset = 0;
-       }
 
        udptable = udp_get_table_seq(seq, net);
 
@@ -3166,19 +3168,19 @@ again:
        for (; state->bucket <= udptable->mask; state->bucket++) {
                struct udp_hslot *hslot2 = &udptable->hash2[state->bucket];
 
-               if (hlist_empty(&hslot2->head)) {
-                       iter->offset = 0;
+               if (hlist_empty(&hslot2->head))
                        continue;
-               }
 
+               iter->offset = 0;
                spin_lock_bh(&hslot2->lock);
                udp_portaddr_for_each_entry(sk, &hslot2->head) {
                        if (seq_sk_match(seq, sk)) {
                                /* Resume from the last iterated socket at the
                                 * offset in the bucket before iterator was stopped.
                                 */
-                               if (iter->offset) {
-                                       --iter->offset;
+                               if (state->bucket == resume_bucket &&
+                                   iter->offset < resume_offset) {
+                                       ++iter->offset;
                                        continue;
                                }
                                if (iter->end_sk < iter->max_sk) {
@@ -3192,9 +3194,6 @@ again:
 
                if (iter->end_sk)
                        break;
-
-               /* Reset the current bucket's offset before moving to the next bucket. */
-               iter->offset = 0;
        }
 
        /* All done: no batch made. */
@@ -3213,7 +3212,6 @@ again:
                /* After allocating a larger batch, retry one more time to grab
                 * the whole bucket.
                 */
-               state->bucket--;
                goto again;
        }
 done:
index 507a8353a6bdb94cd5e83aad6efd877d84cfdc85..c008d21925d7f4afa31cc55deec0ccc321cdab04 100644 (file)
@@ -220,19 +220,26 @@ const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
 EXPORT_SYMBOL_GPL(ipv6_stub);
 
 /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
-const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
+const struct in6_addr in6addr_loopback __aligned(BITS_PER_LONG/8)
+       = IN6ADDR_LOOPBACK_INIT;
 EXPORT_SYMBOL(in6addr_loopback);
-const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
+const struct in6_addr in6addr_any __aligned(BITS_PER_LONG/8)
+       = IN6ADDR_ANY_INIT;
 EXPORT_SYMBOL(in6addr_any);
-const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+const struct in6_addr in6addr_linklocal_allnodes __aligned(BITS_PER_LONG/8)
+       = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
 EXPORT_SYMBOL(in6addr_linklocal_allnodes);
-const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_linklocal_allrouters __aligned(BITS_PER_LONG/8)
+       = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
 EXPORT_SYMBOL(in6addr_linklocal_allrouters);
-const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
+const struct in6_addr in6addr_interfacelocal_allnodes __aligned(BITS_PER_LONG/8)
+       = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
 EXPORT_SYMBOL(in6addr_interfacelocal_allnodes);
-const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_interfacelocal_allrouters __aligned(BITS_PER_LONG/8)
+       = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
 EXPORT_SYMBOL(in6addr_interfacelocal_allrouters);
-const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_sitelocal_allrouters __aligned(BITS_PER_LONG/8)
+       = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
 EXPORT_SYMBOL(in6addr_sitelocal_allrouters);
 
 static void snmp6_free_dev(struct inet6_dev *idev)
index 13a1833a4df52956431c5c2fefcb6af80e1a828f..959bfd9f6344f11241dd20246f92bd1d47ff565e 100644 (file)
@@ -199,6 +199,9 @@ lookup_protocol:
        if (INET_PROTOSW_REUSE & answer_flags)
                sk->sk_reuse = SK_CAN_REUSE;
 
+       if (INET_PROTOSW_ICSK & answer_flags)
+               inet_init_csk_locks(sk);
+
        inet = inet_sk(sk);
        inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
 
index 46c19bd4899011d53b4feb84e25013c01ddce701..9bbabf750a21e251d4e8f9e3059c707505f5ce32 100644 (file)
@@ -796,8 +796,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
                                                struct sk_buff *skb),
                         bool log_ecn_err)
 {
-       const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-       int err;
+       const struct ipv6hdr *ipv6h;
+       int nh, err;
 
        if ((!(tpi->flags & TUNNEL_CSUM) &&
             (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
@@ -829,7 +829,6 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
                        goto drop;
                }
 
-               ipv6h = ipv6_hdr(skb);
                skb->protocol = eth_type_trans(skb, tunnel->dev);
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        } else {
@@ -837,7 +836,23 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
                skb_reset_mac_header(skb);
        }
 
+       /* Save offset of outer header relative to skb->head,
+        * because we are going to reset the network header to the inner header
+        * and might change skb->head.
+        */
+       nh = skb_network_header(skb) - skb->head;
+
        skb_reset_network_header(skb);
+
+       if (!pskb_inet_may_pull(skb)) {
+               DEV_STATS_INC(tunnel->dev, rx_length_errors);
+               DEV_STATS_INC(tunnel->dev, rx_errors);
+               goto drop;
+       }
+
+       /* Get the outer header. */
+       ipv6h = (struct ipv6hdr *)(skb->head + nh);
+
        memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 
        __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
index b75d3c9d41bb5005af2d4e10fab58f157e9ea4fa..bc6e0a0bad3c12d641a1dc60a8c790a6e72b1b5f 100644 (file)
@@ -2722,8 +2722,12 @@ void ipv6_mc_down(struct inet6_dev *idev)
        synchronize_net();
        mld_query_stop_work(idev);
        mld_report_stop_work(idev);
+
+       mutex_lock(&idev->mc_lock);
        mld_ifc_stop_work(idev);
        mld_gq_stop_work(idev);
+       mutex_unlock(&idev->mc_lock);
+
        mld_dad_stop_work(idev);
 }
 
index d45bc54b7ea55d03ffbea6de9ef3db8c098c217f..196dd4ecb5e215f8a1de321bf249bec6fca6b97c 100644 (file)
@@ -278,7 +278,6 @@ static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in)
 void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
                    int hook)
 {
-       struct net_device *br_indev __maybe_unused;
        struct sk_buff *nskb;
        struct tcphdr _otcph;
        const struct tcphdr *otcph;
@@ -354,9 +353,15 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
         * build the eth header using the original destination's MAC as the
         * source, and send the RST packet directly.
         */
-       br_indev = nf_bridge_get_physindev(oldskb);
-       if (br_indev) {
+       if (nf_bridge_info_exists(oldskb)) {
                struct ethhdr *oeth = eth_hdr(oldskb);
+               struct net_device *br_indev;
+
+               br_indev = nf_bridge_get_physindev(oldskb, net);
+               if (!br_indev) {
+                       kfree_skb(nskb);
+                       return;
+               }
 
                nskb->dev = br_indev;
                nskb->protocol = htons(ETH_P_IPV6);
index 594e3f23c12909fe6f245bf31057278169cd85c5..3f2249b4cd5f6a594dd9768e29f20f0d9a57faed 100644 (file)
@@ -1135,7 +1135,7 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
                udp_flush_pending_frames(sk);
        else if (up->pending) {
                up->len = 0;
-               up->pending = 0;
+               WRITE_ONCE(up->pending, 0);
                ip6_flush_pending_frames(sk);
        }
 }
@@ -1313,7 +1313,7 @@ static int udp_v6_push_pending_frames(struct sock *sk)
                              &inet_sk(sk)->cork.base);
 out:
        up->len = 0;
-       up->pending = 0;
+       WRITE_ONCE(up->pending, 0);
        return err;
 }
 
@@ -1370,7 +1370,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                default:
                        return -EINVAL;
                }
-       } else if (!up->pending) {
+       } else if (!READ_ONCE(up->pending)) {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
                daddr = &sk->sk_v6_daddr;
@@ -1401,8 +1401,8 @@ do_udp_sendmsg:
                return -EMSGSIZE;
 
        getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
-       if (up->pending) {
-               if (up->pending == AF_INET)
+       if (READ_ONCE(up->pending)) {
+               if (READ_ONCE(up->pending) == AF_INET)
                        return udp_sendmsg(sk, msg, len);
                /*
                 * There are pending frames.
@@ -1593,7 +1593,7 @@ back_from_confirm:
                goto out;
        }
 
-       up->pending = AF_INET6;
+       WRITE_ONCE(up->pending, AF_INET6);
 
 do_append_data:
        if (ipc6.dontfrag < 0)
@@ -1607,7 +1607,7 @@ do_append_data:
        else if (!corkreq)
                err = udp_v6_push_pending_frames(sk);
        else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
-               up->pending = 0;
+               WRITE_ONCE(up->pending, 0);
 
        if (err > 0)
                err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
@@ -1648,7 +1648,7 @@ static void udpv6_splice_eof(struct socket *sock)
        struct sock *sk = sock->sk;
        struct udp_sock *up = udp_sk(sk);
 
-       if (!up->pending || udp_test_bit(CORK, sk))
+       if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
                return;
 
        lock_sock(sk);
index 9b06c380866b53bcb395bf255587279db025d11d..fde1140d899efc7ba02e6bc3998cb857ef30df14 100644 (file)
@@ -226,6 +226,8 @@ static int llc_ui_release(struct socket *sock)
        }
        netdev_put(llc->dev, &llc->dev_tracker);
        sock_put(sk);
+       sock_orphan(sk);
+       sock->sk = NULL;
        llc_sk_free(sk);
 out:
        return 0;
@@ -928,14 +930,15 @@ copy_uaddr:
  */
 static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
+       DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
        struct sock *sk = sock->sk;
        struct llc_sock *llc = llc_sk(sk);
-       DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
        int flags = msg->msg_flags;
        int noblock = flags & MSG_DONTWAIT;
+       int rc = -EINVAL, copied = 0, hdrlen, hh_len;
        struct sk_buff *skb = NULL;
+       struct net_device *dev;
        size_t size = 0;
-       int rc = -EINVAL, copied = 0, hdrlen;
 
        dprintk("%s: sending from %02X to %02X\n", __func__,
                llc->laddr.lsap, llc->daddr.lsap);
@@ -955,22 +958,29 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                if (rc)
                        goto out;
        }
-       hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
+       dev = llc->dev;
+       hh_len = LL_RESERVED_SPACE(dev);
+       hdrlen = llc_ui_header_len(sk, addr);
        size = hdrlen + len;
-       if (size > llc->dev->mtu)
-               size = llc->dev->mtu;
+       size = min_t(size_t, size, READ_ONCE(dev->mtu));
        copied = size - hdrlen;
        rc = -EINVAL;
        if (copied < 0)
                goto out;
        release_sock(sk);
-       skb = sock_alloc_send_skb(sk, size, noblock, &rc);
+       skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc);
        lock_sock(sk);
        if (!skb)
                goto out;
-       skb->dev      = llc->dev;
+       if (sock_flag(sk, SOCK_ZAPPED) ||
+           llc->dev != dev ||
+           hdrlen != llc_ui_header_len(sk, addr) ||
+           hh_len != LL_RESERVED_SPACE(dev) ||
+           size > READ_ONCE(dev->mtu))
+               goto out;
+       skb->dev      = dev;
        skb->protocol = llc_proto_type(addr->sllc_arphrd);
-       skb_reserve(skb, hdrlen);
+       skb_reserve(skb, hh_len + hdrlen);
        rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
        if (rc)
                goto out;
index 6e387aadffcecbec01d63aef4d6289bccc17f59e..4f16d9c88350b4481805c145887df23c681a159d 100644 (file)
@@ -135,22 +135,15 @@ static struct packet_type llc_packet_type __read_mostly = {
        .func = llc_rcv,
 };
 
-static struct packet_type llc_tr_packet_type __read_mostly = {
-       .type = cpu_to_be16(ETH_P_TR_802_2),
-       .func = llc_rcv,
-};
-
 static int __init llc_init(void)
 {
        dev_add_pack(&llc_packet_type);
-       dev_add_pack(&llc_tr_packet_type);
        return 0;
 }
 
 static void __exit llc_exit(void)
 {
        dev_remove_pack(&llc_packet_type);
-       dev_remove_pack(&llc_tr_packet_type);
 }
 
 module_init(llc_init);
index cb0291decf2e56c7d4111e649f41d28577af987e..13438cc0a6b139b6cb10c15ce894153706514811 100644 (file)
@@ -62,7 +62,6 @@ config MAC80211_KUNIT_TEST
        depends on KUNIT
        depends on MAC80211
        default KUNIT_ALL_TESTS
-       depends on !KERNEL_6_2
        help
          Enable this option to test mac80211 internals with kunit.
 
index 489dd97f51724a86053a9c4e9269487c4c7e928b..327682995c9260c9c7498ff9b322ecf5d59c6717 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2015  Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -987,7 +987,8 @@ static int
 ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata,
                                     struct cfg80211_unsol_bcast_probe_resp *params,
                                     struct ieee80211_link_data *link,
-                                    struct ieee80211_bss_conf *link_conf)
+                                    struct ieee80211_bss_conf *link_conf,
+                                    u64 *changed)
 {
        struct unsol_bcast_probe_resp_data *new, *old = NULL;
 
@@ -1011,7 +1012,8 @@ ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata,
                RCU_INIT_POINTER(link->u.ap.unsol_bcast_probe_resp, NULL);
        }
 
-       return BSS_CHANGED_UNSOL_BCAST_PROBE_RESP;
+       *changed |= BSS_CHANGED_UNSOL_BCAST_PROBE_RESP;
+       return 0;
 }
 
 static int ieee80211_set_ftm_responder_params(
@@ -1450,10 +1452,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 
        err = ieee80211_set_unsol_bcast_probe_resp(sdata,
                                                   &params->unsol_bcast_probe_resp,
-                                                  link, link_conf);
+                                                  link, link_conf, &changed);
        if (err < 0)
                goto error;
-       changed |= err;
 
        err = drv_start_ap(sdata->local, sdata, link_conf);
        if (err) {
@@ -1525,10 +1526,9 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
 
        err = ieee80211_set_unsol_bcast_probe_resp(sdata,
                                                   &params->unsol_bcast_probe_resp,
-                                                  link, link_conf);
+                                                  link, link_conf, &changed);
        if (err < 0)
                return err;
-       changed |= err;
 
        if (beacon->he_bss_color_valid &&
            beacon->he_bss_color.enabled != link_conf->he_bss_color.enabled) {
@@ -1869,6 +1869,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
                                              sband->band);
        }
 
+       ieee80211_sta_set_rx_nss(link_sta);
+
        return ret;
 }
 
index dce5606ed66da5a31a476aec16bb55412e1e72cc..68596ef78b15ee9596f6f81e8dd2d2f82c1d56cd 100644 (file)
@@ -997,8 +997,8 @@ static void add_link_files(struct ieee80211_link_data *link,
        }
 }
 
-void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata,
-                                 bool mld_vif)
+static void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata,
+                                        bool mld_vif)
 {
        char buf[10+IFNAMSIZ];
 
index b226b1aae88a5d4205c0206b351b63e6ee54c2a2..a02ec0a413f61468dded52076fbfef9a35da17b0 100644 (file)
@@ -11,8 +11,6 @@
 #include "ieee80211_i.h"
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata,
-                                 bool mld_vif);
 void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
 void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata);
 void ieee80211_debugfs_recreate_netdev(struct ieee80211_sub_if_data *sdata,
@@ -24,9 +22,6 @@ void ieee80211_link_debugfs_remove(struct ieee80211_link_data *link);
 void ieee80211_link_debugfs_drv_add(struct ieee80211_link_data *link);
 void ieee80211_link_debugfs_drv_remove(struct ieee80211_link_data *link);
 #else
-static inline void ieee80211_debugfs_add_netdev(
-       struct ieee80211_sub_if_data *sdata, bool mld_vif)
-{}
 static inline void ieee80211_debugfs_remove_netdev(
        struct ieee80211_sub_if_data *sdata)
 {}
index e4e7c0b38cb6efcbb65786d071f436e09c7bf322..11c4caa4748e4038a2c758e34ae8dbf762e8159e 100644 (file)
@@ -1783,7 +1783,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        /* need to do this after the switch so vif.type is correct */
        ieee80211_link_setup(&sdata->deflink);
 
-       ieee80211_debugfs_add_netdev(sdata, false);
+       ieee80211_debugfs_recreate_netdev(sdata, false);
 }
 
 static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
index 073105deb42481f2792a33fa0341d509f7a95017..2022a26eb8811492ef8029de9e89dfd4bbb5f101 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
  */
 
 #include <linux/delay.h>
@@ -2918,6 +2918,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        /* other links will be destroyed */
        sdata->deflink.u.mgd.bss = NULL;
+       sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
 
        netif_carrier_off(sdata->dev);
 
@@ -5045,9 +5046,6 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
        if (!link)
                return 0;
 
-       /* will change later if needed */
-       link->smps_mode = IEEE80211_SMPS_OFF;
-
        /*
         * If this fails (possibly due to channel context sharing
         * on incompatible channels, e.g. 80+80 and 160 sharing the
@@ -7096,6 +7094,7 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link)
        link->u.mgd.p2p_noa_index = -1;
        link->u.mgd.conn_flags = 0;
        link->conf->bssid = link->u.mgd.bssid;
+       link->smps_mode = IEEE80211_SMPS_OFF;
 
        wiphy_work_init(&link->u.mgd.request_smps_work,
                        ieee80211_request_smps_mgd_work);
@@ -7309,6 +7308,75 @@ out_err:
        return err;
 }
 
+static bool ieee80211_mgd_csa_present(struct ieee80211_sub_if_data *sdata,
+                                     const struct cfg80211_bss_ies *ies,
+                                     u8 cur_channel, bool ignore_ecsa)
+{
+       const struct element *csa_elem, *ecsa_elem;
+       struct ieee80211_channel_sw_ie *csa = NULL;
+       struct ieee80211_ext_chansw_ie *ecsa = NULL;
+
+       if (!ies)
+               return false;
+
+       csa_elem = cfg80211_find_elem(WLAN_EID_CHANNEL_SWITCH,
+                                     ies->data, ies->len);
+       if (csa_elem && csa_elem->datalen == sizeof(*csa))
+               csa = (void *)csa_elem->data;
+
+       ecsa_elem = cfg80211_find_elem(WLAN_EID_EXT_CHANSWITCH_ANN,
+                                      ies->data, ies->len);
+       if (ecsa_elem && ecsa_elem->datalen == sizeof(*ecsa))
+               ecsa = (void *)ecsa_elem->data;
+
+       if (csa && csa->count == 0)
+               csa = NULL;
+       if (csa && !csa->mode && csa->new_ch_num == cur_channel)
+               csa = NULL;
+
+       if (ecsa && ecsa->count == 0)
+               ecsa = NULL;
+       if (ecsa && !ecsa->mode && ecsa->new_ch_num == cur_channel)
+               ecsa = NULL;
+
+       if (ignore_ecsa && ecsa) {
+               sdata_info(sdata,
+                          "Ignoring ECSA in probe response - was considered stuck!\n");
+               return csa;
+       }
+
+       return csa || ecsa;
+}
+
+static bool ieee80211_mgd_csa_in_process(struct ieee80211_sub_if_data *sdata,
+                                        struct cfg80211_bss *bss)
+{
+       u8 cur_channel;
+       bool ret;
+
+       cur_channel = ieee80211_frequency_to_channel(bss->channel->center_freq);
+
+       rcu_read_lock();
+       if (ieee80211_mgd_csa_present(sdata,
+                                     rcu_dereference(bss->beacon_ies),
+                                     cur_channel, false)) {
+               ret = true;
+               goto out;
+       }
+
+       if (ieee80211_mgd_csa_present(sdata,
+                                     rcu_dereference(bss->proberesp_ies),
+                                     cur_channel, bss->proberesp_ecsa_stuck)) {
+               ret = true;
+               goto out;
+       }
+
+       ret = false;
+out:
+       rcu_read_unlock();
+       return ret;
+}
+
 /* config hooks */
 int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
                       struct cfg80211_auth_request *req)
@@ -7317,7 +7385,6 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_mgd_auth_data *auth_data;
        struct ieee80211_link_data *link;
-       const struct element *csa_elem, *ecsa_elem;
        u16 auth_alg;
        int err;
        bool cont_auth;
@@ -7360,21 +7427,10 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        if (ifmgd->assoc_data)
                return -EBUSY;
 
-       rcu_read_lock();
-       csa_elem = ieee80211_bss_get_elem(req->bss, WLAN_EID_CHANNEL_SWITCH);
-       ecsa_elem = ieee80211_bss_get_elem(req->bss,
-                                          WLAN_EID_EXT_CHANSWITCH_ANN);
-       if ((csa_elem &&
-            csa_elem->datalen == sizeof(struct ieee80211_channel_sw_ie) &&
-            ((struct ieee80211_channel_sw_ie *)csa_elem->data)->count != 0) ||
-           (ecsa_elem &&
-            ecsa_elem->datalen == sizeof(struct ieee80211_ext_chansw_ie) &&
-            ((struct ieee80211_ext_chansw_ie *)ecsa_elem->data)->count != 0)) {
-               rcu_read_unlock();
+       if (ieee80211_mgd_csa_in_process(sdata, req->bss)) {
                sdata_info(sdata, "AP is in CSA process, reject auth\n");
                return -EINVAL;
        }
-       rcu_read_unlock();
 
        auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
                            req->ie_len, GFP_KERNEL);
@@ -7684,7 +7740,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_mgd_assoc_data *assoc_data;
-       const struct element *ssid_elem, *csa_elem, *ecsa_elem;
+       const struct element *ssid_elem;
        struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
        ieee80211_conn_flags_t conn_flags = 0;
        struct ieee80211_link_data *link;
@@ -7707,23 +7763,15 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
        cbss = req->link_id < 0 ? req->bss : req->links[req->link_id].bss;
 
-       rcu_read_lock();
-       ssid_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID);
-       if (!ssid_elem || ssid_elem->datalen > sizeof(assoc_data->ssid)) {
-               rcu_read_unlock();
+       if (ieee80211_mgd_csa_in_process(sdata, cbss)) {
+               sdata_info(sdata, "AP is in CSA process, reject assoc\n");
                kfree(assoc_data);
                return -EINVAL;
        }
 
-       csa_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_CHANNEL_SWITCH);
-       ecsa_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_EXT_CHANSWITCH_ANN);
-       if ((csa_elem &&
-            csa_elem->datalen == sizeof(struct ieee80211_channel_sw_ie) &&
-            ((struct ieee80211_channel_sw_ie *)csa_elem->data)->count != 0) ||
-           (ecsa_elem &&
-            ecsa_elem->datalen == sizeof(struct ieee80211_ext_chansw_ie) &&
-            ((struct ieee80211_ext_chansw_ie *)ecsa_elem->data)->count != 0)) {
-               sdata_info(sdata, "AP is in CSA process, reject assoc\n");
+       rcu_read_lock();
+       ssid_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID);
+       if (!ssid_elem || ssid_elem->datalen > sizeof(assoc_data->ssid)) {
                rcu_read_unlock();
                kfree(assoc_data);
                return -EINVAL;
@@ -7998,8 +8046,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
                rcu_read_lock();
                beacon_ies = rcu_dereference(req->bss->beacon_ies);
-
-               if (beacon_ies) {
+               if (!beacon_ies) {
                        /*
                         * Wait up to one beacon interval ...
                         * should this be more if we miss one?
@@ -8080,6 +8127,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                ieee80211_report_disconnect(sdata, frame_buf,
                                            sizeof(frame_buf), true,
                                            req->reason_code, false);
+               drv_mgd_complete_tx(sdata->local, sdata, &info);
                return 0;
        }
 
index 645355e5f1bc7baba435db18c0c8d8243a4649c6..f9d5842601fa9433ba0303f3b6572129b3e2f9fe 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2013-2015  Intel Mobile Communications GmbH
  * Copyright 2016-2017  Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  */
 
 #include <linux/if_arp.h>
@@ -237,14 +237,18 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
 }
 
 static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata,
+                                       struct ieee80211_channel *channel,
                                        u32 scan_flags, const u8 *da)
 {
        if (!sdata)
                return false;
-       /* accept broadcast for OCE */
-       if (scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP &&
-           is_broadcast_ether_addr(da))
+
+       /* accept broadcast on 6 GHz and for OCE */
+       if (is_broadcast_ether_addr(da) &&
+           (channel->band == NL80211_BAND_6GHZ ||
+            scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP))
                return true;
+
        if (scan_flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
                return true;
        return ether_addr_equal(da, sdata->vif.addr);
@@ -293,6 +297,12 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
                wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
        }
 
+       channel = ieee80211_get_channel_khz(local->hw.wiphy,
+                                           ieee80211_rx_status_to_khz(rx_status));
+
+       if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
+               return;
+
        if (ieee80211_is_probe_resp(mgmt->frame_control)) {
                struct cfg80211_scan_request *scan_req;
                struct cfg80211_sched_scan_request *sched_scan_req;
@@ -310,19 +320,15 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
                /* ignore ProbeResp to foreign address or non-bcast (OCE)
                 * unless scanning with randomised address
                 */
-               if (!ieee80211_scan_accept_presp(sdata1, scan_req_flags,
+               if (!ieee80211_scan_accept_presp(sdata1, channel,
+                                                scan_req_flags,
                                                 mgmt->da) &&
-                   !ieee80211_scan_accept_presp(sdata2, sched_scan_req_flags,
+                   !ieee80211_scan_accept_presp(sdata2, channel,
+                                                sched_scan_req_flags,
                                                 mgmt->da))
                        return;
        }
 
-       channel = ieee80211_get_channel_khz(local->hw.wiphy,
-                                       ieee80211_rx_status_to_khz(rx_status));
-
-       if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
-               return;
-
        bss = ieee80211_bss_info_update(local, rx_status,
                                        mgmt, skb->len,
                                        channel);
index bf1adcd96b411327ba79b3bdc6734df1afd605ca..4391d8dd634bb557771dcc07c11bab296c5a18f3 100644 (file)
@@ -404,7 +404,10 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
-               if (!(sta->sta.valid_links & BIT(i)))
+               struct link_sta_info *link_sta;
+
+               link_sta = rcu_access_pointer(sta->link[i]);
+               if (!link_sta)
                        continue;
 
                sta_remove_link(sta, i, false);
@@ -910,6 +913,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        if (ieee80211_vif_is_mesh(&sdata->vif))
                mesh_accept_plinks_update(sdata);
 
+       ieee80211_check_fast_xmit(sta);
+
        return 0;
  out_remove:
        if (sta->sta.valid_links)
index 314998fdb1a5a4853f84a90edf2ba2312933719a..e448ab33844896881bfbf1f5283a2f77ae5ee4e8 100644 (file)
@@ -3048,7 +3048,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
            sdata->vif.type == NL80211_IFTYPE_STATION)
                goto out;
 
-       if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+       if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded)
                goto out;
 
        if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
@@ -3100,10 +3100,11 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
                        /* DA SA BSSID */
                        build.da_offs = offsetof(struct ieee80211_hdr, addr1);
                        build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+                       rcu_read_lock();
                        link = rcu_dereference(sdata->link[tdls_link_id]);
-                       if (WARN_ON_ONCE(!link))
-                               break;
-                       memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN);
+                       if (!WARN_ON_ONCE(!link))
+                               memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN);
+                       rcu_read_unlock();
                        build.hdr_len = 24;
                        break;
                }
index a05c5b971789c796658a04e0a41a3748fdce75da..3a8612309137312f88dfe6bad79ac854fe76fcc1 100644 (file)
@@ -23,8 +23,6 @@ void ieee80211_check_wbrf_support(struct ieee80211_local *local)
                return;
 
        local->wbrf_supported = acpi_amd_wbrf_supported_producer(dev);
-       dev_dbg(dev, "WBRF is %s supported\n",
-               local->wbrf_supported ? "" : "not");
 }
 
 static void get_chan_freq_boundary(u32 center_freq, u32 bandwidth, u64 *start, u64 *end)
index c53914012d01d38c2dc0a3578bf3651595956e72..d2527d189a799319c068a5b76a5816cc7a905861 100644 (file)
@@ -123,8 +123,8 @@ static void mptcp_parse_option(const struct sk_buff *skb,
                break;
 
        case MPTCPOPT_MP_JOIN:
-               mp_opt->suboptions |= OPTIONS_MPTCP_MPJ;
                if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
+                       mp_opt->suboptions |= OPTION_MPTCP_MPJ_SYN;
                        mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
                        mp_opt->join_id = *ptr++;
                        mp_opt->token = get_unaligned_be32(ptr);
@@ -135,6 +135,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
                                 mp_opt->backup, mp_opt->join_id,
                                 mp_opt->token, mp_opt->nonce);
                } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
+                       mp_opt->suboptions |= OPTION_MPTCP_MPJ_SYNACK;
                        mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
                        mp_opt->join_id = *ptr++;
                        mp_opt->thmac = get_unaligned_be64(ptr);
@@ -145,11 +146,10 @@ static void mptcp_parse_option(const struct sk_buff *skb,
                                 mp_opt->backup, mp_opt->join_id,
                                 mp_opt->thmac, mp_opt->nonce);
                } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
+                       mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
                        ptr += 2;
                        memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
                        pr_debug("MP_JOIN hmac");
-               } else {
-                       mp_opt->suboptions &= ~OPTIONS_MPTCP_MPJ;
                }
                break;
 
index 3ed4709a75096025683149b2d4af0a1d5f24141c..028e8b473626f122db6e3e664414b2f0447fae69 100644 (file)
@@ -2314,9 +2314,6 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
        if (__mptcp_check_fallback(msk))
                return false;
 
-       if (tcp_rtx_and_write_queues_empty(sk))
-               return false;
-
        /* the closing socket has some data untransmitted and/or unacked:
         * some data in the mptcp rtx queue has not really xmitted yet.
         * keep it simple and re-inject the whole mptcp level rtx queue
index 3eacd04e7099e6de1a161c176a74959722445286..0dcb721c89d193e8943aa414610fcf4284d51f38 100644 (file)
@@ -157,8 +157,8 @@ static int subflow_check_req(struct request_sock *req,
 
        mptcp_get_options(skb, &mp_opt);
 
-       opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
-       opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
+       opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
+       opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
        if (opt_mp_capable) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
 
@@ -254,8 +254,8 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
        subflow_init_req(req, sk_listener);
        mptcp_get_options(skb, &mp_opt);
 
-       opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
-       opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
+       opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
+       opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
        if (opt_mp_capable && opt_mp_join)
                return -EINVAL;
 
@@ -486,7 +486,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 
        mptcp_get_options(skb, &mp_opt);
        if (subflow->request_mptcp) {
-               if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
+               if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
                        MPTCP_INC_STATS(sock_net(sk),
                                        MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
                        mptcp_do_fallback(sk);
@@ -506,7 +506,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
        } else if (subflow->request_join) {
                u8 hmac[SHA256_DIGEST_SIZE];
 
-               if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
+               if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
                        subflow->reset_reason = MPTCP_RST_EMPTCP;
                        goto do_reset;
                }
@@ -783,12 +783,13 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                 * options.
                 */
                mptcp_get_options(skb, &mp_opt);
-               if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC))
+               if (!(mp_opt.suboptions &
+                     (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
                        fallback = true;
 
        } else if (subflow_req->mp_join) {
                mptcp_get_options(skb, &mp_opt);
-               if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
+               if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
                    !subflow_hmac_valid(req, &mp_opt) ||
                    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
index 21f7860e8fa1fd4f1f46c9ad278bfeb261090152..cb48a2b9cb9fd708c2f99adadfd4b21671b44a4a 100644 (file)
@@ -30,6 +30,7 @@
 #define mtype_del              IPSET_TOKEN(MTYPE, _del)
 #define mtype_list             IPSET_TOKEN(MTYPE, _list)
 #define mtype_gc               IPSET_TOKEN(MTYPE, _gc)
+#define mtype_cancel_gc                IPSET_TOKEN(MTYPE, _cancel_gc)
 #define mtype                  MTYPE
 
 #define get_ext(set, map, id)  ((map)->extensions + ((set)->dsize * (id)))
@@ -59,9 +60,6 @@ mtype_destroy(struct ip_set *set)
 {
        struct mtype *map = set->data;
 
-       if (SET_WITH_TIMEOUT(set))
-               del_timer_sync(&map->gc);
-
        if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
                mtype_ext_cleanup(set);
        ip_set_free(map->members);
@@ -290,6 +288,15 @@ mtype_gc(struct timer_list *t)
        add_timer(&map->gc);
 }
 
+static void
+mtype_cancel_gc(struct ip_set *set)
+{
+       struct mtype *map = set->data;
+
+       if (SET_WITH_TIMEOUT(set))
+               del_timer_sync(&map->gc);
+}
+
 static const struct ip_set_type_variant mtype = {
        .kadt   = mtype_kadt,
        .uadt   = mtype_uadt,
@@ -303,6 +310,7 @@ static const struct ip_set_type_variant mtype = {
        .head   = mtype_head,
        .list   = mtype_list,
        .same_set = mtype_same_set,
+       .cancel_gc = mtype_cancel_gc,
 };
 
 #endif /* __IP_SET_BITMAP_IP_GEN_H */
index 4c133e06be1de2f8972b50ac87e6b0b7bfc9ac6d..3184cc6be4c9d375fb2bda49d1bbec6623618c77 100644 (file)
@@ -1154,6 +1154,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
        return ret;
 
 cleanup:
+       set->variant->cancel_gc(set);
        set->variant->destroy(set);
 put_out:
        module_put(set->type->me);
@@ -1182,6 +1183,14 @@ ip_set_destroy_set(struct ip_set *set)
        kfree(set);
 }
 
+static void
+ip_set_destroy_set_rcu(struct rcu_head *head)
+{
+       struct ip_set *set = container_of(head, struct ip_set, rcu);
+
+       ip_set_destroy_set(set);
+}
+
 static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
                          const struct nlattr * const attr[])
 {
@@ -1193,8 +1202,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
        if (unlikely(protocol_min_failed(attr)))
                return -IPSET_ERR_PROTOCOL;
 
-       /* Must wait for flush to be really finished in list:set */
-       rcu_barrier();
 
        /* Commands are serialized and references are
         * protected by the ip_set_ref_lock.
@@ -1206,8 +1213,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
         * counter, so if it's already zero, we can proceed
         * without holding the lock.
         */
-       read_lock_bh(&ip_set_ref_lock);
        if (!attr[IPSET_ATTR_SETNAME]) {
+               /* Must wait for flush to be really finished in list:set */
+               rcu_barrier();
+               read_lock_bh(&ip_set_ref_lock);
                for (i = 0; i < inst->ip_set_max; i++) {
                        s = ip_set(inst, i);
                        if (s && (s->ref || s->ref_netlink)) {
@@ -1221,6 +1230,8 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
                        s = ip_set(inst, i);
                        if (s) {
                                ip_set(inst, i) = NULL;
+                               /* Must cancel garbage collectors */
+                               s->variant->cancel_gc(s);
                                ip_set_destroy_set(s);
                        }
                }
@@ -1228,6 +1239,9 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
                inst->is_destroyed = false;
        } else {
                u32 flags = flag_exist(info->nlh);
+               u16 features = 0;
+
+               read_lock_bh(&ip_set_ref_lock);
                s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
                                    &i);
                if (!s) {
@@ -1238,10 +1252,16 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
                        ret = -IPSET_ERR_BUSY;
                        goto out;
                }
+               features = s->type->features;
                ip_set(inst, i) = NULL;
                read_unlock_bh(&ip_set_ref_lock);
-
-               ip_set_destroy_set(s);
+               if (features & IPSET_TYPE_NAME) {
+                       /* Must wait for flush to be really finished  */
+                       rcu_barrier();
+               }
+               /* Must cancel garbage collectors */
+               s->variant->cancel_gc(s);
+               call_rcu(&s->rcu, ip_set_destroy_set_rcu);
        }
        return 0;
 out:
@@ -1394,9 +1414,6 @@ static int ip_set_swap(struct sk_buff *skb, const struct nfnl_info *info,
        ip_set(inst, to_id) = from;
        write_unlock_bh(&ip_set_ref_lock);
 
-       /* Make sure all readers of the old set pointers are completed. */
-       synchronize_rcu();
-
        return 0;
 }
 
@@ -2362,6 +2379,7 @@ ip_set_net_exit(struct net *net)
                set = ip_set(inst, i);
                if (set) {
                        ip_set(inst, i) = NULL;
+                       set->variant->cancel_gc(set);
                        ip_set_destroy_set(set);
                }
        }
@@ -2409,8 +2427,11 @@ ip_set_fini(void)
 {
        nf_unregister_sockopt(&so_set);
        nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-
        unregister_pernet_subsys(&ip_set_net_ops);
+
+       /* Wait for call_rcu() in destroy */
+       rcu_barrier();
+
        pr_debug("these are the famous last words\n");
 }
 
index cbf80da9a01caf0616d7d77d5be16521b6c0d47e..cf3ce72c3de645168b4698176518a02df6a6fa5a 100644 (file)
@@ -222,6 +222,7 @@ static const union nf_inet_addr zeromask = {};
 #undef mtype_gc_do
 #undef mtype_gc
 #undef mtype_gc_init
+#undef mtype_cancel_gc
 #undef mtype_variant
 #undef mtype_data_match
 
@@ -266,6 +267,7 @@ static const union nf_inet_addr zeromask = {};
 #define mtype_gc_do            IPSET_TOKEN(MTYPE, _gc_do)
 #define mtype_gc               IPSET_TOKEN(MTYPE, _gc)
 #define mtype_gc_init          IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_cancel_gc                IPSET_TOKEN(MTYPE, _cancel_gc)
 #define mtype_variant          IPSET_TOKEN(MTYPE, _variant)
 #define mtype_data_match       IPSET_TOKEN(MTYPE, _data_match)
 
@@ -430,7 +432,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
        u32 i;
 
        for (i = 0; i < jhash_size(t->htable_bits); i++) {
-               n = __ipset_dereference(hbucket(t, i));
+               n = (__force struct hbucket *)hbucket(t, i);
                if (!n)
                        continue;
                if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
@@ -450,10 +452,7 @@ mtype_destroy(struct ip_set *set)
        struct htype *h = set->data;
        struct list_head *l, *lt;
 
-       if (SET_WITH_TIMEOUT(set))
-               cancel_delayed_work_sync(&h->gc.dwork);
-
-       mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
+       mtype_ahash_destroy(set, (__force struct htable *)h->table, true);
        list_for_each_safe(l, lt, &h->ad) {
                list_del(l);
                kfree(l);
@@ -599,6 +598,15 @@ mtype_gc_init(struct htable_gc *gc)
        queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
 }
 
+static void
+mtype_cancel_gc(struct ip_set *set)
+{
+       struct htype *h = set->data;
+
+       if (SET_WITH_TIMEOUT(set))
+               cancel_delayed_work_sync(&h->gc.dwork);
+}
+
 static int
 mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
          struct ip_set_ext *mext, u32 flags);
@@ -1441,6 +1449,7 @@ static const struct ip_set_type_variant mtype_variant = {
        .uref   = mtype_uref,
        .resize = mtype_resize,
        .same_set = mtype_same_set,
+       .cancel_gc = mtype_cancel_gc,
        .region_lock = true,
 };
 
index 95aeb31c60e0d74fe1d8d2abc0507966c391e412..30a655e5c4fdcd3424f5d8516c9724ecd100580a 100644 (file)
@@ -138,9 +138,9 @@ hash_netiface4_data_next(struct hash_netiface4_elem *next,
 #include "ip_set_hash_gen.h"
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-static const char *get_physindev_name(const struct sk_buff *skb)
+static const char *get_physindev_name(const struct sk_buff *skb, struct net *net)
 {
-       struct net_device *dev = nf_bridge_get_physindev(skb);
+       struct net_device *dev = nf_bridge_get_physindev(skb, net);
 
        return dev ? dev->name : NULL;
 }
@@ -177,7 +177,7 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
 
        if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-               const char *eiface = SRCDIR ? get_physindev_name(skb) :
+               const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) :
                                              get_physoutdev_name(skb);
 
                if (!eiface)
@@ -395,7 +395,7 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
 
        if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-               const char *eiface = SRCDIR ? get_physindev_name(skb) :
+               const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) :
                                              get_physoutdev_name(skb);
 
                if (!eiface)
index e162636525cfb4ad02de58982382a289e5bcbc45..6c3f28bc59b3259f0033cd4adc0ba5711db08c26 100644 (file)
@@ -426,9 +426,6 @@ list_set_destroy(struct ip_set *set)
        struct list_set *map = set->data;
        struct set_elem *e, *n;
 
-       if (SET_WITH_TIMEOUT(set))
-               timer_shutdown_sync(&map->gc);
-
        list_for_each_entry_safe(e, n, &map->members, list) {
                list_del(&e->list);
                ip_set_put_byindex(map->net, e->id);
@@ -545,6 +542,15 @@ list_set_same_set(const struct ip_set *a, const struct ip_set *b)
               a->extensions == b->extensions;
 }
 
+static void
+list_set_cancel_gc(struct ip_set *set)
+{
+       struct list_set *map = set->data;
+
+       if (SET_WITH_TIMEOUT(set))
+               timer_shutdown_sync(&map->gc);
+}
+
 static const struct ip_set_type_variant set_variant = {
        .kadt   = list_set_kadt,
        .uadt   = list_set_uadt,
@@ -558,6 +564,7 @@ static const struct ip_set_type_variant set_variant = {
        .head   = list_set_head,
        .list   = list_set_list,
        .same_set = list_set_same_set,
+       .cancel_gc = list_set_cancel_gc,
 };
 
 static void
index 9193e109e6b38f1336bb13e26ffb5399d1b15381..65e0259178da43c61733973e50b69c77da5027c5 100644 (file)
@@ -271,7 +271,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
                        skb->dev = dst->dev;
                        icmpv6_send(skb, ICMPV6_TIME_EXCEED,
                                    ICMPV6_EXC_HOPLIMIT, 0);
-                       __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+                       IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 
                        return false;
                }
@@ -286,7 +286,7 @@ static inline bool decrement_ttl(struct netns_ipvs *ipvs,
        {
                if (ip_hdr(skb)->ttl <= 1) {
                        /* Tell the sender its packet died... */
-                       __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
+                       IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
                        icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
                        return false;
                }
index 0c22a02c2035ccb9c760d71fe2e5dd9c461bf239..3b846cbdc050d324626586fb6ece00985efd874b 100644 (file)
@@ -876,6 +876,7 @@ struct ctnetlink_filter_u32 {
 
 struct ctnetlink_filter {
        u8 family;
+       bool zone_filter;
 
        u_int32_t orig_flags;
        u_int32_t reply_flags;
@@ -992,9 +993,12 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
        if (err)
                goto err_filter;
 
-       err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
-       if (err < 0)
-               goto err_filter;
+       if (cda[CTA_ZONE]) {
+               err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
+               if (err < 0)
+                       goto err_filter;
+               filter->zone_filter = true;
+       }
 
        if (!cda[CTA_FILTER])
                return filter;
@@ -1148,7 +1152,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
        if (filter->family && nf_ct_l3num(ct) != filter->family)
                goto ignore_entry;
 
-       if (filter->zone.id != NF_CT_DEFAULT_ZONE_ID &&
+       if (filter->zone_filter &&
            !nf_ct_zone_equal_any(ct, &filter->zone))
                goto ignore_entry;
 
index c6bd533983c1ff275796b789cdb973c09f646984..4cc97f971264ed779434ab4597dd0162586b3736 100644 (file)
@@ -283,7 +283,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
                        pr_debug("Setting vtag %x for secondary conntrack\n",
                                 sh->vtag);
                        ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
-               } else {
+               } else if (sch->type == SCTP_CID_SHUTDOWN_ACK) {
                /* If it is a shutdown ack OOTB packet, we expect a return
                   shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
                        pr_debug("Setting vtag %x for new conn OOTB\n",
index e573be5afde7a591e00e799aadeadcf455b31f05..ae493599a3ef03415f6c40e942cdab700acb84c6 100644 (file)
@@ -457,7 +457,8 @@ static void tcp_init_sender(struct ip_ct_tcp_state *sender,
                            const struct sk_buff *skb,
                            unsigned int dataoff,
                            const struct tcphdr *tcph,
-                           u32 end, u32 win)
+                           u32 end, u32 win,
+                           enum ip_conntrack_dir dir)
 {
        /* SYN-ACK in reply to a SYN
         * or SYN from reply direction in simultaneous open.
@@ -471,7 +472,8 @@ static void tcp_init_sender(struct ip_ct_tcp_state *sender,
         * Both sides must send the Window Scale option
         * to enable window scaling in either direction.
         */
-       if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
+       if (dir == IP_CT_DIR_REPLY &&
+           !(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
              receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
                sender->td_scale = 0;
                receiver->td_scale = 0;
@@ -542,7 +544,7 @@ tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
                if (tcph->syn) {
                        tcp_init_sender(sender, receiver,
                                        skb, dataoff, tcph,
-                                       end, win);
+                                       end, win, dir);
                        if (!tcph->ack)
                                /* Simultaneous open */
                                return NFCT_TCP_ACCEPT;
@@ -585,7 +587,7 @@ tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
                 */
                tcp_init_sender(sender, receiver,
                                skb, dataoff, tcph,
-                               end, win);
+                               end, win, dir);
 
                if (dir == IP_CT_DIR_REPLY && !tcph->ack)
                        return NFCT_TCP_ACCEPT;
index 8cc52d2bd31be518df778bbe2cfaad6172d90dbc..e16f158388bbe568cddc1be5a0a6d16069897822 100644 (file)
@@ -193,11 +193,12 @@ void nf_logger_put(int pf, enum nf_log_type type)
                return;
        }
 
-       BUG_ON(loggers[pf][type] == NULL);
-
        rcu_read_lock();
        logger = rcu_dereference(loggers[pf][type]);
-       module_put(logger->me);
+       if (!logger)
+               WARN_ON_ONCE(1);
+       else
+               module_put(logger->me);
        rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_logger_put);
index c66689ad2b491977876aa47e6d4201de29244950..58402226045e84b7134b3c8a33919b5ea1f22f12 100644 (file)
@@ -111,7 +111,8 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf,
                          unsigned int hooknum, const struct sk_buff *skb,
                          const struct net_device *in,
                          const struct net_device *out,
-                         const struct nf_loginfo *loginfo, const char *prefix)
+                         const struct nf_loginfo *loginfo, const char *prefix,
+                         struct net *net)
 {
        const struct net_device *physoutdev __maybe_unused;
        const struct net_device *physindev __maybe_unused;
@@ -121,7 +122,7 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf,
                        in ? in->name : "",
                        out ? out->name : "");
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-       physindev = nf_bridge_get_physindev(skb);
+       physindev = nf_bridge_get_physindev(skb, net);
        if (physindev && in != physindev)
                nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
        physoutdev = nf_bridge_get_physoutdev(skb);
@@ -148,7 +149,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf,
                loginfo = &default_loginfo;
 
        nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
-                                 prefix);
+                                 prefix, net);
        dump_arp_packet(m, loginfo, skb, skb_network_offset(skb));
 
        nf_log_buf_close(m);
@@ -845,7 +846,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
                loginfo = &default_loginfo;
 
        nf_log_dump_packet_common(m, pf, hooknum, skb, in,
-                                 out, loginfo, prefix);
+                                 out, loginfo, prefix, net);
 
        if (in)
                dump_mac_header(m, loginfo, skb);
@@ -880,7 +881,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
                loginfo = &default_loginfo;
 
        nf_log_dump_packet_common(m, pf, hooknum, skb, in, out,
-                                 loginfo, prefix);
+                                 loginfo, prefix, net);
 
        if (in)
                dump_mac_header(m, loginfo, skb);
@@ -916,7 +917,7 @@ static void nf_log_unknown_packet(struct net *net, u_int8_t pf,
                loginfo = &default_loginfo;
 
        nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
-                                 prefix);
+                                 prefix, net);
 
        dump_mac_header(m, loginfo, skb);
 
index 63d1516816b1fdaa570288c725cc5f721cde694d..e2f334f70281f8bb29ed0ea8eb974382708c9de6 100644 (file)
@@ -82,11 +82,9 @@ static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
 {
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        const struct sk_buff *skb = entry->skb;
-       struct nf_bridge_info *nf_bridge;
 
-       nf_bridge = nf_bridge_info_get(skb);
-       if (nf_bridge) {
-               entry->physin = nf_bridge_get_physindev(skb);
+       if (nf_bridge_info_exists(skb)) {
+               entry->physin = nf_bridge_get_physindev(skb, entry->state.net);
                entry->physout = nf_bridge_get_physoutdev(skb);
        } else {
                entry->physin = NULL;
index 8438a8922e4ab9612784cd9d2ed933f218a26bd3..f8e3f70c35bd558aec8a2b2149a7ff7db0eb3d1f 100644 (file)
@@ -24,6 +24,7 @@
 #include <net/sock.h>
 
 #define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
+#define NFT_SET_MAX_ANONLEN 16
 
 unsigned int nf_tables_net_id __read_mostly;
 
@@ -2977,6 +2978,9 @@ static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
  */
 int nft_register_expr(struct nft_expr_type *type)
 {
+       if (WARN_ON_ONCE(type->maxattr > NFT_EXPR_MAXATTR))
+               return -ENOMEM;
+
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
        if (type->family == NFPROTO_UNSPEC)
                list_add_tail_rcu(&type->list, &nf_tables_expressions);
@@ -3271,14 +3275,13 @@ int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
 {
        int err;
 
-       if (src->ops->clone) {
-               dst->ops = src->ops;
-               err = src->ops->clone(dst, src);
-               if (err < 0)
-                       return err;
-       } else {
-               memcpy(dst, src, src->ops->size);
-       }
+       if (WARN_ON_ONCE(!src->ops->clone))
+               return -EINVAL;
+
+       dst->ops = src->ops;
+       err = src->ops->clone(dst, src);
+       if (err < 0)
+               return err;
 
        __module_get(src->ops->type->owner);
 
@@ -4411,6 +4414,9 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;
 
+               if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN)
+                       return -EINVAL;
+
                inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
                if (inuse == NULL)
                        return -ENOMEM;
@@ -4811,8 +4817,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr,
 static int nft_set_desc_concat(struct nft_set_desc *desc,
                               const struct nlattr *nla)
 {
+       u32 num_regs = 0, key_num_regs = 0;
        struct nlattr *attr;
-       u32 num_regs = 0;
        int rem, err, i;
 
        nla_for_each_nested(attr, nla, rem) {
@@ -4827,6 +4833,10 @@ static int nft_set_desc_concat(struct nft_set_desc *desc,
        for (i = 0; i < desc->field_count; i++)
                num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32));
 
+       key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32));
+       if (key_num_regs != num_regs)
+               return -EINVAL;
+
        if (num_regs > NFT_REG32_COUNT)
                return -E2BIG;
 
@@ -5048,16 +5058,28 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
        }
 
        desc.policy = NFT_SET_POL_PERFORMANCE;
-       if (nla[NFTA_SET_POLICY] != NULL)
+       if (nla[NFTA_SET_POLICY] != NULL) {
                desc.policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
+               switch (desc.policy) {
+               case NFT_SET_POL_PERFORMANCE:
+               case NFT_SET_POL_MEMORY:
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
+       }
 
        if (nla[NFTA_SET_DESC] != NULL) {
                err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
                if (err < 0)
                        return err;
 
-               if (desc.field_count > 1 && !(flags & NFT_SET_CONCAT))
+               if (desc.field_count > 1) {
+                       if (!(flags & NFT_SET_CONCAT))
+                               return -EINVAL;
+               } else if (flags & NFT_SET_CONCAT) {
                        return -EINVAL;
+               }
        } else if (flags & NFT_SET_CONCAT) {
                return -EINVAL;
        }
@@ -5704,7 +5726,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv);
        struct nft_set_dump_args *args;
 
-       if (nft_set_elem_expired(ext))
+       if (nft_set_elem_expired(ext) || nft_set_elem_is_dead(ext))
                return 0;
 
        args = container_of(iter, struct nft_set_dump_args, iter);
@@ -6568,7 +6590,7 @@ static int nft_setelem_catchall_deactivate(const struct net *net,
 
        list_for_each_entry(catchall, &set->catchall_list, list) {
                ext = nft_set_elem_ext(set, catchall->elem);
-               if (!nft_is_active(net, ext))
+               if (!nft_is_active_next(net, ext))
                        continue;
 
                kfree(elem->priv);
@@ -7529,11 +7551,15 @@ nla_put_failure:
        return -1;
 }
 
-static const struct nft_object_type *__nft_obj_type_get(u32 objtype)
+static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family)
 {
        const struct nft_object_type *type;
 
        list_for_each_entry(type, &nf_tables_objects, list) {
+               if (type->family != NFPROTO_UNSPEC &&
+                   type->family != family)
+                       continue;
+
                if (objtype == type->type)
                        return type;
        }
@@ -7541,11 +7567,11 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype)
 }
 
 static const struct nft_object_type *
-nft_obj_type_get(struct net *net, u32 objtype)
+nft_obj_type_get(struct net *net, u32 objtype, u8 family)
 {
        const struct nft_object_type *type;
 
-       type = __nft_obj_type_get(objtype);
+       type = __nft_obj_type_get(objtype, family);
        if (type != NULL && try_module_get(type->owner))
                return type;
 
@@ -7638,7 +7664,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
                if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
                        return -EOPNOTSUPP;
 
-               type = __nft_obj_type_get(objtype);
+               type = __nft_obj_type_get(objtype, family);
                if (WARN_ON_ONCE(!type))
                        return -ENOENT;
 
@@ -7652,7 +7678,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
        if (!nft_use_inc(&table->use))
                return -EMFILE;
 
-       type = nft_obj_type_get(net, objtype);
+       type = nft_obj_type_get(net, objtype, family);
        if (IS_ERR(type)) {
                err = PTR_ERR(type);
                goto err_type;
@@ -9801,6 +9827,7 @@ dead_elem:
 struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
 {
        struct nft_set_elem_catchall *catchall, *next;
+       u64 tstamp = nft_net_tstamp(gc->net);
        const struct nft_set *set = gc->set;
        struct nft_elem_priv *elem_priv;
        struct nft_set_ext *ext;
@@ -9810,7 +9837,7 @@ struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
        list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
                ext = nft_set_elem_ext(set, catchall->elem);
 
-               if (!nft_set_elem_expired(ext))
+               if (!__nft_set_elem_expired(ext, tstamp))
                        continue;
 
                gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
@@ -10596,6 +10623,7 @@ static bool nf_tables_valid_genid(struct net *net, u32 genid)
        bool genid_ok;
 
        mutex_lock(&nft_net->commit_mutex);
+       nft_net->tstamp = get_jiffies_64();
 
        genid_ok = genid == 0 || nft_net->base_seq == genid;
        if (!genid_ok)
@@ -10970,16 +10998,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
        data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
 
        switch (data->verdict.code) {
-       default:
-               switch (data->verdict.code & NF_VERDICT_MASK) {
-               case NF_ACCEPT:
-               case NF_DROP:
-               case NF_QUEUE:
-                       break;
-               default:
-                       return -EINVAL;
-               }
-               fallthrough;
+       case NF_ACCEPT:
+       case NF_DROP:
+       case NF_QUEUE:
+               break;
        case NFT_CONTINUE:
        case NFT_BREAK:
        case NFT_RETURN:
@@ -11014,6 +11036,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 
                data->verdict.chain = chain;
                break;
+       default:
+               return -EINVAL;
        }
 
        desc->len = sizeof(data->verdict);
index f03f4d4d7d88967d237c5064cd729ea6f83b40bf..134e05d31061e4b6daa977cb47084f0da20f697c 100644 (file)
@@ -508,7 +508,7 @@ __build_packet_message(struct nfnl_log_net *log,
                                         htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
-                       struct net_device *physindev;
+                       int physinif;
 
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
@@ -516,10 +516,10 @@ __build_packet_message(struct nfnl_log_net *log,
                                         htonl(indev->ifindex)))
                                goto nla_put_failure;
 
-                       physindev = nf_bridge_get_physindev(skb);
-                       if (physindev &&
+                       physinif = nf_bridge_get_physinif(skb);
+                       if (physinif &&
                            nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
-                                        htonl(physindev->ifindex)))
+                                        htonl(physinif)))
                                goto nla_put_failure;
                }
 #endif
index 171d1f52d3dd0da711cd63b23ec31d72fa88cdd2..5cf38fc0a366ac55c0ce11798baf7fb93c88283f 100644 (file)
@@ -232,18 +232,25 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        if (verdict == NF_ACCEPT ||
            verdict == NF_REPEAT ||
            verdict == NF_STOP) {
+               unsigned int ct_verdict = verdict;
+
                rcu_read_lock();
                ct_hook = rcu_dereference(nf_ct_hook);
                if (ct_hook)
-                       verdict = ct_hook->update(entry->state.net, entry->skb);
+                       ct_verdict = ct_hook->update(entry->state.net, entry->skb);
                rcu_read_unlock();
 
-               switch (verdict & NF_VERDICT_MASK) {
+               switch (ct_verdict & NF_VERDICT_MASK) {
+               case NF_ACCEPT:
+                       /* follow userspace verdict, could be REPEAT */
+                       break;
                case NF_STOLEN:
                        nf_queue_entry_free(entry);
                        return;
+               default:
+                       verdict = ct_verdict & NF_VERDICT_MASK;
+                       break;
                }
-
        }
        nf_reinject(entry, verdict);
 }
index 680fe557686e42d3421a445b6c5472bd4056a65a..274b6f7e6bb57e4f270262ef923ebf8d7f1cf02c 100644 (file)
@@ -357,9 +357,10 @@ static int nf_tables_netdev_event(struct notifier_block *this,
                                  unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct nft_base_chain *basechain;
        struct nftables_pernet *nft_net;
-       struct nft_table *table;
        struct nft_chain *chain, *nr;
+       struct nft_table *table;
        struct nft_ctx ctx = {
                .net    = dev_net(dev),
        };
@@ -371,7 +372,8 @@ static int nf_tables_netdev_event(struct notifier_block *this,
        nft_net = nft_pernet(ctx.net);
        mutex_lock(&nft_net->commit_mutex);
        list_for_each_entry(table, &nft_net->tables, list) {
-               if (table->family != NFPROTO_NETDEV)
+               if (table->family != NFPROTO_NETDEV &&
+                   table->family != NFPROTO_INET)
                        continue;
 
                ctx.family = table->family;
@@ -380,6 +382,11 @@ static int nf_tables_netdev_event(struct notifier_block *this,
                        if (!nft_is_base_chain(chain))
                                continue;
 
+                       basechain = nft_base_chain(chain);
+                       if (table->family == NFPROTO_INET &&
+                           basechain->ops.hooknum != NF_INET_INGRESS)
+                               continue;
+
                        ctx.chain = chain;
                        nft_netdev_event(event, dev, &ctx);
                }
index 5284cd2ad532713368db0cd56bdf17baf1e0ed4d..1f9474fefe84923e7769efd8ea5c703f5783e37d 100644 (file)
@@ -135,7 +135,7 @@ static void nft_target_eval_bridge(const struct nft_expr *expr,
 
 static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
        [NFTA_TARGET_NAME]      = { .type = NLA_NUL_STRING },
-       [NFTA_TARGET_REV]       = { .type = NLA_U32 },
+       [NFTA_TARGET_REV]       = NLA_POLICY_MAX(NLA_BE32, 255),
        [NFTA_TARGET_INFO]      = { .type = NLA_BINARY },
 };
 
@@ -200,6 +200,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
 static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
 {
        struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
+       u32 l4proto;
        u32 flags;
        int err;
 
@@ -212,12 +213,18 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
                return -EINVAL;
 
        flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
-       if (flags & ~NFT_RULE_COMPAT_F_MASK)
+       if (flags & NFT_RULE_COMPAT_F_UNUSED ||
+           flags & ~NFT_RULE_COMPAT_F_MASK)
                return -EINVAL;
        if (flags & NFT_RULE_COMPAT_F_INV)
                *inv = true;
 
-       *proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
+       l4proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
+       if (l4proto > U16_MAX)
+               return -EINVAL;
+
+       *proto = l4proto;
+
        return 0;
 }
 
@@ -350,6 +357,12 @@ static int nft_target_validate(const struct nft_ctx *ctx,
        unsigned int hook_mask = 0;
        int ret;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_BRIDGE &&
+           ctx->family != NFPROTO_ARP)
+               return -EOPNOTSUPP;
+
        if (nft_is_base_chain(ctx->chain)) {
                const struct nft_base_chain *basechain =
                                                nft_base_chain(ctx->chain);
@@ -413,7 +426,7 @@ static void nft_match_eval(const struct nft_expr *expr,
 
 static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
        [NFTA_MATCH_NAME]       = { .type = NLA_NUL_STRING },
-       [NFTA_MATCH_REV]        = { .type = NLA_U32 },
+       [NFTA_MATCH_REV]        = NLA_POLICY_MAX(NLA_BE32, 255),
        [NFTA_MATCH_INFO]       = { .type = NLA_BINARY },
 };
 
@@ -595,6 +608,12 @@ static int nft_match_validate(const struct nft_ctx *ctx,
        unsigned int hook_mask = 0;
        int ret;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_BRIDGE &&
+           ctx->family != NFPROTO_ARP)
+               return -EOPNOTSUPP;
+
        if (nft_is_base_chain(ctx->chain)) {
                const struct nft_base_chain *basechain =
                                                nft_base_chain(ctx->chain);
@@ -712,7 +731,7 @@ out_put:
 static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
        [NFTA_COMPAT_NAME]      = { .type = NLA_NUL_STRING,
                                    .len = NFT_COMPAT_NAME_MAX-1 },
-       [NFTA_COMPAT_REV]       = { .type = NLA_U32 },
+       [NFTA_COMPAT_REV]       = NLA_POLICY_MAX(NLA_BE32, 255),
        [NFTA_COMPAT_TYPE]      = { .type = NLA_U32 },
 };
 
index 86bb9d7797d9eeaea730e463c389a958e0b6ec85..bfd3e5a14dab68484469bdba71af37a460822549 100644 (file)
@@ -476,6 +476,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
                break;
 #endif
        case NFT_CT_ID:
+               if (tb[NFTA_CT_DIRECTION])
+                       return -EINVAL;
+
                len = sizeof(u32);
                break;
        default:
@@ -1250,7 +1253,31 @@ static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
        if (tb[NFTA_CT_EXPECT_L3PROTO])
                priv->l3num = ntohs(nla_get_be16(tb[NFTA_CT_EXPECT_L3PROTO]));
 
+       switch (priv->l3num) {
+       case NFPROTO_IPV4:
+       case NFPROTO_IPV6:
+               if (priv->l3num != ctx->family)
+                       return -EINVAL;
+
+               fallthrough;
+       case NFPROTO_INET:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
        priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
+       switch (priv->l4proto) {
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+       case IPPROTO_UDPLITE:
+       case IPPROTO_DCCP:
+       case IPPROTO_SCTP:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
        priv->dport = nla_get_be16(tb[NFTA_CT_EXPECT_DPORT]);
        priv->timeout = nla_get_u32(tb[NFTA_CT_EXPECT_TIMEOUT]);
        priv->size = nla_get_u8(tb[NFTA_CT_EXPECT_SIZE]);
index ab3362c483b4a78c1e138815764e9e80bfd5d43d..397351fa4d5f82d8bcec25e1d69f327dc60e0199 100644 (file)
@@ -384,6 +384,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
 {
        unsigned int hook_mask = (1 << NF_INET_FORWARD);
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain, hook_mask);
 }
 
index 145dc62c62472605e9f99c6e69f240e4c66774e9..cefa25e0dbb0a2c87af43e8230cf7934ce8fa3d1 100644 (file)
@@ -58,16 +58,19 @@ static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
 static int nft_limit_init(struct nft_limit_priv *priv,
                          const struct nlattr * const tb[], bool pkts)
 {
-       u64 unit, tokens;
+       u64 unit, tokens, rate_with_burst;
+       bool invert = false;
 
        if (tb[NFTA_LIMIT_RATE] == NULL ||
            tb[NFTA_LIMIT_UNIT] == NULL)
                return -EINVAL;
 
        priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+       if (priv->rate == 0)
+               return -EINVAL;
+
        unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
-       priv->nsecs = unit * NSEC_PER_SEC;
-       if (priv->rate == 0 || priv->nsecs < unit)
+       if (check_mul_overflow(unit, NSEC_PER_SEC, &priv->nsecs))
                return -EOVERFLOW;
 
        if (tb[NFTA_LIMIT_BURST])
@@ -76,18 +79,35 @@ static int nft_limit_init(struct nft_limit_priv *priv,
        if (pkts && priv->burst == 0)
                priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
 
-       if (priv->rate + priv->burst < priv->rate)
+       if (check_add_overflow(priv->rate, priv->burst, &rate_with_burst))
                return -EOVERFLOW;
 
        if (pkts) {
-               tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
+               u64 tmp = div64_u64(priv->nsecs, priv->rate);
+
+               if (check_mul_overflow(tmp, priv->burst, &tokens))
+                       return -EOVERFLOW;
        } else {
+               u64 tmp;
+
                /* The token bucket size limits the number of tokens can be
                 * accumulated. tokens_max specifies the bucket size.
                 * tokens_max = unit * (rate + burst) / rate.
                 */
-               tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
-                                priv->rate);
+               if (check_mul_overflow(priv->nsecs, rate_with_burst, &tmp))
+                       return -EOVERFLOW;
+
+               tokens = div64_u64(tmp, priv->rate);
+       }
+
+       if (tb[NFTA_LIMIT_FLAGS]) {
+               u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
+
+               if (flags & ~NFT_LIMIT_F_INV)
+                       return -EOPNOTSUPP;
+
+               if (flags & NFT_LIMIT_F_INV)
+                       invert = true;
        }
 
        priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL_ACCOUNT);
@@ -96,13 +116,7 @@ static int nft_limit_init(struct nft_limit_priv *priv,
 
        priv->limit->tokens = tokens;
        priv->tokens_max = priv->limit->tokens;
-
-       if (tb[NFTA_LIMIT_FLAGS]) {
-               u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
-
-               if (flags & NFT_LIMIT_F_INV)
-                       priv->invert = true;
-       }
+       priv->invert = invert;
        priv->limit->last = ktime_get_ns();
        spin_lock_init(&priv->limit->lock);
 
index 583885ce72328fab424da04f888398eb687c896a..808f5802c2704a583c747e71d227965fa5c1a8bf 100644 (file)
@@ -143,6 +143,11 @@ static int nft_nat_validate(const struct nft_ctx *ctx,
        struct nft_nat *priv = nft_expr_priv(expr);
        int err;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
        if (err < 0)
                return err;
index 35a2c28caa60bb6d50da5febbf5a6d2be7c9bdd9..24d977138572988e87b8c726daf67441f0b41de2 100644 (file)
@@ -166,6 +166,11 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
        const struct nft_rt *priv = nft_expr_priv(expr);
        unsigned int hooks;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        switch (priv->key) {
        case NFT_RT_NEXTHOP4:
        case NFT_RT_NEXTHOP6:
index 6c2061bfdae6c361c530088ca51aa2790d850ba4..6968a3b342367c6c0cb0df7523fdfd5864038802 100644 (file)
@@ -36,6 +36,7 @@ struct nft_rhash_cmp_arg {
        const struct nft_set            *set;
        const u32                       *key;
        u8                              genmask;
+       u64                             tstamp;
 };
 
 static inline u32 nft_rhash_key(const void *data, u32 len, u32 seed)
@@ -62,7 +63,7 @@ static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg,
                return 1;
        if (nft_set_elem_is_dead(&he->ext))
                return 1;
-       if (nft_set_elem_expired(&he->ext))
+       if (__nft_set_elem_expired(&he->ext, x->tstamp))
                return 1;
        if (!nft_set_elem_active(&he->ext, x->genmask))
                return 1;
@@ -87,6 +88,7 @@ bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
                .genmask = nft_genmask_cur(net),
                .set     = set,
                .key     = key,
+               .tstamp  = get_jiffies_64(),
        };
 
        he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
@@ -106,6 +108,7 @@ nft_rhash_get(const struct net *net, const struct nft_set *set,
                .genmask = nft_genmask_cur(net),
                .set     = set,
                .key     = elem->key.val.data,
+               .tstamp  = get_jiffies_64(),
        };
 
        he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
@@ -131,6 +134,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
                .genmask = NFT_GENMASK_ANY,
                .set     = set,
                .key     = key,
+               .tstamp  = get_jiffies_64(),
        };
 
        he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
@@ -175,6 +179,7 @@ static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
                .genmask = nft_genmask_next(net),
                .set     = set,
                .key     = elem->key.val.data,
+               .tstamp  = nft_net_tstamp(net),
        };
        struct nft_rhash_elem *prev;
 
@@ -216,6 +221,7 @@ nft_rhash_deactivate(const struct net *net, const struct nft_set *set,
                .genmask = nft_genmask_next(net),
                .set     = set,
                .key     = elem->key.val.data,
+               .tstamp  = nft_net_tstamp(net),
        };
 
        rcu_read_lock();
index efd523496be45f59408e8b6dcec7ff40dbcf5844..aa1d9e93a9a04859d48e417501c7f9e889187400 100644 (file)
 #include "nft_set_pipapo_avx2.h"
 #include "nft_set_pipapo.h"
 
-/* Current working bitmap index, toggled between field matches */
-static DEFINE_PER_CPU(bool, nft_pipapo_scratch_index);
-
 /**
  * pipapo_refill() - For each set bit, set bits from selected mapping table item
  * @map:       Bitmap to be scanned for set bits
@@ -412,6 +409,7 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
                       const u32 *key, const struct nft_set_ext **ext)
 {
        struct nft_pipapo *priv = nft_set_priv(set);
+       struct nft_pipapo_scratch *scratch;
        unsigned long *res_map, *fill_map;
        u8 genmask = nft_genmask_cur(net);
        const u8 *rp = (const u8 *)key;
@@ -422,15 +420,17 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
 
        local_bh_disable();
 
-       map_index = raw_cpu_read(nft_pipapo_scratch_index);
-
        m = rcu_dereference(priv->match);
 
        if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
                goto out;
 
-       res_map  = *raw_cpu_ptr(m->scratch) + (map_index ? m->bsize_max : 0);
-       fill_map = *raw_cpu_ptr(m->scratch) + (map_index ? 0 : m->bsize_max);
+       scratch = *raw_cpu_ptr(m->scratch);
+
+       map_index = scratch->map_index;
+
+       res_map  = scratch->map + (map_index ? m->bsize_max : 0);
+       fill_map = scratch->map + (map_index ? 0 : m->bsize_max);
 
        memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
 
@@ -460,7 +460,7 @@ next_match:
                b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
                                  last);
                if (b < 0) {
-                       raw_cpu_write(nft_pipapo_scratch_index, map_index);
+                       scratch->map_index = map_index;
                        local_bh_enable();
 
                        return false;
@@ -477,7 +477,7 @@ next_match:
                         * current inactive bitmap is clean and can be reused as
                         * *next* bitmap (not initial) for the next packet.
                         */
-                       raw_cpu_write(nft_pipapo_scratch_index, map_index);
+                       scratch->map_index = map_index;
                        local_bh_enable();
 
                        return true;
@@ -504,6 +504,7 @@ out:
  * @set:       nftables API set representation
  * @data:      Key data to be matched against existing elements
  * @genmask:   If set, check that element is active in given genmask
+ * @tstamp:    timestamp to check for expired elements
  *
  * This is essentially the same as the lookup function, except that it matches
  * key data against the uncommitted copy and doesn't use preallocated maps for
@@ -513,7 +514,8 @@ out:
  */
 static struct nft_pipapo_elem *pipapo_get(const struct net *net,
                                          const struct nft_set *set,
-                                         const u8 *data, u8 genmask)
+                                         const u8 *data, u8 genmask,
+                                         u64 tstamp)
 {
        struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
        struct nft_pipapo *priv = nft_set_priv(set);
@@ -566,7 +568,7 @@ next_match:
                        goto out;
 
                if (last) {
-                       if (nft_set_elem_expired(&f->mt[b].e->ext))
+                       if (__nft_set_elem_expired(&f->mt[b].e->ext, tstamp))
                                goto next_match;
                        if ((genmask &&
                             !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
@@ -603,10 +605,10 @@ static struct nft_elem_priv *
 nft_pipapo_get(const struct net *net, const struct nft_set *set,
               const struct nft_set_elem *elem, unsigned int flags)
 {
-       static struct nft_pipapo_elem *e;
+       struct nft_pipapo_elem *e;
 
        e = pipapo_get(net, set, (const u8 *)elem->key.val.data,
-                      nft_genmask_cur(net));
+                      nft_genmask_cur(net), get_jiffies_64());
        if (IS_ERR(e))
                return ERR_CAST(e);
 
@@ -1108,6 +1110,25 @@ static void pipapo_map(struct nft_pipapo_match *m,
                f->mt[map[i].to + j].e = e;
 }
 
+/**
+ * pipapo_free_scratch() - Free per-CPU map at original (not aligned) address
+ * @m:         Matching data
+ * @cpu:       CPU number
+ */
+static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
+{
+       struct nft_pipapo_scratch *s;
+       void *mem;
+
+       s = *per_cpu_ptr(m->scratch, cpu);
+       if (!s)
+               return;
+
+       mem = s;
+       mem -= s->align_off;
+       kfree(mem);
+}
+
 /**
  * pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
  * @clone:     Copy of matching data with pending insertions and deletions
@@ -1121,12 +1142,13 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
        int i;
 
        for_each_possible_cpu(i) {
-               unsigned long *scratch;
+               struct nft_pipapo_scratch *scratch;
 #ifdef NFT_PIPAPO_ALIGN
-               unsigned long *scratch_aligned;
+               void *scratch_aligned;
+               u32 align_off;
 #endif
-
-               scratch = kzalloc_node(bsize_max * sizeof(*scratch) * 2 +
+               scratch = kzalloc_node(struct_size(scratch, map,
+                                                  bsize_max * 2) +
                                       NFT_PIPAPO_ALIGN_HEADROOM,
                                       GFP_KERNEL, cpu_to_node(i));
                if (!scratch) {
@@ -1140,14 +1162,25 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
                        return -ENOMEM;
                }
 
-               kfree(*per_cpu_ptr(clone->scratch, i));
-
-               *per_cpu_ptr(clone->scratch, i) = scratch;
+               pipapo_free_scratch(clone, i);
 
 #ifdef NFT_PIPAPO_ALIGN
-               scratch_aligned = NFT_PIPAPO_LT_ALIGN(scratch);
-               *per_cpu_ptr(clone->scratch_aligned, i) = scratch_aligned;
+               /* Align &scratch->map (not the struct itself): the extra
+                * %NFT_PIPAPO_ALIGN_HEADROOM bytes passed to kzalloc_node()
+                * above guarantee we can waste up to those bytes in order
+                * to align the map field regardless of its offset within
+                * the struct.
+                */
+               BUILD_BUG_ON(offsetof(struct nft_pipapo_scratch, map) > NFT_PIPAPO_ALIGN_HEADROOM);
+
+               scratch_aligned = NFT_PIPAPO_LT_ALIGN(&scratch->map);
+               scratch_aligned -= offsetof(struct nft_pipapo_scratch, map);
+               align_off = scratch_aligned - (void *)scratch;
+
+               scratch = scratch_aligned;
+               scratch->align_off = align_off;
 #endif
+               *per_cpu_ptr(clone->scratch, i) = scratch;
        }
 
        return 0;
@@ -1173,6 +1206,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
        struct nft_pipapo_match *m = priv->clone;
        u8 genmask = nft_genmask_next(net);
        struct nft_pipapo_elem *e, *dup;
+       u64 tstamp = nft_net_tstamp(net);
        struct nft_pipapo_field *f;
        const u8 *start_p, *end_p;
        int i, bsize_max, err = 0;
@@ -1182,7 +1216,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
        else
                end = start;
 
-       dup = pipapo_get(net, set, start, genmask);
+       dup = pipapo_get(net, set, start, genmask, tstamp);
        if (!IS_ERR(dup)) {
                /* Check if we already have the same exact entry */
                const struct nft_data *dup_key, *dup_end;
@@ -1204,7 +1238,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
 
        if (PTR_ERR(dup) == -ENOENT) {
                /* Look for partially overlapping entries */
-               dup = pipapo_get(net, set, end, nft_genmask_next(net));
+               dup = pipapo_get(net, set, end, nft_genmask_next(net), tstamp);
        }
 
        if (PTR_ERR(dup) != -ENOENT) {
@@ -1301,11 +1335,6 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
        if (!new->scratch)
                goto out_scratch;
 
-#ifdef NFT_PIPAPO_ALIGN
-       new->scratch_aligned = alloc_percpu(*new->scratch_aligned);
-       if (!new->scratch_aligned)
-               goto out_scratch;
-#endif
        for_each_possible_cpu(i)
                *per_cpu_ptr(new->scratch, i) = NULL;
 
@@ -1357,10 +1386,7 @@ out_lt:
        }
 out_scratch_realloc:
        for_each_possible_cpu(i)
-               kfree(*per_cpu_ptr(new->scratch, i));
-#ifdef NFT_PIPAPO_ALIGN
-       free_percpu(new->scratch_aligned);
-#endif
+               pipapo_free_scratch(new, i);
 out_scratch:
        free_percpu(new->scratch);
        kfree(new);
@@ -1560,6 +1586,7 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
 {
        struct nft_pipapo *priv = nft_set_priv(set);
        struct net *net = read_pnet(&set->net);
+       u64 tstamp = nft_net_tstamp(net);
        int rules_f0, first_rule = 0;
        struct nft_pipapo_elem *e;
        struct nft_trans_gc *gc;
@@ -1594,7 +1621,7 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
                /* synchronous gc never fails, there is no need to set on
                 * NFT_SET_ELEM_DEAD_BIT.
                 */
-               if (nft_set_elem_expired(&e->ext)) {
+               if (__nft_set_elem_expired(&e->ext, tstamp)) {
                        priv->dirty = true;
 
                        gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
@@ -1640,13 +1667,9 @@ static void pipapo_free_match(struct nft_pipapo_match *m)
        int i;
 
        for_each_possible_cpu(i)
-               kfree(*per_cpu_ptr(m->scratch, i));
+               pipapo_free_scratch(m, i);
 
-#ifdef NFT_PIPAPO_ALIGN
-       free_percpu(m->scratch_aligned);
-#endif
        free_percpu(m->scratch);
-
        pipapo_free_fields(m);
 
        kfree(m);
@@ -1769,7 +1792,7 @@ static void *pipapo_deactivate(const struct net *net, const struct nft_set *set,
 {
        struct nft_pipapo_elem *e;
 
-       e = pipapo_get(net, set, data, nft_genmask_next(net));
+       e = pipapo_get(net, set, data, nft_genmask_next(net), nft_net_tstamp(net));
        if (IS_ERR(e))
                return NULL;
 
@@ -2132,7 +2155,7 @@ static int nft_pipapo_init(const struct nft_set *set,
        m->field_count = field_count;
        m->bsize_max = 0;
 
-       m->scratch = alloc_percpu(unsigned long *);
+       m->scratch = alloc_percpu(struct nft_pipapo_scratch *);
        if (!m->scratch) {
                err = -ENOMEM;
                goto out_scratch;
@@ -2140,16 +2163,6 @@ static int nft_pipapo_init(const struct nft_set *set,
        for_each_possible_cpu(i)
                *per_cpu_ptr(m->scratch, i) = NULL;
 
-#ifdef NFT_PIPAPO_ALIGN
-       m->scratch_aligned = alloc_percpu(unsigned long *);
-       if (!m->scratch_aligned) {
-               err = -ENOMEM;
-               goto out_free;
-       }
-       for_each_possible_cpu(i)
-               *per_cpu_ptr(m->scratch_aligned, i) = NULL;
-#endif
-
        rcu_head_init(&m->rcu);
 
        nft_pipapo_for_each_field(f, i, m) {
@@ -2180,9 +2193,6 @@ static int nft_pipapo_init(const struct nft_set *set,
        return 0;
 
 out_free:
-#ifdef NFT_PIPAPO_ALIGN
-       free_percpu(m->scratch_aligned);
-#endif
        free_percpu(m->scratch);
 out_scratch:
        kfree(m);
@@ -2236,11 +2246,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
 
                nft_set_pipapo_match_destroy(ctx, set, m);
 
-#ifdef NFT_PIPAPO_ALIGN
-               free_percpu(m->scratch_aligned);
-#endif
                for_each_possible_cpu(cpu)
-                       kfree(*per_cpu_ptr(m->scratch, cpu));
+                       pipapo_free_scratch(m, cpu);
                free_percpu(m->scratch);
                pipapo_free_fields(m);
                kfree(m);
@@ -2253,11 +2260,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
                if (priv->dirty)
                        nft_set_pipapo_match_destroy(ctx, set, m);
 
-#ifdef NFT_PIPAPO_ALIGN
-               free_percpu(priv->clone->scratch_aligned);
-#endif
                for_each_possible_cpu(cpu)
-                       kfree(*per_cpu_ptr(priv->clone->scratch, cpu));
+                       pipapo_free_scratch(priv->clone, cpu);
                free_percpu(priv->clone->scratch);
 
                pipapo_free_fields(priv->clone);
index 1040223da5fa3ab7bbfd4da4d348baee3d22a0d6..f59a0cd811051add128f9feaee4501fcc153ed79 100644 (file)
@@ -130,21 +130,29 @@ struct nft_pipapo_field {
        union nft_pipapo_map_bucket *mt;
 };
 
+/**
+ * struct nft_pipapo_scratch - percpu data used for lookup and matching
+ * @map_index: Current working bitmap index, toggled between field matches
+ * @align_off: Offset to get the originally allocated address
+ * @map:       store partial matching results during lookup
+ */
+struct nft_pipapo_scratch {
+       u8 map_index;
+       u32 align_off;
+       unsigned long map[];
+};
+
 /**
  * struct nft_pipapo_match - Data used for lookup and matching
  * @field_count                Amount of fields in set
  * @scratch:           Preallocated per-CPU maps for partial matching results
- * @scratch_aligned:   Version of @scratch aligned to NFT_PIPAPO_ALIGN bytes
  * @bsize_max:         Maximum lookup table bucket size of all fields, in longs
  * @rcu                        Matching data is swapped on commits
  * @f:                 Fields, with lookup and mapping tables
  */
 struct nft_pipapo_match {
        int field_count;
-#ifdef NFT_PIPAPO_ALIGN
-       unsigned long * __percpu *scratch_aligned;
-#endif
-       unsigned long * __percpu *scratch;
+       struct nft_pipapo_scratch * __percpu *scratch;
        size_t bsize_max;
        struct rcu_head rcu;
        struct nft_pipapo_field f[] __counted_by(field_count);
index 52e0d026d30ad2c92f63f589727cdc0b39d7092b..a3a8ddca991894b28aa1a1cd7c84ba0380366b5f 100644 (file)
@@ -57,7 +57,7 @@
 
 /* Jump to label if @reg is zero */
 #define NFT_PIPAPO_AVX2_NOMATCH_GOTO(reg, label)                       \
-       asm_volatile_goto("vptest %%ymm" #reg ", %%ymm" #reg ";"        \
+       asm goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \
                          "je %l[" #label "]" : : : : label)
 
 /* Store 256 bits from YMM register into memory. Contrary to bucket load
@@ -71,9 +71,6 @@
 #define NFT_PIPAPO_AVX2_ZERO(reg)                                      \
        asm volatile("vpxor %ymm" #reg ", %ymm" #reg ", %ymm" #reg)
 
-/* Current working bitmap index, toggled between field matches */
-static DEFINE_PER_CPU(bool, nft_pipapo_avx2_scratch_index);
-
 /**
  * nft_pipapo_avx2_prepare() - Prepare before main algorithm body
  *
@@ -1120,11 +1117,12 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
                            const u32 *key, const struct nft_set_ext **ext)
 {
        struct nft_pipapo *priv = nft_set_priv(set);
-       unsigned long *res, *fill, *scratch;
+       struct nft_pipapo_scratch *scratch;
        u8 genmask = nft_genmask_cur(net);
        const u8 *rp = (const u8 *)key;
        struct nft_pipapo_match *m;
        struct nft_pipapo_field *f;
+       unsigned long *res, *fill;
        bool map_index;
        int i, ret = 0;
 
@@ -1141,15 +1139,16 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
         */
        kernel_fpu_begin_mask(0);
 
-       scratch = *raw_cpu_ptr(m->scratch_aligned);
+       scratch = *raw_cpu_ptr(m->scratch);
        if (unlikely(!scratch)) {
                kernel_fpu_end();
                return false;
        }
-       map_index = raw_cpu_read(nft_pipapo_avx2_scratch_index);
 
-       res  = scratch + (map_index ? m->bsize_max : 0);
-       fill = scratch + (map_index ? 0 : m->bsize_max);
+       map_index = scratch->map_index;
+
+       res  = scratch->map + (map_index ? m->bsize_max : 0);
+       fill = scratch->map + (map_index ? 0 : m->bsize_max);
 
        /* Starting map doesn't need to be set for this implementation */
 
@@ -1221,7 +1220,7 @@ next_match:
 
 out:
        if (i % 2)
-               raw_cpu_write(nft_pipapo_avx2_scratch_index, !map_index);
+               scratch->map_index = !map_index;
        kernel_fpu_end();
 
        return ret >= 0;
index baa3fea4fe65c8f938e665a7fb6b0e4fc0f8f9ad..9944fe479e5361dc140f75be8b90bf3c5deb40f6 100644 (file)
@@ -234,7 +234,7 @@ static void nft_rbtree_gc_elem_remove(struct net *net, struct nft_set *set,
 
 static const struct nft_rbtree_elem *
 nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
-                  struct nft_rbtree_elem *rbe, u8 genmask)
+                  struct nft_rbtree_elem *rbe)
 {
        struct nft_set *set = (struct nft_set *)__set;
        struct rb_node *prev = rb_prev(&rbe->node);
@@ -253,7 +253,7 @@ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
        while (prev) {
                rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
                if (nft_rbtree_interval_end(rbe_prev) &&
-                   nft_set_elem_active(&rbe_prev->ext, genmask))
+                   nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
                        break;
 
                prev = rb_prev(prev);
@@ -313,6 +313,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
        struct nft_rbtree *priv = nft_set_priv(set);
        u8 cur_genmask = nft_genmask_cur(net);
        u8 genmask = nft_genmask_next(net);
+       u64 tstamp = nft_net_tstamp(net);
        int d;
 
        /* Descend the tree to search for an existing element greater than the
@@ -360,11 +361,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                /* perform garbage collection to avoid bogus overlap reports
                 * but skip new elements in this transaction.
                 */
-               if (nft_set_elem_expired(&rbe->ext) &&
+               if (__nft_set_elem_expired(&rbe->ext, tstamp) &&
                    nft_set_elem_active(&rbe->ext, cur_genmask)) {
                        const struct nft_rbtree_elem *removed_end;
 
-                       removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+                       removed_end = nft_rbtree_gc_elem(set, priv, rbe);
                        if (IS_ERR(removed_end))
                                return PTR_ERR(removed_end);
 
@@ -551,6 +552,7 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
        const struct nft_rbtree *priv = nft_set_priv(set);
        const struct rb_node *parent = priv->root.rb_node;
        u8 genmask = nft_genmask_next(net);
+       u64 tstamp = nft_net_tstamp(net);
        int d;
 
        while (parent != NULL) {
@@ -571,7 +573,7 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
-                       } else if (nft_set_elem_expired(&rbe->ext)) {
+                       } else if (__nft_set_elem_expired(&rbe->ext, tstamp)) {
                                break;
                        } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = parent->rb_left;
@@ -624,9 +626,10 @@ static void nft_rbtree_gc(struct nft_set *set)
 {
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe, *rbe_end = NULL;
+       struct net *net = read_pnet(&set->net);
+       u64 tstamp = nft_net_tstamp(net);
        struct rb_node *node, *next;
        struct nft_trans_gc *gc;
-       struct net *net;
 
        set  = nft_set_container_of(priv);
        net  = read_pnet(&set->net);
@@ -648,7 +651,7 @@ static void nft_rbtree_gc(struct nft_set *set)
                        rbe_end = rbe;
                        continue;
                }
-               if (!nft_set_elem_expired(&rbe->ext))
+               if (!__nft_set_elem_expired(&rbe->ext, tstamp))
                        continue;
 
                gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
index 9ed85be79452d990ad79ad9a0b31a26bb3f4c6a4..f30163e2ca620783cceda339c702c9f81b29cfa2 100644 (file)
@@ -242,6 +242,11 @@ static int nft_socket_validate(const struct nft_ctx *ctx,
                               const struct nft_expr *expr,
                               const struct nft_data **data)
 {
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain,
                                        (1 << NF_INET_PRE_ROUTING) |
                                        (1 << NF_INET_LOCAL_IN) |
index 13da882669a4ee026d286a7903e0c974e60541ac..1d737f89dfc18ccdf816e00407bc9be70c13e8f2 100644 (file)
@@ -186,7 +186,6 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
                break;
 #endif
        case NFPROTO_INET:
-       case NFPROTO_BRIDGE:
                err = nf_synproxy_ipv4_init(snet, ctx->net);
                if (err)
                        goto nf_ct_failure;
@@ -219,7 +218,6 @@ static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)
                break;
 #endif
        case NFPROTO_INET:
-       case NFPROTO_BRIDGE:
                nf_synproxy_ipv4_fini(snet, ctx->net);
                nf_synproxy_ipv6_fini(snet, ctx->net);
                break;
@@ -253,6 +251,11 @@ static int nft_synproxy_validate(const struct nft_ctx *ctx,
                                 const struct nft_expr *expr,
                                 const struct nft_data **data)
 {
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
                                                    (1 << NF_INET_FORWARD));
 }
index ae15cd693f0ec2857215c1daa7e633af222de423..71412adb73d414c43d2082362e854c3ad561d815 100644 (file)
@@ -316,6 +316,11 @@ static int nft_tproxy_validate(const struct nft_ctx *ctx,
                               const struct nft_expr *expr,
                               const struct nft_data **data)
 {
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
 }
 
index 9f21953c7433ff942caba909a8c8673baa3e003c..f735d79d8be5778a008485e893a2be78584318fe 100644 (file)
@@ -713,6 +713,7 @@ static const struct nft_object_ops nft_tunnel_obj_ops = {
 
 static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
        .type           = NFT_OBJECT_TUNNEL,
+       .family         = NFPROTO_NETDEV,
        .ops            = &nft_tunnel_obj_ops,
        .maxattr        = NFTA_TUNNEL_KEY_MAX,
        .policy         = nft_tunnel_key_policy,
index 452f8587addadce5a2e1f480d5685eb70c5760b0..1c866757db55247b8e267fb038dd4e1fbd9681ea 100644 (file)
@@ -235,6 +235,11 @@ static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *e
        const struct nft_xfrm *priv = nft_expr_priv(expr);
        unsigned int hooks;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        switch (priv->dir) {
        case XFRM_POLICY_IN:
                hooks = (1 << NF_INET_FORWARD) |
index ec6ed6fda96c5903d6136fce62f82912dc0701cb..343e65f377d442a9fda9e3556b04cbb7fea20174 100644 (file)
@@ -59,7 +59,7 @@ physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
            (!!outdev ^ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
                return false;
 
-       physdev = nf_bridge_get_physindev(skb);
+       physdev = nf_bridge_get_physindev(skb, xt_net(par));
        indev = physdev ? physdev->name : NULL;
 
        if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
index 4ed8ffd58ff375f3fa9f262e6f3b4d1a1aaf2731..9c962347cf859f16fc76e4d8a2fd22cdb3d142d6 100644 (file)
@@ -374,7 +374,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
        if (is_vmalloc_addr(skb->head)) {
                if (!skb->cloned ||
                    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
-                       vfree(skb->head);
+                       vfree_atomic(skb->head);
 
                skb->head = NULL;
        }
index d63d2e5dc60c97e46ae977674e113b019169682b..dae378f1d52b607abf1cd0931473b28a390f1949 100644 (file)
@@ -858,4 +858,5 @@ void nfc_digital_unregister_device(struct nfc_digital_dev *ddev)
 }
 EXPORT_SYMBOL(nfc_digital_unregister_device);
 
+MODULE_DESCRIPTION("NFC Digital protocol stack");
 MODULE_LICENSE("GPL");
index 6c9592d051206f242b2249f3a5a57d4640333a2c..cdad47b140fa4bd54ac0571457ab16ab505a3a11 100644 (file)
@@ -1208,6 +1208,10 @@ void nci_free_device(struct nci_dev *ndev)
 {
        nfc_free_device(ndev->nfc_dev);
        nci_hci_deallocate(ndev);
+
+       /* drop partial rx data packet if present */
+       if (ndev->rx_data_reassembly)
+               kfree_skb(ndev->rx_data_reassembly);
        kfree(ndev);
 }
 EXPORT_SYMBOL(nci_free_device);
@@ -1577,4 +1581,5 @@ static void nci_cmd_work(struct work_struct *work)
        }
 }
 
+MODULE_DESCRIPTION("NFC Controller Interface");
 MODULE_LICENSE("GPL");
index b68150c971d0b108dd97411fa9362e09d47caede..6a93533c480e656914d83278068ebf10b4189b24 100644 (file)
@@ -319,4 +319,5 @@ done:
 }
 EXPORT_SYMBOL_GPL(nci_spi_read);
 
+MODULE_DESCRIPTION("NFC Controller Interface (NCI) SPI link layer");
 MODULE_LICENSE("GPL");
index 01c4cdfef45df32ad0b0b942e416d6bc267687e1..8435a20968ef5112d44164ecbf89071f7ee4b855 100644 (file)
@@ -419,7 +419,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval,
 
        rs->rs_rx_traces = trace.rx_traces;
        for (i = 0; i < rs->rs_rx_traces; i++) {
-               if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
+               if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) {
                        rs->rs_rx_traces = 0;
                        return -EFAULT;
                }
index 2f8b39a614c319e0cf604a57ef2e6e17d89d5151..7818aae1be8e00c1e9b15c918868ca11b40a7213 100644 (file)
@@ -199,11 +199,19 @@ struct rxrpc_host_header {
  */
 struct rxrpc_skb_priv {
        struct rxrpc_connection *conn;  /* Connection referred to (poke packet) */
-       u16             offset;         /* Offset of data */
-       u16             len;            /* Length of data */
-       u8              flags;
+       union {
+               struct {
+                       u16             offset;         /* Offset of data */
+                       u16             len;            /* Length of data */
+                       u8              flags;
 #define RXRPC_RX_VERIFIED      0x01
-
+               };
+               struct {
+                       rxrpc_seq_t     first_ack;      /* First packet in acks table */
+                       u8              nr_acks;        /* Number of acks+nacks */
+                       u8              nr_nacks;       /* Number of nacks */
+               };
+       };
        struct rxrpc_host_header hdr;   /* RxRPC packet header from this packet */
 };
 
@@ -510,7 +518,7 @@ struct rxrpc_connection {
        enum rxrpc_call_completion completion;  /* Completion condition */
        s32                     abort_code;     /* Abort code of connection abort */
        int                     debug_id;       /* debug ID for printks */
-       atomic_t                serial;         /* packet serial number counter */
+       rxrpc_serial_t          tx_serial;      /* Outgoing packet serial number counter */
        unsigned int            hi_serial;      /* highest serial number received */
        u32                     service_id;     /* Service ID, possibly upgraded */
        u32                     security_level; /* Security level selected */
@@ -692,11 +700,11 @@ struct rxrpc_call {
        u8                      cong_dup_acks;  /* Count of ACKs showing missing packets */
        u8                      cong_cumul_acks; /* Cumulative ACK count */
        ktime_t                 cong_tstamp;    /* Last time cwnd was changed */
+       struct sk_buff          *cong_last_nack; /* Last ACK with nacks received */
 
        /* Receive-phase ACK management (ACKs we send). */
        u8                      ackr_reason;    /* reason to ACK */
        u16                     ackr_sack_base; /* Starting slot in SACK table ring */
-       rxrpc_serial_t          ackr_serial;    /* serial of packet being ACK'd */
        rxrpc_seq_t             ackr_window;    /* Base of SACK window */
        rxrpc_seq_t             ackr_wtop;      /* Base of SACK window */
        unsigned int            ackr_nr_unacked; /* Number of unacked packets */
@@ -730,7 +738,8 @@ struct rxrpc_call {
 struct rxrpc_ack_summary {
        u16                     nr_acks;                /* Number of ACKs in packet */
        u16                     nr_new_acks;            /* Number of new ACKs in packet */
-       u16                     nr_rot_new_acks;        /* Number of rotated new ACKs */
+       u16                     nr_new_nacks;           /* Number of new nacks in packet */
+       u16                     nr_retained_nacks;      /* Number of nacks retained between ACKs */
        u8                      ack_reason;
        bool                    saw_nacks;              /* Saw NACKs in packet */
        bool                    new_low_nack;           /* T if new low NACK found */
@@ -822,6 +831,20 @@ static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
 
 #include <trace/events/rxrpc.h>
 
+/*
+ * Allocate the next serial number on a connection.  0 must be skipped.
+ */
+static inline rxrpc_serial_t rxrpc_get_next_serial(struct rxrpc_connection *conn)
+{
+       rxrpc_serial_t serial;
+
+       serial = conn->tx_serial;
+       if (serial == 0)
+               serial = 1;
+       conn->tx_serial = serial + 1;
+       return serial;
+}
+
 /*
  * af_rxrpc.c
  */
@@ -1079,6 +1102,7 @@ void rxrpc_send_version_request(struct rxrpc_local *local,
 /*
  * local_object.c
  */
+void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set);
 struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *, enum rxrpc_local_trace);
 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *, enum rxrpc_local_trace);
index e363f21a20141bb13c931fc0cd40c862c49e5829..0f78544d043be9327ea13cc91fbfa532d6ef4002 100644 (file)
@@ -43,8 +43,6 @@ void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,
        unsigned long expiry = rxrpc_soft_ack_delay;
        unsigned long now = jiffies, ack_at;
 
-       call->ackr_serial = serial;
-
        if (rxrpc_soft_ack_delay < expiry)
                expiry = rxrpc_soft_ack_delay;
        if (call->peer->srtt_us != 0)
@@ -114,6 +112,7 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
 {
        struct rxrpc_ackpacket *ack = NULL;
+       struct rxrpc_skb_priv *sp;
        struct rxrpc_txbuf *txb;
        unsigned long resend_at;
        rxrpc_seq_t transmitted = READ_ONCE(call->tx_transmitted);
@@ -141,14 +140,15 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
         * explicitly NAK'd packets.
         */
        if (ack_skb) {
+               sp = rxrpc_skb(ack_skb);
                ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
 
-               for (i = 0; i < ack->nAcks; i++) {
+               for (i = 0; i < sp->nr_acks; i++) {
                        rxrpc_seq_t seq;
 
                        if (ack->acks[i] & 1)
                                continue;
-                       seq = ntohl(ack->firstPacket) + i;
+                       seq = sp->first_ack + i;
                        if (after(txb->seq, transmitted))
                                break;
                        if (after(txb->seq, seq))
@@ -373,7 +373,6 @@ static void rxrpc_send_initial_ping(struct rxrpc_call *call)
 bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
 {
        unsigned long now, next, t;
-       rxrpc_serial_t ackr_serial;
        bool resend = false, expired = false;
        s32 abort_code;
 
@@ -423,8 +422,7 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
        if (time_after_eq(now, t)) {
                trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
                cmpxchg(&call->delay_ack_at, t, now + MAX_JIFFY_OFFSET);
-               ackr_serial = xchg(&call->ackr_serial, 0);
-               rxrpc_send_ACK(call, RXRPC_ACK_DELAY, ackr_serial,
+               rxrpc_send_ACK(call, RXRPC_ACK_DELAY, 0,
                               rxrpc_propose_ack_ping_for_lost_ack);
        }
 
index 0943e54370ba0e71bcfa6d2238704b0b41c49ee9..9fc9a6c3f685868fe69842d5ac133a1b898674eb 100644 (file)
@@ -686,6 +686,7 @@ static void rxrpc_destroy_call(struct work_struct *work)
 
        del_timer_sync(&call->timer);
 
+       rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
        rxrpc_cleanup_ring(call);
        while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
                                               struct rxrpc_txbuf, call_link))) {
index 95f4bc206b3dc9a571abe6fb63cc6fe05575e9c9..1f251d758cb9d8be81856187d78e1994ef179072 100644 (file)
@@ -95,6 +95,14 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 
        _enter("%d", conn->debug_id);
 
+       if (sp && sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
+               if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+                                 &pkt.ack, sizeof(pkt.ack)) < 0)
+                       return;
+               if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
+                       return;
+       }
+
        chan = &conn->channels[channel];
 
        /* If the last call got moved on whilst we were waiting to run, just
@@ -117,7 +125,7 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        iov[2].iov_base = &ack_info;
        iov[2].iov_len  = sizeof(ack_info);
 
-       serial = atomic_inc_return(&conn->serial);
+       serial = rxrpc_get_next_serial(conn);
 
        pkt.whdr.epoch          = htonl(conn->proto.epoch);
        pkt.whdr.cid            = htonl(conn->proto.cid | channel);
index 92495e73b8699185cf76c60aa88f62d77a29dd56..9691de00ade7522d36174bbe1ab9098c1b52b145 100644 (file)
@@ -45,11 +45,9 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
        }
 
        cumulative_acks += summary->nr_new_acks;
-       cumulative_acks += summary->nr_rot_new_acks;
        if (cumulative_acks > 255)
                cumulative_acks = 255;
 
-       summary->mode = call->cong_mode;
        summary->cwnd = call->cong_cwnd;
        summary->ssthresh = call->cong_ssthresh;
        summary->cumulative_acks = cumulative_acks;
@@ -151,6 +149,7 @@ out_no_clear_ca:
                cwnd = RXRPC_TX_MAX_WINDOW;
        call->cong_cwnd = cwnd;
        call->cong_cumul_acks = cumulative_acks;
+       summary->mode = call->cong_mode;
        trace_rxrpc_congest(call, summary, acked_serial, change);
        if (resend)
                rxrpc_resend(call, skb);
@@ -213,7 +212,6 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
        list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
                if (before_eq(txb->seq, call->acks_hard_ack))
                        continue;
-               summary->nr_rot_new_acks++;
                if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
                        set_bit(RXRPC_CALL_TX_LAST, &call->flags);
                        rot_last = true;
@@ -254,6 +252,11 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
 {
        ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
 
+       if (unlikely(call->cong_last_nack)) {
+               rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
+               call->cong_last_nack = NULL;
+       }
+
        switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
@@ -702,6 +705,43 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
                wake_up(&call->waitq);
 }
 
+/*
+ * Determine how many nacks from the previous ACK have now been satisfied.
+ */
+static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
+                                             struct rxrpc_ack_summary *summary,
+                                             rxrpc_seq_t seq)
+{
+       struct sk_buff *skb = call->cong_last_nack;
+       struct rxrpc_ackpacket ack;
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       unsigned int i, new_acks = 0, retained_nacks = 0;
+       rxrpc_seq_t old_seq = sp->first_ack;
+       u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(ack);
+
+       if (after_eq(seq, old_seq + sp->nr_acks)) {
+               summary->nr_new_acks += sp->nr_nacks;
+               summary->nr_new_acks += seq - (old_seq + sp->nr_acks);
+               summary->nr_retained_nacks = 0;
+       } else if (seq == old_seq) {
+               summary->nr_retained_nacks = sp->nr_nacks;
+       } else {
+               for (i = 0; i < sp->nr_acks; i++) {
+                       if (acks[i] == RXRPC_ACK_TYPE_NACK) {
+                               if (before(old_seq + i, seq))
+                                       new_acks++;
+                               else
+                                       retained_nacks++;
+                       }
+               }
+
+               summary->nr_new_acks += new_acks;
+               summary->nr_retained_nacks = retained_nacks;
+       }
+
+       return old_seq + sp->nr_acks;
+}
+
 /*
  * Process individual soft ACKs.
  *
@@ -711,25 +751,51 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
  * the timer on the basis that the peer might just not have processed them at
  * the time the ACK was sent.
  */
-static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
-                                 rxrpc_seq_t seq, int nr_acks,
-                                 struct rxrpc_ack_summary *summary)
+static void rxrpc_input_soft_acks(struct rxrpc_call *call,
+                                 struct rxrpc_ack_summary *summary,
+                                 struct sk_buff *skb,
+                                 rxrpc_seq_t seq,
+                                 rxrpc_seq_t since)
 {
-       unsigned int i;
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       unsigned int i, old_nacks = 0;
+       rxrpc_seq_t lowest_nak = seq + sp->nr_acks;
+       u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
 
-       for (i = 0; i < nr_acks; i++) {
+       for (i = 0; i < sp->nr_acks; i++) {
                if (acks[i] == RXRPC_ACK_TYPE_ACK) {
                        summary->nr_acks++;
-                       summary->nr_new_acks++;
+                       if (after_eq(seq, since))
+                               summary->nr_new_acks++;
                } else {
-                       if (!summary->saw_nacks &&
-                           call->acks_lowest_nak != seq + i) {
-                               call->acks_lowest_nak = seq + i;
-                               summary->new_low_nack = true;
-                       }
                        summary->saw_nacks = true;
+                       if (before(seq, since)) {
+                               /* Overlap with previous ACK */
+                               old_nacks++;
+                       } else {
+                               summary->nr_new_nacks++;
+                               sp->nr_nacks++;
+                       }
+
+                       if (before(seq, lowest_nak))
+                               lowest_nak = seq;
                }
+               seq++;
+       }
+
+       if (lowest_nak != call->acks_lowest_nak) {
+               call->acks_lowest_nak = lowest_nak;
+               summary->new_low_nack = true;
        }
+
+       /* We *can* have more nacks than we did - the peer is permitted to drop
+        * packets it has soft-acked and re-request them.  Further, it is
+        * possible for the nack distribution to change whilst the number of
+        * nacks stays the same or goes down.
+        */
+       if (old_nacks < summary->nr_retained_nacks)
+               summary->nr_new_acks += summary->nr_retained_nacks - old_nacks;
+       summary->nr_retained_nacks = old_nacks;
 }
 
 /*
@@ -773,7 +839,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_ackinfo info;
        rxrpc_serial_t ack_serial, acked_serial;
-       rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
+       rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt, since;
        int nr_acks, offset, ioffset;
 
        _enter("");
@@ -789,6 +855,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
        prev_pkt = ntohl(ack.previousPacket);
        hard_ack = first_soft_ack - 1;
        nr_acks = ack.nAcks;
+       sp->first_ack = first_soft_ack;
+       sp->nr_acks = nr_acks;
        summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ?
                              ack.reason : RXRPC_ACK__INVALID);
 
@@ -858,6 +926,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
        if (nr_acks > 0)
                skb_condense(skb);
 
+       if (call->cong_last_nack) {
+               since = rxrpc_input_check_prev_ack(call, &summary, first_soft_ack);
+               rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
+               call->cong_last_nack = NULL;
+       } else {
+               summary.nr_new_acks = first_soft_ack - call->acks_first_seq;
+               call->acks_lowest_nak = first_soft_ack + nr_acks;
+               since = first_soft_ack;
+       }
+
        call->acks_latest_ts = skb->tstamp;
        call->acks_first_seq = first_soft_ack;
        call->acks_prev_seq = prev_pkt;
@@ -866,7 +944,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
        case RXRPC_ACK_PING:
                break;
        default:
-               if (after(acked_serial, call->acks_highest_serial))
+               if (acked_serial && after(acked_serial, call->acks_highest_serial))
                        call->acks_highest_serial = acked_serial;
                break;
        }
@@ -905,8 +983,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
        if (nr_acks > 0) {
                if (offset > (int)skb->len - nr_acks)
                        return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
-               rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack,
-                                     nr_acks, &summary);
+               rxrpc_input_soft_acks(call, &summary, skb, first_soft_ack, since);
+               rxrpc_get_skb(skb, rxrpc_skb_get_last_nack);
+               call->cong_last_nack = skb;
        }
 
        if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
index c553a30e9c8386384cc5f038c3ecb6570349c7b7..34d30736813531007f0a394f897f531f6db66eb4 100644 (file)
@@ -36,6 +36,17 @@ static void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, int err,
                return ipv6_icmp_error(sk, skb, err, port, info, payload);
 }
 
+/*
+ * Set or clear the Don't Fragment flag on a socket.
+ */
+void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set)
+{
+       if (set)
+               ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DO);
+       else
+               ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DONT);
+}
+
 /*
  * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
  * same or greater than.
@@ -203,7 +214,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                ip_sock_set_recverr(usk);
 
                /* we want to set the don't fragment bit */
-               ip_sock_set_mtu_discover(usk, IP_PMTUDISC_DO);
+               rxrpc_local_dont_fragment(local, true);
 
                /* We want receive timestamps. */
                sock_enable_timestamps(usk);
index 5e53429c692288cef7a868fcbb61cbcc7ffe3c85..4a292f860ae37a41bddcd99f7e3bdc6a2c092d29 100644 (file)
@@ -216,7 +216,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
        iov[0].iov_len  = sizeof(txb->wire) + sizeof(txb->ack) + n;
        len = iov[0].iov_len;
 
-       serial = atomic_inc_return(&conn->serial);
+       serial = rxrpc_get_next_serial(conn);
        txb->wire.serial = htonl(serial);
        trace_rxrpc_tx_ack(call->debug_id, serial,
                           ntohl(txb->ack.firstPacket),
@@ -302,7 +302,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
        iov[0].iov_base = &pkt;
        iov[0].iov_len  = sizeof(pkt);
 
-       serial = atomic_inc_return(&conn->serial);
+       serial = rxrpc_get_next_serial(conn);
        pkt.whdr.serial = htonl(serial);
 
        iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt));
@@ -334,7 +334,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
        _enter("%x,{%d}", txb->seq, txb->len);
 
        /* Each transmission of a Tx packet needs a new serial number */
-       serial = atomic_inc_return(&conn->serial);
+       serial = rxrpc_get_next_serial(conn);
        txb->wire.serial = htonl(serial);
 
        if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
@@ -494,14 +494,12 @@ send_fragmentable:
        switch (conn->local->srx.transport.family) {
        case AF_INET6:
        case AF_INET:
-               ip_sock_set_mtu_discover(conn->local->socket->sk,
-                                        IP_PMTUDISC_DONT);
+               rxrpc_local_dont_fragment(conn->local, false);
                rxrpc_inc_stat(call->rxnet, stat_tx_data_send_frag);
                ret = do_udp_sendmsg(conn->local->socket, &msg, len);
                conn->peer->last_tx_at = ktime_get_seconds();
 
-               ip_sock_set_mtu_discover(conn->local->socket->sk,
-                                        IP_PMTUDISC_DO);
+               rxrpc_local_dont_fragment(conn->local, true);
                break;
 
        default:
@@ -560,7 +558,7 @@ void rxrpc_send_conn_abort(struct rxrpc_connection *conn)
 
        len = iov[0].iov_len + iov[1].iov_len;
 
-       serial = atomic_inc_return(&conn->serial);
+       serial = rxrpc_get_next_serial(conn);
        whdr.serial = htonl(serial);
 
        iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
index 6c86cbb98d1d601edc9dad728c72f887067a376e..26dc2f26d92d8d67f82229675254d7217c2184e0 100644 (file)
@@ -181,7 +181,7 @@ print:
                   atomic_read(&conn->active),
                   state,
                   key_serial(conn->key),
-                  atomic_read(&conn->serial),
+                  conn->tx_serial,
                   conn->hi_serial,
                   conn->channels[0].call_id,
                   conn->channels[1].call_id,
index 1bf571a66e020d263ceb1d5a4489253b8fbf9728..6b32d61d4cdc46719d4a011987f6ea112ae59fc1 100644 (file)
@@ -664,7 +664,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
 
        len = iov[0].iov_len + iov[1].iov_len;
 
-       serial = atomic_inc_return(&conn->serial);
+       serial = rxrpc_get_next_serial(conn);
        whdr.serial = htonl(serial);
 
        ret = kernel_sendmsg(conn->local->socket, &msg, iov, 2, len);
@@ -721,10 +721,12 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
 
        len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
 
-       serial = atomic_inc_return(&conn->serial);
+       serial = rxrpc_get_next_serial(conn);
        whdr.serial = htonl(serial);
 
+       rxrpc_local_dont_fragment(conn->local, false);
        ret = kernel_sendmsg(conn->local->socket, &msg, iov, 3, len);
+       rxrpc_local_dont_fragment(conn->local, true);
        if (ret < 0) {
                trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
                                    rxrpc_tx_point_rxkad_response);
index e3236a3169c32f470ca9a6abfcb065d79d9860fe..ff3d396a65aac0dec81fc79a6bea44c24cfd7a68 100644 (file)
@@ -1424,6 +1424,14 @@ static void tcf_block_owner_del(struct tcf_block *block,
        WARN_ON(1);
 }
 
+static bool tcf_block_tracks_dev(struct tcf_block *block,
+                                struct tcf_block_ext_info *ei)
+{
+       return tcf_block_shared(block) &&
+              (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
+               ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
+}
+
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                      struct tcf_block_ext_info *ei,
                      struct netlink_ext_ack *extack)
@@ -1462,7 +1470,7 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
        if (err)
                goto err_block_offload_bind;
 
-       if (tcf_block_shared(block)) {
+       if (tcf_block_tracks_dev(block, ei)) {
                err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
                if (err) {
                        NL_SET_ERR_MSG(extack, "block dev insert failed");
@@ -1516,7 +1524,7 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 
        if (!block)
                return;
-       if (tcf_block_shared(block))
+       if (tcf_block_tracks_dev(block, ei))
                xa_erase(&block->ports, dev->ifindex);
        tcf_chain0_head_change_cb_del(block, ei);
        tcf_block_owner_del(block, q, ei->binder_type);
@@ -1552,6 +1560,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
             chain_prev = chain,
                     chain = __tcf_get_next_chain(block, chain),
                     tcf_chain_put(chain_prev)) {
+               if (chain->tmplt_ops && add)
+                       chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
+                                                         cb_priv);
                for (tp = __tcf_get_next_proto(chain, NULL); tp;
                     tp_prev = tp,
                             tp = __tcf_get_next_proto(chain, tp),
@@ -1567,6 +1578,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
                                goto err_playback_remove;
                        }
                }
+               if (chain->tmplt_ops && !add)
+                       chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
+                                                         cb_priv);
        }
 
        return 0;
@@ -2992,7 +3006,8 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
        ops = tcf_proto_lookup_ops(name, true, extack);
        if (IS_ERR(ops))
                return PTR_ERR(ops);
-       if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
+       if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
+           !ops->tmplt_reoffload) {
                NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
                module_put(ops->owner);
                return -EOPNOTSUPP;
index e5314a31f75ae3a6db31cb81a3ebf5316a3005ff..efb9d2811b73d18862f824b0b7a8b4e6b905271d 100644 (file)
@@ -2721,6 +2721,28 @@ static void fl_tmplt_destroy(void *tmplt_priv)
        kfree(tmplt);
 }
 
+static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
+                              flow_setup_cb_t *cb, void *cb_priv)
+{
+       struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
+       struct flow_cls_offload cls_flower = {};
+
+       cls_flower.rule = flow_rule_alloc(0);
+       if (!cls_flower.rule)
+               return;
+
+       cls_flower.common.chain_index = chain->index;
+       cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
+                                  FLOW_CLS_TMPLT_DESTROY;
+       cls_flower.cookie = (unsigned long) tmplt;
+       cls_flower.rule->match.dissector = &tmplt->dissector;
+       cls_flower.rule->match.mask = &tmplt->mask;
+       cls_flower.rule->match.key = &tmplt->dummy_key;
+
+       cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+       kfree(cls_flower.rule);
+}
+
 static int fl_dump_key_val(struct sk_buff *skb,
                           void *val, int val_type,
                           void *mask, int mask_type, int len)
@@ -3628,6 +3650,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
        .bind_class     = fl_bind_class,
        .tmplt_create   = fl_tmplt_create,
        .tmplt_destroy  = fl_tmplt_destroy,
+       .tmplt_reoffload = fl_tmplt_reoffload,
        .tmplt_dump     = fl_tmplt_dump,
        .get_exts       = fl_get_exts,
        .owner          = THIS_MODULE,
index 95cc95458e2d8d2c2c3578088544ee1abe0ea8a6..e4c858411207a51d043aef96a47c48ac63f5dd8a 100644 (file)
@@ -1877,9 +1877,15 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
                           struct smcd_dev *smcismdev,
                           struct smcd_gid *peer_gid)
 {
-       return lgr->peer_gid.gid == peer_gid->gid && lgr->smcd == smcismdev &&
-               smc_ism_is_virtual(smcismdev) ?
-               (lgr->peer_gid.gid_ext == peer_gid->gid_ext) : 1;
+       if (lgr->peer_gid.gid != peer_gid->gid ||
+           lgr->smcd != smcismdev)
+               return false;
+
+       if (smc_ism_is_virtual(smcismdev) &&
+           lgr->peer_gid.gid_ext != peer_gid->gid_ext)
+               return false;
+
+       return true;
 }
 
 /* create a new SMC connection (and a new link group if necessary) */
index 52f7c4f1e7670d723a6858614f071f73dbd88dc5..5a33908015f3e3197ad11869c6f5134799307c56 100644 (file)
@@ -164,7 +164,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
        }
        if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd &&
            (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
-           !list_empty(&smc->conn.lgr->list)) {
+           !list_empty(&smc->conn.lgr->list) && smc->conn.rmb_desc) {
                struct smc_connection *conn = &smc->conn;
                struct smcd_diag_dmbinfo dinfo;
                struct smcd_dev *smcd = conn->lgr->smcd;
index 1af71fbb0d80590632439c436a81d218a4123bff..c7af0220f82f42d8e630c9ee182d4e3c4922fb87 100644 (file)
@@ -2280,6 +2280,7 @@ static void __exit exit_rpcsec_gss(void)
 }
 
 MODULE_ALIAS("rpc-auth-6");
+MODULE_DESCRIPTION("Sun RPC Kerberos RPCSEC_GSS client authentication");
 MODULE_LICENSE("GPL");
 module_param_named(expired_cred_retry_delay,
                   gss_expired_cred_retry_delay,
index e31cfdf7eadcb9bfc6b3d3f3ad1e71db4f03b72e..64cff717c3d9b30c101bbd090df41bd161e20ee7 100644 (file)
@@ -650,6 +650,7 @@ static void __exit cleanup_kerberos_module(void)
        gss_mech_unregister(&gss_kerberos_mech);
 }
 
+MODULE_DESCRIPTION("Sun RPC Kerberos 5 module");
 MODULE_LICENSE("GPL");
 module_init(init_kerberos_module);
 module_exit(cleanup_kerberos_module);
index 691c0000e9eae14c0d0dfd08b2969fa663252ed0..bab6cab2940524a970422b62b3fa4212c61c4f43 100644 (file)
@@ -148,6 +148,7 @@ cleanup_sunrpc(void)
 #endif
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
+MODULE_DESCRIPTION("Sun RPC core");
 MODULE_LICENSE("GPL");
 fs_initcall(init_sunrpc); /* Ensure we're initialised before nfs */
 module_exit(cleanup_sunrpc);
index f60c93e5a25d69f6c918ab43a9c48a973cbf90b4..b969e505c7b77002e17936c7ee4fa6e6c79ad223 100644 (file)
@@ -1598,10 +1598,10 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
        /* Finally, send the reply synchronously */
        if (rqstp->bc_to_initval > 0) {
                timeout.to_initval = rqstp->bc_to_initval;
-               timeout.to_retries = rqstp->bc_to_initval;
+               timeout.to_retries = rqstp->bc_to_retries;
        } else {
                timeout.to_initval = req->rq_xprt->timeout->to_initval;
-               timeout.to_initval = req->rq_xprt->timeout->to_retries;
+               timeout.to_retries = req->rq_xprt->timeout->to_retries;
        }
        memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
        task = rpc_run_bc_task(req, &timeout);
index bfb2f78523a8289f0a6ea758ca61c53d06832273..545017a3daa4d6b20255c51c6c0dea73ec32ecfc 100644 (file)
@@ -717,12 +717,12 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
                                ARRAY_SIZE(rqstp->rq_bvec), xdr);
 
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
-                     count, 0);
+                     count, rqstp->rq_res.len);
        err = sock_sendmsg(svsk->sk_sock, &msg);
        if (err == -ECONNREFUSED) {
                /* ICMP error on earlier request. */
                iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
-                             count, 0);
+                             count, rqstp->rq_res.len);
                err = sock_sendmsg(svsk->sk_sock, &msg);
        }
 
index 2cde375477e381aa4a542cd4cf24db067770b466..878415c43527615801186d79a6c0c73b62bf5750 100644 (file)
@@ -1086,6 +1086,12 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
 
 #ifdef CONFIG_TIPC_MEDIA_UDP
        if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
+               if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
+                       rtnl_unlock();
+                       NL_SET_ERR_MSG(info->extack, "UDP option is unsupported");
+                       return -EINVAL;
+               }
+
                err = tipc_udp_nl_bearer_add(b,
                                             attrs[TIPC_NLA_BEARER_UDP_OPTS]);
                if (err) {
index e37b4d2e2acde25d6879770629b3996d03860c56..31e8a94dfc111b7705fe19b9b4ddee3e6a317a23 100644 (file)
@@ -1052,7 +1052,11 @@ alloc_encrypted:
                        if (ret < 0)
                                goto send_end;
                        tls_ctx->pending_open_record_frags = true;
-                       if (full_record || eor || sk_msg_full(msg_pl))
+
+                       if (sk_msg_full(msg_pl))
+                               full_record = true;
+
+                       if (full_record || eor)
                                goto copied;
                        continue;
                }
index ac1f2bc18fc9685652c26ac3b68f19bfd82f8332..30b178ebba60aa810e8442a326a14edcee071061 100644 (file)
@@ -1344,13 +1344,11 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
                unix_state_lock(sk1);
                return;
        }
-       if (sk1 < sk2) {
-               unix_state_lock(sk1);
-               unix_state_lock_nested(sk2);
-       } else {
-               unix_state_lock(sk2);
-               unix_state_lock_nested(sk1);
-       }
+       if (sk1 > sk2)
+               swap(sk1, sk2);
+
+       unix_state_lock(sk1);
+       unix_state_lock_nested(sk2, U_LOCK_SECOND);
 }
 
 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
@@ -1591,7 +1589,7 @@ restart:
                goto out_unlock;
        }
 
-       unix_state_lock_nested(sk);
+       unix_state_lock_nested(sk, U_LOCK_SECOND);
 
        if (sk->sk_state != st) {
                unix_state_unlock(sk);
index bec09a3a1d44ce56d43e16583fdf3b417cce4033..be19827eca36dbb68ec97b2e9b3c80e22b4fa4be 100644 (file)
@@ -84,7 +84,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
                         * queue lock. With the other's queue locked it's
                         * OK to lock the state.
                         */
-                       unix_state_lock_nested(req);
+                       unix_state_lock_nested(req, U_LOCK_DIAG);
                        peer = unix_sk(req)->peer;
                        buf[i++] = (peer ? sock_i_ino(peer) : 0);
                        unix_state_unlock(req);
index 2405f0f9af31c0ccefe2aa404002cfab8583c090..8f63f0b4bf0129c4704919f8742a052566943b1e 100644 (file)
@@ -314,6 +314,17 @@ void unix_gc(void)
        /* Here we are. Hitlist is filled. Die. */
        __skb_queue_purge(&hitlist);
 
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+       list_for_each_entry_safe(u, next, &gc_candidates, link) {
+               struct sk_buff *skb = u->oob_skb;
+
+               if (skb) {
+                       u->oob_skb = NULL;
+                       kfree_skb(skb);
+               }
+       }
+#endif
+
        spin_lock(&unix_gc_lock);
 
        /* There could be io_uring registered files, just push them back to
index a9ac85e09af37ca8f7d1599e7057f98e7d8200be..10345388ad139f5f9b35025b2336c548bd3344be 100644 (file)
@@ -206,7 +206,6 @@ config CFG80211_KUNIT_TEST
        depends on KUNIT
        depends on CFG80211
        default KUNIT_ALL_TESTS
-       depends on !KERNEL_6_2
        help
          Enable this option to test cfg80211 functions with kunit.
 
index 409d74c57ca0d8c8d36c2260897fce39557620ee..3fb1b637352a9d0b469206d890601031ffd4c68f 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2010         Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -1661,6 +1661,7 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
                              unsigned long delay)
 {
        if (!delay) {
+               del_timer(&dwork->timer);
                wiphy_work_queue(wiphy, &dwork->work);
                return;
        }
index 60877b532993219c6607c28d6b4e0fb6ae2506ad..b09700400d09744ee1b0c990e46806264df25e3b 100644 (file)
@@ -4020,6 +4020,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
                }
                wiphy_unlock(&rdev->wiphy);
 
+               if_start = 0;
                wp_idx++;
        }
  out:
index 2249b1a89d1c4cee36bda840d64dda612d367c5f..389a52c29bfc728c2437037b4f0e180b974d12ba 100644 (file)
@@ -1731,6 +1731,61 @@ static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known,
        }
 }
 
+static void cfg80211_check_stuck_ecsa(struct cfg80211_registered_device *rdev,
+                                     struct cfg80211_internal_bss *known,
+                                     const struct cfg80211_bss_ies *old)
+{
+       const struct ieee80211_ext_chansw_ie *ecsa;
+       const struct element *elem_new, *elem_old;
+       const struct cfg80211_bss_ies *new, *bcn;
+
+       if (known->pub.proberesp_ecsa_stuck)
+               return;
+
+       new = rcu_dereference_protected(known->pub.proberesp_ies,
+                                       lockdep_is_held(&rdev->bss_lock));
+       if (WARN_ON(!new))
+               return;
+
+       if (new->tsf - old->tsf < USEC_PER_SEC)
+               return;
+
+       elem_old = cfg80211_find_elem(WLAN_EID_EXT_CHANSWITCH_ANN,
+                                     old->data, old->len);
+       if (!elem_old)
+               return;
+
+       elem_new = cfg80211_find_elem(WLAN_EID_EXT_CHANSWITCH_ANN,
+                                     new->data, new->len);
+       if (!elem_new)
+               return;
+
+       bcn = rcu_dereference_protected(known->pub.beacon_ies,
+                                       lockdep_is_held(&rdev->bss_lock));
+       if (bcn &&
+           cfg80211_find_elem(WLAN_EID_EXT_CHANSWITCH_ANN,
+                              bcn->data, bcn->len))
+               return;
+
+       if (elem_new->datalen != elem_old->datalen)
+               return;
+       if (elem_new->datalen < sizeof(struct ieee80211_ext_chansw_ie))
+               return;
+       if (memcmp(elem_new->data, elem_old->data, elem_new->datalen))
+               return;
+
+       ecsa = (void *)elem_new->data;
+
+       if (!ecsa->mode)
+               return;
+
+       if (ecsa->new_ch_num !=
+           ieee80211_frequency_to_channel(known->pub.channel->center_freq))
+               return;
+
+       known->pub.proberesp_ecsa_stuck = 1;
+}
+
 static bool
 cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
                          struct cfg80211_internal_bss *known,
@@ -1750,8 +1805,10 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
                /* Override possible earlier Beacon frame IEs */
                rcu_assign_pointer(known->pub.ies,
                                   new->pub.proberesp_ies);
-               if (old)
+               if (old) {
+                       cfg80211_check_stuck_ecsa(rdev, known, old);
                        kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+               }
        }
 
        if (rcu_access_pointer(new->pub.beacon_ies)) {
index 9f13aa3353e31f9692ce41db10a977ac2614d7d8..1eadfac03cc41d35709c001a77759a23f7dbdc39 100644 (file)
@@ -167,8 +167,10 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
                contd = XDP_PKT_CONTD;
 
        err = __xsk_rcv_zc(xs, xskb, len, contd);
-       if (err || likely(!frags))
-               goto out;
+       if (err)
+               goto err;
+       if (likely(!frags))
+               return 0;
 
        xskb_list = &xskb->pool->xskb_list;
        list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
@@ -177,11 +179,13 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
                len = pos->xdp.data_end - pos->xdp.data;
                err = __xsk_rcv_zc(xs, pos, len, contd);
                if (err)
-                       return err;
+                       goto err;
                list_del(&pos->xskb_list_node);
        }
 
-out:
+       return 0;
+err:
+       xsk_buff_free(xdp);
        return err;
 }
 
index 28711cc44ced216573938f392de3b452f2176410..ce60ecd48a4dc88eed7582bc0701f7c72acc84f5 100644 (file)
@@ -555,6 +555,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 
        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
        xskb->xdp.data_meta = xskb->xdp.data;
+       xskb->xdp.flags = 0;
 
        if (pool->dma_need_sync) {
                dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
index 7048bb3594d65be6d132d4103ee801fadf087b7e..634e81d83efd9577337e37f1ce42911574b0adf9 100644 (file)
@@ -4,14 +4,14 @@
 #define __ASM_GOTO_WORKAROUND_H
 
 /*
- * This will bring in asm_volatile_goto and asm_inline macro definitions
+ * This will bring in asm_goto_output and asm_inline macro definitions
  * if enabled by compiler and config options.
  */
 #include <linux/types.h>
 
-#ifdef asm_volatile_goto
-#undef asm_volatile_goto
-#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
+#ifdef asm_goto_output
+#undef asm_goto_output
+#define asm_goto_output(x...) asm volatile("invalid use of asm_goto_output")
 #endif
 
 /*
diff --git a/samples/cgroup/.gitignore b/samples/cgroup/.gitignore
new file mode 100644 (file)
index 0000000..3a01611
--- /dev/null
@@ -0,0 +1,3 @@
+/cgroup_event_listener
+/memcg_event_listener
+
index e2a6a69352dfb775ebc7d6954c98943ec2a3097f..81220390851a396cb93cb66339130523d991eeaa 100644 (file)
@@ -24,6 +24,41 @@ extern void my_tramp2(void *);
 
 static unsigned long my_ip = (unsigned long)schedule;
 
+#ifdef CONFIG_RISCV
+#include <asm/asm.h>
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp1, @function\n"
+"      .globl          my_tramp1\n"
+"   my_tramp1:\n"
+"      addi    sp,sp,-2*"SZREG"\n"
+"      "REG_S" t0,0*"SZREG"(sp)\n"
+"      "REG_S" ra,1*"SZREG"(sp)\n"
+"      call    my_direct_func1\n"
+"      "REG_L" t0,0*"SZREG"(sp)\n"
+"      "REG_L" ra,1*"SZREG"(sp)\n"
+"      addi    sp,sp,2*"SZREG"\n"
+"      jr      t0\n"
+"      .size           my_tramp1, .-my_tramp1\n"
+"      .type           my_tramp2, @function\n"
+"      .globl          my_tramp2\n"
+
+"   my_tramp2:\n"
+"      addi    sp,sp,-2*"SZREG"\n"
+"      "REG_S" t0,0*"SZREG"(sp)\n"
+"      "REG_S" ra,1*"SZREG"(sp)\n"
+"      call    my_direct_func2\n"
+"      "REG_L" t0,0*"SZREG"(sp)\n"
+"      "REG_L" ra,1*"SZREG"(sp)\n"
+"      addi    sp,sp,2*"SZREG"\n"
+"      jr      t0\n"
+"      .size           my_tramp2, .-my_tramp2\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_RISCV */
+
 #ifdef CONFIG_X86_64
 
 #include <asm/ibt.h>
index 2e349834d63c386ef54a8d3fecb2df713e7e4f2e..f943e40d57fd32ed3fd1c57c9b197ac916ebe33d 100644 (file)
@@ -22,6 +22,47 @@ void my_direct_func2(unsigned long ip)
 extern void my_tramp1(void *);
 extern void my_tramp2(void *);
 
+#ifdef CONFIG_RISCV
+#include <asm/asm.h>
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp1, @function\n"
+"      .globl          my_tramp1\n"
+"   my_tramp1:\n"
+"       addi   sp,sp,-3*"SZREG"\n"
+"       "REG_S"        a0,0*"SZREG"(sp)\n"
+"       "REG_S"        t0,1*"SZREG"(sp)\n"
+"       "REG_S"        ra,2*"SZREG"(sp)\n"
+"       mv     a0,t0\n"
+"       call   my_direct_func1\n"
+"       "REG_L"        a0,0*"SZREG"(sp)\n"
+"       "REG_L"        t0,1*"SZREG"(sp)\n"
+"       "REG_L"        ra,2*"SZREG"(sp)\n"
+"       addi   sp,sp,3*"SZREG"\n"
+"      jr      t0\n"
+"      .size           my_tramp1, .-my_tramp1\n"
+
+"      .type           my_tramp2, @function\n"
+"      .globl          my_tramp2\n"
+"   my_tramp2:\n"
+"       addi   sp,sp,-3*"SZREG"\n"
+"       "REG_S"        a0,0*"SZREG"(sp)\n"
+"       "REG_S"        t0,1*"SZREG"(sp)\n"
+"       "REG_S"        ra,2*"SZREG"(sp)\n"
+"       mv     a0,t0\n"
+"       call   my_direct_func2\n"
+"       "REG_L"        a0,0*"SZREG"(sp)\n"
+"       "REG_L"        t0,1*"SZREG"(sp)\n"
+"       "REG_L"        ra,2*"SZREG"(sp)\n"
+"       addi   sp,sp,3*"SZREG"\n"
+"      jr      t0\n"
+"      .size           my_tramp2, .-my_tramp2\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_RISCV */
+
 #ifdef CONFIG_X86_64
 
 #include <asm/ibt.h>
index 9243dbfe4d0c1f72f7e9d55f5d7fb9631482018f..aed6df2927ce1833af8729810ea182ad5e492bfb 100644 (file)
@@ -17,6 +17,31 @@ void my_direct_func(unsigned long ip)
 
 extern void my_tramp(void *);
 
+#ifdef CONFIG_RISCV
+#include <asm/asm.h>
+
+asm (
+"       .pushsection    .text, \"ax\", @progbits\n"
+"       .type           my_tramp, @function\n"
+"       .globl          my_tramp\n"
+"   my_tramp:\n"
+"       addi   sp,sp,-3*"SZREG"\n"
+"       "REG_S"        a0,0*"SZREG"(sp)\n"
+"       "REG_S"        t0,1*"SZREG"(sp)\n"
+"       "REG_S"        ra,2*"SZREG"(sp)\n"
+"       mv     a0,t0\n"
+"       call   my_direct_func\n"
+"       "REG_L"        a0,0*"SZREG"(sp)\n"
+"       "REG_L"        t0,1*"SZREG"(sp)\n"
+"       "REG_L"        ra,2*"SZREG"(sp)\n"
+"       addi   sp,sp,3*"SZREG"\n"
+"       jr     t0\n"
+"       .size           my_tramp, .-my_tramp\n"
+"       .popsection\n"
+);
+
+#endif /* CONFIG_RISCV */
+
 #ifdef CONFIG_X86_64
 
 #include <asm/ibt.h>
index e39c3563ae4e42845aa8028aafa8fce394ab7759..6ff546a5d7eb05270683701b9693f3be2101b36d 100644 (file)
@@ -19,6 +19,34 @@ void my_direct_func(struct vm_area_struct *vma, unsigned long address,
 
 extern void my_tramp(void *);
 
+#ifdef CONFIG_RISCV
+#include <asm/asm.h>
+
+asm (
+"       .pushsection    .text, \"ax\", @progbits\n"
+"       .type           my_tramp, @function\n"
+"       .globl          my_tramp\n"
+"   my_tramp:\n"
+"       addi   sp,sp,-5*"SZREG"\n"
+"       "REG_S"        a0,0*"SZREG"(sp)\n"
+"       "REG_S"        a1,1*"SZREG"(sp)\n"
+"       "REG_S"        a2,2*"SZREG"(sp)\n"
+"       "REG_S"        t0,3*"SZREG"(sp)\n"
+"       "REG_S"        ra,4*"SZREG"(sp)\n"
+"       call   my_direct_func\n"
+"       "REG_L"        a0,0*"SZREG"(sp)\n"
+"       "REG_L"        a1,1*"SZREG"(sp)\n"
+"       "REG_L"        a2,2*"SZREG"(sp)\n"
+"       "REG_L"        t0,3*"SZREG"(sp)\n"
+"       "REG_L"        ra,4*"SZREG"(sp)\n"
+"       addi   sp,sp,5*"SZREG"\n"
+"       jr     t0\n"
+"       .size           my_tramp, .-my_tramp\n"
+"       .popsection\n"
+);
+
+#endif /* CONFIG_RISCV */
+
 #ifdef CONFIG_X86_64
 
 #include <asm/ibt.h>
index 32c477da1e9aa3719cd1ac6997d8e774e7d84658..ef0945670e1eb985da1397e7e496cf4a768e49b7 100644 (file)
@@ -16,6 +16,30 @@ void my_direct_func(struct task_struct *p)
 
 extern void my_tramp(void *);
 
+#ifdef CONFIG_RISCV
+#include <asm/asm.h>
+
+asm (
+"       .pushsection    .text, \"ax\", @progbits\n"
+"       .type           my_tramp, @function\n"
+"       .globl          my_tramp\n"
+"   my_tramp:\n"
+"       addi   sp,sp,-3*"SZREG"\n"
+"       "REG_S"        a0,0*"SZREG"(sp)\n"
+"       "REG_S"        t0,1*"SZREG"(sp)\n"
+"       "REG_S"        ra,2*"SZREG"(sp)\n"
+"       call   my_direct_func\n"
+"       "REG_L"        a0,0*"SZREG"(sp)\n"
+"       "REG_L"        t0,1*"SZREG"(sp)\n"
+"       "REG_L"        ra,2*"SZREG"(sp)\n"
+"       addi   sp,sp,3*"SZREG"\n"
+"       jr     t0\n"
+"       .size           my_tramp, .-my_tramp\n"
+"       .popsection\n"
+);
+
+#endif /* CONFIG_RISCV */
+
 #ifdef CONFIG_X86_64
 
 #include <asm/ibt.h>
index 6aba02a31c96c52d24804aad834b9a169c8c318f..d0ee9001c7b376cb4310fd1d30b5ba162fa292c5 100644 (file)
@@ -105,7 +105,7 @@ static int __init sample_trace_array_init(void)
         * NOTE: This function increments the reference counter
         * associated with the trace array - "tr".
         */
-       tr = trace_array_get_by_name("sample-instance");
+       tr = trace_array_get_by_name("sample-instance", "sched,timer,kprobes");
 
        if (!tr)
                return -1;
index ab271b2051a2459cc83d05111ba1be0558cdc954..226ea3df3b4b4caf70a8b7cc1c7ead71def9af7c 100644 (file)
@@ -9,8 +9,8 @@
 # Input config fragments without '.config' suffix
 define merge_into_defconfig
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
-               -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/$(1) \
-               $(foreach config,$(2),$(srctree)/arch/$(ARCH)/configs/$(config).config)
+               -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$(1) \
+               $(foreach config,$(2),$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
        +$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
 endef
 
@@ -23,7 +23,7 @@ endef
 # Input config fragments without '.config' suffix
 define merge_into_defconfig_override
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
-               -Q -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/$(1) \
-               $(foreach config,$(2),$(srctree)/arch/$(ARCH)/configs/$(config).config)
+               -Q -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$(1) \
+               $(foreach config,$(2),$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
        +$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
 endef
index c9725685aa768bae560521eaadcc9b23fb5ce28b..a9e552a1e9105b5efb559a23e4a2943c102b12a2 100644 (file)
@@ -82,15 +82,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
 # Warn if there is an enum types mismatch
 KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion)
 
-# backward compatibility
-KBUILD_EXTRA_WARN ?= $(KBUILD_ENABLE_EXTRA_GCC_CHECKS)
-
-ifeq ("$(origin W)", "command line")
-  KBUILD_EXTRA_WARN := $(W)
-endif
-
-export KBUILD_EXTRA_WARN
-
 #
 # W=1 - warnings which may be relevant and do not occur too often
 #
@@ -106,7 +97,6 @@ KBUILD_CFLAGS += $(call cc-option, -Wunused-const-variable)
 KBUILD_CFLAGS += $(call cc-option, -Wpacked-not-aligned)
 KBUILD_CFLAGS += $(call cc-option, -Wformat-overflow)
 KBUILD_CFLAGS += $(call cc-option, -Wformat-truncation)
-KBUILD_CFLAGS += $(call cc-option, -Wstringop-overflow)
 KBUILD_CFLAGS += $(call cc-option, -Wstringop-truncation)
 
 KBUILD_CPPFLAGS += -Wundef
@@ -122,7 +112,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
 KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
-KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
 KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
 
 ifdef CONFIG_CC_IS_CLANG
index 1a965fe68e011196d476a5d422bab347077112cd..cd5b181060f151f2c28186feb5b96db37ee04da9 100644 (file)
@@ -83,8 +83,8 @@ dtb-$(CONFIG_OF_ALL_DTBS)       += $(dtb-)
 multi-dtb-y := $(call multi-search, $(dtb-y), .dtb, -dtbs)
 # Primitive DTB compiled from *.dts
 real-dtb-y := $(call real-search, $(dtb-y), .dtb, -dtbs)
-# Base DTB that overlay is applied onto (each first word of $(*-dtbs) expansion)
-base-dtb-y := $(foreach m, $(multi-dtb-y), $(firstword $(call suffix-search, $m, .dtb, -dtbs)))
+# Base DTB that overlay is applied onto
+base-dtb-y := $(filter %.dtb, $(call real-search, $(multi-dtb-y), .dtb, -dtbs))
 
 always-y                       += $(dtb-y)
 
index 3addd1c0b989a0e9acd91a57184fd0342b804fda..a81dfb1f518106e50d8ebdb2014be1fcb028bba5 100644 (file)
@@ -4,27 +4,6 @@
 include $(srctree)/scripts/Kbuild.include
 include $(srctree)/scripts/Makefile.lib
 
-KERNELPATH := kernel-$(subst -,_,$(KERNELRELEASE))
-# Include only those top-level files that are needed by make, plus the GPL copy
-TAR_CONTENT := Documentation LICENSES arch block certs crypto drivers fs \
-               include init io_uring ipc kernel lib mm net rust \
-               samples scripts security sound tools usr virt \
-               .config Makefile \
-               Kbuild Kconfig COPYING $(wildcard localversion*)
-
-quiet_cmd_src_tar = TAR     $(2).tar.gz
-      cmd_src_tar = \
-if test "$(objtree)" != "$(srctree)"; then \
-       echo >&2; \
-       echo >&2 "  ERROR:"; \
-       echo >&2 "  Building source tarball is not possible outside the"; \
-       echo >&2 "  kernel source tree. Don't set KBUILD_OUTPUT"; \
-       echo >&2; \
-       false; \
-fi ; \
-tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \
-       --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3)
-
 # Git
 # ---------------------------------------------------------------------------
 
@@ -130,8 +109,6 @@ debian-orig: linux.tar$(debian-orig-suffix) debian
                cp $< ../$(orig-name); \
        fi
 
-KBUILD_PKG_ROOTCMD ?= 'fakeroot -u'
-
 PHONY += deb-pkg srcdeb-pkg bindeb-pkg
 
 deb-pkg:    private build-type := source,binary
@@ -146,7 +123,7 @@ deb-pkg srcdeb-pkg bindeb-pkg:
        $(if $(findstring source, $(build-type)), \
                --unsigned-source --compression=$(KDEB_SOURCE_COMPRESS)) \
        $(if $(findstring binary, $(build-type)), \
-               --rules-file='$(MAKE) -f debian/rules' --jobs=1 -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch), \
+               -R'$(MAKE) -f debian/rules' -j1 -a$$(cat debian/arch), \
                --no-check-builddeps) \
        $(DPKG_FLAGS))
 
@@ -157,9 +134,8 @@ snap-pkg:
        rm -rf $(objtree)/snap
        mkdir $(objtree)/snap
        $(MAKE) clean
-       $(call cmd,src_tar,$(KERNELPATH))
        sed "s@KERNELRELEASE@$(KERNELRELEASE)@; \
-               s@SRCTREE@$(shell realpath $(KERNELPATH).tar.gz)@" \
+               s@SRCTREE@$(abs_srctree)@" \
                $(srctree)/scripts/package/snapcraft.template > \
                $(objtree)/snap/snapcraft.yaml
        cd $(objtree)/snap && \
diff --git a/scripts/check-uapi.sh b/scripts/check-uapi.sh
new file mode 100755 (executable)
index 0000000..9555817
--- /dev/null
@@ -0,0 +1,573 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+# Script to check commits for UAPI backwards compatibility
+
+set -o errexit
+set -o pipefail
+
+print_usage() {
+       name=$(basename "$0")
+       cat << EOF
+$name - check for UAPI header stability across Git commits
+
+By default, the script will check to make sure the latest commit (or current
+dirty changes) did not introduce ABI changes when compared to HEAD^1. You can
+check against additional commit ranges with the -b and -p options.
+
+The script will not check UAPI headers for architectures other than the one
+defined in ARCH.
+
+Usage: $name [-b BASE_REF] [-p PAST_REF] [-j N] [-l ERROR_LOG] [-i] [-q] [-v]
+
+Options:
+    -b BASE_REF    Base git reference to use for comparison. If unspecified or empty,
+                   will use any dirty changes in tree to UAPI files. If there are no
+                   dirty changes, HEAD will be used.
+    -p PAST_REF    Compare BASE_REF to PAST_REF (e.g. -p v6.1). If unspecified or empty,
+                   will use BASE_REF^1. Must be an ancestor of BASE_REF. Only headers
+                   that exist on PAST_REF will be checked for compatibility.
+    -j JOBS        Number of checks to run in parallel (default: number of CPU cores).
+    -l ERROR_LOG   Write error log to file (default: no error log is generated).
+    -i             Ignore ambiguous changes that may or may not break UAPI compatibility.
+    -q             Quiet operation.
+    -v             Verbose operation (print more information about each header being checked).
+
+Environmental args:
+    ABIDIFF  Custom path to abidiff binary
+    CC       C compiler (default is "gcc")
+    ARCH     Target architecture for the UAPI check (default is host arch)
+
+Exit codes:
+    $SUCCESS) Success
+    $FAIL_ABI) ABI difference detected
+    $FAIL_PREREQ) Prerequisite not met
+EOF
+}
+
+readonly SUCCESS=0
+readonly FAIL_ABI=1
+readonly FAIL_PREREQ=2
+
+# Print to stderr
+eprintf() {
+       # shellcheck disable=SC2059
+       printf "$@" >&2
+}
+
+# Expand an array with a specific character (similar to Python string.join())
+join() {
+       local IFS="$1"
+       shift
+       printf "%s" "$*"
+}
+
+# Create abidiff suppressions
+gen_suppressions() {
+       # Common enum variant names which we don't want to worry about
+       # being shifted when new variants are added.
+       local -a enum_regex=(
+               ".*_AFTER_LAST$"
+               ".*_CNT$"
+               ".*_COUNT$"
+               ".*_END$"
+               ".*_LAST$"
+               ".*_MASK$"
+               ".*_MAX$"
+               ".*_MAX_BIT$"
+               ".*_MAX_BPF_ATTACH_TYPE$"
+               ".*_MAX_ID$"
+               ".*_MAX_SHIFT$"
+               ".*_NBITS$"
+               ".*_NETDEV_NUMHOOKS$"
+               ".*_NFT_META_IIFTYPE$"
+               ".*_NL80211_ATTR$"
+               ".*_NLDEV_NUM_OPS$"
+               ".*_NUM$"
+               ".*_NUM_ELEMS$"
+               ".*_NUM_IRQS$"
+               ".*_SIZE$"
+               ".*_TLSMAX$"
+               "^MAX_.*"
+               "^NUM_.*"
+       )
+
+       # Common padding field names which can be expanded into
+       # without worrying about users.
+       local -a padding_regex=(
+               ".*end$"
+               ".*pad$"
+               ".*pad[0-9]?$"
+               ".*pad_[0-9]?$"
+               ".*padding$"
+               ".*padding[0-9]?$"
+               ".*padding_[0-9]?$"
+               ".*res$"
+               ".*resv$"
+               ".*resv[0-9]?$"
+               ".*resv_[0-9]?$"
+               ".*reserved$"
+               ".*reserved[0-9]?$"
+               ".*reserved_[0-9]?$"
+               ".*rsvd[0-9]?$"
+               ".*unused$"
+       )
+
+       cat << EOF
+[suppress_type]
+  type_kind = enum
+  changed_enumerators_regexp = $(join , "${enum_regex[@]}")
+EOF
+
+       for p in "${padding_regex[@]}"; do
+               cat << EOF
+[suppress_type]
+  type_kind = struct
+  has_data_member_inserted_at = offset_of_first_data_member_regexp(${p})
+EOF
+       done
+
+if [ "$IGNORE_AMBIGUOUS_CHANGES" = "true" ]; then
+       cat << EOF
+[suppress_type]
+  type_kind = struct
+  has_data_member_inserted_at = end
+  has_size_change = yes
+EOF
+fi
+}
+
+# Check if git tree is dirty
+tree_is_dirty() {
+       ! git diff --quiet
+}
+
+# Get list of files installed in $ref
+get_file_list() {
+       local -r ref="$1"
+       local -r tree="$(get_header_tree "$ref")"
+
+       # Print all installed headers, filtering out ones that can't be compiled
+       find "$tree" -type f -name '*.h' -printf '%P\n' | grep -v -f "$INCOMPAT_LIST"
+}
+
+# Add to the list of incompatible headers
+add_to_incompat_list() {
+       local -r ref="$1"
+
+       # Start with the usr/include/Makefile to get a list of the headers
+       # that don't compile using this method.
+       if [ ! -f usr/include/Makefile ]; then
+               eprintf "error - no usr/include/Makefile present at %s\n" "$ref"
+               eprintf "Note: usr/include/Makefile was added in the v5.3 kernel release\n"
+               exit "$FAIL_PREREQ"
+       fi
+       {
+               # shellcheck disable=SC2016
+               printf 'all: ; @echo $(no-header-test)\n'
+               cat usr/include/Makefile
+       } | SRCARCH="$ARCH" make --always-make -f - | tr " " "\n" \
+         | grep -v "asm-generic" >> "$INCOMPAT_LIST"
+
+       # The makefile also skips all asm-generic files, but prints "asm-generic/%"
+       # which won't work for our grep match. Instead, print something grep will match.
+       printf "asm-generic/.*\.h\n" >> "$INCOMPAT_LIST"
+}
+
+# Compile the simple test app
+do_compile() {
+       local -r inc_dir="$1"
+       local -r header="$2"
+       local -r out="$3"
+       printf "int main(void) { return 0; }\n" | \
+               "$CC" -c \
+                 -o "$out" \
+                 -x c \
+                 -O0 \
+                 -std=c90 \
+                 -fno-eliminate-unused-debug-types \
+                 -g \
+                 "-I${inc_dir}" \
+                 -include "$header" \
+                 -
+}
+
+# Run make headers_install
+run_make_headers_install() {
+       local -r ref="$1"
+       local -r install_dir="$(get_header_tree "$ref")"
+       make -j "$MAX_THREADS" ARCH="$ARCH" INSTALL_HDR_PATH="$install_dir" \
+               headers_install > /dev/null
+}
+
+# Install headers for both git refs
+install_headers() {
+       local -r base_ref="$1"
+       local -r past_ref="$2"
+
+       for ref in "$base_ref" "$past_ref"; do
+               printf "Installing user-facing UAPI headers from %s... " "${ref:-dirty tree}"
+               if [ -n "$ref" ]; then
+                       git archive --format=tar --prefix="${ref}-archive/" "$ref" \
+                               | (cd "$TMP_DIR" && tar xf -)
+                       (
+                               cd "${TMP_DIR}/${ref}-archive"
+                               run_make_headers_install "$ref"
+                               add_to_incompat_list "$ref" "$INCOMPAT_LIST"
+                       )
+               else
+                       run_make_headers_install "$ref"
+                       add_to_incompat_list "$ref" "$INCOMPAT_LIST"
+               fi
+               printf "OK\n"
+       done
+       sort -u -o "$INCOMPAT_LIST" "$INCOMPAT_LIST"
+       sed -i -e '/^$/d' "$INCOMPAT_LIST"
+}
+
+# Print the path to the headers_install tree for a given ref
+get_header_tree() {
+       local -r ref="$1"
+       printf "%s" "${TMP_DIR}/${ref}/usr"
+}
+
+# Check file list for UAPI compatibility
+check_uapi_files() {
+       local -r base_ref="$1"
+       local -r past_ref="$2"
+       local -r abi_error_log="$3"
+
+       local passed=0;
+       local failed=0;
+       local -a threads=()
+       set -o errexit
+
+       printf "Checking changes to UAPI headers between %s and %s...\n" "$past_ref" "${base_ref:-dirty tree}"
+       # Loop over all UAPI headers that were installed by $past_ref (if they only exist on $base_ref,
+       # there's no way they're broken and no way to compare anyway)
+       while read -r file; do
+               if [ "${#threads[@]}" -ge "$MAX_THREADS" ]; then
+                       if wait "${threads[0]}"; then
+                               passed=$((passed + 1))
+                       else
+                               failed=$((failed + 1))
+                       fi
+                       threads=("${threads[@]:1}")
+               fi
+
+               check_individual_file "$base_ref" "$past_ref" "$file" &
+               threads+=("$!")
+       done < <(get_file_list "$past_ref")
+
+       for t in "${threads[@]}"; do
+               if wait "$t"; then
+                       passed=$((passed + 1))
+               else
+                       failed=$((failed + 1))
+               fi
+       done
+
+       if [ -n "$abi_error_log" ]; then
+               printf 'Generated by "%s %s" from git ref %s\n\n' \
+                       "$0" "$*" "$(git rev-parse HEAD)" > "$abi_error_log"
+       fi
+
+       while read -r error_file; do
+               {
+                       cat "$error_file"
+                       printf "\n\n"
+               } | tee -a "${abi_error_log:-/dev/null}" >&2
+       done < <(find "$TMP_DIR" -type f -name '*.error' | sort)
+
+       total="$((passed + failed))"
+       if [ "$failed" -gt 0 ]; then
+               eprintf "error - %d/%d UAPI headers compatible with %s appear _not_ to be backwards compatible\n" \
+                       "$failed" "$total" "$ARCH"
+               if [ -n "$abi_error_log" ]; then
+                       eprintf "Failure summary saved to %s\n" "$abi_error_log"
+               fi
+       else
+               printf "All %d UAPI headers compatible with %s appear to be backwards compatible\n" \
+                       "$total" "$ARCH"
+       fi
+
+       return "$failed"
+}
+
+# Check an individual file for UAPI compatibility
+check_individual_file() {
+       local -r base_ref="$1"
+       local -r past_ref="$2"
+       local -r file="$3"
+
+       local -r base_header="$(get_header_tree "$base_ref")/${file}"
+       local -r past_header="$(get_header_tree "$past_ref")/${file}"
+
+       if [ ! -f "$base_header" ]; then
+               mkdir -p "$(dirname "$base_header")"
+               printf "==== UAPI header %s was removed between %s and %s ====" \
+                       "$file" "$past_ref" "$base_ref" \
+                               > "${base_header}.error"
+               return 1
+       fi
+
+       compare_abi "$file" "$base_header" "$past_header" "$base_ref" "$past_ref"
+}
+
+# Perform the A/B compilation and compare output ABI
+compare_abi() {
+       local -r file="$1"
+       local -r base_header="$2"
+       local -r past_header="$3"
+       local -r base_ref="$4"
+       local -r past_ref="$5"
+       local -r log="${TMP_DIR}/log/${file}.log"
+       local -r error_log="${TMP_DIR}/log/${file}.error"
+
+       mkdir -p "$(dirname "$log")"
+
+       if ! do_compile "$(get_header_tree "$base_ref")/include" "$base_header" "${base_header}.bin" 2> "$log"; then
+               {
+                       warn_str=$(printf "==== Could not compile version of UAPI header %s at %s ====\n" \
+                               "$file" "$base_ref")
+                       printf "%s\n" "$warn_str"
+                       cat "$log"
+                       printf -- "=%.0s" $(seq 0 ${#warn_str})
+               } > "$error_log"
+               return 1
+       fi
+
+       if ! do_compile "$(get_header_tree "$past_ref")/include" "$past_header" "${past_header}.bin" 2> "$log"; then
+               {
+                       warn_str=$(printf "==== Could not compile version of UAPI header %s at %s ====\n" \
+                               "$file" "$past_ref")
+                       printf "%s\n" "$warn_str"
+                       cat "$log"
+                       printf -- "=%.0s" $(seq 0 ${#warn_str})
+               } > "$error_log"
+               return 1
+       fi
+
+       local ret=0
+       "$ABIDIFF" --non-reachable-types \
+               --suppressions "$SUPPRESSIONS" \
+               "${past_header}.bin" "${base_header}.bin" > "$log" || ret="$?"
+       if [ "$ret" -eq 0 ]; then
+               if [ "$VERBOSE" = "true" ]; then
+                       printf "No ABI differences detected in %s from %s -> %s\n" \
+                               "$file" "$past_ref" "$base_ref"
+               fi
+       else
+               # Bits in abidiff's return code can be used to determine the type of error
+               if [ $((ret & 0x2)) -gt 0 ]; then
+                       eprintf "error - abidiff did not run properly\n"
+                       exit 1
+               fi
+
+               if [ "$IGNORE_AMBIGUOUS_CHANGES" = "true" ] && [ "$ret" -eq 4 ]; then
+                       return 0
+               fi
+
+               # If the only changes were additions (not modifications to existing APIs), then
+               # there's no problem. Ignore these diffs.
+               if grep "Unreachable types summary" "$log" | grep -q "0 removed" &&
+                  grep "Unreachable types summary" "$log" | grep -q "0 changed"; then
+                       return 0
+               fi
+
+               {
+                       warn_str=$(printf "==== ABI differences detected in %s from %s -> %s ====" \
+                               "$file" "$past_ref" "$base_ref")
+                       printf "%s\n" "$warn_str"
+                       sed  -e '/summary:/d' -e '/changed type/d' -e '/^$/d' -e 's/^/  /g' "$log"
+                       printf -- "=%.0s" $(seq 0 ${#warn_str})
+                       if cmp "$past_header" "$base_header" > /dev/null 2>&1; then
+                               printf "\n%s did not change between %s and %s...\n" "$file" "$past_ref" "${base_ref:-dirty tree}"
+                               printf "It's possible a change to one of the headers it includes caused this error:\n"
+                               grep '^#include' "$base_header"
+                               printf "\n"
+                       fi
+               } > "$error_log"
+
+               return 1
+       fi
+}
+
+# Check that a minimum software version number is satisfied
+min_version_is_satisfied() {
+       local -r min_version="$1"
+       local -r version_installed="$2"
+
+       printf "%s\n%s\n" "$min_version" "$version_installed" \
+               | sort -Vc > /dev/null 2>&1
+}
+
+# Make sure we have the tools we need and the arguments make sense
+check_deps() {
+       ABIDIFF="${ABIDIFF:-abidiff}"
+       CC="${CC:-gcc}"
+       ARCH="${ARCH:-$(uname -m)}"
+       if [ "$ARCH" = "x86_64" ]; then
+               ARCH="x86"
+       fi
+
+       local -r abidiff_min_version="2.4"
+       local -r libdw_min_version_if_clang="0.171"
+
+       if ! command -v "$ABIDIFF" > /dev/null 2>&1; then
+               eprintf "error - abidiff not found!\n"
+               eprintf "Please install abigail-tools version %s or greater\n" "$abidiff_min_version"
+               eprintf "See: https://sourceware.org/libabigail/manual/libabigail-overview.html\n"
+               return 1
+       fi
+
+       local -r abidiff_version="$("$ABIDIFF" --version | cut -d ' ' -f 2)"
+       if ! min_version_is_satisfied "$abidiff_min_version" "$abidiff_version"; then
+               eprintf "error - abidiff version too old: %s\n" "$abidiff_version"
+               eprintf "Please install abigail-tools version %s or greater\n" "$abidiff_min_version"
+               eprintf "See: https://sourceware.org/libabigail/manual/libabigail-overview.html\n"
+               return 1
+       fi
+
+       if ! command -v "$CC" > /dev/null 2>&1; then
+               eprintf 'error - %s not found\n' "$CC"
+               return 1
+       fi
+
+       if "$CC" --version | grep -q clang; then
+               local -r libdw_version="$(ldconfig -v 2>/dev/null | grep -v SKIPPED | grep -m 1 -o 'libdw-[0-9]\+.[0-9]\+' | cut -c 7-)"
+               if ! min_version_is_satisfied "$libdw_min_version_if_clang" "$libdw_version"; then
+                       eprintf "error - libdw version too old for use with clang: %s\n" "$libdw_version"
+                       eprintf "Please install libdw from elfutils version %s or greater\n" "$libdw_min_version_if_clang"
+                       eprintf "See: https://sourceware.org/elfutils/\n"
+                       return 1
+               fi
+       fi
+
+       if [ ! -d "arch/${ARCH}" ]; then
+               eprintf 'error - ARCH "%s" is not a subdirectory under arch/\n' "$ARCH"
+               eprintf "Please set ARCH to one of:\n%s\n" "$(find arch -maxdepth 1 -mindepth 1 -type d -printf '%f ' | fmt)"
+               return 1
+       fi
+
+       if ! git rev-parse --is-inside-work-tree > /dev/null 2>&1; then
+               eprintf "error - this script requires the kernel tree to be initialized with Git\n"
+               return 1
+       fi
+
+       if ! git rev-parse --verify "$past_ref" > /dev/null 2>&1; then
+               printf 'error - invalid git reference "%s"\n' "$past_ref"
+               return 1
+       fi
+
+       if [ -n "$base_ref" ]; then
+               if ! git merge-base --is-ancestor "$past_ref" "$base_ref" > /dev/null 2>&1; then
+                       printf 'error - "%s" is not an ancestor of base ref "%s"\n' "$past_ref" "$base_ref"
+                       return 1
+               fi
+               if [ "$(git rev-parse "$base_ref")" = "$(git rev-parse "$past_ref")" ]; then
+                       printf 'error - "%s" and "%s" are the same reference\n' "$past_ref" "$base_ref"
+                       return 1
+               fi
+       fi
+}
+
+run() {
+       local base_ref="$1"
+       local past_ref="$2"
+       local abi_error_log="$3"
+       shift 3
+
+       if [ -z "$KERNEL_SRC" ]; then
+               KERNEL_SRC="$(realpath "$(dirname "$0")"/..)"
+       fi
+
+       cd "$KERNEL_SRC"
+
+       if [ -z "$base_ref" ] && ! tree_is_dirty; then
+               base_ref=HEAD
+       fi
+
+       if [ -z "$past_ref" ]; then
+               if [ -n "$base_ref" ]; then
+                       past_ref="${base_ref}^1"
+               else
+                       past_ref=HEAD
+               fi
+       fi
+
+       if ! check_deps; then
+               exit "$FAIL_PREREQ"
+       fi
+
+       TMP_DIR=$(mktemp -d)
+       readonly TMP_DIR
+       trap 'rm -rf "$TMP_DIR"' EXIT
+
+       readonly INCOMPAT_LIST="${TMP_DIR}/incompat_list.txt"
+       touch "$INCOMPAT_LIST"
+
+       readonly SUPPRESSIONS="${TMP_DIR}/suppressions.txt"
+       gen_suppressions > "$SUPPRESSIONS"
+
+       # Run make install_headers for both refs
+       install_headers "$base_ref" "$past_ref"
+
+       # Check for any differences in the installed header trees
+       if diff -r -q "$(get_header_tree "$base_ref")" "$(get_header_tree "$past_ref")" > /dev/null 2>&1; then
+               printf "No changes to UAPI headers were applied between %s and %s\n" "$past_ref" "${base_ref:-dirty tree}"
+               exit "$SUCCESS"
+       fi
+
+       if ! check_uapi_files "$base_ref" "$past_ref" "$abi_error_log"; then
+               exit "$FAIL_ABI"
+       fi
+}
+
+main() {
+       MAX_THREADS=$(nproc)
+       VERBOSE="false"
+       IGNORE_AMBIGUOUS_CHANGES="false"
+       quiet="false"
+       local base_ref=""
+       while getopts "hb:p:j:l:iqv" opt; do
+               case $opt in
+               h)
+                       print_usage
+                       exit "$SUCCESS"
+                       ;;
+               b)
+                       base_ref="$OPTARG"
+                       ;;
+               p)
+                       past_ref="$OPTARG"
+                       ;;
+               j)
+                       MAX_THREADS="$OPTARG"
+                       ;;
+               l)
+                       abi_error_log="$OPTARG"
+                       ;;
+               i)
+                       IGNORE_AMBIGUOUS_CHANGES="true"
+                       ;;
+               q)
+                       quiet="true"
+                       VERBOSE="false"
+                       ;;
+               v)
+                       VERBOSE="true"
+                       quiet="false"
+                       ;;
+               *)
+                       exit "$FAIL_PREREQ"
+               esac
+       done
+
+       if [ "$quiet" = "true" ]; then
+               exec > /dev/null 2>&1
+       fi
+
+       run "$base_ref" "$past_ref" "$abi_error_log" "$@"
+}
+
+main "$@"
index f8343b34a28b911491bfc688b7d3f23b34a7839d..9c4c4a61bc8327e05a0f51c300e0675b247b84ec 100755 (executable)
@@ -512,6 +512,7 @@ our $Attribute      = qr{
                        __ro_after_init|
                        __kprobes|
                        $InitAttribute|
+                       __aligned\s*\(.*\)|
                        ____cacheline_aligned|
                        ____cacheline_aligned_in_smp|
                        ____cacheline_internodealigned_in_smp|
index a28dc061653aa0ea75195f7777fb5fbc38ea38c2..550d1d2fc02a9b95f453594268f4bf44045808e9 100644 (file)
@@ -1,10 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
 ///
 /// From Documentation/filesystems/sysfs.rst:
-///  show() must not use snprintf() when formatting the value to be
-///  returned to user space. If you can guarantee that an overflow
-///  will never happen you can use sprintf() otherwise you must use
-///  scnprintf().
+///  show() should only use sysfs_emit() or sysfs_emit_at() when formatting
+///  the value to be returned to user space.
 ///
 // Confidence: High
 // Copyright: (C) 2020 Denis Efremov ISPRAS
@@ -30,15 +28,16 @@ ssize_t show(struct device *dev, struct device_attribute *attr, char *buf)
 
 @rp depends on patch@
 identifier show, dev, attr, buf;
+expression BUF, SZ, FORMAT, STR;
 @@
 
 ssize_t show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        <...
        return
--              snprintf
-+              scnprintf
-                       (...);
+-              snprintf(BUF, SZ, FORMAT
++              sysfs_emit(BUF, FORMAT
+                               ,...);
        ...>
 }
 
@@ -46,10 +45,10 @@ ssize_t show(struct device *dev, struct device_attribute *attr, char *buf)
 p << r.p;
 @@
 
-coccilib.report.print_report(p[0], "WARNING: use scnprintf or sprintf")
+coccilib.report.print_report(p[0], "WARNING: please use sysfs_emit or sysfs_emit_at")
 
 @script: python depends on org@
 p << r.p;
 @@
 
-coccilib.org.print_todo(p[0], "WARNING: use scnprintf or sprintf")
+coccilib.org.print_todo(p[0], "WARNING: please use sysfs_emit or sysfs_emit_at")
index cb980b144ca14e6dff2c037f2c8ed24d58edad73..fa5be6f57b0090317e503f5a7ab8e4094e2e9890 100755 (executable)
@@ -16,6 +16,21 @@ elif type c++filt >/dev/null 2>&1 ; then
        cppfilt_opts=-i
 fi
 
+UTIL_SUFFIX=
+if [[ -z ${LLVM:-} ]]; then
+       UTIL_PREFIX=${CROSS_COMPILE:-}
+else
+       UTIL_PREFIX=llvm-
+       if [[ ${LLVM} == */ ]]; then
+               UTIL_PREFIX=${LLVM}${UTIL_PREFIX}
+       elif [[ ${LLVM} == -* ]]; then
+               UTIL_SUFFIX=${LLVM}
+       fi
+fi
+
+READELF=${UTIL_PREFIX}readelf${UTIL_SUFFIX}
+ADDR2LINE=${UTIL_PREFIX}addr2line${UTIL_SUFFIX}
+
 if [[ $1 == "-r" ]] ; then
        vmlinux=""
        basepath="auto"
@@ -75,7 +90,7 @@ find_module() {
 
        if [[ "$modpath" != "" ]] ; then
                for fn in $(find "$modpath" -name "${module//_/[-_]}.ko*") ; do
-                       if readelf -WS "$fn" | grep -qwF .debug_line ; then
+                       if ${READELF} -WS "$fn" | grep -qwF .debug_line ; then
                                echo $fn
                                return
                        fi
@@ -169,7 +184,7 @@ parse_symbol() {
        if [[ $aarray_support == true && "${cache[$module,$address]+isset}" == "isset" ]]; then
                local code=${cache[$module,$address]}
        else
-               local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address" 2>/dev/null)
+               local code=$(${ADDR2LINE} -i -e "$objfile" "$address" 2>/dev/null)
                if [[ $aarray_support == true ]]; then
                        cache[$module,$address]=$code
                fi
index aa5ab6251f763b9860a6128e8582d1ef4af91f6a..6793d6e86e777b576e9acac680acdd8020c3d105 100644 (file)
@@ -82,21 +82,12 @@ LxPs()
 
 thread_info_type = utils.CachedType("struct thread_info")
 
-ia64_task_size = None
-
 
 def get_thread_info(task):
     thread_info_ptr_type = thread_info_type.get_type().pointer()
-    if utils.is_target_arch("ia64"):
-        global ia64_task_size
-        if ia64_task_size is None:
-            ia64_task_size = gdb.parse_and_eval("sizeof(struct task_struct)")
-        thread_info_addr = task.address + ia64_task_size
-        thread_info = thread_info_addr.cast(thread_info_ptr_type)
-    else:
-        if task.type.fields()[0].type == thread_info_type.get_type():
-            return task['thread_info']
-        thread_info = task['stack'].cast(thread_info_ptr_type)
+    if task.type.fields()[0].type == thread_info_type.get_type():
+        return task['thread_info']
+    thread_info = task['stack'].cast(thread_info_ptr_type)
     return thread_info.dereference()
 
 
index 3c6cbe2b278d302ebd6375e900dbe4875765805f..0da52b548ba50f5e1333c3c2b2a18da2533a18a1 100644 (file)
@@ -161,6 +161,13 @@ fn main() {
         ts.push("features", features);
         ts.push("llvm-target", "x86_64-linux-gnu");
         ts.push("target-pointer-width", "64");
+    } else if cfg.has("LOONGARCH") {
+        ts.push("arch", "loongarch64");
+        ts.push("data-layout", "e-m:e-p:64:64-i64:64-i128:128-n64-S128");
+        ts.push("features", "-f,-d");
+        ts.push("llvm-target", "loongarch64-linux-gnusf");
+        ts.push("llvm-abiname", "lp64s");
+        ts.push("target-pointer-width", "64");
     } else {
         panic!("Unsupported architecture");
     }
index f5dfdb9d80e9d5bb66f7e5e0f5b5cb4aac069d9b..f3901c55df239df5a7d88e974bae8e388d2c00ae 100644 (file)
@@ -16,9 +16,7 @@
 #include <unistd.h>
 #include <assert.h>
 #include <stdarg.h>
-#ifdef __GNU_LIBRARY__
 #include <getopt.h>
-#endif                         /* __GNU_LIBRARY__ */
 
 #include "genksyms.h"
 /*----------------------------------------------------------------------*/
@@ -718,8 +716,6 @@ void error_with_pos(const char *fmt, ...)
 static void genksyms_usage(void)
 {
        fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n"
-#ifdef __GNU_LIBRARY__
-             "  -s, --symbol-prefix   Select symbol prefix\n"
              "  -d, --debug           Increment the debug level (repeatable)\n"
              "  -D, --dump            Dump expanded symbol defs (for debugging only)\n"
              "  -r, --reference file  Read reference symbols from a file\n"
@@ -729,18 +725,6 @@ static void genksyms_usage(void)
              "  -q, --quiet           Disable warnings (default)\n"
              "  -h, --help            Print this message\n"
              "  -V, --version         Print the release version\n"
-#else                          /* __GNU_LIBRARY__ */
-             "  -s                    Select symbol prefix\n"
-             "  -d                    Increment the debug level (repeatable)\n"
-             "  -D                    Dump expanded symbol defs (for debugging only)\n"
-             "  -r file               Read reference symbols from a file\n"
-             "  -T file               Dump expanded types into file\n"
-             "  -p                    Preserve reference modversions or fail\n"
-             "  -w                    Enable warnings\n"
-             "  -q                    Disable warnings (default)\n"
-             "  -h                    Print this message\n"
-             "  -V                    Print the release version\n"
-#endif                         /* __GNU_LIBRARY__ */
              , stderr);
 }
 
@@ -749,7 +733,6 @@ int main(int argc, char **argv)
        FILE *dumpfile = NULL, *ref_file = NULL;
        int o;
 
-#ifdef __GNU_LIBRARY__
        struct option long_opts[] = {
                {"debug", 0, 0, 'd'},
                {"warnings", 0, 0, 'w'},
@@ -763,11 +746,8 @@ int main(int argc, char **argv)
                {0, 0, 0, 0}
        };
 
-       while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph",
+       while ((o = getopt_long(argc, argv, "dwqVDr:T:ph",
                                &long_opts[0], NULL)) != EOF)
-#else                          /* __GNU_LIBRARY__ */
-       while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF)
-#endif                         /* __GNU_LIBRARY__ */
                switch (o) {
                case 'd':
                        flag_debug++;
diff --git a/scripts/git.orderFile b/scripts/git.orderFile
new file mode 100644 (file)
index 0000000..5102ba7
--- /dev/null
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# order file for git, to produce patches which are easier to review
+# by diffing the important stuff like header changes first.
+#
+# one-off usage:
+#   git diff -O scripts/git.orderFile ...
+#
+# add to git config:
+#   git config diff.orderFile scripts/git.orderFile
+#
+
+MAINTAINERS
+
+# Documentation
+Documentation/*
+*.rst
+
+# git-specific
+.gitignore
+scripts/git.orderFile
+
+# build system
+Kconfig*
+*/Kconfig*
+Kbuild*
+*/Kbuild*
+Makefile*
+*/Makefile*
+*.mak
+*.mk
+scripts/*
+
+# semantic patches
+*.cocci
+
+# headers
+*types.h
+*.h
+
+# code
+*.c
index 26359968744ef1e9d5e40937e3ac4055d3a7bf2a..890f69005bab41c6d0977f2a3e95de5143d4fbba 100644 (file)
@@ -17,7 +17,6 @@ arch/arm/kernel/head-nommu.o
 arch/arm/kernel/head.o
 arch/csky/kernel/head.o
 arch/hexagon/kernel/head.o
-arch/ia64/kernel/head.o
 arch/loongarch/kernel/head.o
 arch/m68k/68000/head.o
 arch/m68k/coldfire/head.o
index 4eee155121a8b37c201e70c8327c3ea556d5cd94..ea1bf3b3dbde1bc463abbc8640537f873adc830f 100644 (file)
@@ -27,6 +27,14 @@ KCONFIG_DEFCONFIG_LIST += \
 endif
 KCONFIG_DEFCONFIG_LIST += arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)
 
+ifneq ($(findstring c, $(KBUILD_EXTRA_WARN)),)
+export KCONFIG_WARN_UNKNOWN_SYMBOLS=1
+endif
+
+ifneq ($(findstring e, $(KBUILD_EXTRA_WARN)),)
+export KCONFIG_WERROR=1
+endif
+
 # We need this, in case the user has it in its environment
 unexport CONFIG_
 
@@ -99,7 +107,7 @@ config-fragments = $(call configfiles,$@)
 
 %.config: $(obj)/conf
        $(if $(config-fragments),, $(error $@ fragment does not exists on this architecture))
-       $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m .config $(config-fragments)
+       $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m $(KCONFIG_CONFIG) $(config-fragments)
        $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
 
 PHONY += tinyconfig
@@ -166,7 +174,7 @@ conf-objs   := conf.o $(common-objs)
 
 # nconf: Used for the nconfig target based on ncurses
 hostprogs      += nconf
-nconf-objs     := nconf.o nconf.gui.o $(common-objs)
+nconf-objs     := nconf.o nconf.gui.o mnconf-common.o $(common-objs)
 
 HOSTLDLIBS_nconf       = $(call read-file, $(obj)/nconf-libs)
 HOSTCFLAGS_nconf.o     = $(call read-file, $(obj)/nconf-cflags)
@@ -179,7 +187,7 @@ $(obj)/nconf.o $(obj)/nconf.gui.o: | $(obj)/nconf-cflags
 hostprogs      += mconf
 lxdialog       := $(addprefix lxdialog/, \
                     checklist.o inputbox.o menubox.o textbox.o util.o yesno.o)
-mconf-objs     := mconf.o $(lxdialog) $(common-objs)
+mconf-objs     := mconf.o $(lxdialog) mnconf-common.o $(common-objs)
 
 HOSTLDLIBS_mconf = $(call read-file, $(obj)/mconf-libs)
 $(foreach f, mconf.o $(lxdialog), \
index 33d19e419908b8315603f04db41189ed9c506a0c..662a5e7c37c28539ce7104085478c718848c4fc3 100644 (file)
@@ -827,6 +827,9 @@ int main(int ac, char **av)
                break;
        }
 
+       if (conf_errors())
+               exit(1);
+
        if (sync_kconfig) {
                name = getenv("KCONFIG_NOSILENTUPDATE");
                if (name && *name) {
@@ -890,6 +893,9 @@ int main(int ac, char **av)
                break;
        }
 
+       if (sym_dep_errors())
+               exit(1);
+
        if (input_mode == savedefconfig) {
                if (conf_write_defconfig(defconfig_file)) {
                        fprintf(stderr, "n*** Error while saving defconfig to: %s\n\n",
index 4a6811d77d182964d1ec73c34a87c28670dec692..f53dcdd445976aa8759e13e6e3103ef02f76ead1 100644 (file)
@@ -155,6 +155,13 @@ static void conf_message(const char *fmt, ...)
 static const char *conf_filename;
 static int conf_lineno, conf_warnings;
 
+bool conf_errors(void)
+{
+       if (conf_warnings)
+               return getenv("KCONFIG_WERROR");
+       return false;
+}
+
 static void conf_warning(const char *fmt, ...)
 {
        va_list ap;
@@ -289,16 +296,12 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
 #define LINE_GROWTH 16
 static int add_byte(int c, char **lineptr, size_t slen, size_t *n)
 {
-       char *nline;
        size_t new_size = slen + 1;
+
        if (new_size > *n) {
                new_size += LINE_GROWTH - 1;
                new_size *= 2;
-               nline = xrealloc(*lineptr, new_size);
-               if (!nline)
-                       return -1;
-
-               *lineptr = nline;
+               *lineptr = xrealloc(*lineptr, new_size);
                *n = new_size;
        }
 
@@ -341,19 +344,37 @@ e_out:
        return -1;
 }
 
+/* like getline(), but the newline character is stripped away */
+static ssize_t getline_stripped(char **lineptr, size_t *n, FILE *stream)
+{
+       ssize_t len;
+
+       len = compat_getline(lineptr, n, stream);
+
+       if (len > 0 && (*lineptr)[len - 1] == '\n') {
+               len--;
+               (*lineptr)[len] = '\0';
+
+               if (len > 0 && (*lineptr)[len - 1] == '\r') {
+                       len--;
+                       (*lineptr)[len] = '\0';
+               }
+       }
+
+       return len;
+}
+
 int conf_read_simple(const char *name, int def)
 {
        FILE *in = NULL;
        char   *line = NULL;
        size_t  line_asize = 0;
-       char *p, *p2;
+       char *p, *val;
        struct symbol *sym;
        int i, def_flags;
-       const char *warn_unknown;
-       const char *werror;
+       const char *warn_unknown, *sym_name;
 
        warn_unknown = getenv("KCONFIG_WARN_UNKNOWN_SYMBOLS");
-       werror = getenv("KCONFIG_WERROR");
        if (name) {
                in = zconf_fopen(name);
        } else {
@@ -417,8 +438,7 @@ load:
                case S_INT:
                case S_HEX:
                case S_STRING:
-                       if (sym->def[def].val)
-                               free(sym->def[def].val);
+                       free(sym->def[def].val);
                        /* fall through */
                default:
                        sym->def[def].val = NULL;
@@ -426,90 +446,68 @@ load:
                }
        }
 
-       while (compat_getline(&line, &line_asize, in) != -1) {
+       while (getline_stripped(&line, &line_asize, in) != -1) {
                conf_lineno++;
-               sym = NULL;
+
+               if (!line[0]) /* blank line */
+                       continue;
+
                if (line[0] == '#') {
-                       if (memcmp(line + 2, CONFIG_, strlen(CONFIG_)))
+                       if (line[1] != ' ')
+                               continue;
+                       p = line + 2;
+                       if (memcmp(p, CONFIG_, strlen(CONFIG_)))
                                continue;
-                       p = strchr(line + 2 + strlen(CONFIG_), ' ');
+                       sym_name = p + strlen(CONFIG_);
+                       p = strchr(sym_name, ' ');
                        if (!p)
                                continue;
                        *p++ = 0;
-                       if (strncmp(p, "is not set", 10))
+                       if (strcmp(p, "is not set"))
                                continue;
-                       if (def == S_DEF_USER) {
-                               sym = sym_find(line + 2 + strlen(CONFIG_));
-                               if (!sym) {
-                                       if (warn_unknown)
-                                               conf_warning("unknown symbol: %s",
-                                                            line + 2 + strlen(CONFIG_));
-
-                                       conf_set_changed(true);
-                                       continue;
-                               }
-                       } else {
-                               sym = sym_lookup(line + 2 + strlen(CONFIG_), 0);
-                               if (sym->type == S_UNKNOWN)
-                                       sym->type = S_BOOLEAN;
-                       }
-                       if (sym->flags & def_flags) {
-                               conf_warning("override: reassigning to symbol %s", sym->name);
-                       }
-                       switch (sym->type) {
-                       case S_BOOLEAN:
-                       case S_TRISTATE:
-                               sym->def[def].tri = no;
-                               sym->flags |= def_flags;
-                               break;
-                       default:
-                               ;
-                       }
-               } else if (memcmp(line, CONFIG_, strlen(CONFIG_)) == 0) {
-                       p = strchr(line + strlen(CONFIG_), '=');
-                       if (!p)
+
+                       val = "n";
+               } else {
+                       if (memcmp(line, CONFIG_, strlen(CONFIG_))) {
+                               conf_warning("unexpected data: %s", line);
                                continue;
-                       *p++ = 0;
-                       p2 = strchr(p, '\n');
-                       if (p2) {
-                               *p2-- = 0;
-                               if (*p2 == '\r')
-                                       *p2 = 0;
                        }
 
-                       sym = sym_find(line + strlen(CONFIG_));
-                       if (!sym) {
-                               if (def == S_DEF_AUTO) {
-                                       /*
-                                        * Reading from include/config/auto.conf
-                                        * If CONFIG_FOO previously existed in
-                                        * auto.conf but it is missing now,
-                                        * include/config/FOO must be touched.
-                                        */
-                                       conf_touch_dep(line + strlen(CONFIG_));
-                               } else {
-                                       if (warn_unknown)
-                                               conf_warning("unknown symbol: %s",
-                                                            line + strlen(CONFIG_));
-
-                                       conf_set_changed(true);
-                               }
+                       sym_name = line + strlen(CONFIG_);
+                       p = strchr(sym_name, '=');
+                       if (!p) {
+                               conf_warning("unexpected data: %s", line);
                                continue;
                        }
+                       *p = 0;
+                       val = p + 1;
+               }
 
-                       if (sym->flags & def_flags) {
-                               conf_warning("override: reassigning to symbol %s", sym->name);
-                       }
-                       if (conf_set_sym_val(sym, def, def_flags, p))
-                               continue;
-               } else {
-                       if (line[0] != '\r' && line[0] != '\n')
-                               conf_warning("unexpected data: %.*s",
-                                            (int)strcspn(line, "\r\n"), line);
+               sym = sym_find(sym_name);
+               if (!sym) {
+                       if (def == S_DEF_AUTO) {
+                               /*
+                                * Reading from include/config/auto.conf.
+                                * If CONFIG_FOO previously existed in auto.conf
+                                * but it is missing now, include/config/FOO
+                                * must be touched.
+                                */
+                               conf_touch_dep(sym_name);
+                       } else {
+                               if (warn_unknown)
+                                       conf_warning("unknown symbol: %s", sym_name);
 
+                               conf_set_changed(true);
+                       }
                        continue;
                }
 
+               if (sym->flags & def_flags)
+                       conf_warning("override: reassigning to symbol %s", sym->name);
+
+               if (conf_set_sym_val(sym, def, def_flags, val))
+                       continue;
+
                if (sym && sym_is_choice_value(sym)) {
                        struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym));
                        switch (sym->def[def].tri) {
@@ -533,9 +531,6 @@ load:
        free(line);
        fclose(in);
 
-       if (conf_warnings && werror)
-               exit(1);
-
        return 0;
 }
 
@@ -594,7 +589,7 @@ int conf_read(const char *name)
                                /* Reset a string value if it's out of range */
                                if (sym_string_within_range(sym, sym->def[S_DEF_USER].val))
                                        break;
-                               sym->flags &= ~(SYMBOL_VALID|SYMBOL_DEF_USER);
+                               sym->flags &= ~SYMBOL_VALID;
                                conf_unsaved++;
                                break;
                        default:
index 81ebf8108ca748893d469c77af1eebf8ecbf7df2..a290de36307ba8abe184a915fb0a6b6a3b29bbb6 100644 (file)
@@ -1131,7 +1131,6 @@ static int expr_compare_type(enum expr_type t1, enum expr_type t2)
        default:
                return -1;
        }
-       printf("[%dgt%d?]", t1, t2);
        return 0;
 }
 
index 471a59acecec61c3a8f7e141a7f6c6654aae21c6..5cdc8f5e6446ab55e42ce21dfbe728b0ab22aee7 100644 (file)
@@ -99,8 +99,6 @@ bool menu_is_visible(struct menu *menu);
 bool menu_has_prompt(struct menu *menu);
 const char *menu_get_prompt(struct menu *menu);
 struct menu *menu_get_parent_menu(struct menu *menu);
-bool menu_has_help(struct menu *menu);
-const char *menu_get_help(struct menu *menu);
 int get_jump_key_char(void);
 struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head);
 void menu_get_ext_help(struct menu *menu, struct gstr *help);
index edd1e617b25c5c3683ba4287f108ccb690608d10..a4ae5e9eadadb8758b5911d0f38c028b0622d937 100644 (file)
@@ -1,4 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LKC_PROTO_H
+#define LKC_PROTO_H
+
 #include <stdarg.h>
 
 /* confdata.c */
@@ -12,6 +15,7 @@ void conf_set_changed(bool val);
 bool conf_get_changed(void);
 void conf_set_changed_callback(void (*fn)(void));
 void conf_set_message_callback(void (*fn)(const char *s));
+bool conf_errors(void);
 
 /* symbol.c */
 extern struct symbol * symbol_hash[SYMBOL_HASHSIZE];
@@ -22,6 +26,7 @@ void print_symbol_for_listconfig(struct symbol *sym);
 struct symbol ** sym_re_search(const char *pattern);
 const char * sym_type_name(enum symbol_type type);
 void sym_calc_value(struct symbol *sym);
+bool sym_dep_errors(void);
 enum symbol_type sym_get_type(struct symbol *sym);
 bool sym_tristate_within_range(struct symbol *sym,tristate tri);
 bool sym_set_tristate_value(struct symbol *sym,tristate tri);
@@ -50,3 +55,5 @@ char *expand_one_token(const char **str);
 
 /* expr.c */
 void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken);
+
+#endif /* LKC_PROTO_H */
index eccc87a441e713a8b9013e3b286a176688ece372..5df32148a86951f78dca309db7d06d77f2364259 100644 (file)
@@ -21,6 +21,7 @@
 
 #include "lkc.h"
 #include "lxdialog/dialog.h"
+#include "mnconf-common.h"
 
 static const char mconf_readme[] =
 "Overview\n"
@@ -247,7 +248,7 @@ search_help[] =
        "      -> PCI support (PCI [=y])\n"
        "(1)     -> PCI access mode (<choice> [=y])\n"
        "  Defined at drivers/pci/Kconfig:47\n"
-       "  Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
+       "  Depends on: X86_LOCAL_APIC && X86_IO_APIC\n"
        "  Selects: LIBCRC32\n"
        "  Selected by: BAR [=n]\n"
        "-----------------------------------------------------------------\n"
@@ -286,7 +287,6 @@ static int single_menu_mode;
 static int show_all_options;
 static int save_and_exit;
 static int silent;
-static int jump_key_char;
 
 static void conf(struct menu *menu, struct menu *active_menu);
 
@@ -378,58 +378,6 @@ static void show_help(struct menu *menu)
        str_free(&help);
 }
 
-struct search_data {
-       struct list_head *head;
-       struct menu *target;
-};
-
-static int next_jump_key(int key)
-{
-       if (key < '1' || key > '9')
-               return '1';
-
-       key++;
-
-       if (key > '9')
-               key = '1';
-
-       return key;
-}
-
-static int handle_search_keys(int key, size_t start, size_t end, void *_data)
-{
-       struct search_data *data = _data;
-       struct jump_key *pos;
-       int index = 0;
-
-       if (key < '1' || key > '9')
-               return 0;
-
-       list_for_each_entry(pos, data->head, entries) {
-               index = next_jump_key(index);
-
-               if (pos->offset < start)
-                       continue;
-
-               if (pos->offset >= end)
-                       break;
-
-               if (key == index) {
-                       data->target = pos->target;
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-int get_jump_key_char(void)
-{
-       jump_key_char = next_jump_key(jump_key_char);
-
-       return jump_key_char;
-}
-
 static void search_conf(void)
 {
        struct symbol **sym_arr;
index 61c442d84aef4a0dd11a2512314fb2d5bea169d2..2cce8b651f6154197af3b5332a64ea3ecdac344d 100644 (file)
@@ -673,19 +673,6 @@ struct menu *menu_get_parent_menu(struct menu *menu)
        return menu;
 }
 
-bool menu_has_help(struct menu *menu)
-{
-       return menu->help != NULL;
-}
-
-const char *menu_get_help(struct menu *menu)
-{
-       if (menu->help)
-               return menu->help;
-       else
-               return "";
-}
-
 static void get_def_str(struct gstr *r, struct menu *menu)
 {
        str_printf(r, "Defined at %s:%d\n",
@@ -856,10 +843,10 @@ void menu_get_ext_help(struct menu *menu, struct gstr *help)
        struct symbol *sym = menu->sym;
        const char *help_text = nohelp_text;
 
-       if (menu_has_help(menu)) {
+       if (menu->help) {
                if (sym->name)
                        str_printf(help, "%s%s:\n\n", CONFIG_, sym->name);
-               help_text = menu_get_help(menu);
+               help_text = menu->help;
        }
        str_printf(help, "%s\n", help_text);
        if (sym)
diff --git a/scripts/kconfig/mnconf-common.c b/scripts/kconfig/mnconf-common.c
new file mode 100644 (file)
index 0000000..18cb9a6
--- /dev/null
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "expr.h"
+#include "list.h"
+#include "mnconf-common.h"
+
+int jump_key_char;
+
+int next_jump_key(int key)
+{
+       if (key < '1' || key > '9')
+               return '1';
+
+       key++;
+
+       if (key > '9')
+               key = '1';
+
+       return key;
+}
+
+int handle_search_keys(int key, size_t start, size_t end, void *_data)
+{
+       struct search_data *data = _data;
+       struct jump_key *pos;
+       int index = 0;
+
+       if (key < '1' || key > '9')
+               return 0;
+
+       list_for_each_entry(pos, data->head, entries) {
+               index = next_jump_key(index);
+
+               if (pos->offset < start)
+                       continue;
+
+               if (pos->offset >= end)
+                       break;
+
+               if (key == index) {
+                       data->target = pos->target;
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+int get_jump_key_char(void)
+{
+       jump_key_char = next_jump_key(jump_key_char);
+
+       return jump_key_char;
+}
diff --git a/scripts/kconfig/mnconf-common.h b/scripts/kconfig/mnconf-common.h
new file mode 100644 (file)
index 0000000..ab6292c
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef MNCONF_COMMON_H
+#define MNCONF_COMMON_H
+
+#include <stddef.h>
+
+struct search_data {
+       struct list_head *head;
+       struct menu *target;
+};
+
+extern int jump_key_char;
+
+int next_jump_key(int key);
+int handle_search_keys(int key, size_t start, size_t end, void *_data);
+int get_jump_key_char(void);
+
+#endif /* MNCONF_COMMON_H */
index 143a2c351d5764b5e9e1b175ad16bb40d3b5d2aa..1148163cfa7e71c037ba28eab8183cf304fc5b85 100644 (file)
@@ -12,6 +12,7 @@
 #include <stdlib.h>
 
 #include "lkc.h"
+#include "mnconf-common.h"
 #include "nconf.h"
 #include <ctype.h>
 
@@ -216,7 +217,7 @@ search_help[] =
 "Symbol: FOO [ = m]\n"
 "Prompt: Foo bus is used to drive the bar HW\n"
 "Defined at drivers/pci/Kconfig:47\n"
-"Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
+"Depends on: X86_LOCAL_APIC && X86_IO_APIC\n"
 "Location:\n"
 "  -> Bus options (PCI, PCMCIA, EISA, ISA)\n"
 "    -> PCI support (PCI [ = y])\n"
@@ -279,7 +280,6 @@ static const char *current_instructions = menu_instructions;
 
 static char *dialog_input_result;
 static int dialog_input_result_len;
-static int jump_key_char;
 
 static void selected_conf(struct menu *menu, struct menu *active_menu);
 static void conf(struct menu *menu);
@@ -691,57 +691,6 @@ static int do_exit(void)
        return 0;
 }
 
-struct search_data {
-       struct list_head *head;
-       struct menu *target;
-};
-
-static int next_jump_key(int key)
-{
-       if (key < '1' || key > '9')
-               return '1';
-
-       key++;
-
-       if (key > '9')
-               key = '1';
-
-       return key;
-}
-
-static int handle_search_keys(int key, size_t start, size_t end, void *_data)
-{
-       struct search_data *data = _data;
-       struct jump_key *pos;
-       int index = 0;
-
-       if (key < '1' || key > '9')
-               return 0;
-
-       list_for_each_entry(pos, data->head, entries) {
-               index = next_jump_key(index);
-
-               if (pos->offset < start)
-                       continue;
-
-               if (pos->offset >= end)
-                       break;
-
-               if (key == index) {
-                       data->target = pos->target;
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-int get_jump_key_char(void)
-{
-       jump_key_char = next_jump_key(jump_key_char);
-
-       return jump_key_char;
-}
 
 static void search_conf(void)
 {
index a76925b46ce6309439ec0a554775dbbf2dd445cd..e9e9fb8d86746460c893a51a2989ca4061fc8e52 100644 (file)
@@ -29,14 +29,9 @@ struct symbol symbol_no = {
        .flags = SYMBOL_CONST|SYMBOL_VALID,
 };
 
-static struct symbol symbol_empty = {
-       .name = "",
-       .curr = { "", no },
-       .flags = SYMBOL_VALID,
-};
-
 struct symbol *modules_sym;
 static tristate modules_val;
+static int sym_warnings;
 
 enum symbol_type sym_get_type(struct symbol *sym)
 {
@@ -317,6 +312,14 @@ static void sym_warn_unmet_dep(struct symbol *sym)
                               "  Selected by [m]:\n");
 
        fputs(str_get(&gs), stderr);
+       sym_warnings++;
+}
+
+bool sym_dep_errors(void)
+{
+       if (sym_warnings)
+               return getenv("KCONFIG_WERROR");
+       return false;
 }
 
 void sym_calc_value(struct symbol *sym)
@@ -342,15 +345,21 @@ void sym_calc_value(struct symbol *sym)
 
        oldval = sym->curr;
 
+       newval.tri = no;
+
        switch (sym->type) {
        case S_INT:
+               newval.val = "0";
+               break;
        case S_HEX:
+               newval.val = "0x0";
+               break;
        case S_STRING:
-               newval = symbol_empty.curr;
+               newval.val = "";
                break;
        case S_BOOLEAN:
        case S_TRISTATE:
-               newval = symbol_no.curr;
+               newval.val = "n";
                break;
        default:
                sym->curr.val = sym->name;
@@ -697,13 +706,12 @@ const char *sym_get_string_default(struct symbol *sym)
 {
        struct property *prop;
        struct symbol *ds;
-       const char *str;
+       const char *str = "";
        tristate val;
 
        sym_calc_visibility(sym);
        sym_calc_value(modules_sym);
        val = symbol_no.curr.tri;
-       str = symbol_empty.curr.val;
 
        /* If symbol has a default value look it up */
        prop = sym_get_default_prop(sym);
@@ -753,14 +761,17 @@ const char *sym_get_string_default(struct symbol *sym)
                case yes: return "y";
                }
        case S_INT:
+               if (!str[0])
+                       str = "0";
+               break;
        case S_HEX:
-               return str;
-       case S_STRING:
-               return str;
-       case S_UNKNOWN:
+               if (!str[0])
+                       str = "0x0";
+               break;
+       default:
                break;
        }
-       return "";
+       return str;
 }
 
 const char *sym_get_string_value(struct symbol *sym)
index b78f114ad48cc5bac6e57246f8a2df4dabc0271d..92e5b2b9761d70966279ac0159769adf580110fa 100644 (file)
@@ -42,8 +42,7 @@ struct gstr str_new(void)
 /* Free storage for growable string */
 void str_free(struct gstr *gs)
 {
-       if (gs->s)
-               free(gs->s);
+       free(gs->s);
        gs->s = NULL;
        gs->len = 0;
 }
index c62066825f538c4ffe048091f68d5892c0d8267d..9faa4d3d91e3586e20bed71a50893c6c959252ad 100755 (executable)
@@ -26,6 +26,8 @@ gcc)
 llvm)
        if [ "$SRCARCH" = s390 ]; then
                echo 15.0.0
+       elif [ "$SRCARCH" = loongarch ]; then
+               echo 18.0.0
        else
                echo 11.0.0
        fi
index cb6406f485a960041db048a5f5e8ae5bfad40bb5..267b9a0a3abcd849fe4f0bae4cddd8a287d26184 100644 (file)
@@ -60,8 +60,7 @@ static unsigned int nr_unresolved;
 
 #define MODULE_NAME_LEN (64 - sizeof(Elf_Addr))
 
-void __attribute__((format(printf, 2, 3)))
-modpost_log(enum loglevel loglevel, const char *fmt, ...)
+void modpost_log(enum loglevel loglevel, const char *fmt, ...)
 {
        va_list arglist;
 
@@ -71,9 +70,7 @@ modpost_log(enum loglevel loglevel, const char *fmt, ...)
                break;
        case LOG_ERROR:
                fprintf(stderr, "ERROR: ");
-               break;
-       case LOG_FATAL:
-               fprintf(stderr, "FATAL: ");
+               error_occurred = true;
                break;
        default: /* invalid loglevel, ignore */
                break;
@@ -84,11 +81,6 @@ modpost_log(enum loglevel loglevel, const char *fmt, ...)
        va_start(arglist, fmt);
        vfprintf(stderr, fmt, arglist);
        va_end(arglist);
-
-       if (loglevel == LOG_FATAL)
-               exit(1);
-       if (loglevel == LOG_ERROR)
-               error_occurred = true;
 }
 
 static inline bool strends(const char *str, const char *postfix)
@@ -474,11 +466,9 @@ static int parse_elf(struct elf_info *info, const char *filename)
                fatal("%s: not relocatable object.", filename);
 
        /* Check if file offset is correct */
-       if (hdr->e_shoff > info->size) {
+       if (hdr->e_shoff > info->size)
                fatal("section header offset=%lu in file '%s' is bigger than filesize=%zu\n",
                      (unsigned long)hdr->e_shoff, filename, info->size);
-               return 0;
-       }
 
        if (hdr->e_shnum == SHN_UNDEF) {
                /*
@@ -516,12 +506,11 @@ static int parse_elf(struct elf_info *info, const char *filename)
                const char *secname;
                int nobits = sechdrs[i].sh_type == SHT_NOBITS;
 
-               if (!nobits && sechdrs[i].sh_offset > info->size) {
+               if (!nobits && sechdrs[i].sh_offset > info->size)
                        fatal("%s is truncated. sechdrs[i].sh_offset=%lu > sizeof(*hrd)=%zu\n",
                              filename, (unsigned long)sechdrs[i].sh_offset,
                              sizeof(*hdr));
-                       return 0;
-               }
+
                secname = secstrings + sechdrs[i].sh_name;
                if (strcmp(secname, ".modinfo") == 0) {
                        if (nobits)
@@ -807,7 +796,8 @@ static void check_section(const char *modname, struct elf_info *elf,
 
 #define DATA_SECTIONS ".data", ".data.rel"
 #define TEXT_SECTIONS ".text", ".text.*", ".sched.text", \
-               ".kprobes.text", ".cpuidle.text", ".noinstr.text"
+               ".kprobes.text", ".cpuidle.text", ".noinstr.text", \
+               ".ltext", ".ltext.*"
 #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
                ".fixup", ".entry.text", ".exception.text", \
                ".coldtext", ".softirqentry.text"
@@ -1346,6 +1336,14 @@ static Elf_Addr addend_mips_rel(uint32_t *location, unsigned int r_type)
 #define R_LARCH_SUB32          55
 #endif
 
+#ifndef R_LARCH_RELAX
+#define R_LARCH_RELAX          100
+#endif
+
+#ifndef R_LARCH_ALIGN
+#define R_LARCH_ALIGN          102
+#endif
+
 static void get_rel_type_and_sym(struct elf_info *elf, uint64_t r_info,
                                 unsigned int *r_type, unsigned int *r_sym)
 {
@@ -1400,9 +1398,16 @@ static void section_rela(struct module *mod, struct elf_info *elf,
                                continue;
                        break;
                case EM_LOONGARCH:
-                       if (!strcmp("__ex_table", fromsec) &&
-                           r_type == R_LARCH_SUB32)
+                       switch (r_type) {
+                       case R_LARCH_SUB32:
+                               if (!strcmp("__ex_table", fromsec))
+                                       continue;
+                               break;
+                       case R_LARCH_RELAX:
+                       case R_LARCH_ALIGN:
+                               /* These relocs do not refer to symbols */
                                continue;
+                       }
                        break;
                }
 
@@ -1419,7 +1424,7 @@ static void section_rel(struct module *mod, struct elf_info *elf,
 
        for (rel = start; rel < stop; rel++) {
                Elf_Sym *tsym;
-               Elf_Addr taddr = 0, r_offset;
+               Elf_Addr taddr, r_offset;
                unsigned int r_type, r_sym;
                void *loc;
 
index 69baf014da4fdaa25989716f7686fadc43b8a603..ee43c795063682b440818cd3a81ba5355afba456 100644 (file)
@@ -194,10 +194,10 @@ void *sym_get_data(const struct elf_info *info, const Elf_Sym *sym);
 enum loglevel {
        LOG_WARN,
        LOG_ERROR,
-       LOG_FATAL
 };
 
-void modpost_log(enum loglevel loglevel, const char *fmt, ...);
+void __attribute__((format(printf, 2, 3)))
+modpost_log(enum loglevel loglevel, const char *fmt, ...);
 
 /*
  * warn - show the given message, then let modpost continue running, still
@@ -214,4 +214,4 @@ void modpost_log(enum loglevel loglevel, const char *fmt, ...);
  */
 #define warn(fmt, args...)     modpost_log(LOG_WARN, fmt, ##args)
 #define error(fmt, args...)    modpost_log(LOG_ERROR, fmt, ##args)
-#define fatal(fmt, args...)    modpost_log(LOG_FATAL, fmt, ##args)
+#define fatal(fmt, args...)    do { error(fmt, ##args); exit(1); } while (1)
index d7dd0d04c70c9982bae9b86e54bf52dba295a2fd..bf96a3c2460814febe85a0a49fe2e9a8e90ea1ad 100755 (executable)
@@ -25,35 +25,20 @@ if_enabled_echo() {
 }
 
 create_package() {
-       local pname="$1" pdir="$2"
-       local dpkg_deb_opts
-
-       mkdir -m 755 -p "$pdir/DEBIAN"
-       mkdir -p "$pdir/usr/share/doc/$pname"
-       cp debian/copyright "$pdir/usr/share/doc/$pname/"
-       cp debian/changelog "$pdir/usr/share/doc/$pname/changelog.Debian"
-       gzip -n -9 "$pdir/usr/share/doc/$pname/changelog.Debian"
-       sh -c "cd '$pdir'; find . -type f ! -path './DEBIAN/*' -printf '%P\0' \
-               | xargs -r0 md5sum > DEBIAN/md5sums"
-
-       # Fix ownership and permissions
-       if [ "$DEB_RULES_REQUIRES_ROOT" = "no" ]; then
-               dpkg_deb_opts="--root-owner-group"
-       else
-               chown -R root:root "$pdir"
-       fi
-       # a+rX in case we are in a restrictive umask environment like 0077
-       # ug-s in case we build in a setuid/setgid directory
-       chmod -R go-w,a+rX,ug-s "$pdir"
-
-       # Create the package
-       dpkg-gencontrol -p$pname -P"$pdir"
-       dpkg-deb $dpkg_deb_opts ${KDEB_COMPRESS:+-Z$KDEB_COMPRESS} --build "$pdir" ..
+       export DH_OPTIONS="-p${1}"
+
+       dh_installdocs
+       dh_installchangelogs
+       dh_compress
+       dh_fixperms
+       dh_gencontrol
+       dh_md5sums
+       dh_builddeb -- ${KDEB_COMPRESS:+-Z$KDEB_COMPRESS}
 }
 
 install_linux_image () {
-       pdir=$1
-       pname=$2
+       pname=$1
+       pdir=debian/$1
 
        rm -rf ${pdir}
 
@@ -62,7 +47,7 @@ install_linux_image () {
                ${MAKE} -f ${srctree}/Makefile INSTALL_DTBS_PATH="${pdir}/usr/lib/linux-image-${KERNELRELEASE}" dtbs_install
        fi
 
-       ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install
+       ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" INSTALL_MOD_STRIP=1 modules_install
        rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build"
 
        # Install the kernel
@@ -122,26 +107,22 @@ install_linux_image () {
 }
 
 install_linux_image_dbg () {
-       pdir=$1
-       image_pdir=$2
+       pdir=debian/$1
 
        rm -rf ${pdir}
 
-       for module in $(find ${image_pdir}/lib/modules/ -name *.ko -printf '%P\n'); do
-               module=lib/modules/${module}
-               mkdir -p $(dirname ${pdir}/usr/lib/debug/${module})
-               # only keep debug symbols in the debug file
-               ${OBJCOPY} --only-keep-debug ${image_pdir}/${module} ${pdir}/usr/lib/debug/${module}
-               # strip original module from debug symbols
-               ${OBJCOPY} --strip-debug ${image_pdir}/${module}
-               # then add a link to those
-               ${OBJCOPY} --add-gnu-debuglink=${pdir}/usr/lib/debug/${module} ${image_pdir}/${module}
-       done
+       # Parse modules.order directly because 'make modules_install' may sign,
+       # compress modules, and then run unneeded depmod.
+       while read -r mod; do
+               mod="${mod%.o}.ko"
+               dbg="${pdir}/usr/lib/debug/lib/modules/${KERNELRELEASE}/kernel/${mod}"
+               buildid=$("${READELF}" -n "${mod}" | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p')
+               link="${pdir}/usr/lib/debug/.build-id/${buildid}.debug"
 
-       # re-sign stripped modules
-       if is_enabled CONFIG_MODULE_SIG_ALL; then
-               ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${image_pdir}" modules_sign
-       fi
+               mkdir -p "${dbg%/*}" "${link%/*}"
+               "${OBJCOPY}" --only-keep-debug "${mod}" "${dbg}"
+               ln -sf --relative "${dbg}" "${link}"
+       done < modules.order
 
        # Build debug package
        # Different tools want the image in different locations
@@ -156,8 +137,8 @@ install_linux_image_dbg () {
 }
 
 install_kernel_headers () {
-       pdir=$1
-       version=$2
+       pdir=debian/$1
+       version=${1#linux-headers-}
 
        rm -rf $pdir
 
@@ -168,18 +149,16 @@ install_kernel_headers () {
 }
 
 install_libc_headers () {
-       pdir=$1
+       pdir=debian/$1
 
        rm -rf $pdir
 
-       $MAKE -f $srctree/Makefile headers
        $MAKE -f $srctree/Makefile headers_install INSTALL_HDR_PATH=$pdir/usr
 
        # move asm headers to /usr/include/<libc-machine>/asm to match the structure
        # used by Debian-based distros (to support multi-arch)
-       host_arch=$(dpkg-architecture -a$DEB_HOST_ARCH -qDEB_HOST_MULTIARCH)
-       mkdir $pdir/usr/include/$host_arch
-       mv $pdir/usr/include/asm $pdir/usr/include/$host_arch/
+       mkdir "$pdir/usr/include/${DEB_HOST_MULTIARCH}"
+       mv "$pdir/usr/include/asm" "$pdir/usr/include/${DEB_HOST_MULTIARCH}"
 }
 
 rm -f debian/files
@@ -190,30 +169,13 @@ for package in ${packages_enabled}
 do
        case ${package} in
        *-dbg)
-               # This must be done after linux-image, that is, we expect the
-               # debug package appears after linux-image in debian/control.
-               install_linux_image_dbg debian/linux-image-dbg debian/linux-image;;
-       linux-image-*|user-mode-linux-*)
-               install_linux_image debian/linux-image ${package};;
-       linux-libc-dev)
-               install_libc_headers debian/linux-libc-dev;;
-       linux-headers-*)
-               install_kernel_headers debian/linux-headers ${package#linux-headers-};;
-       esac
-done
-
-for package in ${packages_enabled}
-do
-       case ${package} in
-       *-dbg)
-               create_package ${package} debian/linux-image-dbg;;
+               install_linux_image_dbg "${package}";;
        linux-image-*|user-mode-linux-*)
-               create_package ${package} debian/linux-image;;
+               install_linux_image "${package}";;
        linux-libc-dev)
-               create_package ${package} debian/linux-libc-dev;;
+               install_libc_headers "${package}";;
        linux-headers-*)
-               create_package ${package} debian/linux-headers;;
+               install_kernel_headers "${package}";;
        esac
+       create_package "${package}"
 done
-
-exit 0
index 65b4ea50296219e2cfed406dddd3cb4eac0737ea..72c91a1b832f939d9861a3b21094c1df75a009fd 100755 (executable)
@@ -23,7 +23,6 @@ tmpdir=$1
 #
 rm -rf -- "${tmpdir}"
 mkdir -p -- "${tmpdir}/boot"
-dirs=boot
 
 
 #
@@ -38,12 +37,9 @@ fi
 
 
 #
-# Try to install modules
+# Install modules
 #
-if grep -q '^CONFIG_MODULES=y' include/config/auto.conf; then
-       make ARCH="${ARCH}" -f ${srctree}/Makefile INSTALL_MOD_PATH="${tmpdir}" modules_install
-       dirs="$dirs lib"
-fi
+make ARCH="${ARCH}" -f ${srctree}/Makefile INSTALL_MOD_PATH="${tmpdir}" modules_install
 
 
 #
diff --git a/scripts/package/deb-build-option b/scripts/package/deb-build-option
deleted file mode 100755 (executable)
index 7950eff..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0-only
-
-# Set up CROSS_COMPILE if not defined yet
-if [ "${CROSS_COMPILE+set}" != "set" -a "${DEB_HOST_ARCH}" != "${DEB_BUILD_ARCH}" ]; then
-       echo CROSS_COMPILE=${DEB_HOST_GNU_TYPE}-
-fi
-
-version=$(dpkg-parsechangelog -S Version)
-debian_revision="${version##*-}"
-
-if [ "${version}" != "${debian_revision}" ]; then
-       echo KBUILD_BUILD_VERSION=${debian_revision}
-fi
diff --git a/scripts/package/debian/copyright b/scripts/package/debian/copyright
new file mode 100644 (file)
index 0000000..4f1f062
--- /dev/null
@@ -0,0 +1,16 @@
+This is a packaged upstream version of the Linux kernel.
+
+The sources may be found at most Linux archive sites, including:
+https://www.kernel.org/pub/linux/kernel
+
+Copyright: 1991 - 2023 Linus Torvalds and others.
+
+The git repository for mainline kernel development is at:
+git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; version 2 dated June, 1991.
+
+On Debian GNU/Linux systems, the complete text of the GNU General Public
+License version 2 can be found in `/usr/share/common-licenses/GPL-2'.
index 3dafa9496c6366d727bb8b3249886a11b93ba2d0..09830778006227f5854580bba5099e21b09b8d1e 100755 (executable)
@@ -1,33 +1,46 @@
 #!/usr/bin/make -f
 # SPDX-License-Identifier: GPL-2.0-only
 
-include debian/rules.vars
+# in case debian/rules is executed directly
+export DEB_RULES_REQUIRES_ROOT := no
 
-srctree ?= .
+include debian/rules.vars
 
 ifneq (,$(filter-out parallel=1,$(filter parallel=%,$(DEB_BUILD_OPTIONS))))
     NUMJOBS = $(patsubst parallel=%,%,$(filter parallel=%,$(DEB_BUILD_OPTIONS)))
     MAKEFLAGS += -j$(NUMJOBS)
 endif
 
+revision = $(lastword $(subst -, ,$(shell dpkg-parsechangelog -S Version)))
+CROSS_COMPILE ?= $(filter-out $(DEB_BUILD_GNU_TYPE)-, $(DEB_HOST_GNU_TYPE)-)
+make-opts = ARCH=$(ARCH) KERNELRELEASE=$(KERNELRELEASE) KBUILD_BUILD_VERSION=$(revision) $(addprefix CROSS_COMPILE=,$(CROSS_COMPILE))
+
 .PHONY: binary binary-indep binary-arch
 binary: binary-arch binary-indep
 binary-indep: build-indep
 binary-arch: build-arch
-       $(MAKE) -f $(srctree)/Makefile ARCH=$(ARCH) \
-       KERNELRELEASE=$(KERNELRELEASE) \
-       run-command KBUILD_RUN_COMMAND=+$(srctree)/scripts/package/builddeb
+       $(MAKE) $(make-opts) \
+       run-command KBUILD_RUN_COMMAND='+$$(srctree)/scripts/package/builddeb'
 
 .PHONY: build build-indep build-arch
 build: build-arch build-indep
 build-indep:
 build-arch:
-       $(MAKE) -f $(srctree)/Makefile ARCH=$(ARCH) \
-       KERNELRELEASE=$(KERNELRELEASE) \
-       $(shell $(srctree)/scripts/package/deb-build-option) \
-       olddefconfig all
+       $(MAKE) $(make-opts) olddefconfig
+       $(MAKE) $(make-opts) $(if $(filter um,$(ARCH)),,headers) all
 
 .PHONY: clean
 clean:
-       rm -rf debian/files debian/linux-*
-       $(MAKE) -f $(srctree)/Makefile ARCH=$(ARCH) clean
+       rm -rf debian/files debian/linux-* debian/deb-env.vars*
+       $(MAKE) ARCH=$(ARCH) clean
+
+# If DEB_HOST_ARCH is empty, it is likely that debian/rules was executed
+# directly. Run 'dpkg-architecture --print-set --print-format=make' to
+# generate a makefile construct that exports all DEB_* variables.
+ifndef DEB_HOST_ARCH
+include debian/deb-env.vars
+
+debian/deb-env.vars:
+       dpkg-architecture -a$$(cat debian/arch) --print-set --print-format=make > $@.tmp
+       mv $@.tmp $@
+endif
index 8a7051fad0878990cd4569a326fc7137a7db088d..76e0765dfcd6ea23294be4d54329c1317dbddb37 100755 (executable)
@@ -20,7 +20,7 @@ mkdir -p "${destdir}"
        find "arch/${SRCARCH}" -maxdepth 1 -name 'Makefile*'
        find include scripts -type f -o -type l
        find "arch/${SRCARCH}" -name Kbuild.platforms -o -name Platform
-       find "arch/${SRCARCH}" -name include -o -name scripts -type d
+       find "arch/${SRCARCH}" -name include -type d
 ) | tar -c -f - -C "${srctree}" -T - | tar -xf - -C "${destdir}"
 
 {
index 3eee0143e0c5cc7671e640aad2368446e94805e0..f58726671fb37424308c678126e5edd4a22dc5f3 100644 (file)
@@ -55,18 +55,12 @@ patch -p1 < %{SOURCE2}
 %{make} %{makeflags} KERNELRELEASE=%{KERNELRELEASE} KBUILD_BUILD_VERSION=%{release}
 
 %install
-mkdir -p %{buildroot}/boot
-%ifarch ia64
-mkdir -p %{buildroot}/boot/efi
-cp $(%{make} %{makeflags} -s image_name) %{buildroot}/boot/efi/vmlinuz-%{KERNELRELEASE}
-ln -s efi/vmlinuz-%{KERNELRELEASE} %{buildroot}/boot/
-%else
-cp $(%{make} %{makeflags} -s image_name) %{buildroot}/boot/vmlinuz-%{KERNELRELEASE}
-%endif
+mkdir -p %{buildroot}/lib/modules/%{KERNELRELEASE}
+cp $(%{make} %{makeflags} -s image_name) %{buildroot}/lib/modules/%{KERNELRELEASE}/vmlinuz
 %{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} modules_install
 %{make} %{makeflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
-cp System.map %{buildroot}/boot/System.map-%{KERNELRELEASE}
-cp .config %{buildroot}/boot/config-%{KERNELRELEASE}
+cp System.map %{buildroot}/lib/modules/%{KERNELRELEASE}
+cp .config %{buildroot}/lib/modules/%{KERNELRELEASE}/config
 ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEASE}/build
 %if %{with_devel}
 %{make} %{makeflags} run-command KBUILD_RUN_COMMAND='${srctree}/scripts/package/install-extmod-build %{buildroot}/usr/src/kernels/%{KERNELRELEASE}'
@@ -76,13 +70,14 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA
 rm -rf %{buildroot}
 
 %post
-if [ -x /sbin/installkernel -a -r /boot/vmlinuz-%{KERNELRELEASE} -a -r /boot/System.map-%{KERNELRELEASE} ]; then
-cp /boot/vmlinuz-%{KERNELRELEASE} /boot/.vmlinuz-%{KERNELRELEASE}-rpm
-cp /boot/System.map-%{KERNELRELEASE} /boot/.System.map-%{KERNELRELEASE}-rpm
-rm -f /boot/vmlinuz-%{KERNELRELEASE} /boot/System.map-%{KERNELRELEASE}
-/sbin/installkernel %{KERNELRELEASE} /boot/.vmlinuz-%{KERNELRELEASE}-rpm /boot/.System.map-%{KERNELRELEASE}-rpm
-rm -f /boot/.vmlinuz-%{KERNELRELEASE}-rpm /boot/.System.map-%{KERNELRELEASE}-rpm
+if [ -x /usr/bin/kernel-install ]; then
+       /usr/bin/kernel-install add %{KERNELRELEASE} /lib/modules/%{KERNELRELEASE}/vmlinuz
 fi
+for file in vmlinuz System.map config; do
+       if ! cmp --silent "/lib/modules/%{KERNELRELEASE}/${file}" "/boot/${file}-%{KERNELRELEASE}"; then
+               cp "/lib/modules/%{KERNELRELEASE}/${file}" "/boot/${file}-%{KERNELRELEASE}"
+       fi
+done
 
 %preun
 if [ -x /sbin/new-kernel-pkg ]; then
@@ -100,7 +95,6 @@ fi
 %defattr (-, root, root)
 /lib/modules/%{KERNELRELEASE}
 %exclude /lib/modules/%{KERNELRELEASE}/build
-/boot/*
 
 %files headers
 %defattr (-, root, root)
index 5044224cf6714b3e5738f1e6d30dda05c589e3ff..070149c985fea4e33126650fad3e7769605c216a 100755 (executable)
@@ -26,7 +26,7 @@ set_debarch() {
 
        # Attempt to find the correct Debian architecture
        case "$UTS_MACHINE" in
-       i386|ia64|alpha|m68k|riscv*)
+       i386|alpha|m68k|riscv*)
                debarch="$UTS_MACHINE" ;;
        x86_64)
                debarch=amd64 ;;
@@ -176,8 +176,6 @@ else
 fi
 
 echo $debarch > debian/arch
-extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)"
-extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
 
 # Generate a simple changelog template
 cat <<EOF > debian/changelog
@@ -188,26 +186,6 @@ $sourcename ($packageversion) $distribution; urgency=low
  -- $maintainer  $(date -R)
 EOF
 
-# Generate copyright file
-cat <<EOF > debian/copyright
-This is a packaged upstream version of the Linux kernel.
-
-The sources may be found at most Linux archive sites, including:
-https://www.kernel.org/pub/linux/kernel
-
-Copyright: 1991 - 2018 Linus Torvalds and others.
-
-The git repository for mainline kernel development is at:
-git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; version 2 dated June, 1991.
-
-On Debian GNU/Linux systems, the complete text of the GNU General Public
-License version 2 can be found in \`/usr/share/common-licenses/GPL-2'.
-EOF
-
 # Generate a control file
 cat <<EOF > debian/control
 Source: $sourcename
@@ -215,7 +193,8 @@ Section: kernel
 Priority: optional
 Maintainer: $maintainer
 Rules-Requires-Root: no
-Build-Depends: bc, debhelper, rsync, kmod, cpio, bison, flex $extra_build_depends
+Build-Depends: debhelper-compat (= 12)
+Build-Depends-Arch: bc, bison, cpio, flex, kmod, libelf-dev:native, libssl-dev:native, rsync
 Homepage: https://www.kernel.org/
 
 Package: $packagename-$version
@@ -268,6 +247,7 @@ ARCH := ${ARCH}
 KERNELRELEASE := ${KERNELRELEASE}
 EOF
 
+cp "${srctree}/scripts/package/debian/copyright" debian/
 cp "${srctree}/scripts/package/debian/rules" debian/
 
 exit 0
index 626d278e4a5a7a9a90286444956b933e997895f7..85d5e07d1b40b2087ea49477fe90e96b924a122a 100644 (file)
@@ -10,5 +10,5 @@ parts:
   kernel:
     plugin: kernel
     source: SRCTREE
-    source-type: tar
+    source-type: local
     kernel-with-firmware: false
index 40ae6b2c7a6da590f36d33caa543fd1376ba4945..3e4f54799cc0a5a366a222b66ef87d9abd9b5a36 100644 (file)
@@ -590,7 +590,6 @@ static int do_file(char const *const fname)
                ideal_nop = ideal_nop4_arm64;
                is_fake_mcount64 = arm64_is_fake_mcount;
                break;
-       case EM_IA_64:  reltype = R_IA64_IMM64; break;
        case EM_MIPS:   /* reltype: e_class    */ break;
        case EM_LOONGARCH:      /* reltype: e_class    */ break;
        case EM_PPC:    reltype = R_PPC_ADDR32; break;
index 6a4645a5797603c7a60ad95c4eaed5c25fbb49e2..f84df9e383fd0acf75b9afb87422aff9c088e3a8 100755 (executable)
@@ -275,13 +275,6 @@ if ($arch eq "x86_64") {
     $section_type = '%progbits';
     $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_AARCH64_CALL26\\s+_mcount\$";
     $type = ".quad";
-} elsif ($arch eq "ia64") {
-    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
-    $type = "data8";
-
-    if ($is_module eq "0") {
-       $cc .= " -mconstant-gp";
-    }
 } elsif ($arch eq "sparc64") {
     # In the objdump output there are giblets like:
     # 0000000000000000 <igmp_net_exit-0x18>:
index a70d43723146d1c71f60bdea56a9f2b8a595b535..191e0461d6d5bd7e0c6be5bce059f106e2da3af6 100755 (executable)
@@ -3,7 +3,7 @@
 # Generate tags or cscope files
 # Usage tags.sh <mode>
 #
-# mode may be any of: tags, TAGS, cscope
+# mode may be any of: tags, gtags, TAGS, cscope
 #
 # Uses the following environment variables:
 # SUBARCH, SRCARCH, srctree
@@ -50,7 +50,7 @@ fi
 find_arch_sources()
 {
        for i in $archincludedir; do
-               prune="$prune -wholename $i -prune -o"
+               local prune="$prune ( -path $i ) -prune -o"
        done
        find ${tree}arch/$1 $ignore $prune -name "$2" -not -type l -print;
 }
@@ -58,7 +58,7 @@ find_arch_sources()
 # find sources in arch/$1/include
 find_arch_include_sources()
 {
-       include=$(find ${tree}arch/$1/ -name include -type d -print);
+       local include=$(find ${tree}arch/$1/ -name include -type d -print);
        if [ -n "$include" ]; then
                archincludedir="$archincludedir $include"
                find $include $ignore -name "$2" -not -type l -print;
@@ -81,21 +81,16 @@ find_other_sources()
               -name "$1" -not -type l -print;
 }
 
-find_sources()
-{
-       find_arch_sources $1 "$2"
-}
-
 all_sources()
 {
        find_arch_include_sources ${SRCARCH} '*.[chS]'
-       if [ ! -z "$archinclude" ]; then
+       if [ -n "$archinclude" ]; then
                find_arch_include_sources $archinclude '*.[chS]'
        fi
        find_include_sources '*.[chS]'
        for arch in $ALLSOURCE_ARCHS
        do
-               find_sources $arch '*.[chS]'
+               find_arch_sources $arch '*.[chS]'
        done
        find_other_sources '*.[chS]'
 }
@@ -125,7 +120,7 @@ all_kconfigs()
        find ${tree}arch/ -maxdepth 1 $ignore \
               -name "Kconfig*" -not -type l -print;
        for arch in $ALLSOURCE_ARCHS; do
-               find_sources $arch 'Kconfig*'
+               find_arch_sources $arch 'Kconfig*'
        done
        find_other_sources 'Kconfig*'
 }
index 76e9cbcfbeab457bde700e733a5aae9f5ca1598f..d06baf626abe79d11401cb4a831b06e450c0b360 100755 (executable)
@@ -15,7 +15,6 @@ LZMA2OPTS=
 case $SRCARCH in
        x86)            BCJ=--x86 ;;
        powerpc)        BCJ=--powerpc ;;
-       ia64)           BCJ=--ia64; LZMA2OPTS=pb=4 ;;
        arm)            BCJ=--arm ;;
        sparc)          BCJ=--sparc ;;
 esac
index e0d1dd0a192a9d944d9e78d4fe09076500004682..64cc3044a42cedce62a745a9d19e0e2e9fab64e2 100644 (file)
@@ -57,10 +57,10 @@ config SECURITY_APPARMOR_INTROSPECT_POLICY
          cpu is paramount.
 
 config SECURITY_APPARMOR_HASH
-       bool "Enable introspection of sha1 hashes for loaded profiles"
+       bool "Enable introspection of sha256 hashes for loaded profiles"
        depends on SECURITY_APPARMOR_INTROSPECT_POLICY
        select CRYPTO
-       select CRYPTO_SHA1
+       select CRYPTO_SHA256
        default y
        help
          This option selects whether introspection of loaded policy
@@ -74,10 +74,10 @@ config SECURITY_APPARMOR_HASH_DEFAULT
        depends on SECURITY_APPARMOR_HASH
        default y
        help
-         This option selects whether sha1 hashing of loaded policy
-        is enabled by default. The generation of sha1 hashes for
-        loaded policy provide system administrators a quick way
-        to verify that policy in the kernel matches what is expected,
+        This option selects whether sha256 hashing of loaded policy
+        is enabled by default. The generation of sha256 hashes for
+        loaded policy provide system administrators a quick way to
+        verify that policy in the kernel matches what is expected,
         however it can slow down policy load on some devices. In
         these cases policy hashing can be disabled by default and
         enabled only if needed.
index f3c77825aa7529ba3df5f8fa4933917e7c044a5a..bcfea073e3f2e386ada23becdd1e1be92f4878e1 100644 (file)
@@ -1474,7 +1474,7 @@ int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata)
        rawdata->dents[AAFS_LOADDATA_REVISION] = dent;
 
        if (aa_g_hash_policy) {
-               dent = aafs_create_file("sha1", S_IFREG | 0444, dir,
+               dent = aafs_create_file("sha256", S_IFREG | 0444, dir,
                                              rawdata, &seq_rawdata_hash_fops);
                if (IS_ERR(dent))
                        goto fail;
@@ -1643,11 +1643,11 @@ static const char *rawdata_get_link_base(struct dentry *dentry,
        return target;
 }
 
-static const char *rawdata_get_link_sha1(struct dentry *dentry,
+static const char *rawdata_get_link_sha256(struct dentry *dentry,
                                         struct inode *inode,
                                         struct delayed_call *done)
 {
-       return rawdata_get_link_base(dentry, inode, done, "sha1");
+       return rawdata_get_link_base(dentry, inode, done, "sha256");
 }
 
 static const char *rawdata_get_link_abi(struct dentry *dentry,
@@ -1664,8 +1664,8 @@ static const char *rawdata_get_link_data(struct dentry *dentry,
        return rawdata_get_link_base(dentry, inode, done, "raw_data");
 }
 
-static const struct inode_operations rawdata_link_sha1_iops = {
-       .get_link       = rawdata_get_link_sha1,
+static const struct inode_operations rawdata_link_sha256_iops = {
+       .get_link       = rawdata_get_link_sha256,
 };
 
 static const struct inode_operations rawdata_link_abi_iops = {
@@ -1738,7 +1738,7 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
        profile->dents[AAFS_PROF_ATTACH] = dent;
 
        if (profile->hash) {
-               dent = create_profile_file(dir, "sha1", profile,
+               dent = create_profile_file(dir, "sha256", profile,
                                           &seq_profile_hash_fops);
                if (IS_ERR(dent))
                        goto fail;
@@ -1748,9 +1748,9 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
 #ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
        if (profile->rawdata) {
                if (aa_g_hash_policy) {
-                       dent = aafs_create("raw_sha1", S_IFLNK | 0444, dir,
+                       dent = aafs_create("raw_sha256", S_IFLNK | 0444, dir,
                                           profile->label.proxy, NULL, NULL,
-                                          &rawdata_link_sha1_iops);
+                                          &rawdata_link_sha256_iops);
                        if (IS_ERR(dent))
                                goto fail;
                        aa_get_proxy(profile->label.proxy);
index 6724e2ff6da8900127a19ea609fabf81764e12a0..aad486b2fca65482981ffbf47d11d4c448481c5e 100644 (file)
@@ -106,16 +106,16 @@ static int __init init_profile_hash(void)
        if (!apparmor_initialized)
                return 0;
 
-       tfm = crypto_alloc_shash("sha1", 0, 0);
+       tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm)) {
                int error = PTR_ERR(tfm);
-               AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
+               AA_ERROR("failed to setup profile sha256 hashing: %d\n", error);
                return error;
        }
        apparmor_tfm = tfm;
        apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
 
-       aa_info_message("AppArmor sha1 policy hashing enabled");
+       aa_info_message("AppArmor sha256 policy hashing enabled");
 
        return 0;
 }
index 89fbeab4b33bd89041ec5af15aac390034249165..571158ec6188f92cfb5082e8df117aff0bc914f9 100644 (file)
@@ -1311,7 +1311,7 @@ static int change_profile_perms_wrapper(const char *op, const char *name,
        return error;
 }
 
-const char *stack_msg = "change_profile unprivileged unconfined converted to stacking";
+static const char *stack_msg = "change_profile unprivileged unconfined converted to stacking";
 
 /**
  * aa_change_profile - perform a one-way profile transition
index 4c198d273f091d35ca7eb7caf657f9c650c2dcd5..cd569fbbfe36d29a741c9fe8eb449a82dfdf160f 100644 (file)
@@ -41,6 +41,7 @@ void aa_free_str_table(struct aa_str_table *t)
                        kfree_sensitive(t->table[i]);
                kfree_sensitive(t->table);
                t->table = NULL;
+               t->size = 0;
        }
 }
 
index e490a70004089f102335e17d77c7d5005e90db03..98e1150bee9d0cbecb79c7e81cb05159f6160b04 100644 (file)
@@ -469,8 +469,10 @@ static int apparmor_file_open(struct file *file)
         * Cache permissions granted by the previous exec check, with
         * implicit read and executable mmap which are required to
         * actually execute the image.
+        *
+        * Illogically, FMODE_EXEC is in f_flags, not f_mode.
         */
-       if (current->in_execve) {
+       if (file->f_flags & __FMODE_EXEC) {
                fctx->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
                return 0;
        }
@@ -1023,7 +1025,6 @@ static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo
                cl = aa_get_newest_cred_label(cred);
                error = aa_may_signal(cred, cl, tc, tl, sig);
                aa_put_label(cl);
-               return error;
        } else {
                cl = __begin_current_label_crit_section();
                error = aa_may_signal(current_cred(), cl, tc, tl, sig);
@@ -1056,9 +1057,6 @@ static int apparmor_userns_create(const struct cred *cred)
        return error;
 }
 
-/**
- * apparmor_sk_alloc_security - allocate and attach the sk_security field
- */
 static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
 {
        struct aa_sk_ctx *ctx;
@@ -1072,9 +1070,6 @@ static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
        return 0;
 }
 
-/**
- * apparmor_sk_free_security - free the sk_security field
- */
 static void apparmor_sk_free_security(struct sock *sk)
 {
        struct aa_sk_ctx *ctx = aa_sock(sk);
@@ -1087,6 +1082,8 @@ static void apparmor_sk_free_security(struct sock *sk)
 
 /**
  * apparmor_sk_clone_security - clone the sk_security field
+ * @sk: sock to have security cloned
+ * @newsk: sock getting clone
  */
 static void apparmor_sk_clone_security(const struct sock *sk,
                                       struct sock *newsk)
@@ -1103,9 +1100,6 @@ static void apparmor_sk_clone_security(const struct sock *sk,
        new->peer = aa_get_label(ctx->peer);
 }
 
-/**
- * apparmor_socket_create - check perms before creating a new socket
- */
 static int apparmor_socket_create(int family, int type, int protocol, int kern)
 {
        struct aa_label *label;
@@ -1127,10 +1121,14 @@ static int apparmor_socket_create(int family, int type, int protocol, int kern)
 
 /**
  * apparmor_socket_post_create - setup the per-socket security struct
+ * @sock: socket that is being setup
+ * @family: family of socket being created
+ * @type: type of the socket
+ * @ptotocol: protocol of the socket
+ * @kern: socket is a special kernel socket
  *
  * Note:
- * -   kernel sockets currently labeled unconfined but we may want to
- *     move to a special kernel label
+ * -   kernel sockets labeled kernel_t used to use unconfined
  * -   socket may not have sk here if created with sock_create_lite or
  *     sock_alloc. These should be accept cases which will be handled in
  *     sock_graft.
@@ -1156,9 +1154,6 @@ static int apparmor_socket_post_create(struct socket *sock, int family,
        return 0;
 }
 
-/**
- * apparmor_socket_bind - check perms before bind addr to socket
- */
 static int apparmor_socket_bind(struct socket *sock,
                                struct sockaddr *address, int addrlen)
 {
@@ -1172,9 +1167,6 @@ static int apparmor_socket_bind(struct socket *sock,
                         aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk));
 }
 
-/**
- * apparmor_socket_connect - check perms before connecting @sock to @address
- */
 static int apparmor_socket_connect(struct socket *sock,
                                   struct sockaddr *address, int addrlen)
 {
@@ -1188,9 +1180,6 @@ static int apparmor_socket_connect(struct socket *sock,
                         aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk));
 }
 
-/**
- * apparmor_socket_listen - check perms before allowing listen
- */
 static int apparmor_socket_listen(struct socket *sock, int backlog)
 {
        AA_BUG(!sock);
@@ -1202,9 +1191,7 @@ static int apparmor_socket_listen(struct socket *sock, int backlog)
                         aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk));
 }
 
-/**
- * apparmor_socket_accept - check perms before accepting a new connection.
- *
+/*
  * Note: while @newsock is created and has some information, the accept
  *       has not been done.
  */
@@ -1233,18 +1220,12 @@ static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
                         aa_sk_perm(op, request, sock->sk));
 }
 
-/**
- * apparmor_socket_sendmsg - check perms before sending msg to another socket
- */
 static int apparmor_socket_sendmsg(struct socket *sock,
                                   struct msghdr *msg, int size)
 {
        return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
 }
 
-/**
- * apparmor_socket_recvmsg - check perms before receiving a message
- */
 static int apparmor_socket_recvmsg(struct socket *sock,
                                   struct msghdr *msg, int size, int flags)
 {
@@ -1263,17 +1244,11 @@ static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
                         aa_sk_perm(op, request, sock->sk));
 }
 
-/**
- * apparmor_socket_getsockname - check perms before getting the local address
- */
 static int apparmor_socket_getsockname(struct socket *sock)
 {
        return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
 }
 
-/**
- * apparmor_socket_getpeername - check perms before getting remote address
- */
 static int apparmor_socket_getpeername(struct socket *sock)
 {
        return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
@@ -1292,9 +1267,6 @@ static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
                         aa_sk_perm(op, request, sock->sk));
 }
 
-/**
- * apparmor_socket_getsockopt - check perms before getting socket options
- */
 static int apparmor_socket_getsockopt(struct socket *sock, int level,
                                      int optname)
 {
@@ -1302,9 +1274,6 @@ static int apparmor_socket_getsockopt(struct socket *sock, int level,
                                level, optname);
 }
 
-/**
- * apparmor_socket_setsockopt - check perms before setting socket options
- */
 static int apparmor_socket_setsockopt(struct socket *sock, int level,
                                      int optname)
 {
@@ -1312,9 +1281,6 @@ static int apparmor_socket_setsockopt(struct socket *sock, int level,
                                level, optname);
 }
 
-/**
- * apparmor_socket_shutdown - check perms before shutting down @sock conn
- */
 static int apparmor_socket_shutdown(struct socket *sock, int how)
 {
        return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
@@ -1323,6 +1289,8 @@ static int apparmor_socket_shutdown(struct socket *sock, int how)
 #ifdef CONFIG_NETWORK_SECMARK
 /**
  * apparmor_socket_sock_rcv_skb - check perms before associating skb to sk
+ * @sk: sk to associate @skb with
+ * @skb: skb to check for perms
  *
  * Note: can not sleep may be called with locks held
  *
@@ -1354,6 +1322,11 @@ static struct aa_label *sk_peer_label(struct sock *sk)
 
 /**
  * apparmor_socket_getpeersec_stream - get security context of peer
+ * @sock: socket that we are trying to get the peer context of
+ * @optval: output - buffer to copy peer name to
+ * @optlen: output - size of copied name in @optval
+ * @len: size of @optval buffer
+ * Returns: 0 on success, -errno of failure
  *
  * Note: for tcp only valid if using ipsec or cipso on lan
  */
@@ -2182,7 +2155,7 @@ __initcall(apparmor_nf_ip_init);
 static char nulldfa_src[] = {
        #include "nulldfa.in"
 };
-struct aa_dfa *nulldfa;
+static struct aa_dfa *nulldfa;
 
 static char stacksplitdfa_src[] = {
        #include "stacksplitdfa.in"
index ed4c9803c8fad82adc9723e255dfdcda22cf9083..957654d253dd74cb50ac2a666a1588a03ac2e821 100644 (file)
@@ -99,13 +99,14 @@ const char *const aa_profile_mode_names[] = {
 };
 
 
-static void aa_free_pdb(struct aa_policydb *policy)
+static void aa_free_pdb(struct aa_policydb *pdb)
 {
-       if (policy) {
-               aa_put_dfa(policy->dfa);
-               if (policy->perms)
-                       kvfree(policy->perms);
-               aa_free_str_table(&policy->trans);
+       if (pdb) {
+               aa_put_dfa(pdb->dfa);
+               if (pdb->perms)
+                       kvfree(pdb->perms);
+               aa_free_str_table(&pdb->trans);
+               kfree(pdb);
        }
 }
 
index 47ec097d6741fe0fc6c33bfc0c844f331f704f17..5e578ef0ddffb1f7adb0cc9863bcbd18ec9a562b 100644 (file)
@@ -478,6 +478,8 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
                if (!table)
                        goto fail;
 
+               strs->table = table;
+               strs->size = size;
                for (i = 0; i < size; i++) {
                        char *str;
                        int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
@@ -520,14 +522,11 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
                        goto fail;
                if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
                        goto fail;
-
-               strs->table = table;
-               strs->size = size;
        }
        return true;
 
 fail:
-       kfree_sensitive(table);
+       aa_free_str_table(strs);
        e->pos = saved_pos;
        return false;
 }
@@ -833,6 +832,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
 
        tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
        if (tmpns) {
+               if (!tmpname) {
+                       info = "empty profile name";
+                       goto fail;
+               }
                *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
                if (!*ns_name) {
                        info = "out of memory";
@@ -1022,8 +1025,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
                }
        } else if (rules->policy->dfa &&
                   rules->policy->start[AA_CLASS_FILE]) {
+               aa_put_pdb(rules->file);
                rules->file = aa_get_pdb(rules->policy);
        } else {
+               aa_put_pdb(rules->file);
                rules->file = aa_get_pdb(nullpdb);
        }
        error = -EPROTO;
index f29a2e80e6bf68cbc225ef3970fd12ca29526284..c87fb9f4ac18ae91d627fb8424267da0f791291e 100644 (file)
@@ -278,7 +278,9 @@ static int profile_tracer_perm(const struct cred *cred,
 
 /**
  * aa_may_ptrace - test if tracer task can trace the tracee
+ * @tracer_cred: cred of task doing the tracing  (NOT NULL)
  * @tracer: label of the task doing the tracing  (NOT NULL)
+ * @tracee_cred: cred of task to be traced
  * @tracee: task label to be traced
  * @request: permission request
  *
index 76f55dd13cb801078ba71079bf7e1c58eb2ada3b..8af2136069d239129c2994e5ee0f3e9b696ed7ea 100644 (file)
@@ -237,10 +237,6 @@ static int datablob_parse(char *datablob, const char **format,
                        break;
                }
                *decrypted_data = strsep(&datablob, " \t");
-               if (!*decrypted_data) {
-                       pr_info("encrypted_key: decrypted_data is missing\n");
-                       break;
-               }
                ret = 0;
                break;
        case Opt_load:
index 0144a98d3712e66733462a37a47518beb2eab743..3aaad75c9ce8531b6f214ad8bd469d1c571908c7 100644 (file)
@@ -4255,7 +4255,19 @@ EXPORT_SYMBOL(security_inode_setsecctx);
  */
 int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
 {
-       return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
+       struct security_hook_list *hp;
+       int rc;
+
+       /*
+        * Only one module will provide a security context.
+        */
+       hlist_for_each_entry(hp, &security_hook_heads.inode_getsecctx, list) {
+               rc = hp->hook.inode_getsecctx(inode, ctx, ctxlen);
+               if (rc != LSM_RET_DEFAULT(inode_getsecctx))
+                       return rc;
+       }
+
+       return LSM_RET_DEFAULT(inode_getsecctx);
 }
 EXPORT_SYMBOL(security_inode_getsecctx);
 
@@ -4612,8 +4624,20 @@ EXPORT_SYMBOL(security_sock_rcv_skb);
 int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
                                      sockptr_t optlen, unsigned int len)
 {
-       return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
-                            optval, optlen, len);
+       struct security_hook_list *hp;
+       int rc;
+
+       /*
+        * Only one module will provide a security context.
+        */
+       hlist_for_each_entry(hp, &security_hook_heads.socket_getpeersec_stream,
+                            list) {
+               rc = hp->hook.socket_getpeersec_stream(sock, optval, optlen,
+                                                      len);
+               if (rc != LSM_RET_DEFAULT(socket_getpeersec_stream))
+                       return rc;
+       }
+       return LSM_RET_DEFAULT(socket_getpeersec_stream);
 }
 
 /**
@@ -4633,8 +4657,19 @@ int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
 int security_socket_getpeersec_dgram(struct socket *sock,
                                     struct sk_buff *skb, u32 *secid)
 {
-       return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
-                            skb, secid);
+       struct security_hook_list *hp;
+       int rc;
+
+       /*
+        * Only one module will provide a security context.
+        */
+       hlist_for_each_entry(hp, &security_hook_heads.socket_getpeersec_dgram,
+                            list) {
+               rc = hp->hook.socket_getpeersec_dgram(sock, skb, secid);
+               if (rc != LSM_RET_DEFAULT(socket_getpeersec_dgram))
+                       return rc;
+       }
+       return LSM_RET_DEFAULT(socket_getpeersec_dgram);
 }
 EXPORT_SYMBOL(security_socket_getpeersec_dgram);
 
index 3c3af149bf1c12a94c318d188984ab4bda4a2edc..04a92c3d65d44de5502dd5955146e58cba4f4978 100644 (file)
@@ -328,7 +328,8 @@ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
 static int tomoyo_file_open(struct file *f)
 {
        /* Don't check read permission here if called from execve(). */
-       if (current->in_execve)
+       /* Illogically, FMODE_EXEC is in f_flags, not f_mode. */
+       if (f->f_flags & __FMODE_EXEC)
                return 0;
        return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path,
                                            f->f_flags);
index a09f0154e6a7029c72fb3f0d6d8bd36f202b72c8..d0788126cbab10a2ef8daaab9201f366f27d8c63 100644 (file)
@@ -211,6 +211,10 @@ static const char * const snd_pcm_format_names[] = {
        FORMAT(DSD_U32_LE),
        FORMAT(DSD_U16_BE),
        FORMAT(DSD_U32_BE),
+       FORMAT(S20_LE),
+       FORMAT(S20_BE),
+       FORMAT(U20_LE),
+       FORMAT(U20_BE),
 };
 
 /**
index e87dc67f33c692567da42cffc97d0cd072818b5a..1c65e0a3b13ce875f7416d3f63912f609080ffc0 100644 (file)
@@ -322,6 +322,17 @@ static int loopback_snd_timer_close_cable(struct loopback_pcm *dpcm)
        return 0;
 }
 
+static bool is_access_interleaved(snd_pcm_access_t access)
+{
+       switch (access) {
+       case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
+       case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
+               return true;
+       default:
+               return false;
+       }
+};
+
 static int loopback_check_format(struct loopback_cable *cable, int stream)
 {
        struct snd_pcm_runtime *runtime, *cruntime;
@@ -341,7 +352,8 @@ static int loopback_check_format(struct loopback_cable *cable, int stream)
        check = runtime->format != cruntime->format ||
                runtime->rate != cruntime->rate ||
                runtime->channels != cruntime->channels ||
-               runtime->access != cruntime->access;
+               is_access_interleaved(runtime->access) !=
+               is_access_interleaved(cruntime->access);
        if (!check)
                return 0;
        if (stream == SNDRV_PCM_STREAM_CAPTURE) {
@@ -369,7 +381,8 @@ static int loopback_check_format(struct loopback_cable *cable, int stream)
                                                        &setup->channels_id);
                        setup->channels = runtime->channels;
                }
-               if (setup->access != runtime->access) {
+               if (is_access_interleaved(setup->access) !=
+                   is_access_interleaved(runtime->access)) {
                        snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
                                                        &setup->access_id);
                        setup->access = runtime->access;
@@ -584,8 +597,7 @@ static void copy_play_buf(struct loopback_pcm *play,
                        size = play->pcm_buffer_size - src_off;
                if (dst_off + size > capt->pcm_buffer_size)
                        size = capt->pcm_buffer_size - dst_off;
-               if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ||
-                   runtime->access == SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED)
+               if (!is_access_interleaved(runtime->access))
                        copy_play_buf_part_n(play, capt, size, src_off, dst_off);
                else
                        memcpy(dst + dst_off, src + src_off, size);
@@ -1544,8 +1556,7 @@ static int loopback_access_get(struct snd_kcontrol *kcontrol,
        mutex_lock(&loopback->cable_lock);
        access = loopback->setup[kcontrol->id.subdevice][kcontrol->id.device].access;
 
-       ucontrol->value.enumerated.item[0] = access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ||
-                                            access == SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
+       ucontrol->value.enumerated.item[0] = !is_access_interleaved(access);
 
        mutex_unlock(&loopback->cable_lock);
        return 0;
index c8db6c75d133d152cdd29c02e7ae7a77ffe519b2..d6e5aafd697c7e4e06ae8692e935de4bc206c192 100644 (file)
@@ -100,8 +100,8 @@ static void snd_serial_generic_write_wakeup(struct serdev_device *serdev)
        snd_serial_generic_tx_wakeup(drvdata);
 }
 
-static int snd_serial_generic_receive_buf(struct serdev_device *serdev,
-                               const unsigned char *buf, size_t count)
+static ssize_t snd_serial_generic_receive_buf(struct serdev_device *serdev,
+                                             const u8 *buf, size_t count)
 {
        int ret;
        struct snd_serial_generic *drvdata = serdev_device_get_drvdata(serdev);
index 35277ce890a46fb9204cc80a55e812cb64c842d5..d74cf11eef1ea6fcb1bae71b9c00aad87203de40 100644 (file)
@@ -76,6 +76,8 @@ static const struct cs35l41_config cs35l41_config_table[] = {
        { "10431533", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
        { "10431573", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
        { "10431663", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 1000, 4500, 24 },
+       { "10431683", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+       { "104316A3", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
        { "104316D3", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
        { "104316F3", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
        { "104317F3", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
@@ -410,6 +412,8 @@ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
        { "CSC3551", "10431533", generic_dsd_config },
        { "CSC3551", "10431573", generic_dsd_config },
        { "CSC3551", "10431663", generic_dsd_config },
+       { "CSC3551", "10431683", generic_dsd_config },
+       { "CSC3551", "104316A3", generic_dsd_config },
        { "CSC3551", "104316D3", generic_dsd_config },
        { "CSC3551", "104316F3", generic_dsd_config },
        { "CSC3551", "104317F3", generic_dsd_config },
index b61e1de8c4bf905a6bddd8bc859da2dc9b8cd408..75a14ba54fcd1c270459b47be66ecb6aa799aa33 100644 (file)
   *  ASP1_RX_WL = 24 bits per sample
   *  ASP1_TX_WL = 24 bits per sample
   *  ASP1_RXn_EN 1..3 and ASP1_TXn_EN 1..4 disabled
+  *
+  * Override any Windows-specific mixer settings applied by the firmware.
   */
 static const struct reg_sequence cs35l56_hda_dai_config[] = {
        { CS35L56_ASP1_CONTROL1,        0x00000021 },
        { CS35L56_ASP1_CONTROL2,        0x20200200 },
        { CS35L56_ASP1_CONTROL3,        0x00000003 },
+       { CS35L56_ASP1_FRAME_CONTROL1,  0x03020100 },
+       { CS35L56_ASP1_FRAME_CONTROL5,  0x00020100 },
        { CS35L56_ASP1_DATA_CONTROL5,   0x00000018 },
        { CS35L56_ASP1_DATA_CONTROL1,   0x00000018 },
        { CS35L56_ASP1_ENABLES1,        0x00000000 },
+       { CS35L56_ASP1TX1_INPUT,        0x00000018 },
+       { CS35L56_ASP1TX2_INPUT,        0x00000019 },
+       { CS35L56_ASP1TX3_INPUT,        0x00000020 },
+       { CS35L56_ASP1TX4_INPUT,        0x00000028 },
+
 };
 
 static void cs35l56_hda_play(struct cs35l56_hda *cs35l56)
@@ -133,6 +142,10 @@ static int cs35l56_hda_runtime_resume(struct device *dev)
                }
        }
 
+       ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+       if (ret)
+               goto err;
+
        return 0;
 
 err:
@@ -384,7 +397,7 @@ static const struct cs_dsp_client_ops cs35l56_hda_client_ops = {
 
 static int cs35l56_hda_request_firmware_file(struct cs35l56_hda *cs35l56,
                                             const struct firmware **firmware, char **filename,
-                                            const char *dir, const char *system_name,
+                                            const char *base_name, const char *system_name,
                                             const char *amp_name,
                                             const char *filetype)
 {
@@ -392,17 +405,13 @@ static int cs35l56_hda_request_firmware_file(struct cs35l56_hda *cs35l56,
        int ret = 0;
 
        if (system_name && amp_name)
-               *filename = kasprintf(GFP_KERNEL, "%scs35l56%s-%02x-dsp1-misc-%s-%s.%s", dir,
-                                     cs35l56->base.secured ? "s" : "", cs35l56->base.rev,
+               *filename = kasprintf(GFP_KERNEL, "%s-%s-%s.%s", base_name,
                                      system_name, amp_name, filetype);
        else if (system_name)
-               *filename = kasprintf(GFP_KERNEL, "%scs35l56%s-%02x-dsp1-misc-%s.%s", dir,
-                                     cs35l56->base.secured ? "s" : "", cs35l56->base.rev,
+               *filename = kasprintf(GFP_KERNEL, "%s-%s.%s", base_name,
                                      system_name, filetype);
        else
-               *filename = kasprintf(GFP_KERNEL, "%scs35l56%s-%02x-dsp1-misc.%s", dir,
-                                     cs35l56->base.secured ? "s" : "", cs35l56->base.rev,
-                                     filetype);
+               *filename = kasprintf(GFP_KERNEL, "%s.%s", base_name, filetype);
 
        if (!*filename)
                return -ENOMEM;
@@ -435,8 +444,8 @@ static int cs35l56_hda_request_firmware_file(struct cs35l56_hda *cs35l56,
        return 0;
 }
 
-static const char cirrus_dir[] = "cirrus/";
 static void cs35l56_hda_request_firmware_files(struct cs35l56_hda *cs35l56,
+                                              unsigned int preloaded_fw_ver,
                                               const struct firmware **wmfw_firmware,
                                               char **wmfw_filename,
                                               const struct firmware **coeff_firmware,
@@ -444,55 +453,73 @@ static void cs35l56_hda_request_firmware_files(struct cs35l56_hda *cs35l56,
 {
        const char *system_name = cs35l56->system_name;
        const char *amp_name = cs35l56->amp_name;
+       char base_name[37];
        int ret;
 
+       if (preloaded_fw_ver) {
+               snprintf(base_name, sizeof(base_name),
+                        "cirrus/cs35l56-%02x%s-%06x-dsp1-misc",
+                        cs35l56->base.rev,
+                        cs35l56->base.secured ? "-s" : "",
+                        preloaded_fw_ver & 0xffffff);
+       } else {
+               snprintf(base_name, sizeof(base_name),
+                        "cirrus/cs35l56-%02x%s-dsp1-misc",
+                        cs35l56->base.rev,
+                        cs35l56->base.secured ? "-s" : "");
+       }
+
        if (system_name && amp_name) {
                if (!cs35l56_hda_request_firmware_file(cs35l56, wmfw_firmware, wmfw_filename,
-                                                      cirrus_dir, system_name, amp_name, "wmfw")) {
+                                                      base_name, system_name, amp_name, "wmfw")) {
                        cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
-                                                         cirrus_dir, system_name, amp_name, "bin");
+                                                         base_name, system_name, amp_name, "bin");
                        return;
                }
        }
 
        if (system_name) {
                if (!cs35l56_hda_request_firmware_file(cs35l56, wmfw_firmware, wmfw_filename,
-                                                      cirrus_dir, system_name, NULL, "wmfw")) {
+                                                      base_name, system_name, NULL, "wmfw")) {
                        if (amp_name)
                                cs35l56_hda_request_firmware_file(cs35l56,
                                                                  coeff_firmware, coeff_filename,
-                                                                 cirrus_dir, system_name,
+                                                                 base_name, system_name,
                                                                  amp_name, "bin");
                        if (!*coeff_firmware)
                                cs35l56_hda_request_firmware_file(cs35l56,
                                                                  coeff_firmware, coeff_filename,
-                                                                 cirrus_dir, system_name,
+                                                                 base_name, system_name,
                                                                  NULL, "bin");
                        return;
                }
+
+               /*
+                * Check for system-specific bin files without wmfw before
+                * falling back to generic firmware
+                */
+               if (amp_name)
+                       cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
+                                                         base_name, system_name, amp_name, "bin");
+               if (!*coeff_firmware)
+                       cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
+                                                         base_name, system_name, NULL, "bin");
+
+               if (*coeff_firmware)
+                       return;
        }
 
        ret = cs35l56_hda_request_firmware_file(cs35l56, wmfw_firmware, wmfw_filename,
-                                               cirrus_dir, NULL, NULL, "wmfw");
+                                               base_name, NULL, NULL, "wmfw");
        if (!ret) {
                cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
-                                                 cirrus_dir, NULL, NULL, "bin");
+                                                 base_name, NULL, NULL, "bin");
                return;
        }
 
-       /* When a firmware file is not found must still search for the coeff files */
-       if (system_name) {
-               if (amp_name)
-                       cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
-                                                         cirrus_dir, system_name, amp_name, "bin");
-               if (!*coeff_firmware)
-                       cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
-                                                         cirrus_dir, system_name, NULL, "bin");
-       }
-
        if (!*coeff_firmware)
                cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
-                                                 cirrus_dir, NULL, NULL, "bin");
+                                                 base_name, NULL, NULL, "bin");
 }
 
 static void cs35l56_hda_release_firmware_files(const struct firmware *wmfw_firmware,
@@ -526,7 +553,8 @@ static int cs35l56_hda_fw_load(struct cs35l56_hda *cs35l56)
        const struct firmware *wmfw_firmware = NULL;
        char *coeff_filename = NULL;
        char *wmfw_filename = NULL;
-       unsigned int firmware_missing;
+       unsigned int preloaded_fw_ver;
+       bool firmware_missing;
        int ret = 0;
 
        /* Prepare for a new DSP power-up */
@@ -537,24 +565,21 @@ static int cs35l56_hda_fw_load(struct cs35l56_hda *cs35l56)
 
        pm_runtime_get_sync(cs35l56->base.dev);
 
-       ret = regmap_read(cs35l56->base.regmap, CS35L56_PROTECTION_STATUS, &firmware_missing);
-       if (ret) {
-               dev_err(cs35l56->base.dev, "Failed to read PROTECTION_STATUS: %d\n", ret);
+       /*
+        * The firmware can only be upgraded if it is currently running
+        * from the built-in ROM. If not, the wmfw/bin must be for the
+        * version of firmware that is running on the chip.
+        */
+       ret = cs35l56_read_prot_status(&cs35l56->base, &firmware_missing, &preloaded_fw_ver);
+       if (ret)
                goto err_pm_put;
-       }
 
-       firmware_missing &= CS35L56_FIRMWARE_MISSING;
+       if (firmware_missing)
+               preloaded_fw_ver = 0;
 
-       /*
-        * Firmware can only be downloaded if the CS35L56 is secured or is
-        * running from the built-in ROM. If it is secured the BIOS will have
-        * downloaded firmware, and the wmfw/bin files will only contain
-        * tunings that are safe to download with the firmware running.
-        */
-       if (cs35l56->base.secured || firmware_missing) {
-               cs35l56_hda_request_firmware_files(cs35l56, &wmfw_firmware, &wmfw_filename,
-                                                  &coeff_firmware, &coeff_filename);
-       }
+       cs35l56_hda_request_firmware_files(cs35l56, preloaded_fw_ver,
+                                          &wmfw_firmware, &wmfw_filename,
+                                          &coeff_firmware, &coeff_filename);
 
        /*
         * If the BIOS didn't patch the firmware a bin file is mandatory to
@@ -569,12 +594,12 @@ static int cs35l56_hda_fw_load(struct cs35l56_hda *cs35l56)
        mutex_lock(&cs35l56->base.irq_lock);
 
        /*
-        * When the device is running in secure mode the firmware files can
-        * only contain insecure tunings and therefore we do not need to
-        * shutdown the firmware to apply them and can use the lower cost
-        * reinit sequence instead.
+        * If the firmware hasn't been patched it must be shutdown before
+        * doing a full patch and reset afterwards. If it is already
+        * running a patched version the firmware files only contain
+        * tunings and we can use the lower cost reinit sequence instead.
         */
-       if (!cs35l56->base.secured && (wmfw_firmware || coeff_firmware)) {
+       if (firmware_missing && (wmfw_firmware || coeff_firmware)) {
                ret = cs35l56_firmware_shutdown(&cs35l56->base);
                if (ret)
                        goto err;
@@ -593,7 +618,7 @@ static int cs35l56_hda_fw_load(struct cs35l56_hda *cs35l56)
        if (coeff_filename)
                dev_dbg(cs35l56->base.dev, "Loaded Coefficients: %s\n", coeff_filename);
 
-       if (cs35l56->base.secured) {
+       if (!firmware_missing) {
                ret = cs35l56_mbox_send(&cs35l56->base, CS35L56_MBOX_CMD_AUDIO_REINIT);
                if (ret)
                        goto err_powered_up;
@@ -976,6 +1001,9 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int id)
 
        regmap_multi_reg_write(cs35l56->base.regmap, cs35l56_hda_dai_config,
                               ARRAY_SIZE(cs35l56_hda_dai_config));
+       ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+       if (ret)
+               goto err;
 
        /*
         * By default only enable one ASP1TXn, where n=amplifier index,
@@ -1035,16 +1063,6 @@ const struct dev_pm_ops cs35l56_hda_pm_ops = {
 };
 EXPORT_SYMBOL_NS_GPL(cs35l56_hda_pm_ops, SND_HDA_SCODEC_CS35L56);
 
-#if IS_ENABLED(CONFIG_SND_HDA_SCODEC_CS35L56_KUNIT_TEST)
-/* Hooks to export static function to KUnit test */
-
-int cs35l56_hda_test_hook_get_speaker_id(struct device *dev, int amp_index, int num_amps)
-{
-       return cs35l56_hda_get_speaker_id(dev, amp_index, num_amps);
-}
-EXPORT_SYMBOL_NS_GPL(cs35l56_hda_test_hook_get_speaker_id, SND_HDA_SCODEC_CS35L56);
-#endif
-
 MODULE_DESCRIPTION("CS35L56 HDA Driver");
 MODULE_IMPORT_NS(SND_HDA_CIRRUS_SCODEC);
 MODULE_IMPORT_NS(SND_HDA_CS_DSP_CONTROLS);
index bf685d01259d30070aaf3ac7f3ed3204bc30c5bd..de2a3d08c73c1a7c49061bbe8a7fbb1a29664b8d 100644 (file)
@@ -3946,7 +3946,6 @@ static int create_mute_led_cdev(struct hda_codec *codec,
        cdev->max_brightness = 1;
        cdev->default_trigger = micmute ? "audio-micmute" : "audio-mute";
        cdev->brightness_set_blocking = callback;
-       cdev->brightness = ledtrig_audio_get(idx);
        cdev->flags = LED_CORE_SUSPENDRESUME;
 
        err = led_classdev_register(&codec->core.dev, cdev);
index 2276adc8447840a232eb493e5bac54af0cb35682..1b550c42db092739135e5917a74914894e254454 100644 (file)
@@ -1729,9 +1729,11 @@ static int default_bdl_pos_adj(struct azx *chip)
        /* some exceptions: Atoms seem problematic with value 1 */
        if (chip->pci->vendor == PCI_VENDOR_ID_INTEL) {
                switch (chip->pci->device) {
-               case 0x0f04: /* Baytrail */
-               case 0x2284: /* Braswell */
+               case PCI_DEVICE_ID_INTEL_HDA_BYT:
+               case PCI_DEVICE_ID_INTEL_HDA_BSW:
                        return 32;
+               case PCI_DEVICE_ID_INTEL_HDA_APL:
+                       return 64;
                }
        }
 
index 627899959ffe8c34c76211d51824e1d1e93d33a8..e41316e2e98338a5d69245ed9e9db1b979a2fdf2 100644 (file)
@@ -1371,6 +1371,7 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
                spec->scodecs[CS8409_CODEC1] = &dolphin_cs42l42_1;
                spec->scodecs[CS8409_CODEC1]->codec = codec;
                spec->num_scodecs = 2;
+               spec->gen.suppress_vmaster = 1;
 
                codec->patch_ops = cs8409_dolphin_patch_ops;
 
index 200779296a1b8b31239ae7ec4b643be88d24fa2e..495d63101186fd519523aab4d7fd8b25547143af 100644 (file)
@@ -2301,6 +2301,7 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
        codec_dbg(codec, "hdmi: pcm_num set to %d\n", pcm_num);
 
        for (idx = 0; idx < pcm_num; idx++) {
+               struct hdmi_spec_per_cvt *per_cvt;
                struct hda_pcm *info;
                struct hda_pcm_stream *pstr;
 
@@ -2316,6 +2317,11 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
                pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
                pstr->substreams = 1;
                pstr->ops = generic_ops;
+
+               per_cvt = get_cvt(spec, 0);
+               pstr->channels_min = per_cvt->channels_min;
+               pstr->channels_max = per_cvt->channels_max;
+
                /* pcm number is less than pcm_rec array size */
                if (spec->pcm_used >= ARRAY_SIZE(spec->pcm_rec))
                        break;
index b68c94757051057275953fe054fe5438be1f9c06..6994c4c5073cbb6fd5ad932186db35970c7cbd0f 100644 (file)
@@ -439,6 +439,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
                alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
                fallthrough;
        case 0x10ec0215:
+       case 0x10ec0285:
+       case 0x10ec0289:
+               alc_update_coef_idx(codec, 0x36, 1<<13, 0);
+               fallthrough;
        case 0x10ec0230:
        case 0x10ec0233:
        case 0x10ec0235:
@@ -452,9 +456,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0283:
        case 0x10ec0286:
        case 0x10ec0288:
-       case 0x10ec0285:
        case 0x10ec0298:
-       case 0x10ec0289:
        case 0x10ec0300:
                alc_update_coef_idx(codec, 0x10, 1<<9, 0);
                break;
@@ -9577,7 +9579,7 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_i2c_two,
                .chained = true,
-               .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+               .chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
        },
        [ALC287_FIXUP_TAS2781_I2C] = {
                .type = HDA_FIXUP_FUNC,
@@ -9604,6 +9606,8 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC287_FIXUP_THINKPAD_I2S_SPK] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc287_fixup_bind_dacs,
+               .chained = true,
+               .chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
        },
        [ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = {
                .type = HDA_FIXUP_FUNC,
@@ -9653,6 +9657,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
        SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1269, "Acer SWIFT SF314-54", ALC256_FIXUP_ACER_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x126a, "Acer Swift SF114-32", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
@@ -9732,6 +9737,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
        SND_PCI_QUIRK(0x1028, 0x0beb, "Dell XPS 15 9530 (2023)", ALC289_FIXUP_DELL_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0c0d, "Dell Oasis", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
        SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
        SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
@@ -9852,6 +9858,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8786, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
        SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
@@ -9861,6 +9868,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+       SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
        SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
@@ -9955,6 +9963,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
@@ -10231,6 +10242,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+       SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
        SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
@@ -10319,6 +10331,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+       SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
        SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
        SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
index 46705ec77b4810ae0fd958749580937db8830ce3..eb3aca16359c58f6f5985ba44ef2173b3d5afc50 100644 (file)
@@ -718,7 +718,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
        oldreg = oxygen_read_ac97(chip, 1, AC97_REC_GAIN);
        newreg = oldreg & ~0x0707;
        newreg = newreg | (value->value.integer.value[0] & 7);
-       newreg = newreg | ((value->value.integer.value[0] & 7) << 8);
+       newreg = newreg | ((value->value.integer.value[1] & 7) << 8);
        change = newreg != oldreg;
        if (change)
                oxygen_write_ac97(chip, 1, AC97_REC_GAIN, newreg);
index c90ec3419247797a0628bfa5207eaf3afcb8d012..504d1b8c4cbb4f104a8b8e70adf10894f211ce6c 100644 (file)
@@ -505,6 +505,13 @@ static int acp_card_rt5682s_hw_params(struct snd_pcm_substream *substream,
 
        clk_set_rate(drvdata->wclk, srate);
        clk_set_rate(drvdata->bclk, srate * ch * format);
+       if (!drvdata->soc_mclk) {
+               ret = acp_clk_enable(drvdata, srate, ch * format);
+               if (ret < 0) {
+                       dev_err(rtd->card->dev, "Failed to enable HS clk: %d\n", ret);
+                       return ret;
+               }
+       }
 
        return 0;
 }
@@ -1464,8 +1471,13 @@ int acp_sofdsp_dai_links_create(struct snd_soc_card *card)
        if (drv_data->amp_cpu_id == I2S_SP) {
                links[i].name = "acp-amp-codec";
                links[i].id = AMP_BE_ID;
-               links[i].cpus = sof_sp_virtual;
-               links[i].num_cpus = ARRAY_SIZE(sof_sp_virtual);
+               if (drv_data->platform == RENOIR) {
+                       links[i].cpus = sof_sp;
+                       links[i].num_cpus = ARRAY_SIZE(sof_sp);
+               } else {
+                       links[i].cpus = sof_sp_virtual;
+                       links[i].num_cpus = ARRAY_SIZE(sof_sp_virtual);
+               }
                links[i].platforms = sof_component;
                links[i].num_platforms = ARRAY_SIZE(sof_component);
                links[i].dpcm_playback = 1;
index 2a9fd3275e42f5fa1086d10baf4a8cc4cc2b69b1..20b94814a0462147258fe94cd4219b772afa45f0 100644 (file)
@@ -48,6 +48,7 @@ static struct acp_card_drvdata sof_rt5682s_rt1019_data = {
        .hs_codec_id = RT5682S,
        .amp_codec_id = RT1019,
        .dmic_codec_id = DMIC,
+       .platform = RENOIR,
        .tdm_mode = false,
 };
 
@@ -58,6 +59,7 @@ static struct acp_card_drvdata sof_rt5682s_max_data = {
        .hs_codec_id = RT5682S,
        .amp_codec_id = MAX98360A,
        .dmic_codec_id = DMIC,
+       .platform = RENOIR,
        .tdm_mode = false,
 };
 
@@ -68,6 +70,7 @@ static struct acp_card_drvdata sof_nau8825_data = {
        .hs_codec_id = NAU8825,
        .amp_codec_id = MAX98360A,
        .dmic_codec_id = DMIC,
+       .platform = REMBRANDT,
        .soc_mclk = true,
        .tdm_mode = false,
 };
@@ -79,6 +82,7 @@ static struct acp_card_drvdata sof_rt5682s_hs_rt1019_data = {
        .hs_codec_id = RT5682S,
        .amp_codec_id = RT1019,
        .dmic_codec_id = DMIC,
+       .platform = REMBRANDT,
        .soc_mclk = true,
        .tdm_mode = false,
 };
index f85b85ea4be9c28cf6ba1f29b4c7290908bc842d..2b0aa270a3e9d75c8ebd47aaf12e6fc0b73c75b3 100644 (file)
@@ -354,6 +354,14 @@ static const struct dmi_system_id acp3x_es83xx_dmi_table[] = {
                },
                .driver_data = (void *)(ES83XX_ENABLE_DMIC|ES83XX_48_MHZ_MCLK),
        },
+       {
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "HUAWEI"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HVY-WXX9"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "M1010"),
+               },
+               .driver_data = (void *)(ES83XX_ENABLE_DMIC),
+       },
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "HUAWEI"),
index d83cb6e4c62aecc6e54a700e5d22f136253e42fb..23d44a50d8157212aa5bbaa49ecd1e9acbe9aa00 100644 (file)
@@ -297,6 +297,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 B7ED"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7VF"),
+               }
+       },
        {
                .driver_data = &acp6x_card,
                .matches = {
index 953ba066bab1e30dfc22ea94bead73ce2f91c0fe..02fba4bc0a14f4cc44690ffed69bd5217409b6e9 100644 (file)
@@ -5,6 +5,7 @@
 // Copyright (C) 2023 Cirrus Logic, Inc. and
 //                    Cirrus Logic International Semiconductor Ltd.
 
+#include <linux/gpio/consumer.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/types.h>
 #include "cs35l56.h"
 
 static const struct reg_sequence cs35l56_patch[] = {
+       /*
+        * Firmware can change these to non-defaults to satisfy SDCA.
+        * Ensure that they are at known defaults.
+        */
+       { CS35L56_SWIRE_DP3_CH1_INPUT,          0x00000018 },
+       { CS35L56_SWIRE_DP3_CH2_INPUT,          0x00000019 },
+       { CS35L56_SWIRE_DP3_CH3_INPUT,          0x00000029 },
+       { CS35L56_SWIRE_DP3_CH4_INPUT,          0x00000028 },
+
        /* These are not reset by a soft-reset, so patch to defaults. */
        { CS35L56_MAIN_RENDER_USER_MUTE,        0x00000000 },
        { CS35L56_MAIN_RENDER_USER_VOLUME,      0x00000000 },
@@ -34,10 +44,9 @@ static const struct reg_default cs35l56_reg_defaults[] = {
        { CS35L56_ASP1_FRAME_CONTROL5,          0x00020100 },
        { CS35L56_ASP1_DATA_CONTROL1,           0x00000018 },
        { CS35L56_ASP1_DATA_CONTROL5,           0x00000018 },
-       { CS35L56_ASP1TX1_INPUT,                0x00000018 },
-       { CS35L56_ASP1TX2_INPUT,                0x00000019 },
-       { CS35L56_ASP1TX3_INPUT,                0x00000020 },
-       { CS35L56_ASP1TX4_INPUT,                0x00000028 },
+
+       /* no defaults for ASP1TX mixer */
+
        { CS35L56_SWIRE_DP3_CH1_INPUT,          0x00000018 },
        { CS35L56_SWIRE_DP3_CH2_INPUT,          0x00000019 },
        { CS35L56_SWIRE_DP3_CH3_INPUT,          0x00000029 },
@@ -195,6 +204,47 @@ static bool cs35l56_volatile_reg(struct device *dev, unsigned int reg)
        }
 }
 
+/*
+ * The firmware boot sequence can overwrite the ASP1 config registers so that
+ * they don't match regmap's view of their values. Rewrite the values from the
+ * regmap cache into the hardware registers.
+ */
+int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base)
+{
+       struct reg_sequence asp1_regs[] = {
+               { .reg = CS35L56_ASP1_ENABLES1 },
+               { .reg = CS35L56_ASP1_CONTROL1 },
+               { .reg = CS35L56_ASP1_CONTROL2 },
+               { .reg = CS35L56_ASP1_CONTROL3 },
+               { .reg = CS35L56_ASP1_FRAME_CONTROL1 },
+               { .reg = CS35L56_ASP1_FRAME_CONTROL5 },
+               { .reg = CS35L56_ASP1_DATA_CONTROL1 },
+               { .reg = CS35L56_ASP1_DATA_CONTROL5 },
+       };
+       int i, ret;
+
+       /* Read values from regmap cache into a write sequence */
+       for (i = 0; i < ARRAY_SIZE(asp1_regs); ++i) {
+               ret = regmap_read(cs35l56_base->regmap, asp1_regs[i].reg, &asp1_regs[i].def);
+               if (ret)
+                       goto err;
+       }
+
+       /* Write the values cache-bypassed so that they will be written to silicon */
+       ret = regmap_multi_reg_write_bypassed(cs35l56_base->regmap, asp1_regs,
+                                             ARRAY_SIZE(asp1_regs));
+       if (ret)
+               goto err;
+
+       return 0;
+
+err:
+       dev_err(cs35l56_base->dev, "Failed to sync ASP1 registers: %d\n", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cs35l56_force_sync_asp1_registers_from_cache, SND_SOC_CS35L56_SHARED);
+
 int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command)
 {
        unsigned int val;
@@ -400,17 +450,6 @@ int cs35l56_is_fw_reload_needed(struct cs35l56_base *cs35l56_base)
        unsigned int val;
        int ret;
 
-       /* Nothing to re-patch if we haven't done any patching yet. */
-       if (!cs35l56_base->fw_patched)
-               return false;
-
-       /*
-        * If we have control of RESET we will have asserted it so the firmware
-        * will need re-patching.
-        */
-       if (cs35l56_base->reset_gpio)
-               return true;
-
        /*
         * In secure mode FIRMWARE_MISSING is cleared by the BIOS loader so
         * can't be used here to test for memory retention.
@@ -590,10 +629,35 @@ void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_ds
 }
 EXPORT_SYMBOL_NS_GPL(cs35l56_init_cs_dsp, SND_SOC_CS35L56_SHARED);
 
+int cs35l56_read_prot_status(struct cs35l56_base *cs35l56_base,
+                            bool *fw_missing, unsigned int *fw_version)
+{
+       unsigned int prot_status;
+       int ret;
+
+       ret = regmap_read(cs35l56_base->regmap, CS35L56_PROTECTION_STATUS, &prot_status);
+       if (ret) {
+               dev_err(cs35l56_base->dev, "Get PROTECTION_STATUS failed: %d\n", ret);
+               return ret;
+       }
+
+       *fw_missing = !!(prot_status & CS35L56_FIRMWARE_MISSING);
+
+       ret = regmap_read(cs35l56_base->regmap, CS35L56_DSP1_FW_VER, fw_version);
+       if (ret) {
+               dev_err(cs35l56_base->dev, "Get FW VER failed: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs35l56_read_prot_status, SND_SOC_CS35L56_SHARED);
+
 int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
 {
        int ret;
-       unsigned int devid, revid, otpid, secured;
+       unsigned int devid, revid, otpid, secured, fw_ver;
+       bool fw_missing;
 
        /*
         * When the system is not using a reset_gpio ensure the device is
@@ -652,8 +716,13 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
                return ret;
        }
 
-       dev_info(cs35l56_base->dev, "Cirrus Logic CS35L56%s Rev %02X OTP%d\n",
-                cs35l56_base->secured ? "s" : "", cs35l56_base->rev, otpid);
+       ret = cs35l56_read_prot_status(cs35l56_base, &fw_missing, &fw_ver);
+       if (ret)
+               return ret;
+
+       dev_info(cs35l56_base->dev, "Cirrus Logic CS35L56%s Rev %02X OTP%d fw:%d.%d.%d (patched=%u)\n",
+                cs35l56_base->secured ? "s" : "", cs35l56_base->rev, otpid,
+                fw_ver >> 16, (fw_ver >> 8) & 0xff, fw_ver & 0xff, !fw_missing);
 
        /* Wake source and *_BLOCKED interrupts default to unmasked, so mask them */
        regmap_write(cs35l56_base->regmap, CS35L56_IRQ1_MASK_20, 0xffffffff);
@@ -668,6 +737,41 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
 }
 EXPORT_SYMBOL_NS_GPL(cs35l56_hw_init, SND_SOC_CS35L56_SHARED);
 
+int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base)
+{
+       struct gpio_descs *descs;
+       int speaker_id;
+       int i, ret;
+
+       /* Read the speaker type qualifier from the motherboard GPIOs */
+       descs = gpiod_get_array_optional(cs35l56_base->dev, "spk-id", GPIOD_IN);
+       if (!descs) {
+               return -ENOENT;
+       } else if (IS_ERR(descs)) {
+               ret = PTR_ERR(descs);
+               return dev_err_probe(cs35l56_base->dev, ret, "Failed to get spk-id-gpios\n");
+       }
+
+       speaker_id = 0;
+       for (i = 0; i < descs->ndescs; i++) {
+               ret = gpiod_get_value_cansleep(descs->desc[i]);
+               if (ret < 0) {
+                       dev_err_probe(cs35l56_base->dev, ret, "Failed to read spk-id[%d]\n", i);
+                       goto err;
+               }
+
+               speaker_id |= (ret << i);
+       }
+
+       dev_dbg(cs35l56_base->dev, "Speaker ID = %d\n", speaker_id);
+       ret = speaker_id;
+err:
+       gpiod_put_array(descs);
+
+       return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cs35l56_get_speaker_id, SND_SOC_CS35L56_SHARED);
+
 static const u32 cs35l56_bclk_valid_for_pll_freq_table[] = {
        [0x0C] = 128000,
        [0x0F] = 256000,
index 45b4de3eff94ffef7ba6ef8ffcccf11747f74415..c23e29da4cfb9f43fa52611fdeea96adad4a0b30 100644 (file)
@@ -59,6 +59,135 @@ static int cs35l56_dspwait_put_volsw(struct snd_kcontrol *kcontrol,
        return snd_soc_put_volsw(kcontrol, ucontrol);
 }
 
+static const unsigned short cs35l56_asp1_mixer_regs[] = {
+       CS35L56_ASP1TX1_INPUT, CS35L56_ASP1TX2_INPUT,
+       CS35L56_ASP1TX3_INPUT, CS35L56_ASP1TX4_INPUT,
+};
+
+static const char * const cs35l56_asp1_mux_control_names[] = {
+       "ASP1 TX1 Source", "ASP1 TX2 Source", "ASP1 TX3 Source", "ASP1 TX4 Source"
+};
+
+static int cs35l56_dspwait_asp1tx_get(struct snd_kcontrol *kcontrol,
+                                     struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol);
+       struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
+       struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+       int index = e->shift_l;
+       unsigned int addr, val;
+       int ret;
+
+       /* Wait for mux to be initialized */
+       cs35l56_wait_dsp_ready(cs35l56);
+       flush_work(&cs35l56->mux_init_work);
+
+       addr = cs35l56_asp1_mixer_regs[index];
+       ret = regmap_read(cs35l56->base.regmap, addr, &val);
+       if (ret)
+               return ret;
+
+       val &= CS35L56_ASP_TXn_SRC_MASK;
+       ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(e, val);
+
+       return 0;
+}
+
+static int cs35l56_dspwait_asp1tx_put(struct snd_kcontrol *kcontrol,
+                                     struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol);
+       struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
+       struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
+       struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+       int item = ucontrol->value.enumerated.item[0];
+       int index = e->shift_l;
+       unsigned int addr, val;
+       bool changed;
+       int ret;
+
+       /* Wait for mux to be initialized */
+       cs35l56_wait_dsp_ready(cs35l56);
+       flush_work(&cs35l56->mux_init_work);
+
+       addr = cs35l56_asp1_mixer_regs[index];
+       val = snd_soc_enum_item_to_val(e, item);
+
+       ret = regmap_update_bits_check(cs35l56->base.regmap, addr,
+                                      CS35L56_ASP_TXn_SRC_MASK, val, &changed);
+       if (!ret)
+               return ret;
+
+       if (changed)
+               snd_soc_dapm_mux_update_power(dapm, kcontrol, item, e, NULL);
+
+       return changed;
+}
+
+static void cs35l56_mark_asp1_mixer_widgets_dirty(struct cs35l56_private *cs35l56)
+{
+       struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cs35l56->component);
+       const char *prefix = cs35l56->component->name_prefix;
+       char full_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+       const char *name;
+       struct snd_kcontrol *kcontrol;
+       struct soc_enum *e;
+       unsigned int val[4];
+       int i, item, ret;
+
+       /*
+        * Resume so we can read the registers from silicon if the regmap
+        * cache has not yet been populated.
+        */
+       ret = pm_runtime_resume_and_get(cs35l56->base.dev);
+       if (ret < 0)
+               return;
+
+       ret = regmap_bulk_read(cs35l56->base.regmap, CS35L56_ASP1TX1_INPUT,
+                              val, ARRAY_SIZE(val));
+
+       pm_runtime_mark_last_busy(cs35l56->base.dev);
+       pm_runtime_put_autosuspend(cs35l56->base.dev);
+
+       if (ret) {
+               dev_err(cs35l56->base.dev, "Failed to read ASP1 mixer regs: %d\n", ret);
+               return;
+       }
+
+       snd_soc_card_mutex_lock(dapm->card);
+       WARN_ON(!dapm->card->instantiated);
+
+       for (i = 0; i < ARRAY_SIZE(cs35l56_asp1_mux_control_names); ++i) {
+               name = cs35l56_asp1_mux_control_names[i];
+
+               if (prefix) {
+                       snprintf(full_name, sizeof(full_name), "%s %s", prefix, name);
+                       name = full_name;
+               }
+
+               kcontrol = snd_soc_card_get_kcontrol(dapm->card, name);
+               if (!kcontrol) {
+                       dev_warn(cs35l56->base.dev, "Could not find control %s\n", name);
+                       continue;
+               }
+
+               e = (struct soc_enum *)kcontrol->private_value;
+               item = snd_soc_enum_val_to_item(e, val[i] & CS35L56_ASP_TXn_SRC_MASK);
+               snd_soc_dapm_mux_update_power(dapm, kcontrol, item, e, NULL);
+       }
+
+       snd_soc_card_mutex_unlock(dapm->card);
+}
+
+static void cs35l56_mux_init_work(struct work_struct *work)
+{
+       struct cs35l56_private *cs35l56 = container_of(work,
+                                                      struct cs35l56_private,
+                                                      mux_init_work);
+
+       cs35l56_mark_asp1_mixer_widgets_dirty(cs35l56);
+}
+
 static DECLARE_TLV_DB_SCALE(vol_tlv, -10000, 25, 0);
 
 static const struct snd_kcontrol_new cs35l56_controls[] = {
@@ -77,40 +206,44 @@ static const struct snd_kcontrol_new cs35l56_controls[] = {
 };
 
 static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx1_enum,
-                                 CS35L56_ASP1TX1_INPUT,
-                                 0, CS35L56_ASP_TXn_SRC_MASK,
+                                 SND_SOC_NOPM,
+                                 0, 0,
                                  cs35l56_tx_input_texts,
                                  cs35l56_tx_input_values);
 
 static const struct snd_kcontrol_new asp1_tx1_mux =
-       SOC_DAPM_ENUM("ASP1TX1 SRC", cs35l56_asp1tx1_enum);
+       SOC_DAPM_ENUM_EXT("ASP1TX1 SRC", cs35l56_asp1tx1_enum,
+                         cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
 
 static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx2_enum,
-                                 CS35L56_ASP1TX2_INPUT,
-                                 0, CS35L56_ASP_TXn_SRC_MASK,
+                                 SND_SOC_NOPM,
+                                 1, 0,
                                  cs35l56_tx_input_texts,
                                  cs35l56_tx_input_values);
 
 static const struct snd_kcontrol_new asp1_tx2_mux =
-       SOC_DAPM_ENUM("ASP1TX2 SRC", cs35l56_asp1tx2_enum);
+       SOC_DAPM_ENUM_EXT("ASP1TX2 SRC", cs35l56_asp1tx2_enum,
+                         cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
 
 static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx3_enum,
-                                 CS35L56_ASP1TX3_INPUT,
-                                 0, CS35L56_ASP_TXn_SRC_MASK,
+                                 SND_SOC_NOPM,
+                                 2, 0,
                                  cs35l56_tx_input_texts,
                                  cs35l56_tx_input_values);
 
 static const struct snd_kcontrol_new asp1_tx3_mux =
-       SOC_DAPM_ENUM("ASP1TX3 SRC", cs35l56_asp1tx3_enum);
+       SOC_DAPM_ENUM_EXT("ASP1TX3 SRC", cs35l56_asp1tx3_enum,
+                         cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
 
 static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx4_enum,
-                                 CS35L56_ASP1TX4_INPUT,
-                                 0, CS35L56_ASP_TXn_SRC_MASK,
+                                 SND_SOC_NOPM,
+                                 3, 0,
                                  cs35l56_tx_input_texts,
                                  cs35l56_tx_input_values);
 
 static const struct snd_kcontrol_new asp1_tx4_mux =
-       SOC_DAPM_ENUM("ASP1TX4 SRC", cs35l56_asp1tx4_enum);
+       SOC_DAPM_ENUM_EXT("ASP1TX4 SRC", cs35l56_asp1tx4_enum,
+                         cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
 
 static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_sdw1tx1_enum,
                                CS35L56_SWIRE_DP3_CH1_INPUT,
@@ -148,6 +281,21 @@ static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_sdw1tx4_enum,
 static const struct snd_kcontrol_new sdw1_tx4_mux =
        SOC_DAPM_ENUM("SDW1TX4 SRC", cs35l56_sdw1tx4_enum);
 
+static int cs35l56_asp1_cfg_event(struct snd_soc_dapm_widget *w,
+                                 struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+       struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               /* Override register values set by firmware boot */
+               return cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+       default:
+               return 0;
+       }
+}
+
 static int cs35l56_play_event(struct snd_soc_dapm_widget *w,
                              struct snd_kcontrol *kcontrol, int event)
 {
@@ -184,6 +332,9 @@ static const struct snd_soc_dapm_widget cs35l56_dapm_widgets[] = {
        SND_SOC_DAPM_REGULATOR_SUPPLY("VDD_B", 0, 0),
        SND_SOC_DAPM_REGULATOR_SUPPLY("VDD_AMP", 0, 0),
 
+       SND_SOC_DAPM_SUPPLY("ASP1 CFG", SND_SOC_NOPM, 0, 0, cs35l56_asp1_cfg_event,
+                           SND_SOC_DAPM_PRE_PMU),
+
        SND_SOC_DAPM_SUPPLY("PLAY", SND_SOC_NOPM, 0, 0, cs35l56_play_event,
                            SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
@@ -251,6 +402,9 @@ static const struct snd_soc_dapm_route cs35l56_audio_map[] = {
        { "AMP", NULL, "VDD_B" },
        { "AMP", NULL, "VDD_AMP" },
 
+       { "ASP1 Playback", NULL, "ASP1 CFG" },
+       { "ASP1 Capture", NULL, "ASP1 CFG" },
+
        { "ASP1 Playback", NULL, "PLAY" },
        { "SDW1 Playback", NULL, "PLAY" },
 
@@ -650,7 +804,7 @@ static struct snd_soc_dai_driver cs35l56_dai[] = {
        }
 };
 
-static void cs35l56_secure_patch(struct cs35l56_private *cs35l56)
+static void cs35l56_reinit_patch(struct cs35l56_private *cs35l56)
 {
        int ret;
 
@@ -662,19 +816,10 @@ static void cs35l56_secure_patch(struct cs35l56_private *cs35l56)
                cs35l56_mbox_send(&cs35l56->base, CS35L56_MBOX_CMD_AUDIO_REINIT);
 }
 
-static void cs35l56_patch(struct cs35l56_private *cs35l56)
+static void cs35l56_patch(struct cs35l56_private *cs35l56, bool firmware_missing)
 {
-       unsigned int firmware_missing;
        int ret;
 
-       ret = regmap_read(cs35l56->base.regmap, CS35L56_PROTECTION_STATUS, &firmware_missing);
-       if (ret) {
-               dev_err(cs35l56->base.dev, "Failed to read PROTECTION_STATUS: %d\n", ret);
-               return;
-       }
-
-       firmware_missing &= CS35L56_FIRMWARE_MISSING;
-
        /*
         * Disable SoundWire interrupts to prevent race with IRQ work.
         * Setting sdw_irq_no_unmask prevents the handler re-enabling
@@ -747,23 +892,59 @@ static void cs35l56_dsp_work(struct work_struct *work)
        struct cs35l56_private *cs35l56 = container_of(work,
                                                       struct cs35l56_private,
                                                       dsp_work);
+       unsigned int firmware_version;
+       bool firmware_missing;
+       int ret;
 
        if (!cs35l56->base.init_done)
                return;
 
        pm_runtime_get_sync(cs35l56->base.dev);
 
+       ret = cs35l56_read_prot_status(&cs35l56->base, &firmware_missing, &firmware_version);
+       if (ret)
+               goto err;
+
+       /* Populate fw file qualifier with the revision and security state */
+       kfree(cs35l56->dsp.fwf_name);
+       if (firmware_missing) {
+               cs35l56->dsp.fwf_name = kasprintf(GFP_KERNEL, "%02x-dsp1", cs35l56->base.rev);
+       } else {
+               /* Firmware files must match the running firmware version */
+               cs35l56->dsp.fwf_name = kasprintf(GFP_KERNEL,
+                                                 "%02x%s-%06x-dsp1",
+                                                 cs35l56->base.rev,
+                                                 cs35l56->base.secured ? "-s" : "",
+                                                 firmware_version);
+       }
+
+       if (!cs35l56->dsp.fwf_name)
+               goto err;
+
+       dev_dbg(cs35l56->base.dev, "DSP fwf name: '%s' system name: '%s'\n",
+               cs35l56->dsp.fwf_name, cs35l56->dsp.system_name);
+
        /*
-        * When the device is running in secure mode the firmware files can
-        * only contain insecure tunings and therefore we do not need to
-        * shutdown the firmware to apply them and can use the lower cost
-        * reinit sequence instead.
+        * The firmware cannot be patched if it is already running from
+        * patch RAM. In this case the firmware files are versioned to
+        * match the running firmware version and will only contain
+        * tunings. We do not need to shutdown the firmware to apply
+        * tunings so can use the lower cost reinit sequence instead.
         */
-       if (cs35l56->base.secured)
-               cs35l56_secure_patch(cs35l56);
+       if (!firmware_missing)
+               cs35l56_reinit_patch(cs35l56);
        else
-               cs35l56_patch(cs35l56);
+               cs35l56_patch(cs35l56, firmware_missing);
 
+
+       /*
+        * Set starting value of ASP1 mux widgets. Updating a mux takes
+        * the DAPM mutex. Post this to a separate job so that DAPM
+        * power-up can wait for dsp_work to complete without deadlocking
+        * on the DAPM mutex.
+        */
+       queue_work(cs35l56->dsp_wq, &cs35l56->mux_init_work);
+err:
        pm_runtime_mark_last_busy(cs35l56->base.dev);
        pm_runtime_put_autosuspend(cs35l56->base.dev);
 }
@@ -778,10 +959,19 @@ static int cs35l56_component_probe(struct snd_soc_component *component)
 
        if (!cs35l56->dsp.system_name &&
            (snd_soc_card_get_pci_ssid(component->card, &vendor, &device) == 0)) {
-               cs35l56->dsp.system_name = devm_kasprintf(cs35l56->base.dev,
-                                                         GFP_KERNEL,
-                                                         "%04x%04x",
-                                                         vendor, device);
+               /* Append a speaker qualifier if there is a speaker ID */
+               if (cs35l56->speaker_id >= 0) {
+                       cs35l56->dsp.system_name = devm_kasprintf(cs35l56->base.dev,
+                                                                 GFP_KERNEL,
+                                                                 "%04x%04x-spkid%d",
+                                                                 vendor, device,
+                                                                 cs35l56->speaker_id);
+               } else {
+                       cs35l56->dsp.system_name = devm_kasprintf(cs35l56->base.dev,
+                                                                 GFP_KERNEL,
+                                                                 "%04x%04x",
+                                                                 vendor, device);
+               }
                if (!cs35l56->dsp.system_name)
                        return -ENOMEM;
        }
@@ -809,6 +999,17 @@ static void cs35l56_component_remove(struct snd_soc_component *component)
        struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
 
        cancel_work_sync(&cs35l56->dsp_work);
+       cancel_work_sync(&cs35l56->mux_init_work);
+
+       if (cs35l56->dsp.cs_dsp.booted)
+               wm_adsp_power_down(&cs35l56->dsp);
+
+       wm_adsp2_component_remove(&cs35l56->dsp, component);
+
+       kfree(cs35l56->dsp.fwf_name);
+       cs35l56->dsp.fwf_name = NULL;
+
+       cs35l56->component = NULL;
 }
 
 static int cs35l56_set_bias_level(struct snd_soc_component *component,
@@ -869,8 +1070,10 @@ int cs35l56_system_suspend(struct device *dev)
 
        dev_dbg(dev, "system_suspend\n");
 
-       if (cs35l56->component)
+       if (cs35l56->component) {
                flush_work(&cs35l56->dsp_work);
+               cancel_work_sync(&cs35l56->mux_init_work);
+       }
 
        /*
         * The interrupt line is normally shared, but after we start suspending
@@ -1021,6 +1224,7 @@ static int cs35l56_dsp_init(struct cs35l56_private *cs35l56)
                return -ENOMEM;
 
        INIT_WORK(&cs35l56->dsp_work, cs35l56_dsp_work);
+       INIT_WORK(&cs35l56->mux_init_work, cs35l56_mux_init_work);
 
        dsp = &cs35l56->dsp;
        cs35l56_init_cs_dsp(&cs35l56->base, &dsp->cs_dsp);
@@ -1050,7 +1254,13 @@ static int cs35l56_get_firmware_uid(struct cs35l56_private *cs35l56)
        if (ret < 0)
                return 0;
 
-       cs35l56->dsp.system_name = devm_kstrdup(dev, prop, GFP_KERNEL);
+       /* Append a speaker qualifier if there is a speaker ID */
+       if (cs35l56->speaker_id >= 0)
+               cs35l56->dsp.system_name = devm_kasprintf(dev, GFP_KERNEL, "%s-spkid%d",
+                                                         prop, cs35l56->speaker_id);
+       else
+               cs35l56->dsp.system_name = devm_kstrdup(dev, prop, GFP_KERNEL);
+
        if (cs35l56->dsp.system_name == NULL)
                return -ENOMEM;
 
@@ -1065,6 +1275,7 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56)
 
        init_completion(&cs35l56->init_completion);
        mutex_init(&cs35l56->base.irq_lock);
+       cs35l56->speaker_id = -ENOENT;
 
        dev_set_drvdata(cs35l56->base.dev, cs35l56);
 
@@ -1101,6 +1312,12 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56)
                gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 1);
        }
 
+       ret = cs35l56_get_speaker_id(&cs35l56->base);
+       if ((ret < 0) && (ret != -ENOENT))
+               goto err;
+
+       cs35l56->speaker_id = ret;
+
        ret = cs35l56_get_firmware_uid(cs35l56);
        if (ret != 0)
                goto err;
@@ -1152,11 +1369,9 @@ int cs35l56_init(struct cs35l56_private *cs35l56)
        if (ret < 0)
                return ret;
 
-       /* Populate the DSP information with the revision and security state */
-       cs35l56->dsp.part = devm_kasprintf(cs35l56->base.dev, GFP_KERNEL, "cs35l56%s-%02x",
-                                          cs35l56->base.secured ? "s" : "", cs35l56->base.rev);
-       if (!cs35l56->dsp.part)
-               return -ENOMEM;
+       ret = cs35l56_set_patch(&cs35l56->base);
+       if (ret)
+               return ret;
 
        if (!cs35l56->base.reset_gpio) {
                dev_dbg(cs35l56->base.dev, "No reset gpio: using soft reset\n");
@@ -1190,10 +1405,6 @@ post_soft_reset:
        if (ret)
                return ret;
 
-       ret = cs35l56_set_patch(&cs35l56->base);
-       if (ret)
-               return ret;
-
        /* Registers could be dirty after soft reset or SoundWire enumeration */
        regcache_sync(cs35l56->base.regmap);
 
index 8159c3e217d936c02baf88c5659a99e4f3159ddd..596b141e3f9612389cae4c6b80eab20f4f90fd69 100644 (file)
@@ -34,6 +34,7 @@ struct cs35l56_private {
        struct wm_adsp dsp; /* must be first member */
        struct cs35l56_base base;
        struct work_struct dsp_work;
+       struct work_struct mux_init_work;
        struct workqueue_struct *dsp_wq;
        struct snd_soc_component *component;
        struct regulator_bulk_data supplies[CS35L56_NUM_BULK_SUPPLIES];
@@ -44,6 +45,7 @@ struct cs35l56_private {
        bool sdw_attached;
        struct completion init_completion;
 
+       int speaker_id;
        u32 rx_mask;
        u32 tx_mask;
        u8 asp_slot_width;
old mode 100755 (executable)
new mode 100644 (file)
index fa890f6..cbcd02e
@@ -45,6 +45,82 @@ struct es8326_priv {
        int jack_remove_retry;
 };
 
+static int es8326_crosstalk1_get(struct snd_kcontrol *kcontrol,
+               struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct es8326_priv *es8326 = snd_soc_component_get_drvdata(component);
+       unsigned int crosstalk_h, crosstalk_l;
+       unsigned int crosstalk;
+
+       regmap_read(es8326->regmap, ES8326_DAC_RAMPRATE, &crosstalk_h);
+       regmap_read(es8326->regmap, ES8326_DAC_CROSSTALK, &crosstalk_l);
+       crosstalk_h &= 0x20;
+       crosstalk_l &= 0xf0;
+       crosstalk = crosstalk_h >> 1 | crosstalk_l >> 4;
+       ucontrol->value.integer.value[0] = crosstalk;
+
+       return 0;
+}
+
+static int es8326_crosstalk1_set(struct snd_kcontrol *kcontrol,
+               struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct es8326_priv *es8326 = snd_soc_component_get_drvdata(component);
+       unsigned int crosstalk_h, crosstalk_l;
+       unsigned int crosstalk;
+
+       crosstalk = ucontrol->value.integer.value[0];
+       regmap_read(es8326->regmap, ES8326_DAC_CROSSTALK, &crosstalk_l);
+       crosstalk_h = (crosstalk & 0x10) << 1;
+       crosstalk_l &= 0x0f;
+       crosstalk_l |= (crosstalk & 0x0f) << 4;
+       regmap_update_bits(es8326->regmap, ES8326_DAC_RAMPRATE,
+                       0x20, crosstalk_h);
+       regmap_write(es8326->regmap, ES8326_DAC_CROSSTALK, crosstalk_l);
+
+       return 0;
+}
+
+static int es8326_crosstalk2_get(struct snd_kcontrol *kcontrol,
+               struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct es8326_priv *es8326 = snd_soc_component_get_drvdata(component);
+       unsigned int crosstalk_h, crosstalk_l;
+       unsigned int crosstalk;
+
+       regmap_read(es8326->regmap, ES8326_DAC_RAMPRATE, &crosstalk_h);
+       regmap_read(es8326->regmap, ES8326_DAC_CROSSTALK, &crosstalk_l);
+       crosstalk_h &= 0x10;
+       crosstalk_l &= 0x0f;
+       crosstalk = crosstalk_h  | crosstalk_l;
+       ucontrol->value.integer.value[0] = crosstalk;
+
+       return 0;
+}
+
+static int es8326_crosstalk2_set(struct snd_kcontrol *kcontrol,
+               struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct es8326_priv *es8326 = snd_soc_component_get_drvdata(component);
+       unsigned int crosstalk_h, crosstalk_l;
+       unsigned int crosstalk;
+
+       crosstalk = ucontrol->value.integer.value[0];
+       regmap_read(es8326->regmap, ES8326_DAC_CROSSTALK, &crosstalk_l);
+       crosstalk_h = crosstalk & 0x10;
+       crosstalk_l &= 0xf0;
+       crosstalk_l |= crosstalk & 0x0f;
+       regmap_update_bits(es8326->regmap, ES8326_DAC_RAMPRATE,
+                       0x10, crosstalk_h);
+       regmap_write(es8326->regmap, ES8326_DAC_CROSSTALK, crosstalk_l);
+
+       return 0;
+}
+
 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(dac_vol_tlv, -9550, 50, 0);
 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9550, 50, 0);
 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_analog_pga_tlv, 0, 300, 0);
@@ -102,6 +178,10 @@ static const struct snd_kcontrol_new es8326_snd_controls[] = {
        SOC_SINGLE_TLV("ALC Capture Target Level", ES8326_ALC_LEVEL,
                        0, 0x0f, 0, drc_target_tlv),
 
+       SOC_SINGLE_EXT("CROSSTALK1", SND_SOC_NOPM, 0, 31, 0,
+                       es8326_crosstalk1_get, es8326_crosstalk1_set),
+       SOC_SINGLE_EXT("CROSSTALK2", SND_SOC_NOPM, 0, 31, 0,
+                       es8326_crosstalk2_get, es8326_crosstalk2_set),
 };
 
 static const struct snd_soc_dapm_widget es8326_dapm_widgets[] = {
@@ -117,12 +197,6 @@ static const struct snd_soc_dapm_widget es8326_dapm_widgets[] = {
        SND_SOC_DAPM_AIF_OUT("I2S OUT", "I2S1 Capture", 0, SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_AIF_IN("I2S IN", "I2S1 Playback", 0, SND_SOC_NOPM, 0, 0),
 
-       /* ADC Digital Mute */
-       SND_SOC_DAPM_PGA("ADC L1", ES8326_ADC_MUTE, 0, 1, NULL, 0),
-       SND_SOC_DAPM_PGA("ADC R1", ES8326_ADC_MUTE, 1, 1, NULL, 0),
-       SND_SOC_DAPM_PGA("ADC L2", ES8326_ADC_MUTE, 2, 1, NULL, 0),
-       SND_SOC_DAPM_PGA("ADC R2", ES8326_ADC_MUTE, 3, 1, NULL, 0),
-
        /* Analog Power Supply*/
        SND_SOC_DAPM_DAC("Right DAC", NULL, ES8326_ANA_PDN, 0, 1),
        SND_SOC_DAPM_DAC("Left DAC", NULL, ES8326_ANA_PDN, 1, 1),
@@ -142,15 +216,10 @@ static const struct snd_soc_dapm_widget es8326_dapm_widgets[] = {
 };
 
 static const struct snd_soc_dapm_route es8326_dapm_routes[] = {
-       {"ADC L1", NULL, "MIC1"},
-       {"ADC R1", NULL, "MIC2"},
-       {"ADC L2", NULL, "MIC3"},
-       {"ADC R2", NULL, "MIC4"},
-
-       {"ADC L", NULL, "ADC L1"},
-       {"ADC R", NULL, "ADC R1"},
-       {"ADC L", NULL, "ADC L2"},
-       {"ADC R", NULL, "ADC R2"},
+       {"ADC L", NULL, "MIC1"},
+       {"ADC R", NULL, "MIC2"},
+       {"ADC L", NULL, "MIC3"},
+       {"ADC R", NULL, "MIC4"},
 
        {"I2S OUT", NULL, "ADC L"},
        {"I2S OUT", NULL, "ADC R"},
@@ -440,10 +509,16 @@ static int es8326_mute(struct snd_soc_dai *dai, int mute, int direction)
        unsigned int offset_l, offset_r;
 
        if (mute) {
-               regmap_write(es8326->regmap, ES8326_HP_CAL, ES8326_HP_OFF);
-               regmap_update_bits(es8326->regmap, ES8326_DAC_MUTE,
-                               ES8326_MUTE_MASK, ES8326_MUTE);
-               regmap_write(es8326->regmap, ES8326_HP_DRIVER, 0xf0);
+               if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+                       regmap_write(es8326->regmap, ES8326_HP_CAL, ES8326_HP_OFF);
+                       regmap_update_bits(es8326->regmap, ES8326_DAC_MUTE,
+                                       ES8326_MUTE_MASK, ES8326_MUTE);
+                       regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF,
+                                       0x30, 0x00);
+               } else {
+                       regmap_update_bits(es8326->regmap,  ES8326_ADC_MUTE,
+                                       0x0F, 0x0F);
+               }
        } else {
                if (!es8326->calibrated) {
                        regmap_write(es8326->regmap, ES8326_HP_CAL, ES8326_HP_FORCE_CAL);
@@ -456,11 +531,22 @@ static int es8326_mute(struct snd_soc_dai *dai, int mute, int direction)
                        regmap_write(es8326->regmap, ES8326_HPR_OFFSET_INI, offset_r);
                        es8326->calibrated = true;
                }
-               regmap_write(es8326->regmap, ES8326_HP_DRIVER, 0xa1);
-               regmap_write(es8326->regmap, ES8326_HP_VOL, 0x91);
-               regmap_write(es8326->regmap, ES8326_HP_CAL, ES8326_HP_ON);
-               regmap_update_bits(es8326->regmap, ES8326_DAC_MUTE,
-                               ES8326_MUTE_MASK, ~(ES8326_MUTE));
+               if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+                       regmap_update_bits(es8326->regmap, ES8326_DAC_DSM, 0x01, 0x01);
+                       usleep_range(1000, 5000);
+                       regmap_update_bits(es8326->regmap, ES8326_DAC_DSM, 0x01, 0x00);
+                       usleep_range(1000, 5000);
+                       regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x30, 0x20);
+                       regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x30, 0x30);
+                       regmap_write(es8326->regmap, ES8326_HP_DRIVER, 0xa1);
+                       regmap_write(es8326->regmap, ES8326_HP_CAL, ES8326_HP_ON);
+                       regmap_update_bits(es8326->regmap, ES8326_DAC_MUTE,
+                                       ES8326_MUTE_MASK, ~(ES8326_MUTE));
+               } else {
+                       msleep(300);
+                       regmap_update_bits(es8326->regmap,  ES8326_ADC_MUTE,
+                                       0x0F, 0x00);
+               }
        }
        return 0;
 }
@@ -477,23 +563,20 @@ static int es8326_set_bias_level(struct snd_soc_component *codec,
                if (ret)
                        return ret;
 
-               regmap_update_bits(es8326->regmap, ES8326_DAC_DSM, 0x01, 0x00);
+               regmap_update_bits(es8326->regmap, ES8326_RESET, 0x02, 0x02);
+               usleep_range(5000, 10000);
                regmap_write(es8326->regmap, ES8326_INTOUT_IO, es8326->interrupt_clk);
                regmap_write(es8326->regmap, ES8326_SDINOUT1_IO,
                            (ES8326_IO_DMIC_CLK << ES8326_SDINOUT1_SHIFT));
-               regmap_write(es8326->regmap, ES8326_VMIDSEL, 0x0E);
                regmap_write(es8326->regmap, ES8326_PGA_PDN, 0x40);
                regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x00);
                regmap_update_bits(es8326->regmap,  ES8326_CLK_CTL, 0x20, 0x20);
-
-               regmap_update_bits(es8326->regmap, ES8326_RESET,
-                               ES8326_CSM_ON, ES8326_CSM_ON);
+               regmap_update_bits(es8326->regmap, ES8326_RESET, 0x02, 0x00);
                break;
        case SND_SOC_BIAS_PREPARE:
                break;
        case SND_SOC_BIAS_STANDBY:
                regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x3b);
-               regmap_write(es8326->regmap, ES8326_VMIDSEL, 0x00);
                regmap_update_bits(es8326->regmap, ES8326_CLK_CTL, 0x20, 0x00);
                regmap_write(es8326->regmap, ES8326_SDINOUT1_IO, ES8326_IO_INPUT);
                break;
@@ -513,7 +596,7 @@ static const struct snd_soc_dai_ops es8326_ops = {
        .set_fmt = es8326_set_dai_fmt,
        .set_sysclk = es8326_set_dai_sysclk,
        .mute_stream = es8326_mute,
-       .no_capture_mute = 1,
+       .no_capture_mute = 0,
 };
 
 static struct snd_soc_dai_driver es8326_dai = {
@@ -672,6 +755,8 @@ static void es8326_jack_detect_handler(struct work_struct *work)
                        es8326->hp = 0;
                }
                regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01);
+               regmap_write(es8326->regmap, ES8326_SYS_BIAS, 0x0a);
+               regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x03);
                /*
                 * Inverted HPJACK_POL bit to trigger one IRQ to double check HP Removal event
                 */
@@ -695,8 +780,11 @@ static void es8326_jack_detect_handler(struct work_struct *work)
                         * Don't report jack status.
                         */
                        regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01);
+                       es8326_enable_micbias(es8326->component);
                        usleep_range(50000, 70000);
                        regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x00);
+                       regmap_write(es8326->regmap, ES8326_SYS_BIAS, 0x1f);
+                       regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x08);
                        queue_delayed_work(system_wq, &es8326->jack_detect_work,
                                        msecs_to_jiffies(400));
                        es8326->hp = 1;
@@ -736,13 +824,10 @@ exit:
 static irqreturn_t es8326_irq(int irq, void *dev_id)
 {
        struct es8326_priv *es8326 = dev_id;
-       struct snd_soc_component *comp = es8326->component;
 
        if (!es8326->jack)
                goto out;
 
-       es8326_enable_micbias(comp);
-
        if (es8326->jack->status & SND_JACK_HEADSET)
                queue_delayed_work(system_wq, &es8326->jack_detect_work,
                                   msecs_to_jiffies(10));
@@ -766,14 +851,14 @@ static int es8326_calibrate(struct snd_soc_component *component)
        if ((es8326->version == ES8326_VERSION_B) && (es8326->calibrated == false)) {
                dev_dbg(component->dev, "ES8326_VERSION_B, calibrating\n");
                regmap_write(es8326->regmap, ES8326_CLK_INV, 0xc0);
-               regmap_write(es8326->regmap, ES8326_CLK_DIV1, 0x01);
+               regmap_write(es8326->regmap, ES8326_CLK_DIV1, 0x03);
                regmap_write(es8326->regmap, ES8326_CLK_DLL, 0x30);
                regmap_write(es8326->regmap, ES8326_CLK_MUX, 0xed);
                regmap_write(es8326->regmap, ES8326_CLK_DAC_SEL, 0x08);
                regmap_write(es8326->regmap, ES8326_CLK_TRI, 0xc1);
                regmap_write(es8326->regmap, ES8326_DAC_MUTE, 0x03);
                regmap_write(es8326->regmap, ES8326_ANA_VSEL, 0x7f);
-               regmap_write(es8326->regmap, ES8326_VMIDLOW, 0x03);
+               regmap_write(es8326->regmap, ES8326_VMIDLOW, 0x23);
                regmap_write(es8326->regmap, ES8326_DAC2HPMIX, 0x88);
                usleep_range(15000, 20000);
                regmap_write(es8326->regmap, ES8326_HP_OFFSET_CAL, 0x8c);
@@ -814,13 +899,13 @@ static int es8326_resume(struct snd_soc_component *component)
        /* reset internal clock state */
        regmap_write(es8326->regmap, ES8326_RESET, 0x1f);
        regmap_write(es8326->regmap, ES8326_VMIDSEL, 0x0E);
+       regmap_write(es8326->regmap, ES8326_ANA_LP, 0xf0);
        usleep_range(10000, 15000);
        regmap_write(es8326->regmap, ES8326_HPJACK_TIMER, 0xe9);
-       regmap_write(es8326->regmap, ES8326_ANA_MICBIAS, 0x4b);
+       regmap_write(es8326->regmap, ES8326_ANA_MICBIAS, 0xcb);
        /* set headphone default type and detect pin */
        regmap_write(es8326->regmap, ES8326_HPDET_TYPE, 0x83);
        regmap_write(es8326->regmap, ES8326_CLK_RESAMPLE, 0x05);
-       regmap_write(es8326->regmap, ES8326_HP_MISC, 0x30);
 
        /* set internal oscillator as clock source of headpone cp */
        regmap_write(es8326->regmap, ES8326_CLK_DIV_CPC, 0x89);
@@ -828,14 +913,15 @@ static int es8326_resume(struct snd_soc_component *component)
        /* clock manager reset release */
        regmap_write(es8326->regmap, ES8326_RESET, 0x17);
        /* set headphone detection as half scan mode */
-       regmap_write(es8326->regmap, ES8326_HP_MISC, 0x30);
+       regmap_write(es8326->regmap, ES8326_HP_MISC, 0x3d);
        regmap_write(es8326->regmap, ES8326_PULLUP_CTL, 0x00);
 
        /* enable headphone driver */
+       regmap_write(es8326->regmap, ES8326_HP_VOL, 0xc4);
        regmap_write(es8326->regmap, ES8326_HP_DRIVER, 0xa7);
        usleep_range(2000, 5000);
-       regmap_write(es8326->regmap, ES8326_HP_DRIVER_REF, 0xa3);
-       regmap_write(es8326->regmap, ES8326_HP_DRIVER_REF, 0xb3);
+       regmap_write(es8326->regmap, ES8326_HP_DRIVER_REF, 0x23);
+       regmap_write(es8326->regmap, ES8326_HP_DRIVER_REF, 0x33);
        regmap_write(es8326->regmap, ES8326_HP_DRIVER, 0xa1);
 
        regmap_write(es8326->regmap, ES8326_CLK_INV, 0x00);
@@ -844,6 +930,8 @@ static int es8326_resume(struct snd_soc_component *component)
        regmap_write(es8326->regmap, ES8326_CLK_CAL_TIME, 0x00);
        /* calibrate for B version */
        es8326_calibrate(component);
+       regmap_write(es8326->regmap, ES8326_DAC_CROSSTALK, 0xaa);
+       regmap_write(es8326->regmap, ES8326_DAC_RAMPRATE, 0x00);
        /* turn off headphone out */
        regmap_write(es8326->regmap, ES8326_HP_CAL, 0x00);
        /* set ADC and DAC in low power mode */
@@ -856,6 +944,14 @@ static int es8326_resume(struct snd_soc_component *component)
        regmap_write(es8326->regmap, ES8326_DAC_DSM, 0x08);
        regmap_write(es8326->regmap, ES8326_DAC_VPPSCALE, 0x15);
 
+       regmap_write(es8326->regmap, ES8326_HPDET_TYPE, 0x80 |
+                       ((es8326->version == ES8326_VERSION_B) ?
+                       (ES8326_HP_DET_SRC_PIN9 | es8326->jack_pol) :
+                       (ES8326_HP_DET_SRC_PIN9 | es8326->jack_pol | 0x04)));
+       usleep_range(5000, 10000);
+       es8326_enable_micbias(es8326->component);
+       usleep_range(50000, 70000);
+       regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x00);
        regmap_write(es8326->regmap, ES8326_INT_SOURCE,
                    (ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
        regmap_write(es8326->regmap, ES8326_INTOUT_IO,
@@ -864,7 +960,7 @@ static int es8326_resume(struct snd_soc_component *component)
                    (ES8326_IO_DMIC_CLK << ES8326_SDINOUT1_SHIFT));
        regmap_write(es8326->regmap, ES8326_SDINOUT23_IO, ES8326_IO_INPUT);
 
-       regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x3b);
+       regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x00);
        regmap_write(es8326->regmap, ES8326_RESET, ES8326_CSM_ON);
        regmap_update_bits(es8326->regmap, ES8326_PGAGAIN, ES8326_MIC_SEL_MASK,
                           ES8326_MIC1_SEL);
@@ -872,11 +968,7 @@ static int es8326_resume(struct snd_soc_component *component)
        regmap_update_bits(es8326->regmap, ES8326_DAC_MUTE, ES8326_MUTE_MASK,
                           ES8326_MUTE);
 
-       regmap_write(es8326->regmap, ES8326_HPDET_TYPE, 0x80 |
-                       ((es8326->version == ES8326_VERSION_B) ?
-                       (ES8326_HP_DET_SRC_PIN9 | es8326->jack_pol) :
-                       (ES8326_HP_DET_SRC_PIN9 | es8326->jack_pol | 0x04)));
-       regmap_write(es8326->regmap, ES8326_HP_VOL, 0x11);
+       regmap_write(es8326->regmap, ES8326_ADC_MUTE, 0x0f);
 
        es8326->jack_remove_retry = 0;
        es8326->hp = 0;
index 90a08351d6acd043b42a6ebed574591a3defc6a0..4234bbb900c4530c6746fab5f75fcb0b049e4fdb 100644 (file)
@@ -72,6 +72,7 @@
 #define ES8326_DAC_VOL         0x50
 #define ES8326_DRC_RECOVERY    0x53
 #define ES8326_DRC_WINSIZE     0x54
+#define ES8326_DAC_CROSSTALK   0x55
 #define ES8326_HPJACK_TIMER    0x56
 #define ES8326_HPDET_TYPE      0x57
 #define ES8326_INT_SOURCE      0x58
 #define ES8326_MUTE (3 << 0)
 
 /* ES8326_CLK_CTL */
-#define ES8326_CLK_ON (0x7f << 0)
+#define ES8326_CLK_ON (0x7e << 0)
 #define ES8326_CLK_OFF (0 << 0)
 
 /* ES8326_CLK_INV */
index 7e21cec3c2fb97a9be518b4316cdeafae2cf0776..6ce309980cd10e200dc62a1941b07f6f7728d3cd 100644 (file)
@@ -1584,7 +1584,6 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
        u16 gain_reg;
        u16 reg;
        int val;
-       int offset_val = 0;
        struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
 
        if (w->shift == WSA_MACRO_COMP1) {
@@ -1623,10 +1622,8 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
                                        CDC_WSA_RX1_RX_PATH_MIX_SEC0,
                                        CDC_WSA_RX_PGA_HALF_DB_MASK,
                                        CDC_WSA_RX_PGA_HALF_DB_ENABLE);
-                       offset_val = -2;
                }
                val = snd_soc_component_read(component, gain_reg);
-               val += offset_val;
                snd_soc_component_write(component, gain_reg, val);
                wsa_macro_config_ear_spkr_gain(component, wsa,
                                                event, gain_reg);
@@ -1654,10 +1651,6 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
                                        CDC_WSA_RX1_RX_PATH_MIX_SEC0,
                                        CDC_WSA_RX_PGA_HALF_DB_MASK,
                                        CDC_WSA_RX_PGA_HALF_DB_DISABLE);
-                       offset_val = 2;
-                       val = snd_soc_component_read(component, gain_reg);
-                       val += offset_val;
-                       snd_soc_component_write(component, gain_reg, val);
                }
                wsa_macro_config_ear_spkr_gain(component, wsa,
                                                event, gain_reg);
index c22b047115cc47217d6455697014503d5a6aa4a1..aa3eadecd9746cd5f24427955b4aea0b49d88274 100644 (file)
@@ -59,6 +59,7 @@
 
 struct rtq9128_data {
        struct gpio_desc *enable;
+       unsigned int daifmt;
        int tdm_slots;
        int tdm_slot_width;
        bool tdm_input_data2_select;
@@ -391,7 +392,11 @@ static int rtq9128_component_probe(struct snd_soc_component *comp)
        unsigned int val;
        int i, ret;
 
-       pm_runtime_resume_and_get(comp->dev);
+       ret = pm_runtime_resume_and_get(comp->dev);
+       if (ret < 0) {
+               dev_err(comp->dev, "Failed to resume device (%d)\n", ret);
+               return ret;
+       }
 
        val = snd_soc_component_read(comp, RTQ9128_REG_EFUSE_DATA);
 
@@ -437,10 +442,7 @@ static const struct snd_soc_component_driver rtq9128_comp_driver = {
 static int rtq9128_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 {
        struct rtq9128_data *data = snd_soc_dai_get_drvdata(dai);
-       struct snd_soc_component *comp = dai->component;
        struct device *dev = dai->dev;
-       unsigned int audfmt, fmtval;
-       int ret;
 
        dev_dbg(dev, "%s: fmt 0x%8x\n", __func__, fmt);
 
@@ -450,35 +452,10 @@ static int rtq9128_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                return -EINVAL;
        }
 
-       fmtval = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
-       if (data->tdm_slots && fmtval != SND_SOC_DAIFMT_DSP_A && fmtval != SND_SOC_DAIFMT_DSP_B) {
-               dev_err(dev, "TDM is used, format only support DSP_A or DSP_B\n");
-               return -EINVAL;
-       }
+       /* Store here and will be used in runtime hw_params for DAI format setting */
+       data->daifmt = fmt;
 
-       switch (fmtval) {
-       case SND_SOC_DAIFMT_I2S:
-               audfmt = 8;
-               break;
-       case SND_SOC_DAIFMT_LEFT_J:
-               audfmt = 9;
-               break;
-       case SND_SOC_DAIFMT_RIGHT_J:
-               audfmt = 10;
-               break;
-       case SND_SOC_DAIFMT_DSP_A:
-               audfmt = data->tdm_slots ? 12 : 11;
-               break;
-       case SND_SOC_DAIFMT_DSP_B:
-               audfmt = data->tdm_slots ? 4 : 3;
-               break;
-       default:
-               dev_err(dev, "Unsupported format 0x%8x\n", fmt);
-               return -EINVAL;
-       }
-
-       ret = snd_soc_component_write_field(comp, RTQ9128_REG_I2S_OPT, RTQ9128_AUDFMT_MASK, audfmt);
-       return ret < 0 ? ret : 0;
+       return 0;
 }
 
 static int rtq9128_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
@@ -554,10 +531,38 @@ static int rtq9128_dai_hw_params(struct snd_pcm_substream *stream, struct snd_pc
        unsigned int width, slot_width, bitrate, audbit, dolen;
        struct snd_soc_component *comp = dai->component;
        struct device *dev = dai->dev;
+       unsigned int fmtval, audfmt;
        int ret;
 
        dev_dbg(dev, "%s: width %d\n", __func__, params_width(param));
 
+       fmtval = FIELD_GET(SND_SOC_DAIFMT_FORMAT_MASK, data->daifmt);
+       if (data->tdm_slots && fmtval != SND_SOC_DAIFMT_DSP_A && fmtval != SND_SOC_DAIFMT_DSP_B) {
+               dev_err(dev, "TDM is used, format only support DSP_A or DSP_B\n");
+               return -EINVAL;
+       }
+
+       switch (fmtval) {
+       case SND_SOC_DAIFMT_I2S:
+               audfmt = 8;
+               break;
+       case SND_SOC_DAIFMT_LEFT_J:
+               audfmt = 9;
+               break;
+       case SND_SOC_DAIFMT_RIGHT_J:
+               audfmt = 10;
+               break;
+       case SND_SOC_DAIFMT_DSP_A:
+               audfmt = data->tdm_slots ? 12 : 11;
+               break;
+       case SND_SOC_DAIFMT_DSP_B:
+               audfmt = data->tdm_slots ? 4 : 3;
+               break;
+       default:
+               dev_err(dev, "Unsupported format 0x%8x\n", fmtval);
+               return -EINVAL;
+       }
+
        switch (width = params_width(param)) {
        case 16:
                audbit = 0;
@@ -611,6 +616,10 @@ static int rtq9128_dai_hw_params(struct snd_pcm_substream *stream, struct snd_pc
                return -EINVAL;
        }
 
+       ret = snd_soc_component_write_field(comp, RTQ9128_REG_I2S_OPT, RTQ9128_AUDFMT_MASK, audfmt);
+       if (ret < 0)
+               return ret;
+
        ret = snd_soc_component_write_field(comp, RTQ9128_REG_I2S_OPT, RTQ9128_AUDBIT_MASK, audbit);
        if (ret < 0)
                return ret;
index 962c2cdfa017441ce74d7084f14f2b765bf29b74..54561ae598b87ac3db985eea203b7b1508090804 100644 (file)
@@ -59,7 +59,6 @@ struct tas2562_data {
 
 enum tas256x_model {
        TAS2562,
-       TAS2563,
        TAS2564,
        TAS2110,
 };
@@ -721,7 +720,6 @@ static int tas2562_parse_dt(struct tas2562_data *tas2562)
 
 static const struct i2c_device_id tas2562_id[] = {
        { "tas2562", TAS2562 },
-       { "tas2563", TAS2563 },
        { "tas2564", TAS2564 },
        { "tas2110", TAS2110 },
        { }
@@ -770,7 +768,6 @@ static int tas2562_probe(struct i2c_client *client)
 #ifdef CONFIG_OF
 static const struct of_device_id tas2562_of_match[] = {
        { .compatible = "ti,tas2562", },
-       { .compatible = "ti,tas2563", },
        { .compatible = "ti,tas2564", },
        { .compatible = "ti,tas2110", },
        { },
index 917b1c15f71d41077243f9aef933af2f97c55c14..32913bd1a623381ee6e8d3d72c3f8e49d60ff0f7 100644 (file)
@@ -1,13 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0
 //
-// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
 //
 // Copyright (C) 2022 - 2023 Texas Instruments Incorporated
 // https://www.ti.com
 //
-// The TAS2781 driver implements a flexible and configurable
+// The TAS2563/TAS2781 driver implements a flexible and configurable
 // algo coefficient setting for one, two, or even multiple
-// TAS2781 chips.
+// TAS2563/TAS2781 chips.
 //
 // Author: Shenghao Ding <shenghao-ding@ti.com>
 // Author: Kevin Lu <kevin-lu@ti.com>
@@ -32,6 +32,7 @@
 #include <sound/tas2781-tlv.h>
 
 static const struct i2c_device_id tasdevice_id[] = {
+       { "tas2563", TAS2563 },
        { "tas2781", TAS2781 },
        {}
 };
@@ -39,6 +40,7 @@ MODULE_DEVICE_TABLE(i2c, tasdevice_id);
 
 #ifdef CONFIG_OF
 static const struct of_device_id tasdevice_of_match[] = {
+       { .compatible = "ti,tas2563" },
        { .compatible = "ti,tas2781" },
        {},
 };
index 43c648efd0d938db5e0cb470a625617b8dc1860f..deb15b95992d5cc494562a91f13adbc348e2dd31 100644 (file)
@@ -3033,7 +3033,6 @@ static int wcd9335_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
 {
        struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
        u16 gain_reg;
-       int offset_val = 0;
        int val = 0;
 
        switch (w->reg) {
@@ -3073,7 +3072,6 @@ static int wcd9335_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
                val = snd_soc_component_read(comp, gain_reg);
-               val += offset_val;
                snd_soc_component_write(comp, gain_reg, val);
                break;
        case SND_SOC_DAPM_POST_PMD:
@@ -3294,7 +3292,6 @@ static int wcd9335_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
        u16 gain_reg;
        u16 reg;
        int val;
-       int offset_val = 0;
 
        if (!(snd_soc_dapm_widget_name_cmp(w, "RX INT0 INTERP"))) {
                reg = WCD9335_CDC_RX0_RX_PATH_CTL;
@@ -3337,7 +3334,6 @@ static int wcd9335_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
        case SND_SOC_DAPM_POST_PMU:
                wcd9335_config_compander(comp, w->shift, event);
                val = snd_soc_component_read(comp, gain_reg);
-               val += offset_val;
                snd_soc_component_write(comp, gain_reg, val);
                break;
        case SND_SOC_DAPM_POST_PMD:
index 1b6e376f3833cbc5f59034e88f90a4ee3845632a..6813268e6a19f3048877c5ae0ee55ae227543c04 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
-#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/slimbus.h>
 #include <sound/pcm_params.h>
index faf8d3f9b3c5d929d935ad4c5c63fe45371e4edc..6021aa5a56891969b04db64ac019bafb0766c701 100644 (file)
@@ -210,7 +210,7 @@ struct wcd938x_priv {
 };
 
 static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(ear_pa_gain, 600, -1800);
-static const DECLARE_TLV_DB_SCALE(line_gain, -3000, 150, -3000);
+static const DECLARE_TLV_DB_SCALE(line_gain, -3000, 150, 0);
 static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(analog_gain, 0, 3000);
 
 struct wcd938x_mbhc_zdet_param {
@@ -3587,10 +3587,8 @@ static int wcd938x_probe(struct platform_device *pdev)
        mutex_init(&wcd938x->micb_lock);
 
        ret = wcd938x_populate_dt_data(wcd938x, dev);
-       if (ret) {
-               dev_err(dev, "%s: Fail to obtain platform data\n", __func__);
-               return -EINVAL;
-       }
+       if (ret)
+               return ret;
 
        ret = wcd938x_add_slave_components(wcd938x, dev, &match);
        if (ret)
index c01e31175015cc2f354175dec019fac591a98b4b..36ea0dcdc7ab0033eb48e393d783e4f7d4df9854 100644 (file)
@@ -739,19 +739,25 @@ static int wm_adsp_request_firmware_file(struct wm_adsp *dsp,
                                         const char *filetype)
 {
        struct cs_dsp *cs_dsp = &dsp->cs_dsp;
+       const char *fwf;
        char *s, c;
        int ret = 0;
 
+       if (dsp->fwf_name)
+               fwf = dsp->fwf_name;
+       else
+               fwf = dsp->cs_dsp.name;
+
        if (system_name && asoc_component_prefix)
                *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s-%s.%s", dir, dsp->part,
-                                     dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
+                                     fwf, wm_adsp_fw[dsp->fw].file, system_name,
                                      asoc_component_prefix, filetype);
        else if (system_name)
                *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s.%s", dir, dsp->part,
-                                     dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
+                                     fwf, wm_adsp_fw[dsp->fw].file, system_name,
                                      filetype);
        else
-               *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, dsp->fwf_name,
+               *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, fwf,
                                      wm_adsp_fw[dsp->fw].file, filetype);
 
        if (*filename == NULL)
@@ -823,6 +829,23 @@ static int wm_adsp_request_firmware_files(struct wm_adsp *dsp,
                }
        }
 
+       /* Check system-specific bin without wmfw before falling back to generic */
+       if (dsp->wmfw_optional && system_name) {
+               if (asoc_component_prefix)
+                       wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
+                                                     cirrus_dir, system_name,
+                                                     asoc_component_prefix, "bin");
+
+               if (!*coeff_firmware)
+                       wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
+                                                     cirrus_dir, system_name,
+                                                     NULL, "bin");
+
+               if (*coeff_firmware)
+                       return 0;
+       }
+
+       /* Check legacy location */
        if (!wm_adsp_request_firmware_file(dsp, wmfw_firmware, wmfw_filename,
                                           "", NULL, NULL, "wmfw")) {
                wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
@@ -830,62 +853,28 @@ static int wm_adsp_request_firmware_files(struct wm_adsp *dsp,
                return 0;
        }
 
+       /* Fall back to generic wmfw and optional matching bin */
        ret = wm_adsp_request_firmware_file(dsp, wmfw_firmware, wmfw_filename,
                                            cirrus_dir, NULL, NULL, "wmfw");
-       if (!ret) {
+       if (!ret || dsp->wmfw_optional) {
                wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
                                              cirrus_dir, NULL, NULL, "bin");
                return 0;
        }
 
-       if (dsp->wmfw_optional) {
-               if (system_name) {
-                       if (asoc_component_prefix)
-                               wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
-                                                             cirrus_dir, system_name,
-                                                             asoc_component_prefix, "bin");
-
-                       if (!*coeff_firmware)
-                               wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
-                                                             cirrus_dir, system_name,
-                                                             NULL, "bin");
-               }
-
-               if (!*coeff_firmware)
-                       wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
-                                                     "", NULL, NULL, "bin");
-
-               if (!*coeff_firmware)
-                       wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
-                                                     cirrus_dir, NULL, NULL, "bin");
-
-               return 0;
-       }
-
        adsp_err(dsp, "Failed to request firmware <%s>%s-%s-%s<-%s<%s>>.wmfw\n",
-                cirrus_dir, dsp->part, dsp->fwf_name, wm_adsp_fw[dsp->fw].file,
-                system_name, asoc_component_prefix);
+                cirrus_dir, dsp->part,
+                dsp->fwf_name ? dsp->fwf_name : dsp->cs_dsp.name,
+                wm_adsp_fw[dsp->fw].file, system_name, asoc_component_prefix);
 
        return -ENOENT;
 }
 
 static int wm_adsp_common_init(struct wm_adsp *dsp)
 {
-       char *p;
-
        INIT_LIST_HEAD(&dsp->compr_list);
        INIT_LIST_HEAD(&dsp->buffer_list);
 
-       if (!dsp->fwf_name) {
-               p = devm_kstrdup(dsp->cs_dsp.dev, dsp->cs_dsp.name, GFP_KERNEL);
-               if (!p)
-                       return -ENOMEM;
-
-               dsp->fwf_name = p;
-               for (; *p != 0; ++p)
-                       *p = tolower(*p);
-       }
-
        return 0;
 }
 
index cb83c569e18d6aef70b23a56198dbf5ccd5ef2d8..a2e86ef7d18f5981b4604372e4b20930695aa5c4 100644 (file)
@@ -1098,7 +1098,11 @@ static int wsa_dev_mode_put(struct snd_kcontrol *kcontrol,
        return 1;
 }
 
-static const DECLARE_TLV_DB_SCALE(pa_gain, -300, 150, -300);
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(pa_gain,
+       0, 14, TLV_DB_SCALE_ITEM(-300, 0, 0),
+       15, 29, TLV_DB_SCALE_ITEM(-300, 150, 0),
+       30, 31, TLV_DB_SCALE_ITEM(1800, 0, 0),
+);
 
 static int wsa883x_get_swr_port(struct snd_kcontrol *kcontrol,
                                struct snd_ctl_elem_value *ucontrol)
index 9c94677f681a17b65095b9a691dcb13ea5903c0f..62606e20be9a3ec0e9607e4da1acd68e99f9ba1a 100644 (file)
@@ -556,7 +556,7 @@ static int graph_parse_node_multi_nm(struct snd_soc_dai_link *dai_link,
                struct device_node *mcodec_port;
                int codec_idx;
 
-               if (*nm_idx >= nm_max)
+               if (*nm_idx > nm_max)
                        break;
 
                mcpu_ep_n = of_get_next_child(mcpu_port, mcpu_ep_n);
index 816fad8c1ff0ef439c8805dfb39edb6bdb3cab2f..540f7a29310a9f8f467af23540332db091a783b9 100644 (file)
@@ -797,6 +797,9 @@ static int broxton_audio_probe(struct platform_device *pdev)
                broxton_audio_card.name = "glkda7219max";
                /* Fixup the SSP entries for geminilake */
                for (i = 0; i < ARRAY_SIZE(broxton_dais); i++) {
+                       if (!broxton_dais[i].codecs->dai_name)
+                               continue;
+
                        /* MAXIM_CODEC is connected to SSP1. */
                        if (!strcmp(broxton_dais[i].codecs->dai_name,
                                    BXT_MAXIM_CODEC_DAI)) {
@@ -822,6 +825,9 @@ static int broxton_audio_probe(struct platform_device *pdev)
                        broxton_audio_card.name = "cmlda7219max";
 
                for (i = 0; i < ARRAY_SIZE(broxton_dais); i++) {
+                       if (!broxton_dais[i].codecs->dai_name)
+                               continue;
+
                        /* MAXIM_CODEC is connected to SSP1. */
                        if (!strcmp(broxton_dais[i].codecs->dai_name,
                                        BXT_MAXIM_CODEC_DAI)) {
index 4631106f2a2823d4dfebd7f4e7f372cf7d5e2732..c0eb65c14aa97b4b9d14163bf4e957dcaedbea69 100644 (file)
@@ -604,7 +604,8 @@ static int broxton_audio_probe(struct platform_device *pdev)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(broxton_rt298_dais); i++) {
-               if (!strncmp(card->dai_link[i].codecs->name, "i2c-INT343A:00",
+               if (card->dai_link[i].codecs->name &&
+                   !strncmp(card->dai_link[i].codecs->name, "i2c-INT343A:00",
                             I2C_NAME_SIZE)) {
                        if (!strncmp(card->name, "broxton-rt298",
                                     PLATFORM_NAME_SIZE)) {
index 9ecee43ad84d115ed3460855fa46c3a6efb577bc..300391fbc2fc2b29863d6ba19169d60e9a22502c 100644 (file)
@@ -1256,11 +1256,11 @@ static int fill_sdw_codec_dlc(struct device *dev,
        else if (is_unique_device(adr_link, sdw_version, mfg_id, part_id,
                                  class_id, adr_index))
                codec->name = devm_kasprintf(dev, GFP_KERNEL,
-                                            "sdw:%01x:%04x:%04x:%02x", link_id,
+                                            "sdw:0:%01x:%04x:%04x:%02x", link_id,
                                             mfg_id, part_id, class_id);
        else
                codec->name = devm_kasprintf(dev, GFP_KERNEL,
-                                            "sdw:%01x:%04x:%04x:%02x:%01x", link_id,
+                                            "sdw:0:%01x:%04x:%04x:%02x:%01x", link_id,
                                             mfg_id, part_id, class_id, unique_id);
 
        if (!codec->name)
index f3894010f6563a0f949ec2b52307546eef648a81..7ec8965a70c06ba1b48ece52b5099832544e842a 100644 (file)
@@ -24,7 +24,7 @@ int mtk_sof_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
                struct snd_soc_dai_link *sof_dai_link = NULL;
                const struct sof_conn_stream *conn = &sof_priv->conn_streams[i];
 
-               if (strcmp(rtd->dai_link->name, conn->normal_link))
+               if (conn->normal_link && strcmp(rtd->dai_link->name, conn->normal_link))
                        continue;
 
                for_each_card_rtds(card, runtime) {
index 5bd6addd145051bbdc4f71583a3fa64e4581838d..bfcb2c486c39df1de6d095a4fbcca7087ed851e3 100644 (file)
@@ -1208,7 +1208,8 @@ static int mt8192_mt6359_dev_probe(struct platform_device *pdev)
                        dai_link->ignore = 0;
                }
 
-               if (strcmp(dai_link->codecs[0].dai_name, RT1015_CODEC_DAI) == 0)
+               if (dai_link->num_codecs && dai_link->codecs[0].dai_name &&
+                   strcmp(dai_link->codecs[0].dai_name, RT1015_CODEC_DAI) == 0)
                        dai_link->ops = &mt8192_rt1015_i2s_ops;
 
                if (!dai_link->platforms->name)
index 1e33863c85ca060a961ba33e100cfb0bcd0f8103..620d7ade1992e371aef1ba87e87ee9b5aeea7d24 100644 (file)
@@ -1795,10 +1795,6 @@ static const struct snd_kcontrol_new mt8195_memif_controls[] = {
                            MT8195_AFE_IRQ_28),
 };
 
-static const struct snd_soc_component_driver mt8195_afe_pcm_dai_component = {
-       .name = "mt8195-afe-pcm-dai",
-};
-
 static const struct mtk_base_memif_data memif_data[MT8195_AFE_MEMIF_NUM] = {
        [MT8195_AFE_MEMIF_DL2] = {
                .name = "DL2",
@@ -3037,7 +3033,6 @@ static int mt8195_afe_pcm_dev_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct reset_control *rstc;
        int i, irq_id, ret;
-       struct snd_soc_component *component;
 
        ret = of_reserved_mem_device_init(dev);
        if (ret)
@@ -3170,36 +3165,12 @@ static int mt8195_afe_pcm_dev_probe(struct platform_device *pdev)
 
        /* register component */
        ret = devm_snd_soc_register_component(dev, &mt8195_afe_component,
-                                             NULL, 0);
+                                             afe->dai_drivers, afe->num_dai_drivers);
        if (ret) {
                dev_warn(dev, "err_platform\n");
                goto err_pm_put;
        }
 
-       component = devm_kzalloc(dev, sizeof(*component), GFP_KERNEL);
-       if (!component) {
-               ret = -ENOMEM;
-               goto err_pm_put;
-       }
-
-       ret = snd_soc_component_initialize(component,
-                                          &mt8195_afe_pcm_dai_component,
-                                          dev);
-       if (ret)
-               goto err_pm_put;
-
-#ifdef CONFIG_DEBUG_FS
-       component->debugfs_prefix = "pcm";
-#endif
-
-       ret = snd_soc_add_component(component,
-                                   afe->dai_drivers,
-                                   afe->num_dai_drivers);
-       if (ret) {
-               dev_warn(dev, "err_dai_component\n");
-               goto err_pm_put;
-       }
-
        ret = regmap_multi_reg_write(afe->regmap, mt8195_afe_reg_defaults,
                                     ARRAY_SIZE(mt8195_afe_reg_defaults));
        if (ret)
@@ -3224,8 +3195,6 @@ err_pm_put:
 
 static void mt8195_afe_pcm_dev_remove(struct platform_device *pdev)
 {
-       snd_soc_unregister_component(&pdev->dev);
-
        pm_runtime_disable(&pdev->dev);
        if (!pm_runtime_status_suspended(&pdev->dev))
                mt8195_afe_runtime_suspend(&pdev->dev);
index 4feb9fb7696792c9533e5007ca8ee9b52fe145ad..53fd8a897b9d27a5894006eaa0e96743b2e728c6 100644 (file)
@@ -934,12 +934,11 @@ SND_SOC_DAILINK_DEFS(ETDM1_IN_BE,
 
 SND_SOC_DAILINK_DEFS(ETDM2_IN_BE,
                     DAILINK_COMP_ARRAY(COMP_CPU("ETDM2_IN")),
-                    DAILINK_COMP_ARRAY(COMP_DUMMY()),
+                    DAILINK_COMP_ARRAY(COMP_EMPTY()),
                     DAILINK_COMP_ARRAY(COMP_EMPTY()));
 
 SND_SOC_DAILINK_DEFS(ETDM1_OUT_BE,
                     DAILINK_COMP_ARRAY(COMP_CPU("ETDM1_OUT")),
-                    DAILINK_COMP_ARRAY(COMP_DUMMY()),
                     DAILINK_COMP_ARRAY(COMP_EMPTY()));
 
 SND_SOC_DAILINK_DEFS(ETDM2_OUT_BE,
@@ -1237,8 +1236,6 @@ static struct snd_soc_dai_link mt8195_mt6359_dai_links[] = {
                        SND_SOC_DAIFMT_NB_NF |
                        SND_SOC_DAIFMT_CBS_CFS,
                .dpcm_capture = 1,
-               .init = mt8195_rt5682_init,
-               .ops = &mt8195_rt5682_etdm_ops,
                .be_hw_params_fixup = mt8195_etdm_hw_params_fixup,
                SND_SOC_DAILINK_REG(ETDM2_IN_BE),
        },
@@ -1249,7 +1246,6 @@ static struct snd_soc_dai_link mt8195_mt6359_dai_links[] = {
                        SND_SOC_DAIFMT_NB_NF |
                        SND_SOC_DAIFMT_CBS_CFS,
                .dpcm_playback = 1,
-               .ops = &mt8195_rt5682_etdm_ops,
                .be_hw_params_fixup = mt8195_etdm_hw_params_fixup,
                SND_SOC_DAILINK_REG(ETDM1_OUT_BE),
        },
@@ -1381,7 +1377,7 @@ static int mt8195_mt6359_dev_probe(struct platform_device *pdev)
        struct snd_soc_dai_link *dai_link;
        struct mtk_soc_card_data *soc_card_data;
        struct mt8195_mt6359_priv *mach_priv;
-       struct device_node *platform_node, *adsp_node, *dp_node, *hdmi_node;
+       struct device_node *platform_node, *adsp_node, *codec_node, *dp_node, *hdmi_node;
        struct mt8195_card_data *card_data;
        int is5682s = 0;
        int init6359 = 0;
@@ -1401,8 +1397,12 @@ static int mt8195_mt6359_dev_probe(struct platform_device *pdev)
        if (!card->name)
                card->name = card_data->name;
 
-       if (strstr(card->name, "_5682s"))
+       if (strstr(card->name, "_5682s")) {
+               codec_node = of_find_compatible_node(NULL, NULL, "realtek,rt5682s");
                is5682s = 1;
+       } else
+               codec_node = of_find_compatible_node(NULL, NULL, "realtek,rt5682i");
+
        soc_card_data = devm_kzalloc(&pdev->dev, sizeof(*card_data), GFP_KERNEL);
        if (!soc_card_data)
                return -ENOMEM;
@@ -1488,12 +1488,27 @@ static int mt8195_mt6359_dev_probe(struct platform_device *pdev)
                                dai_link->codecs->dai_name = "i2s-hifi";
                                dai_link->init = mt8195_hdmi_codec_init;
                        }
-               } else if (strcmp(dai_link->name, "ETDM1_OUT_BE") == 0 ||
-                          strcmp(dai_link->name, "ETDM2_IN_BE") == 0) {
-                       dai_link->codecs->name =
-                               is5682s ? RT5682S_DEV0_NAME : RT5682_DEV0_NAME;
-                       dai_link->codecs->dai_name =
-                               is5682s ? RT5682S_CODEC_DAI : RT5682_CODEC_DAI;
+               } else if (strcmp(dai_link->name, "ETDM1_OUT_BE") == 0) {
+                       if (!codec_node) {
+                               dev_err(&pdev->dev, "Codec not found!\n");
+                       } else {
+                               dai_link->codecs->of_node = codec_node;
+                               dai_link->codecs->name = NULL;
+                               dai_link->codecs->dai_name =
+                                       is5682s ? RT5682S_CODEC_DAI : RT5682_CODEC_DAI;
+                               dai_link->init = mt8195_rt5682_init;
+                               dai_link->ops = &mt8195_rt5682_etdm_ops;
+                       }
+               } else if (strcmp(dai_link->name, "ETDM2_IN_BE") == 0) {
+                       if (!codec_node) {
+                               dev_err(&pdev->dev, "Codec not found!\n");
+                       } else {
+                               dai_link->codecs->of_node = codec_node;
+                               dai_link->codecs->name = NULL;
+                               dai_link->codecs->dai_name =
+                                       is5682s ? RT5682S_CODEC_DAI : RT5682_CODEC_DAI;
+                               dai_link->ops = &mt8195_rt5682_etdm_ops;
+                       }
                } else if (strcmp(dai_link->name, "DL_SRC_BE") == 0 ||
                           strcmp(dai_link->name, "UL_SRC1_BE") == 0 ||
                           strcmp(dai_link->name, "UL_SRC2_BE") == 0) {
index ed4bb551bfbb92c965eba25c048ce4fa12648283..b7fd503a166668d3fe41500bf52e58de129c92de 100644 (file)
@@ -32,12 +32,14 @@ static int sc8280xp_snd_init(struct snd_soc_pcm_runtime *rtd)
        case WSA_CODEC_DMA_RX_0:
        case WSA_CODEC_DMA_RX_1:
                /*
-                * set limit of 0dB on Digital Volume for Speakers,
-                * this can prevent damage of speakers to some extent without
-                * active speaker protection
+                * Set limit of -3 dB on Digital Volume and 0 dB on PA Volume
+                * to reduce the risk of speaker damage until we have active
+                * speaker protection in place.
                 */
-               snd_soc_limit_volume(card, "WSA_RX0 Digital Volume", 84);
-               snd_soc_limit_volume(card, "WSA_RX1 Digital Volume", 84);
+               snd_soc_limit_volume(card, "WSA_RX0 Digital Volume", 81);
+               snd_soc_limit_volume(card, "WSA_RX1 Digital Volume", 81);
+               snd_soc_limit_volume(card, "SpkrLeft PA Volume", 17);
+               snd_soc_limit_volume(card, "SpkrRight PA Volume", 17);
                break;
        default:
                break;
index 1a504ebd3a0e9c6f0da9ea434633cf82c331c60b..6c89c7331229f0ac86b29f3bffb05f6623c98067 100644 (file)
@@ -446,7 +446,7 @@ static const struct rockchip_sound_route rockchip_routes[] = {
 
 struct dailink_match_data {
        const char *compatible;
-       struct bus_type *bus_type;
+       const struct bus_type *bus_type;
 };
 
 static const struct dailink_match_data dailink_match[] = {
index f8524b5bfb330652afb48091b7afab12b1a70d6e..516350533e73f8ee1084164e44068f825eb7fafe 100644 (file)
@@ -1037,7 +1037,7 @@ component_dai_empty:
        return -EINVAL;
 }
 
-#define MAX_DEFAULT_CH_MAP_SIZE 7
+#define MAX_DEFAULT_CH_MAP_SIZE 8
 static struct snd_soc_dai_link_ch_map default_ch_map_sync[MAX_DEFAULT_CH_MAP_SIZE] = {
        { .cpu = 0, .codec = 0 },
        { .cpu = 1, .codec = 1 },
@@ -1046,6 +1046,7 @@ static struct snd_soc_dai_link_ch_map default_ch_map_sync[MAX_DEFAULT_CH_MAP_SIZ
        { .cpu = 4, .codec = 4 },
        { .cpu = 5, .codec = 5 },
        { .cpu = 6, .codec = 6 },
+       { .cpu = 7, .codec = 7 },
 };
 static struct snd_soc_dai_link_ch_map default_ch_map_1cpu[MAX_DEFAULT_CH_MAP_SIZE] = {
        { .cpu = 0, .codec = 0 },
@@ -1055,6 +1056,7 @@ static struct snd_soc_dai_link_ch_map default_ch_map_1cpu[MAX_DEFAULT_CH_MAP_SIZ
        { .cpu = 0, .codec = 4 },
        { .cpu = 0, .codec = 5 },
        { .cpu = 0, .codec = 6 },
+       { .cpu = 0, .codec = 7 },
 };
 static struct snd_soc_dai_link_ch_map default_ch_map_1codec[MAX_DEFAULT_CH_MAP_SIZE] = {
        { .cpu = 0, .codec = 0 },
@@ -1064,6 +1066,7 @@ static struct snd_soc_dai_link_ch_map default_ch_map_1codec[MAX_DEFAULT_CH_MAP_S
        { .cpu = 4, .codec = 0 },
        { .cpu = 5, .codec = 0 },
        { .cpu = 6, .codec = 0 },
+       { .cpu = 7, .codec = 0 },
 };
 static int snd_soc_compensate_channel_connection_map(struct snd_soc_card *card,
                                                     struct snd_soc_dai_link *dai_link)
index 93b189c2d2ee2f8886e8c1cbbc52100ea2c2be72..0dca139322f3d22a14c7f6d45a9a343090a9f2bb 100644 (file)
@@ -137,7 +137,6 @@ static int trace_filter_parse(struct snd_sof_dev *sdev, char *string,
                        dev_err(sdev->dev,
                                "Parsing filter entry '%s' failed with %d\n",
                                entry, entry_len);
-                       kfree(*out);
                        return -EINVAL;
                }
        }
@@ -209,13 +208,13 @@ static ssize_t dfsentry_trace_filter_write(struct file *file, const char __user
                ret = ipc3_trace_update_filter(sdev, num_elems, elems);
                if (ret < 0) {
                        dev_err(sdev->dev, "Filter update failed: %d\n", ret);
-                       kfree(elems);
                        goto error;
                }
        }
        ret = count;
 error:
        kfree(string);
+       kfree(elems);
        return ret;
 }
 
index 3539b0a66e1beedb6a5e2841a999b97f39cf75c0..c79479afa8d0db70be1a756d09ff85a1a353bbc9 100644 (file)
@@ -482,13 +482,10 @@ void sof_ipc4_update_cpc_from_manifest(struct snd_sof_dev *sdev,
                msg = "No CPC match in the firmware file's manifest";
 
 no_cpc:
-       dev_warn(sdev->dev, "%s (UUID: %pUL): %s (ibs/obs: %u/%u)\n",
-                fw_module->man4_module_entry.name,
-                &fw_module->man4_module_entry.uuid, msg, basecfg->ibs,
-                basecfg->obs);
-       dev_warn_once(sdev->dev, "Please try to update the firmware.\n");
-       dev_warn_once(sdev->dev, "If the issue persists, file a bug at\n");
-       dev_warn_once(sdev->dev, "https://github.com/thesofproject/sof/issues/\n");
+       dev_dbg(sdev->dev, "%s (UUID: %pUL): %s (ibs/obs: %u/%u)\n",
+               fw_module->man4_module_entry.name,
+               &fw_module->man4_module_entry.uuid, msg, basecfg->ibs,
+               basecfg->obs);
 }
 
 const struct sof_ipc_fw_loader_ops ipc4_loader_ops = {
index 39039a647cca335aa362e5671ab0d51e6c0abcf1..85d3f390e4b290774687086f37b2a73473117e54 100644 (file)
@@ -768,10 +768,8 @@ static void sof_ipc4_build_time_info(struct snd_sof_dev *sdev, struct snd_sof_pc
        info->llp_offset = offsetof(struct sof_ipc4_fw_registers, llp_evad_reading_slot) +
                                        sdev->fw_info_box.offset;
        sof_mailbox_read(sdev, info->llp_offset, &llp_slot, sizeof(llp_slot));
-       if (llp_slot.node_id != dai_copier->data.gtw_cfg.node_id) {
-               dev_info(sdev->dev, "no llp found, fall back to default HDA path");
+       if (llp_slot.node_id != dai_copier->data.gtw_cfg.node_id)
                info->llp_offset = 0;
-       }
 }
 
 static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component,
index 702386823d17263ffa6acacb6d0bd71adb7c83d9..f41c309558579f1c3c4b1d0e7bcca1b2e64d8747 100644 (file)
@@ -577,6 +577,11 @@ static const struct of_device_id sun4i_spdif_of_match[] = {
                .compatible = "allwinner,sun50i-h6-spdif",
                .data = &sun50i_h6_spdif_quirks,
        },
+       {
+               .compatible = "allwinner,sun50i-h616-spdif",
+               /* Essentially the same as the H6, but without RX */
+               .data = &sun50i_h6_spdif_quirks,
+       },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match);
index 33db334e6556674414047b1a1d660ec3e8083100..60fcb872a80b6c1f79afcec88e959df33a04a4da 100644 (file)
@@ -261,6 +261,8 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
        int ret, i, cur, err, pins, clock_id;
        const u8 *sources;
        int proto = fmt->protocol;
+       bool readable, writeable;
+       u32 bmControls;
 
        entity_id &= 0xff;
 
@@ -292,11 +294,27 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
                sources = GET_VAL(selector, proto, baCSourceID);
                cur = 0;
 
+               if (proto == UAC_VERSION_3)
+                       bmControls = le32_to_cpu(*(__le32 *)(&selector->v3.baCSourceID[0] + pins));
+               else
+                       bmControls = *(__u8 *)(&selector->v2.baCSourceID[0] + pins);
+
+               readable = uac_v2v3_control_is_readable(bmControls,
+                                                       UAC2_CX_CLOCK_SELECTOR);
+               writeable = uac_v2v3_control_is_writeable(bmControls,
+                                                         UAC2_CX_CLOCK_SELECTOR);
+
                if (pins == 1) {
                        ret = 1;
                        goto find_source;
                }
 
+               /* for now just warn about buggy device */
+               if (!readable)
+                       usb_audio_warn(chip,
+                               "%s(): clock selector control is not readable, id %d\n",
+                               __func__, clock_id);
+
                /* the entity ID we are looking at is a selector.
                 * find out what it currently selects */
                ret = uac_clock_selector_get_val(chip, clock_id);
@@ -325,17 +343,29 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
                                              visited, validate);
                if (ret > 0) {
                        /* Skip setting clock selector again for some devices */
-                       if (chip->quirk_flags & QUIRK_FLAG_SKIP_CLOCK_SELECTOR)
+                       if (chip->quirk_flags & QUIRK_FLAG_SKIP_CLOCK_SELECTOR ||
+                           !writeable)
                                return ret;
                        err = uac_clock_selector_set_val(chip, entity_id, cur);
-                       if (err < 0)
+                       if (err < 0) {
+                               if (pins == 1) {
+                                       usb_audio_dbg(chip,
+                                                     "%s(): selector returned an error, "
+                                                     "assuming a firmware bug, id %d, ret %d\n",
+                                                     __func__, clock_id, err);
+                                       return ret;
+                               }
                                return err;
+                       }
                }
 
                if (!validate || ret > 0 || !chip->autoclock)
                        return ret;
 
        find_others:
+               if (!writeable)
+                       return -ENXIO;
+
                /* The current clock source is invalid, try others. */
                for (i = 1; i <= pins; i++) {
                        if (i == cur)
index ab5fed9f55b60ec8b255448a9cbb435f9e04d96b..3b45d0ee769389aafb3e752cec5b96223c52077b 100644 (file)
@@ -470,9 +470,11 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
                                           int clock)
 {
        struct usb_device *dev = chip->dev;
+       struct usb_host_interface *alts;
        unsigned int *table;
        unsigned int nr_rates;
        int i, err;
+       u32 bmControls;
 
        /* performing the rate verification may lead to unexpected USB bus
         * behavior afterwards by some unknown reason.  Do this only for the
@@ -481,6 +483,24 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
        if (!(chip->quirk_flags & QUIRK_FLAG_VALIDATE_RATES))
                return 0; /* don't perform the validation as default */
 
+       alts = snd_usb_get_host_interface(chip, fp->iface, fp->altsetting);
+       if (!alts)
+               return 0;
+
+       if (fp->protocol == UAC_VERSION_3) {
+               struct uac3_as_header_descriptor *as = snd_usb_find_csint_desc(
+                               alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
+               bmControls = le32_to_cpu(as->bmControls);
+       } else {
+               struct uac2_as_header_descriptor *as = snd_usb_find_csint_desc(
+                               alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
+               bmControls = as->bmControls;
+       }
+
+       if (!uac_v2v3_control_is_readable(bmControls,
+                               UAC2_AS_VAL_ALT_SETTINGS))
+               return 0;
+
        table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;
index 1ec177fe284eddd7eb431d56083e82886c858550..820d3e4b672ab603b6f2cb91ba95d12b60d519f5 100644 (file)
@@ -1085,7 +1085,7 @@ int snd_usb_midi_v2_create(struct snd_usb_audio *chip,
        }
        if ((quirk && quirk->type != QUIRK_MIDI_STANDARD_INTERFACE) ||
            iface->num_altsetting < 2) {
-               usb_audio_info(chip, "Quirk or no altest; falling back to MIDI 1.0\n");
+               usb_audio_info(chip, "Quirk or no altset; falling back to MIDI 1.0\n");
                goto fallback_to_midi1;
        }
        hostif = &iface->altsetting[1];
index 1de3ddc50eb6accdb267ab1e722caffe532d6df2..6de605a601e5f89ff7a9c12b36db81eed6d876c3 100644 (file)
@@ -5361,9 +5361,9 @@ static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
                        if (private->vol_sw_hw_switch[index])
                                scarlett2_vol_ctl_set_writable(mixer, i, 0);
 
-                       snprintf(s, sizeof(s),
-                                "Line Out %02d Volume Control Playback Enum",
-                                i + 1);
+                       scnprintf(s, sizeof(s),
+                                 "Line Out %02d Volume Control Playback Enum",
+                                 i + 1);
                        err = scarlett2_add_new_ctl(mixer,
                                                    &scarlett2_sw_hw_enum_ctl,
                                                    i, 1, s,
@@ -5406,8 +5406,8 @@ static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
 
        /* Add input level (line/inst) controls */
        for (i = 0; i < info->level_input_count; i++) {
-               snprintf(s, sizeof(s), fmt, i + 1 + info->level_input_first,
-                        "Level", "Enum");
+               scnprintf(s, sizeof(s), fmt, i + 1 + info->level_input_first,
+                         "Level", "Enum");
                err = scarlett2_add_new_ctl(mixer, &scarlett2_level_enum_ctl,
                                            i, 1, s, &private->level_ctls[i]);
                if (err < 0)
@@ -5416,7 +5416,7 @@ static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
 
        /* Add input pad controls */
        for (i = 0; i < info->pad_input_count; i++) {
-               snprintf(s, sizeof(s), fmt, i + 1, "Pad", "Switch");
+               scnprintf(s, sizeof(s), fmt, i + 1, "Pad", "Switch");
                err = scarlett2_add_new_ctl(mixer, &scarlett2_pad_ctl,
                                            i, 1, s, &private->pad_ctls[i]);
                if (err < 0)
@@ -5425,8 +5425,8 @@ static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
 
        /* Add input air controls */
        for (i = 0; i < info->air_input_count; i++) {
-               snprintf(s, sizeof(s), fmt, i + 1 + info->air_input_first,
-                        "Air", info->air_option ? "Enum" : "Switch");
+               scnprintf(s, sizeof(s), fmt, i + 1 + info->air_input_first,
+                         "Air", info->air_option ? "Enum" : "Switch");
                err = scarlett2_add_new_ctl(
                        mixer, &scarlett2_air_ctl[info->air_option],
                        i, 1, s, &private->air_ctls[i]);
@@ -5481,9 +5481,9 @@ static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
 
                for (i = 0; i < info->gain_input_count; i++) {
                        if (i % 2) {
-                               snprintf(s, sizeof(s),
-                                        "Line In %d-%d Link Capture Switch",
-                                        i, i + 1);
+                               scnprintf(s, sizeof(s),
+                                         "Line In %d-%d Link Capture Switch",
+                                         i, i + 1);
                                err = scarlett2_add_new_ctl(
                                        mixer, &scarlett2_input_link_ctl,
                                        i / 2, 1, s,
@@ -5492,30 +5492,30 @@ static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
                                        return err;
                        }
 
-                       snprintf(s, sizeof(s), fmt, i + 1,
-                                "Gain", "Volume");
+                       scnprintf(s, sizeof(s), fmt, i + 1,
+                                 "Gain", "Volume");
                        err = scarlett2_add_new_ctl(
                                mixer, &scarlett2_input_gain_ctl,
                                i, 1, s, &private->input_gain_ctls[i]);
                        if (err < 0)
                                return err;
 
-                       snprintf(s, sizeof(s), fmt, i + 1,
-                                "Autogain", "Switch");
+                       scnprintf(s, sizeof(s), fmt, i + 1,
+                                 "Autogain", "Switch");
                        err = scarlett2_add_new_ctl(
                                mixer, &scarlett2_autogain_switch_ctl,
                                i, 1, s, &private->autogain_ctls[i]);
                        if (err < 0)
                                return err;
 
-                       snprintf(s, sizeof(s), fmt, i + 1,
-                                "Autogain Status", "Enum");
+                       scnprintf(s, sizeof(s), fmt, i + 1,
+                                 "Autogain Status", "Enum");
                        err = scarlett2_add_new_ctl(
                                mixer, &scarlett2_autogain_status_ctl,
                                i, 1, s, &private->autogain_status_ctls[i]);
 
-                       snprintf(s, sizeof(s), fmt, i + 1,
-                                "Safe", "Switch");
+                       scnprintf(s, sizeof(s), fmt, i + 1,
+                                 "Safe", "Switch");
                        err = scarlett2_add_new_ctl(
                                mixer, &scarlett2_safe_ctl,
                                i, 1, s, &private->safe_ctls[i]);
@@ -5902,8 +5902,8 @@ static int scarlett2_add_direct_monitor_ctls(struct usb_mixer_interface *mixer)
                        for (k = 0; k < private->num_mix_in; k++, index++) {
                                char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
 
-                               snprintf(name, sizeof(name), format,
-                                        mix_type, 'A' + j, k + 1);
+                               scnprintf(name, sizeof(name), format,
+                                         mix_type, 'A' + j, k + 1);
 
                                err = scarlett2_add_new_ctl(
                                        mixer, &scarlett2_monitor_mix_ctl,
index 07cc6a201579aa864f6ebd113d638cf4a36153d8..09712e61c606ef21c2b39bb80b8906a70f6ed4ff 100644 (file)
@@ -2031,10 +2031,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR),
        DEVICE_FLG(0x0499, 0x1509, /* Steinberg UR22 */
                   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+       DEVICE_FLG(0x0499, 0x3108, /* Yamaha YIT-W12TX */
+                  QUIRK_FLAG_GET_SAMPLE_RATE),
        DEVICE_FLG(0x04d8, 0xfeea, /* Benchmark DAC1 Pre */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
        DEVICE_FLG(0x04e8, 0xa051, /* Samsung USBC Headset (AKG) */
                   QUIRK_FLAG_SKIP_CLOCK_SELECTOR | QUIRK_FLAG_CTL_MSG_DELAY_5M),
+       DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
+                  QUIRK_FLAG_IFACE_SKIP_CLOSE),
        DEVICE_FLG(0x054c, 0x0b8c, /* Sony WALKMAN NW-A45 DAC */
                   QUIRK_FLAG_SET_IFACE_FIRST),
        DEVICE_FLG(0x0556, 0x0014, /* Phoenix Audio TMX320VC */
@@ -2073,14 +2077,22 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
        DEVICE_FLG(0x0763, 0x2031, /* M-Audio Fast Track C600 */
                   QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+       DEVICE_FLG(0x07fd, 0x000b, /* MOTU M Series 2nd hardware revision */
+                  QUIRK_FLAG_CTL_MSG_DELAY_1M),
        DEVICE_FLG(0x08bb, 0x2702, /* LineX FM Transmitter */
                   QUIRK_FLAG_IGNORE_CTL_ERROR),
        DEVICE_FLG(0x0951, 0x16ad, /* Kingston HyperX */
                   QUIRK_FLAG_CTL_MSG_DELAY_1M),
        DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */
                   QUIRK_FLAG_CTL_MSG_DELAY_1M),
+       DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+                  QUIRK_FLAG_FIXED_RATE),
+       DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+                  QUIRK_FLAG_FIXED_RATE),
        DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */
                   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+       DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+                  QUIRK_FLAG_GET_SAMPLE_RATE),
        DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
        DEVICE_FLG(0x1397, 0x0507, /* Behringer UMC202HD */
@@ -2113,6 +2125,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
        DEVICE_FLG(0x1901, 0x0191, /* GE B850V3 CP2114 audio interface */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
+       DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */
+                  QUIRK_FLAG_GET_SAMPLE_RATE),
+       DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
+                  QUIRK_FLAG_GET_SAMPLE_RATE),
        DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */
                   QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
        DEVICE_FLG(0x2040, 0x7201, /* Hauppauge HVR-950Q-MXL */
@@ -2155,6 +2171,12 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_IGNORE_CTL_ERROR),
        DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
                   QUIRK_FLAG_GET_SAMPLE_RATE),
+       DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
+                  QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+       DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
+                  QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+       DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
+                  QUIRK_FLAG_GENERIC_IMPLICIT_FB),
        DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
                   QUIRK_FLAG_IGNORE_CTL_ERROR),
        DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
@@ -2163,22 +2185,6 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_ALIGN_TRANSFER),
        DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */
                   QUIRK_FLAG_ALIGN_TRANSFER),
-       DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
-                  QUIRK_FLAG_GET_SAMPLE_RATE),
-       DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
-                  QUIRK_FLAG_GENERIC_IMPLICIT_FB),
-       DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
-                  QUIRK_FLAG_GENERIC_IMPLICIT_FB),
-       DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
-                  QUIRK_FLAG_GENERIC_IMPLICIT_FB),
-       DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
-                  QUIRK_FLAG_IFACE_SKIP_CLOSE),
-       DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
-                  QUIRK_FLAG_FIXED_RATE),
-       DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
-                  QUIRK_FLAG_FIXED_RATE),
-       DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
-                  QUIRK_FLAG_GET_SAMPLE_RATE),
 
        /* Vendor matches */
        VENDOR_FLG(0x045e, /* MS Lifecam */
index e2847c040f750f98a77cb0bfe4ec2a548f5eb691..b158c3cb8e5f5fce75e22306c7707935465bc57f 100644 (file)
@@ -91,8 +91,6 @@ static void virtsnd_event_notify_cb(struct virtqueue *vqueue)
                        virtsnd_event_dispatch(snd, event);
                        virtsnd_event_send(vqueue, event, true, GFP_ATOMIC);
                }
-               if (unlikely(virtqueue_is_broken(vqueue)))
-                       break;
        } while (!virtqueue_enable_cb(vqueue));
        spin_unlock_irqrestore(&queue->lock, flags);
 }
index 18dc5aca2e0c5b2a1e6c0d4391b34865b797995e..9dabea01277f845726ee2a908b9288c9f0e5e918 100644 (file)
@@ -303,8 +303,6 @@ void virtsnd_ctl_notify_cb(struct virtqueue *vqueue)
                virtqueue_disable_cb(vqueue);
                while ((msg = virtqueue_get_buf(vqueue, &length)))
                        virtsnd_ctl_msg_complete(msg);
-               if (unlikely(virtqueue_is_broken(vqueue)))
-                       break;
        } while (!virtqueue_enable_cb(vqueue));
        spin_unlock_irqrestore(&queue->lock, flags);
 }
index 542446c4c7ba8e4da2d7dd5b701c829e45c24084..8c32efaf4c5294e6aba0adcfb8a40a22d3a0d261 100644 (file)
@@ -358,8 +358,6 @@ static inline void virtsnd_pcm_notify_cb(struct virtio_snd_queue *queue)
                virtqueue_disable_cb(queue->vqueue);
                while ((msg = virtqueue_get_buf(queue->vqueue, &written_bytes)))
                        virtsnd_pcm_msg_complete(msg, written_bytes);
-               if (unlikely(virtqueue_is_broken(queue->vqueue)))
-                       break;
        } while (!virtqueue_enable_cb(queue->vqueue));
        spin_unlock_irqrestore(&queue->lock, flags);
 }
index f4542d2718f4f635ce8879da123764e72e9af47b..29cb275a219d7fb38fa0d16e6ba48e91c9d032b4 100644 (file)
 #define X86_FEATURE_CAT_L3             ( 7*32+ 4) /* Cache Allocation Technology L3 */
 #define X86_FEATURE_CAT_L2             ( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3             ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_TDX_HOST_PLATFORM  ( 7*32+ 7) /* Platform supports being a TDX host */
 #define X86_FEATURE_HW_PSTATE          ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK      ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_XCOMPACTED         ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
 #define X86_FEATURE_SMBA               (11*32+21) /* "" Slow Memory Bandwidth Allocation */
 #define X86_FEATURE_BMEC               (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
 #define X86_FEATURE_USER_SHSTK         (11*32+23) /* Shadow stack support for user mode applications */
-
 #define X86_FEATURE_SRSO               (11*32+24) /* "" AMD BTB untrain RETs */
 #define X86_FEATURE_SRSO_ALIAS         (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
 #define X86_FEATURE_IBPB_ON_VMEXIT     (11*32+26) /* "" Issue an IBPB only on VMEXIT */
+#define X86_FEATURE_APIC_MSRS_FENCE    (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
+#define X86_FEATURE_ZEN2               (11*32+28) /* "" CPU based on Zen2 microarchitecture */
+#define X86_FEATURE_ZEN3               (11*32+29) /* "" CPU based on Zen3 microarchitecture */
+#define X86_FEATURE_ZEN4               (11*32+30) /* "" CPU based on Zen4 microarchitecture */
+#define X86_FEATURE_ZEN1               (11*32+31) /* "" CPU based on Zen1 microarchitecture */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
 #define X86_BUG_EIBRS_PBRSB            X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
 #define X86_BUG_SMT_RSB                        X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
 #define X86_BUG_GDS                    X86_BUG(30) /* CPU is affected by Gather Data Sampling */
+#define X86_BUG_TDX_PW_MCE             X86_BUG(31) /* CPU may incur #MC if non-TD software does partial write to TDX private memory */
 
 /* BUG word 2 */
 #define X86_BUG_SRSO                   X86_BUG(1*32 + 0) /* AMD SRSO bug */
index 1d51e1850ed03d46e84c71de0c451067d0baac5b..f1bd7b91b3c63735738825f15cd3c82fca7579ce 100644 (file)
 #define LBR_INFO_CYCLES                        0xffff
 #define LBR_INFO_BR_TYPE_OFFSET                56
 #define LBR_INFO_BR_TYPE               (0xfull << LBR_INFO_BR_TYPE_OFFSET)
+#define LBR_INFO_BR_CNTR_OFFSET                32
+#define LBR_INFO_BR_CNTR_NUM           4
+#define LBR_INFO_BR_CNTR_BITS          2
+#define LBR_INFO_BR_CNTR_MASK          GENMASK_ULL(LBR_INFO_BR_CNTR_BITS - 1, 0)
+#define LBR_INFO_BR_CNTR_FULL_MASK     GENMASK_ULL(LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS - 1, 0)
 
 #define MSR_ARCH_LBR_CTL               0x000014ce
 #define ARCH_LBR_CTL_LBREN             BIT(0)
 #define MSR_RELOAD_PMC0                        0x000014c1
 #define MSR_RELOAD_FIXED_CTR0          0x00001309
 
+/* KeyID partitioning between MKTME and TDX */
+#define MSR_IA32_MKTME_KEYID_PARTITIONING      0x00000087
+
 /*
  * AMD64 MSRs. Not complete. See the architecture manual for a more
  * complete list.
index 11ff975242cac7cff4dfaab3a4591dc4cb82eb1d..e2ff22b379a44c584b7325249c48db9b3368c7d8 100644 (file)
@@ -4,7 +4,7 @@
 
 #define __GEN_RMWcc(fullop, var, cc, ...)                              \
 do {                                                                   \
-       asm_volatile_goto (fullop "; j" cc " %l[cc_label]"              \
+       asm goto (fullop "; j" cc " %l[cc_label]"               \
                        : : "m" (var), ## __VA_ARGS__                   \
                        : "memory" : cc_label);                         \
        return 0;                                                       \
index 1a6a1f98794967d260e2898b0dbb62f830d45664..a448d0964fc06ebd0c15cd0b550e3c2cefbf57bf 100644 (file)
@@ -562,4 +562,7 @@ struct kvm_pmu_event_filter {
 /* x86-specific KVM_EXIT_HYPERCALL flags. */
 #define KVM_EXIT_HYPERCALL_LONG_MODE   BIT(0)
 
+#define KVM_X86_DEFAULT_VM     0
+#define KVM_X86_SW_PROTECTED_VM        1
+
 #endif /* _ASM_X86_KVM_H */
index d055b82d22ccd083975a874e5a96abbeff8f496b..59cf6f9065aa84d8a4a6a92999f3d3d1f3367681 100644 (file)
@@ -1,11 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /* Copyright 2002 Andi Kleen */
 
+#include <linux/export.h>
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative.h>
-#include <asm/export.h>
 
 .section .noinstr.text, "ax"
 
@@ -39,7 +39,7 @@ SYM_TYPED_FUNC_START(__memcpy)
 SYM_FUNC_END(__memcpy)
 EXPORT_SYMBOL(__memcpy)
 
-SYM_FUNC_ALIAS(memcpy, __memcpy)
+SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
 EXPORT_SYMBOL(memcpy)
 
 SYM_FUNC_START_LOCAL(memcpy_orig)
index 7c59a704c4584bf7ef3e6a50f2021c31e6f15029..0199d56cb479d88ce0bc6556c092ea87ae9ceb3b 100644 (file)
@@ -1,10 +1,10 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright 2002 Andi Kleen, SuSE Labs */
 
+#include <linux/export.h>
 #include <linux/linkage.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative.h>
-#include <asm/export.h>
 
 .section .noinstr.text, "ax"
 
@@ -40,7 +40,7 @@ SYM_FUNC_START(__memset)
 SYM_FUNC_END(__memset)
 EXPORT_SYMBOL(__memset)
 
-SYM_FUNC_ALIAS(memset, __memset)
+SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
 EXPORT_SYMBOL(memset)
 
 SYM_FUNC_START_LOCAL(memset_orig)
index 934e2777a2dbcd9062f873c039f1853867937ed3..64df118376df66d6ddeb4895054eb12c7c40942b 100644 (file)
@@ -32,6 +32,7 @@ FEATURE_TESTS_BASIC :=                  \
         backtrace                       \
         dwarf                           \
         dwarf_getlocations              \
+        dwarf_getcfi                    \
         eventfd                         \
         fortify-source                  \
         get_current_dir_name            \
index dad79ede4e0ae0030ee401a1daf5d59ff871dcb6..37722e509eb9f1924380e65542b55937f3e0cc9e 100644 (file)
@@ -7,6 +7,7 @@ FILES=                                          \
          test-bionic.bin                        \
          test-dwarf.bin                         \
          test-dwarf_getlocations.bin            \
+         test-dwarf_getcfi.bin                  \
          test-eventfd.bin                       \
          test-fortify-source.bin                \
          test-get_current_dir_name.bin          \
@@ -154,6 +155,9 @@ $(OUTPUT)test-dwarf.bin:
 $(OUTPUT)test-dwarf_getlocations.bin:
        $(BUILD) $(DWARFLIBS)
 
+$(OUTPUT)test-dwarf_getcfi.bin:
+       $(BUILD) $(DWARFLIBS)
+
 $(OUTPUT)test-libelf-getphdrnum.bin:
        $(BUILD) -lelf
 
diff --git a/tools/build/feature/test-dwarf_getcfi.c b/tools/build/feature/test-dwarf_getcfi.c
new file mode 100644 (file)
index 0000000..50e7d7c
--- /dev/null
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <elfutils/libdw.h>
+
+int main(void)
+{
+       Dwarf *dwarf = NULL;
+       return dwarf_getcfi(dwarf) == NULL;
+}
index eb6303ff446ed93aadaeaf1ee553515eb8f39a05..4cfcef9da3e434955396dc5ccd8ccdfd1e063ed7 100644 (file)
@@ -4,9 +4,9 @@
 /*
  * Check OpenCSD library version is sufficient to provide required features
  */
-#define OCSD_MIN_VER ((1 << 16) | (1 << 8) | (1))
+#define OCSD_MIN_VER ((1 << 16) | (2 << 8) | (1))
 #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
-#error "OpenCSD >= 1.1.1 is required"
+#error "OpenCSD >= 1.2.1 is required"
 #endif
 
 int main(void)
index 33f4a51d715ec10745e6195f9ad7ee96648ad633..4bbadb7ec93af248ac08ff3a7f6dc400b5f8fd8c 100644 (file)
@@ -1 +1,2 @@
 counter_example-y += counter_example.o
+counter_watch_events-y += counter_watch_events.o
index b2c2946f44c9f3320188692353a160853cc4f05d..d82d35a520f610260abb43f6ae28e56d73268231 100644 (file)
@@ -12,9 +12,10 @@ endif
 # (this improves performance and avoids hard-to-debug behaviour);
 MAKEFLAGS += -r
 
-override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include \
+        -I$(srctree)/tools/include
 
-ALL_TARGETS := counter_example
+ALL_TARGETS := counter_example counter_watch_events
 ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
 
 all: $(ALL_PROGRAMS)
@@ -37,12 +38,19 @@ $(COUNTER_EXAMPLE): prepare FORCE
 $(OUTPUT)counter_example: $(COUNTER_EXAMPLE)
        $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
 
+COUNTER_WATCH_EVENTS := $(OUTPUT)counter_watch_events.o
+$(COUNTER_WATCH_EVENTS): prepare FORCE
+       $(Q)$(MAKE) $(build)=counter_watch_events
+$(OUTPUT)counter_watch_events: $(COUNTER_WATCH_EVENTS)
+       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+
 clean:
        rm -f $(ALL_PROGRAMS)
        rm -rf $(OUTPUT)include/linux/counter.h
        rm -df $(OUTPUT)include/linux
        rm -df $(OUTPUT)include
        find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+       find $(or $(OUTPUT),.) -name '\.*.o.cmd' -delete
 
 install: $(ALL_PROGRAMS)
        install -d -m 755 $(DESTDIR)$(bindir);          \
diff --git a/tools/counter/counter_watch_events.c b/tools/counter/counter_watch_events.c
new file mode 100644 (file)
index 0000000..107631e
--- /dev/null
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Counter Watch Events - Test various counter watch events in a userspace application
+ *
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@foss.st.com>.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <linux/counter.h>
+#include <linux/kernel.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+static struct counter_watch simple_watch[] = {
+       {
+               /* Component data: Count 0 count */
+               .component.type = COUNTER_COMPONENT_COUNT,
+               .component.scope = COUNTER_SCOPE_COUNT,
+               .component.parent = 0,
+               /* Event type: overflow or underflow */
+               .event = COUNTER_EVENT_OVERFLOW_UNDERFLOW,
+               /* Device event channel 0 */
+               .channel = 0,
+       },
+};
+
+static const char * const counter_event_type_name[] = {
+       "COUNTER_EVENT_OVERFLOW",
+       "COUNTER_EVENT_UNDERFLOW",
+       "COUNTER_EVENT_OVERFLOW_UNDERFLOW",
+       "COUNTER_EVENT_THRESHOLD",
+       "COUNTER_EVENT_INDEX",
+       "COUNTER_EVENT_CHANGE_OF_STATE",
+       "COUNTER_EVENT_CAPTURE",
+};
+
+static const char * const counter_component_type_name[] = {
+       "COUNTER_COMPONENT_NONE",
+       "COUNTER_COMPONENT_SIGNAL",
+       "COUNTER_COMPONENT_COUNT",
+       "COUNTER_COMPONENT_FUNCTION",
+       "COUNTER_COMPONENT_SYNAPSE_ACTION",
+       "COUNTER_COMPONENT_EXTENSION",
+};
+
+static const char * const counter_scope_name[] = {
+       "COUNTER_SCOPE_DEVICE",
+       "COUNTER_SCOPE_SIGNAL",
+       "COUNTER_SCOPE_COUNT",
+};
+
+static void print_watch(struct counter_watch *watch, int nwatch)
+{
+       int i;
+
+       /* prints the watch array in C-like structure */
+       printf("watch[%d] = {\n", nwatch);
+       for (i = 0; i < nwatch; i++) {
+               printf(" [%d] =\t{\n"
+                      "\t\t.component.type = %s\n"
+                      "\t\t.component.scope = %s\n"
+                      "\t\t.component.parent = %d\n"
+                      "\t\t.component.id = %d\n"
+                      "\t\t.event = %s\n"
+                      "\t\t.channel = %d\n"
+                      "\t},\n",
+                      i,
+                      counter_component_type_name[watch[i].component.type],
+                      counter_scope_name[watch[i].component.scope],
+                      watch[i].component.parent,
+                      watch[i].component.id,
+                      counter_event_type_name[watch[i].event],
+                      watch[i].channel);
+       }
+       printf("};\n");
+}
+
+static void print_usage(void)
+{
+       fprintf(stderr, "Usage:\n\n"
+               "counter_watch_events [options] [-w <watchoptions>]\n"
+               "counter_watch_events [options] [-w <watch1 options>] [-w <watch2 options>]...\n"
+               "\n"
+               "When no --watch option has been provided, simple watch example is used:\n"
+               "counter_watch_events [options] -w comp_count,scope_count,evt_ovf_udf\n"
+               "\n"
+               "Test various watch events for given counter device.\n"
+               "\n"
+               "Options:\n"
+               "  -d, --debug                Prints debug information\n"
+               "  -h, --help                 Prints usage\n"
+               "  -n, --device-num <n>       Use /dev/counter<n> [default: /dev/counter0]\n"
+               "  -l, --loop <n>             Loop for <n> events [default: 0 (forever)]\n"
+               "  -w, --watch <watchoptions> comma-separated list of watch options\n"
+               "\n"
+               "Watch options:\n"
+               "  scope_device               (COUNTER_SCOPE_DEVICE) [default: scope_device]\n"
+               "  scope_signal               (COUNTER_SCOPE_SIGNAL)\n"
+               "  scope_count                (COUNTER_SCOPE_COUNT)\n"
+               "\n"
+               "  comp_none                  (COUNTER_COMPONENT_NONE) [default: comp_none]\n"
+               "  comp_signal                (COUNTER_COMPONENT_SIGNAL)\n"
+               "  comp_count                 (COUNTER_COMPONENT_COUNT)\n"
+               "  comp_function              (COUNTER_COMPONENT_FUNCTION)\n"
+               "  comp_synapse_action        (COUNTER_COMPONENT_SYNAPSE_ACTION)\n"
+               "  comp_extension             (COUNTER_COMPONENT_EXTENSION)\n"
+               "\n"
+               "  evt_ovf                    (COUNTER_EVENT_OVERFLOW) [default: evt_ovf]\n"
+               "  evt_udf                    (COUNTER_EVENT_UNDERFLOW)\n"
+               "  evt_ovf_udf                (COUNTER_EVENT_OVERFLOW_UNDERFLOW)\n"
+               "  evt_threshold              (COUNTER_EVENT_THRESHOLD)\n"
+               "  evt_index                  (COUNTER_EVENT_INDEX)\n"
+               "  evt_change_of_state        (COUNTER_EVENT_CHANGE_OF_STATE)\n"
+               "  evt_capture                (COUNTER_EVENT_CAPTURE)\n"
+               "\n"
+               "  chan=<n>                   channel <n> for this watch [default: 0]\n"
+               "  id=<n>                     component id <n> for this watch [default: 0]\n"
+               "  parent=<n>                 component parent <n> for this watch [default: 0]\n"
+               "\n"
+               "Example with two watched events:\n\n"
+               "counter_watch_events -d \\\n"
+               "\t-w comp_count,scope_count,evt_ovf_udf \\\n"
+               "\t-w comp_extension,scope_count,evt_capture,id=7,chan=3\n"
+               );
+}
+
+static const struct option longopts[] = {
+       { "debug",              no_argument,       0, 'd' },
+       { "help",               no_argument,       0, 'h' },
+       { "device-num",         required_argument, 0, 'n' },
+       { "loop",               required_argument, 0, 'l' },
+       { "watch",              required_argument, 0, 'w' },
+       { },
+};
+
+/* counter watch subopts */
+enum {
+       WATCH_SCOPE_DEVICE,
+       WATCH_SCOPE_SIGNAL,
+       WATCH_SCOPE_COUNT,
+       WATCH_COMPONENT_NONE,
+       WATCH_COMPONENT_SIGNAL,
+       WATCH_COMPONENT_COUNT,
+       WATCH_COMPONENT_FUNCTION,
+       WATCH_COMPONENT_SYNAPSE_ACTION,
+       WATCH_COMPONENT_EXTENSION,
+       WATCH_EVENT_OVERFLOW,
+       WATCH_EVENT_UNDERFLOW,
+       WATCH_EVENT_OVERFLOW_UNDERFLOW,
+       WATCH_EVENT_THRESHOLD,
+       WATCH_EVENT_INDEX,
+       WATCH_EVENT_CHANGE_OF_STATE,
+       WATCH_EVENT_CAPTURE,
+       WATCH_CHANNEL,
+       WATCH_ID,
+       WATCH_PARENT,
+       WATCH_SUBOPTS_MAX,
+};
+
+static char * const counter_watch_subopts[WATCH_SUBOPTS_MAX + 1] = {
+       /* component.scope */
+       [WATCH_SCOPE_DEVICE] = "scope_device",
+       [WATCH_SCOPE_SIGNAL] = "scope_signal",
+       [WATCH_SCOPE_COUNT] = "scope_count",
+       /* component.type */
+       [WATCH_COMPONENT_NONE] = "comp_none",
+       [WATCH_COMPONENT_SIGNAL] = "comp_signal",
+       [WATCH_COMPONENT_COUNT] = "comp_count",
+       [WATCH_COMPONENT_FUNCTION] = "comp_function",
+       [WATCH_COMPONENT_SYNAPSE_ACTION] = "comp_synapse_action",
+       [WATCH_COMPONENT_EXTENSION] = "comp_extension",
+       /* event */
+       [WATCH_EVENT_OVERFLOW] = "evt_ovf",
+       [WATCH_EVENT_UNDERFLOW] = "evt_udf",
+       [WATCH_EVENT_OVERFLOW_UNDERFLOW] = "evt_ovf_udf",
+       [WATCH_EVENT_THRESHOLD] = "evt_threshold",
+       [WATCH_EVENT_INDEX] = "evt_index",
+       [WATCH_EVENT_CHANGE_OF_STATE] = "evt_change_of_state",
+       [WATCH_EVENT_CAPTURE] = "evt_capture",
+       /* channel, id, parent */
+       [WATCH_CHANNEL] = "chan",
+       [WATCH_ID] = "id",
+       [WATCH_PARENT] = "parent",
+       /* Empty entry ends the opts array */
+       NULL
+};
+
+int main(int argc, char **argv)
+{
+       int c, fd, i, ret, rc = 0, debug = 0, loop = 0, dev_num = 0, nwatch = 0;
+       struct counter_event event_data;
+       char *device_name = NULL, *subopts, *value;
+       struct counter_watch *watches;
+
+       /*
+        * 1st pass:
+        * - list watch events number to allocate the watch array.
+        * - parse normal options (other than watch options)
+        */
+       while ((c = getopt_long(argc, argv, "dhn:l:w:", longopts, NULL)) != -1) {
+               switch (c) {
+               case 'd':
+                       debug = 1;
+                       break;
+               case 'h':
+                       print_usage();
+                       return EXIT_SUCCESS;
+               case 'n':
+                       dev_num = strtoul(optarg, NULL, 10);
+                       if (errno) {
+                               perror("strtol failed: --device-num <n>\n");
+                               return EXIT_FAILURE;
+                       }
+                       break;
+               case 'l':
+                       loop = strtol(optarg, NULL, 10);
+                       if (errno) {
+                               perror("strtol failed: --loop <n>\n");
+                               return EXIT_FAILURE;
+                       }
+                       break;
+               case 'w':
+                       nwatch++;
+                       break;
+               default:
+                       return EXIT_FAILURE;
+               }
+       }
+
+       if (nwatch) {
+               watches = calloc(nwatch, sizeof(*watches));
+               if (!watches) {
+                       perror("Error allocating watches\n");
+                       return EXIT_FAILURE;
+               }
+       } else {
+               /* default to simple watch example */
+               watches = simple_watch;
+               nwatch = ARRAY_SIZE(simple_watch);
+       }
+
+       /* 2nd pass: parse watch sub-options to fill in watch array */
+       optind = 1;
+       i = 0;
+       while ((c = getopt_long(argc, argv, "dhn:l:w:", longopts, NULL)) != -1) {
+               switch (c) {
+               case 'w':
+                       subopts = optarg;
+                       while (*subopts != '\0') {
+                               ret = getsubopt(&subopts, counter_watch_subopts, &value);
+                               switch (ret) {
+                               case WATCH_SCOPE_DEVICE:
+                               case WATCH_SCOPE_SIGNAL:
+                               case WATCH_SCOPE_COUNT:
+                                       /* match with counter_scope */
+                                       watches[i].component.scope = ret;
+                                       break;
+                               case WATCH_COMPONENT_NONE:
+                               case WATCH_COMPONENT_SIGNAL:
+                               case WATCH_COMPONENT_COUNT:
+                               case WATCH_COMPONENT_FUNCTION:
+                               case WATCH_COMPONENT_SYNAPSE_ACTION:
+                               case WATCH_COMPONENT_EXTENSION:
+                                       /* match counter_component_type: subtract enum value */
+                                       ret -= WATCH_COMPONENT_NONE;
+                                       watches[i].component.type = ret;
+                                       break;
+                               case WATCH_EVENT_OVERFLOW:
+                               case WATCH_EVENT_UNDERFLOW:
+                               case WATCH_EVENT_OVERFLOW_UNDERFLOW:
+                               case WATCH_EVENT_THRESHOLD:
+                               case WATCH_EVENT_INDEX:
+                               case WATCH_EVENT_CHANGE_OF_STATE:
+                               case WATCH_EVENT_CAPTURE:
+                                       /* match counter_event_type: subtract enum value */
+                                       ret -= WATCH_EVENT_OVERFLOW;
+                                       watches[i].event = ret;
+                                       break;
+                               case WATCH_CHANNEL:
+                                       if (!value) {
+                                               fprintf(stderr, "Invalid chan=<number>\n");
+                                               rc = EXIT_FAILURE;
+                                               goto err_free_watches;
+                                       }
+                                       watches[i].channel = strtoul(value, NULL, 10);
+                                       if (errno) {
+                                               perror("strtoul failed: chan=<number>\n");
+                                               rc = EXIT_FAILURE;
+                                               goto err_free_watches;
+                                       }
+                                       break;
+                               case WATCH_ID:
+                                       if (!value) {
+                                               fprintf(stderr, "Invalid id=<number>\n");
+                                               rc = EXIT_FAILURE;
+                                               goto err_free_watches;
+                                       }
+                                       watches[i].component.id = strtoul(value, NULL, 10);
+                                       if (errno) {
+                                               perror("strtoul failed: id=<number>\n");
+                                               rc = EXIT_FAILURE;
+                                               goto err_free_watches;
+                                       }
+                                       break;
+                               case WATCH_PARENT:
+                                       if (!value) {
+                                               fprintf(stderr, "Invalid parent=<number>\n");
+                                               rc = EXIT_FAILURE;
+                                               goto err_free_watches;
+                                       }
+                                       watches[i].component.parent = strtoul(value, NULL, 10);
+                                       if (errno) {
+                                               perror("strtoul failed: parent=<number>\n");
+                                               rc = EXIT_FAILURE;
+                                               goto err_free_watches;
+                                       }
+                                       break;
+                               default:
+                                       fprintf(stderr, "Unknown suboption '%s'\n", value);
+                                       rc = EXIT_FAILURE;
+                                       goto err_free_watches;
+                               }
+                       }
+                       i++;
+                       break;
+               }
+       }
+
+       if (debug)
+               print_watch(watches, nwatch);
+
+       ret = asprintf(&device_name, "/dev/counter%d", dev_num);
+       if (ret < 0) {
+               fprintf(stderr, "asprintf failed\n");
+               rc = EXIT_FAILURE;
+               goto err_free_watches;
+       }
+
+       if (debug)
+               printf("Opening %s\n", device_name);
+
+       fd = open(device_name, O_RDWR);
+       if (fd == -1) {
+               fprintf(stderr, "Unable to open %s: %s\n", device_name, strerror(errno));
+               free(device_name);
+               rc = EXIT_FAILURE;
+               goto err_free_watches;
+       }
+       free(device_name);
+
+       for (i = 0; i < nwatch; i++) {
+               ret = ioctl(fd, COUNTER_ADD_WATCH_IOCTL, watches + i);
+               if (ret == -1) {
+                       fprintf(stderr, "Error adding watches[%d]: %s\n", i,
+                               strerror(errno));
+                       rc = EXIT_FAILURE;
+                       goto err_close;
+               }
+       }
+
+       ret = ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL);
+       if (ret == -1) {
+               perror("Error enabling events");
+               rc = EXIT_FAILURE;
+               goto err_close;
+       }
+
+       for (i = 0; loop <= 0 || i < loop; i++) {
+               ret = read(fd, &event_data, sizeof(event_data));
+               if (ret == -1) {
+                       perror("Failed to read event data");
+                       rc = EXIT_FAILURE;
+                       goto err_close;
+               }
+
+               if (ret != sizeof(event_data)) {
+                       fprintf(stderr, "Failed to read event data (got: %d)\n", ret);
+                       rc = EXIT_FAILURE;
+                       goto err_close;
+               }
+
+               printf("Timestamp: %llu\tData: %llu\t event: %s\tch: %d\n",
+                      event_data.timestamp, event_data.value,
+                      counter_event_type_name[event_data.watch.event],
+                      event_data.watch.channel);
+
+               if (event_data.status) {
+                       fprintf(stderr, "Error %d: %s\n", event_data.status,
+                               strerror(event_data.status));
+               }
+       }
+
+err_close:
+       close(fd);
+err_free_watches:
+       if (watches != simple_watch)
+               free(watches);
+
+       return rc;
+}
index 2eaaa7123b042b04dc9acb17d8eab53f704e773f..8073c9e4fe46a03c64fdd08ca0f9e0f1b656479c 100644 (file)
@@ -105,6 +105,8 @@ static const char * const iio_modifier_names[] = {
        [IIO_MOD_LIGHT_GREEN] = "green",
        [IIO_MOD_LIGHT_BLUE] = "blue",
        [IIO_MOD_LIGHT_UV] = "uv",
+       [IIO_MOD_LIGHT_UVA] = "uva",
+       [IIO_MOD_LIGHT_UVB] = "uvb",
        [IIO_MOD_LIGHT_DUV] = "duv",
        [IIO_MOD_QUATERNION] = "quaternion",
        [IIO_MOD_TEMP_AMBIENT] = "ambient",
index 2fd551915c2025ee7d7adc53f30e44e7b6bf01c1..cdd2fd078027afc99a21a7edb2fa097caf6e4a92 100644 (file)
@@ -105,9 +105,9 @@ static inline u32 get_unaligned_le24(const void *p)
 
 static inline void __put_unaligned_be24(const u32 val, u8 *p)
 {
-       *p++ = val >> 16;
-       *p++ = val >> 8;
-       *p++ = val;
+       *p++ = (val >> 16) & 0xff;
+       *p++ = (val >> 8) & 0xff;
+       *p++ = val & 0xff;
 }
 
 static inline void put_unaligned_be24(const u32 val, void *p)
@@ -117,9 +117,9 @@ static inline void put_unaligned_be24(const u32 val, void *p)
 
 static inline void __put_unaligned_le24(const u32 val, u8 *p)
 {
-       *p++ = val;
-       *p++ = val >> 8;
-       *p++ = val >> 16;
+       *p++ = val & 0xff;
+       *p++ = (val >> 8) & 0xff;
+       *p++ = (val >> 16) & 0xff;
 }
 
 static inline void put_unaligned_le24(const u32 val, void *p)
@@ -129,12 +129,12 @@ static inline void put_unaligned_le24(const u32 val, void *p)
 
 static inline void __put_unaligned_be48(const u64 val, u8 *p)
 {
-       *p++ = val >> 40;
-       *p++ = val >> 32;
-       *p++ = val >> 24;
-       *p++ = val >> 16;
-       *p++ = val >> 8;
-       *p++ = val;
+       *p++ = (val >> 40) & 0xff;
+       *p++ = (val >> 32) & 0xff;
+       *p++ = (val >> 24) & 0xff;
+       *p++ = (val >> 16) & 0xff;
+       *p++ = (val >> 8) & 0xff;
+       *p++ = val & 0xff;
 }
 
 static inline void put_unaligned_be48(const u64 val, void *p)
index 1bdd834bdd57198059c91222036314403191cdbc..d09f9dc172a486875e2e62cf8550a69f24c9beed 100644 (file)
@@ -36,8 +36,8 @@
 #include <linux/compiler-gcc.h>
 #endif
 
-#ifndef asm_volatile_goto
-#define asm_volatile_goto(x...) asm goto(x)
+#ifndef asm_goto_output
+#define asm_goto_output(x...) asm goto(x)
 #endif
 
 #endif /* __LINUX_COMPILER_TYPES_H */
index 756b013fb8324bd7a320e60cebec2ca692faa149..75f00965ab1586cd64d00928217596de5034bd25 100644 (file)
@@ -829,8 +829,21 @@ __SYSCALL(__NR_futex_wait, sys_futex_wait)
 #define __NR_futex_requeue 456
 __SYSCALL(__NR_futex_requeue, sys_futex_requeue)
 
+#define __NR_statmount   457
+__SYSCALL(__NR_statmount, sys_statmount)
+
+#define __NR_listmount   458
+__SYSCALL(__NR_listmount, sys_listmount)
+
+#define __NR_lsm_get_self_attr 459
+__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
+#define __NR_lsm_set_self_attr 460
+__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
+#define __NR_lsm_list_modules 461
+__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
+
 #undef __NR_syscalls
-#define __NR_syscalls 457
+#define __NR_syscalls 462
 
 /*
  * 32 bit systems traditionally used different
index de723566c5ae82382192923e17478209f7c94f41..16122819edfeff872b91d989d1f6267640ae1391 100644 (file)
@@ -713,7 +713,8 @@ struct drm_gem_open {
 /**
  * DRM_CAP_ASYNC_PAGE_FLIP
  *
- * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
  */
 #define DRM_CAP_ASYNC_PAGE_FLIP                0x7
 /**
@@ -773,6 +774,13 @@ struct drm_gem_open {
  * :ref:`drm_sync_objects`.
  */
 #define DRM_CAP_SYNCOBJ_TIMELINE       0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
 
 /* DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
@@ -842,6 +850,31 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS    5
 
+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal,) e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set the DRM core will hide cursor plane on
+ * those virtualized drivers because not setting it implies that the
+ * client is not capable of dealing with those extra restictions.
+ * Clients which do set cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this property.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this property on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT    6
+
 /* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
        __u64 capability;
@@ -893,6 +926,7 @@ struct drm_syncobj_transfer {
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
 struct drm_syncobj_wait {
        __u64 handles;
        /* absolute timeout */
@@ -901,6 +935,14 @@ struct drm_syncobj_wait {
        __u32 flags;
        __u32 first_signaled; /* only valid when not waiting all */
        __u32 pad;
+       /**
+        * @deadline_nsec - fence deadline hint
+        *
+        * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+        * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+        * set.
+        */
+       __u64 deadline_nsec;
 };
 
 struct drm_syncobj_timeline_wait {
@@ -913,6 +955,14 @@ struct drm_syncobj_timeline_wait {
        __u32 flags;
        __u32 first_signaled; /* only valid when not waiting all */
        __u32 pad;
+       /**
+        * @deadline_nsec - fence deadline hint
+        *
+        * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+        * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+        * set.
+        */
+       __u64 deadline_nsec;
 };
 
 /**
@@ -1218,6 +1268,26 @@ extern "C" {
 
 #define DRM_IOCTL_SYNCOBJ_EVENTFD      DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
 
+/**
+ * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
+ *
+ * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
+ * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
+ * alive. When the plane no longer uses the framebuffer (because the
+ * framebuffer is replaced with another one, or the plane is disabled), the
+ * framebuffer is cleaned up.
+ *
+ * This is useful to implement flicker-free transitions between two processes.
+ *
+ * Depending on the threat model, user-space may want to ensure that the
+ * framebuffer doesn't expose any sensitive user information: closed
+ * framebuffers attached to a plane can be read back by the next DRM master.
+ */
+#define DRM_IOCTL_MODE_CLOSEFB         DRM_IOWR(0xD0, struct drm_mode_closefb)
+
 /*
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
index 218edb0a96f8c043df13a5bf25f85ec754ee449a..fd4f9574d177a269b2cdbe5a36b3b30f2addbc94 100644 (file)
@@ -693,7 +693,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_EXEC_FENCE       44
 
 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
- * user specified bufffers for post-mortem debugging of GPU hangs. See
+ * user-specified buffers for post-mortem debugging of GPU hangs. See
  * EXEC_OBJECT_CAPTURE.
  */
 #define I915_PARAM_HAS_EXEC_CAPTURE     45
@@ -1606,7 +1606,7 @@ struct drm_i915_gem_busy {
         * is accurate.
         *
         * The returned dword is split into two fields to indicate both
-        * the engine classess on which the object is being read, and the
+        * the engine classes on which the object is being read, and the
         * engine class on which it is currently being written (if any).
         *
         * The low word (bits 0:15) indicate if the object is being written
@@ -1815,7 +1815,7 @@ struct drm_i915_gem_madvise {
        __u32 handle;
 
        /* Advice: either the buffer will be needed again in the near future,
-        *         or wont be and could be discarded under memory pressure.
+        *         or won't be and could be discarded under memory pressure.
         */
        __u32 madv;
 
@@ -3246,7 +3246,7 @@ struct drm_i915_query_topology_info {
  *     // enough to hold our array of engines. The kernel will fill out the
  *     // item.length for us, which is the number of bytes we need.
  *     //
- *     // Alternatively a large buffer can be allocated straight away enabling
+ *     // Alternatively a large buffer can be allocated straightaway enabling
  *     // querying in one pass, in which case item.length should contain the
  *     // length of the provided buffer.
  *     err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
@@ -3256,7 +3256,7 @@ struct drm_i915_query_topology_info {
  *     // Now that we allocated the required number of bytes, we call the ioctl
  *     // again, this time with the data_ptr pointing to our newly allocated
  *     // blob, which the kernel can then populate with info on all engines.
- *     item.data_ptr = (uintptr_t)&info,
+ *     item.data_ptr = (uintptr_t)&info;
  *
  *     err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
  *     if (err) ...
@@ -3286,7 +3286,7 @@ struct drm_i915_query_topology_info {
 /**
  * struct drm_i915_engine_info
  *
- * Describes one engine and it's capabilities as known to the driver.
+ * Describes one engine and its capabilities as known to the driver.
  */
 struct drm_i915_engine_info {
        /** @engine: Engine class and instance. */
index 6c80f96049bd07d1aa527c103acb07fe52bfd617..282e90aeb163c0288590995b38fe011b19e85111 100644 (file)
 #define AT_HANDLE_FID          AT_REMOVEDIR    /* file handle is needed to
                                        compare object identity and may not
                                        be usable to open_by_handle_at(2) */
+#if defined(__KERNEL__)
+#define AT_GETATTR_NOSEC       0x80000000
+#endif
 
 #endif /* _UAPI_LINUX_FCNTL_H */
index 211b86de35ac53f6457bbd2fae8c973ce6b3a968..c3308536482bdb2bfb1279279325faf5430a3356 100644 (file)
 
 #define KVM_API_VERSION 12
 
-/* *** Deprecated interfaces *** */
-
-#define KVM_TRC_SHIFT           16
-
-#define KVM_TRC_ENTRYEXIT       (1 << KVM_TRC_SHIFT)
-#define KVM_TRC_HANDLER         (1 << (KVM_TRC_SHIFT + 1))
-
-#define KVM_TRC_VMENTRY         (KVM_TRC_ENTRYEXIT + 0x01)
-#define KVM_TRC_VMEXIT          (KVM_TRC_ENTRYEXIT + 0x02)
-#define KVM_TRC_PAGE_FAULT      (KVM_TRC_HANDLER + 0x01)
-
-#define KVM_TRC_HEAD_SIZE       12
-#define KVM_TRC_CYCLE_SIZE      8
-#define KVM_TRC_EXTRA_MAX       7
-
-#define KVM_TRC_INJ_VIRQ         (KVM_TRC_HANDLER + 0x02)
-#define KVM_TRC_REDELIVER_EVT    (KVM_TRC_HANDLER + 0x03)
-#define KVM_TRC_PEND_INTR        (KVM_TRC_HANDLER + 0x04)
-#define KVM_TRC_IO_READ          (KVM_TRC_HANDLER + 0x05)
-#define KVM_TRC_IO_WRITE         (KVM_TRC_HANDLER + 0x06)
-#define KVM_TRC_CR_READ          (KVM_TRC_HANDLER + 0x07)
-#define KVM_TRC_CR_WRITE         (KVM_TRC_HANDLER + 0x08)
-#define KVM_TRC_DR_READ          (KVM_TRC_HANDLER + 0x09)
-#define KVM_TRC_DR_WRITE         (KVM_TRC_HANDLER + 0x0A)
-#define KVM_TRC_MSR_READ         (KVM_TRC_HANDLER + 0x0B)
-#define KVM_TRC_MSR_WRITE        (KVM_TRC_HANDLER + 0x0C)
-#define KVM_TRC_CPUID            (KVM_TRC_HANDLER + 0x0D)
-#define KVM_TRC_INTR             (KVM_TRC_HANDLER + 0x0E)
-#define KVM_TRC_NMI              (KVM_TRC_HANDLER + 0x0F)
-#define KVM_TRC_VMMCALL          (KVM_TRC_HANDLER + 0x10)
-#define KVM_TRC_HLT              (KVM_TRC_HANDLER + 0x11)
-#define KVM_TRC_CLTS             (KVM_TRC_HANDLER + 0x12)
-#define KVM_TRC_LMSW             (KVM_TRC_HANDLER + 0x13)
-#define KVM_TRC_APIC_ACCESS      (KVM_TRC_HANDLER + 0x14)
-#define KVM_TRC_TDP_FAULT        (KVM_TRC_HANDLER + 0x15)
-#define KVM_TRC_GTLB_WRITE       (KVM_TRC_HANDLER + 0x16)
-#define KVM_TRC_STLB_WRITE       (KVM_TRC_HANDLER + 0x17)
-#define KVM_TRC_STLB_INVAL       (KVM_TRC_HANDLER + 0x18)
-#define KVM_TRC_PPC_INSTR        (KVM_TRC_HANDLER + 0x19)
-
-struct kvm_user_trace_setup {
-       __u32 buf_size;
-       __u32 buf_nr;
-};
-
-#define __KVM_DEPRECATED_MAIN_W_0x06 \
-       _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
-#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
-#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
-
-#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
-
-struct kvm_breakpoint {
-       __u32 enabled;
-       __u32 padding;
-       __u64 address;
-};
-
-struct kvm_debug_guest {
-       __u32 enabled;
-       __u32 pad;
-       struct kvm_breakpoint breakpoints[4];
-       __u32 singlestep;
-};
-
-#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
-
-/* *** End of deprecated interfaces *** */
-
-
 /* for KVM_SET_USER_MEMORY_REGION */
 struct kvm_userspace_memory_region {
        __u32 slot;
@@ -95,6 +25,19 @@ struct kvm_userspace_memory_region {
        __u64 userspace_addr; /* start of the userspace allocated memory */
 };
 
+/* for KVM_SET_USER_MEMORY_REGION2 */
+struct kvm_userspace_memory_region2 {
+       __u32 slot;
+       __u32 flags;
+       __u64 guest_phys_addr;
+       __u64 memory_size;
+       __u64 userspace_addr;
+       __u64 guest_memfd_offset;
+       __u32 guest_memfd;
+       __u32 pad1;
+       __u64 pad2[14];
+};
+
 /*
  * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
  * userspace, other bits are reserved for kvm internal use which are defined
@@ -102,6 +45,7 @@ struct kvm_userspace_memory_region {
  */
 #define KVM_MEM_LOG_DIRTY_PAGES        (1UL << 0)
 #define KVM_MEM_READONLY       (1UL << 1)
+#define KVM_MEM_GUEST_MEMFD    (1UL << 2)
 
 /* for KVM_IRQ_LINE */
 struct kvm_irq_level {
@@ -265,6 +209,7 @@ struct kvm_xen_exit {
 #define KVM_EXIT_RISCV_CSR        36
 #define KVM_EXIT_NOTIFY           37
 #define KVM_EXIT_LOONGARCH_IOCSR  38
+#define KVM_EXIT_MEMORY_FAULT     39
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -518,6 +463,13 @@ struct kvm_run {
 #define KVM_NOTIFY_CONTEXT_INVALID     (1 << 0)
                        __u32 flags;
                } notify;
+               /* KVM_EXIT_MEMORY_FAULT */
+               struct {
+#define KVM_MEMORY_EXIT_FLAG_PRIVATE   (1ULL << 3)
+                       __u64 flags;
+                       __u64 gpa;
+                       __u64 size;
+               } memory_fault;
                /* Fix the size of the union. */
                char padding[256];
        };
@@ -945,9 +897,6 @@ struct kvm_ppc_resize_hpt {
  */
 #define KVM_GET_VCPU_MMAP_SIZE    _IO(KVMIO,   0x04) /* in bytes */
 #define KVM_GET_SUPPORTED_CPUID   _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-#define KVM_TRACE_ENABLE          __KVM_DEPRECATED_MAIN_W_0x06
-#define KVM_TRACE_PAUSE           __KVM_DEPRECATED_MAIN_0x07
-#define KVM_TRACE_DISABLE         __KVM_DEPRECATED_MAIN_0x08
 #define KVM_GET_EMULATED_CPUID   _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
 #define KVM_GET_MSR_FEATURE_INDEX_LIST    _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
 
@@ -1201,6 +1150,11 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
 #define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
+#define KVM_CAP_USER_MEMORY2 231
+#define KVM_CAP_MEMORY_FAULT_INFO 232
+#define KVM_CAP_MEMORY_ATTRIBUTES 233
+#define KVM_CAP_GUEST_MEMFD 234
+#define KVM_CAP_VM_TYPES 235
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1291,6 +1245,7 @@ struct kvm_x86_mce {
 #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL       (1 << 4)
 #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND         (1 << 5)
 #define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG        (1 << 6)
+#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE        (1 << 7)
 
 struct kvm_xen_hvm_config {
        __u32 flags;
@@ -1483,6 +1438,8 @@ struct kvm_vfio_spapr_tce {
                                        struct kvm_userspace_memory_region)
 #define KVM_SET_TSS_ADDR          _IO(KVMIO,   0x47)
 #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO,  0x48, __u64)
+#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \
+                                        struct kvm_userspace_memory_region2)
 
 /* enable ucontrol for s390 */
 struct kvm_s390_ucas_mapping {
@@ -1507,20 +1464,8 @@ struct kvm_s390_ucas_mapping {
                        _IOW(KVMIO,  0x67, struct kvm_coalesced_mmio_zone)
 #define KVM_UNREGISTER_COALESCED_MMIO \
                        _IOW(KVMIO,  0x68, struct kvm_coalesced_mmio_zone)
-#define KVM_ASSIGN_PCI_DEVICE     _IOR(KVMIO,  0x69, \
-                                      struct kvm_assigned_pci_dev)
 #define KVM_SET_GSI_ROUTING       _IOW(KVMIO,  0x6a, struct kvm_irq_routing)
-/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
-#define KVM_ASSIGN_IRQ            __KVM_DEPRECATED_VM_R_0x70
-#define KVM_ASSIGN_DEV_IRQ        _IOW(KVMIO,  0x70, struct kvm_assigned_irq)
 #define KVM_REINJECT_CONTROL      _IO(KVMIO,   0x71)
-#define KVM_DEASSIGN_PCI_DEVICE   _IOW(KVMIO,  0x72, \
-                                      struct kvm_assigned_pci_dev)
-#define KVM_ASSIGN_SET_MSIX_NR    _IOW(KVMIO,  0x73, \
-                                      struct kvm_assigned_msix_nr)
-#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO,  0x74, \
-                                      struct kvm_assigned_msix_entry)
-#define KVM_DEASSIGN_DEV_IRQ      _IOW(KVMIO,  0x75, struct kvm_assigned_irq)
 #define KVM_IRQFD                 _IOW(KVMIO,  0x76, struct kvm_irqfd)
 #define KVM_CREATE_PIT2                  _IOW(KVMIO,  0x77, struct kvm_pit_config)
 #define KVM_SET_BOOT_CPU_ID       _IO(KVMIO,   0x78)
@@ -1537,9 +1482,6 @@ struct kvm_s390_ucas_mapping {
 *  KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
 #define KVM_SET_TSC_KHZ           _IO(KVMIO,  0xa2)
 #define KVM_GET_TSC_KHZ           _IO(KVMIO,  0xa3)
-/* Available with KVM_CAP_PCI_2_3 */
-#define KVM_ASSIGN_SET_INTX_MASK  _IOW(KVMIO,  0xa4, \
-                                      struct kvm_assigned_pci_dev)
 /* Available with KVM_CAP_SIGNAL_MSI */
 #define KVM_SIGNAL_MSI            _IOW(KVMIO,  0xa5, struct kvm_msi)
 /* Available with KVM_CAP_PPC_GET_SMMU_INFO */
@@ -1592,8 +1534,6 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_SREGS             _IOW(KVMIO,  0x84, struct kvm_sregs)
 #define KVM_TRANSLATE             _IOWR(KVMIO, 0x85, struct kvm_translation)
 #define KVM_INTERRUPT             _IOW(KVMIO,  0x86, struct kvm_interrupt)
-/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
-#define KVM_DEBUG_GUEST           __KVM_DEPRECATED_VCPU_W_0x87
 #define KVM_GET_MSRS              _IOWR(KVMIO, 0x88, struct kvm_msrs)
 #define KVM_SET_MSRS              _IOW(KVMIO,  0x89, struct kvm_msrs)
 #define KVM_SET_CPUID             _IOW(KVMIO,  0x8a, struct kvm_cpuid)
@@ -2267,4 +2207,24 @@ struct kvm_s390_zpci_op {
 /* flags for kvm_s390_zpci_op->u.reg_aen.flags */
 #define KVM_S390_ZPCIOP_REGAEN_HOST    (1 << 0)
 
+/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
+#define KVM_SET_MEMORY_ATTRIBUTES              _IOW(KVMIO,  0xd2, struct kvm_memory_attributes)
+
+struct kvm_memory_attributes {
+       __u64 address;
+       __u64 size;
+       __u64 attributes;
+       __u64 flags;
+};
+
+#define KVM_MEMORY_ATTRIBUTE_PRIVATE           (1ULL << 3)
+
+#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO,  0xd4, struct kvm_create_guest_memfd)
+
+struct kvm_create_guest_memfd {
+       __u64 size;
+       __u64 flags;
+       __u64 reserved[6];
+};
+
 #endif /* __LINUX_KVM_H */
index bb242fdcfe6b29bf96e287023701dd8629042969..ad5478dbad007341f70a8816aa506216ffea89ec 100644 (file)
@@ -138,4 +138,74 @@ struct mount_attr {
 /* List of all mount_attr versions. */
 #define MOUNT_ATTR_SIZE_VER0   32 /* sizeof first published struct */
 
+
+/*
+ * Structure for getting mount/superblock/filesystem info with statmount(2).
+ *
+ * The interface is similar to statx(2): individual fields or groups can be
+ * selected with the @mask argument of statmount().  Kernel will set the @mask
+ * field according to the supported fields.
+ *
+ * If string fields are selected, then the caller needs to pass a buffer that
+ * has space after the fixed part of the structure.  Nul terminated strings are
+ * copied there and offsets relative to @str are stored in the relevant fields.
+ * If the buffer is too small, then EOVERFLOW is returned.  The actually used
+ * size is returned in @size.
+ */
+struct statmount {
+       __u32 size;             /* Total size, including strings */
+       __u32 __spare1;
+       __u64 mask;             /* What results were written */
+       __u32 sb_dev_major;     /* Device ID */
+       __u32 sb_dev_minor;
+       __u64 sb_magic;         /* ..._SUPER_MAGIC */
+       __u32 sb_flags;         /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+       __u32 fs_type;          /* [str] Filesystem type */
+       __u64 mnt_id;           /* Unique ID of mount */
+       __u64 mnt_parent_id;    /* Unique ID of parent (for root == mnt_id) */
+       __u32 mnt_id_old;       /* Reused IDs used in proc/.../mountinfo */
+       __u32 mnt_parent_id_old;
+       __u64 mnt_attr;         /* MOUNT_ATTR_... */
+       __u64 mnt_propagation;  /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+       __u64 mnt_peer_group;   /* ID of shared peer group */
+       __u64 mnt_master;       /* Mount receives propagation from this ID */
+       __u64 propagate_from;   /* Propagation from in current namespace */
+       __u32 mnt_root;         /* [str] Root of mount relative to root of fs */
+       __u32 mnt_point;        /* [str] Mountpoint relative to current root */
+       __u64 __spare2[50];
+       char str[];             /* Variable size part containing strings */
+};
+
+/*
+ * Structure for passing mount ID and miscellaneous parameters to statmount(2)
+ * and listmount(2).
+ *
+ * For statmount(2) @param represents the request mask.
+ * For listmount(2) @param represents the last listed mount id (or zero).
+ */
+struct mnt_id_req {
+       __u32 size;
+       __u32 spare;
+       __u64 mnt_id;
+       __u64 param;
+};
+
+/* List of all mnt_id_req versions. */
+#define MNT_ID_REQ_SIZE_VER0   24 /* sizeof first published struct */
+
+/*
+ * @mask bits for statmount(2)
+ */
+#define STATMOUNT_SB_BASIC             0x00000001U     /* Want/got sb_... */
+#define STATMOUNT_MNT_BASIC            0x00000002U     /* Want/got mnt_... */
+#define STATMOUNT_PROPAGATE_FROM       0x00000004U     /* Want/got propagate_from */
+#define STATMOUNT_MNT_ROOT             0x00000008U     /* Want/got mnt_root  */
+#define STATMOUNT_MNT_POINT            0x00000010U     /* Want/got mnt_point */
+#define STATMOUNT_FS_TYPE              0x00000020U     /* Want/got fs_type */
+
+/*
+ * Special @mnt_id values that can be passed to listmount
+ */
+#define LSMT_ROOT              0xffffffffffffffff      /* root mount */
+
 #endif /* _UAPI_LINUX_MOUNT_H */
index 39c6a250dd1b92af18e3b4a72a047d2784f89382..3a64499b0f5d63734d632ab03cd1966211473d8c 100644 (file)
@@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift {
 
        PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT      = 18, /* save privilege mode */
 
+       PERF_SAMPLE_BRANCH_COUNTERS_SHIFT       = 19, /* save occurrences of events on a branch */
+
        PERF_SAMPLE_BRANCH_MAX_SHIFT            /* non-ABI */
 };
 
@@ -235,6 +237,8 @@ enum perf_branch_sample_type {
 
        PERF_SAMPLE_BRANCH_PRIV_SAVE    = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
 
+       PERF_SAMPLE_BRANCH_COUNTERS     = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
+
        PERF_SAMPLE_BRANCH_MAX          = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
 };
 
@@ -982,6 +986,12 @@ enum perf_event_type {
         *      { u64                   nr;
         *        { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
         *        { u64 from, to, flags } lbr[nr];
+        *        #
+        *        # The format of the counters is decided by the
+        *        # "branch_counter_nr" and "branch_counter_width",
+        *        # which are defined in the ABI.
+        *        #
+        *        { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
         *      } && PERF_SAMPLE_BRANCH_STACK
         *
         *      { u64                   abi; # enum perf_sample_regs_abi
@@ -1427,6 +1437,9 @@ struct perf_branch_entry {
                reserved:31;
 };
 
+/* Size of used info bits in struct perf_branch_entry */
+#define PERF_BRANCH_ENTRY_INFO_BITS_MAX                33
+
 union perf_sample_weight {
        __u64           full;
 #if defined(__LITTLE_ENDIAN_BITFIELD)
index 7cab2c65d3d7fce9210d2fb6d02012233b9923cf..2f2ee82d55175d052c0214a7e29da5d6ce2738ab 100644 (file)
@@ -154,6 +154,7 @@ struct statx {
 #define STATX_BTIME            0x00000800U     /* Want/got stx_btime */
 #define STATX_MNT_ID           0x00001000U     /* Got stx_mnt_id */
 #define STATX_DIOALIGN         0x00002000U     /* Want/got direct I/O alignment info */
+#define STATX_MNT_ID_UNIQUE    0x00004000U     /* Want/got extended stx_mount_id */
 
 #define STATX__RESERVED                0x80000000U     /* Reserved for future struct statx expansion */
 
index 5cb0eeec2c8a6c4353ea82885250ab123c37d797..337fde770e45fe031c8fbb399529bb385d15ac76 100644 (file)
@@ -16,6 +16,7 @@
 #include <sys/mount.h>
 
 #include "fs.h"
+#include "../io.h"
 #include "debug-internal.h"
 
 #define _STR(x) #x
@@ -344,53 +345,24 @@ int filename__read_ull(const char *filename, unsigned long long *value)
        return filename__read_ull_base(filename, value, 0);
 }
 
-#define STRERR_BUFSIZE  128     /* For the buffer size of strerror_r */
-
 int filename__read_str(const char *filename, char **buf, size_t *sizep)
 {
-       size_t size = 0, alloc_size = 0;
-       void *bf = NULL, *nbf;
-       int fd, n, err = 0;
-       char sbuf[STRERR_BUFSIZE];
+       struct io io;
+       char bf[128];
+       int err;
 
-       fd = open(filename, O_RDONLY);
-       if (fd < 0)
+       io.fd = open(filename, O_RDONLY);
+       if (io.fd < 0)
                return -errno;
-
-       do {
-               if (size == alloc_size) {
-                       alloc_size += BUFSIZ;
-                       nbf = realloc(bf, alloc_size);
-                       if (!nbf) {
-                               err = -ENOMEM;
-                               break;
-                       }
-
-                       bf = nbf;
-               }
-
-               n = read(fd, bf + size, alloc_size - size);
-               if (n < 0) {
-                       if (size) {
-                               pr_warn("read failed %d: %s\n", errno,
-                                       strerror_r(errno, sbuf, sizeof(sbuf)));
-                               err = 0;
-                       } else
-                               err = -errno;
-
-                       break;
-               }
-
-               size += n;
-       } while (n > 0);
-
-       if (!err) {
-               *sizep = size;
-               *buf   = bf;
+       io__init(&io, io.fd, bf, sizeof(bf));
+       *buf = NULL;
+       err = io__getdelim(&io, buf, sizep, /*delim=*/-1);
+       if (err < 0) {
+               free(*buf);
+               *buf = NULL;
        } else
-               free(bf);
-
-       close(fd);
+               err = 0;
+       close(io.fd);
        return err;
 }
 
@@ -475,15 +447,22 @@ int sysfs__read_str(const char *entry, char **buf, size_t *sizep)
 
 int sysfs__read_bool(const char *entry, bool *value)
 {
-       char *buf;
-       size_t size;
-       int ret;
+       struct io io;
+       char bf[16];
+       int ret = 0;
+       char path[PATH_MAX];
+       const char *sysfs = sysfs__mountpoint();
+
+       if (!sysfs)
+               return -1;
 
-       ret = sysfs__read_str(entry, &buf, &size);
-       if (ret < 0)
-               return ret;
+       snprintf(path, sizeof(path), "%s/%s", sysfs, entry);
+       io.fd = open(path, O_RDONLY);
+       if (io.fd < 0)
+               return -errno;
 
-       switch (buf[0]) {
+       io__init(&io, io.fd, bf, sizeof(bf));
+       switch (io__get_char(&io)) {
        case '1':
        case 'y':
        case 'Y':
@@ -497,8 +476,7 @@ int sysfs__read_bool(const char *entry, bool *value)
        default:
                ret = -1;
        }
-
-       free(buf);
+       close(io.fd);
 
        return ret;
 }
index a77b74c5fb655a8c7b65c293ba0dd563f34f8569..84adf81020185171b0d839eb639ba523d4d75355 100644 (file)
@@ -12,6 +12,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
+#include <linux/types.h>
 
 struct io {
        /* File descriptor being read/ */
@@ -140,8 +141,8 @@ static inline int io__get_dec(struct io *io, __u64 *dec)
        }
 }
 
-/* Read up to and including the first newline following the pattern of getline. */
-static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_len_out)
+/* Read up to and including the first delim. */
+static inline ssize_t io__getdelim(struct io *io, char **line_out, size_t *line_len_out, int delim)
 {
        char buf[128];
        int buf_pos = 0;
@@ -151,7 +152,7 @@ static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_l
 
        /* TODO: reuse previously allocated memory. */
        free(*line_out);
-       while (ch != '\n') {
+       while (ch != delim) {
                ch = io__get_char(io);
 
                if (ch < 0)
@@ -184,4 +185,9 @@ err_out:
        return -ENOMEM;
 }
 
+static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_len_out)
+{
+       return io__getdelim(io, line_out, line_len_out, /*delim=*/'\n');
+}
+
 #endif /* __API_IO__ */
index c5a42ac309fdb00a58d8f6fdf386d6a6fb18c317..afd09571c482b7234ea827c739980a6996d9ab7f 100644 (file)
@@ -6695,6 +6695,67 @@ static struct {
        /* all other program types don't have "named" context structs */
 };
 
+static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
+                                    const char *subprog_name, int arg_idx,
+                                    int arg_type_id, const char *ctx_name)
+{
+       const struct btf_type *t;
+       const char *tname;
+
+       /* check if existing parameter already matches verifier expectations */
+       t = skip_mods_and_typedefs(btf, arg_type_id, NULL);
+       if (!btf_is_ptr(t))
+               goto out_warn;
+
+       /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe
+        * and perf_event programs, so check this case early on and forget
+        * about it for subsequent checks
+        */
+       while (btf_is_mod(t))
+               t = btf__type_by_id(btf, t->type);
+       if (btf_is_typedef(t) &&
+           (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) {
+               tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
+               if (strcmp(tname, "bpf_user_pt_regs_t") == 0)
+                       return false; /* canonical type for kprobe/perf_event */
+       }
+
+       /* now we can ignore typedefs moving forward */
+       t = skip_mods_and_typedefs(btf, t->type, NULL);
+
+       /* if it's `void *`, definitely fix up BTF info */
+       if (btf_is_void(t))
+               return true;
+
+       /* if it's already proper canonical type, no need to fix up */
+       tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
+       if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0)
+               return false;
+
+       /* special cases */
+       switch (prog->type) {
+       case BPF_PROG_TYPE_KPROBE:
+       case BPF_PROG_TYPE_PERF_EVENT:
+               /* `struct pt_regs *` is expected, but we need to fix up */
+               if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
+                       return true;
+               break;
+       case BPF_PROG_TYPE_RAW_TRACEPOINT:
+       case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
+               /* allow u64* as ctx */
+               if (btf_is_int(t) && t->size == 8)
+                       return true;
+               break;
+       default:
+               break;
+       }
+
+out_warn:
+       pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n",
+               prog->name, subprog_name, arg_idx, ctx_name);
+       return false;
+}
+
 static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog)
 {
        int fn_id, fn_proto_id, ret_type_id, orig_proto_id;
@@ -6757,6 +6818,69 @@ static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_progr
        return fn_id;
 }
 
+static int probe_kern_arg_ctx_tag(void)
+{
+       /* To minimize merge conflicts with BPF token series that refactors
+        * feature detection code a lot, we don't integrate
+        * probe_kern_arg_ctx_tag() into kernel_supports() feature-detection
+        * framework yet, doing our own caching internally.
+        * This will be cleaned up a bit later when bpf/bpf-next trees settle.
+        */
+       static int cached_result = -1;
+       static const char strs[] = "\0a\0b\0arg:ctx\0";
+       const __u32 types[] = {
+               /* [1] INT */
+               BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
+               /* [2] PTR -> VOID */
+               BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
+               /* [3] FUNC_PROTO `int(void *a)` */
+               BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
+               BTF_PARAM_ENC(1 /* "a" */, 2),
+               /* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
+               BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
+               /* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
+               BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
+               BTF_PARAM_ENC(3 /* "b" */, 2),
+               /* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
+               BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
+               /* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
+               BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
+       };
+       const struct bpf_insn insns[] = {
+               /* main prog */
+               BPF_CALL_REL(+1),
+               BPF_EXIT_INSN(),
+               /* global subprog */
+               BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
+               BPF_EXIT_INSN(),
+       };
+       const struct bpf_func_info_min func_infos[] = {
+               { 0, 4 }, /* main prog -> FUNC 'a' */
+               { 2, 6 }, /* subprog -> FUNC 'b' */
+       };
+       LIBBPF_OPTS(bpf_prog_load_opts, opts);
+       int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
+
+       if (cached_result >= 0)
+               return cached_result;
+
+       btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
+       if (btf_fd < 0)
+               return 0;
+
+       opts.prog_btf_fd = btf_fd;
+       opts.func_info = &func_infos;
+       opts.func_info_cnt = ARRAY_SIZE(func_infos);
+       opts.func_info_rec_size = sizeof(func_infos[0]);
+
+       prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
+                               "GPL", insns, insn_cnt, &opts);
+       close(btf_fd);
+
+       cached_result = probe_fd(prog_fd);
+       return cached_result;
+}
+
 /* Check if main program or global subprog's function prototype has `arg:ctx`
  * argument tags, and, if necessary, substitute correct type to match what BPF
  * verifier would expect, taking into account specific program type. This
@@ -6766,7 +6890,7 @@ static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_progr
  */
 static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog)
 {
-       const char *ctx_name = NULL, *ctx_tag = "arg:ctx";
+       const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name;
        struct bpf_func_info_min *func_rec;
        struct btf_type *fn_t, *fn_proto_t;
        struct btf *btf = obj->btf;
@@ -6780,6 +6904,10 @@ static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_progra
        if (!obj->btf_ext || !prog->func_info)
                return 0;
 
+       /* don't do any fix ups if kernel natively supports __arg_ctx */
+       if (probe_kern_arg_ctx_tag() > 0)
+               return 0;
+
        /* some BPF program types just don't have named context structs, so
         * this fallback mechanism doesn't work for them
         */
@@ -6842,15 +6970,11 @@ static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_progra
                if (arg_idx < 0 || arg_idx >= arg_cnt)
                        continue;
 
-               /* check if existing parameter already matches verifier expectations */
+               /* check if we should fix up argument type */
                p = &btf_params(fn_proto_t)[arg_idx];
-               t = skip_mods_and_typedefs(btf, p->type, NULL);
-               if (btf_is_ptr(t) &&
-                   (t = skip_mods_and_typedefs(btf, t->type, NULL)) &&
-                   btf_is_struct(t) &&
-                   strcmp(btf__str_by_offset(btf, t->name_off), ctx_name) == 0) {
-                       continue; /* no need for fix up */
-               }
+               fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>";
+               if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name))
+                       continue;
 
                /* clone fn/fn_proto, unless we already did it for another arg */
                if (func_rec->type_id == orig_fn_id) {
index 8e1a926a9cfe6ec3631f9b82d758e870d4dab6ef..bc142f0664b5a6a127152bbb48068290dc949dcb 100644 (file)
@@ -39,7 +39,7 @@ int main(int argc, char **argv)
 
        libperf_init(libperf_print);
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        if (!cpus) {
                fprintf(stderr, "failed to create cpus\n");
                return -1;
index d6ca24f6ef78f421910614560fe3b6bb6ec45420..2378980fab8a6b263e44ed3c5bb0f7ae9ed35ce1 100644 (file)
@@ -97,7 +97,7 @@ In this case we will monitor all the available CPUs:
 
 [source,c]
 --
- 42         cpus = perf_cpu_map__new(NULL);
+ 42         cpus = perf_cpu_map__new_online_cpus();
  43         if (!cpus) {
  44                 fprintf(stderr, "failed to create cpus\n");
  45                 return -1;
index a8f1a237931b19b182d5a197f7ce6fe4ec019351..fcfb9499ef9cdfbdfa7903c13f32a060b49e19c2 100644 (file)
@@ -37,7 +37,7 @@ SYNOPSIS
 
   struct perf_cpu_map;
 
-  struct perf_cpu_map *perf_cpu_map__dummy_new(void);
+  struct perf_cpu_map *perf_cpu_map__new_any_cpu(void);
   struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
   struct perf_cpu_map *perf_cpu_map__read(FILE *file);
   struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
@@ -46,7 +46,7 @@ SYNOPSIS
   void perf_cpu_map__put(struct perf_cpu_map *map);
   int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
   int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
-  bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+  bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
   int perf_cpu_map__max(struct perf_cpu_map *map);
   bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);
 
index 2a5a292173740bc220c4b7ecd655242912882335..4adcd7920d033dfa5f99ec6c4f9bcb3d7a7d2d10 100644 (file)
@@ -9,6 +9,7 @@
 #include <unistd.h>
 #include <ctype.h>
 #include <limits.h>
+#include "internal.h"
 
 void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
 {
@@ -27,7 +28,7 @@ struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
        return result;
 }
 
-struct perf_cpu_map *perf_cpu_map__dummy_new(void)
+struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
 {
        struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);
 
@@ -66,15 +67,21 @@ void perf_cpu_map__put(struct perf_cpu_map *map)
        }
 }
 
-static struct perf_cpu_map *cpu_map__default_new(void)
+static struct perf_cpu_map *cpu_map__new_sysconf(void)
 {
        struct perf_cpu_map *cpus;
-       int nr_cpus;
+       int nr_cpus, nr_cpus_conf;
 
        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        if (nr_cpus < 0)
                return NULL;
 
+       nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
+       if (nr_cpus != nr_cpus_conf) {
+               pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.",
+                       nr_cpus, nr_cpus_conf, nr_cpus);
+       }
+
        cpus = perf_cpu_map__alloc(nr_cpus);
        if (cpus != NULL) {
                int i;
@@ -86,9 +93,27 @@ static struct perf_cpu_map *cpu_map__default_new(void)
        return cpus;
 }
 
-struct perf_cpu_map *perf_cpu_map__default_new(void)
+static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
 {
-       return cpu_map__default_new();
+       struct perf_cpu_map *cpus = NULL;
+       FILE *onlnf;
+
+       onlnf = fopen("/sys/devices/system/cpu/online", "r");
+       if (onlnf) {
+               cpus = perf_cpu_map__read(onlnf);
+               fclose(onlnf);
+       }
+       return cpus;
+}
+
+struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
+{
+       struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();
+
+       if (cpus)
+               return cpus;
+
+       return cpu_map__new_sysconf();
 }
 
 
@@ -180,27 +205,11 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file)
 
        if (nr_cpus > 0)
                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
-       else
-               cpus = cpu_map__default_new();
 out_free_tmp:
        free(tmp_cpus);
        return cpus;
 }
 
-static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
-{
-       struct perf_cpu_map *cpus = NULL;
-       FILE *onlnf;
-
-       onlnf = fopen("/sys/devices/system/cpu/online", "r");
-       if (!onlnf)
-               return cpu_map__default_new();
-
-       cpus = perf_cpu_map__read(onlnf);
-       fclose(onlnf);
-       return cpus;
-}
-
 struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
 {
        struct perf_cpu_map *cpus = NULL;
@@ -211,7 +220,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
        int max_entries = 0;
 
        if (!cpu_list)
-               return cpu_map__read_all_cpu_map();
+               return perf_cpu_map__new_online_cpus();
 
        /*
         * must handle the case of empty cpumap to cover
@@ -268,10 +277,12 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
 
        if (nr_cpus > 0)
                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
-       else if (*cpu_list != '\0')
-               cpus = cpu_map__default_new();
-       else
-               cpus = perf_cpu_map__dummy_new();
+       else if (*cpu_list != '\0') {
+               pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
+                          cpu_list);
+               cpus = perf_cpu_map__new_online_cpus();
+       } else
+               cpus = perf_cpu_map__new_any_cpu();
 invalid:
        free(tmp_cpus);
 out:
@@ -300,7 +311,7 @@ int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
        return cpus ? __perf_cpu_map__nr(cpus) : 1;
 }
 
-bool perf_cpu_map__empty(const struct perf_cpu_map *map)
+bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
 {
        return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
 }
index 3acbbccc19019c4baa11f82ea253daacd73f7603..058e3ff10f9b2849fd25b35164472ec9f949adca 100644 (file)
@@ -39,7 +39,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
        if (evsel->system_wide) {
                /* System wide: set the cpu map of the evsel to all online CPUs. */
                perf_cpu_map__put(evsel->cpus);
-               evsel->cpus = perf_cpu_map__new(NULL);
+               evsel->cpus = perf_cpu_map__new_online_cpus();
        } else if (evlist->has_user_cpus && evsel->is_pmu_core) {
                /*
                 * User requested CPUs on a core PMU, ensure the requested CPUs
@@ -619,7 +619,7 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
 
        /* One for each CPU */
        nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
-       if (perf_cpu_map__empty(evlist->all_cpus)) {
+       if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) {
                /* Plus one for each thread */
                nr_mmaps += perf_thread_map__nr(evlist->threads);
                /* Minus the per-thread CPU (-1) */
@@ -653,7 +653,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
        if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;
 
-       if (perf_cpu_map__empty(cpus))
+       if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
                return mmap_per_thread(evlist, ops, mp);
 
        return mmap_per_cpu(evlist, ops, mp);
index 8b51b008a81f142129069bc351c86e6aa2804ed8..c07160953224adf7e7b291f6a47ab33743d20b94 100644 (file)
@@ -120,7 +120,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
                static struct perf_cpu_map *empty_cpu_map;
 
                if (empty_cpu_map == NULL) {
-                       empty_cpu_map = perf_cpu_map__dummy_new();
+                       empty_cpu_map = perf_cpu_map__new_any_cpu();
                        if (empty_cpu_map == NULL)
                                return -ENOMEM;
                }
index 5a062af8e9d8e2200aaecc3e110b7a750c2f78e7..5f08cab61ecec6d25cd1a80e4a1c5bd83a0d12e3 100644 (file)
@@ -33,7 +33,8 @@ struct perf_mmap {
        bool                     overwrite;
        u64                      flush;
        libperf_unmap_cb_t       unmap_cb;
-       char                     event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+       void                    *event_copy;
+       size_t                   event_copy_sz;
        struct perf_mmap        *next;
 };
 
index e38d859a384d2c32ef770bd18fa68798fc151bbb..228c6c629b0ce16558b880a3b8989207159b9575 100644 (file)
@@ -19,10 +19,23 @@ struct perf_cache {
 struct perf_cpu_map;
 
 /**
- * perf_cpu_map__dummy_new - a map with a singular "any CPU"/dummy -1 value.
+ * perf_cpu_map__new_any_cpu - a map with a singular "any CPU"/dummy -1 value.
+ */
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_any_cpu(void);
+/**
+ * perf_cpu_map__new_online_cpus - a map read from
+ *                                 /sys/devices/system/cpu/online if
+ *                                 available. If reading wasn't possible a map
+ *                                 is created using the online processors
+ *                                 assuming the first 'n' processors are all
+ *                                 online.
+ */
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_online_cpus(void);
+/**
+ * perf_cpu_map__new - create a map from the given cpu_list such as "0-7". If no
+ *                     cpu_list argument is provided then
+ *                     perf_cpu_map__new_online_cpus is returned.
  */
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void);
 LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
 LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file);
 LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
@@ -31,12 +44,23 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
 LIBPERF_API struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
                                                         struct perf_cpu_map *other);
 LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
+/**
+ * perf_cpu_map__cpu - get the CPU value at the given index. Returns -1 if index
+ *                     is invalid.
+ */
 LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+/**
+ * perf_cpu_map__nr - for an empty map returns 1, as perf_cpu_map__cpu returns a
+ *                    cpu of -1 for an invalid index, this makes an empty map
+ *                    look like it contains the "any CPU"/dummy value. Otherwise
+ *                    the result is the number CPUs in the map plus one if the
+ *                    "any CPU"/dummy value is present.
+ */
 LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
 /**
- * perf_cpu_map__empty - is map either empty or the "any CPU"/dummy value.
+ * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value.
  */
-LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+LIBPERF_API bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
 LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
 LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
 LIBPERF_API bool perf_cpu_map__equal(const struct perf_cpu_map *lhs,
@@ -51,6 +75,12 @@ LIBPERF_API bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map);
             (idx) < perf_cpu_map__nr(cpus);                    \
             (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
 
+#define perf_cpu_map__for_each_cpu_skip_any(_cpu, idx, cpus)   \
+       for ((idx) = 0, (_cpu) = perf_cpu_map__cpu(cpus, idx);  \
+            (idx) < perf_cpu_map__nr(cpus);                    \
+            (idx)++, (_cpu) = perf_cpu_map__cpu(cpus, idx))    \
+               if ((_cpu).cpu != -1)
+
 #define perf_cpu_map__for_each_idx(idx, cpus)                          \
        for ((idx) = 0; (idx) < perf_cpu_map__nr(cpus); (idx)++)
 
index 190b56ae923addf23446aedbe4215c124bc4cb53..10b3f372264264ff35e77e529ac3387836ba09e0 100644 (file)
@@ -1,15 +1,15 @@
 LIBPERF_0.0.1 {
        global:
                libperf_init;
-               perf_cpu_map__dummy_new;
-               perf_cpu_map__default_new;
+               perf_cpu_map__new_any_cpu;
+               perf_cpu_map__new_online_cpus;
                perf_cpu_map__get;
                perf_cpu_map__put;
                perf_cpu_map__new;
                perf_cpu_map__read;
                perf_cpu_map__nr;
                perf_cpu_map__cpu;
-               perf_cpu_map__empty;
+               perf_cpu_map__has_any_cpu_or_is_empty;
                perf_cpu_map__max;
                perf_cpu_map__has;
                perf_thread_map__new_array;
index 2184814b37dd393e3a6a0fb10c6ccb4db99df042..0c903c2372c97850ab7d3fddea63ddd67bfd40c9 100644 (file)
@@ -19,6 +19,7 @@
 void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
                     bool overwrite, libperf_unmap_cb_t unmap_cb)
 {
+       /* Assume fields were zero initialized. */
        map->fd = -1;
        map->overwrite = overwrite;
        map->unmap_cb  = unmap_cb;
@@ -51,13 +52,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
 
 void perf_mmap__munmap(struct perf_mmap *map)
 {
-       if (map && map->base != NULL) {
+       if (!map)
+               return;
+
+       zfree(&map->event_copy);
+       map->event_copy_sz = 0;
+       if (map->base) {
                munmap(map->base, perf_mmap__mmap_len(map));
                map->base = NULL;
                map->fd = -1;
                refcount_set(&map->refcnt, 0);
        }
-       if (map && map->unmap_cb)
+       if (map->unmap_cb)
                map->unmap_cb(map);
 }
 
@@ -223,9 +229,17 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
                 */
                if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
                        unsigned int offset = *startp;
-                       unsigned int len = min(sizeof(*event), size), cpy;
+                       unsigned int len = size, cpy;
                        void *dst = map->event_copy;
 
+                       if (size > map->event_copy_sz) {
+                               dst = realloc(map->event_copy, size);
+                               if (!dst)
+                                       return NULL;
+                               map->event_copy = dst;
+                               map->event_copy_sz = size;
+                       }
+
                        do {
                                cpy = min(map->mask + 1 - (offset & map->mask), len);
                                memcpy(dst, &data[offset & map->mask], cpy);
index 87b0510a556ff3215c9df8f951db65d15accb990..c998b1dae86313d58134cf228552b66cf1a06e65 100644 (file)
@@ -21,7 +21,7 @@ int test_cpumap(int argc, char **argv)
 
        libperf_init(libperf_print);
 
-       cpus = perf_cpu_map__dummy_new();
+       cpus = perf_cpu_map__new_any_cpu();
        if (!cpus)
                return -1;
 
@@ -29,7 +29,7 @@ int test_cpumap(int argc, char **argv)
        perf_cpu_map__put(cpus);
        perf_cpu_map__put(cpus);
 
-       cpus = perf_cpu_map__default_new();
+       cpus = perf_cpu_map__new_online_cpus();
        if (!cpus)
                return -1;
 
index ed616fc19b4f2f82061cee202483a85eb0a51832..10f70cb41ff1debbb870d3acc2cb71683bcee7f6 100644 (file)
@@ -46,7 +46,7 @@ static int test_stat_cpu(void)
        };
        int err, idx;
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        __T("failed to create cpus", cpus);
 
        evlist = perf_evlist__new();
@@ -261,7 +261,7 @@ static int test_mmap_thread(void)
        threads = perf_thread_map__new_dummy();
        __T("failed to create threads", threads);
 
-       cpus = perf_cpu_map__dummy_new();
+       cpus = perf_cpu_map__new_any_cpu();
        __T("failed to create cpus", cpus);
 
        perf_thread_map__set_pid(threads, 0, pid);
@@ -350,7 +350,7 @@ static int test_mmap_cpus(void)
 
        attr.config = id;
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        __T("failed to create cpus", cpus);
 
        evlist = perf_evlist__new();
index a11fc51bfb688304e764f9166ebeddb11c902938..545ec31505466647b9ec182f79534fa713df4a57 100644 (file)
@@ -27,7 +27,7 @@ static int test_stat_cpu(void)
        };
        int err, idx;
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        __T("failed to create cpus", cpus);
 
        evsel = perf_evsel__new(&attr);
index adfbae27dc369d8a6dedbe01d2faca2b86f4594a..8561b0f01a2476908bd2bfd73dfd8677be959ef2 100644 (file)
@@ -52,11 +52,21 @@ void uniq(struct cmdnames *cmds)
        if (!cmds->cnt)
                return;
 
-       for (i = j = 1; i < cmds->cnt; i++)
-               if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
-                       cmds->names[j++] = cmds->names[i];
-
+       for (i = 1; i < cmds->cnt; i++) {
+               if (!strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
+                       zfree(&cmds->names[i - 1]);
+       }
+       for (i = 0, j = 0; i < cmds->cnt; i++) {
+               if (cmds->names[i]) {
+                       if (i == j)
+                               j++;
+                       else
+                               cmds->names[j++] = cmds->names[i];
+               }
+       }
        cmds->cnt = j;
+       while (j < i)
+               cmds->names[j++] = NULL;
 }
 
 void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
index f533e76fb48002b7e94e9733eb8747cb1b11f05a..f5b81d439387a14f36954def542a78106a46ce23 100644 (file)
@@ -39,6 +39,9 @@ trace/beauty/generated/
 pmu-events/pmu-events.c
 pmu-events/jevents
 pmu-events/metric_test.log
+tests/shell/*.shellcheck_log
+tests/shell/coresight/*.shellcheck_log
+tests/shell/lib/*.shellcheck_log
 feature/
 libapi/
 libbpf/
@@ -49,3 +52,4 @@ libtraceevent/
 libtraceevent_plugins/
 fixdep
 Documentation/doc.dep
+python_ext_build/
index a97f95825b14e8b77186587ad4f19bf2766d7e8c..19cc179be9a784708492f18a94f06752b33c49f6 100644 (file)
@@ -25,6 +25,7 @@
                q       quicker (less detailed) decoding
                A       approximate IPC
                Z       prefer to ignore timestamps (so-called "timeless" decoding)
+               T       use the timestamp trace as kernel time
 
        The default is all events i.e. the same as --itrace=iybxwpe,
        except for perf script where it is --itrace=ce
index fe168e8165c8d22dd51933da407c9388c685f252..b95524bea021eb2fccdfbb4bf49d465e574584b0 100644 (file)
@@ -155,6 +155,17 @@ include::itrace.txt[]
        stdio or stdio2 (Default: 0).  Note that this is about selection of
        functions to display, not about lines within the function.
 
+--data-type[=TYPE_NAME]::
+       Display data type annotation instead of code.  It infers data type of
+       samples (if they are memory accessing instructions) using DWARF debug
+       information.  It can take an optional argument of data type name.  In
+       that case it'd show annotation for the type only, otherwise it'd show
+       all data types it finds.
+
+--type-stat::
+       Show stats for the data type annotation.
+
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1]
index 0b4e79dbd3f689942d4e734eb4e48161634ae063..379f9d7a8ab11a029e602bde77d57a0ef7785f79 100644 (file)
@@ -251,7 +251,8 @@ annotate.*::
                addr2line binary to use for file names and line numbers.
 
        annotate.objdump::
-               objdump binary to use for disassembly and annotations.
+               objdump binary to use for disassembly and annotations,
+               including in the 'perf test' command.
 
        annotate.disassembler_style::
                Use this to change the default disassembler style to some other value
@@ -722,7 +723,6 @@ session-<NAME>.*::
                Defines new record session for daemon. The value is record's
                command line without the 'record' keyword.
 
-
 SEE ALSO
 --------
 linkperf:perf[1]
index d5f78e125efed157d1270ff256513fa31673f60f..3b12595193c9f49a78a490b888f22d615b92dc43 100644 (file)
@@ -47,6 +47,10 @@ Print PMU events and metrics limited to the specific PMU name.
 --json::
 Output in JSON format.
 
+-o::
+--output=::
+       Output file name. By default output is written to stdout.
+
 [[EVENT_MODIFIERS]]
 EVENT MODIFIERS
 ---------------
@@ -81,11 +85,13 @@ For Intel systems precise event sampling is implemented with PEBS
 which supports up to precise-level 2, and precise level 3 for
 some special cases
 
-On AMD systems it is implemented using IBS (up to precise-level 2).
-The precise modifier works with event types 0x76 (cpu-cycles, CPU
-clocks not halted) and 0xC1 (micro-ops retired). Both events map to
-IBS execution sampling (IBS op) with the IBS Op Counter Control bit
-(IbsOpCntCtl) set respectively (see the
+On AMD systems it is implemented using IBS OP (up to precise-level 2).
+Unlike Intel PEBS which provides levels of precision, AMD core pmu is
+inherently non-precise and IBS is inherently precise. (i.e. ibs_op//,
+ibs_op//p, ibs_op//pp and ibs_op//ppp are all same). The precise modifier
+works with event types 0x76 (cpu-cycles, CPU clocks not halted) and 0xC1
+(micro-ops retired). Both events map to IBS execution sampling (IBS op)
+with the IBS Op Counter Control bit (IbsOpCntCtl) set respectively (see the
 Core Complex (CCX) -> Processor x86 Core -> Instruction Based Sampling (IBS)
 section of the [AMD Processor Programming Reference (PPR)] relevant to the
 family, model and stepping of the processor being used).
index 503abcba1438038732cff8afcfab5012feed0fc5..f5938d616d75176cb7f42e5500fee4f03a4ebafc 100644 (file)
@@ -119,7 +119,7 @@ INFO OPTIONS
 
 
 CONTENTION OPTIONS
---------------
+------------------
 
 -k::
 --key=<value>::
index 1889f66addf2aa936bafea132aed55ab0908ff8d..6015fdd08fb63b679b56195b7734e7449a318851 100644 (file)
@@ -445,6 +445,10 @@ following filters are defined:
                     4th-Gen Xeon+ server), the save branch type is unconditionally enabled
                     when the taken branch stack sampling is enabled.
        - priv: save privilege state during sampling in case binary is not available later
+       - counter: save occurrences of the event since the last branch entry. Currently, the
+                  feature is only supported by a newer CPU, e.g., Intel Sierra Forest and
+                  later platforms. An error out is expected if it's used on the unsupported
+                  kernel or CPUs.
 
 +
 The option requires at least one branch type among any, any_call, any_ret, ind_call, cond.
index af068b4f1e5a696464ba2040b7b39a4831b32050..38f59ac064f7d4615daf5e1bba57a7045ba4c597 100644 (file)
@@ -118,6 +118,9 @@ OPTIONS
        - retire_lat: On X86, this reports pipeline stall of this instruction compared
          to the previous instruction in cycles. And currently supported only on X86
        - simd: Flags describing a SIMD operation. "e" for empty Arm SVE predicate. "p" for partial Arm SVE predicate
+       - type: Data type of sample memory access.
+       - typeoff: Offset in the data type of sample memory access.
+       - symoff: Offset in the symbol.
 
        By default, comm, dso and symbol keys are used.
        (i.e. --sort comm,dso,symbol)
index 8f789fa1242e0dfdeab8aee6268f996e4933a6ac..5af2e432b54fb51a5e5371cffdfd22d162e0c915 100644 (file)
@@ -422,7 +422,34 @@ See perf list output for the possible metrics and metricgroups.
 
 -A::
 --no-aggr::
-Do not aggregate counts across all monitored CPUs.
+--no-merge::
+Do not aggregate/merge counts across monitored CPUs or PMUs.
+
+When multiple events are created from a single event specification,
+stat will, by default, aggregate the event counts and show the result
+in a single row. This option disables that behavior and shows the
+individual events and counts.
+
+Multiple events are created from a single event specification when:
+
+1. PID monitoring isn't requested and the system has more than one
+   CPU. For example, a system with 8 SMT threads will have one event
+   opened on each thread and aggregation is performed across them.
+
+2. Prefix or glob wildcard matching is used for the PMU name. For
+   example, multiple memory controller PMUs may exist typically with a
+   suffix of _0, _1, etc. By default the event counts will all be
+   combined if the PMU is specified without the suffix such as
+   uncore_imc rather than uncore_imc_0.
+
+3. Aliases, which are listed immediately after the Kernel PMU events
+   by perf list, are used.
+
+--hybrid-merge::
+Merge core event counts from all core PMUs. In hybrid or big.LITTLE
+systems by default each core PMU will report its count
+separately. This option forces core PMU counts to be combined to give
+a behavior closer to having a single CPU type in the system.
 
 --topdown::
 Print top-down metrics supported by the CPU. This allows to determine
@@ -475,29 +502,6 @@ highlight 'tma_frontend_bound'. This metric may be drilled into with
 
 Error out if the input is higher than the supported max level.
 
---no-merge::
-Do not merge results from same PMUs.
-
-When multiple events are created from a single event specification,
-stat will, by default, aggregate the event counts and show the result
-in a single row. This option disables that behavior and shows
-the individual events and counts.
-
-Multiple events are created from a single event specification when:
-1. Prefix or glob matching is used for the PMU name.
-2. Aliases, which are listed immediately after the Kernel PMU events
-   by perf list, are used.
-
---hybrid-merge::
-Merge the hybrid event counts from all PMUs.
-
-For hybrid events, by default, the stat aggregates and reports the event
-counts per PMU. But sometimes, it's also useful to aggregate event counts
-from all PMUs. This option enables that behavior and reports the counts
-without PMUs.
-
-For non-hybrid events, it should be no effect.
-
 --smi-cost::
 Measure SMI cost if msr/aperf/ and msr/smi/ events are supported.
 
index ba3df49c169d329223f6d03fb6cab122e737b757..a7cf7bc2f9689dcdfea6cbe8daaaf01205e76be3 100644 (file)
@@ -64,6 +64,9 @@ OPTIONS
           perf-event-open  - Print perf_event_open() arguments and
                              return value
 
+--debug-file::
+       Write debug output to a specified file.
+
 DESCRIPTION
 -----------
 Performance counters for Linux are a new kernel-based subsystem
index b3e6ed10f40c6f6c57578a8c99365dffb53ca94a..aa55850fbc213b939df67bb1df68f776ca555006 100644 (file)
@@ -476,6 +476,11 @@ else
       else
         CFLAGS += -DHAVE_DWARF_GETLOCATIONS_SUPPORT
       endif # dwarf_getlocations
+      ifneq ($(feature-dwarf_getcfi), 1)
+        msg := $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.142);
+      else
+        CFLAGS += -DHAVE_DWARF_CFI_SUPPORT
+      endif # dwarf_getcfi
     endif # Dwarf support
   endif # libelf support
 endif # NO_LIBELF
@@ -680,15 +685,15 @@ ifndef BUILD_BPF_SKEL
 endif
 
 ifeq ($(BUILD_BPF_SKEL),1)
-  ifeq ($(filter -DHAVE_LIBBPF_SUPPORT, $(CFLAGS)),)
-    dummy := $(warning Warning: Disabled BPF skeletons as libbpf is required)
-    BUILD_BPF_SKEL := 0
-  else ifeq ($(filter -DHAVE_LIBELF_SUPPORT, $(CFLAGS)),)
+  ifeq ($(filter -DHAVE_LIBELF_SUPPORT, $(CFLAGS)),)
     dummy := $(warning Warning: Disabled BPF skeletons as libelf is required by bpftool)
     BUILD_BPF_SKEL := 0
   else ifeq ($(filter -DHAVE_ZLIB_SUPPORT, $(CFLAGS)),)
     dummy := $(warning Warning: Disabled BPF skeletons as zlib is required by bpftool)
     BUILD_BPF_SKEL := 0
+  else ifeq ($(filter -DHAVE_LIBBPF_SUPPORT, $(CFLAGS)),)
+    dummy := $(warning Warning: Disabled BPF skeletons as libbpf is required)
+    BUILD_BPF_SKEL := 0
   else ifeq ($(call get-executable,$(CLANG)),)
     dummy := $(warning Warning: Disabled BPF skeletons as clang ($(CLANG)) is missing)
     BUILD_BPF_SKEL := 0
index 058c9aecf6087d065a31115492b4e80bed69c7a2..f8774a9b1377a3e98b98543a66b4f8aea6fb6837 100644 (file)
@@ -134,6 +134,8 @@ include ../scripts/utilities.mak
 #      x86 instruction decoder - new instructions test
 #
 # Define GEN_VMLINUX_H to generate vmlinux.h from the BTF.
+#
+# Define NO_SHELLCHECK if you do not want to run shellcheck during build
 
 # As per kernel Makefile, avoid funny character set dependencies
 unexport LC_ALL
@@ -227,8 +229,25 @@ else
   force_fixdep := $(config)
 endif
 
+# Runs shellcheck on perf test shell scripts
+ifeq ($(NO_SHELLCHECK),1)
+  SHELLCHECK :=
+else
+  SHELLCHECK := $(shell which shellcheck 2> /dev/null)
+endif
+
+# shellcheck is using in tools/perf/tests/Build with option -a/--check-sourced (
+# introduced in v0.4.7) and -S/--severity (introduced in v0.6.0). So make the
+# minimal shellcheck version as v0.6.0.
+ifneq ($(SHELLCHECK),)
+  ifeq ($(shell expr $(shell $(SHELLCHECK) --version | grep version: | \
+        sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \< 060), 1)
+    SHELLCHECK :=
+  endif
+endif
+
 export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK
-export HOSTCC HOSTLD HOSTAR HOSTCFLAGS
+export HOSTCC HOSTLD HOSTAR HOSTCFLAGS SHELLCHECK
 
 include $(srctree)/tools/build/Makefile.include
 
@@ -1152,7 +1171,7 @@ bpf-skel-clean:
 
 clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean arm64-sysreg-defs-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean
        $(call QUIET_CLEAN, core-objs)  $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS)
-       $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+       $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete -o -name '*.shellcheck_log' -delete
        $(Q)$(RM) $(OUTPUT).config-detected
        $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)$(LIBJVMTI).so
        $(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
index 2cf873d71dff03730e62b31c299ed90c2c0a975e..77e6663c1703b8776c4cc33fbf0b81aac833583f 100644 (file)
@@ -199,7 +199,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr,
 {
        int i, err = -EINVAL;
        struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
-       struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+       struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
 
        /* Set option of each CPU we have */
        for (i = 0; i < cpu__max_cpu().cpu; i++) {
@@ -211,7 +211,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr,
                 * program can run on any CPUs in this case, thus don't skip
                 * validation.
                 */
-               if (!perf_cpu_map__empty(event_cpus) &&
+               if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus) &&
                    !perf_cpu_map__has(event_cpus, cpu))
                        continue;
 
@@ -435,7 +435,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
         * Also the case of per-cpu mmaps, need the contextID in order to be notified
         * when a context switch happened.
         */
-       if (!perf_cpu_map__empty(cpus)) {
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
                evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
                                           "timestamp", 1);
                evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
@@ -461,7 +461,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
        evsel->core.attr.sample_period = 1;
 
        /* In per-cpu case, always need the time of mmap events etc */
-       if (!perf_cpu_map__empty(cpus))
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
                evsel__set_sample_bit(evsel, TIME);
 
        err = cs_etm_validate_config(itr, cs_etm_evsel);
@@ -536,10 +536,10 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
        int i;
        int etmv3 = 0, etmv4 = 0, ete = 0;
        struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
-       struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+       struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
 
        /* cpu map is not empty, we have specific CPUs to work with */
-       if (!perf_cpu_map__empty(event_cpus)) {
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
                for (i = 0; i < cpu__max_cpu().cpu; i++) {
                        struct perf_cpu cpu = { .cpu = i, };
 
@@ -802,7 +802,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
        u64 nr_cpu, type;
        struct perf_cpu_map *cpu_map;
        struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
-       struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+       struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
        struct cs_etm_recording *ptr =
                        container_of(itr, struct cs_etm_recording, itr);
        struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
@@ -814,7 +814,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
                return -EINVAL;
 
        /* If the cpu_map is empty all online CPUs are involved */
-       if (perf_cpu_map__empty(event_cpus)) {
+       if (perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
                cpu_map = online_cpus;
        } else {
                /* Make sure all specified CPUs are online */
index e3acc739bd0027b214a4aa5296e81bfcac3afba7..51ccbfd3d246d484400c9a83220efaa8833f9f95 100644 (file)
@@ -232,7 +232,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
         * In the case of per-cpu mmaps, sample CPU for AUX event;
         * also enable the timestamp tracing for samples correlation.
         */
-       if (!perf_cpu_map__empty(cpus)) {
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
                evsel__set_sample_bit(arm_spe_evsel, CPU);
                evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel,
                                           "ts_enable", 1);
@@ -265,7 +265,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
        tracking_evsel->core.attr.sample_period = 1;
 
        /* In per-cpu case, always need the time of mmap events etc */
-       if (!perf_cpu_map__empty(cpus)) {
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
                evsel__set_sample_bit(tracking_evsel, TIME);
                evsel__set_sample_bit(tracking_evsel, CPU);
 
index a2eef9ec5491096d0f718f59e3e2ae6996e35ca3..97037499152ef785837b8815aac4da7907dfebf5 100644 (file)
@@ -57,7 +57,7 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
 
 int get_cpuid(char *buf, size_t sz)
 {
-       struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
+       struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
        int ret;
 
        if (!cpus)
index 98e19c5366acfd628fb7cf979e295f01119114a8..21cc7e4149f721d7d0a28f715df89a991fc3d606 100644 (file)
@@ -61,10 +61,10 @@ static int loongarch_jump__parse(struct arch *arch, struct ins_operands *ops, st
        const char *c = strchr(ops->raw, '#');
        u64 start, end;
 
-       ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
-       ops->raw_func_start = strchr(ops->raw, '<');
+       ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+       ops->jump.raw_func_start = strchr(ops->raw, '<');
 
-       if (ops->raw_func_start && c > ops->raw_func_start)
+       if (ops->jump.raw_func_start && c > ops->jump.raw_func_start)
                c = NULL;
 
        if (c++ != NULL)
index eb152770f148562b8032cbd7a14c1e153e273d93..40f5d17fedab6955c89042837eddb13227b04c58 100644 (file)
@@ -47,7 +47,7 @@ static int test__hybrid_hw_group_event(struct evlist *evlist)
        evsel = evsel__next(evsel);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
        TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW));
-       TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+       TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_INSTRUCTIONS));
        TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
        return TEST_OK;
 }
@@ -102,7 +102,7 @@ static int test__hybrid_group_modifier1(struct evlist *evlist)
        evsel = evsel__next(evsel);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
        TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW));
-       TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+       TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_INSTRUCTIONS));
        TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
        TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -163,6 +163,24 @@ static int test__checkevent_pmu(struct evlist *evlist)
        return TEST_OK;
 }
 
+static int test__hybrid_hw_group_event_2(struct evlist *evlist)
+{
+       struct evsel *evsel, *leader;
+
+       evsel = leader = evlist__first(evlist);
+       TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+       TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW));
+       TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+       TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+
+       evsel = evsel__next(evsel);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+       TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == 0x3c);
+       TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+       return TEST_OK;
+}
+
 struct evlist_test {
        const char *name;
        bool (*valid)(void);
@@ -171,27 +189,27 @@ struct evlist_test {
 
 static const struct evlist_test test__hybrid_events[] = {
        {
-               .name  = "cpu_core/cpu-cycles/",
+               .name  = "cpu_core/cycles/",
                .check = test__hybrid_hw_event_with_pmu,
                /* 0 */
        },
        {
-               .name  = "{cpu_core/cpu-cycles/,cpu_core/instructions/}",
+               .name  = "{cpu_core/cycles/,cpu_core/branches/}",
                .check = test__hybrid_hw_group_event,
                /* 1 */
        },
        {
-               .name  = "{cpu-clock,cpu_core/cpu-cycles/}",
+               .name  = "{cpu-clock,cpu_core/cycles/}",
                .check = test__hybrid_sw_hw_group_event,
                /* 2 */
        },
        {
-               .name  = "{cpu_core/cpu-cycles/,cpu-clock}",
+               .name  = "{cpu_core/cycles/,cpu-clock}",
                .check = test__hybrid_hw_sw_group_event,
                /* 3 */
        },
        {
-               .name  = "{cpu_core/cpu-cycles/k,cpu_core/instructions/u}",
+               .name  = "{cpu_core/cycles/k,cpu_core/branches/u}",
                .check = test__hybrid_group_modifier1,
                /* 4 */
        },
@@ -215,6 +233,11 @@ static const struct evlist_test test__hybrid_events[] = {
                .check = test__hybrid_cache_event,
                /* 8 */
        },
+       {
+               .name  = "{cpu_core/cycles/,cpu_core/cpu-cycles/}",
+               .check = test__hybrid_hw_group_event_2,
+               /* 9 */
+       },
 };
 
 static int test_event(const struct evlist_test *e)
index 5309348057108f60ea2f3d95ad03e1ef0eda8149..399c4a0a29d8c1ac4f21e2ee1030e0beec27aa34 100644 (file)
@@ -113,3 +113,41 @@ int regs_query_register_offset(const char *name)
                        return roff->offset;
        return -EINVAL;
 }
+
+struct dwarf_regs_idx {
+       const char *name;
+       int idx;
+};
+
+static const struct dwarf_regs_idx x86_regidx_table[] = {
+       { "rax", 0 }, { "eax", 0 }, { "ax", 0 }, { "al", 0 },
+       { "rdx", 1 }, { "edx", 1 }, { "dx", 1 }, { "dl", 1 },
+       { "rcx", 2 }, { "ecx", 2 }, { "cx", 2 }, { "cl", 2 },
+       { "rbx", 3 }, { "edx", 3 }, { "bx", 3 }, { "bl", 3 },
+       { "rsi", 4 }, { "esi", 4 }, { "si", 4 }, { "sil", 4 },
+       { "rdi", 5 }, { "edi", 5 }, { "di", 5 }, { "dil", 5 },
+       { "rbp", 6 }, { "ebp", 6 }, { "bp", 6 }, { "bpl", 6 },
+       { "rsp", 7 }, { "esp", 7 }, { "sp", 7 }, { "spl", 7 },
+       { "r8", 8 }, { "r8d", 8 }, { "r8w", 8 }, { "r8b", 8 },
+       { "r9", 9 }, { "r9d", 9 }, { "r9w", 9 }, { "r9b", 9 },
+       { "r10", 10 }, { "r10d", 10 }, { "r10w", 10 }, { "r10b", 10 },
+       { "r11", 11 }, { "r11d", 11 }, { "r11w", 11 }, { "r11b", 11 },
+       { "r12", 12 }, { "r12d", 12 }, { "r12w", 12 }, { "r12b", 12 },
+       { "r13", 13 }, { "r13d", 13 }, { "r13w", 13 }, { "r13b", 13 },
+       { "r14", 14 }, { "r14d", 14 }, { "r14w", 14 }, { "r14b", 14 },
+       { "r15", 15 }, { "r15d", 15 }, { "r15w", 15 }, { "r15b", 15 },
+       { "rip", DWARF_REG_PC },
+};
+
+int get_arch_regnum(const char *name)
+{
+       unsigned int i;
+
+       if (*name != '%')
+               return -EINVAL;
+
+       for (i = 0; i < ARRAY_SIZE(x86_regidx_table); i++)
+               if (!strcmp(x86_regidx_table[i].name, name + 1))
+                       return x86_regidx_table[i].idx;
+       return -ENOENT;
+}
index 5741ffe473120a2c09dd74759aa955f2aec261d8..e65b7dbe27fbcee6cc4890a30622dc27f62a0e95 100644 (file)
 
 #if defined(__x86_64__)
 
-int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
-                                      perf_event__handler_t process,
-                                      struct machine *machine)
+struct perf_event__synthesize_extra_kmaps_cb_args {
+       struct perf_tool *tool;
+       perf_event__handler_t process;
+       struct machine *machine;
+       union perf_event *event;
+};
+
+static int perf_event__synthesize_extra_kmaps_cb(struct map *map, void *data)
 {
-       int rc = 0;
-       struct map_rb_node *pos;
-       struct maps *kmaps = machine__kernel_maps(machine);
-       union perf_event *event = zalloc(sizeof(event->mmap) +
-                                        machine->id_hdr_size);
+       struct perf_event__synthesize_extra_kmaps_cb_args *args = data;
+       union perf_event *event = args->event;
+       struct kmap *kmap;
+       size_t size;
 
-       if (!event) {
-               pr_debug("Not enough memory synthesizing mmap event "
-                        "for extra kernel maps\n");
-               return -1;
-       }
+       if (!__map__is_extra_kernel_map(map))
+               return 0;
 
-       maps__for_each_entry(kmaps, pos) {
-               struct kmap *kmap;
-               size_t size;
-               struct map *map = pos->map;
+       kmap = map__kmap(map);
 
-               if (!__map__is_extra_kernel_map(map))
-                       continue;
+       size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
+                     PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
+                     args->machine->id_hdr_size;
 
-               kmap = map__kmap(map);
+       memset(event, 0, size);
 
-               size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
-                      PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
-                      machine->id_hdr_size;
+       event->mmap.header.type = PERF_RECORD_MMAP;
 
-               memset(event, 0, size);
+       /*
+        * kernel uses 0 for user space maps, see kernel/perf_event.c
+        * __perf_event_mmap
+        */
+       if (machine__is_host(args->machine))
+               event->header.misc = PERF_RECORD_MISC_KERNEL;
+       else
+               event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
 
-               event->mmap.header.type = PERF_RECORD_MMAP;
+       event->mmap.header.size = size;
 
-               /*
-                * kernel uses 0 for user space maps, see kernel/perf_event.c
-                * __perf_event_mmap
-                */
-               if (machine__is_host(machine))
-                       event->header.misc = PERF_RECORD_MISC_KERNEL;
-               else
-                       event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+       event->mmap.start = map__start(map);
+       event->mmap.len   = map__size(map);
+       event->mmap.pgoff = map__pgoff(map);
+       event->mmap.pid   = args->machine->pid;
 
-               event->mmap.header.size = size;
+       strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
 
-               event->mmap.start = map__start(map);
-               event->mmap.len   = map__size(map);
-               event->mmap.pgoff = map__pgoff(map);
-               event->mmap.pid   = machine->pid;
+       if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0)
+               return -1;
 
-               strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
+       return 0;
+}
 
-               if (perf_tool__process_synth_event(tool, event, machine,
-                                                  process) != 0) {
-                       rc = -1;
-                       break;
-               }
+int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
+                                      perf_event__handler_t process,
+                                      struct machine *machine)
+{
+       int rc;
+       struct maps *kmaps = machine__kernel_maps(machine);
+       struct perf_event__synthesize_extra_kmaps_cb_args args = {
+               .tool = tool,
+               .process = process,
+               .machine = machine,
+               .event = zalloc(sizeof(args.event->mmap) + machine->id_hdr_size),
+       };
+
+       if (!args.event) {
+               pr_debug("Not enough memory synthesizing mmap event "
+                        "for extra kernel maps\n");
+               return -1;
        }
 
-       free(event);
+       rc = maps__for_each_map(kmaps, perf_event__synthesize_extra_kmaps_cb, &args);
+
+       free(args.event);
        return rc;
 }
 
index d2c8cac1147021dbf51185db50409d1c078bf9e9..af8ae4647585b460c5f3ef381cfea29f3fa966bd 100644 (file)
@@ -143,7 +143,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
        if (!opts->full_auxtrace)
                return 0;
 
-       if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) {
+       if (opts->full_auxtrace && !perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
                pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
                return -EINVAL;
        }
@@ -224,7 +224,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
                 * In the case of per-cpu mmaps, we need the CPU on the
                 * AUX event.
                 */
-               if (!perf_cpu_map__empty(cpus))
+               if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
                        evsel__set_sample_bit(intel_bts_evsel, CPU);
        }
 
index fa0c718b9e7277f0374356bf5d46b603f19ed7ca..d199619df3abe1b22c70fbfa1eea485eb095ae6f 100644 (file)
@@ -369,7 +369,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
                        ui__warning("Intel Processor Trace: TSC not available\n");
        }
 
-       per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);
+       per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);
 
        auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
        auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -774,7 +774,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
         * Per-cpu recording needs sched_switch events to distinguish different
         * threads.
         */
-       if (have_timing_info && !perf_cpu_map__empty(cpus) &&
+       if (have_timing_info && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
            !record_opts__no_switch_events(opts)) {
                if (perf_can_record_switch_events()) {
                        bool cpu_wide = !target__none(&opts->target) &&
@@ -832,7 +832,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                 * In the case of per-cpu mmaps, we need the CPU on the
                 * AUX event.
                 */
-               if (!perf_cpu_map__empty(cpus))
+               if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
                        evsel__set_sample_bit(intel_pt_evsel, CPU);
        }
 
@@ -858,7 +858,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                        tracking_evsel->immediate = true;
 
                /* In per-cpu case, always need the time of mmap events etc */
-               if (!perf_cpu_map__empty(cpus)) {
+               if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
                        evsel__set_sample_bit(tracking_evsel, TIME);
                        /* And the CPU for switch events */
                        evsel__set_sample_bit(tracking_evsel, CPU);
@@ -870,7 +870,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
         * Warn the user when we do not have enough information to decode i.e.
         * per-cpu with no sched_switch (except workload-only).
         */
-       if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
+       if (!ptr->have_sched_switch && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
            !target__none(&opts->target) &&
            !intel_pt_evsel->core.attr.exclude_user)
                ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
index 6bfffe83dde99bd240de98cdde473d1bb8620d3b..d3db73dac66afe48ff80ff30849e125240c812e7 100644 (file)
@@ -330,7 +330,7 @@ int bench_epoll_ctl(int argc, const char **argv)
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
 
-       cpu = perf_cpu_map__new(NULL);
+       cpu = perf_cpu_map__new_online_cpus();
        if (!cpu)
                goto errmem;
 
index cb5174b53940b265ae2be3adec3a9b916d524dab..06bb3187660abdd736821b5e96dddc73af261fed 100644 (file)
@@ -444,7 +444,7 @@ int bench_epoll_wait(int argc, const char **argv)
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
 
-       cpu = perf_cpu_map__new(NULL);
+       cpu = perf_cpu_map__new_online_cpus();
        if (!cpu)
                goto errmem;
 
index 2005a3fa3026799d1cfcd246cc956ac4e528c97b..0c69d20efa329427c71141c09e6f1c1a9031738c 100644 (file)
@@ -138,7 +138,7 @@ int bench_futex_hash(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
-       cpu = perf_cpu_map__new(NULL);
+       cpu = perf_cpu_map__new_online_cpus();
        if (!cpu)
                goto errmem;
 
index 092cbd52db82b500360022d0d768999d1ca42cb9..7a4973346180fc91009573c503a021c9b217ee29 100644 (file)
@@ -172,7 +172,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
        if (argc)
                goto err;
 
-       cpu = perf_cpu_map__new(NULL);
+       cpu = perf_cpu_map__new_online_cpus();
        if (!cpu)
                err(EXIT_FAILURE, "calloc");
 
index c0035990a33cebafea34493b7978a9e1c2c9b2f7..d9ad736c1a3e0d13317aaf28a777a6327c17e5d3 100644 (file)
@@ -174,7 +174,7 @@ int bench_futex_requeue(int argc, const char **argv)
        if (argc)
                goto err;
 
-       cpu = perf_cpu_map__new(NULL);
+       cpu = perf_cpu_map__new_online_cpus();
        if (!cpu)
                err(EXIT_FAILURE, "cpu_map__new");
 
index 5ab0234d74e696d1afcc48451110653bc4da3649..b66df553e5614cb393066f2ec8b66ee75ba15ed8 100644 (file)
@@ -264,7 +264,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
                        err(EXIT_FAILURE, "mlockall");
        }
 
-       cpu = perf_cpu_map__new(NULL);
+       cpu = perf_cpu_map__new_online_cpus();
        if (!cpu)
                err(EXIT_FAILURE, "calloc");
 
index 18a5894af8bb51fb6aa5353db05ef9c75a270253..690fd6d3da130161ac6473df2ba5dabecbc8324a 100644 (file)
@@ -149,7 +149,7 @@ int bench_futex_wake(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
-       cpu = perf_cpu_map__new(NULL);
+       cpu = perf_cpu_map__new_online_cpus();
        if (!cpu)
                err(EXIT_FAILURE, "calloc");
 
index a01c40131493b76dc75c354b2e60b5321944d198..269c1f4a6852ce49584674322943f1b71e9a77cd 100644 (file)
@@ -32,7 +32,7 @@ static bool sync_mode;
 static const struct option options[] = {
        OPT_U64('l', "loop",    &loops,         "Specify number of loops"),
        OPT_BOOLEAN('s', "sync-mode", &sync_mode,
-                   "Enable the synchronious mode for seccomp notifications"),
+                   "Enable the synchronous mode for seccomp notifications"),
        OPT_END()
 };
 
index aeeb801f1ed7b15f1118de904d4366feb7ed3a97..6c1cc797692d949f6fc07bb195007ecfad536dd6 100644 (file)
@@ -20,6 +20,7 @@
 #include "util/evlist.h"
 #include "util/evsel.h"
 #include "util/annotate.h"
+#include "util/annotate-data.h"
 #include "util/event.h"
 #include <subcmd/parse-options.h>
 #include "util/parse-events.h"
@@ -45,7 +46,6 @@
 struct perf_annotate {
        struct perf_tool tool;
        struct perf_session *session;
-       struct annotation_options opts;
 #ifdef HAVE_SLANG_SUPPORT
        bool       use_tui;
 #endif
@@ -56,9 +56,13 @@ struct perf_annotate {
        bool       skip_missing;
        bool       has_br_stack;
        bool       group_set;
+       bool       data_type;
+       bool       type_stat;
+       bool       insn_stat;
        float      min_percent;
        const char *sym_hist_filter;
        const char *cpu_list;
+       const char *target_data_type;
        DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 };
 
@@ -94,6 +98,7 @@ static void process_basic_block(struct addr_map_symbol *start,
        struct annotation *notes = sym ? symbol__annotation(sym) : NULL;
        struct block_range_iter iter;
        struct block_range *entry;
+       struct annotated_branch *branch;
 
        /*
         * Sanity; NULL isn't executable and the CPU cannot execute backwards
@@ -105,6 +110,8 @@ static void process_basic_block(struct addr_map_symbol *start,
        if (!block_range_iter__valid(&iter))
                return;
 
+       branch = annotation__get_branch(notes);
+
        /*
         * First block in range is a branch target.
         */
@@ -118,8 +125,8 @@ static void process_basic_block(struct addr_map_symbol *start,
                entry->coverage++;
                entry->sym = sym;
 
-               if (notes)
-                       notes->max_coverage = max(notes->max_coverage, entry->coverage);
+               if (branch)
+                       branch->max_coverage = max(branch->max_coverage, entry->coverage);
 
        } while (block_range_iter__next(&iter));
 
@@ -315,9 +322,153 @@ static int hist_entry__tty_annotate(struct hist_entry *he,
                                    struct perf_annotate *ann)
 {
        if (!ann->use_stdio2)
-               return symbol__tty_annotate(&he->ms, evsel, &ann->opts);
+               return symbol__tty_annotate(&he->ms, evsel);
+
+       return symbol__tty_annotate2(&he->ms, evsel);
+}
+
+static void print_annotated_data_header(struct hist_entry *he, struct evsel *evsel)
+{
+       struct dso *dso = map__dso(he->ms.map);
+       int nr_members = 1;
+       int nr_samples = he->stat.nr_events;
+
+       if (evsel__is_group_event(evsel)) {
+               struct hist_entry *pair;
+
+               list_for_each_entry(pair, &he->pairs.head, pairs.node)
+                       nr_samples += pair->stat.nr_events;
+       }
+
+       printf("Annotate type: '%s' in %s (%d samples):\n",
+              he->mem_type->self.type_name, dso->name, nr_samples);
+
+       if (evsel__is_group_event(evsel)) {
+               struct evsel *pos;
+               int i = 0;
+
+               for_each_group_evsel(pos, evsel)
+                       printf(" event[%d] = %s\n", i++, pos->name);
+
+               nr_members = evsel->core.nr_members;
+       }
+
+       printf("============================================================================\n");
+       printf("%*s %10s %10s  %s\n", 11 * nr_members, "samples", "offset", "size", "field");
+}
+
+static void print_annotated_data_type(struct annotated_data_type *mem_type,
+                                     struct annotated_member *member,
+                                     struct evsel *evsel, int indent)
+{
+       struct annotated_member *child;
+       struct type_hist *h = mem_type->histograms[evsel->core.idx];
+       int i, nr_events = 1, samples = 0;
+
+       for (i = 0; i < member->size; i++)
+               samples += h->addr[member->offset + i].nr_samples;
+       printf(" %10d", samples);
 
-       return symbol__tty_annotate2(&he->ms, evsel, &ann->opts);
+       if (evsel__is_group_event(evsel)) {
+               struct evsel *pos;
+
+               for_each_group_member(pos, evsel) {
+                       h = mem_type->histograms[pos->core.idx];
+
+                       samples = 0;
+                       for (i = 0; i < member->size; i++)
+                               samples += h->addr[member->offset + i].nr_samples;
+                       printf(" %10d", samples);
+               }
+               nr_events = evsel->core.nr_members;
+       }
+
+       printf(" %10d %10d  %*s%s\t%s",
+              member->offset, member->size, indent, "", member->type_name,
+              member->var_name ?: "");
+
+       if (!list_empty(&member->children))
+               printf(" {\n");
+
+       list_for_each_entry(child, &member->children, node)
+               print_annotated_data_type(mem_type, child, evsel, indent + 4);
+
+       if (!list_empty(&member->children))
+               printf("%*s}", 11 * nr_events + 24 + indent, "");
+       printf(";\n");
+}
+
+static void print_annotate_data_stat(struct annotated_data_stat *s)
+{
+#define PRINT_STAT(fld) if (s->fld) printf("%10d : %s\n", s->fld, #fld)
+
+       int bad = s->no_sym +
+                       s->no_insn +
+                       s->no_insn_ops +
+                       s->no_mem_ops +
+                       s->no_reg +
+                       s->no_dbginfo +
+                       s->no_cuinfo +
+                       s->no_var +
+                       s->no_typeinfo +
+                       s->invalid_size +
+                       s->bad_offset;
+       int ok = s->total - bad;
+
+       printf("Annotate data type stats:\n");
+       printf("total %d, ok %d (%.1f%%), bad %d (%.1f%%)\n",
+               s->total, ok, 100.0 * ok / (s->total ?: 1), bad, 100.0 * bad / (s->total ?: 1));
+       printf("-----------------------------------------------------------\n");
+       PRINT_STAT(no_sym);
+       PRINT_STAT(no_insn);
+       PRINT_STAT(no_insn_ops);
+       PRINT_STAT(no_mem_ops);
+       PRINT_STAT(no_reg);
+       PRINT_STAT(no_dbginfo);
+       PRINT_STAT(no_cuinfo);
+       PRINT_STAT(no_var);
+       PRINT_STAT(no_typeinfo);
+       PRINT_STAT(invalid_size);
+       PRINT_STAT(bad_offset);
+       printf("\n");
+
+#undef PRINT_STAT
+}
+
+static void print_annotate_item_stat(struct list_head *head, const char *title)
+{
+       struct annotated_item_stat *istat, *pos, *iter;
+       int total_good, total_bad, total;
+       int sum1, sum2;
+       LIST_HEAD(tmp);
+
+       /* sort the list by count */
+       list_splice_init(head, &tmp);
+       total_good = total_bad = 0;
+
+       list_for_each_entry_safe(istat, pos, &tmp, list) {
+               total_good += istat->good;
+               total_bad += istat->bad;
+               sum1 = istat->good + istat->bad;
+
+               list_for_each_entry(iter, head, list) {
+                       sum2 = iter->good + iter->bad;
+                       if (sum1 > sum2)
+                               break;
+               }
+               list_move_tail(&istat->list, &iter->list);
+       }
+       total = total_good + total_bad;
+
+       printf("Annotate %s stats\n", title);
+       printf("total %d, ok %d (%.1f%%), bad %d (%.1f%%)\n\n", total,
+              total_good, 100.0 * total_good / (total ?: 1),
+              total_bad, 100.0 * total_bad / (total ?: 1));
+       printf("  %-10s: %5s %5s\n", "Name", "Good", "Bad");
+       printf("-----------------------------------------------------------\n");
+       list_for_each_entry(istat, head, list)
+               printf("  %-10s: %5d %5d\n", istat->name, istat->good, istat->bad);
+       printf("\n");
 }
 
 static void hists__find_annotations(struct hists *hists,
@@ -327,6 +478,11 @@ static void hists__find_annotations(struct hists *hists,
        struct rb_node *nd = rb_first_cached(&hists->entries), *next;
        int key = K_RIGHT;
 
+       if (ann->type_stat)
+               print_annotate_data_stat(&ann_data_stat);
+       if (ann->insn_stat)
+               print_annotate_item_stat(&ann_insn_stat, "Instruction");
+
        while (nd) {
                struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
                struct annotation *notes;
@@ -359,11 +515,38 @@ find_next:
                        continue;
                }
 
+               if (ann->data_type) {
+                       /* skip unknown type */
+                       if (he->mem_type->histograms == NULL)
+                               goto find_next;
+
+                       if (ann->target_data_type) {
+                               const char *type_name = he->mem_type->self.type_name;
+
+                               /* skip 'struct ' prefix in the type name */
+                               if (strncmp(ann->target_data_type, "struct ", 7) &&
+                                   !strncmp(type_name, "struct ", 7))
+                                       type_name += 7;
+
+                               /* skip 'union ' prefix in the type name */
+                               if (strncmp(ann->target_data_type, "union ", 6) &&
+                                   !strncmp(type_name, "union ", 6))
+                                       type_name += 6;
+
+                               if (strcmp(ann->target_data_type, type_name))
+                                       goto find_next;
+                       }
+
+                       print_annotated_data_header(he, evsel);
+                       print_annotated_data_type(he->mem_type, &he->mem_type->self, evsel, 0);
+                       printf("\n");
+                       goto find_next;
+               }
+
                if (use_browser == 2) {
                        int ret;
                        int (*annotate)(struct hist_entry *he,
                                        struct evsel *evsel,
-                                       struct annotation_options *options,
                                        struct hist_browser_timer *hbt);
 
                        annotate = dlsym(perf_gtk_handle,
@@ -373,14 +556,14 @@ find_next:
                                return;
                        }
 
-                       ret = annotate(he, evsel, &ann->opts, NULL);
+                       ret = annotate(he, evsel, NULL);
                        if (!ret || !ann->skip_missing)
                                return;
 
                        /* skip missing symbols */
                        nd = rb_next(nd);
                } else if (use_browser == 1) {
-                       key = hist_entry__tui_annotate(he, evsel, NULL, &ann->opts);
+                       key = hist_entry__tui_annotate(he, evsel, NULL);
 
                        switch (key) {
                        case -1:
@@ -422,9 +605,9 @@ static int __cmd_annotate(struct perf_annotate *ann)
                        goto out;
        }
 
-       if (!ann->opts.objdump_path) {
+       if (!annotate_opts.objdump_path) {
                ret = perf_env__lookup_objdump(&session->header.env,
-                                              &ann->opts.objdump_path);
+                                              &annotate_opts.objdump_path);
                if (ret)
                        goto out;
        }
@@ -457,8 +640,20 @@ static int __cmd_annotate(struct perf_annotate *ann)
                        evsel__reset_sample_bit(pos, CALLCHAIN);
                        evsel__output_resort(pos, NULL);
 
-                       if (symbol_conf.event_group && !evsel__is_group_leader(pos))
+                       /*
+                        * An event group needs to display other events too.
+                        * Let's delay printing until other events are processed.
+                        */
+                       if (symbol_conf.event_group) {
+                               if (!evsel__is_group_leader(pos)) {
+                                       struct hists *leader_hists;
+
+                                       leader_hists = evsel__hists(evsel__leader(pos));
+                                       hists__match(leader_hists, hists);
+                                       hists__link(leader_hists, hists);
+                               }
                                continue;
+                       }
 
                        hists__find_annotations(hists, pos, ann);
                }
@@ -469,6 +664,20 @@ static int __cmd_annotate(struct perf_annotate *ann)
                goto out;
        }
 
+       /* Display group events together */
+       evlist__for_each_entry(session->evlist, pos) {
+               struct hists *hists = evsel__hists(pos);
+               u32 nr_samples = hists->stats.nr_samples;
+
+               if (nr_samples == 0)
+                       continue;
+
+               if (!symbol_conf.event_group || !evsel__is_group_leader(pos))
+                       continue;
+
+               hists__find_annotations(hists, pos, ann);
+       }
+
        if (use_browser == 2) {
                void (*show_annotations)(void);
 
@@ -495,6 +704,17 @@ static int parse_percent_limit(const struct option *opt, const char *str,
        return 0;
 }
 
+static int parse_data_type(const struct option *opt, const char *str, int unset)
+{
+       struct perf_annotate *ann = opt->value;
+
+       ann->data_type = !unset;
+       if (str)
+               ann->target_data_type = strdup(str);
+
+       return 0;
+}
+
 static const char * const annotate_usage[] = {
        "perf annotate [<options>]",
        NULL
@@ -558,9 +778,9 @@ int cmd_annotate(int argc, const char **argv)
                   "file", "vmlinux pathname"),
        OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
                    "load module symbols - WARNING: use only with -k and LIVE kernel"),
-       OPT_BOOLEAN('l', "print-line", &annotate.opts.print_lines,
+       OPT_BOOLEAN('l', "print-line", &annotate_opts.print_lines,
                    "print matching source lines (may be slow)"),
-       OPT_BOOLEAN('P', "full-paths", &annotate.opts.full_path,
+       OPT_BOOLEAN('P', "full-paths", &annotate_opts.full_path,
                    "Don't shorten the displayed pathnames"),
        OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
                    "Skip symbols that cannot be annotated"),
@@ -571,15 +791,15 @@ int cmd_annotate(int argc, const char **argv)
        OPT_CALLBACK(0, "symfs", NULL, "directory",
                     "Look for files with symbols relative to this directory",
                     symbol__config_symfs),
-       OPT_BOOLEAN(0, "source", &annotate.opts.annotate_src,
+       OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
                    "Interleave source code with assembly code (default)"),
-       OPT_BOOLEAN(0, "asm-raw", &annotate.opts.show_asm_raw,
+       OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
                    "Display raw encoding of assembly instructions (default)"),
        OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
                   "Specify disassembler style (e.g. -M intel for intel syntax)"),
-       OPT_STRING(0, "prefix", &annotate.opts.prefix, "prefix",
+       OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
                    "Add prefix to source file path names in programs (with --prefix-strip)"),
-       OPT_STRING(0, "prefix-strip", &annotate.opts.prefix_strip, "N",
+       OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
                    "Strip first N entries of source file path name in programs (with --prefix)"),
        OPT_STRING(0, "objdump", &objdump_path, "path",
                   "objdump binary to use for disassembly and annotations"),
@@ -598,7 +818,7 @@ int cmd_annotate(int argc, const char **argv)
        OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
                             "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
                             stdio__config_color, "always"),
-       OPT_CALLBACK(0, "percent-type", &annotate.opts, "local-period",
+       OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
                     "Set percent type local/global-period/hits",
                     annotate_parse_percent_type),
        OPT_CALLBACK(0, "percent-limit", &annotate, "percent",
@@ -606,7 +826,13 @@ int cmd_annotate(int argc, const char **argv)
        OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
                            "Instruction Tracing options\n" ITRACE_HELP,
                            itrace_parse_synth_opts),
-
+       OPT_CALLBACK_OPTARG(0, "data-type", &annotate, NULL, "name",
+                           "Show data type annotate for the memory accesses",
+                           parse_data_type),
+       OPT_BOOLEAN(0, "type-stat", &annotate.type_stat,
+                   "Show stats for the data type annotation"),
+       OPT_BOOLEAN(0, "insn-stat", &annotate.insn_stat,
+                   "Show instruction stats for the data type annotation"),
        OPT_END()
        };
        int ret;
@@ -614,13 +840,13 @@ int cmd_annotate(int argc, const char **argv)
        set_option_flag(options, 0, "show-total-period", PARSE_OPT_EXCLUSIVE);
        set_option_flag(options, 0, "show-nr-samples", PARSE_OPT_EXCLUSIVE);
 
-       annotation_options__init(&annotate.opts);
+       annotation_options__init();
 
        ret = hists__init();
        if (ret < 0)
                return ret;
 
-       annotation_config__init(&annotate.opts);
+       annotation_config__init();
 
        argc = parse_options(argc, argv, options, annotate_usage, 0);
        if (argc) {
@@ -635,13 +861,13 @@ int cmd_annotate(int argc, const char **argv)
        }
 
        if (disassembler_style) {
-               annotate.opts.disassembler_style = strdup(disassembler_style);
-               if (!annotate.opts.disassembler_style)
+               annotate_opts.disassembler_style = strdup(disassembler_style);
+               if (!annotate_opts.disassembler_style)
                        return -ENOMEM;
        }
        if (objdump_path) {
-               annotate.opts.objdump_path = strdup(objdump_path);
-               if (!annotate.opts.objdump_path)
+               annotate_opts.objdump_path = strdup(objdump_path);
+               if (!annotate_opts.objdump_path)
                        return -ENOMEM;
        }
        if (addr2line_path) {
@@ -650,7 +876,7 @@ int cmd_annotate(int argc, const char **argv)
                        return -ENOMEM;
        }
 
-       if (annotate_check_args(&annotate.opts) < 0)
+       if (annotate_check_args() < 0)
                return -EINVAL;
 
 #ifdef HAVE_GTK2_SUPPORT
@@ -660,6 +886,13 @@ int cmd_annotate(int argc, const char **argv)
        }
 #endif
 
+#ifndef HAVE_DWARF_GETLOCATIONS_SUPPORT
+       if (annotate.data_type) {
+               pr_err("Error: Data type profiling is disabled due to missing DWARF support\n");
+               return -ENOTSUP;
+       }
+#endif
+
        ret = symbol__validate_sym_arguments();
        if (ret)
                return ret;
@@ -702,6 +935,14 @@ int cmd_annotate(int argc, const char **argv)
                use_browser = 2;
 #endif
 
+       /* FIXME: only support stdio for now */
+       if (annotate.data_type) {
+               use_browser = 0;
+               annotate_opts.annotate_src = false;
+               symbol_conf.annotate_data_member = true;
+               symbol_conf.annotate_data_sample = true;
+       }
+
        setup_browser(true);
 
        /*
@@ -709,7 +950,10 @@ int cmd_annotate(int argc, const char **argv)
         * symbol, we do not care about the processes in annotate,
         * set sort order to avoid repeated output.
         */
-       sort_order = "dso,symbol";
+       if (annotate.data_type)
+               sort_order = "dso,type";
+       else
+               sort_order = "dso,symbol";
 
        /*
         * Set SORT_MODE__BRANCH so that annotate display IPC/Cycle
@@ -731,7 +975,7 @@ out_delete:
 #ifndef NDEBUG
        perf_session__delete(annotate.session);
 #endif
-       annotation_options__exit(&annotate.opts);
+       annotation_options__exit();
 
        return ret;
 }
index a4cf9de7a7b5a9d6ae416f1a65f6a5c47c6e2e4f..f78eea9e21539352e96c68f37c4b0001c84054e4 100644 (file)
@@ -2320,7 +2320,7 @@ static int setup_nodes(struct perf_session *session)
                nodes[node] = set;
 
                /* empty node, skip */
-               if (perf_cpu_map__empty(map))
+               if (perf_cpu_map__has_any_cpu_or_is_empty(map))
                        continue;
 
                perf_cpu_map__for_each_cpu(cpu, idx, map) {
index ac2e6c75f9120192ad5220eaf727896ef8b11aee..eb30c8eca48878482d9e9682165330a161a6f3f8 100644 (file)
@@ -333,7 +333,7 @@ static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
 
 static int reset_tracing_cpu(void)
 {
-       struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
+       struct perf_cpu_map *cpumap = perf_cpu_map__new_online_cpus();
        int ret;
 
        ret = set_tracing_cpumask(cpumap);
index c8cf2fdd9cff9637ebd97660321e858fa3aeb3e0..eb3ef5c24b66258c568dea02252975f1addb3924 100644 (file)
@@ -2265,6 +2265,12 @@ int cmd_inject(int argc, const char **argv)
                "perf inject [<options>]",
                NULL
        };
+
+       if (!inject.itrace_synth_opts.set) {
+               /* Disable eager loading of kernel symbols that adds overhead to perf inject. */
+               symbol_conf.lazy_load_kernel_maps = true;
+       }
+
 #ifndef HAVE_JITDUMP
        set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
 #endif
index 61c2c96cc0701b886d7c1daecd92cfa25581d1f7..e27a1b1288c29ffe96ce871bc5bab76c8a67c8b7 100644 (file)
@@ -30,6 +30,8 @@
  * functions.
  */
 struct print_state {
+       /** @fp: File to write output to. */
+       FILE *fp;
        /**
         * @pmu_glob: Optionally restrict PMU and metric matching to PMU or
         * debugfs subsystem name.
@@ -66,13 +68,15 @@ static void default_print_start(void *ps)
 {
        struct print_state *print_state = ps;
 
-       if (!print_state->name_only && pager_in_use())
-               printf("\nList of pre-defined events (to be used in -e or -M):\n\n");
+       if (!print_state->name_only && pager_in_use()) {
+               fprintf(print_state->fp,
+                       "\nList of pre-defined events (to be used in -e or -M):\n\n");
+       }
 }
 
 static void default_print_end(void *print_state __maybe_unused) {}
 
-static void wordwrap(const char *s, int start, int max, int corr)
+static void wordwrap(FILE *fp, const char *s, int start, int max, int corr)
 {
        int column = start;
        int n;
@@ -82,10 +86,10 @@ static void wordwrap(const char *s, int start, int max, int corr)
                int wlen = strcspn(s, " \t\n");
 
                if ((column + wlen >= max && column > start) || saw_newline) {
-                       printf("\n%*s", start, "");
+                       fprintf(fp, "\n%*s", start, "");
                        column = start + corr;
                }
-               n = printf("%s%.*s", column > start ? " " : "", wlen, s);
+               n = fprintf(fp, "%s%.*s", column > start ? " " : "", wlen, s);
                if (n <= 0)
                        break;
                saw_newline = s[wlen] == '\n';
@@ -104,6 +108,7 @@ static void default_print_event(void *ps, const char *pmu_name, const char *topi
 {
        struct print_state *print_state = ps;
        int pos;
+       FILE *fp = print_state->fp;
 
        if (deprecated && !print_state->deprecated)
                return;
@@ -119,30 +124,30 @@ static void default_print_event(void *ps, const char *pmu_name, const char *topi
 
        if (print_state->name_only) {
                if (event_alias && strlen(event_alias))
-                       printf("%s ", event_alias);
+                       fprintf(fp, "%s ", event_alias);
                else
-                       printf("%s ", event_name);
+                       fprintf(fp, "%s ", event_name);
                return;
        }
 
        if (strcmp(print_state->last_topic, topic ?: "")) {
                if (topic)
-                       printf("\n%s:\n", topic);
+                       fprintf(fp, "\n%s:\n", topic);
                zfree(&print_state->last_topic);
                print_state->last_topic = strdup(topic ?: "");
        }
 
        if (event_alias && strlen(event_alias))
-               pos = printf("  %s OR %s", event_name, event_alias);
+               pos = fprintf(fp, "  %s OR %s", event_name, event_alias);
        else
-               pos = printf("  %s", event_name);
+               pos = fprintf(fp, "  %s", event_name);
 
        if (!topic && event_type_desc) {
                for (; pos < 53; pos++)
-                       putchar(' ');
-               printf("[%s]\n", event_type_desc);
+                       fputc(' ', fp);
+               fprintf(fp, "[%s]\n", event_type_desc);
        } else
-               putchar('\n');
+               fputc('\n', fp);
 
        if (desc && print_state->desc) {
                char *desc_with_unit = NULL;
@@ -155,22 +160,22 @@ static void default_print_event(void *ps, const char *pmu_name, const char *topi
                                              ? "%s. Unit: %s" : "%s Unit: %s",
                                            desc, pmu_name);
                }
-               printf("%*s", 8, "[");
-               wordwrap(desc_len > 0 ? desc_with_unit : desc, 8, pager_get_columns(), 0);
-               printf("]\n");
+               fprintf(fp, "%*s", 8, "[");
+               wordwrap(fp, desc_len > 0 ? desc_with_unit : desc, 8, pager_get_columns(), 0);
+               fprintf(fp, "]\n");
                free(desc_with_unit);
        }
        long_desc = long_desc ?: desc;
        if (long_desc && print_state->long_desc) {
-               printf("%*s", 8, "[");
-               wordwrap(long_desc, 8, pager_get_columns(), 0);
-               printf("]\n");
+               fprintf(fp, "%*s", 8, "[");
+               wordwrap(fp, long_desc, 8, pager_get_columns(), 0);
+               fprintf(fp, "]\n");
        }
 
        if (print_state->detailed && encoding_desc) {
-               printf("%*s", 8, "");
-               wordwrap(encoding_desc, 8, pager_get_columns(), 0);
-               putchar('\n');
+               fprintf(fp, "%*s", 8, "");
+               wordwrap(fp, encoding_desc, 8, pager_get_columns(), 0);
+               fputc('\n', fp);
        }
 }
 
@@ -184,6 +189,7 @@ static void default_print_metric(void *ps,
                                const char *unit __maybe_unused)
 {
        struct print_state *print_state = ps;
+       FILE *fp = print_state->fp;
 
        if (print_state->event_glob &&
            (!print_state->metrics || !name || !strglobmatch(name, print_state->event_glob)) &&
@@ -192,27 +198,27 @@ static void default_print_metric(void *ps,
 
        if (!print_state->name_only && !print_state->last_metricgroups) {
                if (print_state->metricgroups) {
-                       printf("\nMetric Groups:\n");
+                       fprintf(fp, "\nMetric Groups:\n");
                        if (!print_state->metrics)
-                               putchar('\n');
+                               fputc('\n', fp);
                } else {
-                       printf("\nMetrics:\n\n");
+                       fprintf(fp, "\nMetrics:\n\n");
                }
        }
        if (!print_state->last_metricgroups ||
            strcmp(print_state->last_metricgroups, group ?: "")) {
                if (group && print_state->metricgroups) {
                        if (print_state->name_only)
-                               printf("%s ", group);
+                               fprintf(fp, "%s ", group);
                        else if (print_state->metrics) {
                                const char *gdesc = describe_metricgroup(group);
 
                                if (gdesc)
-                                       printf("\n%s: [%s]\n", group, gdesc);
+                                       fprintf(fp, "\n%s: [%s]\n", group, gdesc);
                                else
-                                       printf("\n%s:\n", group);
+                                       fprintf(fp, "\n%s:\n", group);
                        } else
-                               printf("%s\n", group);
+                               fprintf(fp, "%s\n", group);
                }
                zfree(&print_state->last_metricgroups);
                print_state->last_metricgroups = strdup(group ?: "");
@@ -223,53 +229,59 @@ static void default_print_metric(void *ps,
        if (print_state->name_only) {
                if (print_state->metrics &&
                    !strlist__has_entry(print_state->visited_metrics, name)) {
-                       printf("%s ", name);
+                       fprintf(fp, "%s ", name);
                        strlist__add(print_state->visited_metrics, name);
                }
                return;
        }
-       printf("  %s\n", name);
+       fprintf(fp, "  %s\n", name);
 
        if (desc && print_state->desc) {
-               printf("%*s", 8, "[");
-               wordwrap(desc, 8, pager_get_columns(), 0);
-               printf("]\n");
+               fprintf(fp, "%*s", 8, "[");
+               wordwrap(fp, desc, 8, pager_get_columns(), 0);
+               fprintf(fp, "]\n");
        }
        if (long_desc && print_state->long_desc) {
-               printf("%*s", 8, "[");
-               wordwrap(long_desc, 8, pager_get_columns(), 0);
-               printf("]\n");
+               fprintf(fp, "%*s", 8, "[");
+               wordwrap(fp, long_desc, 8, pager_get_columns(), 0);
+               fprintf(fp, "]\n");
        }
        if (expr && print_state->detailed) {
-               printf("%*s", 8, "[");
-               wordwrap(expr, 8, pager_get_columns(), 0);
-               printf("]\n");
+               fprintf(fp, "%*s", 8, "[");
+               wordwrap(fp, expr, 8, pager_get_columns(), 0);
+               fprintf(fp, "]\n");
        }
        if (threshold && print_state->detailed) {
-               printf("%*s", 8, "[");
-               wordwrap(threshold, 8, pager_get_columns(), 0);
-               printf("]\n");
+               fprintf(fp, "%*s", 8, "[");
+               wordwrap(fp, threshold, 8, pager_get_columns(), 0);
+               fprintf(fp, "]\n");
        }
 }
 
 struct json_print_state {
+       /** @fp: File to write output to. */
+       FILE *fp;
        /** Should a separator be printed prior to the next item? */
        bool need_sep;
 };
 
-static void json_print_start(void *print_state __maybe_unused)
+static void json_print_start(void *ps)
 {
-       printf("[\n");
+       struct json_print_state *print_state = ps;
+       FILE *fp = print_state->fp;
+
+       fprintf(fp, "[\n");
 }
 
 static void json_print_end(void *ps)
 {
        struct json_print_state *print_state = ps;
+       FILE *fp = print_state->fp;
 
-       printf("%s]\n", print_state->need_sep ? "\n" : "");
+       fprintf(fp, "%s]\n", print_state->need_sep ? "\n" : "");
 }
 
-static void fix_escape_printf(struct strbuf *buf, const char *fmt, ...)
+static void fix_escape_fprintf(FILE *fp, struct strbuf *buf, const char *fmt, ...)
 {
        va_list args;
 
@@ -318,7 +330,7 @@ static void fix_escape_printf(struct strbuf *buf, const char *fmt, ...)
                }
        }
        va_end(args);
-       fputs(buf->buf, stdout);
+       fputs(buf->buf, fp);
 }
 
 static void json_print_event(void *ps, const char *pmu_name, const char *topic,
@@ -330,60 +342,71 @@ static void json_print_event(void *ps, const char *pmu_name, const char *topic,
 {
        struct json_print_state *print_state = ps;
        bool need_sep = false;
+       FILE *fp = print_state->fp;
        struct strbuf buf;
 
        strbuf_init(&buf, 0);
-       printf("%s{\n", print_state->need_sep ? ",\n" : "");
+       fprintf(fp, "%s{\n", print_state->need_sep ? ",\n" : "");
        print_state->need_sep = true;
        if (pmu_name) {
-               fix_escape_printf(&buf, "\t\"Unit\": \"%S\"", pmu_name);
+               fix_escape_fprintf(fp, &buf, "\t\"Unit\": \"%S\"", pmu_name);
                need_sep = true;
        }
        if (topic) {
-               fix_escape_printf(&buf, "%s\t\"Topic\": \"%S\"", need_sep ? ",\n" : "", topic);
+               fix_escape_fprintf(fp, &buf, "%s\t\"Topic\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  topic);
                need_sep = true;
        }
        if (event_name) {
-               fix_escape_printf(&buf, "%s\t\"EventName\": \"%S\"", need_sep ? ",\n" : "",
-                                 event_name);
+               fix_escape_fprintf(fp, &buf, "%s\t\"EventName\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  event_name);
                need_sep = true;
        }
        if (event_alias && strlen(event_alias)) {
-               fix_escape_printf(&buf, "%s\t\"EventAlias\": \"%S\"", need_sep ? ",\n" : "",
-                                 event_alias);
+               fix_escape_fprintf(fp, &buf, "%s\t\"EventAlias\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  event_alias);
                need_sep = true;
        }
        if (scale_unit && strlen(scale_unit)) {
-               fix_escape_printf(&buf, "%s\t\"ScaleUnit\": \"%S\"", need_sep ? ",\n" : "",
-                                 scale_unit);
+               fix_escape_fprintf(fp, &buf, "%s\t\"ScaleUnit\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  scale_unit);
                need_sep = true;
        }
        if (event_type_desc) {
-               fix_escape_printf(&buf, "%s\t\"EventType\": \"%S\"", need_sep ? ",\n" : "",
-                                 event_type_desc);
+               fix_escape_fprintf(fp, &buf, "%s\t\"EventType\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  event_type_desc);
                need_sep = true;
        }
        if (deprecated) {
-               fix_escape_printf(&buf, "%s\t\"Deprecated\": \"%S\"", need_sep ? ",\n" : "",
-                                 deprecated ? "1" : "0");
+               fix_escape_fprintf(fp, &buf, "%s\t\"Deprecated\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  deprecated ? "1" : "0");
                need_sep = true;
        }
        if (desc) {
-               fix_escape_printf(&buf, "%s\t\"BriefDescription\": \"%S\"", need_sep ? ",\n" : "",
-                                 desc);
+               fix_escape_fprintf(fp, &buf, "%s\t\"BriefDescription\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  desc);
                need_sep = true;
        }
        if (long_desc) {
-               fix_escape_printf(&buf, "%s\t\"PublicDescription\": \"%S\"", need_sep ? ",\n" : "",
-                                 long_desc);
+               fix_escape_fprintf(fp, &buf, "%s\t\"PublicDescription\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  long_desc);
                need_sep = true;
        }
        if (encoding_desc) {
-               fix_escape_printf(&buf, "%s\t\"Encoding\": \"%S\"", need_sep ? ",\n" : "",
-                                 encoding_desc);
+               fix_escape_fprintf(fp, &buf, "%s\t\"Encoding\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  encoding_desc);
                need_sep = true;
        }
-       printf("%s}", need_sep ? "\n" : "");
+       fprintf(fp, "%s}", need_sep ? "\n" : "");
        strbuf_release(&buf);
 }
 
@@ -394,43 +417,53 @@ static void json_print_metric(void *ps __maybe_unused, const char *group,
 {
        struct json_print_state *print_state = ps;
        bool need_sep = false;
+       FILE *fp = print_state->fp;
        struct strbuf buf;
 
        strbuf_init(&buf, 0);
-       printf("%s{\n", print_state->need_sep ? ",\n" : "");
+       fprintf(fp, "%s{\n", print_state->need_sep ? ",\n" : "");
        print_state->need_sep = true;
        if (group) {
-               fix_escape_printf(&buf, "\t\"MetricGroup\": \"%S\"", group);
+               fix_escape_fprintf(fp, &buf, "\t\"MetricGroup\": \"%S\"", group);
                need_sep = true;
        }
        if (name) {
-               fix_escape_printf(&buf, "%s\t\"MetricName\": \"%S\"", need_sep ? ",\n" : "", name);
+               fix_escape_fprintf(fp, &buf, "%s\t\"MetricName\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  name);
                need_sep = true;
        }
        if (expr) {
-               fix_escape_printf(&buf, "%s\t\"MetricExpr\": \"%S\"", need_sep ? ",\n" : "", expr);
+               fix_escape_fprintf(fp, &buf, "%s\t\"MetricExpr\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  expr);
                need_sep = true;
        }
        if (threshold) {
-               fix_escape_printf(&buf, "%s\t\"MetricThreshold\": \"%S\"", need_sep ? ",\n" : "",
-                                 threshold);
+               fix_escape_fprintf(fp, &buf, "%s\t\"MetricThreshold\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  threshold);
                need_sep = true;
        }
        if (unit) {
-               fix_escape_printf(&buf, "%s\t\"ScaleUnit\": \"%S\"", need_sep ? ",\n" : "", unit);
+               fix_escape_fprintf(fp, &buf, "%s\t\"ScaleUnit\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  unit);
                need_sep = true;
        }
        if (desc) {
-               fix_escape_printf(&buf, "%s\t\"BriefDescription\": \"%S\"", need_sep ? ",\n" : "",
-                                 desc);
+               fix_escape_fprintf(fp, &buf, "%s\t\"BriefDescription\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  desc);
                need_sep = true;
        }
        if (long_desc) {
-               fix_escape_printf(&buf, "%s\t\"PublicDescription\": \"%S\"", need_sep ? ",\n" : "",
-                                 long_desc);
+               fix_escape_fprintf(fp, &buf, "%s\t\"PublicDescription\": \"%S\"",
+                                  need_sep ? ",\n" : "",
+                                  long_desc);
                need_sep = true;
        }
-       printf("%s}", need_sep ? "\n" : "");
+       fprintf(fp, "%s}", need_sep ? "\n" : "");
        strbuf_release(&buf);
 }
 
@@ -449,8 +482,12 @@ static bool default_skip_duplicate_pmus(void *ps)
 int cmd_list(int argc, const char **argv)
 {
        int i, ret = 0;
-       struct print_state default_ps = {};
-       struct print_state json_ps = {};
+       struct print_state default_ps = {
+               .fp = stdout,
+       };
+       struct print_state json_ps = {
+               .fp = stdout,
+       };
        void *ps = &default_ps;
        struct print_callbacks print_cb = {
                .print_start = default_print_start,
@@ -461,6 +498,7 @@ int cmd_list(int argc, const char **argv)
        };
        const char *cputype = NULL;
        const char *unit_name = NULL;
+       const char *output_path = NULL;
        bool json = false;
        struct option list_options[] = {
                OPT_BOOLEAN(0, "raw-dump", &default_ps.name_only, "Dump raw events"),
@@ -471,6 +509,7 @@ int cmd_list(int argc, const char **argv)
                            "Print longer event descriptions."),
                OPT_BOOLEAN(0, "details", &default_ps.detailed,
                            "Print information on the perf event names and expressions used internally by events."),
+               OPT_STRING('o', "output", &output_path, "file", "output file name"),
                OPT_BOOLEAN(0, "deprecated", &default_ps.deprecated,
                            "Print deprecated events."),
                OPT_STRING(0, "cputype", &cputype, "cpu type",
@@ -497,6 +536,11 @@ int cmd_list(int argc, const char **argv)
        argc = parse_options(argc, argv, list_options, list_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
 
+       if (output_path) {
+               default_ps.fp = fopen(output_path, "w");
+               json_ps.fp = default_ps.fp;
+       }
+
        setup_pager();
 
        if (!default_ps.name_only)
@@ -618,5 +662,8 @@ out:
        free(default_ps.last_topic);
        free(default_ps.last_metricgroups);
        strlist__delete(default_ps.visited_metrics);
+       if (output_path)
+               fclose(default_ps.fp);
+
        return ret;
 }
index a3ff2f4edbaa5064b040b256eaee1361a34122e5..230461280e4525a612a6842c3a99c7b1a1225929 100644 (file)
@@ -2285,8 +2285,10 @@ setup_args:
                else
                        ev_name = strdup(contention_tracepoints[j].name);
 
-               if (!ev_name)
+               if (!ev_name) {
+                       free(rec_argv);
                        return -ENOMEM;
+               }
 
                rec_argv[i++] = "-e";
                rec_argv[i++] = ev_name;
index dcf288a4fb9a9ad9281b2892272b4918c8f760e5..86c91012517267c5355d7fedebdeed42e9cfb675 100644 (file)
@@ -270,7 +270,7 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused,
 
 static int record__aio_enabled(struct record *rec);
 static int record__comp_enabled(struct record *rec);
-static size_t zstd_compress(struct perf_session *session, struct mmap *map,
+static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
                            void *dst, size_t dst_size, void *src, size_t src_size);
 
 #ifdef HAVE_AIO_SUPPORT
@@ -405,9 +405,13 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size
         */
 
        if (record__comp_enabled(aio->rec)) {
-               size = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
-                                    mmap__mmap_len(map) - aio->size,
-                                    buf, size);
+               ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
+                                                  mmap__mmap_len(map) - aio->size,
+                                                  buf, size);
+               if (compressed < 0)
+                       return (int)compressed;
+
+               size = compressed;
        } else {
                memcpy(aio->data + aio->size, buf, size);
        }
@@ -633,7 +637,13 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
        struct record *rec = to;
 
        if (record__comp_enabled(rec)) {
-               size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size);
+               ssize_t compressed = zstd_compress(rec->session, map, map->data,
+                                                  mmap__mmap_len(map), bf, size);
+
+               if (compressed < 0)
+                       return (int)compressed;
+
+               size = compressed;
                bf   = map->data;
        }
 
@@ -1350,7 +1360,7 @@ static int record__open(struct record *rec)
        evlist__for_each_entry(evlist, pos) {
 try_again:
                if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
-                       if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
+                       if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) {
                                if (verbose > 0)
                                        ui__warning("%s\n", msg);
                                goto try_again;
@@ -1527,10 +1537,10 @@ static size_t process_comp_header(void *record, size_t increment)
        return size;
 }
 
-static size_t zstd_compress(struct perf_session *session, struct mmap *map,
+static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
                            void *dst, size_t dst_size, void *src, size_t src_size)
 {
-       size_t compressed;
+       ssize_t compressed;
        size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
        struct zstd_data *zstd_data = &session->zstd_data;
 
@@ -1539,6 +1549,8 @@ static size_t zstd_compress(struct perf_session *session, struct mmap *map,
 
        compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size,
                                                     max_record_size, process_comp_header);
+       if (compressed < 0)
+               return compressed;
 
        if (map && map->file) {
                thread->bytes_transferred += src_size;
@@ -1912,21 +1924,13 @@ static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
 static void record__read_lost_samples(struct record *rec)
 {
        struct perf_session *session = rec->session;
-       struct perf_record_lost_samples *lost;
+       struct perf_record_lost_samples *lost = NULL;
        struct evsel *evsel;
 
        /* there was an error during record__open */
        if (session->evlist == NULL)
                return;
 
-       lost = zalloc(PERF_SAMPLE_MAX_SIZE);
-       if (lost == NULL) {
-               pr_debug("Memory allocation failed\n");
-               return;
-       }
-
-       lost->header.type = PERF_RECORD_LOST_SAMPLES;
-
        evlist__for_each_entry(session->evlist, evsel) {
                struct xyarray *xy = evsel->core.sample_id;
                u64 lost_count;
@@ -1949,6 +1953,15 @@ static void record__read_lost_samples(struct record *rec)
                                }
 
                                if (count.lost) {
+                                       if (!lost) {
+                                               lost = zalloc(sizeof(*lost) +
+                                                             session->machines.host.id_hdr_size);
+                                               if (!lost) {
+                                                       pr_debug("Memory allocation failed\n");
+                                                       return;
+                                               }
+                                               lost->header.type = PERF_RECORD_LOST_SAMPLES;
+                                       }
                                        __record__save_lost_samples(rec, evsel, lost,
                                                                    x, y, count.lost, 0);
                                }
@@ -1956,9 +1969,19 @@ static void record__read_lost_samples(struct record *rec)
                }
 
                lost_count = perf_bpf_filter__lost_count(evsel);
-               if (lost_count)
+               if (lost_count) {
+                       if (!lost) {
+                               lost = zalloc(sizeof(*lost) +
+                                             session->machines.host.id_hdr_size);
+                               if (!lost) {
+                                       pr_debug("Memory allocation failed\n");
+                                       return;
+                               }
+                               lost->header.type = PERF_RECORD_LOST_SAMPLES;
+                       }
                        __record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
                                                    PERF_RECORD_MISC_LOST_SAMPLES_BPF);
+               }
        }
 out:
        free(lost);
@@ -2216,32 +2239,6 @@ static void hit_auxtrace_snapshot_trigger(struct record *rec)
        }
 }
 
-static void record__uniquify_name(struct record *rec)
-{
-       struct evsel *pos;
-       struct evlist *evlist = rec->evlist;
-       char *new_name;
-       int ret;
-
-       if (perf_pmus__num_core_pmus() == 1)
-               return;
-
-       evlist__for_each_entry(evlist, pos) {
-               if (!evsel__is_hybrid(pos))
-                       continue;
-
-               if (strchr(pos->name, '/'))
-                       continue;
-
-               ret = asprintf(&new_name, "%s/%s/",
-                              pos->pmu_name, pos->name);
-               if (ret) {
-                       free(pos->name);
-                       pos->name = new_name;
-               }
-       }
-}
-
 static int record__terminate_thread(struct record_thread *thread_data)
 {
        int err;
@@ -2475,7 +2472,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        if (data->is_pipe && rec->evlist->core.nr_entries == 1)
                rec->opts.sample_id = true;
 
-       record__uniquify_name(rec);
+       evlist__uniquify_name(rec->evlist);
 
        /* Debug message used by test scripts */
        pr_debug3("perf record opening and mmapping events\n");
@@ -3580,9 +3577,7 @@ static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cp
        if (cpu_map__is_dummy(cpus))
                return 0;
 
-       perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
-               if (cpu.cpu == -1)
-                       continue;
+       perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus) {
                /* Return ENODEV is input cpu is greater than max cpu */
                if ((unsigned long)cpu.cpu > mask->nbits)
                        return -ENODEV;
@@ -3989,6 +3984,8 @@ int cmd_record(int argc, const char **argv)
 # undef set_nobuild
 #endif
 
+       /* Disable eager loading of kernel symbols that adds overhead to perf record. */
+       symbol_conf.lazy_load_kernel_maps = true;
        rec->opts.affinity = PERF_AFFINITY_SYS;
 
        rec->evlist = evlist__new();
@@ -4083,8 +4080,8 @@ int cmd_record(int argc, const char **argv)
        }
 
        if (rec->switch_output.num_files) {
-               rec->switch_output.filenames = calloc(sizeof(char *),
-                                                     rec->switch_output.num_files);
+               rec->switch_output.filenames = calloc(rec->switch_output.num_files,
+                                                     sizeof(char *));
                if (!rec->switch_output.filenames) {
                        err = -EINVAL;
                        goto out_opts;
index 9cb1da2dc0c03bbe7d0b0b63c64cc7c6133e0823..f2ed2b7e80a32649095f123b63b3ed8ecaf42cc9 100644 (file)
@@ -96,9 +96,9 @@ struct report {
        bool                    stitch_lbr;
        bool                    disable_order;
        bool                    skip_empty;
+       bool                    data_type;
        int                     max_stack;
        struct perf_read_values show_threads_values;
-       struct annotation_options annotation_opts;
        const char              *pretty_printing_style;
        const char              *cpu_list;
        const char              *symbol_filter_str;
@@ -171,7 +171,7 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
        struct mem_info *mi;
        struct branch_info *bi;
 
-       if (!ui__has_annotation() && !rep->symbol_ipc)
+       if (!ui__has_annotation() && !rep->symbol_ipc && !rep->data_type)
                return 0;
 
        if (sort__mode == SORT_MODE__BRANCH) {
@@ -541,8 +541,7 @@ static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *
        evlist__for_each_entry(evlist, pos) {
                ret = report__browse_block_hists(&rep->block_reports[i++].hist,
                                                 rep->min_percent, pos,
-                                                &rep->session->header.env,
-                                                &rep->annotation_opts);
+                                                &rep->session->header.env);
                if (ret != 0)
                        return ret;
        }
@@ -574,8 +573,7 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c
 
                if (rep->total_cycles_mode) {
                        report__browse_block_hists(&rep->block_reports[i++].hist,
-                                                  rep->min_percent, pos,
-                                                  NULL, NULL);
+                                                  rep->min_percent, pos, NULL);
                        continue;
                }
 
@@ -670,7 +668,7 @@ static int report__browse_hists(struct report *rep)
                }
 
                ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
-                                              &session->header.env, true, &rep->annotation_opts);
+                                              &session->header.env, true);
                /*
                 * Usually "ret" is the last pressed key, and we only
                 * care if the key notifies us to switch data file.
@@ -745,7 +743,7 @@ static int hists__resort_cb(struct hist_entry *he, void *arg)
        if (rep->symbol_ipc && sym && !sym->annotate2) {
                struct evsel *evsel = hists_to_evsel(he->hists);
 
-               symbol__annotate2(&he->ms, evsel, &rep->annotation_opts, NULL);
+               symbol__annotate2(&he->ms, evsel, NULL);
        }
 
        return 0;
@@ -859,27 +857,47 @@ static struct task *tasks_list(struct task *task, struct machine *machine)
        return tasks_list(parent_task, machine);
 }
 
-static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
+struct maps__fprintf_task_args {
+       int indent;
+       FILE *fp;
+       size_t printed;
+};
+
+static int maps__fprintf_task_cb(struct map *map, void *data)
 {
-       size_t printed = 0;
-       struct map_rb_node *rb_node;
+       struct maps__fprintf_task_args *args = data;
+       const struct dso *dso = map__dso(map);
+       u32 prot = map__prot(map);
+       int ret;
 
-       maps__for_each_entry(maps, rb_node) {
-               struct map *map = rb_node->map;
-               const struct dso *dso = map__dso(map);
-               u32 prot = map__prot(map);
+       ret = fprintf(args->fp,
+               "%*s  %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
+               args->indent, "", map__start(map), map__end(map),
+               prot & PROT_READ ? 'r' : '-',
+               prot & PROT_WRITE ? 'w' : '-',
+               prot & PROT_EXEC ? 'x' : '-',
+               map__flags(map) ? 's' : 'p',
+               map__pgoff(map),
+               dso->id.ino, dso->name);
 
-               printed += fprintf(fp, "%*s  %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
-                                  indent, "", map__start(map), map__end(map),
-                                  prot & PROT_READ ? 'r' : '-',
-                                  prot & PROT_WRITE ? 'w' : '-',
-                                  prot & PROT_EXEC ? 'x' : '-',
-                                  map__flags(map) ? 's' : 'p',
-                                  map__pgoff(map),
-                                  dso->id.ino, dso->name);
-       }
+       if (ret < 0)
+               return ret;
+
+       args->printed += ret;
+       return 0;
+}
+
+static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
+{
+       struct maps__fprintf_task_args args = {
+               .indent = indent,
+               .fp = fp,
+               .printed = 0,
+       };
 
-       return printed;
+       maps__for_each_map(maps, maps__fprintf_task_cb, &args);
+
+       return args.printed;
 }
 
 static void task__print_level(struct task *task, FILE *fp, int level)
@@ -1341,15 +1359,15 @@ int cmd_report(int argc, const char **argv)
                   "list of cpus to profile"),
        OPT_BOOLEAN('I', "show-info", &report.show_full_info,
                    "Display extended information about perf.data file"),
-       OPT_BOOLEAN(0, "source", &report.annotation_opts.annotate_src,
+       OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
                    "Interleave source code with assembly code (default)"),
-       OPT_BOOLEAN(0, "asm-raw", &report.annotation_opts.show_asm_raw,
+       OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
                    "Display raw encoding of assembly instructions (default)"),
        OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
                   "Specify disassembler style (e.g. -M intel for intel syntax)"),
-       OPT_STRING(0, "prefix", &report.annotation_opts.prefix, "prefix",
+       OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
                    "Add prefix to source file path names in programs (with --prefix-strip)"),
-       OPT_STRING(0, "prefix-strip", &report.annotation_opts.prefix_strip, "N",
+       OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
                    "Strip first N entries of source file path name in programs (with --prefix)"),
        OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
                    "Show a column with the sum of periods"),
@@ -1401,7 +1419,7 @@ int cmd_report(int argc, const char **argv)
                   "Time span of interest (start,stop)"),
        OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
                    "Show inline function"),
-       OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
+       OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
                     "Set percent type local/global-period/hits",
                     annotate_parse_percent_type),
        OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
@@ -1426,7 +1444,14 @@ int cmd_report(int argc, const char **argv)
        if (ret < 0)
                goto exit;
 
-       annotation_options__init(&report.annotation_opts);
+       /*
+        * tasks_mode require access to exited threads to list those that are in
+        * the data file. Off-cpu events are synthesized after other events and
+        * reference exited threads.
+        */
+       symbol_conf.keep_exited_threads = true;
+
+       annotation_options__init();
 
        ret = perf_config(report__config, &report);
        if (ret)
@@ -1445,13 +1470,13 @@ int cmd_report(int argc, const char **argv)
        }
 
        if (disassembler_style) {
-               report.annotation_opts.disassembler_style = strdup(disassembler_style);
-               if (!report.annotation_opts.disassembler_style)
+               annotate_opts.disassembler_style = strdup(disassembler_style);
+               if (!annotate_opts.disassembler_style)
                        return -ENOMEM;
        }
        if (objdump_path) {
-               report.annotation_opts.objdump_path = strdup(objdump_path);
-               if (!report.annotation_opts.objdump_path)
+               annotate_opts.objdump_path = strdup(objdump_path);
+               if (!annotate_opts.objdump_path)
                        return -ENOMEM;
        }
        if (addr2line_path) {
@@ -1460,7 +1485,7 @@ int cmd_report(int argc, const char **argv)
                        return -ENOMEM;
        }
 
-       if (annotate_check_args(&report.annotation_opts) < 0) {
+       if (annotate_check_args() < 0) {
                ret = -EINVAL;
                goto exit;
        }
@@ -1615,6 +1640,16 @@ repeat:
                        sort_order = NULL;
        }
 
+       if (sort_order && strstr(sort_order, "type")) {
+               report.data_type = true;
+               annotate_opts.annotate_src = false;
+
+#ifndef HAVE_DWARF_GETLOCATIONS_SUPPORT
+               pr_err("Error: Data type profiling is disabled due to missing DWARF support\n");
+               goto error;
+#endif
+       }
+
        if (strcmp(input_name, "-") != 0)
                setup_browser(true);
        else
@@ -1673,7 +1708,7 @@ repeat:
         * so don't allocate extra space that won't be used in the stdio
         * implementation.
         */
-       if (ui__has_annotation() || report.symbol_ipc ||
+       if (ui__has_annotation() || report.symbol_ipc || report.data_type ||
            report.total_cycles_mode) {
                ret = symbol__annotation_init();
                if (ret < 0)
@@ -1692,7 +1727,7 @@ repeat:
                         */
                        symbol_conf.priv_size += sizeof(u32);
                }
-               annotation_config__init(&report.annotation_opts);
+               annotation_config__init();
        }
 
        if (symbol__init(&session->header.env) < 0)
@@ -1746,7 +1781,7 @@ error:
        zstd_fini(&(session->zstd_data));
        perf_session__delete(session);
 exit:
-       annotation_options__exit(&report.annotation_opts);
+       annotation_options__exit();
        free(sort_order_help);
        free(field_order_help);
        return ret;
index a3af805a1d572d101b80fefbdddd04b41413629e..5fe9abc6a52418f3b5612c8e5e38d4d052c31f98 100644 (file)
@@ -653,7 +653,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
                if ((evsel__leader(counter) != counter) ||
                    !(counter->core.leader->nr_members > 1))
                        return COUNTER_SKIP;
-       } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
+       } else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) {
                if (verbose > 0)
                        ui__warning("%s\n", msg);
                return COUNTER_RETRY;
@@ -1204,8 +1204,9 @@ static struct option stat_options[] = {
        OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
                    "list of cpus to monitor in system-wide"),
        OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
-                   "disable CPU count aggregation", AGGR_NONE),
-       OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
+                   "disable aggregation across CPUs or PMUs", AGGR_NONE),
+       OPT_SET_UINT(0, "no-merge", &stat_config.aggr_mode,
+                   "disable aggregation the same as -A or -no-aggr", AGGR_NONE),
        OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
                    "Merge identical named hybrid events"),
        OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
@@ -1255,7 +1256,7 @@ static struct option stat_options[] = {
        OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
                       "don't try to share events between metrics in a group"),
        OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
-                      "don't try to share events between metrics in a group  "),
+                      "disable adding events for the metric threshold calculation"),
        OPT_BOOLEAN(0, "topdown", &topdown_run,
                        "measure top-down statistics"),
        OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
@@ -1316,7 +1317,7 @@ static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
         * be the first online CPU in the cache domain else use the
         * first online CPU of the cache domain as the ID.
         */
-       if (perf_cpu_map__empty(cpu_map))
+       if (perf_cpu_map__has_any_cpu_or_is_empty(cpu_map))
                id = cpu.cpu;
        else
                id = perf_cpu_map__cpu(cpu_map, 0).cpu;
@@ -1622,7 +1623,7 @@ static int perf_stat_init_aggr_mode(void)
         * taking the highest cpu number to be the size of
         * the aggregation translate cpumap.
         */
-       if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
+       if (!perf_cpu_map__has_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus))
                nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
        else
                nr = 0;
@@ -2289,7 +2290,7 @@ int process_stat_config_event(struct perf_session *session,
 
        perf_event__read_stat_config(&stat_config, &event->stat_config);
 
-       if (perf_cpu_map__empty(st->cpus)) {
+       if (perf_cpu_map__has_any_cpu_or_is_empty(st->cpus)) {
                if (st->aggr_mode != AGGR_UNSET)
                        pr_warning("warning: processing task data, aggregation mode not set\n");
        } else if (st->aggr_mode != AGGR_UNSET) {
@@ -2695,15 +2696,19 @@ int cmd_stat(int argc, const char **argv)
         */
        if (metrics) {
                const char *pmu = parse_events_option_args.pmu_filter ?: "all";
+               int ret = metricgroup__parse_groups(evsel_list, pmu, metrics,
+                                               stat_config.metric_no_group,
+                                               stat_config.metric_no_merge,
+                                               stat_config.metric_no_threshold,
+                                               stat_config.user_requested_cpu_list,
+                                               stat_config.system_wide,
+                                               &stat_config.metric_events);
 
-               metricgroup__parse_groups(evsel_list, pmu, metrics,
-                                       stat_config.metric_no_group,
-                                       stat_config.metric_no_merge,
-                                       stat_config.metric_no_threshold,
-                                       stat_config.user_requested_cpu_list,
-                                       stat_config.system_wide,
-                                       &stat_config.metric_events);
                zfree(&metrics);
+               if (ret) {
+                       status = ret;
+                       goto out;
+               }
        }
 
        if (add_default_attributes())
index ea8c7eca5eeedd7616976139b288b3ff5c8c95d4..5301d1badd435906ddf152511e6935b73236c034 100644 (file)
@@ -147,7 +147,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
                return err;
        }
 
-       err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL);
+       err = symbol__annotate(&he->ms, evsel, NULL);
        if (err == 0) {
                top->sym_filter_entry = he;
        } else {
@@ -261,9 +261,9 @@ static void perf_top__show_details(struct perf_top *top)
                goto out_unlock;
 
        printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
-       printf("  Events  Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
+       printf("  Events  Pcnt (>=%d%%)\n", annotate_opts.min_pcnt);
 
-       more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);
+       more = symbol__annotate_printf(&he->ms, top->sym_evsel);
 
        if (top->evlist->enabled) {
                if (top->zero)
@@ -357,7 +357,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
 
 static void prompt_integer(int *target, const char *msg)
 {
-       char *buf = malloc(0), *p;
+       char *buf = NULL, *p;
        size_t dummy = 0;
        int tmp;
 
@@ -450,7 +450,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
 
        fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top->count_filter);
 
-       fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
+       fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", annotate_opts.min_pcnt);
        fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
        fprintf(stdout, "\t[S]     stop annotation.\n");
 
@@ -553,7 +553,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
                        prompt_integer(&top->count_filter, "Enter display event count filter");
                        break;
                case 'F':
-                       prompt_percent(&top->annotation_opts.min_pcnt,
+                       prompt_percent(&annotate_opts.min_pcnt,
                                       "Enter details display event filter (percent)");
                        break;
                case 'K':
@@ -646,8 +646,7 @@ repeat:
        }
 
        ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
-                                      &top->session->header.env, !top->record_opts.overwrite,
-                                      &top->annotation_opts);
+                                      &top->session->header.env, !top->record_opts.overwrite);
        if (ret == K_RELOAD) {
                top->zero = true;
                goto repeat;
@@ -1027,8 +1026,8 @@ static int perf_top__start_counters(struct perf_top *top)
 
        evlist__for_each_entry(evlist, counter) {
 try_again:
-               if (evsel__open(counter, top->evlist->core.user_requested_cpus,
-                                    top->evlist->core.threads) < 0) {
+               if (evsel__open(counter, counter->core.cpus,
+                               counter->core.threads) < 0) {
 
                        /*
                         * Specially handle overwrite fall back.
@@ -1044,7 +1043,7 @@ try_again:
                            perf_top_overwrite_fallback(top, counter))
                                goto try_again;
 
-                       if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
+                       if (evsel__fallback(counter, &opts->target, errno, msg, sizeof(msg))) {
                                if (verbose > 0)
                                        ui__warning("%s\n", msg);
                                goto try_again;
@@ -1241,9 +1240,9 @@ static int __cmd_top(struct perf_top *top)
        pthread_t thread, thread_process;
        int ret;
 
-       if (!top->annotation_opts.objdump_path) {
+       if (!annotate_opts.objdump_path) {
                ret = perf_env__lookup_objdump(&top->session->header.env,
-                                              &top->annotation_opts.objdump_path);
+                                              &annotate_opts.objdump_path);
                if (ret)
                        return ret;
        }
@@ -1299,6 +1298,7 @@ static int __cmd_top(struct perf_top *top)
                }
        }
 
+       evlist__uniquify_name(top->evlist);
        ret = perf_top__start_counters(top);
        if (ret)
                return ret;
@@ -1536,9 +1536,9 @@ int cmd_top(int argc, const char **argv)
                   "only consider symbols in these comms"),
        OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
                   "only consider these symbols"),
-       OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
+       OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
                    "Interleave source code with assembly code (default)"),
-       OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
+       OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
                    "Display raw encoding of assembly instructions (default)"),
        OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
                    "Enable kernel symbol demangling"),
@@ -1549,9 +1549,9 @@ int cmd_top(int argc, const char **argv)
                   "addr2line binary to use for line numbers"),
        OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
                   "Specify disassembler style (e.g. -M intel for intel syntax)"),
-       OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix",
+       OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
                    "Add prefix to source file path names in programs (with --prefix-strip)"),
-       OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N",
+       OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
                    "Strip first N entries of source file path name in programs (with --prefix)"),
        OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
        OPT_CALLBACK(0, "percent-limit", &top, "percent",
@@ -1609,10 +1609,10 @@ int cmd_top(int argc, const char **argv)
        if (status < 0)
                return status;
 
-       annotation_options__init(&top.annotation_opts);
+       annotation_options__init();
 
-       top.annotation_opts.min_pcnt = 5;
-       top.annotation_opts.context  = 4;
+       annotate_opts.min_pcnt = 5;
+       annotate_opts.context  = 4;
 
        top.evlist = evlist__new();
        if (top.evlist == NULL)
@@ -1642,13 +1642,13 @@ int cmd_top(int argc, const char **argv)
                usage_with_options(top_usage, options);
 
        if (disassembler_style) {
-               top.annotation_opts.disassembler_style = strdup(disassembler_style);
-               if (!top.annotation_opts.disassembler_style)
+               annotate_opts.disassembler_style = strdup(disassembler_style);
+               if (!annotate_opts.disassembler_style)
                        return -ENOMEM;
        }
        if (objdump_path) {
-               top.annotation_opts.objdump_path = strdup(objdump_path);
-               if (!top.annotation_opts.objdump_path)
+               annotate_opts.objdump_path = strdup(objdump_path);
+               if (!annotate_opts.objdump_path)
                        return -ENOMEM;
        }
        if (addr2line_path) {
@@ -1661,7 +1661,7 @@ int cmd_top(int argc, const char **argv)
        if (status)
                goto out_delete_evlist;
 
-       if (annotate_check_args(&top.annotation_opts) < 0)
+       if (annotate_check_args() < 0)
                goto out_delete_evlist;
 
        if (!top.evlist->core.nr_entries) {
@@ -1787,7 +1787,7 @@ int cmd_top(int argc, const char **argv)
        if (status < 0)
                goto out_delete_evlist;
 
-       annotation_config__init(&top.annotation_opts);
+       annotation_config__init();
 
        symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
        status = symbol__init(NULL);
@@ -1840,7 +1840,7 @@ int cmd_top(int argc, const char **argv)
 out_delete_evlist:
        evlist__delete(top.evlist);
        perf_session__delete(top.session);
-       annotation_options__exit(&top.annotation_opts);
+       annotation_options__exit();
 
        return status;
 }
index e541d0e2777ab935a6274d0a8aeaa46ffe3f8247..109b8e64fe69ae32fee0ee7b6937d260d32b6203 100644 (file)
@@ -2470,9 +2470,8 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam
 static const char *errno_to_name(struct evsel *evsel, int err)
 {
        struct perf_env *env = evsel__env(evsel);
-       const char *arch_name = perf_env__arch(env);
 
-       return arch_syscalls__strerrno(arch_name, err);
+       return perf_env__arch_strerrno(env, err);
 }
 
 static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
@@ -4264,12 +4263,11 @@ static size_t thread__dump_stats(struct thread_trace *ttrace,
                        printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
 
                        if (trace->errno_summary && stats->nr_failures) {
-                               const char *arch_name = perf_env__arch(trace->host->env);
                                int e;
 
                                for (e = 0; e < stats->max_errno; ++e) {
                                        if (stats->errnos[e] != 0)
-                                               fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
+                                               fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]);
                                }
                        }
                }
old mode 100644 (file)
new mode 100755 (executable)
index 133f0ed..f947957
@@ -4,8 +4,73 @@
 # Arnaldo Carvalho de Melo <acme@redhat.com>
 
 PERF_DATA=perf.data
-if [ $# -ne 0 ] ; then
-       PERF_DATA=$1
+PERF_SYMBOLS=perf.symbols
+PERF_ALL=perf.all
+ALL=0
+UNPACK=0
+
+while [ $# -gt 0 ] ; do
+       if [ $1 == "--all" ]; then
+               ALL=1
+               shift
+       elif [ $1 == "--unpack" ]; then
+               UNPACK=1
+               shift
+       else
+               PERF_DATA=$1
+               UNPACK_TAR=$1
+               shift
+       fi
+done
+
+if [ $UNPACK -eq 1 ]; then
+       if [ ! -z "$UNPACK_TAR" ]; then                                 # tar given as an argument
+               if [ ! -e "$UNPACK_TAR" ]; then
+                       echo "Provided file $UNPACK_TAR does not exist"
+                       exit 1
+               fi
+               TARGET="$UNPACK_TAR"
+       else                                                                                                                            # search for perf tar in the current directory
+               TARGET=`find . -regex "\./perf.*\.tar\.bz2"`
+               TARGET_NUM=`echo -n "$TARGET" | grep -c '^'`
+
+               if [ -z "$TARGET" -o $TARGET_NUM -gt 1 ]; then
+                       echo -e "Error: $TARGET_NUM files found for unpacking:\n$TARGET"
+                       echo "Provide the requested file as an argument"
+                       exit 1
+               else
+                       echo "Found target file for unpacking: $TARGET"
+               fi
+       fi
+
+       if [[ "$TARGET" =~ (\./)?$PERF_ALL.*.tar.bz2 ]]; then                           # perf tar generated by --all option
+               TAR_CONTENTS=`tar tvf "$TARGET" | tr -s " " | cut -d " " -f 6`
+               VALID_TAR=`echo "$TAR_CONTENTS" | grep "$PERF_SYMBOLS.tar.bz2" | wc -l`         # check if it contains a sub-tar perf.symbols
+               if [ $VALID_TAR -ne 1 ]; then
+                       echo "Error: $TARGET file is not valid (contains zero or multiple sub-tar files with debug symbols)"
+                       exit 1
+               fi
+
+               INTERSECT=`comm -12 <(ls) <(echo "$TAR_CONTENTS") | tr "\n" " "`        # check for overwriting
+               if [ ! -z "$INTERSECT" ]; then                                                                          # prompt if file(s) already exist in the current directory
+                       echo "File(s) ${INTERSECT::-1} already exist in the current directory."
+                       while true; do
+                               read -p 'Do you wish to overwrite them? ' yn
+                               case $yn in
+                                       [Yy]* ) break;;
+                                       [Nn]* ) exit 1;;
+                                       * ) echo "Please answer yes or no.";;
+                               esac
+                       done
+               fi
+
+       # untar the perf.data file into the current working directory and the debug symbols into the ~/.debug directory
+               tar xvf $TARGET && tar xvf $PERF_SYMBOLS.tar.bz2 -C ~/.debug
+
+       else                                                                                                                            # perf tar generated by perf archive (contains only debug symbols)
+               tar xvf $TARGET -C ~/.debug
+       fi
+       exit 0
 fi
 
 #
@@ -39,9 +104,18 @@ while read build_id ; do
        echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST
 done
 
-tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
-rm $MANIFEST $BUILDIDS || true
+if [ $ALL -eq 1 ]; then                                                # pack perf.data file together with tar containing debug symbols
+       HOSTNAME=$(hostname)
+       DATE=$(date '+%Y%m%d-%H%M%S')
+       tar cjf $PERF_SYMBOLS.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
+       tar cjf $PERF_ALL-$HOSTNAME-$DATE.tar.bz2 $PERF_DATA $PERF_SYMBOLS.tar.bz2
+       rm $PERF_SYMBOLS.tar.bz2 $MANIFEST $BUILDIDS || true
+else                                                                           # pack only the debug symbols
+       tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
+       rm $MANIFEST $BUILDIDS || true
+fi
+
 echo -e "Now please run:\n"
-echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n"
-echo "wherever you need to run 'perf report' on."
+echo -e "$ perf archive --unpack\n"
+echo "or unpack the tar manually wherever you need to run 'perf report' on."
 exit 0
index d3fc8090413c8c289b55c006cde3ad3388708a9d..921bee0a643707ec596620b00052cd5989f4f56e 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/zalloc.h>
 
 static int use_pager = -1;
+static FILE *debug_fp = NULL;
 
 struct cmd_struct {
        const char *cmd;
@@ -162,6 +163,19 @@ static void commit_pager_choice(void)
        }
 }
 
+static int set_debug_file(const char *path)
+{
+       debug_fp = fopen(path, "w");
+       if (!debug_fp) {
+               fprintf(stderr, "Open debug file '%s' failed: %s\n",
+                       path, strerror(errno));
+               return -1;
+       }
+
+       debug_set_file(debug_fp);
+       return 0;
+}
+
 struct option options[] = {
        OPT_ARGUMENT("help", "help"),
        OPT_ARGUMENT("version", "version"),
@@ -174,6 +188,7 @@ struct option options[] = {
        OPT_ARGUMENT("list-cmds", "list-cmds"),
        OPT_ARGUMENT("list-opts", "list-opts"),
        OPT_ARGUMENT("debug", "debug"),
+       OPT_ARGUMENT("debug-file", "debug-file"),
        OPT_END()
 };
 
@@ -287,6 +302,18 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
 
                        (*argv)++;
                        (*argc)--;
+               } else if (!strcmp(cmd, "--debug-file")) {
+                       if (*argc < 2) {
+                               fprintf(stderr, "No path given for --debug-file.\n");
+                               usage(perf_usage_string);
+                       }
+
+                       if (set_debug_file((*argv)[1]))
+                               usage(perf_usage_string);
+
+                       (*argv)++;
+                       (*argc)--;
+
                } else {
                        fprintf(stderr, "Unknown option: %s\n", cmd);
                        usage(perf_usage_string);
@@ -547,5 +574,8 @@ int main(int argc, const char **argv)
        fprintf(stderr, "Failed to run command '%s': %s\n",
                cmd, str_error_r(errno, sbuf, sizeof(sbuf)));
 out:
+       if (debug_fp)
+               fclose(debug_fp);
+
        return 1;
 }
index 88b23b85e33cd0784eeca99e1994250a6509b2fc..879ff21e0b177c6a015dccd0ab4f016787deb01d 100644 (file)
     {
         "PublicDescription": "Flushes due to memory hazards",
         "EventCode": "0x121",
-        "EventName": "BPU_FLUSH_MEM_FAULT",
+        "EventName": "GPC_FLUSH_MEM_FAULT",
         "BriefDescription": "Flushes due to memory hazards"
     },
     {
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json
new file mode 100644 (file)
index 0000000..a632755
--- /dev/null
@@ -0,0 +1,125 @@
+[
+    {
+        "ArchStdEvent": "BR_IMMED_SPEC"
+    },
+    {
+        "ArchStdEvent": "BR_RETURN_SPEC"
+    },
+    {
+        "ArchStdEvent": "BR_INDIRECT_SPEC"
+    },
+    {
+        "ArchStdEvent": "BR_MIS_PRED"
+    },
+    {
+        "ArchStdEvent": "BR_PRED"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, branch not taken",
+        "EventCode": "0x8107",
+        "EventName": "BR_SKIP_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, branch not taken"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, immediate branch taken",
+        "EventCode": "0x8108",
+        "EventName": "BR_IMMED_TAKEN_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, immediate branch taken"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, indirect branch excluding return retired",
+        "EventCode": "0x810c",
+        "EventName": "BR_INDNR_TAKEN_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, indirect branch excluding return retired"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, predicted immediate branch",
+        "EventCode": "0x8110",
+        "EventName": "BR_IMMED_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, predicted immediate branch"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, mispredicted immediate branch",
+        "EventCode": "0x8111",
+        "EventName": "BR_IMMED_MIS_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, mispredicted immediate branch"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, predicted indirect branch",
+        "EventCode": "0x8112",
+        "EventName": "BR_IND_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, predicted indirect branch"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, mispredicted indirect branch",
+        "EventCode": "0x8113",
+        "EventName": "BR_IND_MIS_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, mispredicted indirect branch"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, predicted procedure return",
+        "EventCode": "0x8114",
+        "EventName": "BR_RETURN_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, predicted procedure return"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, mispredicted procedure return",
+        "EventCode": "0x8115",
+        "EventName": "BR_RETURN_MIS_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, mispredicted procedure return"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, predicted indirect branch excluding return",
+        "EventCode": "0x8116",
+        "EventName": "BR_INDNR_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, predicted indirect branch excluding return"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, mispredicted indirect branch excluding return",
+        "EventCode": "0x8117",
+        "EventName": "BR_INDNR_MIS_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, mispredicted indirect branch excluding return"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, predicted branch, taken",
+        "EventCode": "0x8118",
+        "EventName": "BR_TAKEN_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, predicted branch, taken"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, mispredicted branch, taken",
+        "EventCode": "0x8119",
+        "EventName": "BR_TAKEN_MIS_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, mispredicted branch, taken"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, predicted branch, not taken",
+        "EventCode": "0x811a",
+        "EventName": "BR_SKIP_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, predicted branch, not taken"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, mispredicted branch, not taken",
+        "EventCode": "0x811b",
+        "EventName": "BR_SKIP_MIS_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, mispredicted branch, not taken"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, predicted branch",
+        "EventCode": "0x811c",
+        "EventName": "BR_PRED_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, predicted branch"
+    },
+    {
+        "PublicDescription": "Instruction architecturally executed, indirect branch",
+        "EventCode": "0x811d",
+        "EventName": "BR_IND_RETIRED",
+        "BriefDescription": "Instruction architecturally executed, indirect branch"
+    },
+    {
+        "PublicDescription": "Branch Record captured.",
+        "EventCode": "0x811f",
+        "EventName": "BRB_FILTRATE",
+        "BriefDescription": "Branch Record captured."
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json
new file mode 100644 (file)
index 0000000..2aeb990
--- /dev/null
@@ -0,0 +1,20 @@
+[
+    {
+        "ArchStdEvent": "CPU_CYCLES"
+    },
+    {
+        "ArchStdEvent": "BUS_CYCLES"
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_RD"
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_WR"
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS"
+    },
+    {
+        "ArchStdEvent": "CNT_CYCLES"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json
new file mode 100644 (file)
index 0000000..c50d8e9
--- /dev/null
@@ -0,0 +1,206 @@
+[
+    {
+        "ArchStdEvent": "L1D_CACHE_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WR"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_INVAL"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL_WR"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_RD"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_WR"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_INVAL"
+    },
+    {
+        "ArchStdEvent": "L1I_CACHE_REFILL"
+    },
+    {
+        "ArchStdEvent": "L1I_TLB_REFILL"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL"
+    },
+    {
+        "ArchStdEvent": "L1I_CACHE"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_REFILL"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_WB"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB"
+    },
+    {
+        "ArchStdEvent": "L1I_TLB"
+    },
+    {
+        "ArchStdEvent": "L2D_TLB_REFILL"
+    },
+    {
+        "ArchStdEvent": "L2I_TLB_REFILL"
+    },
+    {
+        "ArchStdEvent": "L2D_TLB"
+    },
+    {
+        "ArchStdEvent": "L2I_TLB"
+    },
+    {
+        "ArchStdEvent": "DTLB_WALK"
+    },
+    {
+        "ArchStdEvent": "ITLB_WALK"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+    },
+    {
+        "ArchStdEvent": "L1I_CACHE_LMISS"
+    },
+    {
+        "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+    },
+    {
+        "PublicDescription": "Level 1 data or unified cache demand access",
+        "EventCode": "0x8140",
+        "EventName": "L1D_CACHE_RW",
+        "BriefDescription": "Level 1 data or unified cache demand access"
+    },
+    {
+        "PublicDescription": "Level 1 data or unified cache preload or prefetch",
+        "EventCode": "0x8142",
+        "EventName": "L1D_CACHE_PRFM",
+        "BriefDescription": "Level 1 data or unified cache preload or prefetch"
+    },
+    {
+        "PublicDescription": "Level 1 data or unified cache refill, preload or prefetch",
+        "EventCode": "0x8146",
+        "EventName": "L1D_CACHE_REFILL_PRFM",
+        "BriefDescription": "Level 1 data or unified cache refill, preload or prefetch"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_RD"
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_WR"
+    },
+    {
+        "ArchStdEvent": "L2D_TLB_REFILL_RD"
+    },
+    {
+        "ArchStdEvent": "L2D_TLB_REFILL_WR"
+    },
+    {
+        "ArchStdEvent": "L2D_TLB_RD"
+    },
+    {
+        "ArchStdEvent": "L2D_TLB_WR"
+    },
+    {
+        "PublicDescription": "L1D TLB miss",
+        "EventCode": "0xD600",
+        "EventName": "L1D_TLB_MISS",
+        "BriefDescription": "L1D TLB miss"
+    },
+    {
+        "PublicDescription": "Level 1 prefetcher, load prefetch requests generated",
+        "EventCode": "0xd606",
+        "EventName": "L1_PREFETCH_LD_GEN",
+        "BriefDescription": "Level 1 prefetcher, load prefetch requests generated"
+    },
+    {
+        "PublicDescription": "Level 1 prefetcher, load prefetch fills into the level 1 cache",
+        "EventCode": "0xd607",
+        "EventName": "L1_PREFETCH_LD_FILL",
+        "BriefDescription": "Level 1 prefetcher, load prefetch fills into the level 1 cache"
+    },
+    {
+        "PublicDescription": "Level 1 prefetcher, load prefetch to level 2 generated",
+        "EventCode": "0xd608",
+        "EventName": "L1_PREFETCH_L2_REQ",
+        "BriefDescription": "Level 1 prefetcher, load prefetch to level 2 generated"
+    },
+    {
+        "PublicDescription": "L1 prefetcher, distance was reset",
+        "EventCode": "0xd609",
+        "EventName": "L1_PREFETCH_DIST_RST",
+        "BriefDescription": "L1 prefetcher, distance was reset"
+    },
+    {
+        "PublicDescription": "L1 prefetcher, distance was increased",
+        "EventCode": "0xd60a",
+        "EventName": "L1_PREFETCH_DIST_INC",
+        "BriefDescription": "L1 prefetcher, distance was increased"
+    },
+    {
+        "PublicDescription": "Level 1 prefetcher, table entry is trained",
+        "EventCode": "0xd60b",
+        "EventName": "L1_PREFETCH_ENTRY_TRAINED",
+        "BriefDescription": "Level 1 prefetcher, table entry is trained"
+    },
+    {
+        "PublicDescription": "L1 data cache refill - Read or Write",
+        "EventCode": "0xd60e",
+        "EventName": "L1D_CACHE_REFILL_RW",
+        "BriefDescription": "L1 data cache refill - Read or Write"
+    },
+    {
+        "PublicDescription": "Level 2 cache refill from instruction-side miss, including IMMU refills",
+        "EventCode": "0xD701",
+        "EventName": "L2C_INST_REFILL",
+        "BriefDescription": "Level 2 cache refill from instruction-side miss, including IMMU refills"
+    },
+    {
+        "PublicDescription": "Level 2 cache refill from data-side miss, including DMMU refills",
+        "EventCode": "0xD702",
+        "EventName": "L2C_DATA_REFILL",
+        "BriefDescription": "Level 2 cache refill from data-side miss, including DMMU refills"
+    },
+    {
+        "PublicDescription": "Level 2 cache prefetcher, load prefetch requests generated",
+        "EventCode": "0xD703",
+        "EventName": "L2_PREFETCH_REQ",
+        "BriefDescription": "Level 2 cache prefetcher, load prefetch requests generated"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json
new file mode 100644 (file)
index 0000000..eb5a220
--- /dev/null
@@ -0,0 +1,464 @@
+[
+    {
+        "PublicDescription": "Level 2 prefetch requests, refilled to L2 cache",
+        "EventCode": "0x10A",
+        "EventName": "L2_PREFETCH_REFILL",
+        "BriefDescription": "Level 2 prefetch requests, refilled to L2 cache"
+    },
+    {
+        "PublicDescription": "Level 2 prefetch requests, late",
+        "EventCode": "0x10B",
+        "EventName": "L2_PREFETCH_UPGRADE",
+        "BriefDescription": "Level 2 prefetch requests, late"
+    },
+    {
+        "PublicDescription": "Predictable branch speculatively executed that hit any level of BTB",
+        "EventCode": "0x110",
+        "EventName": "BPU_HIT_BTB",
+        "BriefDescription": "Predictable branch speculatively executed that hit any level of BTB"
+    },
+    {
+        "PublicDescription": "Predictable conditional branch speculatively executed that hit any level of BTB",
+        "EventCode": "0x111",
+        "EventName": "BPU_CONDITIONAL_BRANCH_HIT_BTB",
+        "BriefDescription": "Predictable conditional branch speculatively executed that hit any level of BTB"
+    },
+    {
+        "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor",
+        "EventCode": "0x112",
+        "EventName": "BPU_HIT_INDIRECT_PREDICTOR",
+        "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor"
+    },
+    {
+        "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor",
+        "EventCode": "0x113",
+        "EventName": "BPU_HIT_RSB",
+        "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor"
+    },
+    {
+        "PublicDescription": "Predictable unconditional branch speculatively executed that did not hit any level of BTB",
+        "EventCode": "0x114",
+        "EventName": "BPU_UNCONDITIONAL_BRANCH_MISS_BTB",
+        "BriefDescription": "Predictable unconditional branch speculatively executed that did not hit any level of BTB"
+    },
+    {
+        "PublicDescription": "Predictable branch speculatively executed, unpredicted",
+        "EventCode": "0x115",
+        "EventName": "BPU_BRANCH_NO_HIT",
+        "BriefDescription": "Predictable branch speculatively executed, unpredicted"
+    },
+    {
+        "PublicDescription": "Predictable branch speculatively executed that hit any level of BTB that mispredict",
+        "EventCode": "0x116",
+        "EventName": "BPU_HIT_BTB_AND_MISPREDICT",
+        "BriefDescription": "Predictable branch speculatively executed that hit any level of BTB that mispredict"
+    },
+    {
+        "PublicDescription": "Predictable conditional branch speculatively executed that hit any level of BTB that (direction) mispredict",
+        "EventCode": "0x117",
+        "EventName": "BPU_CONDITIONAL_BRANCH_HIT_BTB_AND_MISPREDICT",
+        "BriefDescription": "Predictable conditional branch speculatively executed that hit any level of BTB that (direction) mispredict"
+    },
+    {
+        "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor that mispredict",
+        "EventCode": "0x118",
+        "EventName": "BPU_INDIRECT_BRANCH_HIT_BTB_AND_MISPREDICT",
+        "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor that mispredict"
+    },
+    {
+        "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor that mispredict",
+        "EventCode": "0x119",
+        "EventName": "BPU_HIT_RSB_AND_MISPREDICT",
+        "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor that mispredict"
+    },
+    {
+        "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the overflow/underflow return predictor that mispredict",
+        "EventCode": "0x11a",
+        "EventName": "BPU_MISS_RSB_AND_MISPREDICT",
+        "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the overflow/underflow return predictor that mispredict"
+    },
+    {
+        "PublicDescription": "Predictable branch speculatively executed, unpredicted, that mispredict",
+        "EventCode": "0x11b",
+        "EventName": "BPU_NO_PREDICTION_MISPREDICT",
+        "BriefDescription": "Predictable branch speculatively executed, unpredicted, that mispredict"
+    },
+    {
+        "PublicDescription": "Predictable branch update the BTB region buffer entry",
+        "EventCode": "0x11c",
+        "EventName": "BPU_BTB_UPDATE",
+        "BriefDescription": "Predictable branch update the BTB region buffer entry"
+    },
+    {
+        "PublicDescription": "Count predict pipe stalls due to speculative return address predictor full",
+        "EventCode": "0x11d",
+        "EventName": "BPU_RSB_FULL_STALL",
+        "BriefDescription": "Count predict pipe stalls due to speculative return address predictor full"
+    },
+    {
+        "PublicDescription": "Macro-ops speculatively decoded",
+        "EventCode": "0x11f",
+        "EventName": "ICF_INST_SPEC_DECODE",
+        "BriefDescription": "Macro-ops speculatively decoded"
+    },
+    {
+        "PublicDescription": "Flushes",
+        "EventCode": "0x120",
+        "EventName": "GPC_FLUSH",
+        "BriefDescription": "Flushes"
+    },
+    {
+        "PublicDescription": "Flushes due to memory hazards",
+        "EventCode": "0x121",
+        "EventName": "GPC_FLUSH_MEM_FAULT",
+        "BriefDescription": "Flushes due to memory hazards"
+    },
+    {
+        "PublicDescription": "ETM extout bit 0",
+        "EventCode": "0x141",
+        "EventName": "MSC_ETM_EXTOUT0",
+        "BriefDescription": "ETM extout bit 0"
+    },
+    {
+        "PublicDescription": "ETM extout bit 1",
+        "EventCode": "0x142",
+        "EventName": "MSC_ETM_EXTOUT1",
+        "BriefDescription": "ETM extout bit 1"
+    },
+    {
+        "PublicDescription": "ETM extout bit 2",
+        "EventCode": "0x143",
+        "EventName": "MSC_ETM_EXTOUT2",
+        "BriefDescription": "ETM extout bit 2"
+    },
+    {
+        "PublicDescription": "ETM extout bit 3",
+        "EventCode": "0x144",
+        "EventName": "MSC_ETM_EXTOUT3",
+        "BriefDescription": "ETM extout bit 3"
+    },
+    {
+        "PublicDescription": "Bus request sn",
+        "EventCode": "0x156",
+        "EventName": "L2C_SNOOP",
+        "BriefDescription": "Bus request sn"
+    },
+    {
+        "PublicDescription": "L2 TXDAT LCRD blocked",
+        "EventCode": "0x169",
+        "EventName": "L2C_DAT_CRD_STALL",
+        "BriefDescription": "L2 TXDAT LCRD blocked"
+    },
+    {
+        "PublicDescription": "L2 TXRSP LCRD blocked",
+        "EventCode": "0x16a",
+        "EventName": "L2C_RSP_CRD_STALL",
+        "BriefDescription": "L2 TXRSP LCRD blocked"
+    },
+    {
+        "PublicDescription": "L2 TXREQ LCRD blocked",
+        "EventCode": "0x16b",
+        "EventName": "L2C_REQ_CRD_STALL",
+        "BriefDescription": "L2 TXREQ LCRD blocked"
+    },
+    {
+        "PublicDescription": "Early mispredict",
+        "EventCode": "0xD100",
+        "EventName": "ICF_EARLY_MIS_PRED",
+        "BriefDescription": "Early mispredict"
+    },
+    {
+        "PublicDescription": "FEQ full cycles",
+        "EventCode": "0xD101",
+        "EventName": "ICF_FEQ_FULL",
+        "BriefDescription": "FEQ full cycles"
+    },
+    {
+        "PublicDescription": "Instruction FIFO Full",
+        "EventCode": "0xD102",
+        "EventName": "ICF_INST_FIFO_FULL",
+        "BriefDescription": "Instruction FIFO Full"
+    },
+    {
+        "PublicDescription": "L1I TLB miss",
+        "EventCode": "0xD103",
+        "EventName": "L1I_TLB_MISS",
+        "BriefDescription": "L1I TLB miss"
+    },
+    {
+        "PublicDescription": "ICF sent 0 instructions to IDR this cycle",
+        "EventCode": "0xD104",
+        "EventName": "ICF_STALL",
+        "BriefDescription": "ICF sent 0 instructions to IDR this cycle"
+    },
+    {
+        "PublicDescription": "PC FIFO Full",
+        "EventCode": "0xD105",
+        "EventName": "ICF_PC_FIFO_FULL",
+        "BriefDescription": "PC FIFO Full"
+    },
+    {
+        "PublicDescription": "Stall due to BOB ID",
+        "EventCode": "0xD200",
+        "EventName": "IDR_STALL_BOB_ID",
+        "BriefDescription": "Stall due to BOB ID"
+    },
+    {
+        "PublicDescription": "Dispatch stall due to LOB entries",
+        "EventCode": "0xD201",
+        "EventName": "IDR_STALL_LOB_ID",
+        "BriefDescription": "Dispatch stall due to LOB entries"
+    },
+    {
+        "PublicDescription": "Dispatch stall due to SOB entries",
+        "EventCode": "0xD202",
+        "EventName": "IDR_STALL_SOB_ID",
+        "BriefDescription": "Dispatch stall due to SOB entries"
+    },
+    {
+        "PublicDescription": "Dispatch stall due to IXU scheduler entries",
+        "EventCode": "0xD203",
+        "EventName": "IDR_STALL_IXU_SCHED",
+        "BriefDescription": "Dispatch stall due to IXU scheduler entries"
+    },
+    {
+        "PublicDescription": "Dispatch stall due to FSU scheduler entries",
+        "EventCode": "0xD204",
+        "EventName": "IDR_STALL_FSU_SCHED",
+        "BriefDescription": "Dispatch stall due to FSU scheduler entries"
+    },
+    {
+        "PublicDescription": "Dispatch stall due to ROB entries",
+        "EventCode": "0xD205",
+        "EventName": "IDR_STALL_ROB_ID",
+        "BriefDescription": "Dispatch stall due to ROB entries"
+    },
+    {
+        "PublicDescription": "Dispatch stall due to flush",
+        "EventCode": "0xD206",
+        "EventName": "IDR_STALL_FLUSH",
+        "BriefDescription": "Dispatch stall due to flush"
+    },
+    {
+        "PublicDescription": "Dispatch stall due to WFI",
+        "EventCode": "0xD207",
+        "EventName": "IDR_STALL_WFI",
+        "BriefDescription": "Dispatch stall due to WFI"
+    },
+    {
+        "PublicDescription": "Number of SWOB drains triggered by timeout",
+        "EventCode": "0xD208",
+        "EventName": "IDR_STALL_SWOB_TIMEOUT",
+        "BriefDescription": "Number of SWOB drains triggered by timeout"
+    },
+    {
+        "PublicDescription": "Number of SWOB drains triggered by system register or special-purpose register read-after-write or specific special-purpose register writes that cause SWOB drain",
+        "EventCode": "0xD209",
+        "EventName": "IDR_STALL_SWOB_RAW",
+        "BriefDescription": "Number of SWOB drains triggered by system register or special-purpose register read-after-write or specific special-purpose register writes that cause SWOB drain"
+    },
+    {
+        "PublicDescription": "Number of SWOB drains triggered by system register write when SWOB full",
+        "EventCode": "0xD20A",
+        "EventName": "IDR_STALL_SWOB_FULL",
+        "BriefDescription": "Number of SWOB drains triggered by system register write when SWOB full"
+    },
+    {
+        "PublicDescription": "Dispatch stall due to L1 instruction cache miss",
+        "EventCode": "0xD20B",
+        "EventName": "STALL_FRONTEND_CACHE",
+        "BriefDescription": "Dispatch stall due to L1 instruction cache miss"
+    },
+    {
+        "PublicDescription": "Dispatch stall due to L1 data cache miss",
+        "EventCode": "0xD20D",
+        "EventName": "STALL_BACKEND_CACHE",
+        "BriefDescription": "Dispatch stall due to L1 data cache miss"
+    },
+    {
+        "PublicDescription": "Dispatch stall due to lack of any core resource",
+        "EventCode": "0xD20F",
+        "EventName": "STALL_BACKEND_RESOURCE",
+        "BriefDescription": "Dispatch stall due to lack of any core resource"
+    },
+    {
+        "PublicDescription": "Instructions issued by the scheduler",
+        "EventCode": "0xD300",
+        "EventName": "IXU_NUM_UOPS_ISSUED",
+        "BriefDescription": "Instructions issued by the scheduler"
+    },
+    {
+        "PublicDescription": "Any uop issued was canceled for any reason",
+        "EventCode": "0xD301",
+        "EventName": "IXU_ISSUE_CANCEL",
+        "BriefDescription": "Any uop issued was canceled for any reason"
+    },
+    {
+        "PublicDescription": "A load wakeup to the scheduler has been canceled",
+        "EventCode": "0xD302",
+        "EventName": "IXU_LOAD_CANCEL",
+        "BriefDescription": "A load wakeup to the scheduler has been canceled"
+    },
+    {
+        "PublicDescription": "The scheduler had to cancel one slow Uop due to resource conflict",
+        "EventCode": "0xD303",
+        "EventName": "IXU_SLOW_CANCEL",
+        "BriefDescription": "The scheduler had to cancel one slow Uop due to resource conflict"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXA",
+        "EventCode": "0xD304",
+        "EventName": "IXU_IXA_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXA"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXA Par 0",
+        "EventCode": "0xD305",
+        "EventName": "IXU_IXA_PAR0_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXA Par 0"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXA Par 1",
+        "EventCode": "0xD306",
+        "EventName": "IXU_IXA_PAR1_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXA Par 1"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXB",
+        "EventCode": "0xD307",
+        "EventName": "IXU_IXB_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXB"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXB Par 0",
+        "EventCode": "0xD308",
+        "EventName": "IXU_IXB_PAR0_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXB Par 0"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXB Par 1",
+        "EventCode": "0xD309",
+        "EventName": "IXU_IXB_PAR1_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXB Par 1"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXC",
+        "EventCode": "0xD30A",
+        "EventName": "IXU_IXC_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXC"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXC Par 0",
+        "EventCode": "0xD30B",
+        "EventName": "IXU_IXC_PAR0_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXC Par 0"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXC Par 1",
+        "EventCode": "0xD30C",
+        "EventName": "IXU_IXC_PAR1_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXC Par 1"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXD",
+        "EventCode": "0xD30D",
+        "EventName": "IXU_IXD_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXD"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXD Par 0",
+        "EventCode": "0xD30E",
+        "EventName": "IXU_IXD_PAR0_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXD Par 0"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on IXD Par 1",
+        "EventCode": "0xD30F",
+        "EventName": "IXU_IXD_PAR1_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on IXD Par 1"
+    },
+    {
+        "PublicDescription": "Uops issued by the FSU scheduler",
+        "EventCode": "0xD400",
+        "EventName": "FSU_ISSUED",
+        "BriefDescription": "Uops issued by the FSU scheduler"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on FSX",
+        "EventCode": "0xD401",
+        "EventName": "FSU_FSX_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on FSX"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on FSY",
+        "EventCode": "0xD402",
+        "EventName": "FSU_FSY_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on FSY"
+    },
+    {
+        "PublicDescription": "Uops issued by the scheduler on FSZ",
+        "EventCode": "0xD403",
+        "EventName": "FSU_FSZ_ISSUED",
+        "BriefDescription": "Uops issued by the scheduler on FSZ"
+    },
+    {
+        "PublicDescription": "Uops canceled (load cancels)",
+        "EventCode": "0xD404",
+        "EventName": "FSU_CANCEL",
+        "BriefDescription": "Uops canceled (load cancels)"
+    },
+    {
+        "PublicDescription": "Count scheduler stalls due to divide/sqrt",
+        "EventCode": "0xD405",
+        "EventName": "FSU_DIV_SQRT_STALL",
+        "BriefDescription": "Count scheduler stalls due to divide/sqrt"
+    },
+    {
+        "PublicDescription": "Number of SWOB drains",
+        "EventCode": "0xD500",
+        "EventName": "GPC_SWOB_DRAIN",
+        "BriefDescription": "Number of SWOB drains"
+    },
+    {
+        "PublicDescription": "GPC detected a Breakpoint instruction match",
+        "EventCode": "0xD501",
+        "EventName": "BREAKPOINT_MATCH",
+        "BriefDescription": "GPC detected a Breakpoint instruction match"
+    },
+    {
+        "PublicDescription": "Core progress monitor triggered",
+        "EventCode": "0xd502",
+        "EventName": "GPC_CPM_TRIGGER",
+        "BriefDescription": "Core progress monitor triggered"
+    },
+    {
+        "PublicDescription": "Fill buffer full",
+        "EventCode": "0xD601",
+        "EventName": "OFB_FULL",
+        "BriefDescription": "Fill buffer full"
+    },
+    {
+        "PublicDescription": "Load satisfied from store forwarded data",
+        "EventCode": "0xD605",
+        "EventName": "LD_FROM_ST_FWD",
+        "BriefDescription": "Load satisfied from store forwarded data"
+    },
+    {
+        "PublicDescription": "Store retirement pipe stall",
+        "EventCode": "0xD60C",
+        "EventName": "LSU_ST_RETIRE_STALL",
+        "BriefDescription": "Store retirement pipe stall"
+    },
+    {
+        "PublicDescription": "LSU detected a Watchpoint data match",
+        "EventCode": "0xD60D",
+        "EventName": "WATCHPOINT_MATCH",
+        "BriefDescription": "LSU detected a Watchpoint data match"
+    },
+    {
+        "PublicDescription": "Counts cycles that MSC is telling GPC to stall commit due to ETM ISTALL feature",
+        "EventCode": "0xda00",
+        "EventName": "MSC_ETM_COMMIT_STALL",
+        "BriefDescription": "Counts cycles that MSC is telling GPC to stall commit due to ETM ISTALL feature"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json
new file mode 100644 (file)
index 0000000..bd59ba7
--- /dev/null
@@ -0,0 +1,47 @@
+[
+    {
+        "ArchStdEvent": "EXC_UNDEF"
+    },
+    {
+        "ArchStdEvent": "EXC_SVC"
+    },
+    {
+        "ArchStdEvent": "EXC_PABORT"
+    },
+    {
+        "ArchStdEvent": "EXC_DABORT"
+    },
+    {
+        "ArchStdEvent": "EXC_IRQ"
+    },
+    {
+        "ArchStdEvent": "EXC_FIQ"
+    },
+    {
+        "ArchStdEvent": "EXC_HVC"
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_PABORT"
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_DABORT"
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_OTHER"
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_IRQ"
+    },
+    {
+        "ArchStdEvent": "EXC_TRAP_FIQ"
+    },
+    {
+        "ArchStdEvent": "EXC_TAKEN"
+    },
+    {
+        "ArchStdEvent": "EXC_RETURN"
+    },
+    {
+        "ArchStdEvent": "EXC_SMC"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json
new file mode 100644 (file)
index 0000000..a6a20f5
--- /dev/null
@@ -0,0 +1,128 @@
+[
+    {
+        "ArchStdEvent": "SW_INCR"
+    },
+    {
+        "ArchStdEvent": "ST_RETIRED"
+    },
+    {
+        "ArchStdEvent": "LD_SPEC"
+    },
+    {
+        "ArchStdEvent": "ST_SPEC"
+    },
+    {
+        "ArchStdEvent": "LDST_SPEC"
+    },
+    {
+        "ArchStdEvent": "DP_SPEC"
+    },
+    {
+        "ArchStdEvent": "ASE_SPEC"
+    },
+    {
+        "ArchStdEvent": "VFP_SPEC"
+    },
+    {
+        "ArchStdEvent": "PC_WRITE_SPEC"
+    },
+    {
+        "ArchStdEvent": "BR_IMMED_RETIRED"
+    },
+    {
+        "ArchStdEvent": "BR_RETURN_RETIRED"
+    },
+    {
+        "ArchStdEvent": "CRYPTO_SPEC"
+    },
+    {
+        "ArchStdEvent": "ISB_SPEC"
+    },
+    {
+        "ArchStdEvent": "DSB_SPEC"
+    },
+    {
+        "ArchStdEvent": "DMB_SPEC"
+    },
+    {
+        "ArchStdEvent": "RC_LD_SPEC"
+    },
+    {
+        "ArchStdEvent": "RC_ST_SPEC"
+    },
+    {
+        "ArchStdEvent": "INST_RETIRED"
+    },
+    {
+        "ArchStdEvent": "CID_WRITE_RETIRED"
+    },
+    {
+        "ArchStdEvent": "PC_WRITE_RETIRED"
+    },
+    {
+        "ArchStdEvent": "INST_SPEC"
+    },
+    {
+        "ArchStdEvent": "TTBR_WRITE_RETIRED"
+    },
+    {
+        "ArchStdEvent": "BR_RETIRED"
+    },
+    {
+        "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+    },
+    {
+        "ArchStdEvent": "OP_RETIRED"
+    },
+    {
+        "ArchStdEvent": "OP_SPEC"
+    },
+    {
+        "PublicDescription": "Operation speculatively executed - ASE Scalar",
+        "EventCode": "0xd210",
+        "EventName": "ASE_SCALAR_SPEC",
+        "BriefDescription": "Operation speculatively executed - ASE Scalar"
+    },
+    {
+        "PublicDescription": "Operation speculatively executed - ASE Vector",
+        "EventCode": "0xd211",
+        "EventName": "ASE_VECTOR_SPEC",
+        "BriefDescription": "Operation speculatively executed - ASE Vector"
+    },
+    {
+        "PublicDescription": "Barrier speculatively executed, CSDB",
+        "EventCode": "0x7f",
+        "EventName": "CSDB_SPEC",
+        "BriefDescription": "Barrier speculatively executed, CSDB"
+    },
+    {
+        "PublicDescription": "Prefetch sent to L2.",
+        "EventCode": "0xd106",
+        "EventName": "ICF_PREFETCH_DISPATCH",
+        "BriefDescription": "Prefetch sent to L2."
+    },
+    {
+        "PublicDescription": "Prefetch response received but was dropped since we don't support inflight upgrades.",
+        "EventCode": "0xd107",
+        "EventName": "ICF_PREFETCH_DROPPED_NO_UPGRADE",
+        "BriefDescription": "Prefetch response received but was dropped since we don't support inflight upgrades."
+    },
+    {
+        "PublicDescription": "Prefetch request missed TLB.",
+        "EventCode": "0xd108",
+        "EventName": "ICF_PREFETCH_DROPPED_TLB_MISS",
+        "BriefDescription": "Prefetch request missed TLB."
+    },
+    {
+        "PublicDescription": "Prefetch request dropped since duplicate was found in TLB.",
+        "EventCode": "0xd109",
+        "EventName": "ICF_PREFETCH_DROPPED_DUPLICATE",
+        "BriefDescription": "Prefetch request dropped since duplicate was found in TLB."
+    },
+    {
+        "PublicDescription": "Prefetch request dropped since it was found in cache.",
+        "EventCode": "0xd10a",
+        "EventName": "ICF_PREFETCH_DROPPED_CACHE_HIT",
+        "BriefDescription": "Prefetch request dropped since it was found in cache."
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json
new file mode 100644 (file)
index 0000000..7ecffb9
--- /dev/null
@@ -0,0 +1,14 @@
+[
+    {
+        "ArchStdEvent": "LDREX_SPEC"
+    },
+    {
+        "ArchStdEvent": "STREX_PASS_SPEC"
+    },
+    {
+        "ArchStdEvent": "STREX_FAIL_SPEC"
+    },
+    {
+        "ArchStdEvent": "STREX_SPEC"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json
new file mode 100644 (file)
index 0000000..a211d94
--- /dev/null
@@ -0,0 +1,41 @@
+[
+    {
+        "ArchStdEvent": "LD_RETIRED"
+    },
+    {
+        "ArchStdEvent": "MEM_ACCESS_RD"
+    },
+    {
+        "ArchStdEvent": "MEM_ACCESS_WR"
+    },
+    {
+        "ArchStdEvent": "LD_ALIGN_LAT"
+    },
+    {
+        "ArchStdEvent": "ST_ALIGN_LAT"
+    },
+    {
+        "ArchStdEvent": "MEM_ACCESS"
+    },
+    {
+        "ArchStdEvent": "MEMORY_ERROR"
+    },
+    {
+        "ArchStdEvent": "LDST_ALIGN_LAT"
+    },
+    {
+        "ArchStdEvent": "MEM_ACCESS_CHECKED"
+    },
+    {
+        "ArchStdEvent": "MEM_ACCESS_CHECKED_RD"
+    },
+    {
+        "ArchStdEvent": "MEM_ACCESS_CHECKED_WR"
+    },
+    {
+        "PublicDescription": "Flushes due to memory hazards",
+        "EventCode": "0x121",
+        "EventName": "BPU_FLUSH_MEM_FAULT",
+        "BriefDescription": "Flushes due to memory hazards"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
new file mode 100644 (file)
index 0000000..c5d1d22
--- /dev/null
@@ -0,0 +1,442 @@
+[
+    {
+        "MetricName": "branch_miss_pred_rate",
+        "MetricExpr": "BR_MIS_PRED / BR_PRED",
+        "BriefDescription": "Branch predictor misprediction rate. May not count branches that are never resolved because they are in the misprediction shadow of an earlier branch",
+        "MetricGroup": "branch",
+        "ScaleUnit": "100%"
+    },
+    {
+        "MetricName": "bus_utilization",
+        "MetricExpr": "BUS_ACCESS / (BUS_CYCLES * 1)",
+        "BriefDescription": "Core-to-uncore bus utilization",
+        "MetricGroup": "Bus",
+        "ScaleUnit": "100percent of bus cycles"
+    },
+    {
+        "MetricName": "l1d_cache_miss_ratio",
+        "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
+        "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.",
+        "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness",
+        "ScaleUnit": "1per cache access"
+    },
+    {
+        "MetricName": "l1i_cache_miss_ratio",
+        "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
+        "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. This gives an indication of the effectiveness of the level 1 instruction cache.",
+        "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness",
+        "ScaleUnit": "1per cache access"
+    },
+    {
+        "MetricName": "l1d_cache_read_miss_ratio",
+        "MetricExpr": "L1D_CACHE_LMISS_RD / L1D_CACHE_RD",
+        "BriefDescription": "L1D cache read miss rate",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "1per cache read access"
+    },
+    {
+        "MetricName": "l2_cache_miss_ratio",
+        "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
+        "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instruction. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+        "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness",
+        "ScaleUnit": "1per cache access"
+    },
+    {
+        "MetricName": "l1i_cache_read_miss_rate",
+        "MetricExpr": "L1I_CACHE_LMISS / L1I_CACHE",
+        "BriefDescription": "L1I cache read miss rate",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "1per cache access"
+    },
+    {
+        "MetricName": "l2d_cache_read_miss_rate",
+        "MetricExpr": "L2D_CACHE_LMISS_RD / L2D_CACHE_RD",
+        "BriefDescription": "L2 cache read miss rate",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "1per cache read access"
+    },
+    {
+        "MetricName": "l1d_cache_miss_mpki",
+        "MetricExpr": "(L1D_CACHE_LMISS_RD * 1e3) / INST_RETIRED",
+        "BriefDescription": "Misses per thousand instructions (data)",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "1MPKI"
+    },
+    {
+        "MetricName": "l1i_cache_miss_mpki",
+        "MetricExpr": "(L1I_CACHE_LMISS * 1e3) / INST_RETIRED",
+        "BriefDescription": "Misses per thousand instructions (instruction)",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "1MPKI"
+    },
+    {
+        "MetricName": "simd_percentage",
+        "MetricExpr": "ASE_SPEC / INST_SPEC",
+        "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
+        "MetricGroup": "Operation_Mix",
+        "ScaleUnit": "100percent of operations"
+    },
+    {
+        "MetricName": "crypto_percentage",
+        "MetricExpr": "CRYPTO_SPEC / INST_SPEC",
+        "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
+        "MetricGroup": "Operation_Mix",
+        "ScaleUnit": "100percent of operations"
+    },
+    {
+        "MetricName": "gflops",
+        "MetricExpr": "VFP_SPEC / (duration_time * 1e9)",
+        "BriefDescription": "Giga-floating point operations per second",
+        "MetricGroup": "InstructionMix"
+    },
+    {
+        "MetricName": "integer_dp_percentage",
+        "MetricExpr": "DP_SPEC / INST_SPEC",
+        "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
+        "MetricGroup": "Operation_Mix",
+        "ScaleUnit": "100percent of operations"
+    },
+    {
+        "MetricName": "ipc",
+        "MetricExpr": "INST_RETIRED / CPU_CYCLES",
+        "BriefDescription": "This metric measures the number of instructions retired per cycle.",
+        "MetricGroup": "General",
+        "ScaleUnit": "1per cycle"
+    },
+    {
+        "MetricName": "load_percentage",
+        "MetricExpr": "LD_SPEC / INST_SPEC",
+        "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.",
+        "MetricGroup": "Operation_Mix",
+        "ScaleUnit": "100percent of operations"
+    },
+    {
+        "MetricName": "load_store_spec_rate",
+        "MetricExpr": "LDST_SPEC / INST_SPEC",
+        "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speculatively executed",
+        "MetricGroup": "Operation_Mix",
+        "ScaleUnit": "100percent of operations"
+    },
+    {
+        "MetricName": "retired_mips",
+        "MetricExpr": "INST_RETIRED / (duration_time * 1e6)",
+        "BriefDescription": "Millions of instructions per second",
+        "MetricGroup": "InstructionMix"
+    },
+    {
+        "MetricName": "spec_utilization_mips",
+        "MetricExpr": "INST_SPEC / (duration_time * 1e6)",
+        "BriefDescription": "Millions of instructions per second",
+        "MetricGroup": "PEutilization"
+    },
+    {
+        "MetricName": "pc_write_spec_rate",
+        "MetricExpr": "PC_WRITE_SPEC / INST_SPEC",
+        "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speculatively executed",
+        "MetricGroup": "Operation_Mix",
+        "ScaleUnit": "100percent of operations"
+    },
+    {
+        "MetricName": "store_percentage",
+        "MetricExpr": "ST_SPEC / INST_SPEC",
+        "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.",
+        "MetricGroup": "Operation_Mix",
+        "ScaleUnit": "100percent of operations"
+    },
+    {
+        "MetricName": "scalar_fp_percentage",
+        "MetricExpr": "VFP_SPEC / INST_SPEC",
+        "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
+        "MetricGroup": "Operation_Mix",
+        "ScaleUnit": "100percent of operations"
+    },
+    {
+        "MetricName": "retired_rate",
+        "MetricExpr": "OP_RETIRED / OP_SPEC",
+        "BriefDescription": "Of all the micro-operations issued, what percentage are retired(committed)",
+        "MetricGroup": "General",
+        "ScaleUnit": "100%"
+    },
+    {
+        "MetricName": "wasted",
+        "MetricExpr": "1 - (OP_RETIRED / (CPU_CYCLES * #slots))",
+        "BriefDescription": "Of all the micro-operations issued, what proportion are lost",
+        "MetricGroup": "General",
+        "ScaleUnit": "100%"
+    },
+    {
+        "MetricName": "wasted_rate",
+        "MetricExpr": "1 - OP_RETIRED / OP_SPEC",
+        "BriefDescription": "Of all the micro-operations issued, what percentage are not retired(committed)",
+        "MetricGroup": "General",
+        "ScaleUnit": "100%"
+    },
+    {
+        "MetricName": "stall_backend_cache_rate",
+        "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
+        "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and cache miss",
+        "MetricGroup": "Stall",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "stall_backend_resource_rate",
+        "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
+        "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and resource full",
+        "MetricGroup": "Stall",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "stall_backend_tlb_rate",
+        "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
+        "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and TLB miss",
+        "MetricGroup": "Stall",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "stall_frontend_cache_rate",
+        "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
+        "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss",
+        "MetricGroup": "Stall",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "stall_frontend_tlb_rate",
+        "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
+        "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss",
+        "MetricGroup": "Stall",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "dtlb_walk_ratio",
+        "MetricExpr": "DTLB_WALK / L1D_TLB",
+        "BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.",
+        "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
+        "ScaleUnit": "1per TLB access"
+    },
+    {
+        "MetricName": "itlb_walk_ratio",
+        "MetricExpr": "ITLB_WALK / L1I_TLB",
+        "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.",
+        "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
+        "ScaleUnit": "1per TLB access"
+    },
+    {
+        "ArchStdEvent": "backend_bound"
+    },
+    {
+        "ArchStdEvent": "frontend_bound",
+        "MetricExpr": "100 - (retired_fraction + slots_lost_misspeculation_fraction + backend_bound)"
+    },
+    {
+        "MetricName": "slots_lost_misspeculation_fraction",
+        "MetricExpr": "(OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots)",
+        "BriefDescription": "Fraction of slots lost due to misspeculation",
+        "DefaultMetricgroupName": "TopdownL1",
+        "MetricGroup": "Default;TopdownL1",
+        "ScaleUnit": "100percent of slots"
+    },
+    {
+        "MetricName": "retired_fraction",
+        "MetricExpr": "OP_RETIRED / (CPU_CYCLES * #slots)",
+        "BriefDescription": "Fraction of slots retiring, useful work",
+        "DefaultMetricgroupName": "TopdownL1",
+        "MetricGroup": "Default;TopdownL1",
+        "ScaleUnit": "100percent of slots"
+    },
+    {
+        "MetricName": "backend_core",
+        "MetricExpr": "(backend_bound / 100) - backend_memory",
+        "BriefDescription": "Fraction of slots the CPU was stalled due to backend non-memory subsystem issues",
+        "MetricGroup": "TopdownL2",
+        "ScaleUnit": "100%"
+    },
+    {
+        "MetricName": "backend_memory",
+        "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE) / CPU_CYCLES",
+        "BriefDescription": "Fraction of slots the CPU was stalled due to backend memory subsystem issues (cache/tlb miss)",
+        "MetricGroup": "TopdownL2",
+        "ScaleUnit": "100%"
+    },
+    {
+        "MetricName": "branch_mispredict",
+        "MetricExpr": "(BR_MIS_PRED_RETIRED / GPC_FLUSH) * slots_lost_misspeculation_fraction",
+        "BriefDescription": "Fraction of slots lost due to branch misprediction",
+        "MetricGroup": "TopdownL2",
+        "ScaleUnit": "1percent of slots"
+    },
+    {
+        "MetricName": "frontend_bandwidth",
+        "MetricExpr": "frontend_bound - frontend_latency",
+        "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)",
+        "MetricGroup": "TopdownL2",
+        "ScaleUnit": "1percent of slots"
+    },
+    {
+        "MetricName": "frontend_latency",
+        "MetricExpr": "(STALL_FRONTEND - ((STALL_SLOT_FRONTEND - ((frontend_bound / 100) * CPU_CYCLES * #slots)) / #slots)) / CPU_CYCLES",
+        "BriefDescription": "Fraction of slots the CPU was stalled due to frontend latency issues (cache/tlb miss); nothing to dispatch",
+        "MetricGroup": "TopdownL2",
+        "ScaleUnit": "100percent of slots"
+    },
+    {
+        "MetricName": "other_miss_pred",
+        "MetricExpr": "slots_lost_misspeculation_fraction - branch_mispredict",
+        "BriefDescription": "Fraction of slots lost due to other/non-branch misprediction misspeculation",
+        "MetricGroup": "TopdownL2",
+        "ScaleUnit": "1percent of slots"
+    },
+    {
+        "MetricName": "pipe_utilization",
+        "MetricExpr": "100 * ((IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6))",
+        "BriefDescription": "Fraction of execute slots utilized",
+        "MetricGroup": "TopdownL2",
+        "ScaleUnit": "1percent of slots"
+    },
+    {
+        "MetricName": "d_cache_l2_miss_rate",
+        "MetricExpr": "STALL_BACKEND_MEM / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled due to data L2 cache miss",
+        "MetricGroup": "TopdownL3",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "d_cache_miss_rate",
+        "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled due to data cache miss",
+        "MetricGroup": "TopdownL3",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "d_tlb_miss_rate",
+        "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled due to data TLB miss",
+        "MetricGroup": "TopdownL3",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "fsu_pipe_utilization",
+        "MetricExpr": "FSU_ISSUED / (CPU_CYCLES * 2)",
+        "BriefDescription": "Fraction of FSU execute slots utilized",
+        "MetricGroup": "TopdownL3",
+        "ScaleUnit": "100percent of slots"
+    },
+    {
+        "MetricName": "i_cache_miss_rate",
+        "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction cache miss",
+        "MetricGroup": "TopdownL3",
+        "ScaleUnit": "100percent of slots"
+    },
+    {
+        "MetricName": "i_tlb_miss_rate",
+        "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction TLB miss",
+        "MetricGroup": "TopdownL3",
+        "ScaleUnit": "100percent of slots"
+    },
+    {
+        "MetricName": "ixu_pipe_utilization",
+        "MetricExpr": "IXU_NUM_UOPS_ISSUED / (CPU_CYCLES * #slots)",
+        "BriefDescription": "Fraction of IXU execute slots utilized",
+        "MetricGroup": "TopdownL3",
+        "ScaleUnit": "100percent of slots"
+    },
+    {
+        "MetricName": "stall_recovery_rate",
+        "MetricExpr": "IDR_STALL_FLUSH / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled due to flush recovery",
+        "MetricGroup": "TopdownL3",
+        "ScaleUnit": "100percent of slots"
+    },
+    {
+        "MetricName": "stall_fsu_sched_rate",
+        "MetricExpr": "IDR_STALL_FSU_SCHED / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled and FSU was full",
+        "MetricGroup": "TopdownL4",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "stall_ixu_sched_rate",
+        "MetricExpr": "IDR_STALL_IXU_SCHED / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled and IXU was full",
+        "MetricGroup": "TopdownL4",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "stall_lob_id_rate",
+        "MetricExpr": "IDR_STALL_LOB_ID / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled and LOB was full",
+        "MetricGroup": "TopdownL4",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "stall_rob_id_rate",
+        "MetricExpr": "IDR_STALL_ROB_ID / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled and ROB was full",
+        "MetricGroup": "TopdownL4",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "stall_sob_id_rate",
+        "MetricExpr": "IDR_STALL_SOB_ID / CPU_CYCLES",
+        "BriefDescription": "Fraction of cycles the CPU was stalled and SOB was full",
+        "MetricGroup": "TopdownL4",
+        "ScaleUnit": "100percent of cycles"
+    },
+    {
+        "MetricName": "l1d_cache_access_demand",
+        "MetricExpr": "L1D_CACHE_RW / L1D_CACHE",
+        "BriefDescription": "L1D cache access - demand",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "100percent of cache acceses"
+    },
+    {
+        "MetricName": "l1d_cache_access_prefetces",
+        "MetricExpr": "L1D_CACHE_PRFM / L1D_CACHE",
+        "BriefDescription": "L1D cache access - prefetch",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "100percent of cache acceses"
+    },
+    {
+        "MetricName": "l1d_cache_demand_misses",
+        "MetricExpr": "L1D_CACHE_REFILL_RW / L1D_CACHE",
+        "BriefDescription": "L1D cache demand misses",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "100percent of cache acceses"
+    },
+    {
+        "MetricName": "l1d_cache_demand_misses_read",
+        "MetricExpr": "L1D_CACHE_REFILL_RD / L1D_CACHE",
+        "BriefDescription": "L1D cache demand misses - read",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "100percent of cache acceses"
+    },
+    {
+        "MetricName": "l1d_cache_demand_misses_write",
+        "MetricExpr": "L1D_CACHE_REFILL_WR / L1D_CACHE",
+        "BriefDescription": "L1D cache demand misses - write",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "100percent of cache acceses"
+    },
+    {
+        "MetricName": "l1d_cache_prefetch_misses",
+        "MetricExpr": "L1D_CACHE_REFILL_PRFM / L1D_CACHE",
+        "BriefDescription": "L1D cache prefetch misses",
+        "MetricGroup": "Cache",
+        "ScaleUnit": "100percent of cache acceses"
+    },
+    {
+        "MetricName": "ase_scalar_mix",
+        "MetricExpr": "ASE_SCALAR_SPEC / OP_SPEC",
+        "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) scalar operations",
+        "MetricGroup": "Instructions",
+        "ScaleUnit": "100percent of cache acceses"
+    },
+    {
+        "MetricName": "ase_vector_mix",
+        "MetricExpr": "ASE_VECTOR_SPEC / OP_SPEC",
+        "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) vector operations",
+        "MetricGroup": "Instructions",
+        "ScaleUnit": "100percent of cache acceses"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json
new file mode 100644 (file)
index 0000000..66d83b6
--- /dev/null
@@ -0,0 +1,170 @@
+[
+    {
+        "PublicDescription": "Level 2 data translation buffer allocation",
+        "EventCode": "0xD800",
+        "EventName": "MMU_D_OTB_ALLOC",
+        "BriefDescription": "Level 2 data translation buffer allocation"
+    },
+    {
+        "PublicDescription": "Data TLB translation cache hit on S1L2 walk cache entry",
+        "EventCode": "0xd801",
+        "EventName": "MMU_D_TRANS_CACHE_HIT_S1L2_WALK",
+        "BriefDescription": "Data TLB translation cache hit on S1L2 walk cache entry"
+    },
+    {
+        "PublicDescription": "Data TLB translation cache hit on S1L1 walk cache entry",
+        "EventCode": "0xd802",
+        "EventName": "MMU_D_TRANS_CACHE_HIT_S1L1_WALK",
+        "BriefDescription": "Data TLB translation cache hit on S1L1 walk cache entry"
+    },
+    {
+        "PublicDescription": "Data TLB translation cache hit on S1L0 walk cache entry",
+        "EventCode": "0xd803",
+        "EventName": "MMU_D_TRANS_CACHE_HIT_S1L0_WALK",
+        "BriefDescription": "Data TLB translation cache hit on S1L0 walk cache entry"
+    },
+    {
+        "PublicDescription": "Data TLB translation cache hit on S2L2 walk cache entry",
+        "EventCode": "0xd804",
+        "EventName": "MMU_D_TRANS_CACHE_HIT_S2L2_WALK",
+        "BriefDescription": "Data TLB translation cache hit on S2L2 walk cache entry"
+    },
+    {
+        "PublicDescription": "Data TLB translation cache hit on S2L1 walk cache entry",
+        "EventCode": "0xd805",
+        "EventName": "MMU_D_TRANS_CACHE_HIT_S2L1_WALK",
+        "BriefDescription": "Data TLB translation cache hit on S2L1 walk cache entry"
+    },
+    {
+        "PublicDescription": "Data TLB translation cache hit on S2L0 walk cache entry",
+        "EventCode": "0xd806",
+        "EventName": "MMU_D_TRANS_CACHE_HIT_S2L0_WALK",
+        "BriefDescription": "Data TLB translation cache hit on S2L0 walk cache entry"
+    },
+    {
+        "PublicDescription": "Data-side S1 page walk cache lookup",
+        "EventCode": "0xd807",
+        "EventName": "MMU_D_S1_WALK_CACHE_LOOKUP",
+        "BriefDescription": "Data-side S1 page walk cache lookup"
+    },
+    {
+        "PublicDescription": "Data-side S1 page walk cache refill",
+        "EventCode": "0xd808",
+        "EventName": "MMU_D_S1_WALK_CACHE_REFILL",
+        "BriefDescription": "Data-side S1 page walk cache refill"
+    },
+    {
+        "PublicDescription": "Data-side S2 page walk cache lookup",
+        "EventCode": "0xd809",
+        "EventName": "MMU_D_S2_WALK_CACHE_LOOKUP",
+        "BriefDescription": "Data-side S2 page walk cache lookup"
+    },
+    {
+        "PublicDescription": "Data-side S2 page walk cache refill",
+        "EventCode": "0xd80a",
+        "EventName": "MMU_D_S2_WALK_CACHE_REFILL",
+        "BriefDescription": "Data-side S2 page walk cache refill"
+    },
+    {
+        "PublicDescription": "Data-side S1 table walk fault",
+        "EventCode": "0xD80B",
+        "EventName": "MMU_D_S1_WALK_FAULT",
+        "BriefDescription": "Data-side S1 table walk fault"
+    },
+    {
+        "PublicDescription": "Data-side S2 table walk fault",
+        "EventCode": "0xD80C",
+        "EventName": "MMU_D_S2_WALK_FAULT",
+        "BriefDescription": "Data-side S2 table walk fault"
+    },
+    {
+        "PublicDescription": "Data-side table walk steps or descriptor fetches",
+        "EventCode": "0xD80D",
+        "EventName": "MMU_D_WALK_STEPS",
+        "BriefDescription": "Data-side table walk steps or descriptor fetches"
+    },
+    {
+        "PublicDescription": "Level 2 instruction translation buffer allocation",
+        "EventCode": "0xD900",
+        "EventName": "MMU_I_OTB_ALLOC",
+        "BriefDescription": "Level 2 instruction translation buffer allocation"
+    },
+    {
+        "PublicDescription": "Instruction TLB translation cache hit on S1L2 walk cache entry",
+        "EventCode": "0xd901",
+        "EventName": "MMU_I_TRANS_CACHE_HIT_S1L2_WALK",
+        "BriefDescription": "Instruction TLB translation cache hit on S1L2 walk cache entry"
+    },
+    {
+        "PublicDescription": "Instruction TLB translation cache hit on S1L1 walk cache entry",
+        "EventCode": "0xd902",
+        "EventName": "MMU_I_TRANS_CACHE_HIT_S1L1_WALK",
+        "BriefDescription": "Instruction TLB translation cache hit on S1L1 walk cache entry"
+    },
+    {
+        "PublicDescription": "Instruction TLB translation cache hit on S1L0 walk cache entry",
+        "EventCode": "0xd903",
+        "EventName": "MMU_I_TRANS_CACHE_HIT_S1L0_WALK",
+        "BriefDescription": "Instruction TLB translation cache hit on S1L0 walk cache entry"
+    },
+    {
+        "PublicDescription": "Instruction TLB translation cache hit on S2L2 walk cache entry",
+        "EventCode": "0xd904",
+        "EventName": "MMU_I_TRANS_CACHE_HIT_S2L2_WALK",
+        "BriefDescription": "Instruction TLB translation cache hit on S2L2 walk cache entry"
+    },
+    {
+        "PublicDescription": "Instruction TLB translation cache hit on S2L1 walk cache entry",
+        "EventCode": "0xd905",
+        "EventName": "MMU_I_TRANS_CACHE_HIT_S2L1_WALK",
+        "BriefDescription": "Instruction TLB translation cache hit on S2L1 walk cache entry"
+    },
+    {
+        "PublicDescription": "Instruction TLB translation cache hit on S2L0 walk cache entry",
+        "EventCode": "0xd906",
+        "EventName": "MMU_I_TRANS_CACHE_HIT_S2L0_WALK",
+        "BriefDescription": "Instruction TLB translation cache hit on S2L0 walk cache entry"
+    },
+    {
+        "PublicDescription": "Instruction-side S1 page walk cache lookup",
+        "EventCode": "0xd907",
+        "EventName": "MMU_I_S1_WALK_CACHE_LOOKUP",
+        "BriefDescription": "Instruction-side S1 page walk cache lookup"
+    },
+    {
+        "PublicDescription": "Instruction-side S1 page walk cache refill",
+        "EventCode": "0xd908",
+        "EventName": "MMU_I_S1_WALK_CACHE_REFILL",
+        "BriefDescription": "Instruction-side S1 page walk cache refill"
+    },
+    {
+        "PublicDescription": "Instruction-side S2 page walk cache lookup",
+        "EventCode": "0xd909",
+        "EventName": "MMU_I_S2_WALK_CACHE_LOOKUP",
+        "BriefDescription": "Instruction-side S2 page walk cache lookup"
+    },
+    {
+        "PublicDescription": "Instruction-side S2 page walk cache refill",
+        "EventCode": "0xd90a",
+        "EventName": "MMU_I_S2_WALK_CACHE_REFILL",
+        "BriefDescription": "Instruction-side S2 page walk cache refill"
+    },
+    {
+        "PublicDescription": "Instruction-side S1 table walk fault",
+        "EventCode": "0xD90B",
+        "EventName": "MMU_I_S1_WALK_FAULT",
+        "BriefDescription": "Instruction-side S1 table walk fault"
+    },
+    {
+        "PublicDescription": "Instruction-side S2 table walk fault",
+        "EventCode": "0xD90C",
+        "EventName": "MMU_I_S2_WALK_FAULT",
+        "BriefDescription": "Instruction-side S2 table walk fault"
+    },
+    {
+        "PublicDescription": "Instruction-side table walk steps or descriptor fetches",
+        "EventCode": "0xD90D",
+        "EventName": "MMU_I_WALK_STEPS",
+        "BriefDescription": "Instruction-side table walk steps or descriptor fetches"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json
new file mode 100644 (file)
index 0000000..2fb2d1f
--- /dev/null
@@ -0,0 +1,41 @@
+[
+    {
+        "ArchStdEvent": "STALL_FRONTEND",
+        "Errata": "Errata AC03_CPU_29",
+        "BriefDescription": "Impacted by errata, use metrics instead -"
+    },
+    {
+        "ArchStdEvent": "STALL_BACKEND"
+    },
+    {
+        "ArchStdEvent": "STALL",
+        "Errata": "Errata AC03_CPU_29",
+        "BriefDescription": "Impacted by errata, use metrics instead -"
+    },
+    {
+        "ArchStdEvent": "STALL_SLOT_BACKEND"
+    },
+    {
+        "ArchStdEvent": "STALL_SLOT_FRONTEND",
+        "Errata": "Errata AC03_CPU_29",
+        "BriefDescription": "Impacted by errata, use metrics instead -"
+    },
+    {
+        "ArchStdEvent": "STALL_SLOT"
+    },
+    {
+        "ArchStdEvent": "STALL_BACKEND_MEM"
+    },
+    {
+        "PublicDescription": "Frontend stall cycles, TLB",
+        "EventCode": "0x815c",
+        "EventName": "STALL_FRONTEND_TLB",
+        "BriefDescription": "Frontend stall cycles, TLB"
+    },
+    {
+        "PublicDescription": "Backend stall cycles, TLB",
+        "EventCode": "0x8167",
+        "EventName": "STALL_BACKEND_TLB",
+        "BriefDescription": "Backend stall cycles, TLB"
+    }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json
new file mode 100644 (file)
index 0000000..20f2165
--- /dev/null
@@ -0,0 +1,14 @@
+[
+    {
+        "ArchStdEvent": "SAMPLE_POP"
+    },
+    {
+        "ArchStdEvent": "SAMPLE_FEED"
+    },
+    {
+        "ArchStdEvent": "SAMPLE_FILTRATE"
+    },
+    {
+        "ArchStdEvent": "SAMPLE_COLLISION"
+    }
+]
index 428605c37d10bcb5aef284aaa5b8279085c0002d..5ec157c39f0df134412ec65c694cdc08297e78e5 100644 (file)
                "EventName": "hnf_qos_hh_retry",
                "EventidCode": "0xe",
                "NodeType": "0x5",
-               "BriefDescription": "Counts number of times a HighHigh priority request is protocolretried at the HNF.",
+               "BriefDescription": "Counts number of times a HighHigh priority request is protocolretried at the HN-F.",
                "Unit": "arm_cmn",
                "Compat": "(434|436|43c|43a).*"
        },
index 5b58db5032c11fad8c1dd7e7cf4fd8124772aafd..f4d1ca4d1493ddb68ef1cdb9097732d5163e9f4c 100644 (file)
@@ -42,3 +42,4 @@
 0x00000000480fd010,v1,hisilicon/hip08,core
 0x00000000500f0000,v1,ampere/emag,core
 0x00000000c00fac30,v1,ampere/ampereone,core
+0x00000000c00fac40,v1,ampere/ampereonex,core
index f4908af7ad66b48b4da10e1f47fe1b6ab23dc77f..599a588dbeb40070fec7e3dd2c721edcb73dbb09 100644 (file)
@@ -11,8 +11,7 @@
 #
 # Multiple PVRs could map to a single JSON file.
 #
-
-# Power8 entries
 0x004[bcd][[:xdigit:]]{4},1,power8,core
+0x0066[[:xdigit:]]{4},1,power8,core
 0x004e[[:xdigit:]]{4},1,power9,core
 0x0080[[:xdigit:]]{4},1,power10,core
index 6b0356f2d301384e9e29a29ef34f4526f4d01c14..0eeaaf1a95b863bac3772f4a47018f6574861316 100644 (file)
     "EventName": "PM_INST_FROM_L2MISS",
     "BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L2 due to a demand miss."
   },
+  {
+    "EventCode": "0x0003C0000000C040",
+    "EventName": "PM_DATA_FROM_L2MISS_DSRC",
+    "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss."
+  },
   {
     "EventCode": "0x000380000010C040",
     "EventName": "PM_INST_FROM_L2MISS_ALL",
   },
   {
     "EventCode": "0x000780000000C040",
-    "EventName": "PM_INST_FROM_L3MISS",
+    "EventName": "PM_INST_FROM_L3MISS_DSRC",
     "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss."
   },
+  {
+    "EventCode": "0x0007C0000000C040",
+    "EventName": "PM_DATA_FROM_L3MISS_DSRC",
+    "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss."
+  },
   {
     "EventCode": "0x000780000010C040",
     "EventName": "PM_INST_FROM_L3MISS_ALL",
   },
   {
     "EventCode": "0x0003C0000000C142",
-    "EventName": "PM_MRK_DATA_FROM_L2MISS",
+    "EventName": "PM_MRK_DATA_FROM_L2MISS_DSRC",
     "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss for a marked instruction."
   },
   {
   },
   {
     "EventCode": "0x000780000000C142",
-    "EventName": "PM_MRK_INST_FROM_L3MISS",
+    "EventName": "PM_MRK_INST_FROM_L3MISS_DSRC",
     "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
   },
   {
     "EventCode": "0x0007C0000000C142",
-    "EventName": "PM_MRK_DATA_FROM_L3MISS",
+    "EventName": "PM_MRK_DATA_FROM_L3MISS_DSRC",
     "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
   },
   {
index c61b3d6ef6166a906164c0fe2732199b96843621..cfc449b198105ebe5004c0565de85499ff14f319 100644 (file)
@@ -15,3 +15,5 @@
 #
 #MVENDORID-MARCHID-MIMPID,Version,Filename,EventType
 0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/u74,core
+0x5b7-0x0-0x0,v1,thead/c900-legacy,core
+0x67e-0x80000000db0000[89]0-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core
diff --git a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json
new file mode 100644 (file)
index 0000000..fbffcac
--- /dev/null
@@ -0,0 +1,172 @@
+[
+  {
+    "EventName": "ACCESS_MMU_STLB",
+    "EventCode": "0x1",
+    "BriefDescription": "access MMU STLB"
+  },
+  {
+    "EventName": "MISS_MMU_STLB",
+    "EventCode": "0x2",
+    "BriefDescription": "miss MMU STLB"
+  },
+  {
+    "EventName": "ACCESS_MMU_PTE_C",
+    "EventCode": "0x3",
+    "BriefDescription": "access MMU PTE-Cache"
+  },
+  {
+    "EventName": "MISS_MMU_PTE_C",
+    "EventCode": "0x4",
+    "BriefDescription": "miss MMU PTE-Cache"
+  },
+  {
+    "EventName": "ROB_FLUSH",
+    "EventCode": "0x5",
+    "BriefDescription": "ROB flush (all kinds of exceptions)"
+  },
+  {
+    "EventName": "BTB_PREDICTION_MISS",
+    "EventCode": "0x6",
+    "BriefDescription": "BTB prediction miss"
+  },
+  {
+    "EventName": "ITLB_MISS",
+    "EventCode": "0x7",
+    "BriefDescription": "ITLB miss"
+  },
+  {
+    "EventName": "SYNC_DEL_FETCH_G",
+    "EventCode": "0x8",
+    "BriefDescription": "SYNC delivery a fetch-group"
+  },
+  {
+    "EventName": "ICACHE_MISS",
+    "EventCode": "0x9",
+    "BriefDescription": "ICache miss"
+  },
+  {
+    "EventName": "BPU_BR_RETIRE",
+    "EventCode": "0xA",
+    "BriefDescription": "condition branch instruction retire"
+  },
+  {
+    "EventName": "BPU_BR_MISS",
+    "EventCode": "0xB",
+    "BriefDescription": "condition branch instruction miss"
+  },
+  {
+    "EventName": "RET_INS_RETIRE",
+    "EventCode": "0xC",
+    "BriefDescription": "return instruction retire"
+  },
+  {
+    "EventName": "RET_INS_MISS",
+    "EventCode": "0xD",
+    "BriefDescription": "return instruction miss"
+  },
+  {
+    "EventName": "INDIRECT_JR_MISS",
+    "EventCode": "0xE",
+    "BriefDescription": "indirect JR instruction miss (inlcude without target)"
+  },
+  {
+    "EventName": "IBUF_VAL_ID_NORDY",
+    "EventCode": "0xF",
+    "BriefDescription": "IBUF valid while ID not ready"
+  },
+  {
+    "EventName": "IBUF_NOVAL_ID_RDY",
+    "EventCode": "0x10",
+    "BriefDescription": "IBUF not valid while ID ready"
+  },
+  {
+    "EventName": "REN_INT_PHY_REG_NORDY",
+    "EventCode": "0x11",
+    "BriefDescription": "REN integer physical register file is not ready"
+  },
+  {
+    "EventName": "REN_FP_PHY_REG_NORDY",
+    "EventCode": "0x12",
+    "BriefDescription": "REN floating point physical register file is not ready"
+  },
+  {
+    "EventName": "REN_CP_NORDY",
+    "EventCode": "0x13",
+    "BriefDescription": "REN checkpoint is not ready"
+  },
+  {
+    "EventName": "DEC_VAL_ROB_NORDY",
+    "EventCode": "0x14",
+    "BriefDescription": "DEC is valid and ROB is not ready"
+  },
+  {
+    "EventName": "OOD_FLUSH_LS_DEP",
+    "EventCode": "0x15",
+    "BriefDescription": "out of order flush due to load/store dependency"
+  },
+  {
+    "EventName": "BRU_RET_IJR_INS",
+    "EventCode": "0x16",
+    "BriefDescription": "BRU retire an IJR instruction"
+  },
+  {
+    "EventName": "ACCESS_DTLB",
+    "EventCode": "0x17",
+    "BriefDescription": "access DTLB"
+  },
+  {
+    "EventName": "MISS_DTLB",
+    "EventCode": "0x18",
+    "BriefDescription": "miss DTLB"
+  },
+  {
+    "EventName": "LOAD_INS_DCACHE",
+    "EventCode": "0x19",
+    "BriefDescription": "load instruction access DCache"
+  },
+  {
+    "EventName": "LOAD_INS_MISS_DCACHE",
+    "EventCode": "0x1A",
+    "BriefDescription": "load instruction miss DCache"
+  },
+  {
+    "EventName": "STORE_INS_DCACHE",
+    "EventCode": "0x1B",
+    "BriefDescription": "store/amo instruction access DCache"
+  },
+  {
+    "EventName": "STORE_INS_MISS_DCACHE",
+    "EventCode": "0x1C",
+    "BriefDescription": "store/amo instruction miss DCache"
+  },
+  {
+    "EventName": "LOAD_SCACHE",
+    "EventCode": "0x1D",
+    "BriefDescription": "load access SCache"
+  },
+  {
+    "EventName": "STORE_SCACHE",
+    "EventCode": "0x1E",
+    "BriefDescription": "store access SCache"
+  },
+  {
+    "EventName": "LOAD_MISS_SCACHE",
+    "EventCode": "0x1F",
+    "BriefDescription": "load miss SCache"
+  },
+  {
+    "EventName": "STORE_MISS_SCACHE",
+    "EventCode": "0x20",
+    "BriefDescription": "store miss SCache"
+  },
+  {
+    "EventName": "L2C_PF_REQ",
+    "EventCode": "0x21",
+    "BriefDescription": "L2C data-prefetcher request"
+  },
+  {
+    "EventName": "L2C_PF_HIT",
+    "EventCode": "0x22",
+    "BriefDescription": "L2C data-prefetcher hit"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
new file mode 100644 (file)
index 0000000..9b4a032
--- /dev/null
@@ -0,0 +1,68 @@
+[
+  {
+    "ArchStdEvent": "FW_MISALIGNED_LOAD"
+  },
+  {
+    "ArchStdEvent": "FW_MISALIGNED_STORE"
+  },
+  {
+    "ArchStdEvent": "FW_ACCESS_LOAD"
+  },
+  {
+    "ArchStdEvent": "FW_ACCESS_STORE"
+  },
+  {
+    "ArchStdEvent": "FW_ILLEGAL_INSN"
+  },
+  {
+    "ArchStdEvent": "FW_SET_TIMER"
+  },
+  {
+    "ArchStdEvent": "FW_IPI_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_IPI_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_FENCE_I_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_FENCE_I_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_SFENCE_VMA_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_GVMA_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_GVMA_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_GVMA_VMID_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_GVMA_VMID_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_VVMA_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_VVMA_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_VVMA_ASID_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_VVMA_ASID_RECEIVED"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json
new file mode 100644 (file)
index 0000000..2b14234
--- /dev/null
@@ -0,0 +1,67 @@
+[
+  {
+    "EventName": "L1_ICACHE_ACCESS",
+    "EventCode": "0x00000001",
+    "BriefDescription": "L1 instruction cache access"
+  },
+  {
+    "EventName": "L1_ICACHE_MISS",
+    "EventCode": "0x00000002",
+    "BriefDescription": "L1 instruction cache miss"
+  },
+  {
+    "EventName": "ITLB_MISS",
+    "EventCode": "0x00000003",
+    "BriefDescription": "I-UTLB miss"
+  },
+  {
+    "EventName": "DTLB_MISS",
+    "EventCode": "0x00000004",
+    "BriefDescription": "D-UTLB miss"
+  },
+  {
+    "EventName": "JTLB_MISS",
+    "EventCode": "0x00000005",
+    "BriefDescription": "JTLB miss"
+  },
+  {
+    "EventName": "L1_DCACHE_READ_ACCESS",
+    "EventCode": "0x0000000c",
+    "BriefDescription": "L1 data cache read access"
+  },
+  {
+    "EventName": "L1_DCACHE_READ_MISS",
+    "EventCode": "0x0000000d",
+    "BriefDescription": "L1 data cache read miss"
+  },
+  {
+    "EventName": "L1_DCACHE_WRITE_ACCESS",
+    "EventCode": "0x0000000e",
+    "BriefDescription": "L1 data cache write access"
+  },
+  {
+    "EventName": "L1_DCACHE_WRITE_MISS",
+    "EventCode": "0x0000000f",
+    "BriefDescription": "L1 data cache write miss"
+  },
+  {
+    "EventName": "LL_CACHE_READ_ACCESS",
+    "EventCode": "0x00000010",
+    "BriefDescription": "LL Cache read access"
+  },
+  {
+    "EventName": "LL_CACHE_READ_MISS",
+    "EventCode": "0x00000011",
+    "BriefDescription": "LL Cache read miss"
+  },
+  {
+    "EventName": "LL_CACHE_WRITE_ACCESS",
+    "EventCode": "0x00000012",
+    "BriefDescription": "LL Cache write access"
+  },
+  {
+    "EventName": "LL_CACHE_WRITE_MISS",
+    "EventCode": "0x00000013",
+    "BriefDescription": "LL Cache write miss"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
new file mode 100644 (file)
index 0000000..9b4a032
--- /dev/null
@@ -0,0 +1,68 @@
+[
+  {
+    "ArchStdEvent": "FW_MISALIGNED_LOAD"
+  },
+  {
+    "ArchStdEvent": "FW_MISALIGNED_STORE"
+  },
+  {
+    "ArchStdEvent": "FW_ACCESS_LOAD"
+  },
+  {
+    "ArchStdEvent": "FW_ACCESS_STORE"
+  },
+  {
+    "ArchStdEvent": "FW_ILLEGAL_INSN"
+  },
+  {
+    "ArchStdEvent": "FW_SET_TIMER"
+  },
+  {
+    "ArchStdEvent": "FW_IPI_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_IPI_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_FENCE_I_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_FENCE_I_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_SFENCE_VMA_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_GVMA_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_GVMA_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_GVMA_VMID_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_GVMA_VMID_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_VVMA_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_VVMA_RECEIVED"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_VVMA_ASID_SENT"
+  },
+  {
+    "ArchStdEvent": "FW_HFENCE_VVMA_ASID_RECEIVED"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json
new file mode 100644 (file)
index 0000000..c822b53
--- /dev/null
@@ -0,0 +1,72 @@
+[
+  {
+    "EventName": "INST_BRANCH_MISPREDICT",
+    "EventCode": "0x00000006",
+    "BriefDescription": "Mispredicted branch instructions"
+  },
+  {
+    "EventName": "INST_BRANCH",
+    "EventCode": "0x00000007",
+    "BriefDescription": "Retired branch instructions"
+  },
+  {
+    "EventName": "INST_JMP_MISPREDICT",
+    "EventCode": "0x00000008",
+    "BriefDescription": "Indirect branch mispredict"
+  },
+  {
+    "EventName": "INST_JMP",
+    "EventCode": "0x00000009",
+    "BriefDescription": "Retired jmp instructions"
+  },
+  {
+    "EventName": "INST_STORE",
+    "EventCode": "0x0000000b",
+    "BriefDescription": "Retired store instructions"
+  },
+  {
+    "EventName": "INST_ALU",
+    "EventCode": "0x0000001d",
+    "BriefDescription": "Retired ALU instructions"
+  },
+  {
+    "EventName": "INST_LDST",
+    "EventCode": "0x0000001e",
+    "BriefDescription": "Retired Load/Store instructions"
+  },
+  {
+    "EventName": "INST_VECTOR",
+    "EventCode": "0x0000001f",
+    "BriefDescription": "Retired Vector instructions"
+  },
+  {
+    "EventName": "INST_CSR",
+    "EventCode": "0x00000020",
+    "BriefDescription": "Retired CSR instructions"
+  },
+  {
+    "EventName": "INST_SYNC",
+    "EventCode": "0x00000021",
+    "BriefDescription": "Retired sync instructions (AMO/LR/SC instructions)"
+  },
+  {
+    "EventName": "INST_UNALIGNED_ACCESS",
+    "EventCode": "0x00000022",
+    "BriefDescription": "Retired Store/Load instructions with unaligned memory access"
+  },
+  {
+    "EventName": "INST_ECALL",
+    "EventCode": "0x00000025",
+    "BriefDescription": "Retired ecall instructions"
+  },
+  {
+    "EventName": "INST_LONG_JP",
+    "EventCode": "0x00000026",
+    "BriefDescription": "Retired long jump instructions"
+  },
+  {
+    "EventName": "INST_FP",
+    "EventCode": "0x0000002a",
+    "BriefDescription": "Retired FPU instructions"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json
new file mode 100644 (file)
index 0000000..0ab6f28
--- /dev/null
@@ -0,0 +1,80 @@
+[
+  {
+    "EventName": "LSU_SPEC_FAIL",
+    "EventCode": "0x0000000a",
+    "BriefDescription": "LSU speculation fail"
+  },
+  {
+    "EventName": "IDU_RF_PIPE_FAIL",
+    "EventCode": "0x00000014",
+    "BriefDescription": "Instruction decode unit launch pipeline failed in RF state"
+  },
+  {
+    "EventName": "IDU_RF_REG_FAIL",
+    "EventCode": "0x00000015",
+    "BriefDescription": "Instruction decode unit launch register file fail in RF state"
+  },
+  {
+    "EventName": "IDU_RF_INSTRUCTION",
+    "EventCode": "0x00000016",
+    "BriefDescription": "Retired instruction count of the instruction decode unit in the RF (Register File) stage"
+  },
+  {
+    "EventName": "LSU_4K_STALL",
+    "EventCode": "0x00000017",
+    "BriefDescription": "LSU stall times for long distance data access (Over 4K)",
+    "PublicDescription": "This stall occurs when translating a virtual address whose page offset is over 4K"
+  },
+  {
+    "EventName": "LSU_OTHER_STALL",
+    "EventCode": "0x00000018",
+    "BriefDescription": "LSU stall times for other reasons (except the 4k stall)"
+  },
+  {
+    "EventName": "LSU_SQ_OTHER_DIS",
+    "EventCode": "0x00000019",
+    "BriefDescription": "LSU store queue discard others"
+  },
+  {
+    "EventName": "LSU_SQ_DATA_DISCARD",
+    "EventCode": "0x0000001a",
+    "BriefDescription": "LSU store queue discard data (uops)"
+  },
+  {
+    "EventName": "BRANCH_DIRECTION_MISPREDICTION",
+    "EventCode": "0x0000001b",
+    "BriefDescription": "Branch misprediction in BTB"
+  },
+  {
+    "EventName": "BRANCH_DIRECTION_PREDICTION",
+    "EventCode": "0x0000001c",
+    "BriefDescription": "All branch prediction in BTB",
+    "PublicDescription": "This event includes both successful and failed predictions in the BTB"
+  },
+  {
+    "EventName": "INTERRUPT_ACK_COUNT",
+    "EventCode": "0x00000023",
+    "BriefDescription": "Acknowledged interrupt count"
+  },
+  {
+    "EventName": "INTERRUPT_OFF_CYCLE",
+    "EventCode": "0x00000024",
+    "BriefDescription": "PLIC arbitration time when the interrupt is not responded to",
+    "PublicDescription": "The arbitration time is recorded while meeting any of the following:\n- CPU is M-mode and MIE == 0\n- CPU is S-mode and delegation and SIE == 0\n"
+  },
+  {
+    "EventName": "IFU_STALLED_CYCLE",
+    "EventCode": "0x00000027",
+    "BriefDescription": "Number of stall cycles of the instruction fetch unit (IFU)."
+  },
+  {
+    "EventName": "IDU_STALLED_CYCLE",
+    "EventCode": "0x00000028",
+    "BriefDescription": "Number of stall cycles of the instruction decoding unit (IDU) and next-level pipeline unit (hpcp_backend_stall)."
+  },
+  {
+    "EventName": "SYNC_STALL",
+    "EventCode": "0x00000029",
+    "BriefDescription": "Sync instruction stall cycles (fence/fence.i/sync/sfence)"
+  }
+]
index 3388b58b8f1a687d9afb622c6e732c7c532612f0..bbfa3883e53384f563427e1b7567e679e1d1465f 100644 (file)
         "MetricName": "C9_Pkg_Residency",
         "ScaleUnit": "100%"
     },
-    {
-        "BriefDescription": "Uncore frequency per die [GHZ]",
-        "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
-        "MetricGroup": "SoC",
-        "MetricName": "UNCORE_FREQ"
-    },
     {
         "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
         "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not consumed by the backend due to certain allocation restrictions.",
-        "MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
         "MetricName": "tma_alloc_restriction",
         "MetricThreshold": "tma_alloc_restriction > 0.1",
     {
         "BriefDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls",
         "DefaultMetricgroupName": "TopdownL1",
-        "MetricExpr": "TOPDOWN_BE_BOUND.ALL / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL@ / tma_info_core_slots",
         "MetricGroup": "Default;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.1",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
-        "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
         "MetricName": "tma_branch_detect",
         "MetricThreshold": "tma_branch_detect > 0.05",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not consumed by the backend due to branch mispredicts.",
-        "MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / tma_info_core_slots",
         "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.05",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
-        "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
         "MetricName": "tma_branch_resteer",
         "MetricThreshold": "tma_branch_resteer > 0.05",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not delivered by the frontend due to the microcode sequencer (MS).",
-        "MetricExpr": "TOPDOWN_FE_BOUND.CISC / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
         "MetricName": "tma_cisc",
         "MetricThreshold": "tma_cisc > 0.05",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not delivered by the frontend due to decode stalls.",
-        "MetricExpr": "TOPDOWN_FE_BOUND.DECODE / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
         "MetricName": "tma_decode",
         "MetricThreshold": "tma_decode > 0.05",
     },
     {
         "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_dram_bound",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not consumed by the backend due to a machine clear classified as a fast nuke due to memory ordering, memory disambiguation and memory renaming.",
-        "MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
         "MetricName": "tma_fast_nuke",
         "MetricThreshold": "tma_fast_nuke > 0.05",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
-        "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / tma_info_core_slots",
         "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
-        "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / tma_info_core_slots",
         "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.15",
     },
     {
         "BriefDescription": "Counts the number of floating point divide operations per uop.",
-        "MetricExpr": "UOPS_RETIRED.FPDIV / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@UOPS_RETIRED.FPDIV@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_base_group",
         "MetricName": "tma_fpdiv_uops",
         "MetricThreshold": "tma_fpdiv_uops > 0.2",
     {
         "BriefDescription": "Counts the number of issue slots  that were not consumed by the backend due to frontend stalls.",
         "DefaultMetricgroupName": "TopdownL1",
-        "MetricExpr": "TOPDOWN_FE_BOUND.ALL / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL@ / tma_info_core_slots",
         "MetricGroup": "Default;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.2",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not delivered by the frontend due to instruction cache misses.",
-        "MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
         "MetricName": "tma_icache_misses",
         "MetricThreshold": "tma_icache_misses > 0.05",
     },
     {
         "BriefDescription": "Instructions Per Cycle",
-        "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / tma_info_core_clks",
         "MetricName": "tma_info_core_ipc",
         "Unit": "cpu_atom"
     },
     },
     {
         "BriefDescription": "Uops Per Instruction",
-        "MetricExpr": "UOPS_RETIRED.ALL / INST_RETIRED.ANY",
+        "MetricExpr": "cpu_atom@UOPS_RETIRED.ALL@ / INST_RETIRED.ANY",
         "MetricName": "tma_info_core_upi",
         "Unit": "cpu_atom"
     },
     },
     {
         "BriefDescription": "Ratio of all branches which mispredict",
-        "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.ALL_BRANCHES",
+        "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / BR_INST_RETIRED.ALL_BRANCHES",
         "MetricName": "tma_info_inst_mix_branch_mispredict_ratio",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Ratio between Mispredicted branches and unknown branches",
-        "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BACLEARS.ANY",
+        "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / BACLEARS.ANY",
         "MetricName": "tma_info_inst_mix_branch_mispredict_to_unknown_branch_ratio",
         "Unit": "cpu_atom"
     },
     },
     {
         "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_INST_RETIRED.ALL_BRANCHES",
         "MetricName": "tma_info_inst_mix_ipbranch",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_INST_RETIRED.CALL",
         "MetricName": "tma_info_inst_mix_ipcall",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Instructions per Far Branch",
-        "MetricExpr": "INST_RETIRED.ANY / (cpu_atom@BR_INST_RETIRED.FAR_BRANCH@ / 2)",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_INST_RETIRED.FAR_BRANCH@ / 2)",
         "MetricName": "tma_info_inst_mix_ipfarbranch",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Instructions per Load",
-        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / MEM_UOPS_RETIRED.ALL_LOADS",
         "MetricName": "tma_info_inst_mix_ipload",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
-        "MetricExpr": "INST_RETIRED.ANY / (cpu_atom@BR_MISP_RETIRED.COND@ - cpu_atom@BR_MISP_RETIRED.COND_TAKEN@)",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_MISP_RETIRED.COND@ - cpu_atom@BR_MISP_RETIRED.COND_TAKEN@)",
         "MetricName": "tma_info_inst_mix_ipmisp_cond_ntaken",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
-        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_TAKEN",
         "MetricName": "tma_info_inst_mix_ipmisp_cond_taken",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
-        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.INDIRECT",
         "MetricName": "tma_info_inst_mix_ipmisp_indirect",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Instructions per retired return Branch Misprediction",
-        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RETURN",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.RETURN",
         "MetricName": "tma_info_inst_mix_ipmisp_ret",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Instructions per retired Branch Misprediction",
-        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.ALL_BRANCHES",
         "MetricName": "tma_info_inst_mix_ipmispredict",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Instructions per Store",
-        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+        "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / MEM_UOPS_RETIRED.ALL_STORES",
         "MetricName": "tma_info_inst_mix_ipstore",
         "Unit": "cpu_atom"
     },
     },
     {
         "BriefDescription": "Cycle cost per DRAM hit",
-        "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+        "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
         "MetricName": "tma_info_memory_cycles_per_demand_load_dram_hit",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Cycle cost per L2 hit",
-        "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_LOAD_UOPS_RETIRED.L2_HIT",
+        "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / MEM_LOAD_UOPS_RETIRED.L2_HIT",
         "MetricName": "tma_info_memory_cycles_per_demand_load_l2_hit",
         "Unit": "cpu_atom"
     },
     {
         "BriefDescription": "Cycle cost per LLC hit",
-        "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_LOAD_UOPS_RETIRED.L3_HIT",
+        "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / MEM_LOAD_UOPS_RETIRED.L3_HIT",
         "MetricName": "tma_info_memory_cycles_per_demand_load_l3_hit",
         "Unit": "cpu_atom"
     },
     },
     {
         "BriefDescription": "Average CPU Utilization",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+        "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
         "MetricName": "tma_info_system_cpu_utilization",
         "Unit": "cpu_atom"
     },
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
-        "MetricExpr": "TOPDOWN_FE_BOUND.ITLB / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
         "MetricName": "tma_itlb_misses",
         "MetricThreshold": "tma_itlb_misses > 0.05",
     },
     {
         "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a load block.",
-        "MetricExpr": "LD_HEAD.L1_BOUND_AT_RET / tma_info_core_clks",
+        "MetricExpr": "cpu_atom@LD_HEAD.L1_BOUND_AT_RET@ / tma_info_core_clks",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_l1_bound",
         "MetricThreshold": "tma_l1_bound > 0.1",
     },
     {
         "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_l2_bound",
     },
     {
         "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
-        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_l3_bound",
     },
     {
         "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
-        "MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / tma_info_core_slots",
         "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.05",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
-        "MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
         "MetricName": "tma_mem_scheduler",
         "MetricThreshold": "tma_mem_scheduler > 0.1",
     },
     {
         "BriefDescription": "Counts the number of cycles the core is stalled due to stores or loads.",
-        "MetricExpr": "min(cpu_atom@TOPDOWN_BE_BOUND.ALL@ / tma_info_core_slots, cpu_atom@LD_HEAD.ANY_AT_RET@ / tma_info_core_clks + tma_store_bound)",
+        "MetricExpr": "min(tma_backend_bound, cpu_atom@LD_HEAD.ANY_AT_RET@ / tma_info_core_clks + tma_store_bound)",
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2",
     },
     {
         "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS)",
-        "MetricExpr": "UOPS_RETIRED.MS / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@UOPS_RETIRED.MS@ / tma_info_core_slots",
         "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_ms_uops",
         "MetricThreshold": "tma_ms_uops > 0.05",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
-        "MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
         "MetricName": "tma_non_mem_scheduler",
         "MetricThreshold": "tma_non_mem_scheduler > 0.1",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not consumed by the backend due to a machine clear (slow nuke).",
-        "MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
         "MetricName": "tma_nuke",
         "MetricThreshold": "tma_nuke > 0.05",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not delivered by the frontend due to other common frontend stalls not categorized.",
-        "MetricExpr": "TOPDOWN_FE_BOUND.OTHER / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
         "MetricName": "tma_other_fb",
         "MetricThreshold": "tma_other_fb > 0.05",
     },
     {
         "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a number of other load blocks.",
-        "MetricExpr": "LD_HEAD.OTHER_AT_RET / tma_info_core_clks",
+        "MetricExpr": "cpu_atom@LD_HEAD.OTHER_AT_RET@ / tma_info_core_clks",
         "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
         "MetricName": "tma_other_l1",
         "MetricThreshold": "tma_other_l1 > 0.05",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not delivered by the frontend due to wrong predecodes.",
-        "MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
         "MetricName": "tma_predecode",
         "MetricThreshold": "tma_predecode > 0.05",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
-        "MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
         "MetricName": "tma_register",
         "MetricThreshold": "tma_register > 0.1",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
-        "MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
         "MetricName": "tma_reorder_buffer",
         "MetricThreshold": "tma_reorder_buffer > 0.1",
     {
         "BriefDescription": "Counts the number of issue slots  that result in retirement slots.",
         "DefaultMetricgroupName": "TopdownL1",
-        "MetricExpr": "TOPDOWN_RETIRING.ALL / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL@ / tma_info_core_slots",
         "MetricGroup": "Default;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.75",
     },
     {
         "BriefDescription": "Counts the number of issue slots  that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
-        "MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / tma_info_core_slots",
+        "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / tma_info_core_slots",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
         "MetricName": "tma_serialization",
         "MetricThreshold": "tma_serialization > 0.1",
     },
     {
         "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a first level TLB miss.",
-        "MetricExpr": "LD_HEAD.DTLB_MISS_AT_RET / tma_info_core_clks",
+        "MetricExpr": "cpu_atom@LD_HEAD.DTLB_MISS_AT_RET@ / tma_info_core_clks",
         "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
         "MetricName": "tma_stlb_hit",
         "MetricThreshold": "tma_stlb_hit > 0.05",
     },
     {
         "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a second level TLB miss requiring a page walk.",
-        "MetricExpr": "LD_HEAD.PGWALK_AT_RET / tma_info_core_clks",
+        "MetricExpr": "cpu_atom@LD_HEAD.PGWALK_AT_RET@ / tma_info_core_clks",
         "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
         "MetricName": "tma_stlb_miss",
         "MetricThreshold": "tma_stlb_miss > 0.05",
     },
     {
         "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
-        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
-        "MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_core_clks",
+        "MetricExpr": "cpu_atom@LD_HEAD.ST_ADDR_AT_RET@ / tma_info_core_clks",
         "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
         "MetricName": "tma_store_fwd_blk",
         "MetricThreshold": "tma_store_fwd_blk > 0.05",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
+    {
+        "BriefDescription": "Uncore frequency per die [GHZ]",
+        "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
+        "MetricGroup": "SoC",
+        "MetricName": "UNCORE_FREQ",
+        "Unit": "cpu_core"
+    },
     {
         "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
         "MetricExpr": "(cpu_core@UOPS_DISPATCHED.PORT_0@ + cpu_core@UOPS_DISPATCHED.PORT_1@ + cpu_core@UOPS_DISPATCHED.PORT_5_11@ + cpu_core@UOPS_DISPATCHED.PORT_6@) / (5 * tma_info_core_core_clks)",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
-        "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
+        "MetricExpr": "cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks + tma_unknown_branches",
         "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
         "MetricName": "tma_branch_resteers",
         "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
     },
     {
         "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(25 * tma_info_system_average_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) + 24 * tma_info_system_average_frequency * cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
         "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
         "MetricName": "tma_contested_accesses",
     },
     {
         "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "24 * tma_info_system_average_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
         "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
         "MetricName": "tma_data_sharing",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
-        "MetricExpr": "ARITH.DIV_ACTIVE / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@ARITH.DIV_ACTIVE@ / tma_info_thread_clks",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
         "MetricName": "tma_divider",
         "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
     },
     {
         "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@ / tma_info_thread_clks",
         "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_dram_bound",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
-        "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / tma_info_thread_clks",
         "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
         "MetricName": "tma_dsb_switches",
         "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
     },
     {
         "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
-        "MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@L1D_PEND_MISS.FB_FULL@ / tma_info_thread_clks",
         "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
         "MetricName": "tma_fb_full",
         "MetricThreshold": "tma_fb_full > 0.3",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
-        "MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks",
         "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
         "MetricName": "tma_icache_misses",
         "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
     },
     {
         "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES",
         "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
         "MetricName": "tma_info_bad_spec_branch_misprediction_cost",
     },
     {
         "BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
-        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_NTAKEN",
         "MetricGroup": "Bad;BrMispredicts",
         "MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
         "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200",
     },
     {
         "BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
-        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_TAKEN",
         "MetricGroup": "Bad;BrMispredicts",
         "MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
         "MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200",
     },
     {
         "BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
-        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.RET",
         "MetricGroup": "Bad;BrMispredicts",
         "MetricName": "tma_info_bad_spec_ipmisp_ret",
         "MetricThreshold": "tma_info_bad_spec_ipmisp_ret < 500",
     },
     {
         "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.ALL_BRANCHES",
         "MetricGroup": "Bad;BadSpec;BrMispredicts",
         "MetricName": "tma_info_bad_spec_ipmispredict",
         "MetricThreshold": "tma_info_bad_spec_ipmispredict < 200",
     },
     {
         "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
         "MetricGroup": "Cor;SMT",
         "MetricName": "tma_info_botlnk_l0_core_bound_likely",
     },
     {
         "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
         "MetricGroup": "DSBmiss;Fed;tma_issueFB",
         "MetricName": "tma_info_botlnk_l2_dsb_misses",
     },
     {
         "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
         "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
         "MetricName": "tma_info_botlnk_l2_ic_misses",
     },
     {
         "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
         "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
         "MetricName": "tma_info_bottleneck_big_code",
     },
     {
         "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
         "MetricGroup": "Fed;FetchBW;Frontend",
         "MetricName": "tma_info_bottleneck_instruction_fetch_bw",
     },
     {
         "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
         "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
         "MetricName": "tma_info_bottleneck_memory_bandwidth",
     },
     {
         "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
         "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
         "MetricName": "tma_info_bottleneck_memory_data_tlbs",
     },
     {
         "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))",
         "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
         "MetricName": "tma_info_bottleneck_memory_latency",
     },
     {
         "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
         "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
         "MetricName": "tma_info_bottleneck_mispredictions",
     },
     {
         "BriefDescription": "Fraction of branches that are non-taken conditionals",
-        "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+        "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_NTAKEN@ / BR_INST_RETIRED.ALL_BRANCHES",
         "MetricGroup": "Bad;Branches;CodeGen;PGO",
         "MetricName": "tma_info_branches_cond_nt",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Fraction of branches that are taken conditionals",
-        "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+        "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_TAKEN@ / BR_INST_RETIRED.ALL_BRANCHES",
         "MetricGroup": "Bad;Branches;CodeGen;PGO",
         "MetricName": "tma_info_branches_cond_tk",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / tma_info_core_core_clks",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / tma_info_core_core_clks",
         "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
         "MetricName": "tma_info_core_coreipc",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
-        "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@)",
+        "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@)",
         "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
         "MetricName": "tma_info_core_ilp",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / cpu_core@UOPS_ISSUED.ANY@",
+        "MetricExpr": "cpu_core@IDQ.DSB_UOPS@ / cpu_core@UOPS_ISSUED.ANY@",
         "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
         "MetricName": "tma_info_frontend_dsb_coverage",
         "MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 6 > 0.35",
     },
     {
         "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
-        "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+        "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
         "MetricGroup": "DSBmiss",
         "MetricName": "tma_info_frontend_dsb_switch_cost",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Average number of Uops issued by front-end when it issued something",
-        "MetricExpr": "UOPS_ISSUED.ANY / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
+        "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
         "MetricGroup": "Fed;FetchBW",
         "MetricName": "tma_info_frontend_fetch_upc",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Average Latency for L1 instruction cache misses",
-        "MetricExpr": "ICACHE_DATA.STALLS / cpu_core@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
+        "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / cpu_core@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
         "MetricGroup": "Fed;FetchLat;IcMiss",
         "MetricName": "tma_info_frontend_icache_miss_latency",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FRONTEND_RETIRED.ANY_DSB_MISS",
         "MetricGroup": "DSBmiss;Fed",
         "MetricName": "tma_info_frontend_ipdsb_miss_ret",
         "MetricThreshold": "tma_info_frontend_ipdsb_miss_ret < 50",
     },
     {
         "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
-        "MetricExpr": "LSD.UOPS / cpu_core@UOPS_ISSUED.ANY@",
+        "MetricExpr": "cpu_core@LSD.UOPS@ / cpu_core@UOPS_ISSUED.ANY@",
         "MetricGroup": "Fed;LSD",
         "MetricName": "tma_info_frontend_lsd_coverage",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Branch instructions per taken branch.",
-        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / BR_INST_RETIRED.NEAR_TAKEN",
         "MetricGroup": "Branches;Fed;PGO",
         "MetricName": "tma_info_inst_mix_bptkbranch",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
         "MetricGroup": "Flops;InsType",
         "MetricName": "tma_info_inst_mix_iparith",
         "MetricThreshold": "tma_info_inst_mix_iparith < 10",
     },
     {
         "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@)",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@)",
         "MetricGroup": "Flops;FpVector;InsType",
         "MetricName": "tma_info_inst_mix_iparith_avx128",
         "MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
     },
     {
         "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
         "MetricGroup": "Flops;FpVector;InsType",
         "MetricName": "tma_info_inst_mix_iparith_avx256",
         "MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
     },
     {
         "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
         "MetricGroup": "Flops;FpScalar;InsType",
         "MetricName": "tma_info_inst_mix_iparith_scalar_dp",
         "MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
     },
     {
         "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
         "MetricGroup": "Flops;FpScalar;InsType",
         "MetricName": "tma_info_inst_mix_iparith_scalar_sp",
         "MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
     },
     {
         "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.ALL_BRANCHES",
         "MetricGroup": "Branches;Fed;InsType",
         "MetricName": "tma_info_inst_mix_ipbranch",
         "MetricThreshold": "tma_info_inst_mix_ipbranch < 8",
     },
     {
         "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.NEAR_CALL",
         "MetricGroup": "Branches;Fed;PGO",
         "MetricName": "tma_info_inst_mix_ipcall",
         "MetricThreshold": "tma_info_inst_mix_ipcall < 200",
     },
     {
         "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@) + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@) + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
         "MetricGroup": "Flops;InsType",
         "MetricName": "tma_info_inst_mix_ipflop",
         "MetricThreshold": "tma_info_inst_mix_ipflop < 10",
     },
     {
         "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / MEM_INST_RETIRED.ALL_LOADS",
         "MetricGroup": "InsType",
         "MetricName": "tma_info_inst_mix_ipload",
         "MetricThreshold": "tma_info_inst_mix_ipload < 3",
     },
     {
         "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / MEM_INST_RETIRED.ALL_STORES",
         "MetricGroup": "InsType",
         "MetricName": "tma_info_inst_mix_ipstore",
         "MetricThreshold": "tma_info_inst_mix_ipstore < 8",
     },
     {
         "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
-        "MetricExpr": "INST_RETIRED.ANY / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
         "MetricGroup": "Prefetches",
         "MetricName": "tma_info_inst_mix_ipswpf",
         "MetricThreshold": "tma_info_inst_mix_ipswpf < 100",
     },
     {
         "BriefDescription": "Instruction per taken branch",
-        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.NEAR_TAKEN",
         "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
         "MetricName": "tma_info_inst_mix_iptb",
         "MetricThreshold": "tma_info_inst_mix_iptb < 13",
     },
     {
         "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
+        "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / MEM_LOAD_COMPLETED.L1_MISS_ANY",
         "MetricGroup": "Mem;MemoryBound;MemoryLat",
         "MetricName": "tma_info_memory_load_miss_real_latency",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / L1D_PEND_MISS.PENDING_CYCLES",
         "MetricGroup": "Mem;MemoryBW;MemoryBound",
         "MetricName": "tma_info_memory_mlp",
         "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
     },
     {
         "BriefDescription": "Average Parallel L2 cache miss data reads",
-        "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+        "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD@ / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
         "MetricGroup": "Memory_BW;Offcore",
         "MetricName": "tma_info_memory_oro_data_l2_mlp",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Average Latency for L2 cache miss demand Loads",
-        "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+        "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / OFFCORE_REQUESTS.DEMAND_DATA_RD",
         "MetricGroup": "Memory_Lat;Offcore",
         "MetricName": "tma_info_memory_oro_load_l2_miss_latency",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Average Parallel L2 cache miss demand Loads",
-        "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
+        "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
         "MetricGroup": "Memory_BW;Offcore",
         "MetricName": "tma_info_memory_oro_load_l2_mlp",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Average Latency for L3 cache miss demand Loads",
-        "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+        "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
         "MetricGroup": "Memory_Lat;Offcore",
         "MetricName": "tma_info_memory_oro_load_l3_miss_latency",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
-        "MetricExpr": "UOPS_EXECUTED.THREAD / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+        "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
         "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
         "MetricName": "tma_info_pipeline_execute",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Instructions per a microcode Assist invocation",
-        "MetricExpr": "INST_RETIRED.ANY / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@",
         "MetricGroup": "Pipeline;Ret;Retire",
         "MetricName": "tma_info_pipeline_ipassist",
         "MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
     },
     {
         "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
-        "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+        "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
         "MetricGroup": "Pipeline;Ret",
         "MetricName": "tma_info_pipeline_strings_cycles",
         "MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1",
     },
     {
         "BriefDescription": "Average CPU Utilization",
-        "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+        "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
         "MetricGroup": "HPC;Summary",
         "MetricName": "tma_info_system_cpu_utilization",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
-        "MetricExpr": "INST_RETIRED.ANY / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u",
         "MetricGroup": "Branches;OS",
         "MetricName": "tma_info_system_ipfarbranch",
         "MetricThreshold": "tma_info_system_ipfarbranch < 1e6",
     },
     {
         "BriefDescription": "Average number of parallel data read requests to external memory",
-        "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / cpu_core@UNC_ARB_DAT_OCCUPANCY.RD\\,cmask\\=1@",
+        "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@",
         "MetricGroup": "Mem;MemoryBW;SoC",
         "MetricName": "tma_info_system_mem_parallel_reads",
         "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
     },
     {
         "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.RD + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.RD",
         "MetricGroup": "Mem;MemoryLat;SoC",
         "MetricName": "tma_info_system_mem_read_latency",
     },
     {
         "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)",
+        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.ALL + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.ALL",
         "MetricGroup": "Mem;SoC",
         "MetricName": "tma_info_system_mem_request_latency",
     },
     {
         "BriefDescription": "The ratio of Executed- by Issued-Uops",
-        "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+        "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / UOPS_ISSUED.ANY",
         "MetricGroup": "Cor;Pipeline",
         "MetricName": "tma_info_thread_execute_per_issue",
         "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.",
     },
     {
         "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
-        "MetricExpr": "INST_RETIRED.ANY / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / tma_info_thread_clks",
         "MetricGroup": "Ret;Summary",
         "MetricName": "tma_info_thread_ipc",
         "Unit": "cpu_core"
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
-        "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks",
         "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
         "MetricName": "tma_itlb_misses",
         "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
     },
     {
         "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@) / tma_info_thread_clks",
         "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_l2_bound",
     },
     {
         "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
-        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@) / tma_info_thread_clks",
         "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_l3_bound",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
-        "MetricExpr": "DECODE.LCP / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@DECODE.LCP@ / tma_info_thread_clks",
         "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
         "MetricName": "tma_lcp",
         "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
     },
     {
         "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
-        "MetricExpr": "UOPS_DISPATCHED.PORT_2_3_10 / (3 * tma_info_core_core_clks)",
+        "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_2_3_10@ / (3 * tma_info_core_core_clks)",
         "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
         "MetricName": "tma_load_op_utilization",
         "MetricThreshold": "tma_load_op_utilization > 0.6",
     },
     {
         "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
-        "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@ / tma_info_thread_clks",
         "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
         "MetricName": "tma_load_stlb_miss",
         "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(16 * max(0, cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ - cpu_core@L2_RQSTS.ALL_RFO@) + cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@ * (10 * cpu_core@L2_RQSTS.RFO_HIT@ + min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@))) / tma_info_thread_clks",
         "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
         "MetricName": "tma_lock_latency",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
+        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "13 * cpu_core@MISC2_RETIRED.LFENCE@ / tma_info_thread_clks",
         "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
         "MetricName": "tma_memory_fence",
     },
     {
         "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "tma_light_operations * cpu_core@MEM_UOP_RETIRED.ANY@ / (tma_retiring * tma_info_thread_slots)",
         "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
         "MetricName": "tma_memory_operations",
     },
     {
         "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
-        "MetricExpr": "UOPS_RETIRED.MS / tma_info_thread_slots",
+        "MetricExpr": "cpu_core@UOPS_RETIRED.MS@ / tma_info_thread_slots",
         "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
         "MetricName": "tma_microcode_sequencer",
         "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
     },
     {
         "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))",
         "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
         "MetricName": "tma_other_light_ops",
     },
     {
         "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
-        "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_core_clks",
+        "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_0@ / tma_info_core_core_clks",
         "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
         "MetricName": "tma_port_0",
         "MetricThreshold": "tma_port_0 > 0.6",
     },
     {
         "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
-        "MetricExpr": "UOPS_DISPATCHED.PORT_1 / tma_info_core_core_clks",
+        "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_1@ / tma_info_core_core_clks",
         "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
         "MetricName": "tma_port_1",
         "MetricThreshold": "tma_port_1 > 0.6",
     },
     {
         "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
-        "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_core_clks",
+        "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_6@ / tma_info_core_core_clks",
         "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
         "MetricName": "tma_port_6",
         "MetricThreshold": "tma_port_6 > 0.6",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
-        "MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ / tma_info_thread_clks",
         "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
         "MetricName": "tma_ports_utilized_1",
         "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
-        "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
+        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+        "MetricExpr": "cpu_core@EXE_ACTIVITY.2_PORTS_UTIL@ / tma_info_thread_clks",
         "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
         "MetricName": "tma_ports_utilized_2",
         "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
-        "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
+        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+        "MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks",
         "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
         "MetricName": "tma_ports_utilized_3m",
         "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
-        "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@RESOURCE_STALLS.SCOREBOARD@ / tma_info_thread_clks",
         "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
         "MetricName": "tma_serializing_operation",
         "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
     },
     {
         "BriefDescription": "This metric represents Shuffle (cross \"vector lane\" data transfers) uops fraction the CPU has retired.",
-        "MetricExpr": "INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_thread_slots)",
+        "MetricExpr": "cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)",
         "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group",
         "MetricName": "tma_shuffles",
         "MetricThreshold": "tma_shuffles > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
-        "MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks",
+        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+        "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.PAUSE@ / tma_info_thread_clks",
         "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
         "MetricName": "tma_slow_pause",
         "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
     },
     {
         "BriefDescription": "This metric represents rate of split store accesses",
-        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
-        "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
+        "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ / tma_info_core_core_clks",
         "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
         "MetricName": "tma_split_stores",
         "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
     },
     {
         "BriefDescription": "This metric estimates how often CPU was stalled  due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
-        "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@EXE_ACTIVITY.BOUND_ON_STORES@ / tma_info_thread_clks",
         "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_store_bound",
         "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
     },
     {
         "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
-        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "13 * cpu_core@LD_BLOCKS.STORE_FORWARD@ / tma_info_thread_clks",
         "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
         "MetricName": "tma_store_fwd_blk",
     },
     {
         "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
-        "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
+        "MetricExpr": "cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@ / tma_info_core_core_clks",
         "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
         "MetricName": "tma_store_stlb_miss",
         "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
-        "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_thread_clks",
+        "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks",
         "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
         "MetricName": "tma_unknown_branches",
         "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
index c150c14ac6ed9925888fb9afc5e15e8dabdc1d8f..a35edf7d86a97e20570cf3f053d1b314be37fd20 100644 (file)
     },
     {
         "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_BOUND_STALLS.LOAD",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_dram_bound",
     },
     {
         "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_BOUND_STALLS.LOAD",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_l2_bound",
     },
     {
         "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
-        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_BOUND_STALLS.LOAD",
         "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_l3_bound",
     },
     {
         "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
-        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_core_clks",
         "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
         "MetricName": "tma_store_fwd_blk",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json b/tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json
new file mode 100644 (file)
index 0000000..55263e5
--- /dev/null
@@ -0,0 +1,101 @@
+[
+  {
+    "EventName": "umc_mem_clk",
+    "PublicDescription": "Number of memory clock cycles.",
+    "EventCode": "0x00",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_act_cmd.all",
+    "PublicDescription": "Number of ACTIVATE commands sent.",
+    "EventCode": "0x05",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_act_cmd.rd",
+    "PublicDescription": "Number of ACTIVATE commands sent for reads.",
+    "EventCode": "0x05",
+    "RdWrMask": "0x1",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_act_cmd.wr",
+    "PublicDescription": "Number of ACTIVATE commands sent for writes.",
+    "EventCode": "0x05",
+    "RdWrMask": "0x2",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_pchg_cmd.all",
+    "PublicDescription": "Number of PRECHARGE commands sent.",
+    "EventCode": "0x06",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_pchg_cmd.rd",
+    "PublicDescription": "Number of PRECHARGE commands sent for reads.",
+    "EventCode": "0x06",
+    "RdWrMask": "0x1",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_pchg_cmd.wr",
+    "PublicDescription": "Number of PRECHARGE commands sent for writes.",
+    "EventCode": "0x06",
+    "RdWrMask": "0x2",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_cas_cmd.all",
+    "PublicDescription": "Number of CAS commands sent.",
+    "EventCode": "0x0a",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_cas_cmd.rd",
+    "PublicDescription": "Number of CAS commands sent for reads.",
+    "EventCode": "0x0a",
+    "RdWrMask": "0x1",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_cas_cmd.wr",
+    "PublicDescription": "Number of CAS commands sent for writes.",
+    "EventCode": "0x0a",
+    "RdWrMask": "0x2",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_data_slot_clks.all",
+    "PublicDescription": "Number of clocks used by the data bus.",
+    "EventCode": "0x14",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_data_slot_clks.rd",
+    "PublicDescription": "Number of clocks used by the data bus for reads.",
+    "EventCode": "0x14",
+    "RdWrMask": "0x1",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  },
+  {
+    "EventName": "umc_data_slot_clks.wr",
+    "PublicDescription": "Number of clocks used by the data bus for writes.",
+    "EventCode": "0x14",
+    "RdWrMask": "0x2",
+    "PerPkg": "1",
+    "Unit": "UMCPMC"
+  }
+]
index 5e6a793acf7b2a8e8785cbb17bcb8baedcd92c4d..96e06401c6cbbe22992266cc3306e3f10dcc6262 100644 (file)
     "MetricGroup": "data_fabric",
     "PerPkg": "1",
     "ScaleUnit": "6.103515625e-5MiB"
+  },
+  {
+    "MetricName": "umc_data_bus_utilization",
+    "BriefDescription": "Memory controller data bus utilization.",
+    "MetricExpr": "d_ratio(umc_data_slot_clks.all / 2, umc_mem_clk)",
+    "MetricGroup": "memory_controller",
+    "PerPkg": "1",
+    "ScaleUnit": "100%"
+  },
+  {
+    "MetricName": "umc_cas_cmd_rate",
+    "BriefDescription": "Memory controller CAS command rate.",
+    "MetricExpr": "d_ratio(umc_cas_cmd.all * 1000, umc_mem_clk)",
+    "MetricGroup": "memory_controller",
+    "PerPkg": "1"
+  },
+  {
+    "MetricName": "umc_cas_cmd_read_ratio",
+    "BriefDescription": "Ratio of memory controller CAS commands for reads.",
+    "MetricExpr": "d_ratio(umc_cas_cmd.rd, umc_cas_cmd.all)",
+    "MetricGroup": "memory_controller",
+    "PerPkg": "1",
+    "ScaleUnit": "100%"
+  },
+  {
+    "MetricName": "umc_cas_cmd_write_ratio",
+    "BriefDescription": "Ratio of memory controller CAS commands for writes.",
+    "MetricExpr": "d_ratio(umc_cas_cmd.wr, umc_cas_cmd.all)",
+    "MetricGroup": "memory_controller",
+    "PerPkg": "1",
+    "ScaleUnit": "100%"
+  },
+  {
+    "MetricName": "umc_mem_read_bandwidth",
+    "BriefDescription": "Estimated memory read bandwidth.",
+    "MetricExpr": "(umc_cas_cmd.rd * 64) / 1e6 / duration_time",
+    "MetricGroup": "memory_controller",
+    "PerPkg": "1",
+    "ScaleUnit": "1MB/s"
+  },
+  {
+    "MetricName": "umc_mem_write_bandwidth",
+    "BriefDescription": "Estimated memory write bandwidth.",
+    "MetricExpr": "(umc_cas_cmd.wr * 64) / 1e6 / duration_time",
+    "MetricGroup": "memory_controller",
+    "PerPkg": "1",
+    "ScaleUnit": "1MB/s"
+  },
+  {
+    "MetricName": "umc_mem_bandwidth",
+    "BriefDescription": "Estimated combined memory bandwidth.",
+    "MetricExpr": "(umc_cas_cmd.all * 64) / 1e6 / duration_time",
+    "MetricGroup": "memory_controller",
+    "PerPkg": "1",
+    "ScaleUnit": "1MB/s"
+  },
+  {
+    "MetricName": "umc_activate_cmd_rate",
+    "BriefDescription": "Memory controller ACTIVATE command rate.",
+    "MetricExpr": "d_ratio(umc_act_cmd.all * 1000, umc_mem_clk)",
+    "MetricGroup": "memory_controller",
+    "PerPkg": "1"
+  },
+  {
+    "MetricName": "umc_precharge_cmd_rate",
+    "BriefDescription": "Memory controller PRECHARGE command rate.",
+    "MetricExpr": "d_ratio(umc_pchg_cmd.all * 1000, umc_mem_clk)",
+    "MetricGroup": "memory_controller",
+    "PerPkg": "1"
   }
 ]
index 84c132af3dfa5717c6477f2cfe53c60ef0466a1d..8bc6c07078566e0f2baa317041fa45dc95c0c60f 100644 (file)
         "MetricName": "uncore_frequency",
         "ScaleUnit": "1GHz"
     },
+    {
+        "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)",
+        "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+        "MetricName": "upi_data_receive_bw",
+        "ScaleUnit": "1MB/s"
+    },
     {
         "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
         "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
index 4a9d211e9d4f11cbb7fffacc91139b7797fbe2b7..1bdefaf96287777b1ca7ec8cc3fed35cd8a0418f 100644 (file)
         "UMask": "0x10"
     },
     {
-        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
+        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]",
         "EventCode": "0xb3",
         "EventName": "FP_ARITH_DISPATCHED.PORT_0",
         "SampleAfterValue": "2000003",
         "UMask": "0x1"
     },
     {
-        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
+        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]",
         "EventCode": "0xb3",
         "EventName": "FP_ARITH_DISPATCHED.PORT_1",
         "SampleAfterValue": "2000003",
         "UMask": "0x2"
     },
     {
-        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
+        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]",
         "EventCode": "0xb3",
         "EventName": "FP_ARITH_DISPATCHED.PORT_5",
         "SampleAfterValue": "2000003",
         "UMask": "0x4"
     },
+    {
+        "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]",
+        "EventCode": "0xb3",
+        "EventName": "FP_ARITH_DISPATCHED.V0",
+        "SampleAfterValue": "2000003",
+        "UMask": "0x1"
+    },
+    {
+        "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]",
+        "EventCode": "0xb3",
+        "EventName": "FP_ARITH_DISPATCHED.V1",
+        "SampleAfterValue": "2000003",
+        "UMask": "0x2"
+    },
+    {
+        "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]",
+        "EventCode": "0xb3",
+        "EventName": "FP_ARITH_DISPATCHED.V2",
+        "SampleAfterValue": "2000003",
+        "UMask": "0x4"
+    },
     {
         "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below.  Each count represents 2 computation operations, one for each element.  Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
         "EventCode": "0xc7",
index 6dcf3b763af4f96d73f00f27001e89c52e5cf895..1f8200fb89647626f9a91853b36428fcc3594ef6 100644 (file)
@@ -1,20 +1,4 @@
 [
-    {
-        "BriefDescription": "AMX retired arithmetic BF16 operations.",
-        "EventCode": "0xce",
-        "EventName": "AMX_OPS_RETIRED.BF16",
-        "PublicDescription": "Number of AMX-based retired arithmetic bfloat16 (BF16) floating-point operations. Counts TDPBF16PS FP instructions. SW to use operation multiplier of 4",
-        "SampleAfterValue": "1000003",
-        "UMask": "0x2"
-    },
-    {
-        "BriefDescription": "AMX retired arithmetic integer 8-bit operations.",
-        "EventCode": "0xce",
-        "EventName": "AMX_OPS_RETIRED.INT8",
-        "PublicDescription": "Number of AMX-based retired arithmetic integer operations of 8-bit width source operands. Counts TDPB[SS,UU,US,SU]D instructions. SW should use operation multiplier of 8.",
-        "SampleAfterValue": "1000003",
-        "UMask": "0x1"
-    },
     {
         "BriefDescription": "This event is deprecated. Refer to new event ARITH.DIV_ACTIVE",
         "CounterMask": "1",
         "UMask": "0x1"
     },
     {
-        "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+        "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
         "EventCode": "0xad",
         "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
         "MSRIndex": "0x3F7",
index 09d840c7da4c9c9a9314bcd9edcc00a4f008fbc8..65d088556bae8dc1c4fce01201da5c99a3213a99 100644 (file)
         "Unit": "M3UPI"
     },
     {
-        "BriefDescription": "Number of allocations into the CRS Egress  used to queue up requests destined to the mesh (AD Bouncable)",
+        "BriefDescription": "Number of allocations into the CRS Egress  used to queue up requests destined to the mesh (AD Bounceable)",
         "EventCode": "0x47",
         "EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_BNC",
         "PerPkg": "1",
-        "PublicDescription": "AD Bouncable : Number of allocations into the CRS Egress",
+        "PublicDescription": "AD Bounceable : Number of allocations into the CRS Egress",
         "UMask": "0x1",
         "Unit": "MDF"
     },
         "Unit": "MDF"
     },
     {
-        "BriefDescription": "Number of allocations into the CRS Egress  used to queue up requests destined to the mesh (BL Bouncable)",
+        "BriefDescription": "Number of allocations into the CRS Egress  used to queue up requests destined to the mesh (BL Bounceable)",
         "EventCode": "0x47",
         "EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_BNC",
         "PerPkg": "1",
-        "PublicDescription": "BL Bouncable : Number of allocations into the CRS Egress",
+        "PublicDescription": "BL Bounceable : Number of allocations into the CRS Egress",
         "UMask": "0x4",
         "Unit": "MDF"
     },
index 557080b74ee50dc2be924f11d3c5d9802481bd4e..0761980c34a04014cdbde96a98ed112c1a33bc2b 100644 (file)
         "UMask": "0x70ff010",
         "Unit": "IIO"
     },
+    {
+        "BriefDescription": ": IOTLB Hits to a 1G Page",
+        "EventCode": "0x40",
+        "EventName": "UNC_IIO_IOMMU0.1G_HITS",
+        "PerPkg": "1",
+        "PortMask": "0x0000",
+        "PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.",
+        "UMask": "0x10",
+        "Unit": "IIO"
+    },
+    {
+        "BriefDescription": ": IOTLB Hits to a 2M Page",
+        "EventCode": "0x40",
+        "EventName": "UNC_IIO_IOMMU0.2M_HITS",
+        "PerPkg": "1",
+        "PortMask": "0x0000",
+        "PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.",
+        "UMask": "0x8",
+        "Unit": "IIO"
+    },
+    {
+        "BriefDescription": ": IOTLB Hits to a 4K Page",
+        "EventCode": "0x40",
+        "EventName": "UNC_IIO_IOMMU0.4K_HITS",
+        "PerPkg": "1",
+        "PortMask": "0x0000",
+        "PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.",
+        "UMask": "0x4",
+        "Unit": "IIO"
+    },
     {
         "BriefDescription": ": Context cache hits",
         "EventCode": "0x40",
index e98602c667072a1ac916e05f9f3e50ac5a3ed1f3..71d78a7841ea826073622118fb6e3fa44b1b89bc 100644 (file)
         "MetricName": "uncore_frequency",
         "ScaleUnit": "1GHz"
     },
+    {
+        "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)",
+        "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+        "MetricName": "upi_data_receive_bw",
+        "ScaleUnit": "1MB/s"
+    },
     {
         "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
         "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
index 63d5faf2fc43ee963eb1bc7dfebba1ecfa7653a6..11810daaf1503062a64b8fa207537296e2e7335a 100644 (file)
@@ -19,7 +19,7 @@
         "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
         "EventCode": "0x28",
         "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
-        "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture).  This includes high current AVX 512-bit instructions.",
+        "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture).  This includes high current AVX 512-bit instructions.",
         "SampleAfterValue": "200003",
         "UMask": "0x20"
     },
index 176e5ef2a24af9e99c5119c2bff7d8ec195936ec..45ee6bceba7f1365a985522ee0f6ef1eadacad90 100644 (file)
         "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
         "EventCode": "0x5e",
         "EventName": "RS_EVENTS.EMPTY_CYCLES",
-        "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
+        "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
         "SampleAfterValue": "1000003",
         "UMask": "0x1"
     },
index f87ea3f66d1becd35dcc6f92751d855fe19f0f66..a066a009c51178f7c835fb2d8c757f4772023eab 100644 (file)
@@ -38,7 +38,7 @@
         "EventCode": "0x10",
         "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
         "PerPkg": "1",
-        "PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations servied by the IRP",
+        "PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations serviced by the IRP",
         "UMask": "0x80",
         "Unit": "IRP"
     },
@@ -65,7 +65,7 @@
         "EventCode": "0x10",
         "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
         "PerPkg": "1",
-        "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations servied by the IRP",
+        "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations serviced by the IRP",
         "UMask": "0x40",
         "Unit": "IRP"
     },
         "EventCode": "0x11",
         "EventName": "UNC_I_TRANSACTIONS.WRITES",
         "PerPkg": "1",
-        "PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore.  This can be filtered based on request type in addition to the source queue.  Note the special filtering equation.  We do OR-reduction on the request type.  If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Trackes only write requests.  Each write request should have a prefetch, so there is no need to explicitly track these requests.  For writes that are tickled and have to retry, the counter will be incremented for each retry.",
+        "PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore.  This can be filtered based on request type in addition to the source queue.  Note the special filtering equation.  We do OR-reduction on the request type.  If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks only write requests.  Each write request should have a prefetch, so there is no need to explicitly track these requests.  For writes that are tickled and have to retry, the counter will be incremented for each retry.",
         "UMask": "0x2",
         "Unit": "IRP"
     },
index e571683f59f3d587637b5be1078801606673a5da..4d1deed4437ab24fb1fb060819cc40deb9fb7daa 100644 (file)
@@ -7,7 +7,7 @@ GenuineIntel-6-56,v11,broadwellde,core
 GenuineIntel-6-4F,v22,broadwellx,core
 GenuineIntel-6-55-[56789ABCDEF],v1.20,cascadelakex,core
 GenuineIntel-6-9[6C],v1.04,elkhartlake,core
-GenuineIntel-6-CF,v1.01,emeraldrapids,core
+GenuineIntel-6-CF,v1.02,emeraldrapids,core
 GenuineIntel-6-5[CF],v13,goldmont,core
 GenuineIntel-6-7A,v1.01,goldmontplus,core
 GenuineIntel-6-B6,v1.00,grandridge,core
@@ -15,7 +15,7 @@ GenuineIntel-6-A[DE],v1.01,graniterapids,core
 GenuineIntel-6-(3C|45|46),v33,haswell,core
 GenuineIntel-6-3F,v28,haswellx,core
 GenuineIntel-6-7[DE],v1.19,icelake,core
-GenuineIntel-6-6[AC],v1.21,icelakex,core
+GenuineIntel-6-6[AC],v1.23,icelakex,core
 GenuineIntel-6-3A,v24,ivybridge,core
 GenuineIntel-6-3E,v24,ivytown,core
 GenuineIntel-6-2D,v24,jaketown,core
@@ -26,7 +26,7 @@ GenuineIntel-6-1[AEF],v4,nehalemep,core
 GenuineIntel-6-2E,v4,nehalemex,core
 GenuineIntel-6-A7,v1.01,rocketlake,core
 GenuineIntel-6-2A,v19,sandybridge,core
-GenuineIntel-6-8F,v1.16,sapphirerapids,core
+GenuineIntel-6-8F,v1.17,sapphirerapids,core
 GenuineIntel-6-AF,v1.00,sierraforest,core
 GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
 GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v57,skylake,core
index 0c880e41566995eb8cb2c5b3cafd05713161347c..27433fc15ede77b2de29677fdef4cdd4ce2b7a77 100644 (file)
     },
     {
         "BriefDescription": "Average number of parallel data read requests to external memory",
-        "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / cpu@UNC_ARB_DAT_OCCUPANCY.RD\\,cmask\\=1@",
+        "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@",
         "MetricGroup": "Mem;MemoryBW;SoC",
         "MetricName": "tma_info_system_mem_parallel_reads",
         "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
index 4a9d211e9d4f11cbb7fffacc91139b7797fbe2b7..1bdefaf96287777b1ca7ec8cc3fed35cd8a0418f 100644 (file)
         "UMask": "0x10"
     },
     {
-        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
+        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]",
         "EventCode": "0xb3",
         "EventName": "FP_ARITH_DISPATCHED.PORT_0",
         "SampleAfterValue": "2000003",
         "UMask": "0x1"
     },
     {
-        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
+        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]",
         "EventCode": "0xb3",
         "EventName": "FP_ARITH_DISPATCHED.PORT_1",
         "SampleAfterValue": "2000003",
         "UMask": "0x2"
     },
     {
-        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
+        "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]",
         "EventCode": "0xb3",
         "EventName": "FP_ARITH_DISPATCHED.PORT_5",
         "SampleAfterValue": "2000003",
         "UMask": "0x4"
     },
+    {
+        "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]",
+        "EventCode": "0xb3",
+        "EventName": "FP_ARITH_DISPATCHED.V0",
+        "SampleAfterValue": "2000003",
+        "UMask": "0x1"
+    },
+    {
+        "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]",
+        "EventCode": "0xb3",
+        "EventName": "FP_ARITH_DISPATCHED.V1",
+        "SampleAfterValue": "2000003",
+        "UMask": "0x2"
+    },
+    {
+        "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]",
+        "EventCode": "0xb3",
+        "EventName": "FP_ARITH_DISPATCHED.V2",
+        "SampleAfterValue": "2000003",
+        "UMask": "0x4"
+    },
     {
         "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below.  Each count represents 2 computation operations, one for each element.  Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
         "EventCode": "0xc7",
index 6dcf3b763af4f96d73f00f27001e89c52e5cf895..2cfe814d20151c301deeed6562dc5bcdf10b3f42 100644 (file)
         "UMask": "0x1"
     },
     {
-        "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+        "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
         "EventCode": "0xad",
         "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
         "MSRIndex": "0x3F7",
index 06c6d67cb76b073d7862899b180383ac8753503b..56e54babcc26f16aee88abae0716675e3ab97c83 100644 (file)
     },
     {
         "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(76 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 75.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
         "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
         "MetricName": "tma_contested_accesses",
     },
     {
         "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "75.5 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
         "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
         "MetricName": "tma_data_sharing",
     },
     {
         "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks - tma_pmm_bound if #has_pmem > 0 else MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks)",
         "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_dram_bound",
     },
     {
         "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES",
         "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
         "MetricName": "tma_info_bad_spec_branch_misprediction_cost",
     },
     {
         "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
         "MetricGroup": "Cor;SMT",
         "MetricName": "tma_info_botlnk_l0_core_bound_likely",
     },
     {
         "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
         "MetricGroup": "DSBmiss;Fed;tma_issueFB",
         "MetricName": "tma_info_botlnk_l2_dsb_misses",
     },
     {
         "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
         "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
         "MetricName": "tma_info_botlnk_l2_ic_misses",
     },
     {
         "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
         "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
         "MetricName": "tma_info_bottleneck_big_code",
     },
     {
         "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
         "MetricGroup": "Fed;FetchBW;Frontend",
         "MetricName": "tma_info_bottleneck_instruction_fetch_bw",
     },
     {
         "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
         "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
         "MetricName": "tma_info_bottleneck_memory_bandwidth",
     },
     {
         "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
         "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
         "MetricName": "tma_info_bottleneck_memory_data_tlbs",
     },
     {
         "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound))",
         "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
         "MetricName": "tma_info_bottleneck_memory_latency",
     },
     {
         "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
         "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
         "MetricName": "tma_info_bottleneck_mispredictions",
     },
     {
         "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_system_socket_clks / duration_time)",
         "MetricGroup": "Mem;MemoryLat;SoC",
         "MetricName": "tma_info_system_mem_read_latency",
     },
     {
         "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
         "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_l2_bound",
     },
     {
         "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
-        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
         "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
         "MetricName": "tma_l3_bound",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
         "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
         "MetricName": "tma_lock_latency",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
+        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "13 * MISC2_RETIRED.LFENCE / tma_info_thread_clks",
         "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
         "MetricName": "tma_memory_fence",
     },
     {
         "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "tma_light_operations * MEM_UOP_RETIRED.ANY / (tma_retiring * tma_info_thread_slots)",
         "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
         "MetricName": "tma_memory_operations",
     },
     {
         "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
-        "MetricConstraint": "NO_GROUP_EVENTS",
         "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))",
         "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
         "MetricName": "tma_other_light_ops",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
         "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
         "MetricName": "tma_ports_utilized_2",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
         "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
         "MetricName": "tma_ports_utilized_3m",
     },
     {
         "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks",
         "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
         "MetricName": "tma_slow_pause",
     },
     {
         "BriefDescription": "This metric represents rate of split store accesses",
-        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
         "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
         "MetricName": "tma_split_stores",
     },
     {
         "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
-        "MetricConstraint": "NO_GROUP_EVENTS_NMI",
         "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
         "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
         "MetricName": "tma_store_fwd_blk",
         "MetricName": "uncore_frequency",
         "ScaleUnit": "1GHz"
     },
+    {
+        "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)",
+        "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+        "MetricName": "upi_data_receive_bw",
+        "ScaleUnit": "1MB/s"
+    },
     {
         "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
         "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
index 09d840c7da4c9c9a9314bcd9edcc00a4f008fbc8..65d088556bae8dc1c4fce01201da5c99a3213a99 100644 (file)
         "Unit": "M3UPI"
     },
     {
-        "BriefDescription": "Number of allocations into the CRS Egress  used to queue up requests destined to the mesh (AD Bouncable)",
+        "BriefDescription": "Number of allocations into the CRS Egress  used to queue up requests destined to the mesh (AD Bounceable)",
         "EventCode": "0x47",
         "EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_BNC",
         "PerPkg": "1",
-        "PublicDescription": "AD Bouncable : Number of allocations into the CRS Egress",
+        "PublicDescription": "AD Bounceable : Number of allocations into the CRS Egress",
         "UMask": "0x1",
         "Unit": "MDF"
     },
         "Unit": "MDF"
     },
     {
-        "BriefDescription": "Number of allocations into the CRS Egress  used to queue up requests destined to the mesh (BL Bouncable)",
+        "BriefDescription": "Number of allocations into the CRS Egress  used to queue up requests destined to the mesh (BL Bounceable)",
         "EventCode": "0x47",
         "EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_BNC",
         "PerPkg": "1",
-        "PublicDescription": "BL Bouncable : Number of allocations into the CRS Egress",
+        "PublicDescription": "BL Bounceable : Number of allocations into the CRS Egress",
         "UMask": "0x4",
         "Unit": "MDF"
     },
index 8b5f54fed10339640840d49c01cc7b76e935a9ee..03596db8771016b931673489b6abfc97bc1522a4 100644 (file)
         "UMask": "0x70ff010",
         "Unit": "IIO"
     },
+    {
+        "BriefDescription": ": IOTLB Hits to a 1G Page",
+        "EventCode": "0x40",
+        "EventName": "UNC_IIO_IOMMU0.1G_HITS",
+        "PerPkg": "1",
+        "PortMask": "0x0000",
+        "PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.",
+        "UMask": "0x10",
+        "Unit": "IIO"
+    },
+    {
+        "BriefDescription": ": IOTLB Hits to a 2M Page",
+        "EventCode": "0x40",
+        "EventName": "UNC_IIO_IOMMU0.2M_HITS",
+        "PerPkg": "1",
+        "PortMask": "0x0000",
+        "PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.",
+        "UMask": "0x8",
+        "Unit": "IIO"
+    },
+    {
+        "BriefDescription": ": IOTLB Hits to a 4K Page",
+        "EventCode": "0x40",
+        "EventName": "UNC_IIO_IOMMU0.4K_HITS",
+        "PerPkg": "1",
+        "PortMask": "0x0000",
+        "PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.",
+        "UMask": "0x4",
+        "Unit": "IIO"
+    },
     {
         "BriefDescription": ": Context cache hits",
         "EventCode": "0x40",
index 4a8f8eeb7525594483fa050cd53262b3a8438c7a..ec3aa5ef00a3c79bdf8cae408a271d49754204be 100644 (file)
         "MetricName": "uncore_frequency",
         "ScaleUnit": "1GHz"
     },
+    {
+        "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)",
+        "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+        "MetricName": "upi_data_receive_bw",
+        "ScaleUnit": "1MB/s"
+    },
     {
         "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
         "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
index 3c091ab753059072973a37cc19a89e2ef0f0c8af..53ab050c8fa436f584867c707a91a1ae985aa567 100755 (executable)
@@ -83,7 +83,7 @@ def c_len(s: str) -> int:
   """Return the length of s a C string
 
   This doesn't handle all escape characters properly. It first assumes
-  all \ are for escaping, it then adjusts as it will have over counted
+  all \\ are for escaping, it then adjusts as it will have over counted
   \\. The code uses \000 rather than \0 as a terminator as an adjacent
   number would be folded into a string of \0 (ie. "\0" + "5" doesn't
   equal a terminator followed by the number 5 but the escape of
@@ -286,6 +286,7 @@ class JsonEvent:
           'imx8_ddr': 'imx8_ddr',
           'L3PMC': 'amd_l3',
           'DFPMC': 'amd_df',
+          'UMCPMC': 'amd_umc',
           'cpu_core': 'cpu_core',
           'cpu_atom': 'cpu_atom',
           'ali_drw': 'ali_drw',
@@ -354,6 +355,7 @@ class JsonEvent:
         ('SampleAfterValue', 'period='),
         ('UMask', 'umask='),
         ('NodeType', 'type='),
+        ('RdWrMask', 'rdwrmask='),
     ]
     for key, value in event_fields:
       if key in jd and jd[key] != '0':
index d59ff53f1d946c01e59038f353b5658658dee1e7..d973c2baed1c8559d2c769516a07542ecc993dc5 100755 (executable)
@@ -45,8 +45,8 @@ parser = OptionParser(option_list=option_list)
 # Initialize global dicts and regular expression
 disasm_cache = dict()
 cpu_data = dict()
-disasm_re = re.compile("^\s*([0-9a-fA-F]+):")
-disasm_func_re = re.compile("^\s*([0-9a-fA-F]+)\s.*:")
+disasm_re = re.compile(r"^\s*([0-9a-fA-F]+):")
+disasm_func_re = re.compile(r"^\s*([0-9a-fA-F]+)\s.*:")
 cache_size = 64*1024
 
 glb_source_file_name   = None
@@ -188,6 +188,17 @@ def process_event(param_dict):
        dso_end = get_optional(param_dict, "dso_map_end")
        symbol = get_optional(param_dict, "symbol")
 
+       cpu = sample["cpu"]
+       ip = sample["ip"]
+       addr = sample["addr"]
+
+       # Initialize CPU data if it's empty, and directly return back
+       # if this is the first tracing event for this CPU.
+       if (cpu_data.get(str(cpu) + 'addr') == None):
+               cpu_data[str(cpu) + 'addr'] = addr
+               return
+
+
        if (options.verbose == True):
                print("Event type: %s" % name)
                print_sample(sample)
@@ -209,16 +220,6 @@ def process_event(param_dict):
        if (name[0:8] != "branches"):
                return
 
-       cpu = sample["cpu"]
-       ip = sample["ip"]
-       addr = sample["addr"]
-
-       # Initialize CPU data if it's empty, and directly return back
-       # if this is the first tracing event for this CPU.
-       if (cpu_data.get(str(cpu) + 'addr') == None):
-               cpu_data[str(cpu) + 'addr'] = addr
-               return
-
        # The format for packet is:
        #
        #                 +------------+------------+------------+
@@ -258,8 +259,9 @@ def process_event(param_dict):
 
        if (options.objdump_name != None):
                # It doesn't need to decrease virtual memory offset for disassembly
-               # for kernel dso, so in this case we set vm_start to zero.
-               if (dso == "[kernel.kallsyms]"):
+               # for kernel dso and executable file dso, so in this case we set
+               # vm_start to zero.
+               if (dso == "[kernel.kallsyms]" or dso_start == 0x400000):
                        dso_vm_start = 0
                else:
                        dso_vm_start = int(dso_start)
index 2560a042dc6fa4e9c0cd80dd8b5f068117be9082..9401f7c14747788f7f1e4617fb22354819785a46 100644 (file)
@@ -260,7 +260,7 @@ def pr_help():
 
 comm_re = None
 pid_re = None
-pid_regex = "^(\d*)-(\d*)$|^(\d*)$"
+pid_regex = r"^(\d*)-(\d*)$|^(\d*)$"
 
 opt_proc = popt.DISP_DFL
 opt_disp = topt.DISP_ALL
index 13f2d8a8161096e8f8f9b691146629dafeee4538..121cf61ba1b345f579e2db83f1dfb586e8d88ec7 100755 (executable)
@@ -677,8 +677,8 @@ class CallGraphModelBase(TreeModel):
                        #   sqlite supports GLOB (text only) which uses * and ? and is case sensitive
                        if not self.glb.dbref.is_sqlite3:
                                # Escape % and _
-                               s = value.replace("%", "\%")
-                               s = s.replace("_", "\_")
+                               s = value.replace("%", "\\%")
+                               s = s.replace("_", "\\_")
                                # Translate * and ? into SQL LIKE pattern characters % and _
                                trans = string.maketrans("*?", "%_")
                                match = " LIKE '" + str(s).translate(trans) + "'"
index 2b45ffa462a6c4b9b29629dde2fa8262620bbe61..53ba9c3e20e05782eb47e7368795b5869f263be9 100644 (file)
@@ -77,3 +77,17 @@ CFLAGS_python-use.o   += -DPYTHONPATH="BUILD_STR($(OUTPUT)python)" -DPYTHON="BUI
 CFLAGS_dwarf-unwind.o += -fno-optimize-sibling-calls
 
 perf-y += workloads/
+
+ifdef SHELLCHECK
+  SHELL_TESTS := $(shell find tests/shell -executable -type f -name '*.sh')
+  TEST_LOGS := $(SHELL_TESTS:tests/shell/%=shell/%.shellcheck_log)
+else
+  SHELL_TESTS :=
+  TEST_LOGS :=
+endif
+
+$(OUTPUT)%.shellcheck_log: %
+       $(call rule_mkdir)
+       $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+
+perf-y += $(TEST_LOGS)
index 61186d0d1cfa1afda6d99260db51d5603e732a22..97e1bdd6ec0e9fc3ceafc96ac6df071c034cd314 100644 (file)
@@ -188,7 +188,7 @@ static int test__attr(struct test_suite *test __maybe_unused, int subtest __mayb
        if (perf_pmus__num_core_pmus() > 1) {
                /*
                 * TODO: Attribute tests hard code the PMU type. If there are >1
-                * core PMU then each PMU will have a different type whic
+                * core PMU then each PMU will have a different type which
                 * requires additional support.
                 */
                pr_debug("Skip test on hybrid systems");
index 27c21271a16c997ab8a606d347a96967bec9919d..b44e4e6e444386af80ad5027513cd5a2fe4986de 100644 (file)
@@ -6,7 +6,7 @@ flags=0|8
 cpu=*
 type=0|1
 size=136
-config=0
+config=0|1
 sample_period=*
 sample_type=263
 read_format=0|4|20
index fbb065842880f3461bdb91571fc371a3ca10fa7d..bed765450ca976f8b13dad231eae2e2743840acc 100644 (file)
@@ -6,4 +6,4 @@ args    = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
 ret     = 129
 test_ret = true
 arch    = aarch64
-auxv    = auxv["AT_HWCAP"] & 0x200000 == 0
+auxv    = auxv["AT_HWCAP"] & 0x400000 == 0
index c598c803221da79fe573a308ed27ce3fa848202e..a65113cd7311b4a8faacfbe2ef54567b00163748 100644 (file)
@@ -6,7 +6,7 @@ args    = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
 ret     = 1
 test_ret = true
 arch    = aarch64
-auxv    = auxv["AT_HWCAP"] & 0x200000 == 0x200000
+auxv    = auxv["AT_HWCAP"] & 0x400000 == 0x400000
 kernel_since = 6.1
 
 [event:base-record]
index cb6f1dd00dc483a495fdb465067aa33ae6a6be1e..4a5973f9bb9b370f1bc966f04e1efdd7b03ef64d 100644 (file)
@@ -14,6 +14,7 @@
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include "builtin.h"
+#include "config.h"
 #include "hist.h"
 #include "intlist.h"
 #include "tests.h"
@@ -32,6 +33,7 @@
 
 static bool dont_fork;
 const char *dso_to_test;
+const char *test_objdump_path = "objdump";
 
 /*
  * List of architecture specific tests. Not a weak symbol as the array length is
@@ -60,8 +62,6 @@ static struct test_suite *generic_tests[] = {
        &suite__pmu,
        &suite__pmu_events,
        &suite__dso_data,
-       &suite__dso_data_cache,
-       &suite__dso_data_reopen,
        &suite__perf_evsel__roundtrip_name_test,
 #ifdef HAVE_LIBTRACEEVENT
        &suite__perf_evsel__tp_sched_test,
@@ -513,6 +513,15 @@ static int run_workload(const char *work, int argc, const char **argv)
        return -1;
 }
 
+static int perf_test__config(const char *var, const char *value,
+                            void *data __maybe_unused)
+{
+       if (!strcmp(var, "annotate.objdump"))
+               test_objdump_path = value;
+
+       return 0;
+}
+
 int cmd_test(int argc, const char **argv)
 {
        const char *test_usage[] = {
@@ -529,6 +538,8 @@ int cmd_test(int argc, const char **argv)
                    "Do not fork for testcase"),
        OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"),
        OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
+       OPT_STRING(0, "objdump", &test_objdump_path, "path",
+                  "objdump binary to use for disassembly and annotations"),
        OPT_END()
        };
        const char * const test_subcommands[] = { "list", NULL };
@@ -538,6 +549,8 @@ int cmd_test(int argc, const char **argv)
         if (ret < 0)
                 return ret;
 
+       perf_config(perf_test__config, NULL);
+
        /* Unbuffered output */
        setvbuf(stdout, NULL, _IONBF, 0);
 
index 3af81012014edb8455965d5ee78d834416d5b4da..7a3a7bbbec7146b772cd6ab029b1d22c9d94a873 100644 (file)
@@ -185,7 +185,7 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf,
        int ret;
 
        fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
-       ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
+       ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path, addr, addr + len,
                       filename);
        if (ret <= 0 || (size_t)ret >= sizeof(cmd))
                return -1;
@@ -511,38 +511,6 @@ static void fs_something(void)
        }
 }
 
-#ifdef __s390x__
-#include "header.h" // for get_cpuid()
-#endif
-
-static const char *do_determine_event(bool excl_kernel)
-{
-       const char *event = excl_kernel ? "cycles:u" : "cycles";
-
-#ifdef __s390x__
-       char cpuid[128], model[16], model_c[16], cpum_cf_v[16];
-       unsigned int family;
-       int ret, cpum_cf_a;
-
-       if (get_cpuid(cpuid, sizeof(cpuid)))
-               goto out_clocks;
-       ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c,
-                    model, cpum_cf_v, &cpum_cf_a);
-       if (ret != 5)            /* Not available */
-               goto out_clocks;
-       if (excl_kernel && (cpum_cf_a & 4))
-               return event;
-       if (!excl_kernel && (cpum_cf_a & 2))
-               return event;
-
-       /* Fall through: missing authorization */
-out_clocks:
-       event = excl_kernel ? "cpu-clock:u" : "cpu-clock";
-
-#endif
-       return event;
-}
-
 static void do_something(void)
 {
        fs_something();
@@ -583,8 +551,10 @@ static int do_test_code_reading(bool try_kcore)
        int err = -1, ret;
        pid_t pid;
        struct map *map;
-       bool have_vmlinux, have_kcore, excl_kernel = false;
+       bool have_vmlinux, have_kcore;
        struct dso *dso;
+       const char *events[] = { "cycles", "cycles:u", "cpu-clock", "cpu-clock:u", NULL };
+       int evidx = 0;
 
        pid = getpid();
 
@@ -618,7 +588,7 @@ static int do_test_code_reading(bool try_kcore)
 
        /* No point getting kernel events if there is no kernel object */
        if (!have_vmlinux && !have_kcore)
-               excl_kernel = true;
+               evidx++;
 
        threads = thread_map__new_by_tid(pid);
        if (!threads) {
@@ -640,13 +610,13 @@ static int do_test_code_reading(bool try_kcore)
                goto out_put;
        }
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        if (!cpus) {
                pr_debug("perf_cpu_map__new failed\n");
                goto out_put;
        }
 
-       while (1) {
+       while (events[evidx]) {
                const char *str;
 
                evlist = evlist__new();
@@ -657,7 +627,7 @@ static int do_test_code_reading(bool try_kcore)
 
                perf_evlist__set_maps(&evlist->core, cpus, threads);
 
-               str = do_determine_event(excl_kernel);
+               str = events[evidx];
                pr_debug("Parsing event '%s'\n", str);
                ret = parse_event(evlist, str);
                if (ret < 0) {
@@ -675,32 +645,32 @@ static int do_test_code_reading(bool try_kcore)
 
                ret = evlist__open(evlist);
                if (ret < 0) {
-                       if (!excl_kernel) {
-                               excl_kernel = true;
-                               /*
-                                * Both cpus and threads are now owned by evlist
-                                * and will be freed by following perf_evlist__set_maps
-                                * call. Getting reference to keep them alive.
-                                */
-                               perf_cpu_map__get(cpus);
-                               perf_thread_map__get(threads);
-                               perf_evlist__set_maps(&evlist->core, NULL, NULL);
-                               evlist__delete(evlist);
-                               evlist = NULL;
-                               continue;
-                       }
+                       evidx++;
 
-                       if (verbose > 0) {
+                       if (events[evidx] == NULL && verbose > 0) {
                                char errbuf[512];
                                evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
                                pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
                        }
 
-                       goto out_put;
+                       /*
+                        * Both cpus and threads are now owned by evlist
+                        * and will be freed by following perf_evlist__set_maps
+                        * call. Getting reference to keep them alive.
+                        */
+                       perf_cpu_map__get(cpus);
+                       perf_thread_map__get(threads);
+                       perf_evlist__set_maps(&evlist->core, NULL, NULL);
+                       evlist__delete(evlist);
+                       evlist = NULL;
+                       continue;
                }
                break;
        }
 
+       if (events[evidx] == NULL)
+               goto out_put;
+
        ret = evlist__mmap(evlist, UINT_MAX);
        if (ret < 0) {
                pr_debug("evlist__mmap failed\n");
@@ -721,7 +691,7 @@ static int do_test_code_reading(bool try_kcore)
                err = TEST_CODE_READING_NO_KERNEL_OBJ;
        else if (!have_vmlinux && !try_kcore)
                err = TEST_CODE_READING_NO_VMLINUX;
-       else if (excl_kernel)
+       else if (strstr(events[evidx], ":u"))
                err = TEST_CODE_READING_NO_ACCESS;
        else
                err = TEST_CODE_READING_OK;
index 7730fc2ab40b734274fe89b569def4f9909e0e31..bd8e396f3e57bbb27d46567677a20273877492d6 100644 (file)
@@ -213,7 +213,7 @@ static int test__cpu_map_intersect(struct test_suite *test __maybe_unused,
 
 static int test__cpu_map_equal(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 {
-       struct perf_cpu_map *any = perf_cpu_map__dummy_new();
+       struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
        struct perf_cpu_map *one = perf_cpu_map__new("1");
        struct perf_cpu_map *two = perf_cpu_map__new("2");
        struct perf_cpu_map *empty = perf_cpu_map__intersect(one, two);
index 3419a4ab5590f5fff2ae85334f8941b0c159b2e3..2d67422c1222949700e7759fd174080ea439765a 100644 (file)
@@ -394,6 +394,15 @@ static int test__dso_data_reopen(struct test_suite *test __maybe_unused, int sub
        return 0;
 }
 
-DEFINE_SUITE("DSO data read", dso_data);
-DEFINE_SUITE("DSO data cache", dso_data_cache);
-DEFINE_SUITE("DSO data reopen", dso_data_reopen);
+
+static struct test_case tests__dso_data[] = {
+       TEST_CASE("read", dso_data),
+       TEST_CASE("cache", dso_data_cache),
+       TEST_CASE("reopen", dso_data_reopen),
+       {       .name = NULL, }
+};
+
+struct test_suite suite__dso_data = {
+       .desc = "DSO data tests",
+       .test_cases = tests__dso_data,
+};
index 8f4f9b632e1e586a85911d61e4e180294041fff3..5a3b2bed07f327e1806766a5bdf0c61792673c75 100644 (file)
@@ -81,7 +81,7 @@ static int test__keep_tracking(struct test_suite *test __maybe_unused, int subte
        threads = thread_map__new(-1, getpid(), UINT_MAX);
        CHECK_NOT_NULL__(threads);
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        CHECK_NOT_NULL__(cpus);
 
        evlist = evlist__new();
index d9945ed25bc5ae96c765c152b4761bdf4a622c78..8a4da7eb637a8abd38f047238c6433e9929f9a2d 100644 (file)
@@ -183,7 +183,7 @@ run += make_install_prefix_slash
 # run += make_install_pdf
 run += make_minimal
 
-old_libbpf := $(shell echo '\#include <bpf/libbpf.h>' | $(CC) -E -dM -x c -| egrep -q "define[[:space:]]+LIBBPF_MAJOR_VERSION[[:space:]]+0{1}")
+old_libbpf := $(shell echo '\#include <bpf/libbpf.h>' | $(CC) -E -dM -x c -| grep -q -E "define[[:space:]]+LIBBPF_MAJOR_VERSION[[:space:]]+0{1}")
 
 ifneq ($(old_libbpf),)
 run += make_libbpf_dynamic
index 5bb1123a91a7ccf0f38a021581bcd194c36bec1c..bb3fbfe5a73e2302155fe40a953102e15f0dd103 100644 (file)
@@ -14,44 +14,59 @@ struct map_def {
        u64 end;
 };
 
+struct check_maps_cb_args {
+       struct map_def *merged;
+       unsigned int i;
+};
+
+static int check_maps_cb(struct map *map, void *data)
+{
+       struct check_maps_cb_args *args = data;
+       struct map_def *merged = &args->merged[args->i];
+
+       if (map__start(map) != merged->start ||
+           map__end(map) != merged->end ||
+           strcmp(map__dso(map)->name, merged->name) ||
+           refcount_read(map__refcnt(map)) != 1) {
+               return 1;
+       }
+       args->i++;
+       return 0;
+}
+
+static int failed_cb(struct map *map, void *data __maybe_unused)
+{
+       pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: %d\n",
+               map__start(map),
+               map__end(map),
+               map__dso(map)->name,
+               refcount_read(map__refcnt(map)));
+
+       return 0;
+}
+
 static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps)
 {
-       struct map_rb_node *rb_node;
-       unsigned int i = 0;
        bool failed = false;
 
        if (maps__nr_maps(maps) != size) {
                pr_debug("Expected %d maps, got %d", size, maps__nr_maps(maps));
                failed = true;
        } else {
-               maps__for_each_entry(maps, rb_node) {
-                       struct map *map = rb_node->map;
-
-                       if (map__start(map) != merged[i].start ||
-                           map__end(map) != merged[i].end ||
-                           strcmp(map__dso(map)->name, merged[i].name) ||
-                           refcount_read(map__refcnt(map)) != 1) {
-                               failed = true;
-                       }
-                       i++;
-               }
+               struct check_maps_cb_args args = {
+                       .merged = merged,
+                       .i = 0,
+               };
+               failed = maps__for_each_map(maps, check_maps_cb, &args);
        }
        if (failed) {
                pr_debug("Expected:\n");
-               for (i = 0; i < size; i++) {
+               for (unsigned int i = 0; i < size; i++) {
                        pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: 1\n",
                                merged[i].start, merged[i].end, merged[i].name);
                }
                pr_debug("Got:\n");
-               maps__for_each_entry(maps, rb_node) {
-                       struct map *map = rb_node->map;
-
-                       pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: %d\n",
-                               map__start(map),
-                               map__end(map),
-                               map__dso(map)->name,
-                               refcount_read(map__refcnt(map)));
-               }
+               maps__for_each_map(maps, failed_cb, NULL);
        }
        return failed ? TEST_FAIL : TEST_OK;
 }
index 886a13a77a1624022f9165837a7787f87ca709c4..012c8ae439fdcf56cd6b2492d7460de5110cc708 100644 (file)
@@ -52,7 +52,7 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
                return -1;
        }
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        if (cpus == NULL) {
                pr_debug("perf_cpu_map__new\n");
                goto out_free_threads;
index f3275be83a3382ee1437ea671274f47fad60eca5..fb114118c87640b848bbd0243a7810cc2877b032 100644 (file)
@@ -37,7 +37,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb
                return -1;
        }
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        if (cpus == NULL) {
                pr_debug("perf_cpu_map__new\n");
                goto out_thread_map_delete;
index f78be21a5999b699bfa12ce52c2592b56c4d75ef..fbdf710d5eea06047784aef245438b87442451d9 100644 (file)
@@ -162,6 +162,22 @@ static int test__checkevent_numeric(struct evlist *evlist)
        return TEST_OK;
 }
 
+
+static int assert_hw(struct perf_evsel *evsel, enum perf_hw_id id, const char *name)
+{
+       struct perf_pmu *pmu;
+
+       if (evsel->attr.type == PERF_TYPE_HARDWARE) {
+               TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, id));
+               return 0;
+       }
+       pmu = perf_pmus__find_by_type(evsel->attr.type);
+
+       TEST_ASSERT_VAL("unexpected PMU type", pmu);
+       TEST_ASSERT_VAL("PMU missing event", perf_pmu__have_event(pmu, name));
+       return 0;
+}
+
 static int test__checkevent_symbolic_name(struct evlist *evlist)
 {
        struct perf_evsel *evsel;
@@ -169,10 +185,12 @@ static int test__checkevent_symbolic_name(struct evlist *evlist)
        TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
 
        perf_evlist__for_each_evsel(&evlist->core, evsel) {
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
-               TEST_ASSERT_VAL("wrong config",
-                               test_perf_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+               int ret = assert_hw(evsel, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+
+               if (ret)
+                       return ret;
        }
+
        return TEST_OK;
 }
 
@@ -183,8 +201,10 @@ static int test__checkevent_symbolic_name_config(struct evlist *evlist)
        TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
 
        perf_evlist__for_each_evsel(&evlist->core, evsel) {
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
-               TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               int ret = assert_hw(evsel, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+               if (ret)
+                       return ret;
                /*
                 * The period value gets configured within evlist__config,
                 * while this test executes only parse events method.
@@ -861,10 +881,14 @@ static int test__group1(struct evlist *evlist)
                        evlist__nr_groups(evlist) == num_core_entries());
 
        for (int i = 0; i < num_core_entries(); i++) {
+               int ret;
+
                /* instructions:k */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -878,8 +902,10 @@ static int test__group1(struct evlist *evlist)
 
                /* cycles:upp */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -907,6 +933,8 @@ static int test__group2(struct evlist *evlist)
        TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist));
 
        evlist__for_each_entry(evlist, evsel) {
+               int ret;
+
                if (evsel->core.attr.type == PERF_TYPE_SOFTWARE) {
                        /* faults + :ku modifier */
                        leader = evsel;
@@ -939,8 +967,10 @@ static int test__group2(struct evlist *evlist)
                        continue;
                }
                /* cycles:k */
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -957,6 +987,7 @@ static int test__group2(struct evlist *evlist)
 static int test__group3(struct evlist *evlist __maybe_unused)
 {
        struct evsel *evsel, *group1_leader = NULL, *group2_leader = NULL;
+       int ret;
 
        TEST_ASSERT_VAL("wrong number of entries",
                        evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus() + 2));
@@ -1045,8 +1076,10 @@ static int test__group3(struct evlist *evlist __maybe_unused)
                        continue;
                }
                /* instructions:u */
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1070,10 +1103,14 @@ static int test__group4(struct evlist *evlist __maybe_unused)
                        num_core_entries() == evlist__nr_groups(evlist));
 
        for (int i = 0; i < num_core_entries(); i++) {
+               int ret;
+
                /* cycles:u + p */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1089,8 +1126,10 @@ static int test__group4(struct evlist *evlist __maybe_unused)
 
                /* instructions:kp + p */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1108,6 +1147,7 @@ static int test__group4(struct evlist *evlist __maybe_unused)
 static int test__group5(struct evlist *evlist __maybe_unused)
 {
        struct evsel *evsel = NULL, *leader;
+       int ret;
 
        TEST_ASSERT_VAL("wrong number of entries",
                        evlist->core.nr_entries == (5 * num_core_entries()));
@@ -1117,8 +1157,10 @@ static int test__group5(struct evlist *evlist __maybe_unused)
        for (int i = 0; i < num_core_entries(); i++) {
                /* cycles + G */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1133,8 +1175,10 @@ static int test__group5(struct evlist *evlist __maybe_unused)
 
                /* instructions + G */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1148,8 +1192,10 @@ static int test__group5(struct evlist *evlist __maybe_unused)
        for (int i = 0; i < num_core_entries(); i++) {
                /* cycles:G */
                evsel = leader = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1164,8 +1210,10 @@ static int test__group5(struct evlist *evlist __maybe_unused)
 
                /* instructions:G */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1178,8 +1226,10 @@ static int test__group5(struct evlist *evlist __maybe_unused)
        for (int i = 0; i < num_core_entries(); i++) {
                /* cycles */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1201,10 +1251,14 @@ static int test__group_gh1(struct evlist *evlist)
                        evlist__nr_groups(evlist) == num_core_entries());
 
        for (int i = 0; i < num_core_entries(); i++) {
+               int ret;
+
                /* cycles + :H group modifier */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1218,8 +1272,10 @@ static int test__group_gh1(struct evlist *evlist)
 
                /* cache-misses:G + :H group modifier */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1242,10 +1298,14 @@ static int test__group_gh2(struct evlist *evlist)
                        evlist__nr_groups(evlist) == num_core_entries());
 
        for (int i = 0; i < num_core_entries(); i++) {
+               int ret;
+
                /* cycles + :G group modifier */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1259,8 +1319,10 @@ static int test__group_gh2(struct evlist *evlist)
 
                /* cache-misses:H + :G group modifier */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1283,10 +1345,14 @@ static int test__group_gh3(struct evlist *evlist)
                        evlist__nr_groups(evlist) == num_core_entries());
 
        for (int i = 0; i < num_core_entries(); i++) {
+               int ret;
+
                /* cycles:G + :u group modifier */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1300,8 +1366,10 @@ static int test__group_gh3(struct evlist *evlist)
 
                /* cache-misses:H + :u group modifier */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1324,10 +1392,14 @@ static int test__group_gh4(struct evlist *evlist)
                        evlist__nr_groups(evlist) == num_core_entries());
 
        for (int i = 0; i < num_core_entries(); i++) {
+               int ret;
+
                /* cycles:G + :uG group modifier */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1341,8 +1413,10 @@ static int test__group_gh4(struct evlist *evlist)
 
                /* cache-misses:H + :uG group modifier */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1363,10 +1437,14 @@ static int test__leader_sample1(struct evlist *evlist)
                        evlist->core.nr_entries == (3 * num_core_entries()));
 
        for (int i = 0; i < num_core_entries(); i++) {
+               int ret;
+
                /* cycles - sampling group leader */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1379,8 +1457,10 @@ static int test__leader_sample1(struct evlist *evlist)
 
                /* cache-misses - not sampling */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1392,8 +1472,10 @@ static int test__leader_sample1(struct evlist *evlist)
 
                /* branch-misses - not sampling */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1415,10 +1497,14 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
                        evlist->core.nr_entries == (2 * num_core_entries()));
 
        for (int i = 0; i < num_core_entries(); i++) {
+               int ret;
+
                /* instructions - sampling group leader */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1431,8 +1517,10 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
 
                /* branch-misses - not sampling */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
                TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1472,10 +1560,14 @@ static int test__pinned_group(struct evlist *evlist)
                        evlist->core.nr_entries == (3 * num_core_entries()));
 
        for (int i = 0; i < num_core_entries(); i++) {
+               int ret;
+
                /* cycles - group leader */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
                TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
                /* TODO: The group modifier is not copied to the split group leader. */
@@ -1484,13 +1576,18 @@ static int test__pinned_group(struct evlist *evlist)
 
                /* cache-misses - can not be pinned, but will go on with the leader */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
 
                /* branch-misses - ditto */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
        }
        return TEST_OK;
@@ -1517,10 +1614,14 @@ static int test__exclusive_group(struct evlist *evlist)
                        evlist->core.nr_entries == 3 * num_core_entries());
 
        for (int i = 0; i < num_core_entries(); i++) {
+               int ret;
+
                /* cycles - group leader */
                evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
                TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
                /* TODO: The group modifier is not copied to the split group leader. */
@@ -1529,13 +1630,18 @@ static int test__exclusive_group(struct evlist *evlist)
 
                /* cache-misses - can not be pinned, but will go on with the leader */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
 
                /* branch-misses - ditto */
                evsel = evsel__next(evsel);
-               TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
+               ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
+               if (ret)
+                       return ret;
+
                TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
        }
        return TEST_OK;
@@ -1677,9 +1783,11 @@ static int test__checkevent_raw_pmu(struct evlist *evlist)
 static int test__sym_event_slash(struct evlist *evlist)
 {
        struct evsel *evsel = evlist__first(evlist);
+       int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+       if (ret)
+               return ret;
 
-       TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
-       TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
        return TEST_OK;
 }
@@ -1687,9 +1795,11 @@ static int test__sym_event_slash(struct evlist *evlist)
 static int test__sym_event_dc(struct evlist *evlist)
 {
        struct evsel *evsel = evlist__first(evlist);
+       int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+       if (ret)
+               return ret;
 
-       TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
-       TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
        TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
        return TEST_OK;
 }
@@ -1697,9 +1807,11 @@ static int test__sym_event_dc(struct evlist *evlist)
 static int test__term_equal_term(struct evlist *evlist)
 {
        struct evsel *evsel = evlist__first(evlist);
+       int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+       if (ret)
+               return ret;
 
-       TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
-       TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
        TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "name") == 0);
        return TEST_OK;
 }
@@ -1707,9 +1819,11 @@ static int test__term_equal_term(struct evlist *evlist)
 static int test__term_equal_legacy(struct evlist *evlist)
 {
        struct evsel *evsel = evlist__first(evlist);
+       int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+       if (ret)
+               return ret;
 
-       TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
-       TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
        TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "l1d") == 0);
        return TEST_OK;
 }
@@ -2549,7 +2663,7 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest
                        if (strchr(ent->d_name, '.'))
                                continue;
 
-                       /* exclude parametrized ones (name contains '?') */
+                       /* exclude parameterized ones (name contains '?') */
                        n = snprintf(pmu_event, sizeof(pmu_event), "%s%s", path, ent->d_name);
                        if (n >= PATH_MAX) {
                                pr_err("pmu event name crossed PATH_MAX(%d) size\n", PATH_MAX);
@@ -2578,7 +2692,7 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest
                        fclose(file);
 
                        if (is_event_parameterized == 1) {
-                               pr_debug("skipping parametrized PMU event: %s which contains ?\n", pmu_event);
+                               pr_debug("skipping parameterized PMU event: %s which contains ?\n", pmu_event);
                                continue;
                        }
 
index efcd71c2738afb9d93809e8bab18eb5dd3090195..bbe2ddeb9b745c0c8d51dc842afcfd603635fe4c 100644 (file)
@@ -93,7 +93,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
        threads = thread_map__new(-1, getpid(), UINT_MAX);
        CHECK_NOT_NULL__(threads);
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        CHECK_NOT_NULL__(cpus);
 
        evlist = evlist__new();
index a7e169d1bf645e302af1c9e3a10544ffcd0af76f..5f886cd09e6b3a62b5690dade94f1f8cae3279d2 100644 (file)
@@ -42,7 +42,6 @@ static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
 int main(int argc, char **argv)
 {
        unsigned long i, len, size, thr;
-       pthread_t threads[256];
        struct args args[256];
        long long v;
 
index c0158fac7d0b0b47ebfc5dac1a8e3aa72d781981..e05a559253ca9d9366ad321d520349042fb07fca 100644 (file)
@@ -57,7 +57,6 @@ static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
 int main(int argc, char **argv)
 {
        unsigned int i, len, thr;
-       pthread_t threads[256];
        struct args args[256];
 
        if (argc < 3) {
index 8f6d384208ed971debc09d296006e7408a790b2b..0fc7bf1a25af3607b40f091f62176134ddb7f9f6 100644 (file)
@@ -51,7 +51,6 @@ static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
 int main(int argc, char **argv)
 {
        unsigned int i, thr;
-       pthread_t threads[256];
        struct args args[256];
 
        if (argc < 2) {
index 4c598cfc5afa14816f128da39f86cc7f18b4c68a..e5fa8d6f9eb1fdad3a5d700da9fa788c771a31ad 100755 (executable)
@@ -414,16 +414,30 @@ EOF
        # start daemon
        daemon_start ${config} test
 
-       # send 2 signals
-       perf daemon signal --config ${config} --session test
-       perf daemon signal --config ${config}
-
-       # stop daemon
-       daemon_exit ${config}
-
-       # count is 2 perf.data for signals and 1 for perf record finished
-       count=`ls ${base}/session-test/*perf.data* | wc -l`
-       if [ ${count} -ne 3 ]; then
+        # send 2 signals then exit. Do this in a loop watching the number of
+        # files to avoid races. If the loop retries more than 600 times then
+        # give up.
+       local retries=0
+       local signals=0
+       local success=0
+       while [ ${retries} -lt 600 ] && [ ${success} -eq 0 ]; do
+               local files
+               files=`ls ${base}/session-test/*perf.data* 2> /dev/null | wc -l`
+               if [ ${signals} -eq 0 ]; then
+                       perf daemon signal --config ${config} --session test
+                       signals=1
+               elif [ ${signals} -eq 1 ] && [ $files -ge 1 ]; then
+                       perf daemon signal --config ${config}
+                       signals=2
+               elif [ ${signals} -eq 2 ] && [ $files -ge 2 ]; then
+                       daemon_exit ${config}
+                       signals=3
+               elif [ ${signals} -eq 3 ] && [ $files -ge 3 ]; then
+                       success=1
+               fi
+               retries=$((${retries} +1))
+       done
+       if [ ${success} -eq 0 ]; then
                error=1
                echo "FAILED: perf data no generated"
        fi
diff --git a/tools/perf/tests/shell/diff.sh b/tools/perf/tests/shell/diff.sh
new file mode 100755 (executable)
index 0000000..14b87af
--- /dev/null
@@ -0,0 +1,108 @@
+#!/bin/sh
+# perf diff tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata1=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+perfdata2=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+perfdata3=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+testprog="perf test -w thloop"
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
+testsym="test_loop"
+
+skip_test_missing_symbol ${testsym}
+
+cleanup() {
+  rm -rf "${perfdata1}"
+  rm -rf "${perfdata1}".old
+  rm -rf "${perfdata2}"
+  rm -rf "${perfdata2}".old
+  rm -rf "${perfdata3}"
+  rm -rf "${perfdata3}".old
+
+  trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+  cleanup
+  exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+make_data() {
+  file="$1"
+  if ! perf record -o "${file}" ${testprog} 2> /dev/null
+  then
+    echo "Workload record [Failed record]"
+    echo 1
+    return
+  fi
+  if ! perf report -i "${file}" -q | grep -q "${testsym}"
+  then
+    echo "Workload record [Failed missing output]"
+    echo 1
+    return
+  fi
+  echo 0
+}
+
+test_two_files() {
+  echo "Basic two file diff test"
+  err=$(make_data "${perfdata1}")
+  if [ $err != 0 ]
+  then
+    return
+  fi
+  err=$(make_data "${perfdata2}")
+  if [ $err != 0 ]
+  then
+    return
+  fi
+
+  if ! perf diff "${perfdata1}" "${perfdata2}" | grep -q "${testsym}"
+  then
+    echo "Basic two file diff test [Failed diff]"
+    err=1
+    return
+  fi
+  echo "Basic two file diff test [Success]"
+}
+
+test_three_files() {
+  echo "Basic three file diff test"
+  err=$(make_data "${perfdata1}")
+  if [ $err != 0 ]
+  then
+    return
+  fi
+  err=$(make_data "${perfdata2}")
+  if [ $err != 0 ]
+  then
+    return
+  fi
+  err=$(make_data "${perfdata3}")
+  if [ $err != 0 ]
+  then
+    return
+  fi
+
+  if ! perf diff "${perfdata1}" "${perfdata2}" "${perfdata3}" | grep -q "${testsym}"
+  then
+    echo "Basic three file diff test [Failed diff]"
+    err=1
+    return
+  fi
+  echo "Basic three file diff test [Success]"
+}
+
+test_two_files
+test_three_files
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh
new file mode 100644 (file)
index 0000000..5d59c32
--- /dev/null
@@ -0,0 +1,21 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+perf_has_symbol()
+{
+       if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
+               echo "perf does have symbol '$1'"
+               return 0
+       fi
+       echo "perf does not have symbol '$1'"
+       return 1
+}
+
+skip_test_missing_symbol()
+{
+       if ! perf_has_symbol "$1" ; then
+               echo "perf is missing symbols - skipping test"
+               exit 2
+       fi
+       return 0
+}
diff --git a/tools/perf/tests/shell/lib/setup_python.sh b/tools/perf/tests/shell/lib/setup_python.sh
new file mode 100644 (file)
index 0000000..c2fce17
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+if [ "x$PYTHON" = "x" ]
+then
+  python3 --version >/dev/null 2>&1 && PYTHON=python3
+fi
+if [ "x$PYTHON" = "x" ]
+then
+  python --version >/dev/null 2>&1 && PYTHON=python
+fi
+if [ "x$PYTHON" = "x" ]
+then
+  echo Skipping test, python not detected please set environment variable PYTHON.
+  exit 2
+fi
diff --git a/tools/perf/tests/shell/list.sh b/tools/perf/tests/shell/list.sh
new file mode 100755 (executable)
index 0000000..8a868ae
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/sh
+# perf list tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
+
+list_output=$(mktemp /tmp/__perf_test.list_output.json.XXXXX)
+
+cleanup() {
+  rm -f "${list_output}"
+
+  trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+  cleanup
+  exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_list_json() {
+  echo "Json output test"
+  perf list -j -o "${list_output}"
+  $PYTHON -m json.tool "${list_output}"
+  echo "Json output test [Success]"
+}
+
+test_list_json
+cleanup
+exit 0
index 8dd115dd35a7e1d0e57b5907459d0e995221e9c2..a78d35d2cff070d731769e1f8c73cb54354d7835 100755 (executable)
@@ -2,10 +2,17 @@
 # perf pipe recording and injection test
 # SPDX-License-Identifier: GPL-2.0
 
+shelldir=$(dirname "$0")
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
+sym="noploop"
+
+skip_test_missing_symbol ${sym}
+
 data=$(mktemp /tmp/perf.data.XXXXXX)
 prog="perf test -w noploop"
 task="perf"
-sym="noploop"
 
 if ! perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep ${task}; then
        echo "cannot find the test file in the perf report"
index eebeea6bdc767a73168c81cd64bd13d049060508..72c65570db378c74d562d6d361faa2d4c01760fd 100755 (executable)
@@ -45,7 +45,10 @@ trace_libc_inet_pton_backtrace() {
                ;;
        ppc64|ppc64le)
                eventattr='max-stack=4'
-               echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+               # Add gaih_inet to expected backtrace only if it is part of libc.
+               if nm $libc | grep -F -q gaih_inet.; then
+                       echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+               fi
                echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
                echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
                ;;
index 29443b8e8876502aa5cb7e291dcb0e104f8a3da1..3d1a7759a7b2da83fb9742e62b9021713a43f21c 100755 (executable)
@@ -8,10 +8,19 @@ shelldir=$(dirname "$0")
 # shellcheck source=lib/waiting.sh
 . "${shelldir}"/lib/waiting.sh
 
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
+testsym="test_loop"
+
+skip_test_missing_symbol ${testsym}
+
 err=0
 perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
 testprog="perf test -w thloop"
-testsym="test_loop"
+cpu_pmu_dir="/sys/bus/event_source/devices/cpu*"
+br_cntr_file="/caps/branch_counter_nr"
+br_cntr_output="branch stack counters"
 
 cleanup() {
   rm -rf "${perfdata}"
@@ -155,10 +164,37 @@ test_workload() {
   echo "Basic target workload test [Success]"
 }
 
+test_branch_counter() {
+  echo "Basic branch counter test"
+  # Check if the branch counter feature is supported
+  for dir in $cpu_pmu_dir
+  do
+    if [ ! -e "$dir$br_cntr_file" ]
+    then
+      echo "branch counter feature not supported on all core PMUs ($dir) [Skipped]"
+      return
+    fi
+  done
+  if ! perf record -o "${perfdata}" -j any,counter ${testprog} 2> /dev/null
+  then
+    echo "Basic branch counter test [Failed record]"
+    err=1
+    return
+  fi
+  if ! perf report -i "${perfdata}" -D -q | grep -q "$br_cntr_output"
+  then
+    echo "Basic branch record test [Failed missing output]"
+    err=1
+    return
+  fi
+  echo "Basic branch counter test [Success]"
+}
+
 test_per_thread
 test_register_capture
 test_system_wide
 test_workload
+test_branch_counter
 
 cleanup
 exit $err
index a1ef8f0d2b5cc1c5f1257e159b696d294875f874..67c925f3a15aa7fd78e0a1b779ce4c85e80246be 100755 (executable)
@@ -77,9 +77,9 @@ test_offcpu_child() {
     err=1
     return
   fi
-  # each process waits for read and write, so it should be more than 800 events
+  # each process waits at least for poll, so it should be more than 400 events
   if ! perf report -i ${perfdata} -s comm -q -n -t ';' --percent-limit=90 | \
-    awk -F ";" '{ if (NF > 3 && int($3) < 800) exit 1; }'
+    awk -F ";" '{ if (NF > 3 && int($3) < 400) exit 1; }'
   then
     echo "Child task off-cpu test [Failed invalid output]"
     err=1
diff --git a/tools/perf/tests/shell/script.sh b/tools/perf/tests/shell/script.sh
new file mode 100755 (executable)
index 0000000..fa4d71e
--- /dev/null
@@ -0,0 +1,72 @@
+#!/bin/sh
+# perf script tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+temp_dir=$(mktemp -d /tmp/perf-test-script.XXXXXXXXXX)
+
+perfdatafile="${temp_dir}/perf.data"
+db_test="${temp_dir}/db_test.py"
+
+err=0
+
+cleanup()
+{
+       trap - EXIT TERM INT
+       sane=$(echo "${temp_dir}" | cut -b 1-21)
+       if [ "${sane}" = "/tmp/perf-test-script" ] ; then
+               echo "--- Cleaning up ---"
+               rm -f "${temp_dir}/"*
+               rmdir "${temp_dir}"
+       fi
+}
+
+trap_cleanup()
+{
+       cleanup
+       exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+
+test_db()
+{
+       echo "DB test"
+
+       # Check if python script is supported
+        if perf version --build-options | grep python | grep -q OFF ; then
+               echo "SKIP: python scripting is not supported"
+               err=2
+               return
+       fi
+
+       cat << "_end_of_file_" > "${db_test}"
+perf_db_export_mode = True
+perf_db_export_calls = False
+perf_db_export_callchains = True
+
+def sample_table(*args):
+    print(f'sample_table({args})')
+
+def call_path_table(*args):
+    print(f'call_path_table({args}')
+_end_of_file_
+       case $(uname -m)
+       in s390x)
+               cmd_flags="--call-graph dwarf -e cpu-clock";;
+       *)
+               cmd_flags="-g";;
+       esac
+
+       perf record $cmd_flags -o "${perfdatafile}" true
+       perf script -i "${perfdatafile}" -s "${db_test}"
+       echo "DB test [Success]"
+}
+
+test_db
+
+cleanup
+
+exit $err
index 196e22672c50cf6572db3b08c8974c75abb1ae56..3bc900533a5d65e5f7c3495022802857da517388 100755 (executable)
@@ -8,20 +8,10 @@ set -e
 
 skip_test=0
 
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
 pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py
-if [ "x$PYTHON" == "x" ]
-then
-       if which python3 > /dev/null
-       then
-               PYTHON=python3
-       elif which python > /dev/null
-       then
-               PYTHON=python
-       else
-               echo Skipping test, python not detected please set environment variable PYTHON.
-               exit 2
-       fi
-fi
 
 stat_output=$(mktemp /tmp/__perf_test.stat_output.json.XXXXX)
 
index c77955419173190216d9af049bb88bd3ab1bcc1e..d2a3506e0d196c997d7eb663d6da06b530ea5681 100755 (executable)
@@ -4,7 +4,7 @@
 
 set -e
 
-# Test all PMU events; however exclude parametrized ones (name contains '?')
+# Test all PMU events; however exclude parameterized ones (name contains '?')
 for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g'); do
   echo "Testing $p"
   result=$(perf stat -e "$p" true 2>&1)
index ad94c936de7e878173e95ee949ae9146aa295239..7ca172599aa6cdac7adb47d16716ff3ba3746e63 100755 (executable)
@@ -1,16 +1,10 @@
 #!/bin/bash
 # perf metrics value validation
 # SPDX-License-Identifier: GPL-2.0
-if [ "x$PYTHON" == "x" ]
-then
-       if which python3 > /dev/null
-       then
-               PYTHON=python3
-       else
-               echo Skipping test, python3 not detected please set environment variable PYTHON.
-               exit 2
-       fi
-fi
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
 
 grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; }
 
index 66dfdfdad553f4c6b580e928d8b870840b831269..e342e6c8aa50c41ddb86730e263c321907800d73 100755 (executable)
@@ -2,8 +2,14 @@
 # Check Arm64 callgraphs are complete in fp mode
 # SPDX-License-Identifier: GPL-2.0
 
+shelldir=$(dirname "$0")
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
 lscpu | grep -q "aarch64" || exit 2
 
+skip_test_missing_symbol leafloop
+
 PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
 TEST_PROGRAM="perf test -w leafloop"
 
index 09908d71c9941d3c4e3cb1d81cc08617581e13d8..5f14d0cb013f838629446abc4f15484edb2cd7d8 100755 (executable)
@@ -4,6 +4,10 @@
 # SPDX-License-Identifier: GPL-2.0
 # German Gomez <german.gomez@arm.com>, 2022
 
+shelldir=$(dirname "$0")
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
 # skip the test if the hardware doesn't support branch stack sampling
 # and if the architecture doesn't support filter types: any,save_type,u
 if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev/null 2>&1 ; then
@@ -11,6 +15,8 @@ if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev
        exit 2
 fi
 
+skip_test_missing_symbol brstack_bench
+
 TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX)
 TESTPROG="perf test -w brstack"
 
index 69bb6fe86c5078a8325dedbcc2ca045a6d552f2a..3dfa91832aa87f89b8f0ef0c1ba51df6d130d2d2 100755 (executable)
@@ -4,6 +4,13 @@
 # SPDX-License-Identifier: GPL-2.0
 # Leo Yan <leo.yan@linaro.org>, 2022
 
+shelldir=$(dirname "$0")
+# shellcheck source=lib/waiting.sh
+. "${shelldir}"/lib/waiting.sh
+
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
 skip_if_no_mem_event() {
        perf mem record -e list 2>&1 | grep -E -q 'available' && return 0
        return 2
@@ -11,8 +18,11 @@ skip_if_no_mem_event() {
 
 skip_if_no_mem_event || exit 2
 
+skip_test_missing_symbol buf1
+
 TEST_PROGRAM="perf test -w datasym"
 PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX)
 
 check_result() {
        # The memory report format is as below:
@@ -50,13 +60,15 @@ echo "Recording workload..."
 # specific CPU and test in per-CPU mode.
 is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo)
 if (($is_amd >= 1)); then
-       perf mem record -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM &
+       perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}" &
 else
-       perf mem record --all-user -o ${PERF_DATA} -- $TEST_PROGRAM &
+       perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}" &
 fi
 
 PERFPID=$!
 
+wait_for_perf_to_start ${PERFPID} "${ERR_FILE}"
+
 sleep 1
 
 kill $PERFPID
index 6ded58f98f55b26f6f538cf5f457007625be7700..c4f1b59d116f6e4705d872824339b234180e206f 100755 (executable)
@@ -6,16 +6,9 @@ set -e
 
 err=0
 
-if [ "$PYTHON" = "" ] ; then
-       if which python3 > /dev/null ; then
-               PYTHON=python3
-       elif which python > /dev/null ; then
-               PYTHON=python
-       else
-               echo Skipping test, python not detected please set environment variable PYTHON.
-               exit 2
-       fi
-fi
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
 
 perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
 result=$(mktemp /tmp/__perf_test.output.json.XXXXX)
index 1de7478ec1894d7799d723f09f9384937710bf40..e6fd934b027a3d0ca36f434135cbdc245acd666d 100644 (file)
@@ -57,36 +57,79 @@ static struct perf_event_attr make_event_attr(void)
 #ifdef HAVE_BPF_SKEL
 #include <bpf/btf.h>
 
-static bool attr_has_sigtrap(void)
+static struct btf *btf;
+
+static bool btf__available(void)
 {
-       bool ret = false;
-       struct btf *btf;
-       const struct btf_type *t;
+       if (btf == NULL)
+               btf = btf__load_vmlinux_btf();
+
+       return btf != NULL;
+}
+
+static void btf__exit(void)
+{
+       btf__free(btf);
+       btf = NULL;
+}
+
+static const struct btf_member *__btf_type__find_member_by_name(int type_id, const char *member_name)
+{
+       const struct btf_type *t = btf__type_by_id(btf, type_id);
        const struct btf_member *m;
-       const char *name;
-       int i, id;
+       int i;
+
+       for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
+               const char *current_member_name = btf__name_by_offset(btf, m->name_off);
+               if (!strcmp(current_member_name, member_name))
+                       return m;
+       }
 
-       btf = btf__load_vmlinux_btf();
-       if (btf == NULL) {
+       return NULL;
+}
+
+static bool attr_has_sigtrap(void)
+{
+       int id;
+
+       if (!btf__available()) {
                /* should be an old kernel */
                return false;
        }
 
        id = btf__find_by_name_kind(btf, "perf_event_attr", BTF_KIND_STRUCT);
        if (id < 0)
-               goto out;
+               return false;
 
-       t = btf__type_by_id(btf, id);
-       for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
-               name = btf__name_by_offset(btf, m->name_off);
-               if (!strcmp(name, "sigtrap")) {
-                       ret = true;
-                       break;
-               }
-       }
-out:
-       btf__free(btf);
-       return ret;
+       return __btf_type__find_member_by_name(id, "sigtrap") != NULL;
+}
+
+static bool kernel_with_sleepable_spinlocks(void)
+{
+       const struct btf_member *member;
+       const struct btf_type *type;
+       const char *type_name;
+       int id;
+
+       if (!btf__available())
+               return false;
+
+       id = btf__find_by_name_kind(btf, "spinlock", BTF_KIND_STRUCT);
+       if (id < 0)
+               return false;
+
+       // Only RT has a "lock" member for "struct spinlock"
+       member = __btf_type__find_member_by_name(id, "lock");
+       if (member == NULL)
+               return false;
+
+       // But check its type as well
+       type = btf__type_by_id(btf, member->type);
+       if (!type || !btf_is_struct(type))
+               return false;
+
+       type_name = btf__name_by_offset(btf, type->name_off);
+       return type_name && !strcmp(type_name, "rt_mutex_base");
 }
 #else  /* !HAVE_BPF_SKEL */
 static bool attr_has_sigtrap(void)
@@ -109,6 +152,15 @@ static bool attr_has_sigtrap(void)
 
        return ret;
 }
+
+static bool kernel_with_sleepable_spinlocks(void)
+{
+       return false;
+}
+
+static void btf__exit(void)
+{
+}
 #endif  /* HAVE_BPF_SKEL */
 
 static void
@@ -147,7 +199,7 @@ static int run_test_threads(pthread_t *threads, pthread_barrier_t *barrier)
 
 static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrier)
 {
-       int ret;
+       int ret, expected_sigtraps;
 
        ctx.iterate_on = 3000;
 
@@ -156,7 +208,16 @@ static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrie
        ret = run_test_threads(threads, barrier);
        TEST_ASSERT_EQUAL("disable failed", ioctl(fd, PERF_EVENT_IOC_DISABLE, 0), 0);
 
-       TEST_ASSERT_EQUAL("unexpected sigtraps", ctx.signal_count, NUM_THREADS * ctx.iterate_on);
+       expected_sigtraps = NUM_THREADS * ctx.iterate_on;
+
+       if (ctx.signal_count < expected_sigtraps && kernel_with_sleepable_spinlocks()) {
+               pr_debug("Expected %d sigtraps, got %d, running on a kernel with sleepable spinlocks.\n",
+                        expected_sigtraps, ctx.signal_count);
+               pr_debug("See https://lore.kernel.org/all/e368f2c848d77fbc8d259f44e2055fe469c219cf.camel@gmx.de/\n");
+               return TEST_SKIP;
+       } else
+               TEST_ASSERT_EQUAL("unexpected sigtraps", ctx.signal_count, expected_sigtraps);
+
        TEST_ASSERT_EQUAL("missing signals or incorrectly delivered", ctx.tids_want_signal, 0);
        TEST_ASSERT_VAL("unexpected si_addr", ctx.first_siginfo.si_addr == &ctx.iterate_on);
 #if 0 /* FIXME: enable when libc's signal.h has si_perf_{type,data} */
@@ -221,6 +282,7 @@ out_restore_sigaction:
        sigaction(SIGTRAP, &oldact, NULL);
 out:
        pthread_barrier_destroy(&barrier);
+       btf__exit();
        return ret;
 }
 
index 4d7493fa01059112ff283e36b46e22f95839ecdf..290716783ac6a28d06567f7827ccb1bf68ffb135 100644 (file)
@@ -62,7 +62,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
        }
        evlist__add(evlist, evsel);
 
-       cpus = perf_cpu_map__dummy_new();
+       cpus = perf_cpu_map__new_any_cpu();
        threads = thread_map__new_by_tid(getpid());
        if (!cpus || !threads) {
                err = -ENOMEM;
index e52b031bedc5a9b545fe9b6c8d3f93b6aaefd03d..5cab17a1942e67d7767161a21fdee07599912d61 100644 (file)
@@ -351,7 +351,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
                goto out_err;
        }
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        if (!cpus) {
                pr_debug("perf_cpu_map__new failed!\n");
                goto out_err;
index 968dddde6ddaf0bede4fc7679d57bbb4ad2cd35b..d33d0952025cf5b65e80fb7e8464f634b5c313c1 100644 (file)
@@ -70,7 +70,7 @@ static int test__task_exit(struct test_suite *test __maybe_unused, int subtest _
         * evlist__prepare_workload we'll fill in the only thread
         * we're monitoring, the one forked there.
         */
-       cpus = perf_cpu_map__dummy_new();
+       cpus = perf_cpu_map__new_any_cpu();
        threads = thread_map__new_by_tid(-1);
        if (!cpus || !threads) {
                err = -ENOMEM;
index b394f3ac2d667bacb2d9e16aca78715c65ab5ed3..dad3d7414142d1befc3d6eebe48d81a39ace153a 100644 (file)
@@ -207,5 +207,6 @@ DECLARE_WORKLOAD(brstack);
 DECLARE_WORKLOAD(datasym);
 
 extern const char *dso_to_test;
+extern const char *test_objdump_path;
 
 #endif /* TESTS_H */
index 9dee63734e66a0c1b08c62bb1bbfb393ad9866d2..2a842f53fbb575a2f715d66898ff1bb6553dfa7f 100644 (file)
@@ -215,7 +215,7 @@ static int test__session_topology(struct test_suite *test __maybe_unused, int su
        if (session_write_header(path))
                goto free_path;
 
-       map = perf_cpu_map__new(NULL);
+       map = perf_cpu_map__new_online_cpus();
        if (map == NULL) {
                pr_debug("failed to get system cpumap\n");
                goto free_path;
index 1078a93b01aa018f3be1932dee7f3f694ad51828..822f893e67d5f6643f5f1794801b87630f64fca1 100644 (file)
@@ -112,18 +112,92 @@ static bool is_ignored_symbol(const char *name, char type)
        return false;
 }
 
+struct test__vmlinux_matches_kallsyms_cb_args {
+       struct machine kallsyms;
+       struct map *vmlinux_map;
+       bool header_printed;
+};
+
+static int test__vmlinux_matches_kallsyms_cb1(struct map *map, void *data)
+{
+       struct test__vmlinux_matches_kallsyms_cb_args *args = data;
+       struct dso *dso = map__dso(map);
+       /*
+        * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
+        * the kernel will have the path for the vmlinux file being used, so use
+        * the short name, less descriptive but the same ("[kernel]" in both
+        * cases.
+        */
+       struct map *pair = maps__find_by_name(args->kallsyms.kmaps,
+                                       (dso->kernel ? dso->short_name : dso->name));
+
+       if (pair)
+               map__set_priv(pair, 1);
+       else {
+               if (!args->header_printed) {
+                       pr_info("WARN: Maps only in vmlinux:\n");
+                       args->header_printed = true;
+               }
+               map__fprintf(map, stderr);
+       }
+       return 0;
+}
+
+static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data)
+{
+       struct test__vmlinux_matches_kallsyms_cb_args *args = data;
+       struct map *pair;
+       u64 mem_start = map__unmap_ip(args->vmlinux_map, map__start(map));
+       u64 mem_end = map__unmap_ip(args->vmlinux_map, map__end(map));
+
+       pair = maps__find(args->kallsyms.kmaps, mem_start);
+       if (pair == NULL || map__priv(pair))
+               return 0;
+
+       if (map__start(pair) == mem_start) {
+               struct dso *dso = map__dso(map);
+
+               if (!args->header_printed) {
+                       pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
+                       args->header_printed = true;
+               }
+
+               pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
+                       map__start(map), map__end(map), map__pgoff(map), dso->name);
+               if (mem_end != map__end(pair))
+                       pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
+                               map__start(pair), map__end(pair), map__pgoff(pair));
+               pr_info(" %s\n", dso->name);
+               map__set_priv(pair, 1);
+       }
+       return 0;
+}
+
+static int test__vmlinux_matches_kallsyms_cb3(struct map *map, void *data)
+{
+       struct test__vmlinux_matches_kallsyms_cb_args *args = data;
+
+       if (!map__priv(map)) {
+               if (!args->header_printed) {
+                       pr_info("WARN: Maps only in kallsyms:\n");
+                       args->header_printed = true;
+               }
+               map__fprintf(map, stderr);
+       }
+       return 0;
+}
+
 static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused,
                                        int subtest __maybe_unused)
 {
        int err = TEST_FAIL;
        struct rb_node *nd;
        struct symbol *sym;
-       struct map *kallsyms_map, *vmlinux_map;
-       struct map_rb_node *rb_node;
-       struct machine kallsyms, vmlinux;
+       struct map *kallsyms_map;
+       struct machine vmlinux;
        struct maps *maps;
        u64 mem_start, mem_end;
-       bool header_printed;
+       struct test__vmlinux_matches_kallsyms_cb_args args;
 
        /*
         * Step 1:
@@ -131,7 +205,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
         * Init the machines that will hold kernel, modules obtained from
         * both vmlinux + .ko files and from /proc/kallsyms split by modules.
         */
-       machine__init(&kallsyms, "", HOST_KERNEL_ID);
+       machine__init(&args.kallsyms, "", HOST_KERNEL_ID);
        machine__init(&vmlinux, "", HOST_KERNEL_ID);
 
        maps = machine__kernel_maps(&vmlinux);
@@ -143,7 +217,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
         * load /proc/kallsyms. Also create the modules maps from /proc/modules
         * and find the .ko files that match them in /lib/modules/`uname -r`/.
         */
-       if (machine__create_kernel_maps(&kallsyms) < 0) {
+       if (machine__create_kernel_maps(&args.kallsyms) < 0) {
                pr_debug("machine__create_kernel_maps failed");
                err = TEST_SKIP;
                goto out;
@@ -160,7 +234,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
         * be compacted against the list of modules found in the "vmlinux"
         * code and with the one got from /proc/modules from the "kallsyms" code.
         */
-       if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) {
+       if (machine__load_kallsyms(&args.kallsyms, "/proc/kallsyms") <= 0) {
                pr_debug("machine__load_kallsyms failed");
                err = TEST_SKIP;
                goto out;
@@ -174,7 +248,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
         * to see if the running kernel was relocated by checking if it has the
         * same value in the vmlinux file we load.
         */
-       kallsyms_map = machine__kernel_map(&kallsyms);
+       kallsyms_map = machine__kernel_map(&args.kallsyms);
 
        /*
         * Step 5:
@@ -186,7 +260,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
                goto out;
        }
 
-       vmlinux_map = machine__kernel_map(&vmlinux);
+       args.vmlinux_map = machine__kernel_map(&vmlinux);
 
        /*
         * Step 6:
@@ -213,7 +287,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
         * in the kallsyms dso. For the ones that are in both, check its names and
         * end addresses too.
         */
-       map__for_each_symbol(vmlinux_map, sym, nd) {
+       map__for_each_symbol(args.vmlinux_map, sym, nd) {
                struct symbol *pair, *first_pair;
 
                sym  = rb_entry(nd, struct symbol, rb_node);
@@ -221,10 +295,10 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
                if (sym->start == sym->end)
                        continue;
 
-               mem_start = map__unmap_ip(vmlinux_map, sym->start);
-               mem_end = map__unmap_ip(vmlinux_map, sym->end);
+               mem_start = map__unmap_ip(args.vmlinux_map, sym->start);
+               mem_end = map__unmap_ip(args.vmlinux_map, sym->end);
 
-               first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL);
+               first_pair = machine__find_kernel_symbol(&args.kallsyms, mem_start, NULL);
                pair = first_pair;
 
                if (pair && UM(pair->start) == mem_start) {
@@ -253,7 +327,8 @@ next_pair:
                                 */
                                continue;
                        } else {
-                               pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL);
+                               pair = machine__find_kernel_symbol_by_name(&args.kallsyms,
+                                                                          sym->name, NULL);
                                if (pair) {
                                        if (UM(pair->start) == mem_start)
                                                goto next_pair;
@@ -267,7 +342,7 @@ next_pair:
 
                                continue;
                        }
-               } else if (mem_start == map__end(kallsyms.vmlinux_map)) {
+               } else if (mem_start == map__end(args.kallsyms.vmlinux_map)) {
                        /*
                         * Ignore aliases to _etext, i.e. to the end of the kernel text area,
                         * such as __indirect_thunk_end.
@@ -289,78 +364,18 @@ next_pair:
        if (verbose <= 0)
                goto out;
 
-       header_printed = false;
-
-       maps__for_each_entry(maps, rb_node) {
-               struct map *map = rb_node->map;
-               struct dso *dso = map__dso(map);
-               /*
-                * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
-                * the kernel will have the path for the vmlinux file being used,
-                * so use the short name, less descriptive but the same ("[kernel]" in
-                * both cases.
-                */
-               struct map *pair = maps__find_by_name(kallsyms.kmaps, (dso->kernel ?
-                                                               dso->short_name :
-                                                               dso->name));
-               if (pair) {
-                       map__set_priv(pair, 1);
-               } else {
-                       if (!header_printed) {
-                               pr_info("WARN: Maps only in vmlinux:\n");
-                               header_printed = true;
-                       }
-                       map__fprintf(map, stderr);
-               }
-       }
-
-       header_printed = false;
-
-       maps__for_each_entry(maps, rb_node) {
-               struct map *pair, *map = rb_node->map;
-
-               mem_start = map__unmap_ip(vmlinux_map, map__start(map));
-               mem_end = map__unmap_ip(vmlinux_map, map__end(map));
+       args.header_printed = false;
+       maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb1, &args);
 
-               pair = maps__find(kallsyms.kmaps, mem_start);
-               if (pair == NULL || map__priv(pair))
-                       continue;
-
-               if (map__start(pair) == mem_start) {
-                       struct dso *dso = map__dso(map);
-
-                       if (!header_printed) {
-                               pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
-                               header_printed = true;
-                       }
-
-                       pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
-                               map__start(map), map__end(map), map__pgoff(map), dso->name);
-                       if (mem_end != map__end(pair))
-                               pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
-                                       map__start(pair), map__end(pair), map__pgoff(pair));
-                       pr_info(" %s\n", dso->name);
-                       map__set_priv(pair, 1);
-               }
-       }
-
-       header_printed = false;
-
-       maps = machine__kernel_maps(&kallsyms);
+       args.header_printed = false;
+       maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb2, &args);
 
-       maps__for_each_entry(maps, rb_node) {
-               struct map *map = rb_node->map;
+       args.header_printed = false;
+       maps = machine__kernel_maps(&args.kallsyms);
+       maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb3, &args);
 
-               if (!map__priv(map)) {
-                       if (!header_printed) {
-                               pr_info("WARN: Maps only in kallsyms:\n");
-                               header_printed = true;
-                       }
-                       map__fprintf(map, stderr);
-               }
-       }
 out:
-       machine__exit(&kallsyms);
+       machine__exit(&args.kallsyms);
        machine__exit(&vmlinux);
        return err;
 }
index af05269c2eb8a4a197de87bd9a7bfec00daefb14..457b29f91c3ee277429393bd4095a4de63ee7365 100644 (file)
@@ -7,7 +7,6 @@
 #include "../tests.h"
 
 static volatile sig_atomic_t done;
-static volatile unsigned count;
 
 /* We want to check this symbol in perf report */
 noinline void test_loop(void);
@@ -19,8 +18,7 @@ static void sighandler(int sig __maybe_unused)
 
 noinline void test_loop(void)
 {
-       while (!done)
-               __atomic_fetch_add(&count, 1, __ATOMIC_RELAXED);
+       while (!done);
 }
 
 static void *thfunc(void *arg)
index cc09dcaa891e04bb66e0a60cb496111acd1c9b72..7df4bf5b55a3cc2a8c5a31462129e8ac829a4e59 100755 (executable)
@@ -57,13 +57,13 @@ create_arch_errno_table_func()
        archlist="$1"
        default="$2"
 
-       printf 'const char *arch_syscalls__strerrno(const char *arch, int err)\n'
+       printf 'arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch)\n'
        printf '{\n'
        for arch in $archlist; do
                printf '\tif (!strcmp(arch, "%s"))\n' $(arch_string "$arch")
-               printf '\t\treturn errno_to_name__%s(err);\n' $(arch_string "$arch")
+               printf '\t\treturn errno_to_name__%s;\n' $(arch_string "$arch")
        done
-       printf '\treturn errno_to_name__%s(err);\n' $(arch_string "$default")
+       printf '\treturn errno_to_name__%s;\n' $(arch_string "$default")
        printf '}\n'
 }
 
@@ -76,7 +76,9 @@ EoHEADER
 
 # Create list of architectures that have a specific errno.h.
 archlist=""
-for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | sort -r); do
+for f in $toolsdir/arch/*/include/uapi/asm/errno.h; do
+       d=${f%/include/uapi/asm/errno.h}
+       arch="${d##*/}"
        test -f $toolsdir/arch/$arch/include/uapi/asm/errno.h && archlist="$archlist $arch"
 done
 
index 788e8f6bd90eb753af7b7e2928831837c99ff789..9feb794f5c6e15f408a372665f537df5a630e5a0 100644 (file)
@@ -251,6 +251,4 @@ size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool sh
 void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
                                    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg));
 
-const char *arch_syscalls__strerrno(const char *arch, int err);
-
 #endif /* _PERF_TRACE_BEAUTY_H */
index 8059342ca4126c381a144073f169cba6bc46a059..9455d9672f140d13daa1502bf5faf3c7986a8cc5 100755 (executable)
@@ -4,9 +4,9 @@
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
 printf "static const char *prctl_options[] = {\n"
-regex='^#define[[:space:]]{1}PR_(\w+)[[:space:]]*([[:xdigit:]]+)([[:space:]]*\/.*)?$'
+regex='^#define[[:space:]]{1}PR_(\w+)[[:space:]]*([[:xdigit:]]+)([[:space:]]*/.*)?$'
 grep -E $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \
-       sed -r "s/$regex/\2 \1/g"       | \
+       sed -E "s%$regex%\2 \1%g"       | \
        sort -n | xargs printf "\t[%s] = \"%s\",\n"
 printf "};\n"
 
index 8bc7ba62203e4a9d3c327487027d05d4da5a21fe..670c6db298ae029812a5eaa885cb986e7a1f56b2 100755 (executable)
@@ -18,10 +18,10 @@ grep -E $ipproto_regex ${uapi_header_dir}/in.h | \
 printf "};\n\n"
 
 printf "static const char *socket_level[] = {\n"
-socket_level_regex='^#define[[:space:]]+SOL_(\w+)[[:space:]]+([[:digit:]]+)([[:space:]]+\/.*)?'
+socket_level_regex='^#define[[:space:]]+SOL_(\w+)[[:space:]]+([[:digit:]]+)([[:space:]]+/.*)?'
 
 grep -E $socket_level_regex ${beauty_header_dir}/socket.h | \
-       sed -r "s/$socket_level_regex/\2 \1/g"  | \
+       sed -E "s%$socket_level_regex%\2 \1%g"  | \
        sort -n | xargs printf "\t[%s] = \"%s\",\n"
 printf "};\n\n"
 
index 5f5320f7c6e27d17a944e7196cfa1605c66357be..dc5943a6352d91dc67bafedfad09bdc40da1e135 100644 (file)
@@ -67,6 +67,7 @@ size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_a
        P_FLAG(BTIME);
        P_FLAG(MNT_ID);
        P_FLAG(DIOALIGN);
+       P_FLAG(MNT_ID_UNIQUE);
 
 #undef P_FLAG
 
index ccdb2cd11fbf0325f1e2bcfa35fa86d5986b3261..ec5e21932876038b99afbfa30560d856efd8afd5 100644 (file)
@@ -27,7 +27,6 @@ struct annotate_browser {
        struct rb_node             *curr_hot;
        struct annotation_line     *selection;
        struct arch                *arch;
-       struct annotation_options  *opts;
        bool                        searching_backwards;
        char                        search_bf[128];
 };
@@ -38,11 +37,10 @@ static inline struct annotation *browser__annotation(struct ui_browser *browser)
        return symbol__annotation(ms->sym);
 }
 
-static bool disasm_line__filter(struct ui_browser *browser, void *entry)
+static bool disasm_line__filter(struct ui_browser *browser __maybe_unused, void *entry)
 {
-       struct annotation *notes = browser__annotation(browser);
        struct annotation_line *al = list_entry(entry, struct annotation_line, node);
-       return annotation_line__filter(al, notes);
+       return annotation_line__filter(al);
 }
 
 static int ui_browser__jumps_percent_color(struct ui_browser *browser, int nr, bool current)
@@ -97,7 +95,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
        struct annotation_write_ops ops = {
                .first_line              = row == 0,
                .current_entry           = is_current_entry,
-               .change_color            = (!notes->options->hide_src_code &&
+               .change_color            = (!annotate_opts.hide_src_code &&
                                            (!is_current_entry ||
                                             (browser->use_navkeypressed &&
                                              !browser->navkeypressed))),
@@ -114,7 +112,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
        if (!browser->navkeypressed)
                ops.width += 1;
 
-       annotation_line__write(al, notes, &ops, ab->opts);
+       annotation_line__write(al, notes, &ops);
 
        if (ops.current_entry)
                ab->selection = al;
@@ -128,7 +126,7 @@ static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
 
        while (pos && pos->al.offset == -1) {
                pos = list_prev_entry(pos, al.node);
-               if (!ab->opts->hide_src_code)
+               if (!annotate_opts.hide_src_code)
                        diff++;
        }
 
@@ -188,14 +186,14 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
         *  name right after the '<' token and probably treating this like a
         *  'call' instruction.
         */
-       target = notes->offsets[cursor->ops.target.offset];
+       target = notes->src->offsets[cursor->ops.target.offset];
        if (target == NULL) {
                ui_helpline__printf("WARN: jump target inconsistency, press 'o', notes->offsets[%#x] = NULL\n",
                                    cursor->ops.target.offset);
                return;
        }
 
-       if (notes->options->hide_src_code) {
+       if (annotate_opts.hide_src_code) {
                from = cursor->al.idx_asm;
                to = target->idx_asm;
        } else {
@@ -224,7 +222,7 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
        int ret = ui_browser__list_head_refresh(browser);
        int pcnt_width = annotation__pcnt_width(notes);
 
-       if (notes->options->jump_arrows)
+       if (annotate_opts.jump_arrows)
                annotate_browser__draw_current_jump(browser);
 
        ui_browser__set_color(browser, HE_COLORSET_NORMAL);
@@ -258,7 +256,7 @@ static void disasm_rb_tree__insert(struct annotate_browser *browser,
                parent = *p;
                l = rb_entry(parent, struct annotation_line, rb_node);
 
-               if (disasm__cmp(al, l, browser->opts->percent_type) < 0)
+               if (disasm__cmp(al, l, annotate_opts.percent_type) < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
@@ -270,7 +268,6 @@ static void disasm_rb_tree__insert(struct annotate_browser *browser,
 static void annotate_browser__set_top(struct annotate_browser *browser,
                                      struct annotation_line *pos, u32 idx)
 {
-       struct annotation *notes = browser__annotation(&browser->b);
        unsigned back;
 
        ui_browser__refresh_dimensions(&browser->b);
@@ -280,7 +277,7 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
        while (browser->b.top_idx != 0 && back != 0) {
                pos = list_entry(pos->node.prev, struct annotation_line, node);
 
-               if (annotation_line__filter(pos, notes))
+               if (annotation_line__filter(pos))
                        continue;
 
                --browser->b.top_idx;
@@ -294,11 +291,10 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
 static void annotate_browser__set_rb_top(struct annotate_browser *browser,
                                         struct rb_node *nd)
 {
-       struct annotation *notes = browser__annotation(&browser->b);
        struct annotation_line * pos = rb_entry(nd, struct annotation_line, rb_node);
        u32 idx = pos->idx;
 
-       if (notes->options->hide_src_code)
+       if (annotate_opts.hide_src_code)
                idx = pos->idx_asm;
        annotate_browser__set_top(browser, pos, idx);
        browser->curr_hot = nd;
@@ -331,13 +327,13 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
                        double percent;
 
                        percent = annotation_data__percent(&pos->al.data[i],
-                                                          browser->opts->percent_type);
+                                                          annotate_opts.percent_type);
 
                        if (max_percent < percent)
                                max_percent = percent;
                }
 
-               if (max_percent < 0.01 && pos->al.ipc == 0) {
+               if (max_percent < 0.01 && (!pos->al.cycles || pos->al.cycles->ipc == 0)) {
                        RB_CLEAR_NODE(&pos->al.rb_node);
                        continue;
                }
@@ -380,12 +376,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
        browser->b.seek(&browser->b, offset, SEEK_CUR);
        al = list_entry(browser->b.top, struct annotation_line, node);
 
-       if (notes->options->hide_src_code) {
+       if (annotate_opts.hide_src_code) {
                if (al->idx_asm < offset)
                        offset = al->idx;
 
-               browser->b.nr_entries = notes->nr_entries;
-               notes->options->hide_src_code = false;
+               browser->b.nr_entries = notes->src->nr_entries;
+               annotate_opts.hide_src_code = false;
                browser->b.seek(&browser->b, -offset, SEEK_CUR);
                browser->b.top_idx = al->idx - offset;
                browser->b.index = al->idx;
@@ -402,8 +398,8 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
                if (al->idx_asm < offset)
                        offset = al->idx_asm;
 
-               browser->b.nr_entries = notes->nr_asm_entries;
-               notes->options->hide_src_code = true;
+               browser->b.nr_entries = notes->src->nr_asm_entries;
+               annotate_opts.hide_src_code = true;
                browser->b.seek(&browser->b, -offset, SEEK_CUR);
                browser->b.top_idx = al->idx_asm - offset;
                browser->b.index = al->idx_asm;
@@ -435,7 +431,7 @@ static void ui_browser__init_asm_mode(struct ui_browser *browser)
 {
        struct annotation *notes = browser__annotation(browser);
        ui_browser__reset_index(browser);
-       browser->nr_entries = notes->nr_asm_entries;
+       browser->nr_entries = notes->src->nr_asm_entries;
 }
 
 static int sym_title(struct symbol *sym, struct map *map, char *title,
@@ -483,8 +479,8 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
        target_ms.map = ms->map;
        target_ms.sym = dl->ops.target.sym;
        annotation__unlock(notes);
-       symbol__tui_annotate(&target_ms, evsel, hbt, browser->opts);
-       sym_title(ms->sym, ms->map, title, sizeof(title), browser->opts->percent_type);
+       symbol__tui_annotate(&target_ms, evsel, hbt);
+       sym_title(ms->sym, ms->map, title, sizeof(title), annotate_opts.percent_type);
        ui_browser__show_title(&browser->b, title);
        return true;
 }
@@ -500,7 +496,7 @@ struct disasm_line *annotate_browser__find_offset(struct annotate_browser *brows
        list_for_each_entry(pos, &notes->src->source, al.node) {
                if (pos->al.offset == offset)
                        return pos;
-               if (!annotation_line__filter(&pos->al, notes))
+               if (!annotation_line__filter(&pos->al))
                        ++*idx;
        }
 
@@ -544,7 +540,7 @@ struct annotation_line *annotate_browser__find_string(struct annotate_browser *b
 
        *idx = browser->b.index;
        list_for_each_entry_continue(al, &notes->src->source, node) {
-               if (annotation_line__filter(al, notes))
+               if (annotation_line__filter(al))
                        continue;
 
                ++*idx;
@@ -581,7 +577,7 @@ struct annotation_line *annotate_browser__find_string_reverse(struct annotate_br
 
        *idx = browser->b.index;
        list_for_each_entry_continue_reverse(al, &notes->src->source, node) {
-               if (annotation_line__filter(al, notes))
+               if (annotation_line__filter(al))
                        continue;
 
                --*idx;
@@ -659,7 +655,6 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
 
 static int annotate_browser__show(struct ui_browser *browser, char *title, const char *help)
 {
-       struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
        struct map_symbol *ms = browser->priv;
        struct symbol *sym = ms->sym;
        char symbol_dso[SYM_TITLE_MAX_SIZE];
@@ -667,7 +662,7 @@ static int annotate_browser__show(struct ui_browser *browser, char *title, const
        if (ui_browser__show(browser, title, help) < 0)
                return -1;
 
-       sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), ab->opts->percent_type);
+       sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), annotate_opts.percent_type);
 
        ui_browser__gotorc_title(browser, 0, 0);
        ui_browser__set_color(browser, HE_COLORSET_ROOT);
@@ -809,7 +804,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
                        annotate_browser__show(&browser->b, title, help);
                        continue;
                case 'k':
-                       notes->options->show_linenr = !notes->options->show_linenr;
+                       annotate_opts.show_linenr = !annotate_opts.show_linenr;
                        continue;
                case 'l':
                        annotate_browser__show_full_location (&browser->b);
@@ -822,18 +817,18 @@ static int annotate_browser__run(struct annotate_browser *browser,
                                ui_helpline__puts(help);
                        continue;
                case 'o':
-                       notes->options->use_offset = !notes->options->use_offset;
+                       annotate_opts.use_offset = !annotate_opts.use_offset;
                        annotation__update_column_widths(notes);
                        continue;
                case 'O':
-                       if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
-                               notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
+                       if (++annotate_opts.offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
+                               annotate_opts.offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
                        continue;
                case 'j':
-                       notes->options->jump_arrows = !notes->options->jump_arrows;
+                       annotate_opts.jump_arrows = !annotate_opts.jump_arrows;
                        continue;
                case 'J':
-                       notes->options->show_nr_jumps = !notes->options->show_nr_jumps;
+                       annotate_opts.show_nr_jumps = !annotate_opts.show_nr_jumps;
                        annotation__update_column_widths(notes);
                        continue;
                case '/':
@@ -860,7 +855,7 @@ show_help:
                                           browser->b.height,
                                           browser->b.index,
                                           browser->b.top_idx,
-                                          notes->nr_asm_entries);
+                                          notes->src->nr_asm_entries);
                }
                        continue;
                case K_ENTER:
@@ -884,7 +879,7 @@ show_sup_ins:
                        continue;
                }
                case 'P':
-                       map_symbol__annotation_dump(ms, evsel, browser->opts);
+                       map_symbol__annotation_dump(ms, evsel);
                        continue;
                case 't':
                        if (symbol_conf.show_total_period) {
@@ -897,15 +892,15 @@ show_sup_ins:
                        annotation__update_column_widths(notes);
                        continue;
                case 'c':
-                       if (notes->options->show_minmax_cycle)
-                               notes->options->show_minmax_cycle = false;
+                       if (annotate_opts.show_minmax_cycle)
+                               annotate_opts.show_minmax_cycle = false;
                        else
-                               notes->options->show_minmax_cycle = true;
+                               annotate_opts.show_minmax_cycle = true;
                        annotation__update_column_widths(notes);
                        continue;
                case 'p':
                case 'b':
-                       switch_percent_type(browser->opts, key == 'b');
+                       switch_percent_type(&annotate_opts, key == 'b');
                        hists__scnprintf_title(hists, title, sizeof(title));
                        annotate_browser__show(&browser->b, title, help);
                        continue;
@@ -932,26 +927,24 @@ out:
 }
 
 int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
-                            struct hist_browser_timer *hbt,
-                            struct annotation_options *opts)
+                            struct hist_browser_timer *hbt)
 {
-       return symbol__tui_annotate(ms, evsel, hbt, opts);
+       return symbol__tui_annotate(ms, evsel, hbt);
 }
 
 int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
-                            struct hist_browser_timer *hbt,
-                            struct annotation_options *opts)
+                            struct hist_browser_timer *hbt)
 {
        /* reset abort key so that it can get Ctrl-C as a key */
        SLang_reset_tty();
        SLang_init_tty(0, 0, 0);
+       SLtty_set_suspend_state(true);
 
-       return map_symbol__tui_annotate(&he->ms, evsel, hbt, opts);
+       return map_symbol__tui_annotate(&he->ms, evsel, hbt);
 }
 
 int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
-                        struct hist_browser_timer *hbt,
-                        struct annotation_options *opts)
+                        struct hist_browser_timer *hbt)
 {
        struct symbol *sym = ms->sym;
        struct annotation *notes = symbol__annotation(sym);
@@ -965,7 +958,6 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
                        .priv    = ms,
                        .use_navkeypressed = true,
                },
-               .opts = opts,
        };
        struct dso *dso;
        int ret = -1, err;
@@ -979,7 +971,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
                return -1;
 
        if (not_annotated) {
-               err = symbol__annotate2(ms, evsel, opts, &browser.arch);
+               err = symbol__annotate2(ms, evsel, &browser.arch);
                if (err) {
                        char msg[BUFSIZ];
                        dso->annotate_warned = true;
@@ -991,12 +983,12 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
 
        ui_helpline__push("Press ESC to exit");
 
-       browser.b.width = notes->max_line_len;
-       browser.b.nr_entries = notes->nr_entries;
+       browser.b.width = notes->src->max_line_len;
+       browser.b.nr_entries = notes->src->nr_entries;
        browser.b.entries = &notes->src->source,
        browser.b.width += 18; /* Percentage */
 
-       if (notes->options->hide_src_code)
+       if (annotate_opts.hide_src_code)
                ui_browser__init_asm_mode(&browser.b);
 
        ret = annotate_browser__run(&browser, evsel, hbt);
@@ -1006,6 +998,6 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
 
 out_free_offsets:
        if(not_annotated)
-               zfree(&notes->offsets);
+               zfree(&notes->src->offsets);
        return ret;
 }
index f4812b226818122b92ad5cdce9ba36071585b0db..0c02b3a8e121ffaaaa17647237ed5ab2c9ad67d0 100644 (file)
@@ -2250,8 +2250,7 @@ struct hist_browser *hist_browser__new(struct hists *hists)
 static struct hist_browser *
 perf_evsel_browser__new(struct evsel *evsel,
                        struct hist_browser_timer *hbt,
-                       struct perf_env *env,
-                       struct annotation_options *annotation_opts)
+                       struct perf_env *env)
 {
        struct hist_browser *browser = hist_browser__new(evsel__hists(evsel));
 
@@ -2259,7 +2258,6 @@ perf_evsel_browser__new(struct evsel *evsel,
                browser->hbt   = hbt;
                browser->env   = env;
                browser->title = hists_browser__scnprintf_title;
-               browser->annotation_opts = annotation_opts;
        }
        return browser;
 }
@@ -2432,8 +2430,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
        struct hist_entry *he;
        int err;
 
-       if (!browser->annotation_opts->objdump_path &&
-           perf_env__lookup_objdump(browser->env, &browser->annotation_opts->objdump_path))
+       if (!annotate_opts.objdump_path &&
+           perf_env__lookup_objdump(browser->env, &annotate_opts.objdump_path))
                return 0;
 
        notes = symbol__annotation(act->ms.sym);
@@ -2445,8 +2443,7 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
        else
                evsel = hists_to_evsel(browser->hists);
 
-       err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt,
-                                      browser->annotation_opts);
+       err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt);
        he = hist_browser__selected_entry(browser);
        /*
         * offer option to annotate the other branch source or target
@@ -2943,11 +2940,10 @@ next:
 
 static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *helpline,
                               bool left_exits, struct hist_browser_timer *hbt, float min_pcnt,
-                              struct perf_env *env, bool warn_lost_event,
-                              struct annotation_options *annotation_opts)
+                              struct perf_env *env, bool warn_lost_event)
 {
        struct hists *hists = evsel__hists(evsel);
-       struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts);
+       struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env);
        struct branch_info *bi = NULL;
 #define MAX_OPTIONS  16
        char *options[MAX_OPTIONS];
@@ -3004,6 +3000,7 @@ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *h
        /* reset abort key so that it can get Ctrl-C as a key */
        SLang_reset_tty();
        SLang_init_tty(0, 0, 0);
+       SLtty_set_suspend_state(true);
 
        if (min_pcnt)
                browser->min_pcnt = min_pcnt;
@@ -3398,7 +3395,6 @@ out:
 struct evsel_menu {
        struct ui_browser b;
        struct evsel *selection;
-       struct annotation_options *annotation_opts;
        bool lost_events, lost_events_warned;
        float min_pcnt;
        struct perf_env *env;
@@ -3499,8 +3495,7 @@ browse_hists:
                                hbt->timer(hbt->arg);
                        key = evsel__hists_browse(pos, nr_events, help, true, hbt,
                                                  menu->min_pcnt, menu->env,
-                                                 warn_lost_event,
-                                                 menu->annotation_opts);
+                                                 warn_lost_event);
                        ui_browser__show_title(&menu->b, title);
                        switch (key) {
                        case K_TAB:
@@ -3557,7 +3552,7 @@ static bool filter_group_entries(struct ui_browser *browser __maybe_unused,
 
 static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, const char *help,
                                      struct hist_browser_timer *hbt, float min_pcnt, struct perf_env *env,
-                                     bool warn_lost_event, struct annotation_options *annotation_opts)
+                                     bool warn_lost_event)
 {
        struct evsel *pos;
        struct evsel_menu menu = {
@@ -3572,7 +3567,6 @@ static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, con
                },
                .min_pcnt = min_pcnt,
                .env = env,
-               .annotation_opts = annotation_opts,
        };
 
        ui_helpline__push("Press ESC to exit");
@@ -3607,8 +3601,7 @@ static bool evlist__single_entry(struct evlist *evlist)
 }
 
 int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt,
-                            float min_pcnt, struct perf_env *env, bool warn_lost_event,
-                            struct annotation_options *annotation_opts)
+                            float min_pcnt, struct perf_env *env, bool warn_lost_event)
 {
        int nr_entries = evlist->core.nr_entries;
 
@@ -3617,7 +3610,7 @@ single_entry: {
                struct evsel *first = evlist__first(evlist);
 
                return evsel__hists_browse(first, nr_entries, help, false, hbt, min_pcnt,
-                                          env, warn_lost_event, annotation_opts);
+                                          env, warn_lost_event);
        }
        }
 
@@ -3635,7 +3628,7 @@ single_entry: {
        }
 
        return __evlist__tui_browse_hists(evlist, nr_entries, help, hbt, min_pcnt, env,
-                                         warn_lost_event, annotation_opts);
+                                         warn_lost_event);
 }
 
 static int block_hists_browser__title(struct hist_browser *browser, char *bf,
@@ -3654,8 +3647,7 @@ static int block_hists_browser__title(struct hist_browser *browser, char *bf,
 }
 
 int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
-                          float min_percent, struct perf_env *env,
-                          struct annotation_options *annotation_opts)
+                          float min_percent, struct perf_env *env)
 {
        struct hists *hists = &bh->block_hists;
        struct hist_browser *browser;
@@ -3672,11 +3664,11 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
        browser->title = block_hists_browser__title;
        browser->min_pcnt = min_percent;
        browser->env = env;
-       browser->annotation_opts = annotation_opts;
 
        /* reset abort key so that it can get Ctrl-C as a key */
        SLang_reset_tty();
        SLang_init_tty(0, 0, 0);
+       SLtty_set_suspend_state(true);
 
        memset(&action, 0, sizeof(action));
 
index 1e938d9ffa5ee26177152840acdf73072db6c289..de46f6c56b0ef0d798c106598446f044670f2994 100644 (file)
@@ -4,7 +4,6 @@
 
 #include "ui/browser.h"
 
-struct annotation_options;
 struct evsel;
 
 struct hist_browser {
@@ -15,7 +14,6 @@ struct hist_browser {
        struct hist_browser_timer *hbt;
        struct pstack       *pstack;
        struct perf_env     *env;
-       struct annotation_options *annotation_opts;
        struct evsel        *block_evsel;
        int                  print_seq;
        bool                 show_dso;
index 47d2c7a8cbe13cba1a3f9d46fd3c720cf0d2149c..50d45054ed6c1b435faf5cb4634ceae6eea03491 100644 (file)
@@ -166,6 +166,7 @@ void run_script(char *cmd)
        printf("\033[c\033[H\033[J");
        fflush(stdout);
        SLang_init_tty(0, 0, 0);
+       SLtty_set_suspend_state(true);
        SLsmg_refresh();
 }
 
index 2effac77ca8c6742fcd8a0287f0ccf54235c8d56..394861245fd3e48ff1cc43ae14b97dd2213dc64e 100644 (file)
@@ -162,7 +162,6 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct map_symbol *ms,
 }
 
 static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
-                               struct annotation_options *options,
                                struct hist_browser_timer *hbt)
 {
        struct dso *dso = map__dso(ms->map);
@@ -176,7 +175,7 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
        if (dso->annotate_warned)
                return -1;
 
-       err = symbol__annotate(ms, evsel, options, NULL);
+       err = symbol__annotate(ms, evsel, NULL);
        if (err) {
                char msg[BUFSIZ];
                dso->annotate_warned = true;
@@ -244,10 +243,9 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
 
 int hist_entry__gtk_annotate(struct hist_entry *he,
                             struct evsel *evsel,
-                            struct annotation_options *options,
                             struct hist_browser_timer *hbt)
 {
-       return symbol__gtk_annotate(&he->ms, evsel, options, hbt);
+       return symbol__gtk_annotate(&he->ms, evsel, hbt);
 }
 
 void perf_gtk__show_annotations(void)
index 1e84dceb52671385696db95e603b008e0d19efda..a2b497f03fd6e478f11136e5ef21e987f4aeb89d 100644 (file)
@@ -56,13 +56,11 @@ struct evsel;
 struct evlist;
 struct hist_entry;
 struct hist_browser_timer;
-struct annotation_options;
 
 int evlist__gtk_browse_hists(struct evlist *evlist, const char *help,
                             struct hist_browser_timer *hbt, float min_pcnt);
 int hist_entry__gtk_annotate(struct hist_entry *he,
                             struct evsel *evsel,
-                            struct annotation_options *options,
                             struct hist_browser_timer *hbt);
 void perf_gtk__show_annotations(void);
 
index 605d9e175ea73b662a51c9fe0257a073fcfaf199..16c6eff4d24116b0fde68f60319330f1d4a0f1ac 100644 (file)
@@ -2,12 +2,14 @@
 #include <signal.h>
 #include <stdbool.h>
 #include <stdlib.h>
+#include <termios.h>
 #include <unistd.h>
 #include <linux/kernel.h>
 #ifdef HAVE_BACKTRACE_SUPPORT
 #include <execinfo.h>
 #endif
 
+#include "../../util/color.h"
 #include "../../util/debug.h"
 #include "../browser.h"
 #include "../helpline.h"
@@ -121,6 +123,23 @@ static void ui__signal(int sig)
        exit(0);
 }
 
+static void ui__sigcont(int sig)
+{
+       static struct termios tty;
+
+       if (sig == SIGTSTP) {
+               while (tcgetattr(SLang_TT_Read_FD, &tty) == -1 && errno == EINTR)
+                       ;
+               while (write(SLang_TT_Read_FD, PERF_COLOR_RESET, sizeof(PERF_COLOR_RESET) - 1) == -1 && errno == EINTR)
+                       ;
+               raise(SIGSTOP);
+       } else {
+               while (tcsetattr(SLang_TT_Read_FD, TCSADRAIN, &tty) == -1 && errno == EINTR)
+                       ;
+               raise(SIGWINCH);
+       }
+}
+
 int ui__init(void)
 {
        int err;
@@ -135,6 +154,7 @@ int ui__init(void)
        err = SLang_init_tty(-1, 0, 0);
        if (err < 0)
                goto out;
+       SLtty_set_suspend_state(true);
 
        err = SLkp_init();
        if (err < 0) {
@@ -149,6 +169,8 @@ int ui__init(void)
        signal(SIGINT, ui__signal);
        signal(SIGQUIT, ui__signal);
        signal(SIGTERM, ui__signal);
+       signal(SIGTSTP, ui__sigcont);
+       signal(SIGCONT, ui__sigcont);
 
        perf_error__register(&perf_tui_eops);
 
index 988473bf907aee74f9863fe52bb59a5f3b4dd387..8027f450fa3e489e04769f42a146e4438350dbbb 100644 (file)
@@ -195,6 +195,8 @@ endif
 perf-$(CONFIG_DWARF) += probe-finder.o
 perf-$(CONFIG_DWARF) += dwarf-aux.o
 perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += debuginfo.o
+perf-$(CONFIG_DWARF) += annotate-data.o
 
 perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
 perf-$(CONFIG_LOCAL_LIBUNWIND)    += unwind-libunwind-local.o
diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c
new file mode 100644 (file)
index 0000000..f22b4f1
--- /dev/null
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Convert sample address to data type using DWARF debug info.
+ *
+ * Written by Namhyung Kim <namhyung@kernel.org>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "annotate-data.h"
+#include "debuginfo.h"
+#include "debug.h"
+#include "dso.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "map.h"
+#include "map_symbol.h"
+#include "strbuf.h"
+#include "symbol.h"
+#include "symbol_conf.h"
+
+/*
+ * Compare type name and size to maintain them in a tree.
+ * I'm not sure if DWARF would have information of a single type in many
+ * different places (compilation units).  If not, it could compare the
+ * offset of the type entry in the .debug_info section.
+ */
+static int data_type_cmp(const void *_key, const struct rb_node *node)
+{
+       const struct annotated_data_type *key = _key;
+       struct annotated_data_type *type;
+
+       type = rb_entry(node, struct annotated_data_type, node);
+
+       if (key->self.size != type->self.size)
+               return key->self.size - type->self.size;
+       return strcmp(key->self.type_name, type->self.type_name);
+}
+
+static bool data_type_less(struct rb_node *node_a, const struct rb_node *node_b)
+{
+       struct annotated_data_type *a, *b;
+
+       a = rb_entry(node_a, struct annotated_data_type, node);
+       b = rb_entry(node_b, struct annotated_data_type, node);
+
+       if (a->self.size != b->self.size)
+               return a->self.size < b->self.size;
+       return strcmp(a->self.type_name, b->self.type_name) < 0;
+}
+
+/* Recursively add new members for struct/union */
+static int __add_member_cb(Dwarf_Die *die, void *arg)
+{
+       struct annotated_member *parent = arg;
+       struct annotated_member *member;
+       Dwarf_Die member_type, die_mem;
+       Dwarf_Word size, loc;
+       Dwarf_Attribute attr;
+       struct strbuf sb;
+       int tag;
+
+       if (dwarf_tag(die) != DW_TAG_member)
+               return DIE_FIND_CB_SIBLING;
+
+       member = zalloc(sizeof(*member));
+       if (member == NULL)
+               return DIE_FIND_CB_END;
+
+       strbuf_init(&sb, 32);
+       die_get_typename(die, &sb);
+
+       die_get_real_type(die, &member_type);
+       if (dwarf_aggregate_size(&member_type, &size) < 0)
+               size = 0;
+
+       if (!dwarf_attr_integrate(die, DW_AT_data_member_location, &attr))
+               loc = 0;
+       else
+               dwarf_formudata(&attr, &loc);
+
+       member->type_name = strbuf_detach(&sb, NULL);
+       /* member->var_name can be NULL */
+       if (dwarf_diename(die))
+               member->var_name = strdup(dwarf_diename(die));
+       member->size = size;
+       member->offset = loc + parent->offset;
+       INIT_LIST_HEAD(&member->children);
+       list_add_tail(&member->node, &parent->children);
+
+       tag = dwarf_tag(&member_type);
+       switch (tag) {
+       case DW_TAG_structure_type:
+       case DW_TAG_union_type:
+               die_find_child(&member_type, __add_member_cb, member, &die_mem);
+               break;
+       default:
+               break;
+       }
+       return DIE_FIND_CB_SIBLING;
+}
+
+static void add_member_types(struct annotated_data_type *parent, Dwarf_Die *type)
+{
+       Dwarf_Die die_mem;
+
+       die_find_child(type, __add_member_cb, &parent->self, &die_mem);
+}
+
+static void delete_members(struct annotated_member *member)
+{
+       struct annotated_member *child, *tmp;
+
+       list_for_each_entry_safe(child, tmp, &member->children, node) {
+               list_del(&child->node);
+               delete_members(child);
+               free(child->type_name);
+               free(child->var_name);
+               free(child);
+       }
+}
+
+static struct annotated_data_type *dso__findnew_data_type(struct dso *dso,
+                                                         Dwarf_Die *type_die)
+{
+       struct annotated_data_type *result = NULL;
+       struct annotated_data_type key;
+       struct rb_node *node;
+       struct strbuf sb;
+       char *type_name;
+       Dwarf_Word size;
+
+       strbuf_init(&sb, 32);
+       if (die_get_typename_from_type(type_die, &sb) < 0)
+               strbuf_add(&sb, "(unknown type)", 14);
+       type_name = strbuf_detach(&sb, NULL);
+       dwarf_aggregate_size(type_die, &size);
+
+       /* Check existing nodes in dso->data_types tree */
+       key.self.type_name = type_name;
+       key.self.size = size;
+       node = rb_find(&key, &dso->data_types, data_type_cmp);
+       if (node) {
+               result = rb_entry(node, struct annotated_data_type, node);
+               free(type_name);
+               return result;
+       }
+
+       /* If not, add a new one */
+       result = zalloc(sizeof(*result));
+       if (result == NULL) {
+               free(type_name);
+               return NULL;
+       }
+
+       result->self.type_name = type_name;
+       result->self.size = size;
+       INIT_LIST_HEAD(&result->self.children);
+
+       if (symbol_conf.annotate_data_member)
+               add_member_types(result, type_die);
+
+       rb_add(&result->node, &dso->data_types, data_type_less);
+       return result;
+}
+
+static bool find_cu_die(struct debuginfo *di, u64 pc, Dwarf_Die *cu_die)
+{
+       Dwarf_Off off, next_off;
+       size_t header_size;
+
+       if (dwarf_addrdie(di->dbg, pc, cu_die) != NULL)
+               return cu_die;
+
+       /*
+        * There are some kernels that don't have full aranges and contain only a few
+        * aranges entries.  Fallback to iterate all CU entries in .debug_info
+        * in case it's missing.
+        */
+       off = 0;
+       while (dwarf_nextcu(di->dbg, off, &next_off, &header_size,
+                           NULL, NULL, NULL) == 0) {
+               if (dwarf_offdie(di->dbg, off + header_size, cu_die) &&
+                   dwarf_haspc(cu_die, pc))
+                       return true;
+
+               off = next_off;
+       }
+       return false;
+}
+
+/* The type info will be saved in @type_die */
+static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset)
+{
+       Dwarf_Word size;
+
+       /* Get the type of the variable */
+       if (die_get_real_type(var_die, type_die) == NULL) {
+               pr_debug("variable has no type\n");
+               ann_data_stat.no_typeinfo++;
+               return -1;
+       }
+
+       /*
+        * It expects a pointer type for a memory access.
+        * Convert to a real type it points to.
+        */
+       if (dwarf_tag(type_die) != DW_TAG_pointer_type ||
+           die_get_real_type(type_die, type_die) == NULL) {
+               pr_debug("no pointer or no type\n");
+               ann_data_stat.no_typeinfo++;
+               return -1;
+       }
+
+       /* Get the size of the actual type */
+       if (dwarf_aggregate_size(type_die, &size) < 0) {
+               pr_debug("type size is unknown\n");
+               ann_data_stat.invalid_size++;
+               return -1;
+       }
+
+       /* Minimal sanity check */
+       if ((unsigned)offset >= size) {
+               pr_debug("offset: %d is bigger than size: %" PRIu64 "\n", offset, size);
+               ann_data_stat.bad_offset++;
+               return -1;
+       }
+
+       return 0;
+}
+
+/* The result will be saved in @type_die */
+static int find_data_type_die(struct debuginfo *di, u64 pc,
+                             int reg, int offset, Dwarf_Die *type_die)
+{
+       Dwarf_Die cu_die, var_die;
+       Dwarf_Die *scopes = NULL;
+       int ret = -1;
+       int i, nr_scopes;
+
+       /* Get a compile_unit for this address */
+       if (!find_cu_die(di, pc, &cu_die)) {
+               pr_debug("cannot find CU for address %" PRIx64 "\n", pc);
+               ann_data_stat.no_cuinfo++;
+               return -1;
+       }
+
+       /* Get a list of nested scopes - i.e. (inlined) functions and blocks. */
+       nr_scopes = die_get_scopes(&cu_die, pc, &scopes);
+
+       /* Search from the inner-most scope to the outer */
+       for (i = nr_scopes - 1; i >= 0; i--) {
+               /* Look up variables/parameters in this scope */
+               if (!die_find_variable_by_reg(&scopes[i], pc, reg, &var_die))
+                       continue;
+
+               /* Found a variable, see if it's correct */
+               ret = check_variable(&var_die, type_die, offset);
+               goto out;
+       }
+       if (ret < 0)
+               ann_data_stat.no_var++;
+
+out:
+       free(scopes);
+       return ret;
+}
+
+/**
+ * find_data_type - Return a data type at the location
+ * @ms: map and symbol at the location
+ * @ip: instruction address of the memory access
+ * @reg: register that holds the base address
+ * @offset: offset from the base address
+ *
+ * This function searches the debug information of the binary to get the data
+ * type it accesses.  The exact location is expressed by (ip, reg, offset).
+ * It returns %NULL if not found.
+ */
+struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip,
+                                          int reg, int offset)
+{
+       struct annotated_data_type *result = NULL;
+       struct dso *dso = map__dso(ms->map);
+       struct debuginfo *di;
+       Dwarf_Die type_die;
+       u64 pc;
+
+       di = debuginfo__new(dso->long_name);
+       if (di == NULL) {
+               pr_debug("cannot get the debug info\n");
+               return NULL;
+       }
+
+       /*
+        * IP is a relative instruction address from the start of the map, as
+        * it can be randomized/relocated, it needs to translate to PC which is
+        * a file address for DWARF processing.
+        */
+       pc = map__rip_2objdump(ms->map, ip);
+       if (find_data_type_die(di, pc, reg, offset, &type_die) < 0)
+               goto out;
+
+       result = dso__findnew_data_type(dso, &type_die);
+
+out:
+       debuginfo__delete(di);
+       return result;
+}
+
+static int alloc_data_type_histograms(struct annotated_data_type *adt, int nr_entries)
+{
+       int i;
+       size_t sz = sizeof(struct type_hist);
+
+       sz += sizeof(struct type_hist_entry) * adt->self.size;
+
+       /* Allocate a table of pointers for each event */
+       adt->nr_histograms = nr_entries;
+       adt->histograms = calloc(nr_entries, sizeof(*adt->histograms));
+       if (adt->histograms == NULL)
+               return -ENOMEM;
+
+       /*
+        * Each histogram is allocated for the whole size of the type.
+        * TODO: Probably we can move the histogram to members.
+        */
+       for (i = 0; i < nr_entries; i++) {
+               adt->histograms[i] = zalloc(sz);
+               if (adt->histograms[i] == NULL)
+                       goto err;
+       }
+       return 0;
+
+err:
+       while (--i >= 0)
+               free(adt->histograms[i]);
+       free(adt->histograms);
+       return -ENOMEM;
+}
+
+static void delete_data_type_histograms(struct annotated_data_type *adt)
+{
+       for (int i = 0; i < adt->nr_histograms; i++)
+               free(adt->histograms[i]);
+       free(adt->histograms);
+}
+
+void annotated_data_type__tree_delete(struct rb_root *root)
+{
+       struct annotated_data_type *pos;
+
+       while (!RB_EMPTY_ROOT(root)) {
+               struct rb_node *node = rb_first(root);
+
+               rb_erase(node, root);
+               pos = rb_entry(node, struct annotated_data_type, node);
+               delete_members(&pos->self);
+               delete_data_type_histograms(pos);
+               free(pos->self.type_name);
+               free(pos);
+       }
+}
+
+/**
+ * annotated_data_type__update_samples - Update histogram
+ * @adt: Data type to update
+ * @evsel: Event to update
+ * @offset: Offset in the type
+ * @nr_samples: Number of samples at this offset
+ * @period: Event count at this offset
+ *
+ * This function updates type histogram at @offset for @evsel.  Samples are
+ * aggregated before calling this function so it can be called with more
+ * than one sample at a certain offset.
+ */
+int annotated_data_type__update_samples(struct annotated_data_type *adt,
+                                       struct evsel *evsel, int offset,
+                                       int nr_samples, u64 period)
+{
+       struct type_hist *h;
+
+       if (adt == NULL)
+               return 0;
+
+       if (adt->histograms == NULL) {
+               int nr = evsel->evlist->core.nr_entries;
+
+               if (alloc_data_type_histograms(adt, nr) < 0)
+                       return -1;
+       }
+
+       if (offset < 0 || offset >= adt->self.size)
+               return -1;
+
+       h = adt->histograms[evsel->core.idx];
+
+       h->nr_samples += nr_samples;
+       h->addr[offset].nr_samples += nr_samples;
+       h->period += period;
+       h->addr[offset].period += period;
+       return 0;
+}
diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h
new file mode 100644 (file)
index 0000000..8e73096
--- /dev/null
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PERF_ANNOTATE_DATA_H
+#define _PERF_ANNOTATE_DATA_H
+
+#include <errno.h>
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+struct evsel;
+struct map_symbol;
+
+/**
+ * struct annotated_member - Type of member field
+ * @node: List entry in the parent list
+ * @children: List head for child nodes
+ * @type_name: Name of the member type
+ * @var_name: Name of the member variable
+ * @offset: Offset from the outer data type
+ * @size: Size of the member field
+ *
+ * This represents a member type in a data type.
+ */
+struct annotated_member {
+       struct list_head node;
+       struct list_head children;
+       char *type_name;
+       char *var_name;
+       int offset;
+       int size;
+};
+
+/**
+ * struct type_hist_entry - Histogram entry per offset
+ * @nr_samples: Number of samples
+ * @period: Count of event
+ */
+struct type_hist_entry {
+       int nr_samples;
+       u64 period;
+};
+
+/**
+ * struct type_hist - Type histogram for each event
+ * @nr_samples: Total number of samples in this data type
+ * @period: Total count of the event in this data type
+ * @addr: Array of histogram entries, one per byte offset in the type
+ */
+struct type_hist {
+       u64                     nr_samples;
+       u64                     period;
+       struct type_hist_entry  addr[];
+};
+
+/**
+ * struct annotated_data_type - Data type to profile
+ * @node: RB-tree node for dso->data_types tree
+ * @self: Actual type information
+ * @nr_histograms: Number of histogram entries
+ * @histograms: An array of pointers to histograms
+ *
+ * This represents a data type accessed by samples in the profile data.
+ */
+struct annotated_data_type {
+       struct rb_node node;
+       struct annotated_member self;
+       int nr_histograms;
+       struct type_hist **histograms;
+};
+
+extern struct annotated_data_type unknown_type;
+
+/**
+ * struct annotated_data_stat - Debug statistics
+ * @total: Total number of entries
+ * @no_sym: No symbol or map found
+ * @no_insn: Failed to get disasm line
+ * @no_insn_ops: The instruction has no operands
+ * @no_mem_ops: The instruction has no memory operands
+ * @no_reg: Failed to extract a register from the operand
+ * @no_dbginfo: The binary has no debug information
+ * @no_cuinfo: Failed to find a compile_unit
+ * @no_var: Failed to find a matching variable
+ * @no_typeinfo: Failed to get a type info for the variable
+ * @invalid_size: Failed to get a size info of the type
+ * @bad_offset: The access offset is out of the type
+ */
+struct annotated_data_stat {
+       int total;
+       int no_sym;
+       int no_insn;
+       int no_insn_ops;
+       int no_mem_ops;
+       int no_reg;
+       int no_dbginfo;
+       int no_cuinfo;
+       int no_var;
+       int no_typeinfo;
+       int invalid_size;
+       int bad_offset;
+};
+extern struct annotated_data_stat ann_data_stat;
+
+#ifdef HAVE_DWARF_SUPPORT
+
+/* Returns data type at the location (ip, reg, offset) */
+struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip,
+                                          int reg, int offset);
+
+/* Update type access histogram at the given offset */
+int annotated_data_type__update_samples(struct annotated_data_type *adt,
+                                       struct evsel *evsel, int offset,
+                                       int nr_samples, u64 period);
+
+/* Release all data type information in the tree */
+void annotated_data_type__tree_delete(struct rb_root *root);
+
+#else /* HAVE_DWARF_SUPPORT */
+
+static inline struct annotated_data_type *
+find_data_type(struct map_symbol *ms __maybe_unused, u64 ip __maybe_unused,
+              int reg __maybe_unused, int offset __maybe_unused)
+{
+       return NULL;
+}
+
+static inline int
+annotated_data_type__update_samples(struct annotated_data_type *adt __maybe_unused,
+                                   struct evsel *evsel __maybe_unused,
+                                   int offset __maybe_unused,
+                                   int nr_samples __maybe_unused,
+                                   u64 period __maybe_unused)
+{
+       return -1;
+}
+
+static inline void annotated_data_type__tree_delete(struct rb_root *root __maybe_unused)
+{
+}
+
+#endif /* HAVE_DWARF_SUPPORT */
+
+#endif /* _PERF_ANNOTATE_DATA_H */
index 82956adf99632d742f777f01d4177ce575ddfbd6..9b70ab110ce79f24da580611f1a2098726f1ae12 100644 (file)
 #include "units.h"
 #include "debug.h"
 #include "annotate.h"
+#include "annotate-data.h"
 #include "evsel.h"
 #include "evlist.h"
 #include "bpf-event.h"
 #include "bpf-utils.h"
 #include "block-range.h"
 #include "string2.h"
+#include "dwarf-regs.h"
 #include "util/event.h"
 #include "util/sharded_mutex.h"
 #include "arch/common.h"
@@ -57,6 +59,9 @@
 
 #include <linux/ctype.h>
 
+/* global annotation options */
+struct annotation_options annotate_opts;
+
 static regex_t  file_lineno;
 
 static struct ins_ops *ins__find(struct arch *arch, const char *name);
@@ -85,6 +90,8 @@ struct arch {
        struct          {
                char comment_char;
                char skip_functions_char;
+               char register_char;
+               char memory_ref_char;
        } objdump;
 };
 
@@ -96,6 +103,10 @@ static struct ins_ops nop_ops;
 static struct ins_ops lock_ops;
 static struct ins_ops ret_ops;
 
+/* Data type collection debug statistics */
+struct annotated_data_stat ann_data_stat;
+LIST_HEAD(ann_insn_stat);
+
 static int arch__grow_instructions(struct arch *arch)
 {
        struct ins *new_instructions;
@@ -188,6 +199,8 @@ static struct arch architectures[] = {
                .insn_suffix = "bwlq",
                .objdump =  {
                        .comment_char = '#',
+                       .register_char = '%',
+                       .memory_ref_char = '(',
                },
        },
        {
@@ -340,10 +353,10 @@ bool ins__is_call(const struct ins *ins)
  */
 static inline const char *validate_comma(const char *c, struct ins_operands *ops)
 {
-       if (ops->raw_comment && c > ops->raw_comment)
+       if (ops->jump.raw_comment && c > ops->jump.raw_comment)
                return NULL;
 
-       if (ops->raw_func_start && c > ops->raw_func_start)
+       if (ops->jump.raw_func_start && c > ops->jump.raw_func_start)
                return NULL;
 
        return c;
@@ -359,8 +372,8 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
        const char *c = strchr(ops->raw, ',');
        u64 start, end;
 
-       ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
-       ops->raw_func_start = strchr(ops->raw, '<');
+       ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+       ops->jump.raw_func_start = strchr(ops->raw, '<');
 
        c = validate_comma(c, ops);
 
@@ -462,7 +475,16 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
                         ops->target.offset);
 }
 
+static void jump__delete(struct ins_operands *ops __maybe_unused)
+{
+       /*
+        * The ops->jump.raw_comment and ops->jump.raw_func_start belong to the
+        * raw string, don't free them.
+        */
+}
+
 static struct ins_ops jump_ops = {
+       .free      = jump__delete,
        .parse     = jump__parse,
        .scnprintf = jump__scnprintf,
 };
@@ -557,6 +579,34 @@ static struct ins_ops lock_ops = {
        .scnprintf = lock__scnprintf,
 };
 
+/*
+ * Check if the operand has more than one registers like x86 SIB addressing:
+ *   0x1234(%rax, %rbx, 8)
+ *
+ * But it doesn't care segment selectors like %gs:0x5678(%rcx), so just check
+ * the input string after 'memory_ref_char' if exists.
+ */
+static bool check_multi_regs(struct arch *arch, const char *op)
+{
+       int count = 0;
+
+       if (arch->objdump.register_char == 0)
+               return false;
+
+       if (arch->objdump.memory_ref_char) {
+               op = strchr(op, arch->objdump.memory_ref_char);
+               if (op == NULL)
+                       return false;
+       }
+
+       while ((op = strchr(op, arch->objdump.register_char)) != NULL) {
+               count++;
+               op++;
+       }
+
+       return count > 1;
+}
+
 static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
 {
        char *s = strchr(ops->raw, ','), *target, *comment, prev;
@@ -584,6 +634,8 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_sy
        if (ops->source.raw == NULL)
                return -1;
 
+       ops->source.multi_regs = check_multi_regs(arch, ops->source.raw);
+
        target = skip_spaces(++s);
        comment = strchr(s, arch->objdump.comment_char);
 
@@ -604,6 +656,8 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_sy
        if (ops->target.raw == NULL)
                goto out_free_source;
 
+       ops->target.multi_regs = check_multi_regs(arch, ops->target.raw);
+
        if (comment == NULL)
                return 0;
 
@@ -795,6 +849,11 @@ static struct arch *arch__find(const char *name)
        return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
 }
 
+bool arch__is(struct arch *arch, const char *name)
+{
+       return !strcmp(arch->name, name);
+}
+
 static struct annotated_source *annotated_source__new(void)
 {
        struct annotated_source *src = zalloc(sizeof(*src));
@@ -810,7 +869,6 @@ static __maybe_unused void annotated_source__delete(struct annotated_source *src
        if (src == NULL)
                return;
        zfree(&src->histograms);
-       zfree(&src->cycles_hist);
        free(src);
 }
 
@@ -845,18 +903,6 @@ static int annotated_source__alloc_histograms(struct annotated_source *src,
        return src->histograms ? 0 : -1;
 }
 
-/* The cycles histogram is lazily allocated. */
-static int symbol__alloc_hist_cycles(struct symbol *sym)
-{
-       struct annotation *notes = symbol__annotation(sym);
-       const size_t size = symbol__size(sym);
-
-       notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
-       if (notes->src->cycles_hist == NULL)
-               return -1;
-       return 0;
-}
-
 void symbol__annotate_zero_histograms(struct symbol *sym)
 {
        struct annotation *notes = symbol__annotation(sym);
@@ -865,9 +911,10 @@ void symbol__annotate_zero_histograms(struct symbol *sym)
        if (notes->src != NULL) {
                memset(notes->src->histograms, 0,
                       notes->src->nr_histograms * notes->src->sizeof_sym_hist);
-               if (notes->src->cycles_hist)
-                       memset(notes->src->cycles_hist, 0,
-                               symbol__size(sym) * sizeof(struct cyc_hist));
+       }
+       if (notes->branch && notes->branch->cycles_hist) {
+               memset(notes->branch->cycles_hist, 0,
+                      symbol__size(sym) * sizeof(struct cyc_hist));
        }
        annotation__unlock(notes);
 }
@@ -958,23 +1005,33 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms,
        return 0;
 }
 
+struct annotated_branch *annotation__get_branch(struct annotation *notes)
+{
+       if (notes == NULL)
+               return NULL;
+
+       if (notes->branch == NULL)
+               notes->branch = zalloc(sizeof(*notes->branch));
+
+       return notes->branch;
+}
+
 static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
 {
        struct annotation *notes = symbol__annotation(sym);
+       struct annotated_branch *branch;
 
-       if (notes->src == NULL) {
-               notes->src = annotated_source__new();
-               if (notes->src == NULL)
-                       return NULL;
-               goto alloc_cycles_hist;
-       }
+       branch = annotation__get_branch(notes);
+       if (branch == NULL)
+               return NULL;
+
+       if (branch->cycles_hist == NULL) {
+               const size_t size = symbol__size(sym);
 
-       if (!notes->src->cycles_hist) {
-alloc_cycles_hist:
-               symbol__alloc_hist_cycles(sym);
+               branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
        }
 
-       return notes->src->cycles_hist;
+       return branch->cycles_hist;
 }
 
 struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
@@ -1077,12 +1134,20 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64
        u64 offset;
 
        for (offset = start; offset <= end; offset++) {
-               if (notes->offsets[offset])
+               if (notes->src->offsets[offset])
                        n_insn++;
        }
        return n_insn;
 }
 
+static void annotated_branch__delete(struct annotated_branch *branch)
+{
+       if (branch) {
+               zfree(&branch->cycles_hist);
+               free(branch);
+       }
+}
+
 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
 {
        unsigned n_insn;
@@ -1091,6 +1156,7 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
 
        n_insn = annotation__count_insn(notes, start, end);
        if (n_insn && ch->num && ch->cycles) {
+               struct annotated_branch *branch;
                float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
 
                /* Hide data when there are too many overlaps. */
@@ -1098,54 +1164,76 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
                        return;
 
                for (offset = start; offset <= end; offset++) {
-                       struct annotation_line *al = notes->offsets[offset];
+                       struct annotation_line *al = notes->src->offsets[offset];
 
-                       if (al && al->ipc == 0.0) {
-                               al->ipc = ipc;
+                       if (al && al->cycles && al->cycles->ipc == 0.0) {
+                               al->cycles->ipc = ipc;
                                cover_insn++;
                        }
                }
 
-               if (cover_insn) {
-                       notes->hit_cycles += ch->cycles;
-                       notes->hit_insn += n_insn * ch->num;
-                       notes->cover_insn += cover_insn;
+               branch = annotation__get_branch(notes);
+               if (cover_insn && branch) {
+                       branch->hit_cycles += ch->cycles;
+                       branch->hit_insn += n_insn * ch->num;
+                       branch->cover_insn += cover_insn;
                }
        }
 }
 
-void annotation__compute_ipc(struct annotation *notes, size_t size)
+static int annotation__compute_ipc(struct annotation *notes, size_t size)
 {
+       int err = 0;
        s64 offset;
 
-       if (!notes->src || !notes->src->cycles_hist)
-               return;
+       if (!notes->branch || !notes->branch->cycles_hist)
+               return 0;
 
-       notes->total_insn = annotation__count_insn(notes, 0, size - 1);
-       notes->hit_cycles = 0;
-       notes->hit_insn = 0;
-       notes->cover_insn = 0;
+       notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
+       notes->branch->hit_cycles = 0;
+       notes->branch->hit_insn = 0;
+       notes->branch->cover_insn = 0;
 
        annotation__lock(notes);
        for (offset = size - 1; offset >= 0; --offset) {
                struct cyc_hist *ch;
 
-               ch = &notes->src->cycles_hist[offset];
+               ch = &notes->branch->cycles_hist[offset];
                if (ch && ch->cycles) {
                        struct annotation_line *al;
 
+                       al = notes->src->offsets[offset];
+                       if (al && al->cycles == NULL) {
+                               al->cycles = zalloc(sizeof(*al->cycles));
+                               if (al->cycles == NULL) {
+                                       err = ENOMEM;
+                                       break;
+                               }
+                       }
                        if (ch->have_start)
                                annotation__count_and_fill(notes, ch->start, offset, ch);
-                       al = notes->offsets[offset];
                        if (al && ch->num_aggr) {
-                               al->cycles = ch->cycles_aggr / ch->num_aggr;
-                               al->cycles_max = ch->cycles_max;
-                               al->cycles_min = ch->cycles_min;
+                               al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
+                               al->cycles->max = ch->cycles_max;
+                               al->cycles->min = ch->cycles_min;
+                       }
+               }
+       }
+
+       if (err) {
+               while (++offset < (s64)size) {
+                       struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
+
+                       if (ch && ch->cycles) {
+                               struct annotation_line *al = notes->src->offsets[offset];
+                               if (al)
+                                       zfree(&al->cycles);
                        }
-                       notes->have_cycles = true;
                }
        }
+
        annotation__unlock(notes);
+       return 0;
 }
 
 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
@@ -1225,6 +1313,7 @@ static void annotation_line__exit(struct annotation_line *al)
 {
        zfree_srcline(&al->path);
        zfree(&al->line);
+       zfree(&al->cycles);
 }
 
 static size_t disasm_line_size(int nr)
@@ -1299,6 +1388,7 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool r
 void annotation__exit(struct annotation *notes)
 {
        annotated_source__delete(notes->src);
+       annotated_branch__delete(notes->branch);
 }
 
 static struct sharded_mutex *sharded_mutex;
@@ -1817,7 +1907,6 @@ static int symbol__disassemble_bpf(struct symbol *sym,
                                   struct annotate_args *args)
 {
        struct annotation *notes = symbol__annotation(sym);
-       struct annotation_options *opts = args->options;
        struct bpf_prog_linfo *prog_linfo = NULL;
        struct bpf_prog_info_node *info_node;
        int len = sym->end - sym->start;
@@ -1927,7 +2016,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
                prev_buf_size = buf_size;
                fflush(s);
 
-               if (!opts->hide_src_code && srcline) {
+               if (!annotate_opts.hide_src_code && srcline) {
                        args->offset = -1;
                        args->line = strdup(srcline);
                        args->line_nr = 0;
@@ -2050,7 +2139,7 @@ static char *expand_tabs(char *line, char **storage, size_t *storage_len)
 
 static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
 {
-       struct annotation_options *opts = args->options;
+       struct annotation_options *opts = &annotate_opts;
        struct map *map = args->ms.map;
        struct dso *dso = map__dso(map);
        char *command;
@@ -2113,12 +2202,13 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
        err = asprintf(&command,
                 "%s %s%s --start-address=0x%016" PRIx64
                 " --stop-address=0x%016" PRIx64
-                " -l -d %s %s %s %c%s%c %s%s -C \"$1\"",
+                " %s -d %s %s %s %c%s%c %s%s -C \"$1\"",
                 opts->objdump_path ?: "objdump",
                 opts->disassembler_style ? "-M " : "",
                 opts->disassembler_style ?: "",
                 map__rip_2objdump(map, sym->start),
                 map__rip_2objdump(map, sym->end),
+                opts->show_linenr ? "-l" : "",
                 opts->show_asm_raw ? "" : "--no-show-raw-insn",
                 opts->annotate_src ? "-S" : "",
                 opts->prefix ? "--prefix " : "",
@@ -2299,15 +2389,8 @@ void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
        annotation__calc_percent(notes, evsel, symbol__size(sym));
 }
 
-int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
-                    struct annotation_options *options, struct arch **parch)
+static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
 {
-       struct symbol *sym = ms->sym;
-       struct annotation *notes = symbol__annotation(sym);
-       struct annotate_args args = {
-               .evsel          = evsel,
-               .options        = options,
-       };
        struct perf_env *env = evsel__env(evsel);
        const char *arch_name = perf_env__arch(env);
        struct arch *arch;
@@ -2316,25 +2399,45 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
        if (!arch_name)
                return errno;
 
-       args.arch = arch = arch__find(arch_name);
+       *parch = arch = arch__find(arch_name);
        if (arch == NULL) {
                pr_err("%s: unsupported arch %s\n", __func__, arch_name);
                return ENOTSUP;
        }
 
-       if (parch)
-               *parch = arch;
-
        if (arch->init) {
                err = arch->init(arch, env ? env->cpuid : NULL);
                if (err) {
-                       pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
+                       pr_err("%s: failed to initialize %s arch priv area\n",
+                              __func__, arch->name);
                        return err;
                }
        }
+       return 0;
+}
 
+int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
+                    struct arch **parch)
+{
+       struct symbol *sym = ms->sym;
+       struct annotation *notes = symbol__annotation(sym);
+       struct annotate_args args = {
+               .evsel          = evsel,
+               .options        = &annotate_opts,
+       };
+       struct arch *arch = NULL;
+       int err;
+
+       err = evsel__get_arch(evsel, &arch);
+       if (err < 0)
+               return err;
+
+       if (parch)
+               *parch = arch;
+
+       args.arch = arch;
        args.ms = *ms;
-       if (notes->options && notes->options->full_addr)
+       if (annotate_opts.full_addr)
                notes->start = map__objdump_2mem(ms->map, ms->sym->start);
        else
                notes->start = map__rip_2objdump(ms->map, ms->sym->start);
@@ -2342,12 +2445,12 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
        return symbol__disassemble(sym, &args);
 }
 
-static void insert_source_line(struct rb_root *root, struct annotation_line *al,
-                              struct annotation_options *opts)
+static void insert_source_line(struct rb_root *root, struct annotation_line *al)
 {
        struct annotation_line *iter;
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
+       unsigned int percent_type = annotate_opts.percent_type;
        int i, ret;
 
        while (*p != NULL) {
@@ -2358,7 +2461,7 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al,
                if (ret == 0) {
                        for (i = 0; i < al->data_nr; i++) {
                                iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
-                                                                                     opts->percent_type);
+                                                                                     percent_type);
                        }
                        return;
                }
@@ -2371,7 +2474,7 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al,
 
        for (i = 0; i < al->data_nr; i++) {
                al->data[i].percent_sum = annotation_data__percent(&al->data[i],
-                                                                  opts->percent_type);
+                                                                  percent_type);
        }
 
        rb_link_node(&al->rb_node, parent, p);
@@ -2493,8 +2596,7 @@ static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
        return 0;
 }
 
-int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
-                           struct annotation_options *opts)
+int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
 {
        struct map *map = ms->map;
        struct symbol *sym = ms->sym;
@@ -2505,6 +2607,7 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
        struct annotation *notes = symbol__annotation(sym);
        struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
        struct annotation_line *pos, *queue = NULL;
+       struct annotation_options *opts = &annotate_opts;
        u64 start = map__rip_2objdump(map, sym->start);
        int printed = 2, queue_len = 0, addr_fmt_width;
        int more = 0;
@@ -2633,8 +2736,7 @@ static void FILE__write_graph(void *fp, int graph)
        fputs(s, fp);
 }
 
-static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
-                                    struct annotation_options *opts)
+static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
 {
        struct annotation *notes = symbol__annotation(sym);
        struct annotation_write_ops wops = {
@@ -2649,9 +2751,9 @@ static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
        struct annotation_line *al;
 
        list_for_each_entry(al, &notes->src->source, node) {
-               if (annotation_line__filter(al, notes))
+               if (annotation_line__filter(al))
                        continue;
-               annotation_line__write(al, notes, &wops, opts);
+               annotation_line__write(al, notes, &wops);
                fputc('\n', fp);
                wops.first_line = false;
        }
@@ -2659,8 +2761,7 @@ static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
        return 0;
 }
 
-int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
-                               struct annotation_options *opts)
+int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
 {
        const char *ev_name = evsel__name(evsel);
        char buf[1024];
@@ -2682,7 +2783,7 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
 
        fprintf(fp, "%s() %s\nEvent: %s\n\n",
                ms->sym->name, map__dso(ms->map)->long_name, ev_name);
-       symbol__annotate_fprintf2(ms->sym, fp, opts);
+       symbol__annotate_fprintf2(ms->sym, fp);
 
        fclose(fp);
        err = 0;
@@ -2769,7 +2870,7 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
                return;
 
        for (offset = 0; offset < size; ++offset) {
-               struct annotation_line *al = notes->offsets[offset];
+               struct annotation_line *al = notes->src->offsets[offset];
                struct disasm_line *dl;
 
                dl = disasm_line(al);
@@ -2777,7 +2878,7 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
                if (!disasm_line__is_valid_local_jump(dl, sym))
                        continue;
 
-               al = notes->offsets[dl->ops.target.offset];
+               al = notes->src->offsets[dl->ops.target.offset];
 
                /*
                 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
@@ -2794,19 +2895,20 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
 void annotation__set_offsets(struct annotation *notes, s64 size)
 {
        struct annotation_line *al;
+       struct annotated_source *src = notes->src;
 
-       notes->max_line_len = 0;
-       notes->nr_entries = 0;
-       notes->nr_asm_entries = 0;
+       src->max_line_len = 0;
+       src->nr_entries = 0;
+       src->nr_asm_entries = 0;
 
-       list_for_each_entry(al, &notes->src->source, node) {
+       list_for_each_entry(al, &src->source, node) {
                size_t line_len = strlen(al->line);
 
-               if (notes->max_line_len < line_len)
-                       notes->max_line_len = line_len;
-               al->idx = notes->nr_entries++;
+               if (src->max_line_len < line_len)
+                       src->max_line_len = line_len;
+               al->idx = src->nr_entries++;
                if (al->offset != -1) {
-                       al->idx_asm = notes->nr_asm_entries++;
+                       al->idx_asm = src->nr_asm_entries++;
                        /*
                         * FIXME: short term bandaid to cope with assembly
                         * routines that comes with labels in the same column
@@ -2815,7 +2917,7 @@ void annotation__set_offsets(struct annotation *notes, s64 size)
                         * E.g. copy_user_generic_unrolled
                         */
                        if (al->offset < size)
-                               notes->offsets[al->offset] = al;
+                               notes->src->offsets[al->offset] = al;
                } else
                        al->idx_asm = -1;
        }
@@ -2858,24 +2960,24 @@ void annotation__init_column_widths(struct annotation *notes, struct symbol *sym
 
 void annotation__update_column_widths(struct annotation *notes)
 {
-       if (notes->options->use_offset)
+       if (annotate_opts.use_offset)
                notes->widths.target = notes->widths.min_addr;
-       else if (notes->options->full_addr)
+       else if (annotate_opts.full_addr)
                notes->widths.target = BITS_PER_LONG / 4;
        else
                notes->widths.target = notes->widths.max_addr;
 
        notes->widths.addr = notes->widths.target;
 
-       if (notes->options->show_nr_jumps)
+       if (annotate_opts.show_nr_jumps)
                notes->widths.addr += notes->widths.jumps + 1;
 }
 
 void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
 {
-       notes->options->full_addr = !notes->options->full_addr;
+       annotate_opts.full_addr = !annotate_opts.full_addr;
 
-       if (notes->options->full_addr)
+       if (annotate_opts.full_addr)
                notes->start = map__objdump_2mem(ms->map, ms->sym->start);
        else
                notes->start = map__rip_2objdump(ms->map, ms->sym->start);
@@ -2884,8 +2986,7 @@ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *m
 }
 
 static void annotation__calc_lines(struct annotation *notes, struct map *map,
-                                  struct rb_root *root,
-                                  struct annotation_options *opts)
+                                  struct rb_root *root)
 {
        struct annotation_line *al;
        struct rb_root tmp_root = RB_ROOT;
@@ -2898,7 +2999,7 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
                        double percent;
 
                        percent = annotation_data__percent(&al->data[i],
-                                                          opts->percent_type);
+                                                          annotate_opts.percent_type);
 
                        if (percent > percent_max)
                                percent_max = percent;
@@ -2909,22 +3010,20 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
 
                al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL,
                                       false, true, notes->start + al->offset);
-               insert_source_line(&tmp_root, al, opts);
+               insert_source_line(&tmp_root, al);
        }
 
        resort_source_line(root, &tmp_root);
 }
 
-static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root,
-                              struct annotation_options *opts)
+static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
 {
        struct annotation *notes = symbol__annotation(ms->sym);
 
-       annotation__calc_lines(notes, ms->map, root, opts);
+       annotation__calc_lines(notes, ms->map, root);
 }
 
-int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
-                         struct annotation_options *opts)
+int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
 {
        struct dso *dso = map__dso(ms->map);
        struct symbol *sym = ms->sym;
@@ -2933,7 +3032,7 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
        char buf[1024];
        int err;
 
-       err = symbol__annotate2(ms, evsel, opts, NULL);
+       err = symbol__annotate2(ms, evsel, NULL);
        if (err) {
                char msg[BUFSIZ];
 
@@ -2943,31 +3042,31 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
                return -1;
        }
 
-       if (opts->print_lines) {
-               srcline_full_filename = opts->full_path;
-               symbol__calc_lines(ms, &source_line, opts);
+       if (annotate_opts.print_lines) {
+               srcline_full_filename = annotate_opts.full_path;
+               symbol__calc_lines(ms, &source_line);
                print_summary(&source_line, dso->long_name);
        }
 
        hists__scnprintf_title(hists, buf, sizeof(buf));
        fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
-               buf, percent_type_str(opts->percent_type), sym->name, dso->long_name);
-       symbol__annotate_fprintf2(sym, stdout, opts);
+               buf, percent_type_str(annotate_opts.percent_type), sym->name,
+               dso->long_name);
+       symbol__annotate_fprintf2(sym, stdout);
 
        annotated_source__purge(symbol__annotation(sym)->src);
 
        return 0;
 }
 
-int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
-                        struct annotation_options *opts)
+int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
 {
        struct dso *dso = map__dso(ms->map);
        struct symbol *sym = ms->sym;
        struct rb_root source_line = RB_ROOT;
        int err;
 
-       err = symbol__annotate(ms, evsel, opts, NULL);
+       err = symbol__annotate(ms, evsel, NULL);
        if (err) {
                char msg[BUFSIZ];
 
@@ -2979,13 +3078,13 @@ int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
 
        symbol__calc_percent(sym, evsel);
 
-       if (opts->print_lines) {
-               srcline_full_filename = opts->full_path;
-               symbol__calc_lines(ms, &source_line, opts);
+       if (annotate_opts.print_lines) {
+               srcline_full_filename = annotate_opts.full_path;
+               symbol__calc_lines(ms, &source_line);
                print_summary(&source_line, dso->long_name);
        }
 
-       symbol__annotate_printf(ms, evsel, opts);
+       symbol__annotate_printf(ms, evsel);
 
        annotated_source__purge(symbol__annotation(sym)->src);
 
@@ -3046,19 +3145,20 @@ call_like:
                obj__printf(obj, "  ");
        }
 
-       disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset, notes->widths.max_ins_name);
+       disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, notes->widths.max_ins_name);
 }
 
 static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
 {
        double ipc = 0.0, coverage = 0.0;
+       struct annotated_branch *branch = annotation__get_branch(notes);
 
-       if (notes->hit_cycles)
-               ipc = notes->hit_insn / ((double)notes->hit_cycles);
+       if (branch && branch->hit_cycles)
+               ipc = branch->hit_insn / ((double)branch->hit_cycles);
 
-       if (notes->total_insn) {
-               coverage = notes->cover_insn * 100.0 /
-                       ((double)notes->total_insn);
+       if (branch && branch->total_insn) {
+               coverage = branch->cover_insn * 100.0 /
+                       ((double)branch->total_insn);
        }
 
        scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
@@ -3083,8 +3183,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
        int printed;
 
        if (first_line && (al->offset == -1 || percent_max == 0.0)) {
-               if (notes->have_cycles) {
-                       if (al->ipc == 0.0 && al->cycles == 0)
+               if (notes->branch && al->cycles) {
+                       if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
                                show_title = true;
                } else
                        show_title = true;
@@ -3120,18 +3220,18 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
                }
        }
 
-       if (notes->have_cycles) {
-               if (al->ipc)
-                       obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->ipc);
+       if (notes->branch) {
+               if (al->cycles && al->cycles->ipc)
+                       obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
                else if (!show_title)
                        obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
                else
                        obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
 
-               if (!notes->options->show_minmax_cycle) {
-                       if (al->cycles)
+               if (!annotate_opts.show_minmax_cycle) {
+                       if (al->cycles && al->cycles->avg)
                                obj__printf(obj, "%*" PRIu64 " ",
-                                          ANNOTATION__CYCLES_WIDTH - 1, al->cycles);
+                                          ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
                        else if (!show_title)
                                obj__printf(obj, "%*s",
                                            ANNOTATION__CYCLES_WIDTH, " ");
@@ -3145,8 +3245,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
 
                                scnprintf(str, sizeof(str),
                                        "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
-                                       al->cycles, al->cycles_min,
-                                       al->cycles_max);
+                                       al->cycles->avg, al->cycles->min,
+                                       al->cycles->max);
 
                                obj__printf(obj, "%*s ",
                                            ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
@@ -3172,7 +3272,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
        if (!*al->line)
                obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
        else if (al->offset == -1) {
-               if (al->line_nr && notes->options->show_linenr)
+               if (al->line_nr && annotate_opts.show_linenr)
                        printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr);
                else
                        printed = scnprintf(bf, sizeof(bf), "%-*s  ", notes->widths.addr, " ");
@@ -3182,15 +3282,15 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
                u64 addr = al->offset;
                int color = -1;
 
-               if (!notes->options->use_offset)
+               if (!annotate_opts.use_offset)
                        addr += notes->start;
 
-               if (!notes->options->use_offset) {
+               if (!annotate_opts.use_offset) {
                        printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
                } else {
                        if (al->jump_sources &&
-                           notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
-                               if (notes->options->show_nr_jumps) {
+                           annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
+                               if (annotate_opts.show_nr_jumps) {
                                        int prev;
                                        printed = scnprintf(bf, sizeof(bf), "%*d ",
                                                            notes->widths.jumps,
@@ -3204,9 +3304,9 @@ print_addr:
                                printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
                                                    notes->widths.target, addr);
                        } else if (ins__is_call(&disasm_line(al)->ins) &&
-                                  notes->options->offset_level >= ANNOTATION__OFFSET_CALL) {
+                                  annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
                                goto print_addr;
-                       } else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
+                       } else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
                                goto print_addr;
                        } else {
                                printed = scnprintf(bf, sizeof(bf), "%-*s  ",
@@ -3228,43 +3328,44 @@ print_addr:
 }
 
 void annotation_line__write(struct annotation_line *al, struct annotation *notes,
-                           struct annotation_write_ops *wops,
-                           struct annotation_options *opts)
+                           struct annotation_write_ops *wops)
 {
        __annotation_line__write(al, notes, wops->first_line, wops->current_entry,
                                 wops->change_color, wops->width, wops->obj,
-                                opts->percent_type,
+                                annotate_opts.percent_type,
                                 wops->set_color, wops->set_percent_color,
                                 wops->set_jumps_percent_color, wops->printf,
                                 wops->write_graph);
 }
 
 int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
-                     struct annotation_options *options, struct arch **parch)
+                     struct arch **parch)
 {
        struct symbol *sym = ms->sym;
        struct annotation *notes = symbol__annotation(sym);
        size_t size = symbol__size(sym);
        int nr_pcnt = 1, err;
 
-       notes->offsets = zalloc(size * sizeof(struct annotation_line *));
-       if (notes->offsets == NULL)
+       notes->src->offsets = zalloc(size * sizeof(struct annotation_line *));
+       if (notes->src->offsets == NULL)
                return ENOMEM;
 
        if (evsel__is_group_event(evsel))
                nr_pcnt = evsel->core.nr_members;
 
-       err = symbol__annotate(ms, evsel, options, parch);
+       err = symbol__annotate(ms, evsel, parch);
        if (err)
                goto out_free_offsets;
 
-       notes->options = options;
-
        symbol__calc_percent(sym, evsel);
 
        annotation__set_offsets(notes, size);
        annotation__mark_jump_targets(notes, sym);
-       annotation__compute_ipc(notes, size);
+
+       err = annotation__compute_ipc(notes, size);
+       if (err)
+               goto out_free_offsets;
+
        annotation__init_column_widths(notes, sym);
        notes->nr_events = nr_pcnt;
 
@@ -3274,7 +3375,7 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
        return 0;
 
 out_free_offsets:
-       zfree(&notes->offsets);
+       zfree(&notes->src->offsets);
        return err;
 }
 
@@ -3337,8 +3438,10 @@ static int annotation__config(const char *var, const char *value, void *data)
        return 0;
 }
 
-void annotation_options__init(struct annotation_options *opt)
+void annotation_options__init(void)
 {
+       struct annotation_options *opt = &annotate_opts;
+
        memset(opt, 0, sizeof(*opt));
 
        /* Default values. */
@@ -3349,16 +3452,15 @@ void annotation_options__init(struct annotation_options *opt)
        opt->percent_type = PERCENT_PERIOD_LOCAL;
 }
 
-
-void annotation_options__exit(struct annotation_options *opt)
+void annotation_options__exit(void)
 {
-       zfree(&opt->disassembler_style);
-       zfree(&opt->objdump_path);
+       zfree(&annotate_opts.disassembler_style);
+       zfree(&annotate_opts.objdump_path);
 }
 
-void annotation_config__init(struct annotation_options *opt)
+void annotation_config__init(void)
 {
-       perf_config(annotation__config, opt);
+       perf_config(annotation__config, &annotate_opts);
 }
 
 static unsigned int parse_percent_type(char *str1, char *str2)
@@ -3382,10 +3484,9 @@ static unsigned int parse_percent_type(char *str1, char *str2)
        return type;
 }
 
-int annotate_parse_percent_type(const struct option *opt, const char *_str,
+int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
                                int unset __maybe_unused)
 {
-       struct annotation_options *opts = opt->value;
        unsigned int type;
        char *str1, *str2;
        int err = -1;
@@ -3404,7 +3505,7 @@ int annotate_parse_percent_type(const struct option *opt, const char *_str,
        if (type == (unsigned int) -1)
                type = parse_percent_type(str2, str1);
        if (type != (unsigned int) -1) {
-               opts->percent_type = type;
+               annotate_opts.percent_type = type;
                err = 0;
        }
 
@@ -3413,11 +3514,267 @@ out:
        return err;
 }
 
-int annotate_check_args(struct annotation_options *args)
+int annotate_check_args(void)
 {
+       struct annotation_options *args = &annotate_opts;
+
        if (args->prefix_strip && !args->prefix) {
                pr_err("--prefix-strip requires --prefix\n");
                return -1;
        }
        return 0;
 }
+
+/*
+ * Get the register number and access offset from the given instruction.
+ * It assumes the AT&T x86 asm format like OFFSET(REG).  The parsing may
+ * need to be revisited when other architectures are handled.
+ * Fills @op_loc->reg and @op_loc->offset and returns 0 on success.
+ */
+static int extract_reg_offset(struct arch *arch, const char *str,
+                             struct annotated_op_loc *op_loc)
+{
+       char *p;
+       char *regname;
+
+       if (arch->objdump.register_char == 0)
+               return -1;
+
+       /*
+        * It should start with the offset, but it's possible to omit a
+        * zero offset in the asm, so 0(%rax) is the same as (%rax).
+        *
+        * However, it can also start with a segment selector register
+        * like %gs:0x18(%rbx).  In that case it should skip that part.
+        */
+       if (*str == arch->objdump.register_char) {
+               while (*str && !isdigit(*str) &&
+                      *str != arch->objdump.memory_ref_char)
+                       str++;
+       }
+
+       op_loc->offset = strtol(str, &p, 0);
+
+       p = strchr(p, arch->objdump.register_char);
+       if (p == NULL)
+               return -1;
+
+       regname = strdup(p);
+       if (regname == NULL)
+               return -1;
+
+       op_loc->reg = get_dwarf_regnum(regname, 0);
+       free(regname);
+       return 0;
+}
+
+/**
+ * annotate_get_insn_location - Get location of instruction
+ * @arch: the architecture info
+ * @dl: the target instruction
+ * @loc: a buffer to save the data
+ *
+ * Get detailed location info (register and offset) in the instruction.
+ * It fills in both the source and target operands, and whether each
+ * accesses a memory location.  The offset field is meaningful only
+ * when the corresponding mem_ref flag is set.
+ *
+ * Some examples on x86:
+ *
+ *   mov  (%rax), %rcx   # src_reg = rax, src_mem = 1, src_offset = 0
+ *                       # dst_reg = rcx, dst_mem = 0
+ *
+ *   mov  0x18, %r8      # src_reg = -1, dst_reg = r8
+ */
+int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
+                              struct annotated_insn_loc *loc)
+{
+       struct ins_operands *ops;
+       struct annotated_op_loc *op_loc;
+       int i;
+
+       if (!strcmp(dl->ins.name, "lock"))
+               ops = dl->ops.locked.ops;
+       else
+               ops = &dl->ops;
+
+       if (ops == NULL)
+               return -1;
+
+       memset(loc, 0, sizeof(*loc));
+
+       for_each_insn_op_loc(loc, i, op_loc) {
+               const char *insn_str = ops->source.raw;
+
+               if (i == INSN_OP_TARGET)
+                       insn_str = ops->target.raw;
+
+               /* Invalidate the register by default */
+               op_loc->reg = -1;
+
+               if (insn_str == NULL)
+                       continue;
+
+               if (strchr(insn_str, arch->objdump.memory_ref_char)) {
+                       op_loc->mem_ref = true;
+                       extract_reg_offset(arch, insn_str, op_loc);
+               } else {
+                       char *s = strdup(insn_str);
+
+                       if (s) {
+                               op_loc->reg = get_dwarf_regnum(s, 0);
+                               free(s);
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static void symbol__ensure_annotate(struct map_symbol *ms, struct evsel *evsel)
+{
+       struct disasm_line *dl, *tmp_dl;
+       struct annotation *notes;
+
+       notes = symbol__annotation(ms->sym);
+       if (!list_empty(&notes->src->source))
+               return;
+
+       if (symbol__annotate(ms, evsel, NULL) < 0)
+               return;
+
+       /* remove non-insn disasm lines for simplicity */
+       list_for_each_entry_safe(dl, tmp_dl, &notes->src->source, al.node) {
+               if (dl->al.offset == -1) {
+                       list_del(&dl->al.node);
+                       free(dl);
+               }
+       }
+}
+
+static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip)
+{
+       struct disasm_line *dl;
+       struct annotation *notes;
+
+       notes = symbol__annotation(sym);
+
+       list_for_each_entry(dl, &notes->src->source, al.node) {
+               if (sym->start + dl->al.offset == ip)
+                       return dl;
+       }
+       return NULL;
+}
+
+static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
+                                                     const char *name)
+{
+       struct annotated_item_stat *istat;
+
+       list_for_each_entry(istat, head, list) {
+               if (!strcmp(istat->name, name))
+                       return istat;
+       }
+
+       istat = zalloc(sizeof(*istat));
+       if (istat == NULL)
+               return NULL;
+
+       istat->name = strdup(name);
+       if (istat->name == NULL) {
+               free(istat);
+               return NULL;
+       }
+
+       list_add_tail(&istat->list, head);
+       return istat;
+}
+
+/**
+ * hist_entry__get_data_type - find data type for given hist entry
+ * @he: hist entry
+ *
+ * This function first annotates the instruction at @he->ip and extracts
+ * register and offset info from it.  Then it searches the DWARF debug
+ * info to get the variable and its type information using the address,
+ * register, and offset.
+ */
+struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
+{
+       struct map_symbol *ms = &he->ms;
+       struct evsel *evsel = hists_to_evsel(he->hists);
+       struct arch *arch;
+       struct disasm_line *dl;
+       struct annotated_insn_loc loc;
+       struct annotated_op_loc *op_loc;
+       struct annotated_data_type *mem_type;
+       struct annotated_item_stat *istat;
+       u64 ip = he->ip;
+       int i;
+
+       ann_data_stat.total++;
+
+       if (ms->map == NULL || ms->sym == NULL) {
+               ann_data_stat.no_sym++;
+               return NULL;
+       }
+
+       if (!symbol_conf.init_annotation) {
+               ann_data_stat.no_sym++;
+               return NULL;
+       }
+
+       if (evsel__get_arch(evsel, &arch) < 0) {
+               ann_data_stat.no_insn++;
+               return NULL;
+       }
+
+       /* Make sure it runs objdump to get disasm of the function */
+       symbol__ensure_annotate(ms, evsel);
+
+       /*
+        * Get a disasm to extract the location from the insn.
+        * This is too slow...
+        */
+       dl = find_disasm_line(ms->sym, ip);
+       if (dl == NULL) {
+               ann_data_stat.no_insn++;
+               return NULL;
+       }
+
+       istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
+       if (istat == NULL) {
+               ann_data_stat.no_insn++;
+               return NULL;
+       }
+
+       if (annotate_get_insn_location(arch, dl, &loc) < 0) {
+               ann_data_stat.no_insn_ops++;
+               istat->bad++;
+               return NULL;
+       }
+
+       for_each_insn_op_loc(&loc, i, op_loc) {
+               if (!op_loc->mem_ref)
+                       continue;
+
+               mem_type = find_data_type(ms, ip, op_loc->reg, op_loc->offset);
+               if (mem_type)
+                       istat->good++;
+               else
+                       istat->bad++;
+
+               if (symbol_conf.annotate_data_sample) {
+                       annotated_data_type__update_samples(mem_type, evsel,
+                                                           op_loc->offset,
+                                                           he->stat.nr_events,
+                                                           he->stat.period);
+               }
+               he->mem_type_off = op_loc->offset;
+               return mem_type;
+       }
+
+       ann_data_stat.no_mem_ops++;
+       istat->bad++;
+       return NULL;
+}
index 96278055917601c31ee928099188ddd35d5c1e0c..dba50762c6e807198880909a7a058e78bc7f9e21 100644 (file)
@@ -23,6 +23,7 @@ struct option;
 struct perf_sample;
 struct evsel;
 struct symbol;
+struct annotated_data_type;
 
 struct ins {
        const char     *name;
@@ -31,8 +32,6 @@ struct ins {
 
 struct ins_operands {
        char    *raw;
-       char    *raw_comment;
-       char    *raw_func_start;
        struct {
                char    *raw;
                char    *name;
@@ -41,22 +40,30 @@ struct ins_operands {
                s64     offset;
                bool    offset_avail;
                bool    outside;
+               bool    multi_regs;
        } target;
        union {
                struct {
                        char    *raw;
                        char    *name;
                        u64     addr;
+                       bool    multi_regs;
                } source;
                struct {
                        struct ins          ins;
                        struct ins_operands *ops;
                } locked;
+               struct {
+                       char    *raw_comment;
+                       char    *raw_func_start;
+               } jump;
        };
 };
 
 struct arch;
 
+bool arch__is(struct arch *arch, const char *name);
+
 struct ins_ops {
        void (*free)(struct ins_operands *ops);
        int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
@@ -101,6 +108,8 @@ struct annotation_options {
        unsigned int percent_type;
 };
 
+extern struct annotation_options annotate_opts;
+
 enum {
        ANNOTATION__OFFSET_JUMP_TARGETS = 1,
        ANNOTATION__OFFSET_CALL,
@@ -130,6 +139,13 @@ struct annotation_data {
        struct sym_hist_entry    he;
 };
 
+struct cycles_info {
+       float                    ipc;
+       u64                      avg;
+       u64                      max;
+       u64                      min;
+};
+
 struct annotation_line {
        struct list_head         node;
        struct rb_node           rb_node;
@@ -137,12 +153,9 @@ struct annotation_line {
        char                    *line;
        int                      line_nr;
        char                    *fileloc;
-       int                      jump_sources;
-       float                    ipc;
-       u64                      cycles;
-       u64                      cycles_max;
-       u64                      cycles_min;
        char                    *path;
+       struct cycles_info      *cycles;
+       int                      jump_sources;
        u32                      idx;
        int                      idx_asm;
        int                      data_nr;
@@ -214,8 +227,7 @@ struct annotation_write_ops {
 };
 
 void annotation_line__write(struct annotation_line *al, struct annotation *notes,
-                           struct annotation_write_ops *ops,
-                           struct annotation_options *opts);
+                           struct annotation_write_ops *ops);
 
 int __annotation__scnprintf_samples_period(struct annotation *notes,
                                           char *bf, size_t size,
@@ -264,27 +276,29 @@ struct cyc_hist {
  * returns.
  */
 struct annotated_source {
-       struct list_head   source;
-       int                nr_histograms;
-       size_t             sizeof_sym_hist;
-       struct cyc_hist    *cycles_hist;
-       struct sym_hist    *histograms;
+       struct list_head        source;
+       size_t                  sizeof_sym_hist;
+       struct sym_hist         *histograms;
+       struct annotation_line  **offsets;
+       int                     nr_histograms;
+       int                     nr_entries;
+       int                     nr_asm_entries;
+       u16                     max_line_len;
 };
 
-struct LOCKABLE annotation {
-       u64                     max_coverage;
-       u64                     start;
+struct annotated_branch {
        u64                     hit_cycles;
        u64                     hit_insn;
        unsigned int            total_insn;
        unsigned int            cover_insn;
-       struct annotation_options *options;
-       struct annotation_line  **offsets;
+       struct cyc_hist         *cycles_hist;
+       u64                     max_coverage;
+};
+
+struct LOCKABLE annotation {
+       u64                     start;
        int                     nr_events;
        int                     max_jump_sources;
-       int                     nr_entries;
-       int                     nr_asm_entries;
-       u16                     max_line_len;
        struct {
                u8              addr;
                u8              jumps;
@@ -293,8 +307,8 @@ struct LOCKABLE annotation {
                u8              max_addr;
                u8              max_ins_name;
        } widths;
-       bool                    have_cycles;
        struct annotated_source *src;
+       struct annotated_branch *branch;
 };
 
 static inline void annotation__init(struct annotation *notes __maybe_unused)
@@ -308,10 +322,10 @@ bool annotation__trylock(struct annotation *notes) EXCLUSIVE_TRYLOCK_FUNCTION(tr
 
 static inline int annotation__cycles_width(struct annotation *notes)
 {
-       if (notes->have_cycles && notes->options->show_minmax_cycle)
+       if (notes->branch && annotate_opts.show_minmax_cycle)
                return ANNOTATION__IPC_WIDTH + ANNOTATION__MINMAX_CYCLES_WIDTH;
 
-       return notes->have_cycles ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0;
+       return notes->branch ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0;
 }
 
 static inline int annotation__pcnt_width(struct annotation *notes)
@@ -319,13 +333,12 @@ static inline int annotation__pcnt_width(struct annotation *notes)
        return (symbol_conf.show_total_period ? 12 : 7) * notes->nr_events;
 }
 
-static inline bool annotation_line__filter(struct annotation_line *al, struct annotation *notes)
+static inline bool annotation_line__filter(struct annotation_line *al)
 {
-       return notes->options->hide_src_code && al->offset == -1;
+       return annotate_opts.hide_src_code && al->offset == -1;
 }
 
 void annotation__set_offsets(struct annotation *notes, s64 size);
-void annotation__compute_ipc(struct annotation *notes, size_t size);
 void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym);
 void annotation__update_column_widths(struct annotation *notes);
 void annotation__init_column_widths(struct annotation *notes, struct symbol *sym);
@@ -349,6 +362,8 @@ static inline struct annotation *symbol__annotation(struct symbol *sym)
 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
                                 struct evsel *evsel);
 
+struct annotated_branch *annotation__get_branch(struct annotation *notes);
+
 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
                                    struct addr_map_symbol *start,
                                    unsigned cycles);
@@ -361,11 +376,9 @@ void symbol__annotate_zero_histograms(struct symbol *sym);
 
 int symbol__annotate(struct map_symbol *ms,
                     struct evsel *evsel,
-                    struct annotation_options *options,
                     struct arch **parch);
 int symbol__annotate2(struct map_symbol *ms,
                      struct evsel *evsel,
-                     struct annotation_options *options,
                      struct arch **parch);
 
 enum symbol_disassemble_errno {
@@ -392,43 +405,86 @@ enum symbol_disassemble_errno {
 
 int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen);
 
-int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
-                           struct annotation_options *options);
+int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel);
 void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
 void annotated_source__purge(struct annotated_source *as);
 
-int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
-                               struct annotation_options *opts);
+int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel);
 
 bool ui__has_annotation(void);
 
-int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel, struct annotation_options *opts);
+int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel);
 
-int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel, struct annotation_options *opts);
+int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel);
 
 #ifdef HAVE_SLANG_SUPPORT
 int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
-                        struct hist_browser_timer *hbt,
-                        struct annotation_options *opts);
+                        struct hist_browser_timer *hbt);
 #else
 static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
                                struct evsel *evsel  __maybe_unused,
-                               struct hist_browser_timer *hbt __maybe_unused,
-                               struct annotation_options *opts __maybe_unused)
+                               struct hist_browser_timer *hbt __maybe_unused)
 {
        return 0;
 }
 #endif
 
-void annotation_options__init(struct annotation_options *opt);
-void annotation_options__exit(struct annotation_options *opt);
+void annotation_options__init(void);
+void annotation_options__exit(void);
 
-void annotation_config__init(struct annotation_options *opt);
+void annotation_config__init(void);
 
 int annotate_parse_percent_type(const struct option *opt, const char *_str,
                                int unset);
 
-int annotate_check_args(struct annotation_options *args);
+int annotate_check_args(void);
+
+/**
+ * struct annotated_op_loc - Location info of instruction operand
+ * @reg: Register in the operand
+ * @offset: Memory access offset in the operand
+ * @mem_ref: Whether the operand accesses memory
+ */
+struct annotated_op_loc {
+       int reg;
+       int offset;
+       bool mem_ref;
+};
+
+enum annotated_insn_ops {
+       INSN_OP_SOURCE = 0,
+       INSN_OP_TARGET = 1,
+
+       INSN_OP_MAX,
+};
+
+/**
+ * struct annotated_insn_loc - Location info of instruction
+ * @ops: Array of location info for source and target operands
+ */
+struct annotated_insn_loc {
+       struct annotated_op_loc ops[INSN_OP_MAX];
+};
+
+#define for_each_insn_op_loc(insn_loc, i, op_loc)                      \
+       for (i = INSN_OP_SOURCE, op_loc = &(insn_loc)->ops[i];          \
+            i < INSN_OP_MAX;                                           \
+            i++, op_loc++)
+
+/* Get detailed location info in the instruction */
+int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
+                              struct annotated_insn_loc *loc);
+
+/* Returns a data type from the sample instruction (if any) */
+struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he);
+
+struct annotated_item_stat {
+       struct list_head list;
+       char *name;
+       int good;
+       int bad;
+};
+extern struct list_head ann_insn_stat;
 
 #endif /* __PERF_ANNOTATE_H */
index a0368202a746ab6c046eed1a3b8bfb671af71456..3684e6009b635076c8171d68b4b9edb89bfcf1f6 100644 (file)
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
                                   struct evlist *evlist,
                                   struct evsel *evsel, int idx)
 {
-       bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+       bool per_cpu = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
 
        mp->mmap_needed = evsel->needs_auxtrace_mmap;
 
@@ -648,7 +648,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
 
 static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
 {
-       bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+       bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
 
        if (per_cpu_mmaps) {
                struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
@@ -1638,6 +1638,9 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
                case 'Z':
                        synth_opts->timeless_decoding = true;
                        break;
+               case 'T':
+                       synth_opts->use_timestamp = true;
+                       break;
                case ' ':
                case ',':
                        break;
index 29eb82dff5749c44afa6200dfe3894e64be5258b..55702215a82d31c1a519dde9df327d276adc5c2f 100644 (file)
@@ -99,6 +99,7 @@ enum itrace_period_type {
  * @remote_access: whether to synthesize remote access events
  * @mem: whether to synthesize memory events
  * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
+ * @use_timestamp: use the timestamp trace as kernel time
  * @vm_time_correlation: perform VM Time Correlation
  * @vm_tm_corr_dry_run: VM Time Correlation dry-run
  * @vm_tm_corr_args:  VM Time Correlation implementation-specific arguments
@@ -146,6 +147,7 @@ struct itrace_synth_opts {
        bool                    remote_access;
        bool                    mem;
        bool                    timeless_decoding;
+       bool                    use_timestamp;
        bool                    vm_time_correlation;
        bool                    vm_tm_corr_dry_run;
        char                    *vm_tm_corr_args;
@@ -678,6 +680,7 @@ bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
 "                              q:                      quicker (less detailed) decoding\n" \
 "                              A:                      approximate IPC\n" \
 "                              Z:                      prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \
+"                              T:                      use the timestamp trace as kernel time\n" \
 "                              PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
 "                              concatenate multiple options. Default is iybxwpe or cewp\n"
 
index 591fc1edd385caee7be9ff7834b18b255994747c..dec910989701eb9433442c00383e01673e11dd86 100644 (file)
@@ -129,9 +129,9 @@ int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
        al.sym = he->ms.sym;
 
        notes = symbol__annotation(he->ms.sym);
-       if (!notes || !notes->src || !notes->src->cycles_hist)
+       if (!notes || !notes->branch || !notes->branch->cycles_hist)
                return 0;
-       ch = notes->src->cycles_hist;
+       ch = notes->branch->cycles_hist;
        for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
                if (ch[i].num_aggr) {
                        struct block_info *bi;
@@ -464,8 +464,7 @@ void block_info__free_report(struct block_report *reps, int nr_reps)
 }
 
 int report__browse_block_hists(struct block_hist *bh, float min_percent,
-                              struct evsel *evsel, struct perf_env *env,
-                              struct annotation_options *annotation_opts)
+                              struct evsel *evsel, struct perf_env *env)
 {
        int ret;
 
@@ -477,8 +476,7 @@ int report__browse_block_hists(struct block_hist *bh, float min_percent,
                return 0;
        case 1:
                symbol_conf.report_individual_block = true;
-               ret = block_hists_tui_browse(bh, evsel, min_percent,
-                                            env, annotation_opts);
+               ret = block_hists_tui_browse(bh, evsel, min_percent, env);
                return ret;
        default:
                return -1;
index 42e9dcc4cf0ab3584253046d1557036f3f7395e3..96f53e89795e24a95293a5170a168279df5760bc 100644 (file)
@@ -78,8 +78,7 @@ struct block_report *block_info__create_report(struct evlist *evlist,
 void block_info__free_report(struct block_report *reps, int nr_reps);
 
 int report__browse_block_hists(struct block_hist *bh, float min_percent,
-                              struct evsel *evsel, struct perf_env *env,
-                              struct annotation_options *annotation_opts);
+                              struct evsel *evsel, struct perf_env *env);
 
 float block_info__total_cycles_percent(struct hist_entry *he);
 
index 680e92774d0cde6171fddcc46a992af02766615a..15c42196c24c8230779b04c1ddd55e1d473deade 100644 (file)
@@ -311,6 +311,7 @@ done:
 double block_range__coverage(struct block_range *br)
 {
        struct symbol *sym;
+       struct annotated_branch *branch;
 
        if (!br) {
                if (block_ranges.blocks)
@@ -323,5 +324,9 @@ double block_range__coverage(struct block_range *br)
        if (!sym)
                return -1;
 
-       return (double)br->coverage / symbol__annotation(sym)->max_coverage;
+       branch = symbol__annotation(sym)->branch;
+       if (!branch)
+               return -1;
+
+       return (double)br->coverage / branch->max_coverage;
 }
index 38fcf3ba5749d9f77aab72af20aae9e949e9eb0d..3573e0b7ef3eda83ba635868f303bfd30df7153e 100644 (file)
@@ -386,6 +386,9 @@ int perf_event__synthesize_bpf_events(struct perf_session *session,
        int err;
        int fd;
 
+       if (opts->no_bpf_event)
+               return 0;
+
        event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
        if (!event)
                return -1;
@@ -542,9 +545,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
        return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
 }
 
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
-                                   struct perf_env *env,
-                                   FILE *fp)
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+                                     struct perf_env *env,
+                                     FILE *fp)
 {
        __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
        __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
@@ -560,7 +563,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
        if (info->btf_id) {
                struct btf_node *node;
 
-               node = perf_env__find_btf(env, info->btf_id);
+               node = __perf_env__find_btf(env, info->btf_id);
                if (node)
                        btf = btf__new((__u8 *)(node->data),
                                       node->data_size);
index 1bcbd4fb6c669d76065255ad196addaa37cf85aa..e2f0420905f597410dc027f4d6e3e0a5e8ccc48c 100644 (file)
@@ -33,9 +33,9 @@ struct btf_node {
 int machine__process_bpf(struct machine *machine, union perf_event *event,
                         struct perf_sample *sample);
 int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
-                                   struct perf_env *env,
-                                   FILE *fp);
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+                                     struct perf_env *env,
+                                     FILE *fp);
 #else
 static inline int machine__process_bpf(struct machine *machine __maybe_unused,
                                       union perf_event *event __maybe_unused,
@@ -50,9 +50,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
        return 0;
 }
 
-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
-                                                 struct perf_env *env __maybe_unused,
-                                                 FILE *fp __maybe_unused)
+static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+                                                   struct perf_env *env __maybe_unused,
+                                                   FILE *fp __maybe_unused)
 {
 
 }
index 7f9b0e46e008c466604aec8c4e59ace15001a5e8..7a8af60e0f5158fe7936898a7c12fceadef7e8f0 100644 (file)
@@ -455,7 +455,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
                return -1;
 
        if (!all_cpu_map) {
-               all_cpu_map = perf_cpu_map__new(NULL);
+               all_cpu_map = perf_cpu_map__new_online_cpus();
                if (!all_cpu_map)
                        return -1;
        }
index f1716c089c9912f4f9bfca827bde4e509db8d22e..31ff19afc20c1b857a4397185926007dacb75e71 100644 (file)
@@ -318,7 +318,7 @@ int lock_contention_read(struct lock_contention *con)
        }
 
        /* make sure it loads the kernel map */
-       map__load(maps__first(machine->kmaps)->map);
+       maps__load_first(machine->kmaps);
 
        prev_key = NULL;
        while (!bpf_map_get_next_key(fd, prev_key, &key)) {
index 0cd3369af2a4f2cdfb184f25e22a3beb53c87040..b29109cd36095c4fe4063cff1d60cf50b420d66d 100644 (file)
@@ -3,6 +3,8 @@
 #define PERF_COMPRESS_H
 
 #include <stdbool.h>
+#include <stddef.h>
+#include <sys/types.h>
 #ifdef HAVE_ZSTD_SUPPORT
 #include <zstd.h>
 #endif
@@ -21,6 +23,7 @@ struct zstd_data {
 #ifdef HAVE_ZSTD_SUPPORT
        ZSTD_CStream    *cstream;
        ZSTD_DStream    *dstream;
+       int comp_level;
 #endif
 };
 
@@ -29,7 +32,7 @@ struct zstd_data {
 int zstd_init(struct zstd_data *data, int level);
 int zstd_fini(struct zstd_data *data);
 
-size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
+ssize_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
                                       void *src, size_t src_size, size_t max_record_size,
                                       size_t process_header(void *record, size_t increment));
 
@@ -48,7 +51,7 @@ static inline int zstd_fini(struct zstd_data *data __maybe_unused)
 }
 
 static inline
-size_t zstd_compress_stream_to_records(struct zstd_data *data __maybe_unused,
+ssize_t zstd_compress_stream_to_records(struct zstd_data *data __maybe_unused,
                                       void *dst __maybe_unused, size_t dst_size __maybe_unused,
                                       void *src __maybe_unused, size_t src_size __maybe_unused,
                                       size_t max_record_size __maybe_unused,
index 0e090e8bc33491eea0f2007f136aa236cc103482..0581ee0fa5f270b4eb6fa4ae5776afb056804099 100644 (file)
@@ -672,7 +672,7 @@ struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
        static struct perf_cpu_map *online;
 
        if (!online)
-               online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
+               online = perf_cpu_map__new_online_cpus(); /* from /sys/devices/system/cpu/online */
 
        return online;
 }
index 81cfc85f46682ce0c0a2cc5a88556dbb88beb23b..8bbeb2dc76fda994b7f83abd227aceaed6e78c55 100644 (file)
@@ -267,7 +267,7 @@ struct cpu_topology *cpu_topology__new(void)
        ncpus = cpu__max_present_cpu().cpu;
 
        /* build online CPU map */
-       map = perf_cpu_map__new(NULL);
+       map = perf_cpu_map__new_online_cpus();
        if (map == NULL) {
                pr_debug("failed to get system cpumap\n");
                return NULL;
index a9873d14c6329925968770cf571724bf25a84df8..d65d7485886cd512fca26134f6f34921b13753a1 100644 (file)
@@ -3346,12 +3346,27 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
        etm->metadata = metadata;
        etm->auxtrace_type = auxtrace_info->type;
 
-       /* Use virtual timestamps if all ETMs report ts_source = 1 */
-       etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu);
+       if (etm->synth_opts.use_timestamp)
+               /*
+                * Prior to Armv8.4, Arm CPUs don't support the FEAT_TRF
+                * feature, so the decoder cannot know whether the traced
+                * timestamp is the same as the kernel time.
+                *
+                * A user with knowledge of the platform can specify the
+                * itrace option 'T' to tell the decoder to forcibly use the
+                * traced timestamp as the kernel time.
+                */
+               etm->has_virtual_ts = true;
+       else
+               /* Use virtual timestamps if all ETMs report ts_source = 1 */
+               etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu);
 
        if (!etm->has_virtual_ts)
                ui__warning("Virtual timestamps are not enabled, or not supported by the traced system.\n"
-                           "The time field of the samples will not be set accurately.\n\n");
+                           "The time field of the samples will not be set accurately.\n"
+                           "For Arm CPUs prior to Armv8.4 or without support FEAT_TRF,\n"
+                           "you can specify the itrace option 'T' for timestamp decoding\n"
+                           "if the Coresight timestamp on the platform is same with the kernel time.\n\n");
 
        etm->auxtrace.process_event = cs_etm__process_event;
        etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
index b9fb71ab7a7303a301465ec2cfa82e6efbc02538..106429155c2e9d131f65bd425e375f76e4e8de7a 100644 (file)
@@ -253,8 +253,8 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
                 */
                addr_location__init(&al);
                al.sym = node->ms.sym;
-               al.map = node->ms.map;
-               al.maps = thread__maps(thread);
+               al.map = map__get(node->ms.map);
+               al.maps = maps__get(thread__maps(thread));
                al.addr = node->ip;
 
                if (al.map && !al.sym)
index 88378c4c5dd9e6d8739c3c49e836782464b5a0bf..e282b4ceb4d25fd560a972acb50689010bd01a85 100644 (file)
@@ -38,12 +38,21 @@ bool dump_trace = false, quiet = false;
 int debug_ordered_events;
 static int redirect_to_stderr;
 int debug_data_convert;
-static FILE *debug_file;
+static FILE *_debug_file;
 bool debug_display_time;
 
+FILE *debug_file(void)
+{
+       if (!_debug_file) {
+               pr_warning_once("debug_file not set");
+               debug_set_file(stderr);
+       }
+       return _debug_file;
+}
+
 void debug_set_file(FILE *file)
 {
-       debug_file = file;
+       _debug_file = file;
 }
 
 void debug_set_display_time(bool set)
@@ -78,8 +87,8 @@ int veprintf(int level, int var, const char *fmt, va_list args)
                if (use_browser >= 1 && !redirect_to_stderr) {
                        ui_helpline__vshow(fmt, args);
                } else {
-                       ret = fprintf_time(debug_file);
-                       ret += vfprintf(debug_file, fmt, args);
+                       ret = fprintf_time(debug_file());
+                       ret += vfprintf(debug_file(), fmt, args);
                }
        }
 
@@ -107,9 +116,8 @@ static int veprintf_time(u64 t, const char *fmt, va_list args)
        nsecs -= secs  * NSEC_PER_SEC;
        usecs  = nsecs / NSEC_PER_USEC;
 
-       ret = fprintf(stderr, "[%13" PRIu64 ".%06" PRIu64 "] ",
-                     secs, usecs);
-       ret += vfprintf(stderr, fmt, args);
+       ret = fprintf(debug_file(), "[%13" PRIu64 ".%06" PRIu64 "] ", secs, usecs);
+       ret += vfprintf(debug_file(), fmt, args);
        return ret;
 }
 
index f99468a7f68170017f0fa9adc1862704bdbf3a71..de8870980d44abc3f4a52add52affbdaefe11448 100644 (file)
@@ -77,6 +77,7 @@ int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __printf(4, 5)
 int veprintf(int level, int var, const char *fmt, va_list args);
 
 int perf_debug_option(const char *str);
+FILE *debug_file(void);
 void debug_set_file(FILE *file);
 void debug_set_display_time(bool set);
 void perf_debug_setup(void);
diff --git a/tools/perf/util/debuginfo.c b/tools/perf/util/debuginfo.c
new file mode 100644 (file)
index 0000000..19acf47
--- /dev/null
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DWARF debug information handling code.  Copied from probe-finder.c.
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/zalloc.h>
+
+#include "build-id.h"
+#include "dso.h"
+#include "debug.h"
+#include "debuginfo.h"
+#include "symbol.h"
+
+#ifdef HAVE_DEBUGINFOD_SUPPORT
+#include <elfutils/debuginfod.h>
+#endif
+
+/* Dwarf FL wrappers */
+static char *debuginfo_path;   /* Currently dummy */
+
+static const Dwfl_Callbacks offline_callbacks = {
+       .find_debuginfo = dwfl_standard_find_debuginfo,
+       .debuginfo_path = &debuginfo_path,
+
+       .section_address = dwfl_offline_section_address,
+
+       /* We use this table for core files too.  */
+       .find_elf = dwfl_build_id_find_elf,
+};
+
+/* Get a Dwarf from offline image */
+static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
+                                        const char *path)
+{
+       GElf_Addr dummy;
+       int fd;
+
+       fd = open(path, O_RDONLY);
+       if (fd < 0)
+               return fd;
+
+       dbg->dwfl = dwfl_begin(&offline_callbacks);
+       if (!dbg->dwfl)
+               goto error;
+
+       dwfl_report_begin(dbg->dwfl);
+       dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd);
+       if (!dbg->mod)
+               goto error;
+
+       dbg->dbg = dwfl_module_getdwarf(dbg->mod, &dbg->bias);
+       if (!dbg->dbg)
+               goto error;
+
+       dwfl_module_build_id(dbg->mod, &dbg->build_id, &dummy);
+
+       dwfl_report_end(dbg->dwfl, NULL, NULL);
+
+       return 0;
+error:
+       if (dbg->dwfl)
+               dwfl_end(dbg->dwfl);
+       else
+               close(fd);
+       memset(dbg, 0, sizeof(*dbg));
+
+       return -ENOENT;
+}
+
+static struct debuginfo *__debuginfo__new(const char *path)
+{
+       struct debuginfo *dbg = zalloc(sizeof(*dbg));
+       if (!dbg)
+               return NULL;
+
+       if (debuginfo__init_offline_dwarf(dbg, path) < 0)
+               zfree(&dbg);
+       if (dbg)
+               pr_debug("Open Debuginfo file: %s\n", path);
+       return dbg;
+}
+
+enum dso_binary_type distro_dwarf_types[] = {
+       DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+       DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+       DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+       DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+       DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
+       DSO_BINARY_TYPE__NOT_FOUND,
+};
+
+struct debuginfo *debuginfo__new(const char *path)
+{
+       enum dso_binary_type *type;
+       char buf[PATH_MAX], nil = '\0';
+       struct dso *dso;
+       struct debuginfo *dinfo = NULL;
+       struct build_id bid;
+
+       /* Try to open distro debuginfo files */
+       dso = dso__new(path);
+       if (!dso)
+               goto out;
+
+       /* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */
+       if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0)
+               dso__set_build_id(dso, &bid);
+
+       for (type = distro_dwarf_types;
+            !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND;
+            type++) {
+               if (dso__read_binary_type_filename(dso, *type, &nil,
+                                                  buf, PATH_MAX) < 0)
+                       continue;
+               dinfo = __debuginfo__new(buf);
+       }
+       dso__put(dso);
+
+out:
+       /* If we failed to open any distro debuginfo, open the given binary */
+       return dinfo ? : __debuginfo__new(path);
+}
+
+void debuginfo__delete(struct debuginfo *dbg)
+{
+       if (dbg) {
+               if (dbg->dwfl)
+                       dwfl_end(dbg->dwfl);
+               free(dbg);
+       }
+}
+
+/* For a kernel module, special handling is needed to get the .text offset */
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+                               bool adjust_offset)
+{
+       int n, i;
+       Elf32_Word shndx;
+       Elf_Scn *scn;
+       Elf *elf;
+       GElf_Shdr mem, *shdr;
+       const char *p;
+
+       elf = dwfl_module_getelf(dbg->mod, &dbg->bias);
+       if (!elf)
+               return -EINVAL;
+
+       /* Get the number of relocations */
+       n = dwfl_module_relocations(dbg->mod);
+       if (n < 0)
+               return -ENOENT;
+       /* Search the relocation related .text section */
+       for (i = 0; i < n; i++) {
+               p = dwfl_module_relocation_info(dbg->mod, i, &shndx);
+               if (strcmp(p, ".text") == 0) {
+                       /* OK, get the section header */
+                       scn = elf_getscn(elf, shndx);
+                       if (!scn)
+                               return -ENOENT;
+                       shdr = gelf_getshdr(scn, &mem);
+                       if (!shdr)
+                               return -ENOENT;
+                       *offs = shdr->sh_addr;
+                       if (adjust_offset)
+                               *offs -= shdr->sh_offset;
+               }
+       }
+       return 0;
+}
+
+#ifdef HAVE_DEBUGINFOD_SUPPORT
+int get_source_from_debuginfod(const char *raw_path,
+                              const char *sbuild_id, char **new_path)
+{
+       debuginfod_client *c = debuginfod_begin();
+       const char *p = raw_path;
+       int fd;
+
+       if (!c)
+               return -ENOMEM;
+
+       fd = debuginfod_find_source(c, (const unsigned char *)sbuild_id,
+                               0, p, new_path);
+       pr_debug("Search %s from debuginfod -> %d\n", p, fd);
+       if (fd >= 0)
+               close(fd);
+       debuginfod_end(c);
+       if (fd < 0) {
+               pr_debug("Failed to find %s in debuginfod (%s)\n",
+                       raw_path, sbuild_id);
+               return -ENOENT;
+       }
+       pr_debug("Got a source %s\n", *new_path);
+
+       return 0;
+}
+#endif /* HAVE_DEBUGINFOD_SUPPORT */
diff --git a/tools/perf/util/debuginfo.h b/tools/perf/util/debuginfo.h
new file mode 100644 (file)
index 0000000..4d65b8c
--- /dev/null
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PERF_DEBUGINFO_H
+#define _PERF_DEBUGINFO_H
+
+#include <errno.h>
+#include <linux/compiler.h>
+
+#ifdef HAVE_DWARF_SUPPORT
+
+#include "dwarf-aux.h"
+
+/* debug information structure */
+struct debuginfo {
+       Dwarf           *dbg;
+       Dwfl_Module     *mod;
+       Dwfl            *dwfl;
+       Dwarf_Addr      bias;
+       const unsigned char     *build_id;
+};
+
+/* This also tries to open distro debuginfo */
+struct debuginfo *debuginfo__new(const char *path);
+void debuginfo__delete(struct debuginfo *dbg);
+
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+                              bool adjust_offset);
+
+#else /* HAVE_DWARF_SUPPORT */
+
+/* dummy debug information structure */
+struct debuginfo {
+};
+
+static inline struct debuginfo *debuginfo__new(const char *path __maybe_unused)
+{
+       return NULL;
+}
+
+static inline void debuginfo__delete(struct debuginfo *dbg __maybe_unused)
+{
+}
+
+static inline int debuginfo__get_text_offset(struct debuginfo *dbg __maybe_unused,
+                                            Dwarf_Addr *offs __maybe_unused,
+                                            bool adjust_offset __maybe_unused)
+{
+       return -EINVAL;
+}
+
+#endif /* HAVE_DWARF_SUPPORT */
+
+#ifdef HAVE_DEBUGINFOD_SUPPORT
+int get_source_from_debuginfod(const char *raw_path, const char *sbuild_id,
+                              char **new_path);
+#else /* HAVE_DEBUGINFOD_SUPPORT */
+static inline int get_source_from_debuginfod(const char *raw_path __maybe_unused,
+                                            const char *sbuild_id __maybe_unused,
+                                            char **new_path __maybe_unused)
+{
+       return -ENOTSUP;
+}
+#endif /* HAVE_DEBUGINFOD_SUPPORT */
+
+#endif /* _PERF_DEBUGINFO_H */
index 1f629b6fb7cfe3420df429cdf04ba12dc76b8183..22fd5fa806ed8f589ca1209710288c0e44ac19a2 100644 (file)
@@ -31,6 +31,7 @@
 #include "debug.h"
 #include "string2.h"
 #include "vdso.h"
+#include "annotate-data.h"
 
 static const char * const debuglink_paths[] = {
        "%.0s%s",
@@ -1327,6 +1328,7 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
                dso->data.cache = RB_ROOT;
                dso->inlined_nodes = RB_ROOT_CACHED;
                dso->srclines = RB_ROOT_CACHED;
+               dso->data_types = RB_ROOT;
                dso->data.fd = -1;
                dso->data.status = DSO_DATA_STATUS_UNKNOWN;
                dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
@@ -1370,6 +1372,8 @@ void dso__delete(struct dso *dso)
        symbols__delete(&dso->symbols);
        dso->symbol_names_len = 0;
        zfree(&dso->symbol_names);
+       annotated_data_type__tree_delete(&dso->data_types);
+
        if (dso->short_name_allocated) {
                zfree((char **)&dso->short_name);
                dso->short_name_allocated = false;
index 3759de8c2267af674290b3bc5718a97c0e83997d..ce9f3849a773cc49c17b9835a675f895667843eb 100644 (file)
@@ -154,6 +154,8 @@ struct dso {
        size_t           symbol_names_len;
        struct rb_root_cached inlined_nodes;
        struct rb_root_cached srclines;
+       struct rb_root  data_types;
+
        struct {
                u64             addr;
                struct symbol   *symbol;
index 2941d88f2199c42b341cdb9fb741e0dba43cdcbe..7aa5fee0da1906a073ac9423305bf26deb569761 100644 (file)
@@ -1051,32 +1051,28 @@ Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
 }
 
 /**
- * die_get_typename - Get the name of given variable DIE
- * @vr_die: a variable DIE
+ * die_get_typename_from_type - Get the name of given type DIE
+ * @type_die: a type DIE
  * @buf: a strbuf for result type name
  *
- * Get the name of @vr_die and stores it to @buf. Return 0 if succeeded.
+ * Get the name of @type_die and stores it to @buf. Return 0 if succeeded.
  * and Return -ENOENT if failed to find type name.
  * Note that the result will stores typedef name if possible, and stores
  * "*(function_type)" if the type is a function pointer.
  */
-int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf)
+int die_get_typename_from_type(Dwarf_Die *type_die, struct strbuf *buf)
 {
-       Dwarf_Die type;
        int tag, ret;
        const char *tmp = "";
 
-       if (__die_get_real_type(vr_die, &type) == NULL)
-               return -ENOENT;
-
-       tag = dwarf_tag(&type);
+       tag = dwarf_tag(type_die);
        if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
                tmp = "*";
        else if (tag == DW_TAG_subroutine_type) {
                /* Function pointer */
                return strbuf_add(buf, "(function_type)", 15);
        } else {
-               const char *name = dwarf_diename(&type);
+               const char *name = dwarf_diename(type_die);
 
                if (tag == DW_TAG_union_type)
                        tmp = "union ";
@@ -1089,8 +1085,35 @@ int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf)
                /* Write a base name */
                return strbuf_addf(buf, "%s%s", tmp, name ?: "");
        }
-       ret = die_get_typename(&type, buf);
-       return ret ? ret : strbuf_addstr(buf, tmp);
+       ret = die_get_typename(type_die, buf);
+       if (ret < 0) {
+               /* void pointer has no type attribute */
+               if (tag == DW_TAG_pointer_type && ret == -ENOENT)
+                       return strbuf_addf(buf, "void*");
+
+               return ret;
+       }
+       return strbuf_addstr(buf, tmp);
+}
+
+/**
+ * die_get_typename - Get the name of given variable DIE
+ * @vr_die: a variable DIE
+ * @buf: a strbuf for result type name
+ *
+ * Get the type name of @vr_die and store it in @buf. Return 0 on success,
+ * or -ENOENT if the type name could not be found.
+ * Note that the result stores the typedef name if possible, and stores
+ * "*(function_type)" if the type is a function pointer.
+ */
+int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf)
+{
+       Dwarf_Die type;
+
+       if (__die_get_real_type(vr_die, &type) == NULL)
+               return -ENOENT;
+
+       return die_get_typename_from_type(&type, buf);
 }
 
 /**
@@ -1238,12 +1261,151 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
 out:
        return ret;
 }
-#else
-int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
-                     Dwarf_Die *vr_die __maybe_unused,
-                     struct strbuf *buf __maybe_unused)
+
+/* Internal parameters for __die_find_var_reg_cb() */
+struct find_var_data {
+       /* Target instruction address */
+       Dwarf_Addr pc;
+       /* Target memory address (for global data) */
+       Dwarf_Addr addr;
+       /* Target register */
+       unsigned reg;
+       /* Access offset, set for global data */
+       int offset;
+};
+
+/* Max number of registers DW_OP_regN supports */
+#define DWARF_OP_DIRECT_REGS  32
+
+/* Only checks direct child DIEs in the given scope. */
+static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
+{
+       struct find_var_data *data = arg;
+       int tag = dwarf_tag(die_mem);
+       ptrdiff_t off = 0;
+       Dwarf_Attribute attr;
+       Dwarf_Addr base, start, end;
+       Dwarf_Op *ops;
+       size_t nops;
+
+       if (tag != DW_TAG_variable && tag != DW_TAG_formal_parameter)
+               return DIE_FIND_CB_SIBLING;
+
+       if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL)
+               return DIE_FIND_CB_SIBLING;
+
+       while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) {
+               /* Assuming the location list is sorted by address */
+               if (end < data->pc)
+                       continue;
+               if (start > data->pc)
+                       break;
+
+               /* Only match with a simple case */
+               if (data->reg < DWARF_OP_DIRECT_REGS) {
+                       if (ops->atom == (DW_OP_reg0 + data->reg) && nops == 1)
+                               return DIE_FIND_CB_END;
+               } else {
+                       if (ops->atom == DW_OP_regx && ops->number == data->reg &&
+                           nops == 1)
+                               return DIE_FIND_CB_END;
+               }
+       }
+       return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_variable_by_reg - Find a variable saved in a register
+ * @sc_die: a scope DIE
+ * @pc: the program address to find
+ * @reg: the register number to find
+ * @die_mem: a buffer to save the resulting DIE
+ *
+ * Find the variable DIE accessed by the given register.
+ */
+Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg,
+                                   Dwarf_Die *die_mem)
+{
+       struct find_var_data data = {
+               .pc = pc,
+               .reg = reg,
+       };
+       return die_find_child(sc_die, __die_find_var_reg_cb, &data, die_mem);
+}
+
+/* Only checks direct child DIEs in the given scope */
+static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg)
+{
+       struct find_var_data *data = arg;
+       int tag = dwarf_tag(die_mem);
+       ptrdiff_t off = 0;
+       Dwarf_Attribute attr;
+       Dwarf_Addr base, start, end;
+       Dwarf_Word size;
+       Dwarf_Die type_die;
+       Dwarf_Op *ops;
+       size_t nops;
+
+       if (tag != DW_TAG_variable)
+               return DIE_FIND_CB_SIBLING;
+
+       if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL)
+               return DIE_FIND_CB_SIBLING;
+
+       while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) {
+               if (ops->atom != DW_OP_addr)
+                       continue;
+
+               if (data->addr < ops->number)
+                       continue;
+
+               if (data->addr == ops->number) {
+                       /* Update offset relative to the start of the variable */
+                       data->offset = 0;
+                       return DIE_FIND_CB_END;
+               }
+
+               if (die_get_real_type(die_mem, &type_die) == NULL)
+                       continue;
+
+               if (dwarf_aggregate_size(&type_die, &size) < 0)
+                       continue;
+
+               if (data->addr >= ops->number + size)
+                       continue;
+
+               /* Update offset relative to the start of the variable */
+               data->offset = data->addr - ops->number;
+               return DIE_FIND_CB_END;
+       }
+       return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_variable_by_addr - Find variable located at given address
+ * @sc_die: a scope DIE
+ * @pc: the program address to find
+ * @addr: the data address to find
+ * @die_mem: a buffer to save the resulting DIE
+ * @offset: the offset in the resulting type
+ *
+ * Find the variable DIE located at the given address (in PC-relative mode).
+ * This is usually for global variables.
+ */
+Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
+                                    Dwarf_Addr addr, Dwarf_Die *die_mem,
+                                    int *offset)
 {
-       return -ENOTSUP;
+       struct find_var_data data = {
+               .pc = pc,
+               .addr = addr,
+       };
+       Dwarf_Die *result;
+
+       result = die_find_child(sc_die, __die_find_var_addr_cb, &data, die_mem);
+       if (result)
+               *offset = data.offset;
+       return result;
 }
 #endif
 
@@ -1425,3 +1587,56 @@ void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die,
 
        *entrypc = postprologue_addr;
 }
+
+/* Internal parameters for __die_find_scope_cb() */
+struct find_scope_data {
+       /* Target instruction address */
+       Dwarf_Addr pc;
+       /* Number of scopes found [output] */
+       int nr;
+       /* Array of scopes found, 0 for the outermost one. [output] */
+       Dwarf_Die *scopes;
+};
+
+static int __die_find_scope_cb(Dwarf_Die *die_mem, void *arg)
+{
+       struct find_scope_data *data = arg;
+
+       if (dwarf_haspc(die_mem, data->pc)) {
+               Dwarf_Die *tmp;
+
+               tmp = realloc(data->scopes, (data->nr + 1) * sizeof(*tmp));
+               if (tmp == NULL)
+                       return DIE_FIND_CB_END;
+
+               memcpy(tmp + data->nr, die_mem, sizeof(*die_mem));
+               data->scopes = tmp;
+               data->nr++;
+               return DIE_FIND_CB_CHILD;
+       }
+       return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_get_scopes - Return a list of scopes including the address
+ * @cu_die: a compile unit DIE
+ * @pc: the address to find
+ * @scopes: the array of DIEs for scopes (result)
+ *
+ * This function does the same as the dwarf_getscopes() but doesn't follow
+ * the origins of inlined functions.  It returns the number of scopes saved
+ * in the @scopes argument.  The outer scope will be saved first (index 0) and
+ * the last one is the innermost scope at the @pc.
+ */
+int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes)
+{
+       struct find_scope_data data = {
+               .pc = pc,
+       };
+       Dwarf_Die die_mem;
+
+       die_find_child(cu_die, __die_find_scope_cb, &data, &die_mem);
+
+       *scopes = data.scopes;
+       return data.nr;
+}
index 7ec8bc1083bb33f81f6d128995552be35e6bb762..4e64caac6df83ea5ba292225894206b450d63222 100644 (file)
@@ -116,12 +116,14 @@ Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
 Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
                           Dwarf_Die *die_mem);
 
+/* Get the name of given type DIE */
+int die_get_typename_from_type(Dwarf_Die *type_die, struct strbuf *buf);
+
 /* Get the name of given variable DIE */
 int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf);
 
 /* Get the name and type of given variable DIE, stored as "type\tname" */
 int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf);
-int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf);
 
 /* Check if target program is compiled with optimization */
 bool die_is_optimized_target(Dwarf_Die *cu_die);
@@ -130,4 +132,49 @@ bool die_is_optimized_target(Dwarf_Die *cu_die);
 void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die,
                       Dwarf_Addr *entrypc);
 
-#endif
+/* Get the list of including scopes */
+int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes);
+
+#ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
+
+/* Get byte offset range of given variable DIE */
+int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf);
+
+/* Find a variable saved in the 'reg' at given address */
+Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg,
+                                   Dwarf_Die *die_mem);
+
+/* Find a (global) variable located in the 'addr' */
+Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
+                                    Dwarf_Addr addr, Dwarf_Die *die_mem,
+                                    int *offset);
+
+#else /*  HAVE_DWARF_GETLOCATIONS_SUPPORT */
+
+static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
+                                   Dwarf_Die *vr_die __maybe_unused,
+                                   struct strbuf *buf __maybe_unused)
+{
+       return -ENOTSUP;
+}
+
+static inline Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die __maybe_unused,
+                                                 Dwarf_Addr pc __maybe_unused,
+                                                 int reg __maybe_unused,
+                                                 Dwarf_Die *die_mem __maybe_unused)
+{
+       return NULL;
+}
+
+static inline Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die __maybe_unused,
+                                                  Dwarf_Addr pc __maybe_unused,
+                                                  Dwarf_Addr addr __maybe_unused,
+                                                  Dwarf_Die *die_mem __maybe_unused,
+                                                  int *offset __maybe_unused)
+{
+       return NULL;
+}
+
+#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
+
+#endif /* _DWARF_AUX_H */
index 69cfaa5953bf475cf02db129d1dc6ad6aa7332e3..5b7f86c0063f2b279fa5af9ee2f7c6b1b2220996 100644 (file)
@@ -5,9 +5,12 @@
  * Written by: Masami Hiramatsu <mhiramat@kernel.org>
  */
 
+#include <stdlib.h>
+#include <string.h>
 #include <debug.h>
 #include <dwarf-regs.h>
 #include <elf.h>
+#include <errno.h>
 #include <linux/kernel.h>
 
 #ifndef EM_AARCH64
@@ -68,3 +71,34 @@ const char *get_dwarf_regstr(unsigned int n, unsigned int machine)
        }
        return NULL;
 }
+
+__weak int get_arch_regnum(const char *name __maybe_unused)
+{
+       return -ENOTSUP;
+}
+
+/* Return DWARF register number from architecture register name */
+int get_dwarf_regnum(const char *name, unsigned int machine)
+{
+       char *regname = strdup(name);
+       int reg = -1;
+       char *p;
+
+       if (regname == NULL)
+               return -EINVAL;
+
+       /* For convenience, remove trailing characters */
+       p = strpbrk(regname, " ,)");
+       if (p)
+               *p = '\0';
+
+       switch (machine) {
+       case EM_NONE:   /* Generic arch - use host arch */
+               reg = get_arch_regnum(regname);
+               break;
+       default:
+               pr_err("ELF MACHINE %x is not supported.\n", machine);
+       }
+       free(regname);
+       return reg;
+}
index 44140b7f596a3f2009fc2f901317a602f14fe6c4..a459374d0a1a1dc89721e210616cfe201f01789d 100644 (file)
@@ -3,6 +3,7 @@
 #include "debug.h"
 #include "env.h"
 #include "util/header.h"
+#include "linux/compiler.h"
 #include <linux/ctype.h>
 #include <linux/zalloc.h>
 #include "cgroup.h"
@@ -12,6 +13,7 @@
 #include <string.h>
 #include "pmus.h"
 #include "strbuf.h"
+#include "trace/beauty/beauty.h"
 
 struct perf_env perf_env;
 
@@ -22,13 +24,19 @@ struct perf_env perf_env;
 
 void perf_env__insert_bpf_prog_info(struct perf_env *env,
                                    struct bpf_prog_info_node *info_node)
+{
+       down_write(&env->bpf_progs.lock);
+       __perf_env__insert_bpf_prog_info(env, info_node);
+       up_write(&env->bpf_progs.lock);
+}
+
+void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
 {
        __u32 prog_id = info_node->info_linear->info.id;
        struct bpf_prog_info_node *node;
        struct rb_node *parent = NULL;
        struct rb_node **p;
 
-       down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.infos.rb_node;
 
        while (*p != NULL) {
@@ -40,15 +48,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
                        p = &(*p)->rb_right;
                } else {
                        pr_debug("duplicated bpf prog info %u\n", prog_id);
-                       goto out;
+                       return;
                }
        }
 
        rb_link_node(&info_node->rb_node, parent, p);
        rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
        env->bpf_progs.infos_cnt++;
-out:
-       up_write(&env->bpf_progs.lock);
 }
 
 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
@@ -77,14 +83,22 @@ out:
 }
 
 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+       bool ret;
+
+       down_write(&env->bpf_progs.lock);
+       ret = __perf_env__insert_btf(env, btf_node);
+       up_write(&env->bpf_progs.lock);
+       return ret;
+}
+
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
 {
        struct rb_node *parent = NULL;
        __u32 btf_id = btf_node->id;
        struct btf_node *node;
        struct rb_node **p;
-       bool ret = true;
 
-       down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.btfs.rb_node;
 
        while (*p != NULL) {
@@ -96,25 +110,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
                        p = &(*p)->rb_right;
                } else {
                        pr_debug("duplicated btf %u\n", btf_id);
-                       ret = false;
-                       goto out;
+                       return false;
                }
        }
 
        rb_link_node(&btf_node->rb_node, parent, p);
        rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
        env->bpf_progs.btfs_cnt++;
-out:
-       up_write(&env->bpf_progs.lock);
-       return ret;
+       return true;
 }
 
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
+       struct btf_node *res;
+
+       down_read(&env->bpf_progs.lock);
+       res = __perf_env__find_btf(env, btf_id);
+       up_read(&env->bpf_progs.lock);
+       return res;
+}
+
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
 {
        struct btf_node *node = NULL;
        struct rb_node *n;
 
-       down_read(&env->bpf_progs.lock);
        n = env->bpf_progs.btfs.rb_node;
 
        while (n) {
@@ -124,13 +144,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
                else if (btf_id > node->id)
                        n = n->rb_right;
                else
-                       goto out;
+                       return node;
        }
-       node = NULL;
-
-out:
-       up_read(&env->bpf_progs.lock);
-       return node;
+       return NULL;
 }
 
 /* purge data in bpf_progs.infos tree */
@@ -453,6 +469,18 @@ const char *perf_env__arch(struct perf_env *env)
        return normalize_arch(arch_name);
 }
 
+const char *perf_env__arch_strerrno(struct perf_env *env __maybe_unused, int err __maybe_unused)
+{
+#if defined(HAVE_SYSCALL_TABLE_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+       if (env->arch_strerrno == NULL)
+               env->arch_strerrno = arch_syscalls__strerrno_function(perf_env__arch(env));
+
+       return env->arch_strerrno ? env->arch_strerrno(err) : "no arch specific strerrno function";
+#else
+       return "!(HAVE_SYSCALL_TABLE_SUPPORT && HAVE_LIBTRACEEVENT)";
+#endif
+}
+
 const char *perf_env__cpuid(struct perf_env *env)
 {
        int status;
@@ -531,6 +559,24 @@ int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
        return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
 }
 
+bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name)
+{
+       char *pmu_mapping = env->pmu_mappings, *colon;
+
+       for (int i = 0; i < env->nr_pmu_mappings; ++i) {
+               if (strtoul(pmu_mapping, &colon, 0) == ULONG_MAX || *colon != ':')
+                       goto out_error;
+
+               pmu_mapping = colon + 1;
+               if (strcmp(pmu_mapping, pmu_name) == 0)
+                       return true;
+
+               pmu_mapping += strlen(pmu_mapping) + 1;
+       }
+out_error:
+       return false;
+}
+
 char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
                             const char *cap)
 {
index 4566c51f2fd956ca12ee8a17ef66a3439b0571f4..7c527e65c1864b524c8dfc1d844fac1f6e3ee1a7 100644 (file)
@@ -46,10 +46,17 @@ struct hybrid_node {
 struct pmu_caps {
        int             nr_caps;
        unsigned int    max_branches;
+       unsigned int    br_cntr_nr;
+       unsigned int    br_cntr_width;
+
        char            **caps;
        char            *pmu_name;
 };
 
+typedef const char *(arch_syscalls__strerrno_t)(int err);
+
+arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch);
+
 struct perf_env {
        char                    *hostname;
        char                    *os_release;
@@ -62,6 +69,8 @@ struct perf_env {
        unsigned long long      total_mem;
        unsigned int            msr_pmu_type;
        unsigned int            max_branches;
+       unsigned int            br_cntr_nr;
+       unsigned int            br_cntr_width;
        int                     kernel_is_64_bit;
 
        int                     nr_cmdline;
@@ -130,6 +139,7 @@ struct perf_env {
                 */
                bool    enabled;
        } clock;
+       arch_syscalls__strerrno_t *arch_strerrno;
 };
 
 enum perf_compress_type {
@@ -159,19 +169,26 @@ int perf_env__read_cpu_topology_map(struct perf_env *env);
 void cpu_cache_level__free(struct cpu_cache_level *cache);
 
 const char *perf_env__arch(struct perf_env *env);
+const char *perf_env__arch_strerrno(struct perf_env *env, int err);
 const char *perf_env__cpuid(struct perf_env *env);
 const char *perf_env__raw_arch(struct perf_env *env);
 int perf_env__nr_cpus_avail(struct perf_env *env);
 
 void perf_env__init(struct perf_env *env);
+void __perf_env__insert_bpf_prog_info(struct perf_env *env,
+                                     struct bpf_prog_info_node *info_node);
 void perf_env__insert_bpf_prog_info(struct perf_env *env,
                                    struct bpf_prog_info_node *info_node);
 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
                                                        __u32 prog_id);
 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
 
 int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
 char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
                             const char *cap);
+
+bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);
 #endif /* __PERF_ENV_H */
index 923c0fb1512226a60c7a01730c405ca15e6982c9..68f45e9e63b6e4f8fcdf6476dd0b2f9c3789dd3a 100644 (file)
@@ -617,13 +617,13 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
        if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
                al->level = 'k';
                maps = machine__kernel_maps(machine);
-               load_map = true;
+               load_map = !symbol_conf.lazy_load_kernel_maps;
        } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
                al->level = '.';
        } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
                al->level = 'g';
                maps = machine__kernel_maps(machine);
-               load_map = true;
+               load_map = !symbol_conf.lazy_load_kernel_maps;
        } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
                al->level = 'u';
        } else {
index e36da58522efb3d9639bd6f12c00ff7ecb6e9a8b..55a300a0977b416e60e90819ad1a9feefcdcbc84 100644 (file)
@@ -103,7 +103,14 @@ struct evlist *evlist__new_default(void)
        err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
        if (err) {
                evlist__delete(evlist);
-               evlist = NULL;
+               return NULL;
+       }
+
+       if (evlist->core.nr_entries > 1) {
+               struct evsel *evsel;
+
+               evlist__for_each_entry(evlist, evsel)
+                       evsel__set_sample_id(evsel, /*can_sample_identifier=*/false);
        }
 
        return evlist;
@@ -1056,7 +1063,7 @@ int evlist__create_maps(struct evlist *evlist, struct target *target)
                return -1;
 
        if (target__uses_dummy_map(target))
-               cpus = perf_cpu_map__dummy_new();
+               cpus = perf_cpu_map__new_any_cpu();
        else
                cpus = perf_cpu_map__new(target->cpu_list);
 
@@ -1352,7 +1359,7 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
         * error, and we may not want to do that fallback to a
         * default cpu identity map :-\
         */
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        if (!cpus)
                goto out;
 
@@ -2518,3 +2525,33 @@ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_lis
        }
        perf_cpu_map__put(user_requested_cpus);
 }
+
+void evlist__uniquify_name(struct evlist *evlist)
+{
+       char *new_name, empty_attributes[2] = ":", *attributes;
+       struct evsel *pos;
+
+       if (perf_pmus__num_core_pmus() == 1)
+               return;
+
+       evlist__for_each_entry(evlist, pos) {
+               if (!evsel__is_hybrid(pos))
+                       continue;
+
+               if (strchr(pos->name, '/'))
+                       continue;
+
+               attributes = strchr(pos->name, ':');
+               if (attributes)
+                       *attributes = '\0';
+               else
+                       attributes = empty_attributes;
+
+               if (asprintf(&new_name, "%s/%s/%s", pos->pmu_name, pos->name, attributes + 1)) {
+                       free(pos->name);
+                       pos->name = new_name;
+               } else {
+                       *attributes = ':';
+               }
+       }
+}
index 98e7ddb2bd3058106f77271f61f954242bc3984f..cb91dc9117a2726b34b9dce5265186c89eee96cd 100644 (file)
@@ -442,5 +442,6 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
 int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
 void evlist__check_mem_load_aux(struct evlist *evlist);
 void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list);
+void evlist__uniquify_name(struct evlist *evlist);
 
 #endif /* __PERF_EVLIST_H */
index 72a5dfc38d3806c50ed3c0b933d9a94a56215945..6d7c9c58a9bcb8b7ed70e38286026cac16543163 100644 (file)
@@ -1801,7 +1801,7 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
 
        if (cpus == NULL) {
                if (empty_cpu_map == NULL) {
-                       empty_cpu_map = perf_cpu_map__dummy_new();
+                       empty_cpu_map = perf_cpu_map__new_any_cpu();
                        if (empty_cpu_map == NULL)
                                return -ENOMEM;
                }
@@ -1832,6 +1832,8 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
 
 static void evsel__disable_missing_features(struct evsel *evsel)
 {
+       if (perf_missing_features.branch_counters)
+               evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_COUNTERS;
        if (perf_missing_features.read_lost)
                evsel->core.attr.read_format &= ~PERF_FORMAT_LOST;
        if (perf_missing_features.weight_struct) {
@@ -1885,7 +1887,12 @@ bool evsel__detect_missing_features(struct evsel *evsel)
         * Must probe features in the order they were added to the
         * perf_event_attr interface.
         */
-       if (!perf_missing_features.read_lost &&
+       if (!perf_missing_features.branch_counters &&
+           (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS)) {
+               perf_missing_features.branch_counters = true;
+               pr_debug2("switching off branch counters support\n");
+               return true;
+       } else if (!perf_missing_features.read_lost &&
            (evsel->core.attr.read_format & PERF_FORMAT_LOST)) {
                perf_missing_features.read_lost = true;
                pr_debug2("switching off PERF_FORMAT_LOST support\n");
@@ -2318,6 +2325,22 @@ u64 evsel__bitfield_swap_branch_flags(u64 value)
        return new_val;
 }
 
+static inline bool evsel__has_branch_counters(const struct evsel *evsel)
+{
+       struct evsel *cur, *leader = evsel__leader(evsel);
+
+       /* The branch counters feature only supports group */
+       if (!leader || !evsel->evlist)
+               return false;
+
+       evlist__for_each_entry(evsel->evlist, cur) {
+               if ((leader == evsel__leader(cur)) &&
+                   (cur->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
+                       return true;
+       }
+       return false;
+}
+
 int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
                        struct perf_sample *data)
 {
@@ -2551,6 +2574,16 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
 
                OVERFLOW_CHECK(array, sz, max_size);
                array = (void *)array + sz;
+
+               if (evsel__has_branch_counters(evsel)) {
+                       OVERFLOW_CHECK_u64(array);
+
+                       data->branch_stack_cntr = (u64 *)array;
+                       sz = data->branch_stack->nr * sizeof(u64);
+
+                       OVERFLOW_CHECK(array, sz, max_size);
+                       array = (void *)array + sz;
+               }
        }
 
        if (type & PERF_SAMPLE_REGS_USER) {
@@ -2820,7 +2853,8 @@ u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const
 
 #endif
 
-bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
+bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
+                    char *msg, size_t msgsize)
 {
        int paranoid;
 
@@ -2828,18 +2862,19 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
            evsel->core.attr.type   == PERF_TYPE_HARDWARE &&
            evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
                /*
-                * If it's cycles then fall back to hrtimer based
-                * cpu-clock-tick sw counter, which is always available even if
-                * no PMU support.
+                * If it's cycles then fall back to hrtimer based cpu-clock sw
+                * counter, which is always available even if no PMU support.
                 *
                 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
                 * b0a873e).
                 */
-               scnprintf(msg, msgsize, "%s",
-"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
-
                evsel->core.attr.type   = PERF_TYPE_SOFTWARE;
-               evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
+               evsel->core.attr.config = target__has_cpu(target)
+                       ? PERF_COUNT_SW_CPU_CLOCK
+                       : PERF_COUNT_SW_TASK_CLOCK;
+               scnprintf(msg, msgsize,
+                       "The cycles event is not supported, trying to fall back to %s",
+                       target__has_cpu(target) ? "cpu-clock" : "task-clock");
 
                zfree(&evsel->name);
                return true;
index d791316a1792e5931ef5ebaf81215f21104636c8..efbb6e848287f3f6b4f9f0aca779b2a6590ec42f 100644 (file)
@@ -191,6 +191,7 @@ struct perf_missing_features {
        bool code_page_size;
        bool weight_struct;
        bool read_lost;
+       bool branch_counters;
 };
 
 extern struct perf_missing_features perf_missing_features;
@@ -459,7 +460,8 @@ static inline bool evsel__is_clock(const struct evsel *evsel)
               evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
 }
 
-bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize);
+bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
+                    char *msg, size_t msgsize);
 int evsel__open_strerror(struct evsel *evsel, struct target *target,
                         int err, char *msg, size_t size);
 
index fefc72066c4e8ee1e85068180a752c12cfb15d23..ac17a3cb59dc0d08621015506b48188ea4a74d03 100644 (file)
@@ -293,9 +293,9 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
         */
        phdr = elf_newphdr(e, 1);
        phdr[0].p_type = PT_LOAD;
-       phdr[0].p_offset = 0;
-       phdr[0].p_vaddr = 0;
-       phdr[0].p_paddr = 0;
+       phdr[0].p_offset = GEN_ELF_TEXT_OFFSET;
+       phdr[0].p_vaddr = GEN_ELF_TEXT_OFFSET;
+       phdr[0].p_paddr = GEN_ELF_TEXT_OFFSET;
        phdr[0].p_filesz = csize;
        phdr[0].p_memsz = csize;
        phdr[0].p_flags = PF_X | PF_R;
index e86b9439ffee054a4088efc944b1b4b657f8d330..3fe28edc3d017a39127abdf64c8289cc995cda35 100644 (file)
@@ -1444,7 +1444,9 @@ static int build_mem_topology(struct memory_node **nodesp, u64 *cntp)
                        nodes = new_nodes;
                        size += 4;
                }
-               ret = memory_node__read(&nodes[cnt++], idx);
+               ret = memory_node__read(&nodes[cnt], idx);
+               if (!ret)
+                       cnt += 1;
        }
 out:
        closedir(dir);
@@ -1847,8 +1849,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
 
-               bpf_event__print_bpf_prog_info(&node->info_linear->info,
-                                              env, fp);
+               __bpf_event__print_bpf_prog_info(&node->info_linear->info,
+                                                env, fp);
        }
 
        up_read(&env->bpf_progs.lock);
@@ -2145,6 +2147,14 @@ static void print_pmu_caps(struct feat_fd *ff, FILE *fp)
                __print_pmu_caps(fp, pmu_caps->nr_caps, pmu_caps->caps,
                                 pmu_caps->pmu_name);
        }
+
+       if (strcmp(perf_env__arch(&ff->ph->env), "x86") == 0 &&
+           perf_env__has_pmu_mapping(&ff->ph->env, "ibs_op")) {
+               char *max_precise = perf_env__find_pmu_cap(&ff->ph->env, "cpu", "max_precise");
+
+               if (max_precise != NULL && atoi(max_precise) == 0)
+                       fprintf(fp, "# AMD systems uses ibs_op// PMU for some precise events, e.g.: cycles:p, see the 'perf list' man page for further details.\n");
+       }
 }
 
 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
@@ -3178,7 +3188,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
                /* after reading from file, translate offset to address */
                bpil_offs_to_addr(info_linear);
                info_node->info_linear = info_linear;
-               perf_env__insert_bpf_prog_info(env, info_node);
+               __perf_env__insert_bpf_prog_info(env, info_node);
        }
 
        up_write(&env->bpf_progs.lock);
@@ -3225,7 +3235,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
                if (__do_read(ff, node->data, data_size))
                        goto out;
 
-               perf_env__insert_btf(env, node);
+               __perf_env__insert_btf(env, node);
                node = NULL;
        }
 
@@ -3259,7 +3269,9 @@ static int process_compressed(struct feat_fd *ff,
 }
 
 static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps,
-                             char ***caps, unsigned int *max_branches)
+                             char ***caps, unsigned int *max_branches,
+                             unsigned int *br_cntr_nr,
+                             unsigned int *br_cntr_width)
 {
        char *name, *value, *ptr;
        u32 nr_pmu_caps, i;
@@ -3294,6 +3306,12 @@ static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps,
                if (!strcmp(name, "branches"))
                        *max_branches = atoi(value);
 
+               if (!strcmp(name, "branch_counter_nr"))
+                       *br_cntr_nr = atoi(value);
+
+               if (!strcmp(name, "branch_counter_width"))
+                       *br_cntr_width = atoi(value);
+
                free(value);
                free(name);
        }
@@ -3318,7 +3336,9 @@ static int process_cpu_pmu_caps(struct feat_fd *ff,
 {
        int ret = __process_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
                                     &ff->ph->env.cpu_pmu_caps,
-                                    &ff->ph->env.max_branches);
+                                    &ff->ph->env.max_branches,
+                                    &ff->ph->env.br_cntr_nr,
+                                    &ff->ph->env.br_cntr_width);
 
        if (!ret && !ff->ph->env.cpu_pmu_caps)
                pr_debug("cpu pmu capabilities not available\n");
@@ -3347,7 +3367,9 @@ static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
        for (i = 0; i < nr_pmu; i++) {
                ret = __process_pmu_caps(ff, &pmu_caps[i].nr_caps,
                                         &pmu_caps[i].caps,
-                                        &pmu_caps[i].max_branches);
+                                        &pmu_caps[i].max_branches,
+                                        &pmu_caps[i].br_cntr_nr,
+                                        &pmu_caps[i].br_cntr_width);
                if (ret)
                        goto err;
 
@@ -4369,9 +4391,10 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
                ret += fprintf(fp, "... ");
 
                map = cpu_map__new_data(&ev->cpus.cpus);
-               if (map)
+               if (map) {
                        ret += cpu_map__fprintf(map, fp);
-               else
+                       perf_cpu_map__put(map);
+               } else
                        ret += fprintf(fp, "failed to get cpus\n");
                break;
        default:
index 43bd1ca62d58244583f8c8f742d890c79b6e7b36..52d0ce302ca042ed0bae720273443d3f2794affb 100644 (file)
@@ -123,6 +123,7 @@ static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
        if (dump_trace)
                hisi_ptt_dump_event(ptt, data, size);
 
+       free(data);
        return 0;
 }
 
index 0888b7163b7cc25c33724f4e61099ab16a2ab60a..fa359180ebf8fc45e1248e4241543817e0660260 100644 (file)
@@ -491,8 +491,8 @@ static int hist_entry__init(struct hist_entry *he,
        }
 
        if (symbol_conf.res_sample) {
-               he->res_samples = calloc(sizeof(struct res_sample),
-                                       symbol_conf.res_sample);
+               he->res_samples = calloc(symbol_conf.res_sample,
+                                       sizeof(struct res_sample));
                if (!he->res_samples)
                        goto err_srcline;
        }
index afc9f1c7f4dc248cbc1b70104a0405d42264fdca..4a0aea0c9e00e09b64520df0948460493acb55b5 100644 (file)
@@ -82,6 +82,9 @@ enum hist_column {
        HISTC_ADDR_TO,
        HISTC_ADDR,
        HISTC_SIMD,
+       HISTC_TYPE,
+       HISTC_TYPE_OFFSET,
+       HISTC_SYMBOL_OFFSET,
        HISTC_NR_COLS, /* Last entry */
 };
 
@@ -457,7 +460,6 @@ struct hist_browser_timer {
        int refresh;
 };
 
-struct annotation_options;
 struct res_sample;
 
 enum rstype {
@@ -473,16 +475,13 @@ struct block_hist;
 void attr_to_script(char *buf, struct perf_event_attr *attr);
 
 int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
-                            struct hist_browser_timer *hbt,
-                            struct annotation_options *annotation_opts);
+                            struct hist_browser_timer *hbt);
 
 int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
-                            struct hist_browser_timer *hbt,
-                            struct annotation_options *annotation_opts);
+                            struct hist_browser_timer *hbt);
 
 int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt,
-                            float min_pcnt, struct perf_env *env, bool warn_lost_event,
-                            struct annotation_options *annotation_options);
+                            float min_pcnt, struct perf_env *env, bool warn_lost_event);
 
 int script_browse(const char *script_opt, struct evsel *evsel);
 
@@ -492,8 +491,7 @@ int res_sample_browse(struct res_sample *res_samples, int num_res,
 void res_sample_init(void);
 
 int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
-                          float min_percent, struct perf_env *env,
-                          struct annotation_options *annotation_opts);
+                          float min_percent, struct perf_env *env);
 #else
 static inline
 int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused,
@@ -501,23 +499,20 @@ int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused,
                             struct hist_browser_timer *hbt __maybe_unused,
                             float min_pcnt __maybe_unused,
                             struct perf_env *env __maybe_unused,
-                            bool warn_lost_event __maybe_unused,
-                            struct annotation_options *annotation_options __maybe_unused)
+                            bool warn_lost_event __maybe_unused)
 {
        return 0;
 }
 static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
                                           struct evsel *evsel __maybe_unused,
-                                          struct hist_browser_timer *hbt __maybe_unused,
-                                          struct annotation_options *annotation_options __maybe_unused)
+                                          struct hist_browser_timer *hbt __maybe_unused)
 {
        return 0;
 }
 
 static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
                                           struct evsel *evsel __maybe_unused,
-                                          struct hist_browser_timer *hbt __maybe_unused,
-                                          struct annotation_options *annotation_opts __maybe_unused)
+                                          struct hist_browser_timer *hbt __maybe_unused)
 {
        return 0;
 }
@@ -541,8 +536,7 @@ static inline void res_sample_init(void) {}
 static inline int block_hists_tui_browse(struct block_hist *bh __maybe_unused,
                                         struct evsel *evsel __maybe_unused,
                                         float min_percent __maybe_unused,
-                                        struct perf_env *env __maybe_unused,
-                                        struct annotation_options *annotation_opts __maybe_unused)
+                                        struct perf_env *env __maybe_unused)
 {
        return 0;
 }
index 7d99a084e82d7c1047d0911365cb3828e6c3ab60..01fb25a1150af8d9af9c1fd222d4aec89a534a4e 100644 (file)
@@ -2,6 +2,9 @@
 #ifndef _PERF_DWARF_REGS_H_
 #define _PERF_DWARF_REGS_H_
 
+#define DWARF_REG_PC  0xd3af9c /* random number */
+#define DWARF_REG_FB  0xd3affb /* random number */
+
 #ifdef HAVE_DWARF_SUPPORT
 const char *get_arch_regstr(unsigned int n);
 /*
@@ -10,6 +13,22 @@ const char *get_arch_regstr(unsigned int n);
  * machine: ELF machine signature (EM_*)
  */
 const char *get_dwarf_regstr(unsigned int n, unsigned int machine);
+
+int get_arch_regnum(const char *name);
+/*
+ * get_dwarf_regnum - Returns DWARF regnum from register name
+ * name: architecture register name
+ * machine: ELF machine signature (EM_*)
+ */
+int get_dwarf_regnum(const char *name, unsigned int machine);
+
+#else /* HAVE_DWARF_SUPPORT */
+
+static inline int get_dwarf_regnum(const char *name __maybe_unused,
+                                  unsigned int machine __maybe_unused)
+{
+       return -1;
+}
 #endif
 
 #ifdef HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
index 75e2248416f55f6792563a2614b211a36281222f..178b00205fe6a7b2d75f3a9d68b7bad99ccd82af 100644 (file)
        SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_WEAK)
 #endif
 
+#ifndef SYM_FUNC_ALIAS_MEMFUNC
+#define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS
+#endif
+
 // In the kernel sources (include/linux/cfi_types.h), this has a different
 // definition when CONFIG_CFI_CLANG is used, for tools/ just use the !clang
 // definition:
index 90c750150b19bdbc0192c5bdc0fd306a2554aeca..b397a769006f45ac1b7716f4efdb2147c86977ae 100644 (file)
@@ -453,7 +453,7 @@ static struct thread *findnew_guest_code(struct machine *machine,
         * Guest code can be found in hypervisor process at the same address
         * so copy host maps.
         */
-       err = maps__clone(thread, thread__maps(host_thread));
+       err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
        thread__put(host_thread);
        if (err)
                goto out_err;
@@ -1285,33 +1285,46 @@ static u64 find_entry_trampoline(struct dso *dso)
 #define X86_64_CPU_ENTRY_AREA_SIZE     0x2c000
 #define X86_64_ENTRY_TRAMPOLINE                0x6000
 
+struct machine__map_x86_64_entry_trampolines_args {
+       struct maps *kmaps;
+       bool found;
+};
+
+static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
+{
+       struct machine__map_x86_64_entry_trampolines_args *args = data;
+       struct map *dest_map;
+       struct kmap *kmap = __map__kmap(map);
+
+       if (!kmap || !is_entry_trampoline(kmap->name))
+               return 0;
+
+       dest_map = maps__find(args->kmaps, map__pgoff(map));
+       if (dest_map != map)
+               map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
+
+       args->found = true;
+       return 0;
+}
+
 /* Map x86_64 PTI entry trampolines */
 int machine__map_x86_64_entry_trampolines(struct machine *machine,
                                          struct dso *kernel)
 {
-       struct maps *kmaps = machine__kernel_maps(machine);
+       struct machine__map_x86_64_entry_trampolines_args args = {
+               .kmaps = machine__kernel_maps(machine),
+               .found = false,
+       };
        int nr_cpus_avail, cpu;
-       bool found = false;
-       struct map_rb_node *rb_node;
        u64 pgoff;
 
        /*
         * In the vmlinux case, pgoff is a virtual address which must now be
         * mapped to a vmlinux offset.
         */
-       maps__for_each_entry(kmaps, rb_node) {
-               struct map *dest_map, *map = rb_node->map;
-               struct kmap *kmap = __map__kmap(map);
-
-               if (!kmap || !is_entry_trampoline(kmap->name))
-                       continue;
+       maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);
 
-               dest_map = maps__find(kmaps, map__pgoff(map));
-               if (dest_map != map)
-                       map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
-               found = true;
-       }
-       if (found || machine->trampolines_mapped)
+       if (args.found || machine->trampolines_mapped)
                return 0;
 
        pgoff = find_entry_trampoline(kernel);
@@ -1359,8 +1372,7 @@ __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
        if (machine->vmlinux_map == NULL)
                return -ENOMEM;
 
-       map__set_map_ip(machine->vmlinux_map, identity__map_ip);
-       map__set_unmap_ip(machine->vmlinux_map, identity__map_ip);
+       map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY);
        return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
 }
 
@@ -1750,12 +1762,11 @@ int machine__create_kernel_maps(struct machine *machine)
 
        if (end == ~0ULL) {
                /* update end address of the kernel map using adjacent module address */
-               struct map_rb_node *rb_node = maps__find_node(machine__kernel_maps(machine),
-                                                       machine__kernel_map(machine));
-               struct map_rb_node *next = map_rb_node__next(rb_node);
+               struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
+                                                        machine__kernel_map(machine));
 
                if (next)
-                       machine__set_kernel_mmap(machine, start, map__start(next->map));
+                       machine__set_kernel_mmap(machine, start, map__start(next));
        }
 
 out_put:
@@ -2157,9 +2168,13 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
        if (dump_trace)
                perf_event__fprintf_task(event, stdout);
 
-       if (thread != NULL)
-               thread__put(thread);
-
+       if (thread != NULL) {
+               if (symbol_conf.keep_exited_threads)
+                       thread__set_exited(thread, /*exited=*/true);
+               else
+                       machine__remove_thread(machine, thread);
+       }
+       thread__put(thread);
        return 0;
 }
 
@@ -3395,16 +3410,8 @@ int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv
 int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
 {
        struct maps *maps = machine__kernel_maps(machine);
-       struct map_rb_node *pos;
-       int err = 0;
 
-       maps__for_each_entry(maps, pos) {
-               err = fn(pos->map, priv);
-               if (err != 0) {
-                       break;
-               }
-       }
-       return err;
+       return maps__for_each_map(maps, fn, priv);
 }
 
 bool machine__is_lock_function(struct machine *machine, u64 addr)
index f64b830044217d520f5a2ec34f85e3bf8bbbd44f..54c67cb7ecefa441608e383476c6953563272f5a 100644 (file)
@@ -109,8 +109,7 @@ void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
        map__set_pgoff(map, pgoff);
        map__set_reloc(map, 0);
        map__set_dso(map, dso__get(dso));
-       map__set_map_ip(map, map__dso_map_ip);
-       map__set_unmap_ip(map, map__dso_unmap_ip);
+       map__set_mapping_type(map, MAPPING_TYPE__DSO);
        map__set_erange_warned(map, false);
        refcount_set(map__refcnt(map), 1);
 }
@@ -172,7 +171,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
                map__init(result, start, start + len, pgoff, dso);
 
                if (anon || no_dso) {
-                       map->map_ip = map->unmap_ip = identity__map_ip;
+                       map->mapping_type = MAPPING_TYPE__IDENTITY;
 
                        /*
                         * Set memory without DSO as loaded. All map__find_*
@@ -630,18 +629,3 @@ struct maps *map__kmaps(struct map *map)
        }
        return kmap->kmaps;
 }
-
-u64 map__dso_map_ip(const struct map *map, u64 ip)
-{
-       return ip - map__start(map) + map__pgoff(map);
-}
-
-u64 map__dso_unmap_ip(const struct map *map, u64 ip)
-{
-       return ip + map__start(map) - map__pgoff(map);
-}
-
-u64 identity__map_ip(const struct map *map __maybe_unused, u64 ip)
-{
-       return ip;
-}
index 1b53d53adc866eacecabf0f95035343f5434338c..49756716cb132790f3cb9d9c2511fc73c9a294ba 100644 (file)
@@ -16,23 +16,25 @@ struct dso;
 struct maps;
 struct machine;
 
+enum mapping_type {
+       /* map__map_ip/map__unmap_ip are given as offsets in the DSO. */
+       MAPPING_TYPE__DSO,
+       /* map__map_ip/map__unmap_ip are just the given ip value. */
+       MAPPING_TYPE__IDENTITY,
+};
+
 DECLARE_RC_STRUCT(map) {
        u64                     start;
        u64                     end;
-       bool                    erange_warned:1;
-       bool                    priv:1;
-       u32                     prot;
        u64                     pgoff;
        u64                     reloc;
-
-       /* ip -> dso rip */
-       u64                     (*map_ip)(const struct map *, u64);
-       /* dso rip -> ip */
-       u64                     (*unmap_ip)(const struct map *, u64);
-
        struct dso              *dso;
        refcount_t              refcnt;
+       u32                     prot;
        u32                     flags;
+       enum mapping_type       mapping_type:8;
+       bool                    erange_warned;
+       bool                    priv;
 };
 
 struct kmap;
@@ -41,38 +43,11 @@ struct kmap *__map__kmap(struct map *map);
 struct kmap *map__kmap(struct map *map);
 struct maps *map__kmaps(struct map *map);
 
-/* ip -> dso rip */
-u64 map__dso_map_ip(const struct map *map, u64 ip);
-/* dso rip -> ip */
-u64 map__dso_unmap_ip(const struct map *map, u64 ip);
-/* Returns ip */
-u64 identity__map_ip(const struct map *map __maybe_unused, u64 ip);
-
 static inline struct dso *map__dso(const struct map *map)
 {
        return RC_CHK_ACCESS(map)->dso;
 }
 
-static inline u64 map__map_ip(const struct map *map, u64 ip)
-{
-       return RC_CHK_ACCESS(map)->map_ip(map, ip);
-}
-
-static inline u64 map__unmap_ip(const struct map *map, u64 ip)
-{
-       return RC_CHK_ACCESS(map)->unmap_ip(map, ip);
-}
-
-static inline void *map__map_ip_ptr(struct map *map)
-{
-       return RC_CHK_ACCESS(map)->map_ip;
-}
-
-static inline void* map__unmap_ip_ptr(struct map *map)
-{
-       return RC_CHK_ACCESS(map)->unmap_ip;
-}
-
 static inline u64 map__start(const struct map *map)
 {
        return RC_CHK_ACCESS(map)->start;
@@ -123,6 +98,34 @@ static inline size_t map__size(const struct map *map)
        return map__end(map) - map__start(map);
 }
 
+/* ip -> dso rip */
+static inline u64 map__dso_map_ip(const struct map *map, u64 ip)
+{
+       return ip - map__start(map) + map__pgoff(map);
+}
+
+/* dso rip -> ip */
+static inline u64 map__dso_unmap_ip(const struct map *map, u64 rip)
+{
+       return rip + map__start(map) - map__pgoff(map);
+}
+
+static inline u64 map__map_ip(const struct map *map, u64 ip_or_rip)
+{
+       if ((RC_CHK_ACCESS(map)->mapping_type) == MAPPING_TYPE__DSO)
+               return map__dso_map_ip(map, ip_or_rip);
+       else
+               return ip_or_rip;
+}
+
+static inline u64 map__unmap_ip(const struct map *map, u64 ip_or_rip)
+{
+       if ((RC_CHK_ACCESS(map)->mapping_type) == MAPPING_TYPE__DSO)
+               return map__dso_unmap_ip(map, ip_or_rip);
+       else
+               return ip_or_rip;
+}
+
 /* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
 u64 map__rip_2objdump(struct map *map, u64 rip);
 
@@ -294,13 +297,13 @@ static inline void map__set_dso(struct map *map, struct dso *dso)
        RC_CHK_ACCESS(map)->dso = dso;
 }
 
-static inline void map__set_map_ip(struct map *map, u64 (*map_ip)(const struct map *map, u64 ip))
+static inline void map__set_mapping_type(struct map *map, enum mapping_type type)
 {
-       RC_CHK_ACCESS(map)->map_ip = map_ip;
+       RC_CHK_ACCESS(map)->mapping_type = type;
 }
 
-static inline void map__set_unmap_ip(struct map *map, u64 (*unmap_ip)(const struct map *map, u64 rip))
+static inline enum mapping_type map__mapping_type(struct map *map)
 {
-       RC_CHK_ACCESS(map)->unmap_ip = unmap_ip;
+       return RC_CHK_ACCESS(map)->mapping_type;
 }
 #endif /* __PERF_MAP_H */
index 233438c95b531f7ac6b571723a9e39f7b4000019..0334fc18d9c65897c5e76111d8cb3a6f8a4a53ba 100644 (file)
 #include "ui/ui.h"
 #include "unwind.h"
 
+struct map_rb_node {
+       struct rb_node rb_node;
+       struct map *map;
+};
+
+#define maps__for_each_entry(maps, map) \
+       for (map = maps__first(maps); map; map = map_rb_node__next(map))
+
+#define maps__for_each_entry_safe(maps, map, next) \
+       for (map = maps__first(maps), next = map_rb_node__next(map); map; \
+            map = next, next = map_rb_node__next(map))
+
+static struct rb_root *maps__entries(struct maps *maps)
+{
+       return &RC_CHK_ACCESS(maps)->entries;
+}
+
+static struct rw_semaphore *maps__lock(struct maps *maps)
+{
+       return &RC_CHK_ACCESS(maps)->lock;
+}
+
+static struct map **maps__maps_by_name(struct maps *maps)
+{
+       return RC_CHK_ACCESS(maps)->maps_by_name;
+}
+
+static struct map_rb_node *maps__first(struct maps *maps)
+{
+       struct rb_node *first = rb_first(maps__entries(maps));
+
+       if (first)
+               return rb_entry(first, struct map_rb_node, rb_node);
+       return NULL;
+}
+
+static struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
+{
+       struct rb_node *next;
+
+       if (!node)
+               return NULL;
+
+       next = rb_next(&node->rb_node);
+
+       if (!next)
+               return NULL;
+
+       return rb_entry(next, struct map_rb_node, rb_node);
+}
+
+static struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
+{
+       struct map_rb_node *rb_node;
+
+       maps__for_each_entry(maps, rb_node) {
+               if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
+                       return rb_node;
+       }
+       return NULL;
+}
+
 static void maps__init(struct maps *maps, struct machine *machine)
 {
        refcount_set(maps__refcnt(maps), 1);
@@ -196,6 +258,41 @@ void maps__put(struct maps *maps)
                RC_CHK_PUT(maps);
 }
 
+int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
+{
+       struct map_rb_node *pos;
+       int ret = 0;
+
+       down_read(maps__lock(maps));
+       maps__for_each_entry(maps, pos) {
+               ret = cb(pos->map, data);
+               if (ret)
+                       break;
+       }
+       up_read(maps__lock(maps));
+       return ret;
+}
+
+void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
+{
+       struct map_rb_node *pos, *next;
+       unsigned int start_nr_maps;
+
+       down_write(maps__lock(maps));
+
+       start_nr_maps = maps__nr_maps(maps);
+       maps__for_each_entry_safe(maps, pos, next)      {
+               if (cb(pos->map, data)) {
+                       __maps__remove(maps, pos);
+                       --RC_CHK_ACCESS(maps)->nr_maps;
+               }
+       }
+       if (maps__maps_by_name(maps) && start_nr_maps != maps__nr_maps(maps))
+               __maps__free_maps_by_name(maps);
+
+       up_write(maps__lock(maps));
+}
+
 struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
 {
        struct map *map = maps__find(maps, addr);
@@ -210,31 +307,40 @@ struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
        return NULL;
 }
 
-struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
-{
+struct maps__find_symbol_by_name_args {
+       struct map **mapp;
+       const char *name;
        struct symbol *sym;
-       struct map_rb_node *pos;
+};
 
-       down_read(maps__lock(maps));
+static int maps__find_symbol_by_name_cb(struct map *map, void *data)
+{
+       struct maps__find_symbol_by_name_args *args = data;
 
-       maps__for_each_entry(maps, pos) {
-               sym = map__find_symbol_by_name(pos->map, name);
+       args->sym = map__find_symbol_by_name(map, args->name);
+       if (!args->sym)
+               return 0;
 
-               if (sym == NULL)
-                       continue;
-               if (!map__contains_symbol(pos->map, sym)) {
-                       sym = NULL;
-                       continue;
-               }
-               if (mapp != NULL)
-                       *mapp = pos->map;
-               goto out;
+       if (!map__contains_symbol(map, args->sym)) {
+               args->sym = NULL;
+               return 0;
        }
 
-       sym = NULL;
-out:
-       up_read(maps__lock(maps));
-       return sym;
+       if (args->mapp != NULL)
+               *args->mapp = map__get(map);
+       return 1;
+}
+
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
+{
+       struct maps__find_symbol_by_name_args args = {
+               .mapp = mapp,
+               .name = name,
+               .sym = NULL,
+       };
+
+       maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
+       return args.sym;
 }
 
 int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
@@ -253,41 +359,46 @@ int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
        return ams->ms.sym ? 0 : -1;
 }
 
-size_t maps__fprintf(struct maps *maps, FILE *fp)
-{
-       size_t printed = 0;
-       struct map_rb_node *pos;
+struct maps__fprintf_args {
+       FILE *fp;
+       size_t printed;
+};
 
-       down_read(maps__lock(maps));
+static int maps__fprintf_cb(struct map *map, void *data)
+{
+       struct maps__fprintf_args *args = data;
 
-       maps__for_each_entry(maps, pos) {
-               printed += fprintf(fp, "Map:");
-               printed += map__fprintf(pos->map, fp);
-               if (verbose > 2) {
-                       printed += dso__fprintf(map__dso(pos->map), fp);
-                       printed += fprintf(fp, "--\n");
-               }
+       args->printed += fprintf(args->fp, "Map:");
+       args->printed += map__fprintf(map, args->fp);
+       if (verbose > 2) {
+               args->printed += dso__fprintf(map__dso(map), args->fp);
+               args->printed += fprintf(args->fp, "--\n");
        }
+       return 0;
+}
 
-       up_read(maps__lock(maps));
+size_t maps__fprintf(struct maps *maps, FILE *fp)
+{
+       struct maps__fprintf_args args = {
+               .fp = fp,
+               .printed = 0,
+       };
+
+       maps__for_each_map(maps, maps__fprintf_cb, &args);
 
-       return printed;
+       return args.printed;
 }
 
-int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
+/*
+ * Find first map where end > map->start.
+ * Same as find_vma() in kernel.
+ */
+static struct rb_node *first_ending_after(struct maps *maps, const struct map *map)
 {
        struct rb_root *root;
        struct rb_node *next, *first;
-       int err = 0;
-
-       down_write(maps__lock(maps));
 
        root = maps__entries(maps);
-
-       /*
-        * Find first map where end > map->start.
-        * Same as find_vma() in kernel.
-        */
        next = root->rb_node;
        first = NULL;
        while (next) {
@@ -301,8 +412,23 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
                } else
                        next = next->rb_right;
        }
+       return first;
+}
 
-       next = first;
+/*
+ * Adds new to maps, if new overlaps existing entries then the existing maps are
+ * adjusted or removed so that new fits without overlapping any entries.
+ */
+int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
+{
+
+       struct rb_node *next;
+       int err = 0;
+       FILE *fp = debug_file();
+
+       down_write(maps__lock(maps));
+
+       next = first_ending_after(maps, new);
        while (next && !err) {
                struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
                next = rb_next(&pos->rb_node);
@@ -311,27 +437,27 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
                 * Stop if current map starts after map->end.
                 * Maps are ordered by start: next will not overlap for sure.
                 */
-               if (map__start(pos->map) >= map__end(map))
+               if (map__start(pos->map) >= map__end(new))
                        break;
 
                if (verbose >= 2) {
 
                        if (use_browser) {
                                pr_debug("overlapping maps in %s (disable tui for more info)\n",
-                                        map__dso(map)->name);
+                                        map__dso(new)->name);
                        } else {
-                               fputs("overlapping maps:\n", fp);
-                               map__fprintf(map, fp);
+                               pr_debug("overlapping maps:\n");
+                               map__fprintf(new, fp);
                                map__fprintf(pos->map, fp);
                        }
                }
 
-               rb_erase_init(&pos->rb_node, root);
+               rb_erase_init(&pos->rb_node, maps__entries(maps));
                /*
                 * Now check if we need to create new maps for areas not
                 * overlapped by the new map:
                 */
-               if (map__start(map) > map__start(pos->map)) {
+               if (map__start(new) > map__start(pos->map)) {
                        struct map *before = map__clone(pos->map);
 
                        if (before == NULL) {
@@ -339,7 +465,7 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
                                goto put_map;
                        }
 
-                       map__set_end(before, map__start(map));
+                       map__set_end(before, map__start(new));
                        err = __maps__insert(maps, before);
                        if (err) {
                                map__put(before);
@@ -351,7 +477,7 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
                        map__put(before);
                }
 
-               if (map__end(map) < map__end(pos->map)) {
+               if (map__end(new) < map__end(pos->map)) {
                        struct map *after = map__clone(pos->map);
 
                        if (after == NULL) {
@@ -359,10 +485,10 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
                                goto put_map;
                        }
 
-                       map__set_start(after, map__end(map));
-                       map__add_pgoff(after, map__end(map) - map__start(pos->map));
-                       assert(map__map_ip(pos->map, map__end(map)) ==
-                               map__map_ip(after, map__end(map)));
+                       map__set_start(after, map__end(new));
+                       map__add_pgoff(after, map__end(new) - map__start(pos->map));
+                       assert(map__map_ip(pos->map, map__end(new)) ==
+                               map__map_ip(after, map__end(new)));
                        err = __maps__insert(maps, after);
                        if (err) {
                                map__put(after);
@@ -376,16 +502,14 @@ put_map:
                map__put(pos->map);
                free(pos);
        }
+       /* Add the map. */
+       err = __maps__insert(maps, new);
        up_write(maps__lock(maps));
        return err;
 }
 
-/*
- * XXX This should not really _copy_ te maps, but refcount them.
- */
-int maps__clone(struct thread *thread, struct maps *parent)
+int maps__copy_from(struct maps *maps, struct maps *parent)
 {
-       struct maps *maps = thread__maps(thread);
        int err;
        struct map_rb_node *rb_node;
 
@@ -416,17 +540,6 @@ out_unlock:
        return err;
 }
 
-struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
-{
-       struct map_rb_node *rb_node;
-
-       maps__for_each_entry(maps, rb_node) {
-               if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
-                       return rb_node;
-       }
-       return NULL;
-}
-
 struct map *maps__find(struct maps *maps, u64 ip)
 {
        struct rb_node *p;
@@ -452,26 +565,275 @@ out:
        return m ? m->map : NULL;
 }
 
-struct map_rb_node *maps__first(struct maps *maps)
+static int map__strcmp(const void *a, const void *b)
 {
-       struct rb_node *first = rb_first(maps__entries(maps));
+       const struct map *map_a = *(const struct map **)a;
+       const struct map *map_b = *(const struct map **)b;
+       const struct dso *dso_a = map__dso(map_a);
+       const struct dso *dso_b = map__dso(map_b);
+       int ret = strcmp(dso_a->short_name, dso_b->short_name);
 
-       if (first)
-               return rb_entry(first, struct map_rb_node, rb_node);
-       return NULL;
+       if (ret == 0 && map_a != map_b) {
+               /*
+                * Ensure distinct but name equal maps have an order in part to
+                * aid reference counting.
+                */
+               ret = (int)map__start(map_a) - (int)map__start(map_b);
+               if (ret == 0)
+                       ret = (int)((intptr_t)map_a - (intptr_t)map_b);
+       }
+
+       return ret;
 }
 
-struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
+static int map__strcmp_name(const void *name, const void *b)
 {
-       struct rb_node *next;
+       const struct dso *dso = map__dso(*(const struct map **)b);
 
-       if (!node)
-               return NULL;
+       return strcmp(name, dso->short_name);
+}
 
-       next = rb_next(&node->rb_node);
+void __maps__sort_by_name(struct maps *maps)
+{
+       qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
+}
 
-       if (!next)
+static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
+{
+       struct map_rb_node *rb_node;
+       struct map **maps_by_name = realloc(maps__maps_by_name(maps),
+                                           maps__nr_maps(maps) * sizeof(struct map *));
+       int i = 0;
+
+       if (maps_by_name == NULL)
+               return -1;
+
+       up_read(maps__lock(maps));
+       down_write(maps__lock(maps));
+
+       RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
+       RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);
+
+       maps__for_each_entry(maps, rb_node)
+               maps_by_name[i++] = map__get(rb_node->map);
+
+       __maps__sort_by_name(maps);
+
+       up_write(maps__lock(maps));
+       down_read(maps__lock(maps));
+
+       return 0;
+}
+
+static struct map *__maps__find_by_name(struct maps *maps, const char *name)
+{
+       struct map **mapp;
+
+       if (maps__maps_by_name(maps) == NULL &&
+           map__groups__sort_by_name_from_rbtree(maps))
                return NULL;
 
-       return rb_entry(next, struct map_rb_node, rb_node);
+       mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
+                      sizeof(*mapp), map__strcmp_name);
+       if (mapp)
+               return *mapp;
+       return NULL;
+}
+
+struct map *maps__find_by_name(struct maps *maps, const char *name)
+{
+       struct map_rb_node *rb_node;
+       struct map *map;
+
+       down_read(maps__lock(maps));
+
+
+       if (RC_CHK_ACCESS(maps)->last_search_by_name) {
+               const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);
+
+               if (strcmp(dso->short_name, name) == 0) {
+                       map = RC_CHK_ACCESS(maps)->last_search_by_name;
+                       goto out_unlock;
+               }
+       }
+       /*
+        * If we have maps->maps_by_name, then the name isn't in the rbtree,
+        * as maps->maps_by_name mirrors the rbtree when lookups by name are
+        * made.
+        */
+       map = __maps__find_by_name(maps, name);
+       if (map || maps__maps_by_name(maps) != NULL)
+               goto out_unlock;
+
+       /* Fallback to traversing the rbtree... */
+       maps__for_each_entry(maps, rb_node) {
+               struct dso *dso;
+
+               map = rb_node->map;
+               dso = map__dso(map);
+               if (strcmp(dso->short_name, name) == 0) {
+                       RC_CHK_ACCESS(maps)->last_search_by_name = map;
+                       goto out_unlock;
+               }
+       }
+       map = NULL;
+
+out_unlock:
+       up_read(maps__lock(maps));
+       return map;
+}
+
+struct map *maps__find_next_entry(struct maps *maps, struct map *map)
+{
+       struct map_rb_node *rb_node = maps__find_node(maps, map);
+       struct map_rb_node *next = map_rb_node__next(rb_node);
+
+       if (next)
+               return next->map;
+
+       return NULL;
+}
+
+void maps__fixup_end(struct maps *maps)
+{
+       struct map_rb_node *prev = NULL, *curr;
+
+       down_write(maps__lock(maps));
+
+       maps__for_each_entry(maps, curr) {
+               if (prev && (!map__end(prev->map) || map__end(prev->map) > map__start(curr->map)))
+                       map__set_end(prev->map, map__start(curr->map));
+
+               prev = curr;
+       }
+
+       /*
+        * We still haven't the actual symbols, so guess the
+        * last map final address.
+        */
+       if (curr && !map__end(curr->map))
+               map__set_end(curr->map, ~0ULL);
+
+       up_write(maps__lock(maps));
+}
+
+/*
+ * Merges map into maps by splitting the new map within the existing map
+ * regions.
+ */
+int maps__merge_in(struct maps *kmaps, struct map *new_map)
+{
+       struct map_rb_node *rb_node;
+       struct rb_node *first;
+       bool overlaps;
+       LIST_HEAD(merged);
+       int err = 0;
+
+       down_read(maps__lock(kmaps));
+       first = first_ending_after(kmaps, new_map);
+       rb_node = first ? rb_entry(first, struct map_rb_node, rb_node) : NULL;
+       overlaps = rb_node && map__start(rb_node->map) < map__end(new_map);
+       up_read(maps__lock(kmaps));
+
+       if (!overlaps)
+               return maps__insert(kmaps, new_map);
+
+       maps__for_each_entry(kmaps, rb_node) {
+               struct map *old_map = rb_node->map;
+
+               /* no overload with this one */
+               if (map__end(new_map) < map__start(old_map) ||
+                   map__start(new_map) >= map__end(old_map))
+                       continue;
+
+               if (map__start(new_map) < map__start(old_map)) {
+                       /*
+                        * |new......
+                        *       |old....
+                        */
+                       if (map__end(new_map) < map__end(old_map)) {
+                               /*
+                                * |new......|     -> |new..|
+                                *       |old....| ->       |old....|
+                                */
+                               map__set_end(new_map, map__start(old_map));
+                       } else {
+                               /*
+                                * |new.............| -> |new..|       |new..|
+                                *       |old....|    ->       |old....|
+                                */
+                               struct map_list_node *m = map_list_node__new();
+
+                               if (!m) {
+                                       err = -ENOMEM;
+                                       goto out;
+                               }
+
+                               m->map = map__clone(new_map);
+                               if (!m->map) {
+                                       free(m);
+                                       err = -ENOMEM;
+                                       goto out;
+                               }
+
+                               map__set_end(m->map, map__start(old_map));
+                               list_add_tail(&m->node, &merged);
+                               map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
+                               map__set_start(new_map, map__end(old_map));
+                       }
+               } else {
+                       /*
+                        *      |new......
+                        * |old....
+                        */
+                       if (map__end(new_map) < map__end(old_map)) {
+                               /*
+                                *      |new..|   -> x
+                                * |old.........| -> |old.........|
+                                */
+                               map__put(new_map);
+                               new_map = NULL;
+                               break;
+                       } else {
+                               /*
+                                *      |new......| ->         |new...|
+                                * |old....|        -> |old....|
+                                */
+                               map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
+                               map__set_start(new_map, map__end(old_map));
+                       }
+               }
+       }
+
+out:
+       while (!list_empty(&merged)) {
+               struct map_list_node *old_node;
+
+               old_node = list_entry(merged.next, struct map_list_node, node);
+               list_del_init(&old_node->node);
+               if (!err)
+                       err = maps__insert(kmaps, old_node->map);
+               map__put(old_node->map);
+               free(old_node);
+       }
+
+       if (new_map) {
+               if (!err)
+                       err = maps__insert(kmaps, new_map);
+               map__put(new_map);
+       }
+       return err;
+}
+
+void maps__load_first(struct maps *maps)
+{
+       struct map_rb_node *first;
+
+       down_read(maps__lock(maps));
+
+       first = maps__first(maps);
+       if (first)
+               map__load(first->map);
+
+       up_read(maps__lock(maps));
 }
index 83144e0645ed46598c7f0500607ffc4e58c2cf71..d836d04c940229a70a30561ade8dc40edb02b4f0 100644 (file)
@@ -14,24 +14,18 @@ struct ref_reloc_sym;
 struct machine;
 struct map;
 struct maps;
-struct thread;
 
-struct map_rb_node {
-       struct rb_node rb_node;
+struct map_list_node {
+       struct list_head node;
        struct map *map;
 };
 
-struct map_rb_node *maps__first(struct maps *maps);
-struct map_rb_node *map_rb_node__next(struct map_rb_node *node);
-struct map_rb_node *maps__find_node(struct maps *maps, struct map *map);
-struct map *maps__find(struct maps *maps, u64 addr);
-
-#define maps__for_each_entry(maps, map) \
-       for (map = maps__first(maps); map; map = map_rb_node__next(map))
+static inline struct map_list_node *map_list_node__new(void)
+{
+       return malloc(sizeof(struct map_list_node));
+}
 
-#define maps__for_each_entry_safe(maps, map, next) \
-       for (map = maps__first(maps), next = map_rb_node__next(map); map; \
-            map = next, next = map_rb_node__next(map))
+struct map *maps__find(struct maps *maps, u64 addr);
 
 DECLARE_RC_STRUCT(maps) {
        struct rb_root      entries;
@@ -58,7 +52,7 @@ struct kmap {
 
 struct maps *maps__new(struct machine *machine);
 bool maps__empty(struct maps *maps);
-int maps__clone(struct thread *thread, struct maps *parent);
+int maps__copy_from(struct maps *maps, struct maps *parent);
 
 struct maps *maps__get(struct maps *maps);
 void maps__put(struct maps *maps);
@@ -71,26 +65,16 @@ static inline void __maps__zput(struct maps **map)
 
 #define maps__zput(map) __maps__zput(&map)
 
-static inline struct rb_root *maps__entries(struct maps *maps)
-{
-       return &RC_CHK_ACCESS(maps)->entries;
-}
+/* Iterate over map calling cb for each entry. */
+int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data);
+/* Iterate over map removing an entry if cb returns true. */
+void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data);
 
 static inline struct machine *maps__machine(struct maps *maps)
 {
        return RC_CHK_ACCESS(maps)->machine;
 }
 
-static inline struct rw_semaphore *maps__lock(struct maps *maps)
-{
-       return &RC_CHK_ACCESS(maps)->lock;
-}
-
-static inline struct map **maps__maps_by_name(struct maps *maps)
-{
-       return RC_CHK_ACCESS(maps)->maps_by_name;
-}
-
 static inline unsigned int maps__nr_maps(const struct maps *maps)
 {
        return RC_CHK_ACCESS(maps)->nr_maps;
@@ -125,12 +109,18 @@ struct addr_map_symbol;
 
 int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams);
 
-int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp);
+int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new);
 
 struct map *maps__find_by_name(struct maps *maps, const char *name);
 
+struct map *maps__find_next_entry(struct maps *maps, struct map *map);
+
 int maps__merge_in(struct maps *kmaps, struct map *new_map);
 
 void __maps__sort_by_name(struct maps *maps);
 
+void maps__fixup_end(struct maps *maps);
+
+void maps__load_first(struct maps *maps);
+
 #endif // __PERF_MAPS_H
index 954b235e12e51700f43e3901636b5a24e5317b42..3a2e3687878c1862c64d0f723496a76ceb2f8229 100644 (file)
@@ -100,11 +100,14 @@ int perf_mem_events__parse(const char *str)
        return -1;
 }
 
-static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
+static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu,
+                                     struct perf_mem_event *e)
 {
+       char sysfs_name[100];
        char path[PATH_MAX];
        struct stat st;
 
+       scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
        scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
        return !stat(path, &st);
 }
@@ -120,7 +123,6 @@ int perf_mem_events__init(void)
 
        for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
                struct perf_mem_event *e = perf_mem_events__ptr(j);
-               char sysfs_name[100];
                struct perf_pmu *pmu = NULL;
 
                /*
@@ -136,12 +138,12 @@ int perf_mem_events__init(void)
                 * of core PMU.
                 */
                while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-                       scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
-                       e->supported |= perf_mem_event__supported(mnt, sysfs_name);
+                       e->supported |= perf_mem_event__supported(mnt, pmu, e);
+                       if (e->supported) {
+                               found = true;
+                               break;
+                       }
                }
-
-               if (e->supported)
-                       found = true;
        }
 
        return found ? 0 : -ENOENT;
@@ -167,13 +169,10 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
                                                    int idx)
 {
        const char *mnt = sysfs__mount();
-       char sysfs_name[100];
        struct perf_pmu *pmu = NULL;
 
        while ((pmu = perf_pmus__scan(pmu)) != NULL) {
-               scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
-                         pmu->name);
-               if (!perf_mem_event__supported(mnt, sysfs_name)) {
+               if (!perf_mem_event__supported(mnt, pmu, e)) {
                        pr_err("failed: event '%s' not supported\n",
                               perf_mem_events__name(idx, pmu->name));
                }
@@ -183,6 +182,7 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
 int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
                                 char **rec_tmp, int *tmp_nr)
 {
+       const char *mnt = sysfs__mount();
        int i = *argv_nr, k = 0;
        struct perf_mem_event *e;
 
@@ -211,6 +211,9 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
                        while ((pmu = perf_pmus__scan(pmu)) != NULL) {
                                const char *s = perf_mem_events__name(j, pmu->name);
 
+                               if (!perf_mem_event__supported(mnt, pmu, e))
+                                       continue;
+
                                rec_argv[i++] = "-e";
                                if (s) {
                                        char *copy = strdup(s);
index ca3e0404f18720d7a3cc2376896195f55cf1192d..966cca5a3e88cd94b78c27ce8429f820f1fa86b9 100644 (file)
@@ -286,7 +286,7 @@ static int setup_metric_events(const char *pmu, struct hashmap *ids,
        *out_metric_events = NULL;
        ids_size = hashmap__size(ids);
 
-       metric_events = calloc(sizeof(void *), ids_size + 1);
+       metric_events = calloc(ids_size + 1, sizeof(void *));
        if (!metric_events)
                return -ENOMEM;
 
index 49093b21ee2da034e6634ab7dbf34dd08045faeb..122ee198a86e9961d5b6c63dffc4c0d1e1636127 100644 (file)
@@ -295,15 +295,14 @@ int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu
 
        map->core.flush = mp->flush;
 
-       map->comp_level = mp->comp_level;
 #ifndef PYTHON_PERF
-       if (zstd_init(&map->zstd_data, map->comp_level)) {
+       if (zstd_init(&map->zstd_data, mp->comp_level)) {
                pr_debug2("failed to init mmap compressor, error %d\n", errno);
                return -1;
        }
 #endif
 
-       if (map->comp_level && !perf_mmap__aio_enabled(map)) {
+       if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
                map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
                                 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
                if (map->data == MAP_FAILED) {
index f944c3cd5efa0b04c8e870b834c40f4fdcaa4f1d..0df6e1621c7e8fcf5857d3f3ce5709739fe53064 100644 (file)
@@ -39,7 +39,6 @@ struct mmap {
 #endif
        struct mmap_cpu_mask    affinity_mask;
        void            *data;
-       int             comp_level;
        struct perf_data_file *file;
        struct zstd_data      zstd_data;
 };
index fd67d204d720d9ba7859fa25e1b96a5b1ebf9c75..f7f7aff3d85a049000828a9fcb9ecc3ad9026389 100644 (file)
@@ -36,6 +36,7 @@ static const struct branch_mode branch_modes[] = {
        BRANCH_OPT("stack", PERF_SAMPLE_BRANCH_CALL_STACK),
        BRANCH_OPT("hw_index", PERF_SAMPLE_BRANCH_HW_INDEX),
        BRANCH_OPT("priv", PERF_SAMPLE_BRANCH_PRIV_SAVE),
+       BRANCH_OPT("counter", PERF_SAMPLE_BRANCH_COUNTERS),
        BRANCH_END
 };
 
index aa2f5c6fc7fc24f205b9c88012dfd8016fdf9b3e..66eabcea424274580abe69fd5226b16931a67dec 100644 (file)
@@ -976,7 +976,7 @@ static int config_term_pmu(struct perf_event_attr *attr,
                           struct parse_events_error *err)
 {
        if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
-               const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
+               struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
 
                if (!pmu) {
                        char *err_str;
@@ -986,15 +986,23 @@ static int config_term_pmu(struct perf_event_attr *attr,
                                                           err_str, /*help=*/NULL);
                        return -EINVAL;
                }
-               if (perf_pmu__supports_legacy_cache(pmu)) {
+               /*
+                * Rewrite the PMU event to a legacy cache one unless the PMU
+                * doesn't support legacy cache events or the event is present
+                * within the PMU.
+                */
+               if (perf_pmu__supports_legacy_cache(pmu) &&
+                   !perf_pmu__have_event(pmu, term->config)) {
                        attr->type = PERF_TYPE_HW_CACHE;
                        return parse_events__decode_legacy_cache(term->config, pmu->type,
                                                                 &attr->config);
-               } else
+               } else {
                        term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
+                       term->no_value = true;
+               }
        }
        if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
-               const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
+               struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
 
                if (!pmu) {
                        char *err_str;
@@ -1004,10 +1012,19 @@ static int config_term_pmu(struct perf_event_attr *attr,
                                                           err_str, /*help=*/NULL);
                        return -EINVAL;
                }
-               attr->type = PERF_TYPE_HARDWARE;
-               attr->config = term->val.num;
-               if (perf_pmus__supports_extended_type())
-                       attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
+               /*
+                * If the PMU has a sysfs or json event prefer it over
+                * legacy. ARM requires this.
+                */
+               if (perf_pmu__have_event(pmu, term->config)) {
+                       term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
+                       term->no_value = true;
+               } else {
+                       attr->type = PERF_TYPE_HARDWARE;
+                       attr->config = term->val.num;
+                       if (perf_pmus__supports_extended_type())
+                               attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
+               }
                return 0;
        }
        if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
@@ -1381,6 +1398,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
        YYLTYPE *loc = loc_;
        LIST_HEAD(config_terms);
        struct parse_events_terms parsed_terms;
+       bool alias_rewrote_terms = false;
 
        pmu = parse_state->fake_pmu ?: perf_pmus__find(name);
 
@@ -1433,7 +1451,15 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
                return evsel ? 0 : -ENOMEM;
        }
 
-       if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, &parsed_terms, &info, err)) {
+       /* Configure attr/terms with a known PMU, this will set hardcoded terms. */
+       if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
+               parse_events_terms__exit(&parsed_terms);
+               return -EINVAL;
+       }
+
+       /* Look for event names in the terms and rewrite into format based terms. */
+       if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, &parsed_terms,
+                                                           &info, &alias_rewrote_terms, err)) {
                parse_events_terms__exit(&parsed_terms);
                return -EINVAL;
        }
@@ -1447,11 +1473,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
                strbuf_release(&sb);
        }
 
-       /*
-        * Configure hardcoded terms first, no need to check
-        * return value when called with fail == 0 ;)
-        */
-       if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
+       /* Configure attr/terms again if an alias was expanded. */
+       if (alias_rewrote_terms &&
+           config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
                parse_events_terms__exit(&parsed_terms);
                return -EINVAL;
        }
index e1e2d701599c4294f05e1c73ca5aef4cd1ef8544..1de3b69cdf4aafb7fdc73574b2b44a3cad38a75e 100644 (file)
@@ -64,7 +64,7 @@ static bool perf_probe_api(setup_probe_fn_t fn)
        struct perf_cpu cpu;
        int ret, i = 0;
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        if (!cpus)
                return false;
        cpu = perf_cpu_map__cpu(cpus, 0);
@@ -140,7 +140,7 @@ bool perf_can_record_cpu_wide(void)
        struct perf_cpu cpu;
        int fd;
 
-       cpus = perf_cpu_map__new(NULL);
+       cpus = perf_cpu_map__new_online_cpus();
        if (!cpus)
                return false;
 
index 2247991451f3aa1ba0969b9ad4f1f22e595b2a21..8f04d3b7f3ec783bee9981fa096b145e80fabc91 100644 (file)
@@ -55,6 +55,7 @@ static void __p_branch_sample_type(char *buf, size_t size, u64 value)
                bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
                bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
                bit_name(TYPE_SAVE), bit_name(HW_INDEX), bit_name(PRIV_SAVE),
+               bit_name(COUNTERS),
                { .name = NULL, }
        };
 #undef bit_name
index d3c9aa4326bee4ba3d3b6594349d4e0426c2aae3..3c9609944a2f312e7cac681f8a19dd037d6eb01e 100644 (file)
@@ -1494,12 +1494,14 @@ static int check_info_data(struct perf_pmu *pmu,
  * defined for the alias
  */
 int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms,
-                         struct perf_pmu_info *info, struct parse_events_error *err)
+                         struct perf_pmu_info *info, bool *rewrote_terms,
+                         struct parse_events_error *err)
 {
        struct parse_events_term *term, *h;
        struct perf_pmu_alias *alias;
        int ret;
 
+       *rewrote_terms = false;
        info->per_pkg = false;
 
        /*
@@ -1521,7 +1523,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_
                                                NULL);
                        return ret;
                }
-
+               *rewrote_terms = true;
                ret = check_info_data(pmu, alias, info, err, term->err_term);
                if (ret)
                        return ret;
@@ -1615,6 +1617,8 @@ bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu)
 
 bool perf_pmu__have_event(struct perf_pmu *pmu, const char *name)
 {
+       if (!name)
+               return false;
        if (perf_pmu__find_alias(pmu, name, /*load=*/ true) != NULL)
                return true;
        if (pmu->cpu_aliases_added || !pmu->events_table)
index d2895d415f08fbf941bfd1bfa52f371307228e09..424c3fee09496248d6168ba5361d4fa9f66e28a2 100644 (file)
@@ -201,7 +201,8 @@ int perf_pmu__config_terms(const struct perf_pmu *pmu,
 __u64 perf_pmu__format_bits(struct perf_pmu *pmu, const char *name);
 int perf_pmu__format_type(struct perf_pmu *pmu, const char *name);
 int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms,
-                         struct perf_pmu_info *info, struct parse_events_error *err);
+                         struct perf_pmu_info *info, bool *rewrote_terms,
+                         struct parse_events_error *err);
 int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb);
 
 int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load);
index b0fc48be623f31bcfd478a79b66b5295708a5a15..9e47712507cc265d46c7cf5d66033185006ba2f3 100644 (file)
@@ -66,7 +66,7 @@ void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unus
 
        put_tracing_file(events_path);
        if (events_fd < 0) {
-               printf("Error: failed to open tracing events directory\n");
+               pr_err("Error: failed to open tracing events directory\n");
                return;
        }
 
index 1a5b7fa459b232043457f706b45c0b7a87d35643..a1a796043691f487fe901e9fafef5888913f4ec7 100644 (file)
@@ -149,10 +149,32 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
        return 0;
 }
 
+struct kernel_get_module_map_cb_args {
+       const char *module;
+       struct map *result;
+};
+
+static int kernel_get_module_map_cb(struct map *map, void *data)
+{
+       struct kernel_get_module_map_cb_args *args = data;
+       struct dso *dso = map__dso(map);
+       const char *short_name = dso->short_name; /* short_name is "[module]" */
+       u16 short_name_len =  dso->short_name_len;
+
+       if (strncmp(short_name + 1, args->module, short_name_len - 2) == 0 &&
+           args->module[short_name_len - 2] == '\0') {
+               args->result = map__get(map);
+               return 1;
+       }
+       return 0;
+}
+
 static struct map *kernel_get_module_map(const char *module)
 {
-       struct maps *maps = machine__kernel_maps(host_machine);
-       struct map_rb_node *pos;
+       struct kernel_get_module_map_cb_args args = {
+               .module = module,
+               .result = NULL,
+       };
 
        /* A file path -- this is an offline module */
        if (module && strchr(module, '/'))
@@ -164,19 +186,9 @@ static struct map *kernel_get_module_map(const char *module)
                return map__get(map);
        }
 
-       maps__for_each_entry(maps, pos) {
-               /* short_name is "[module]" */
-               struct dso *dso = map__dso(pos->map);
-               const char *short_name = dso->short_name;
-               u16 short_name_len =  dso->short_name_len;
+       maps__for_each_map(machine__kernel_maps(host_machine), kernel_get_module_map_cb, &args);
 
-               if (strncmp(short_name + 1, module,
-                           short_name_len - 2) == 0 &&
-                   module[short_name_len - 2] == '\0') {
-                       return map__get(pos->map);
-               }
-       }
-       return NULL;
+       return args.result;
 }
 
 struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user)
index f171360b0ef4db06eb0ff760619996cf58339943..c8923375e30d6618fda564b84c41317c46009a3d 100644 (file)
@@ -23,6 +23,7 @@
 #include "event.h"
 #include "dso.h"
 #include "debug.h"
+#include "debuginfo.h"
 #include "intlist.h"
 #include "strbuf.h"
 #include "strlist.h"
 #include "probe-file.h"
 #include "string2.h"
 
-#ifdef HAVE_DEBUGINFOD_SUPPORT
-#include <elfutils/debuginfod.h>
-#endif
-
 /* Kprobe tracer basic type is up to u64 */
 #define MAX_BASIC_TYPE_BITS    64
 
-/* Dwarf FL wrappers */
-static char *debuginfo_path;   /* Currently dummy */
-
-static const Dwfl_Callbacks offline_callbacks = {
-       .find_debuginfo = dwfl_standard_find_debuginfo,
-       .debuginfo_path = &debuginfo_path,
-
-       .section_address = dwfl_offline_section_address,
-
-       /* We use this table for core files too.  */
-       .find_elf = dwfl_build_id_find_elf,
-};
-
-/* Get a Dwarf from offline image */
-static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
-                                        const char *path)
-{
-       GElf_Addr dummy;
-       int fd;
-
-       fd = open(path, O_RDONLY);
-       if (fd < 0)
-               return fd;
-
-       dbg->dwfl = dwfl_begin(&offline_callbacks);
-       if (!dbg->dwfl)
-               goto error;
-
-       dwfl_report_begin(dbg->dwfl);
-       dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd);
-       if (!dbg->mod)
-               goto error;
-
-       dbg->dbg = dwfl_module_getdwarf(dbg->mod, &dbg->bias);
-       if (!dbg->dbg)
-               goto error;
-
-       dwfl_module_build_id(dbg->mod, &dbg->build_id, &dummy);
-
-       dwfl_report_end(dbg->dwfl, NULL, NULL);
-
-       return 0;
-error:
-       if (dbg->dwfl)
-               dwfl_end(dbg->dwfl);
-       else
-               close(fd);
-       memset(dbg, 0, sizeof(*dbg));
-
-       return -ENOENT;
-}
-
-static struct debuginfo *__debuginfo__new(const char *path)
-{
-       struct debuginfo *dbg = zalloc(sizeof(*dbg));
-       if (!dbg)
-               return NULL;
-
-       if (debuginfo__init_offline_dwarf(dbg, path) < 0)
-               zfree(&dbg);
-       if (dbg)
-               pr_debug("Open Debuginfo file: %s\n", path);
-       return dbg;
-}
-
-enum dso_binary_type distro_dwarf_types[] = {
-       DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
-       DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
-       DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
-       DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
-       DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
-       DSO_BINARY_TYPE__NOT_FOUND,
-};
-
-struct debuginfo *debuginfo__new(const char *path)
-{
-       enum dso_binary_type *type;
-       char buf[PATH_MAX], nil = '\0';
-       struct dso *dso;
-       struct debuginfo *dinfo = NULL;
-       struct build_id bid;
-
-       /* Try to open distro debuginfo files */
-       dso = dso__new(path);
-       if (!dso)
-               goto out;
-
-       /* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */
-       if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0)
-               dso__set_build_id(dso, &bid);
-
-       for (type = distro_dwarf_types;
-            !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND;
-            type++) {
-               if (dso__read_binary_type_filename(dso, *type, &nil,
-                                                  buf, PATH_MAX) < 0)
-                       continue;
-               dinfo = __debuginfo__new(buf);
-       }
-       dso__put(dso);
-
-out:
-       /* if failed to open all distro debuginfo, open given binary */
-       return dinfo ? : __debuginfo__new(path);
-}
-
-void debuginfo__delete(struct debuginfo *dbg)
-{
-       if (dbg) {
-               if (dbg->dwfl)
-                       dwfl_end(dbg->dwfl);
-               free(dbg);
-       }
-}
-
 /*
  * Probe finder related functions
  */
@@ -722,7 +604,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
        ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
        if (ret <= 0 || nops == 0) {
                pf->fb_ops = NULL;
-#if _ELFUTILS_PREREQ(0, 142)
+#ifdef HAVE_DWARF_CFI_SUPPORT
        } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
                   (pf->cfi_eh != NULL || pf->cfi_dbg != NULL)) {
                if ((dwarf_cfi_addrframe(pf->cfi_eh, pf->addr, &frame) != 0 &&
@@ -733,7 +615,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
                        free(frame);
                        return -ENOENT;
                }
-#endif
+#endif /* HAVE_DWARF_CFI_SUPPORT */
        }
 
        /* Call finder's callback handler */
@@ -1258,7 +1140,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
 
        pf->machine = ehdr.e_machine;
 
-#if _ELFUTILS_PREREQ(0, 142)
+#ifdef HAVE_DWARF_CFI_SUPPORT
        do {
                GElf_Shdr shdr;
 
@@ -1268,7 +1150,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
 
                pf->cfi_dbg = dwarf_getcfi(dbg->dbg);
        } while (0);
-#endif
+#endif /* HAVE_DWARF_CFI_SUPPORT */
 
        ret = debuginfo__find_probe_location(dbg, pf);
        return ret;
@@ -1677,44 +1559,6 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
        return (ret < 0) ? ret : af.nvls;
 }
 
-/* For the kernel module, we need a special code to get a DIE */
-int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
-                               bool adjust_offset)
-{
-       int n, i;
-       Elf32_Word shndx;
-       Elf_Scn *scn;
-       Elf *elf;
-       GElf_Shdr mem, *shdr;
-       const char *p;
-
-       elf = dwfl_module_getelf(dbg->mod, &dbg->bias);
-       if (!elf)
-               return -EINVAL;
-
-       /* Get the number of relocations */
-       n = dwfl_module_relocations(dbg->mod);
-       if (n < 0)
-               return -ENOENT;
-       /* Search the relocation related .text section */
-       for (i = 0; i < n; i++) {
-               p = dwfl_module_relocation_info(dbg->mod, i, &shndx);
-               if (strcmp(p, ".text") == 0) {
-                       /* OK, get the section header */
-                       scn = elf_getscn(elf, shndx);
-                       if (!scn)
-                               return -ENOENT;
-                       shdr = gelf_getshdr(scn, &mem);
-                       if (!shdr)
-                               return -ENOENT;
-                       *offs = shdr->sh_addr;
-                       if (adjust_offset)
-                               *offs -= shdr->sh_offset;
-               }
-       }
-       return 0;
-}
-
 /* Reverse search */
 int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
                                struct perf_probe_point *ppt)
@@ -2009,41 +1853,6 @@ found:
        return (ret < 0) ? ret : lf.found;
 }
 
-#ifdef HAVE_DEBUGINFOD_SUPPORT
-/* debuginfod doesn't require the comp_dir but buildid is required */
-static int get_source_from_debuginfod(const char *raw_path,
-                               const char *sbuild_id, char **new_path)
-{
-       debuginfod_client *c = debuginfod_begin();
-       const char *p = raw_path;
-       int fd;
-
-       if (!c)
-               return -ENOMEM;
-
-       fd = debuginfod_find_source(c, (const unsigned char *)sbuild_id,
-                               0, p, new_path);
-       pr_debug("Search %s from debuginfod -> %d\n", p, fd);
-       if (fd >= 0)
-               close(fd);
-       debuginfod_end(c);
-       if (fd < 0) {
-               pr_debug("Failed to find %s in debuginfod (%s)\n",
-                       raw_path, sbuild_id);
-               return -ENOENT;
-       }
-       pr_debug("Got a source %s\n", *new_path);
-
-       return 0;
-}
-#else
-static inline int get_source_from_debuginfod(const char *raw_path __maybe_unused,
-                               const char *sbuild_id __maybe_unused,
-                               char **new_path __maybe_unused)
-{
-       return -ENOTSUP;
-}
-#endif
 /*
  * Find a src file from a DWARF tag path. Prepend optional source path prefix
  * and chop off leading directories that do not exist. Result is passed back as
index 8bc1c80d3c1c0b616659a10183d9f463c1008ffe..3add5ff516e12de544b38cf5e48a45f922b0dee2 100644 (file)
@@ -24,21 +24,7 @@ static inline int is_c_varname(const char *name)
 #ifdef HAVE_DWARF_SUPPORT
 
 #include "dwarf-aux.h"
-
-/* TODO: export debuginfo data structure even if no dwarf support */
-
-/* debug information structure */
-struct debuginfo {
-       Dwarf           *dbg;
-       Dwfl_Module     *mod;
-       Dwfl            *dwfl;
-       Dwarf_Addr      bias;
-       const unsigned char     *build_id;
-};
-
-/* This also tries to open distro debuginfo */
-struct debuginfo *debuginfo__new(const char *path);
-void debuginfo__delete(struct debuginfo *dbg);
+#include "debuginfo.h"
 
 /* Find probe_trace_events specified by perf_probe_event from debuginfo */
 int debuginfo__find_trace_events(struct debuginfo *dbg,
@@ -49,9 +35,6 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
 int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
                                struct perf_probe_point *ppt);
 
-int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
-                              bool adjust_offset);
-
 /* Find a line range */
 int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
 
index 9eb5c6a08999e83bb1ef05117ba8ce926d93dc16..87e817b3cf7e9d9b94799cfcb47cbd98b4c1ff02 100644 (file)
@@ -237,8 +237,8 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 
        evsel = evlist__last(temp_evlist);
 
-       if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
-               struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
+       if (!evlist || perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
+               struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
 
                if (cpus)
                        cpu =  perf_cpu_map__cpu(cpus, 0);
index f55ca07f3ca12d912fcc4c12bb02d925c3eafe71..74b36644e384990a252e4623b1cf5e1ab70f789e 100644 (file)
@@ -12,6 +12,8 @@
 #define        S390_CPUMCF_DIAG_DEF    0xfeef  /* Counter diagnostic entry ID */
 #define        PERF_EVENT_CPUM_CF_DIAG 0xBC000 /* Event: Counter sets */
 #define PERF_EVENT_CPUM_SF_DIAG        0xBD000 /* Event: Combined-sampling */
+#define PERF_EVENT_PAI_CRYPTO_ALL      0x1000 /* Event: CRYPTO_ALL */
+#define PERF_EVENT_PAI_NNPA_ALL        0x1800 /* Event: NNPA_ALL */
 
 struct cf_ctrset_entry {       /* CPU-M CF counter set entry (8 byte) */
        unsigned int def:16;    /* 0-15  Data Entry Format */
index 115b16edb45138cb1d460afa2fbcc696ebee60e9..53383e97ec9d5731276fcce0021ec6a5176196c5 100644 (file)
@@ -51,8 +51,6 @@ static bool s390_cpumcfdg_testctr(struct perf_sample *sample)
        struct cf_trailer_entry *te;
        struct cf_ctrset_entry *cep, ce;
 
-       if (!len)
-               return false;
        while (offset < len) {
                cep = (struct cf_ctrset_entry *)(buf + offset);
                ce.def = be16_to_cpu(cep->def);
@@ -125,6 +123,9 @@ static int get_counterset_start(int setnr)
                return 128;
        case CPUMF_CTR_SET_MT_DIAG:             /* Diagnostic counter set */
                return 448;
+       case PERF_EVENT_PAI_NNPA_ALL:           /* PAI NNPA counter set */
+       case PERF_EVENT_PAI_CRYPTO_ALL:         /* PAI CRYPTO counter set */
+               return setnr;
        default:
                return -1;
        }
@@ -212,27 +213,120 @@ static void s390_cpumcfdg_dump(struct perf_pmu *pmu, struct perf_sample *sample)
        }
 }
 
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpacked"
+#pragma GCC diagnostic ignored "-Wattributes"
+/*
+ * Check for consistency of PAI_CRYPTO/PAI_NNPA raw data.
+ */
+struct pai_data {              /* Event number and value */
+       u16 event_nr;
+       u64 event_val;
+} __packed;
+
+#pragma GCC diagnostic pop
+
+/*
+ * Test for valid raw data. At least one PAI event should be in the raw
+ * data section.
+ */
+static bool s390_pai_all_test(struct perf_sample *sample)
+{
+       size_t len = sample->raw_size;
+
+       if (len < 0xa)
+               return false;
+       return true;
+}
+
+static void s390_pai_all_dump(struct evsel *evsel, struct perf_sample *sample)
+{
+       size_t len = sample->raw_size, offset = 0;
+       unsigned char *p = sample->raw_data;
+       const char *color = PERF_COLOR_BLUE;
+       struct pai_data pai_data;
+       char *ev_name;
+
+       while (offset < len) {
+               memcpy(&pai_data.event_nr, p, sizeof(pai_data.event_nr));
+               pai_data.event_nr = be16_to_cpu(pai_data.event_nr);
+               p += sizeof(pai_data.event_nr);
+               offset += sizeof(pai_data.event_nr);
+
+               memcpy(&pai_data.event_val, p, sizeof(pai_data.event_val));
+               pai_data.event_val = be64_to_cpu(pai_data.event_val);
+               p += sizeof(pai_data.event_val);
+               offset += sizeof(pai_data.event_val);
+
+               ev_name = get_counter_name(evsel->core.attr.config,
+                                          pai_data.event_nr, evsel->pmu);
+               color_fprintf(stdout, color, "\tCounter:%03d %s Value:%#018lx\n",
+                             pai_data.event_nr, ev_name ?: "<unknown>",
+                             pai_data.event_val);
+               free(ev_name);
+
+               if (offset + 0xa > len)
+                       break;
+       }
+       color_fprintf(stdout, color, "\n");
+}
+
 /* S390 specific trace event function. Check for PERF_RECORD_SAMPLE events
- * and if the event was triggered by a counter set diagnostic event display
- * its raw data.
+ * and if the event was triggered by a
+ * - counter set diagnostic event
+ * - processor activity assist (PAI) crypto counter event
+ * - processor activity assist (PAI) neural network processor assist (NNPA)
+ *   counter event
+ * display its raw data.
  * The function is only invoked when the dump flag -D is set.
+ *
+ * Function evlist__s390_sample_raw() is defined as call back after it has
+ * been verified that the perf.data file was created on s390 platform.
  */
-void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
+void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event,
+                            struct perf_sample *sample)
 {
+       const char *pai_name;
        struct evsel *evsel;
 
        if (event->header.type != PERF_RECORD_SAMPLE)
                return;
 
        evsel = evlist__event2evsel(evlist, event);
-       if (evsel == NULL ||
-           evsel->core.attr.config != PERF_EVENT_CPUM_CF_DIAG)
+       if (!evsel)
+               return;
+
+       /* Check for raw data in sample */
+       if (!sample->raw_size || !sample->raw_data)
                return;
 
        /* Display raw data on screen */
-       if (!s390_cpumcfdg_testctr(sample)) {
-               pr_err("Invalid counter set data encountered\n");
+       if (evsel->core.attr.config == PERF_EVENT_CPUM_CF_DIAG) {
+               if (!evsel->pmu)
+                       evsel->pmu = perf_pmus__find("cpum_cf");
+               if (!s390_cpumcfdg_testctr(sample))
+                       pr_err("Invalid counter set data encountered\n");
+               else
+                       s390_cpumcfdg_dump(evsel->pmu, sample);
+               return;
+       }
+
+       switch (evsel->core.attr.config) {
+       case PERF_EVENT_PAI_NNPA_ALL:
+               pai_name = "NNPA_ALL";
+               break;
+       case PERF_EVENT_PAI_CRYPTO_ALL:
+               pai_name = "CRYPTO_ALL";
+               break;
+       default:
                return;
        }
-       s390_cpumcfdg_dump(evsel->pmu, sample);
+
+       if (!s390_pai_all_test(sample)) {
+               pr_err("Invalid %s raw data encountered\n", pai_name);
+       } else {
+               if (!evsel->pmu)
+                       evsel->pmu = perf_pmus__find_by_type(evsel->core.attr.type);
+               s390_pai_all_dump(evsel, sample);
+       }
 }
index c92ad0f51ecd97d5727474b0a6b73e24ef37c41e..70b2c3135555ec2689fb5e824293195103c41590 100644 (file)
@@ -113,6 +113,7 @@ struct perf_sample {
        void *raw_data;
        struct ip_callchain *callchain;
        struct branch_stack *branch_stack;
+       u64 *branch_stack_cntr;
        struct regs_dump  user_regs;
        struct regs_dump  intr_regs;
        struct stack_dump user_stack;
index 603091317bed9be476117251fcda7caacaad5f54..b072ac5d3bc228ec628f054e86b94d422ee4cf76 100644 (file)
@@ -490,6 +490,9 @@ static int perl_start_script(const char *script, int argc, const char **argv,
        scripting_context->session = session;
 
        command_line = malloc((argc + 2) * sizeof(const char *));
+       if (!command_line)
+               return -ENOMEM;
+
        command_line[0] = "";
        command_line[1] = script;
        for (i = 2; i < argc + 2; i++)
index 94312741443abf8d858c6cd9d71e415d74042b5c..860e1837ba9693eb437a9ae68d762f5a89cf1d2d 100644 (file)
@@ -353,6 +353,8 @@ static PyObject *get_field_numeric_entry(struct tep_event *event,
 
        if (is_array) {
                list = PyList_New(field->arraylen);
+               if (!list)
+                       Py_FatalError("couldn't create Python list");
                item_size = field->size / field->arraylen;
                n_items = field->arraylen;
        } else {
@@ -754,7 +756,7 @@ static void regs_map(struct regs_dump *regs, uint64_t mask, const char *arch, ch
        }
 }
 
-static void set_regs_in_dict(PyObject *dict,
+static int set_regs_in_dict(PyObject *dict,
                             struct perf_sample *sample,
                             struct evsel *evsel)
 {
@@ -770,6 +772,8 @@ static void set_regs_in_dict(PyObject *dict,
         */
        int size = __sw_hweight64(attr->sample_regs_intr) * 28;
        char *bf = malloc(size);
+       if (!bf)
+               return -1;
 
        regs_map(&sample->intr_regs, attr->sample_regs_intr, arch, bf, size);
 
@@ -781,6 +785,8 @@ static void set_regs_in_dict(PyObject *dict,
        pydict_set_item_string_decref(dict, "uregs",
                        _PyUnicode_FromString(bf));
        free(bf);
+
+       return 0;
 }
 
 static void set_sym_in_dict(PyObject *dict, struct addr_location *al,
@@ -920,7 +926,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
                        PyLong_FromUnsignedLongLong(sample->cyc_cnt));
        }
 
-       set_regs_in_dict(dict, sample, evsel);
+       if (set_regs_in_dict(dict, sample, evsel))
+               Py_FatalError("Failed to setting regs in dict");
 
        return dict;
 }
@@ -1918,12 +1925,18 @@ static int python_start_script(const char *script, int argc, const char **argv,
        scripting_context->session = session;
 #if PY_MAJOR_VERSION < 3
        command_line = malloc((argc + 1) * sizeof(const char *));
+       if (!command_line)
+               return -1;
+
        command_line[0] = script;
        for (i = 1; i < argc + 1; i++)
                command_line[i] = argv[i - 1];
        PyImport_AppendInittab(name, initperf_trace_context);
 #else
        command_line = malloc((argc + 1) * sizeof(wchar_t *));
+       if (!command_line)
+               return -1;
+
        command_line[0] = Py_DecodeLocale(script, NULL);
        for (i = 1; i < argc + 1; i++)
                command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
index 1e9aa8ed15b6445eb906b76738f1c780cebb0713..199d3e8df31581c02245967d795847783556ee4c 100644 (file)
@@ -115,6 +115,11 @@ static int perf_session__open(struct perf_session *session, int repipe_fd)
                return -1;
        }
 
+       if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) {
+               /* Auxiliary events may reference exited threads, hold onto dead ones. */
+               symbol_conf.keep_exited_threads = true;
+       }
+
        if (perf_data__is_pipe(data))
                return 0;
 
@@ -1150,9 +1155,13 @@ static void callchain__printf(struct evsel *evsel,
                       i, callchain->ips[i]);
 }
 
-static void branch_stack__printf(struct perf_sample *sample, bool callstack)
+static void branch_stack__printf(struct perf_sample *sample,
+                                struct evsel *evsel)
 {
        struct branch_entry *entries = perf_sample__branch_entries(sample);
+       bool callstack = evsel__has_branch_callstack(evsel);
+       u64 *branch_stack_cntr = sample->branch_stack_cntr;
+       struct perf_env *env = evsel__env(evsel);
        uint64_t i;
 
        if (!callstack) {
@@ -1194,6 +1203,13 @@ static void branch_stack__printf(struct perf_sample *sample, bool callstack)
                        }
                }
        }
+
+       if (branch_stack_cntr) {
+               printf("... branch stack counters: nr:%" PRIu64 " (counter width: %u max counter nr:%u)\n",
+                       sample->branch_stack->nr, env->br_cntr_width, env->br_cntr_nr);
+               for (i = 0; i < sample->branch_stack->nr; i++)
+                       printf("..... %2"PRIu64": %016" PRIx64 "\n", i, branch_stack_cntr[i]);
+       }
 }
 
 static void regs_dump__printf(u64 mask, u64 *regs, const char *arch)
@@ -1355,7 +1371,7 @@ static void dump_sample(struct evsel *evsel, union perf_event *event,
                callchain__printf(evsel, sample);
 
        if (evsel__has_br_stack(evsel))
-               branch_stack__printf(sample, evsel__has_branch_callstack(evsel));
+               branch_stack__printf(sample, evsel);
 
        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample, arch);
index 80e4f613274015deb70028c76dc9b60ea103f6e8..30254eb637099b07427d73944a6bb4e7baffc724 100644 (file)
@@ -24,6 +24,7 @@
 #include "strbuf.h"
 #include "mem-events.h"
 #include "annotate.h"
+#include "annotate-data.h"
 #include "event.h"
 #include "time-utils.h"
 #include "cgroup.h"
@@ -418,6 +419,52 @@ struct sort_entry sort_sym = {
        .se_width_idx   = HISTC_SYMBOL,
 };
 
+/* --sort symoff */
+
+static int64_t
+sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       int64_t ret;
+
+       ret = sort__sym_cmp(left, right);
+       if (ret)
+               return ret;
+
+       return left->ip - right->ip;
+}
+
+static int64_t
+sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
+{
+       int64_t ret;
+
+       ret = sort__sym_sort(left, right);
+       if (ret)
+               return ret;
+
+       return left->ip - right->ip;
+}
+
+static int
+hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
+{
+       struct symbol *sym = he->ms.sym;
+
+       if (sym == NULL)
+               return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);
+
+       return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
+}
+
+struct sort_entry sort_sym_offset = {
+       .se_header      = "Symbol Offset",
+       .se_cmp         = sort__symoff_cmp,
+       .se_sort        = sort__symoff_sort,
+       .se_snprintf    = hist_entry__symoff_snprintf,
+       .se_filter      = hist_entry__sym_filter,
+       .se_width_idx   = HISTC_SYMBOL_OFFSET,
+};
+
 /* --sort srcline */
 
 char *hist_entry__srcline(struct hist_entry *he)
@@ -583,21 +630,21 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
 {
 
        struct symbol *sym = he->ms.sym;
-       struct annotation *notes;
+       struct annotated_branch *branch;
        double ipc = 0.0, coverage = 0.0;
        char tmp[64];
 
        if (!sym)
                return repsep_snprintf(bf, size, "%-*s", width, "-");
 
-       notes = symbol__annotation(sym);
+       branch = symbol__annotation(sym)->branch;
 
-       if (notes->hit_cycles)
-               ipc = notes->hit_insn / ((double)notes->hit_cycles);
+       if (branch && branch->hit_cycles)
+               ipc = branch->hit_insn / ((double)branch->hit_cycles);
 
-       if (notes->total_insn) {
-               coverage = notes->cover_insn * 100.0 /
-                       ((double)notes->total_insn);
+       if (branch && branch->total_insn) {
+               coverage = branch->cover_insn * 100.0 /
+                       ((double)branch->total_insn);
        }
 
        snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
@@ -2094,7 +2141,7 @@ struct sort_entry sort_dso_size = {
        .se_width_idx   = HISTC_DSO_SIZE,
 };
 
-/* --sort dso_size */
+/* --sort addr */
 
 static int64_t
 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
@@ -2131,6 +2178,152 @@ struct sort_entry sort_addr = {
        .se_width_idx   = HISTC_ADDR,
 };
 
+/* --sort type */
+
+struct annotated_data_type unknown_type = {
+       .self = {
+               .type_name = (char *)"(unknown)",
+               .children = LIST_HEAD_INIT(unknown_type.self.children),
+       },
+};
+
+static int64_t
+sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       return sort__addr_cmp(left, right);
+}
+
+static void sort__type_init(struct hist_entry *he)
+{
+       if (he->mem_type)
+               return;
+
+       he->mem_type = hist_entry__get_data_type(he);
+       if (he->mem_type == NULL) {
+               he->mem_type = &unknown_type;
+               he->mem_type_off = 0;
+       }
+}
+
+static int64_t
+sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
+{
+       struct annotated_data_type *left_type = left->mem_type;
+       struct annotated_data_type *right_type = right->mem_type;
+
+       if (!left_type) {
+               sort__type_init(left);
+               left_type = left->mem_type;
+       }
+
+       if (!right_type) {
+               sort__type_init(right);
+               right_type = right->mem_type;
+       }
+
+       return strcmp(left_type->self.type_name, right_type->self.type_name);
+}
+
+static int64_t
+sort__type_sort(struct hist_entry *left, struct hist_entry *right)
+{
+       return sort__type_collapse(left, right);
+}
+
+static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
+                                    size_t size, unsigned int width)
+{
+       return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
+}
+
+struct sort_entry sort_type = {
+       .se_header      = "Data Type",
+       .se_cmp         = sort__type_cmp,
+       .se_collapse    = sort__type_collapse,
+       .se_sort        = sort__type_sort,
+       .se_init        = sort__type_init,
+       .se_snprintf    = hist_entry__type_snprintf,
+       .se_width_idx   = HISTC_TYPE,
+};
+
+/* --sort typeoff */
+
+static int64_t
+sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
+{
+       struct annotated_data_type *left_type = left->mem_type;
+       struct annotated_data_type *right_type = right->mem_type;
+       int64_t ret;
+
+       if (!left_type) {
+               sort__type_init(left);
+               left_type = left->mem_type;
+       }
+
+       if (!right_type) {
+               sort__type_init(right);
+               right_type = right->mem_type;
+       }
+
+       ret = strcmp(left_type->self.type_name, right_type->self.type_name);
+       if (ret)
+               return ret;
+       return left->mem_type_off - right->mem_type_off;
+}
+
+static void fill_member_name(char *buf, size_t sz, struct annotated_member *m,
+                            int offset, bool first)
+{
+       struct annotated_member *child;
+
+       if (list_empty(&m->children))
+               return;
+
+       list_for_each_entry(child, &m->children, node) {
+               if (child->offset <= offset && offset < child->offset + child->size) {
+                       int len = 0;
+
+                       /* It can have anonymous struct/union members */
+                       if (child->var_name) {
+                               len = scnprintf(buf, sz, "%s%s",
+                                               first ? "" : ".", child->var_name);
+                               first = false;
+                       }
+
+                       fill_member_name(buf + len, sz - len, child, offset, first);
+                       return;
+               }
+       }
+}
+
+static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
+                                    size_t size, unsigned int width __maybe_unused)
+{
+       struct annotated_data_type *he_type = he->mem_type;
+       char buf[4096];
+
+       buf[0] = '\0';
+       if (list_empty(&he_type->self.children))
+               snprintf(buf, sizeof(buf), "no field");
+       else
+               fill_member_name(buf, sizeof(buf), &he_type->self,
+                                he->mem_type_off, true);
+       buf[4095] = '\0';
+
+       return repsep_snprintf(bf, size, "%s %+d (%s)", he_type->self.type_name,
+                              he->mem_type_off, buf);
+}
+
+struct sort_entry sort_type_offset = {
+       .se_header      = "Data Type Offset",
+       .se_cmp         = sort__type_cmp,
+       .se_collapse    = sort__typeoff_sort,
+       .se_sort        = sort__typeoff_sort,
+       .se_init        = sort__type_init,
+       .se_snprintf    = hist_entry__typeoff_snprintf,
+       .se_width_idx   = HISTC_TYPE_OFFSET,
+};
+
 
 struct sort_dimension {
        const char              *name;
@@ -2185,7 +2378,10 @@ static struct sort_dimension common_sort_dimensions[] = {
        DIM(SORT_ADDR, "addr", sort_addr),
        DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
        DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
-       DIM(SORT_SIMD, "simd", sort_simd)
+       DIM(SORT_SIMD, "simd", sort_simd),
+       DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
+       DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
+       DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
 };
 
 #undef DIM
@@ -3205,6 +3401,8 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
                        list->thread = 1;
                } else if (sd->entry == &sort_comm) {
                        list->comm = 1;
+               } else if (sd->entry == &sort_type_offset) {
+                       symbol_conf.annotate_data_member = true;
                }
 
                return __sort_dimension__add(sd, list, level);
index ecfb7f1359d5ee8a16ad02dabf226d467e2937d6..6f6b4189a389780f8aabd0089f48ea28d759a37a 100644 (file)
@@ -15,6 +15,7 @@
 
 struct option;
 struct thread;
+struct annotated_data_type;
 
 extern regex_t parent_regex;
 extern const char *sort_order;
@@ -34,6 +35,7 @@ extern struct sort_entry sort_dso_to;
 extern struct sort_entry sort_sym_from;
 extern struct sort_entry sort_sym_to;
 extern struct sort_entry sort_srcline;
+extern struct sort_entry sort_type;
 extern const char default_mem_sort_order[];
 extern bool chk_double_cl;
 
@@ -111,6 +113,7 @@ struct hist_entry {
        u64                     p_stage_cyc;
        u8                      cpumode;
        u8                      depth;
+       int                     mem_type_off;
        struct simd_flags       simd_flags;
 
        /* We are added by hists__add_dummy_entry. */
@@ -154,6 +157,7 @@ struct hist_entry {
        struct perf_hpp_list    *hpp_list;
        struct hist_entry       *parent_he;
        struct hist_entry_ops   *ops;
+       struct annotated_data_type *mem_type;
        union {
                /* this is for hierarchical entry structure */
                struct {
@@ -243,6 +247,9 @@ enum sort_type {
        SORT_LOCAL_RETIRE_LAT,
        SORT_GLOBAL_RETIRE_LAT,
        SORT_SIMD,
+       SORT_ANNOTATE_DATA_TYPE,
+       SORT_ANNOTATE_DATA_TYPE_OFFSET,
+       SORT_SYM_OFFSET,
 
        /* branch stack specific sort keys */
        __SORT_BRANCH_STACK,
index afe6db8e7bf4fb632126086f80adba6909a695cd..8c61f8627ebc9fb37cd645ea87a1d39378009db7 100644 (file)
@@ -898,7 +898,7 @@ static bool hybrid_uniquify(struct evsel *evsel, struct perf_stat_config *config
 
 static void uniquify_counter(struct perf_stat_config *config, struct evsel *counter)
 {
-       if (config->no_merge || hybrid_uniquify(counter, config))
+       if (config->aggr_mode == AGGR_NONE || hybrid_uniquify(counter, config))
                uniquify_event_name(counter);
 }
 
index 1c5c3eeba4cfb2e4d7b1914ab1e4a4d033562fec..e31426167852ad0d6fe2d94b5f8b84e2a7e7da8a 100644 (file)
@@ -264,7 +264,7 @@ static void print_ll_miss(struct perf_stat_config *config,
        static const double color_ratios[3] = {20.0, 10.0, 5.0};
 
        print_ratio(config, evsel, aggr_idx, misses, out, STAT_LL_CACHE, color_ratios,
-                   "of all L1-icache accesses");
+                   "of all LL-cache accesses");
 }
 
 static void print_dtlb_miss(struct perf_stat_config *config,
index ec350604221736783df773d40e1ea66bb6983283..b0bcf92f0f9c37e9d74bade174c148ce4c7a8805 100644 (file)
@@ -315,7 +315,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
        if (!counter->per_pkg)
                return 0;
 
-       if (perf_cpu_map__empty(cpus))
+       if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
                return 0;
 
        if (!mask) {
@@ -592,7 +592,7 @@ void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *ev
 {
        struct evsel *evsel;
 
-       if (config->no_merge)
+       if (config->aggr_mode == AGGR_NONE)
                return;
 
        evlist__for_each_entry(evlist, evsel)
index 325d0fad18424f904037a57de2005030bfc1a469..4357ba1148221bf27364ee14abe1184669635d1a 100644 (file)
@@ -76,7 +76,6 @@ struct perf_stat_config {
        bool                     null_run;
        bool                     ru_display;
        bool                     big_num;
-       bool                     no_merge;
        bool                     hybrid_merge;
        bool                     walltime_run_table;
        bool                     all_kernel;
index 9e7eeaf616b866894665eef45242beb71d6485c6..4b934ed3bfd13ba2bf1c613e13e743c664b260aa 100644 (file)
@@ -1392,8 +1392,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
                        map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
                        map__set_end(map, map__start(map) + shdr->sh_size);
                        map__set_pgoff(map, shdr->sh_offset);
-                       map__set_map_ip(map, map__dso_map_ip);
-                       map__set_unmap_ip(map, map__dso_unmap_ip);
+                       map__set_mapping_type(map, MAPPING_TYPE__DSO);
                        /* Ensure maps are correctly ordered */
                        if (kmaps) {
                                int err;
@@ -1455,8 +1454,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
                        map__set_end(curr_map, map__start(curr_map) + shdr->sh_size);
                        map__set_pgoff(curr_map, shdr->sh_offset);
                } else {
-                       map__set_map_ip(curr_map, identity__map_ip);
-                       map__set_unmap_ip(curr_map, identity__map_ip);
+                       map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
                }
                curr_dso->symtab_type = dso->symtab_type;
                if (maps__insert(kmaps, curr_map))
index a81a14769bd101bdeb207a05ec4b858ac04b8193..1da8b713509c5367b9d68d666b71cf4a1d9db6fd 100644 (file)
@@ -159,9 +159,10 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
                                goto out_free;
 
                        ret = read_build_id(buf, buf_size, bid, need_swap);
-                       if (ret == 0)
+                       if (ret == 0) {
                                ret = bid->size;
-                       break;
+                               break;
+                       }
                }
        } else {
                Elf64_Ehdr ehdr;
@@ -210,9 +211,10 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
                                goto out_free;
 
                        ret = read_build_id(buf, buf_size, bid, need_swap);
-                       if (ret == 0)
+                       if (ret == 0) {
                                ret = bid->size;
-                       break;
+                               break;
+                       }
                }
        }
 out_free:
index 82cc74b9358e0de701622378b1cffb9c0047adb6..be212ba157dc321d96534ba6063febbc1ccd5e04 100644 (file)
@@ -48,11 +48,6 @@ static bool symbol__is_idle(const char *name);
 int vmlinux_path__nr_entries;
 char **vmlinux_path;
 
-struct map_list_node {
-       struct list_head node;
-       struct map *map;
-};
-
 struct symbol_conf symbol_conf = {
        .nanosecs               = false,
        .use_modules            = true,
@@ -90,11 +85,6 @@ static enum dso_binary_type binary_type_symtab[] = {
 
 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
 
-static struct map_list_node *map_list_node__new(void)
-{
-       return malloc(sizeof(struct map_list_node));
-}
-
 static bool symbol_type__filter(char symbol_type)
 {
        symbol_type = toupper(symbol_type);
@@ -270,29 +260,6 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
                curr->end = roundup(curr->start, 4096) + 4096;
 }
 
-void maps__fixup_end(struct maps *maps)
-{
-       struct map_rb_node *prev = NULL, *curr;
-
-       down_write(maps__lock(maps));
-
-       maps__for_each_entry(maps, curr) {
-               if (prev != NULL && !map__end(prev->map))
-                       map__set_end(prev->map, map__start(curr->map));
-
-               prev = curr;
-       }
-
-       /*
-        * We still haven't the actual symbols, so guess the
-        * last map final address.
-        */
-       if (curr && !map__end(curr->map))
-               map__set_end(curr->map, ~0ULL);
-
-       up_write(maps__lock(maps));
-}
-
 struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
 {
        size_t namelen = strlen(name) + 1;
@@ -956,8 +923,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
                                return -1;
                        }
 
-                       map__set_map_ip(curr_map, identity__map_ip);
-                       map__set_unmap_ip(curr_map, identity__map_ip);
+                       map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
                        if (maps__insert(kmaps, curr_map)) {
                                dso__put(ndso);
                                return -1;
@@ -1148,33 +1114,35 @@ out_delete_from:
        return ret;
 }
 
+static int do_validate_kcore_modules_cb(struct map *old_map, void *data)
+{
+       struct rb_root *modules = data;
+       struct module_info *mi;
+       struct dso *dso;
+
+       if (!__map__is_kmodule(old_map))
+               return 0;
+
+       dso = map__dso(old_map);
+       /* Module must be in memory at the same address */
+       mi = find_module(dso->short_name, modules);
+       if (!mi || mi->start != map__start(old_map))
+               return -EINVAL;
+
+       return 0;
+}
+
 static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
 {
        struct rb_root modules = RB_ROOT;
-       struct map_rb_node *old_node;
        int err;
 
        err = read_proc_modules(filename, &modules);
        if (err)
                return err;
 
-       maps__for_each_entry(kmaps, old_node) {
-               struct map *old_map = old_node->map;
-               struct module_info *mi;
-               struct dso *dso;
+       err = maps__for_each_map(kmaps, do_validate_kcore_modules_cb, &modules);
 
-               if (!__map__is_kmodule(old_map)) {
-                       continue;
-               }
-               dso = map__dso(old_map);
-               /* Module must be in memory at the same address */
-               mi = find_module(dso->short_name, &modules);
-               if (!mi || mi->start != map__start(old_map)) {
-                       err = -EINVAL;
-                       goto out;
-               }
-       }
-out:
        delete_modules(&modules);
        return err;
 }
@@ -1271,101 +1239,15 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
        return 0;
 }
 
-/*
- * Merges map into maps by splitting the new map within the existing map
- * regions.
- */
-int maps__merge_in(struct maps *kmaps, struct map *new_map)
+static bool remove_old_maps(struct map *map, void *data)
 {
-       struct map_rb_node *rb_node;
-       LIST_HEAD(merged);
-       int err = 0;
-
-       maps__for_each_entry(kmaps, rb_node) {
-               struct map *old_map = rb_node->map;
-
-               /* no overload with this one */
-               if (map__end(new_map) < map__start(old_map) ||
-                   map__start(new_map) >= map__end(old_map))
-                       continue;
-
-               if (map__start(new_map) < map__start(old_map)) {
-                       /*
-                        * |new......
-                        *       |old....
-                        */
-                       if (map__end(new_map) < map__end(old_map)) {
-                               /*
-                                * |new......|     -> |new..|
-                                *       |old....| ->       |old....|
-                                */
-                               map__set_end(new_map, map__start(old_map));
-                       } else {
-                               /*
-                                * |new.............| -> |new..|       |new..|
-                                *       |old....|    ->       |old....|
-                                */
-                               struct map_list_node *m = map_list_node__new();
-
-                               if (!m) {
-                                       err = -ENOMEM;
-                                       goto out;
-                               }
-
-                               m->map = map__clone(new_map);
-                               if (!m->map) {
-                                       free(m);
-                                       err = -ENOMEM;
-                                       goto out;
-                               }
-
-                               map__set_end(m->map, map__start(old_map));
-                               list_add_tail(&m->node, &merged);
-                               map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
-                               map__set_start(new_map, map__end(old_map));
-                       }
-               } else {
-                       /*
-                        *      |new......
-                        * |old....
-                        */
-                       if (map__end(new_map) < map__end(old_map)) {
-                               /*
-                                *      |new..|   -> x
-                                * |old.........| -> |old.........|
-                                */
-                               map__put(new_map);
-                               new_map = NULL;
-                               break;
-                       } else {
-                               /*
-                                *      |new......| ->         |new...|
-                                * |old....|        -> |old....|
-                                */
-                               map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
-                               map__set_start(new_map, map__end(old_map));
-                       }
-               }
-       }
-
-out:
-       while (!list_empty(&merged)) {
-               struct map_list_node *old_node;
-
-               old_node = list_entry(merged.next, struct map_list_node, node);
-               list_del_init(&old_node->node);
-               if (!err)
-                       err = maps__insert(kmaps, old_node->map);
-               map__put(old_node->map);
-               free(old_node);
-       }
+       const struct map *map_to_save = data;
 
-       if (new_map) {
-               if (!err)
-                       err = maps__insert(kmaps, new_map);
-               map__put(new_map);
-       }
-       return err;
+       /*
+        * We need to preserve eBPF maps even if they are covered by kcore,
+        * because we need to access eBPF dso for source data.
+        */
+       return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map);
 }
 
 static int dso__load_kcore(struct dso *dso, struct map *map,
@@ -1374,7 +1256,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
        struct maps *kmaps = map__kmaps(map);
        struct kcore_mapfn_data md;
        struct map *replacement_map = NULL;
-       struct map_rb_node *old_node, *next;
        struct machine *machine;
        bool is_64_bit;
        int err, fd;
@@ -1421,17 +1302,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
        }
 
        /* Remove old maps */
-       maps__for_each_entry_safe(kmaps, old_node, next) {
-               struct map *old_map = old_node->map;
-
-               /*
-                * We need to preserve eBPF maps even if they are
-                * covered by kcore, because we need to access
-                * eBPF dso for source data.
-                */
-               if (old_map != map && !__map__is_bpf_prog(old_map))
-                       maps__remove(kmaps, old_map);
-       }
+       maps__remove_maps(kmaps, remove_old_maps, map);
        machine->trampolines_mapped = false;
 
        /* Find the kernel map using the '_stext' symbol */
@@ -1475,8 +1346,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
                        map__set_start(map, map__start(new_map));
                        map__set_end(map, map__end(new_map));
                        map__set_pgoff(map, map__pgoff(new_map));
-                       map__set_map_ip(map, map__map_ip_ptr(new_map));
-                       map__set_unmap_ip(map, map__unmap_ip_ptr(new_map));
+                       map__set_mapping_type(map, map__mapping_type(new_map));
                        /* Ensure maps are correctly ordered */
                        map_ref = map__get(map);
                        maps__remove(kmaps, map_ref);
@@ -2067,124 +1937,6 @@ out:
        return ret;
 }
 
-static int map__strcmp(const void *a, const void *b)
-{
-       const struct map *map_a = *(const struct map **)a;
-       const struct map *map_b = *(const struct map **)b;
-       const struct dso *dso_a = map__dso(map_a);
-       const struct dso *dso_b = map__dso(map_b);
-       int ret = strcmp(dso_a->short_name, dso_b->short_name);
-
-       if (ret == 0 && map_a != map_b) {
-               /*
-                * Ensure distinct but name equal maps have an order in part to
-                * aid reference counting.
-                */
-               ret = (int)map__start(map_a) - (int)map__start(map_b);
-               if (ret == 0)
-                       ret = (int)((intptr_t)map_a - (intptr_t)map_b);
-       }
-
-       return ret;
-}
-
-static int map__strcmp_name(const void *name, const void *b)
-{
-       const struct dso *dso = map__dso(*(const struct map **)b);
-
-       return strcmp(name, dso->short_name);
-}
-
-void __maps__sort_by_name(struct maps *maps)
-{
-       qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
-}
-
-static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
-{
-       struct map_rb_node *rb_node;
-       struct map **maps_by_name = realloc(maps__maps_by_name(maps),
-                                           maps__nr_maps(maps) * sizeof(struct map *));
-       int i = 0;
-
-       if (maps_by_name == NULL)
-               return -1;
-
-       up_read(maps__lock(maps));
-       down_write(maps__lock(maps));
-
-       RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
-       RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);
-
-       maps__for_each_entry(maps, rb_node)
-               maps_by_name[i++] = map__get(rb_node->map);
-
-       __maps__sort_by_name(maps);
-
-       up_write(maps__lock(maps));
-       down_read(maps__lock(maps));
-
-       return 0;
-}
-
-static struct map *__maps__find_by_name(struct maps *maps, const char *name)
-{
-       struct map **mapp;
-
-       if (maps__maps_by_name(maps) == NULL &&
-           map__groups__sort_by_name_from_rbtree(maps))
-               return NULL;
-
-       mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
-                      sizeof(*mapp), map__strcmp_name);
-       if (mapp)
-               return *mapp;
-       return NULL;
-}
-
-struct map *maps__find_by_name(struct maps *maps, const char *name)
-{
-       struct map_rb_node *rb_node;
-       struct map *map;
-
-       down_read(maps__lock(maps));
-
-
-       if (RC_CHK_ACCESS(maps)->last_search_by_name) {
-               const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);
-
-               if (strcmp(dso->short_name, name) == 0) {
-                       map = RC_CHK_ACCESS(maps)->last_search_by_name;
-                       goto out_unlock;
-               }
-       }
-       /*
-        * If we have maps->maps_by_name, then the name isn't in the rbtree,
-        * as maps->maps_by_name mirrors the rbtree when lookups by name are
-        * made.
-        */
-       map = __maps__find_by_name(maps, name);
-       if (map || maps__maps_by_name(maps) != NULL)
-               goto out_unlock;
-
-       /* Fallback to traversing the rbtree... */
-       maps__for_each_entry(maps, rb_node) {
-               struct dso *dso;
-
-               map = rb_node->map;
-               dso = map__dso(map);
-               if (strcmp(dso->short_name, name) == 0) {
-                       RC_CHK_ACCESS(maps)->last_search_by_name = map;
-                       goto out_unlock;
-               }
-       }
-       map = NULL;
-
-out_unlock:
-       up_read(maps__lock(maps));
-       return map;
-}
-
 int dso__load_vmlinux(struct dso *dso, struct map *map,
                      const char *vmlinux, bool vmlinux_allocated)
 {
index af87c46b3f89e5e5d60c3c769420e229431a95f1..071837ddce2ac7598cc674a7086666b8cf17450d 100644 (file)
@@ -189,7 +189,6 @@ void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
 void symbols__fixup_duplicate(struct rb_root_cached *symbols);
 void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms);
-void maps__fixup_end(struct maps *maps);
 
 typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
index 0b589570d1d095c1a20047cad399b7eb3a19a95a..c114bbceef4013f099b05cd39ef73b7b1813750f 100644 (file)
@@ -42,7 +42,11 @@ struct symbol_conf {
                        inline_name,
                        disable_add2line_warn,
                        buildid_mmap2,
-                       guest_code;
+                       guest_code,
+                       lazy_load_kernel_maps,
+                       keep_exited_threads,
+                       annotate_data_member,
+                       annotate_data_sample;
        const char      *vmlinux_name,
                        *kallsyms_name,
                        *source_prefix,
index a0579c7d7b9e9ecbe0996e8fd54a8f277be1b597..2a0289c149599927f1ee4023e530866d8da15a71 100644 (file)
@@ -665,18 +665,74 @@ int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
 }
 #endif
 
+struct perf_event__synthesize_modules_maps_cb_args {
+       struct perf_tool *tool;
+       perf_event__handler_t process;
+       struct machine *machine;
+       union perf_event *event;
+};
+
+static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
+{
+       struct perf_event__synthesize_modules_maps_cb_args *args = data;
+       union perf_event *event = args->event;
+       struct dso *dso;
+       size_t size;
+
+       if (!__map__is_kmodule(map))
+               return 0;
+
+       dso = map__dso(map);
+       if (symbol_conf.buildid_mmap2) {
+               size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
+               event->mmap2.header.type = PERF_RECORD_MMAP2;
+               event->mmap2.header.size = (sizeof(event->mmap2) -
+                                       (sizeof(event->mmap2.filename) - size));
+               memset(event->mmap2.filename + size, 0, args->machine->id_hdr_size);
+               event->mmap2.header.size += args->machine->id_hdr_size;
+               event->mmap2.start = map__start(map);
+               event->mmap2.len   = map__size(map);
+               event->mmap2.pid   = args->machine->pid;
+
+               memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1);
+
+               perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false);
+       } else {
+               size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
+               event->mmap.header.type = PERF_RECORD_MMAP;
+               event->mmap.header.size = (sizeof(event->mmap) -
+                                       (sizeof(event->mmap.filename) - size));
+               memset(event->mmap.filename + size, 0, args->machine->id_hdr_size);
+               event->mmap.header.size += args->machine->id_hdr_size;
+               event->mmap.start = map__start(map);
+               event->mmap.len   = map__size(map);
+               event->mmap.pid   = args->machine->pid;
+
+               memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1);
+       }
+
+       if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0)
+               return -1;
+
+       return 0;
+}
+
 int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
                                   struct machine *machine)
 {
-       int rc = 0;
-       struct map_rb_node *pos;
+       int rc;
        struct maps *maps = machine__kernel_maps(machine);
-       union perf_event *event;
-       size_t size = symbol_conf.buildid_mmap2 ?
-                       sizeof(event->mmap2) : sizeof(event->mmap);
+       struct perf_event__synthesize_modules_maps_cb_args args = {
+               .tool = tool,
+               .process = process,
+               .machine = machine,
+       };
+       size_t size = symbol_conf.buildid_mmap2
+               ? sizeof(args.event->mmap2)
+               : sizeof(args.event->mmap);
 
-       event = zalloc(size + machine->id_hdr_size);
-       if (event == NULL) {
+       args.event = zalloc(size + machine->id_hdr_size);
+       if (args.event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
                return -1;
@@ -687,53 +743,13 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
         * __perf_event_mmap
         */
        if (machine__is_host(machine))
-               event->header.misc = PERF_RECORD_MISC_KERNEL;
+               args.event->header.misc = PERF_RECORD_MISC_KERNEL;
        else
-               event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
-
-       maps__for_each_entry(maps, pos) {
-               struct map *map = pos->map;
-               struct dso *dso;
+               args.event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
 
-               if (!__map__is_kmodule(map))
-                       continue;
+       rc = maps__for_each_map(maps, perf_event__synthesize_modules_maps_cb, &args);
 
-               dso = map__dso(map);
-               if (symbol_conf.buildid_mmap2) {
-                       size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
-                       event->mmap2.header.type = PERF_RECORD_MMAP2;
-                       event->mmap2.header.size = (sizeof(event->mmap2) -
-                                               (sizeof(event->mmap2.filename) - size));
-                       memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
-                       event->mmap2.header.size += machine->id_hdr_size;
-                       event->mmap2.start = map__start(map);
-                       event->mmap2.len   = map__size(map);
-                       event->mmap2.pid   = machine->pid;
-
-                       memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1);
-
-                       perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
-               } else {
-                       size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
-                       event->mmap.header.type = PERF_RECORD_MMAP;
-                       event->mmap.header.size = (sizeof(event->mmap) -
-                                               (sizeof(event->mmap.filename) - size));
-                       memset(event->mmap.filename + size, 0, machine->id_hdr_size);
-                       event->mmap.header.size += machine->id_hdr_size;
-                       event->mmap.start = map__start(map);
-                       event->mmap.len   = map__size(map);
-                       event->mmap.pid   = machine->pid;
-
-                       memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1);
-               }
-
-               if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
-                       rc = -1;
-                       break;
-               }
-       }
-
-       free(event);
+       free(args.event);
        return rc;
 }
 
@@ -1039,11 +1055,11 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
        if (thread_nr > n)
                thread_nr = n;
 
-       synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
+       synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
        if (synthesize_threads == NULL)
                goto free_dirent;
 
-       args = calloc(sizeof(*args), thread_nr);
+       args = calloc(thread_nr, sizeof(*args));
        if (args == NULL)
                goto free_threads;
 
index fe5e6991ae4b496ba5a9876e3cbf22331439f953..89c47a5098e289b14e9807a4ea894e15f9e8ce4a 100644 (file)
@@ -345,38 +345,36 @@ int thread__insert_map(struct thread *thread, struct map *map)
        if (ret)
                return ret;
 
-       maps__fixup_overlappings(thread__maps(thread), map, stderr);
-       return maps__insert(thread__maps(thread), map);
+       return maps__fixup_overlap_and_insert(thread__maps(thread), map);
 }
 
-static int __thread__prepare_access(struct thread *thread)
+struct thread__prepare_access_maps_cb_args {
+       int err;
+       struct maps *maps;
+};
+
+static int thread__prepare_access_maps_cb(struct map *map, void *data)
 {
        bool initialized = false;
-       int err = 0;
-       struct maps *maps = thread__maps(thread);
-       struct map_rb_node *rb_node;
-
-       down_read(maps__lock(maps));
-
-       maps__for_each_entry(maps, rb_node) {
-               err = unwind__prepare_access(thread__maps(thread), rb_node->map, &initialized);
-               if (err || initialized)
-                       break;
-       }
+       struct thread__prepare_access_maps_cb_args *args = data;
 
-       up_read(maps__lock(maps));
+       args->err = unwind__prepare_access(args->maps, map, &initialized);
 
-       return err;
+       return (args->err || initialized) ? 1 : 0;
 }
 
 static int thread__prepare_access(struct thread *thread)
 {
-       int err = 0;
+       struct thread__prepare_access_maps_cb_args args = {
+               .err = 0,
+       };
 
-       if (dwarf_callchain_users)
-               err = __thread__prepare_access(thread);
+       if (dwarf_callchain_users) {
+               args.maps = thread__maps(thread);
+               maps__for_each_map(thread__maps(thread), thread__prepare_access_maps_cb, &args);
+       }
 
-       return err;
+       return args.err;
 }
 
 static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
@@ -385,14 +383,14 @@ static int thread__clone_maps(struct thread *thread, struct thread *parent, bool
        if (thread__pid(thread) == thread__pid(parent))
                return thread__prepare_access(thread);
 
-       if (thread__maps(thread) == thread__maps(parent)) {
+       if (RC_CHK_EQUAL(thread__maps(thread), thread__maps(parent))) {
                pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
                         thread__pid(thread), thread__tid(thread),
                         thread__pid(parent), thread__tid(parent));
                return 0;
        }
        /* But this one is new process, copy maps. */
-       return do_maps_clone ? maps__clone(thread, thread__maps(parent)) : 0;
+       return do_maps_clone ? maps__copy_from(thread__maps(thread), thread__maps(parent)) : 0;
 }
 
 int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
index e79225a0ea46b7897700775f6330b4c6d91763c4..0df775b5c1105d75d74192d2ae994d99dc5f001b 100644 (file)
@@ -36,13 +36,22 @@ struct thread_rb_node {
 };
 
 DECLARE_RC_STRUCT(thread) {
+       /** @maps: mmaps associated with this thread. */
        struct maps             *maps;
        pid_t                   pid_; /* Not all tools update this */
+       /** @tid: thread ID number unique to a machine. */
        pid_t                   tid;
+       /** @ppid: parent process of the process this thread belongs to. */
        pid_t                   ppid;
        int                     cpu;
        int                     guest_cpu; /* For QEMU thread */
        refcount_t              refcnt;
+       /**
+        * @exited: Has the thread had an exit event. Such threads are usually
+        * removed from the machine's threads but some events/tools require
+        * access to dead threads.
+        */
+       bool                    exited;
        bool                    comm_set;
        int                     comm_len;
        struct list_head        namespaces_list;
@@ -189,6 +198,11 @@ static inline refcount_t *thread__refcnt(struct thread *thread)
        return &RC_CHK_ACCESS(thread)->refcnt;
 }
 
+static inline void thread__set_exited(struct thread *thread, bool exited)
+{
+       RC_CHK_ACCESS(thread)->exited = exited;
+}
+
 static inline bool thread__comm_set(const struct thread *thread)
 {
        return RC_CHK_ACCESS(thread)->comm_set;
index be7157de045187b1be4ca4f10ef864eb1b023ec7..4db3d1bd686cf399757edb25ebf671958ba25f63 100644 (file)
@@ -28,6 +28,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
        struct record_opts *opts = &top->record_opts;
        struct target *target = &opts->target;
        size_t ret = 0;
+       int nr_cpus;
 
        if (top->samples) {
                samples_per_sec = top->samples / top->delay_secs;
@@ -93,19 +94,17 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
        else
                ret += SNPRINTF(bf + ret, size - ret, " (all");
 
+       nr_cpus = perf_cpu_map__nr(top->evlist->core.user_requested_cpus);
        if (target->cpu_list)
                ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
-                               perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
-                               ? "s" : "",
+                               nr_cpus > 1 ? "s" : "",
                                target->cpu_list);
        else {
                if (target->tid)
                        ret += SNPRINTF(bf + ret, size - ret, ")");
                else
                        ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
-                                       perf_cpu_map__nr(top->evlist->core.user_requested_cpus),
-                                       perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
-                                       ? "s" : "");
+                                       nr_cpus, nr_cpus > 1 ? "s" : "");
        }
 
        perf_top__reset_sample_counters(top);
index a8b0d79bd96cfa36be55dde1995ed8e129b3d732..4c5588dbb1317d38fcbddb21d241becd61771706 100644 (file)
@@ -21,7 +21,6 @@ struct perf_top {
        struct perf_tool   tool;
        struct evlist *evlist, *sb_evlist;
        struct record_opts record_opts;
-       struct annotation_options annotation_opts;
        struct evswitch    evswitch;
        /*
         * Symbols will be added here in perf_event__process_sample and will
index 8554db3fc0d7c9fb6523032e6257e3d49abd64b7..6013335a8daea58a4fe19c0b4a5041e522f6707d 100644 (file)
@@ -46,6 +46,7 @@ static int __report_module(struct addr_location *al, u64 ip,
 {
        Dwfl_Module *mod;
        struct dso *dso = NULL;
+       Dwarf_Addr base;
        /*
         * Some callers will use al->sym, so we can't just use the
         * cheaper thread__find_map() here.
@@ -58,13 +59,25 @@ static int __report_module(struct addr_location *al, u64 ip,
        if (!dso)
                return 0;
 
+       /*
+        * The generated JIT DSO files only map the code segment without
+        * ELF headers.  Since JIT codes used to be packed in a memory
+        * segment, calculating the base address using pgoff falls into
+        * a different code in another DSO.  So just use the map->start
+        * directly to pick the correct one.
+        */
+       if (!strncmp(dso->long_name, "/tmp/jitted-", 12))
+               base = map__start(al->map);
+       else
+               base = map__start(al->map) - map__pgoff(al->map);
+
        mod = dwfl_addrmodule(ui->dwfl, ip);
        if (mod) {
                Dwarf_Addr s;
 
                dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
-               if (s != map__start(al->map) - map__pgoff(al->map))
-                       mod = 0;
+               if (s != base)
+                       mod = NULL;
        }
 
        if (!mod) {
@@ -72,14 +85,14 @@ static int __report_module(struct addr_location *al, u64 ip,
 
                __symbol__join_symfs(filename, sizeof(filename), dso->long_name);
                mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
-                                     map__start(al->map) - map__pgoff(al->map), false);
+                                     base, false);
        }
        if (!mod) {
                char filename[PATH_MAX];
 
                if (dso__build_id_filename(dso, filename, sizeof(filename), false))
                        mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
-                                             map__start(al->map) - map__pgoff(al->map), false);
+                                             base, false);
        }
 
        if (mod) {
index c0641882fd2fd7eef7991bc7b83fee0d3d03009b..dac536e28360a2481956360ec1753d2079488925 100644 (file)
@@ -302,12 +302,31 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
        return 0;
 }
 
+struct read_unwind_spec_eh_frame_maps_cb_args {
+       struct dso *dso;
+       u64 base_addr;
+};
+
+static int read_unwind_spec_eh_frame_maps_cb(struct map *map, void *data)
+{
+
+       struct read_unwind_spec_eh_frame_maps_cb_args *args = data;
+
+       if (map__dso(map) == args->dso && map__start(map) - map__pgoff(map) < args->base_addr)
+               args->base_addr = map__start(map) - map__pgoff(map);
+
+       return 0;
+}
+
+
 static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
                                     u64 *table_data, u64 *segbase,
                                     u64 *fde_count)
 {
-       struct map_rb_node *map_node;
-       u64 base_addr = UINT64_MAX;
+       struct read_unwind_spec_eh_frame_maps_cb_args args = {
+               .dso = dso,
+               .base_addr = UINT64_MAX,
+       };
        int ret, fd;
 
        if (dso->data.eh_frame_hdr_offset == 0) {
@@ -325,16 +344,11 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
                        return -EINVAL;
        }
 
-       maps__for_each_entry(thread__maps(ui->thread), map_node) {
-               struct map *map = map_node->map;
-               u64 start = map__start(map);
+       maps__for_each_map(thread__maps(ui->thread), read_unwind_spec_eh_frame_maps_cb, &args);
 
-               if (map__dso(map) == dso && start < base_addr)
-                       base_addr = start;
-       }
-       base_addr -= dso->data.elf_base_addr;
+       args.base_addr -= dso->data.elf_base_addr;
        /* Address of .eh_frame_hdr */
-       *segbase = base_addr + dso->data.eh_frame_hdr_addr;
+       *segbase = args.base_addr + dso->data.eh_frame_hdr_addr;
        ret = unwind_spec_ehframe(dso, ui->machine, dso->data.eh_frame_hdr_offset,
                                   table_data, fde_count);
        if (ret)
index ae3eee69b659c849ee4eb6ff88330d24d29852e5..df8963796187dc69515114aff048a7a757299a7d 100644 (file)
@@ -140,23 +140,34 @@ static struct dso *__machine__addnew_vdso(struct machine *machine, const char *s
        return dso;
 }
 
+struct machine__thread_dso_type_maps_cb_args {
+       struct machine *machine;
+       enum dso_type dso_type;
+};
+
+static int machine__thread_dso_type_maps_cb(struct map *map, void *data)
+{
+       struct machine__thread_dso_type_maps_cb_args *args = data;
+       struct dso *dso = map__dso(map);
+
+       if (!dso || dso->long_name[0] != '/')
+               return 0;
+
+       args->dso_type = dso__type(dso, args->machine);
+       return (args->dso_type != DSO__TYPE_UNKNOWN) ? 1 : 0;
+}
+
 static enum dso_type machine__thread_dso_type(struct machine *machine,
                                              struct thread *thread)
 {
-       enum dso_type dso_type = DSO__TYPE_UNKNOWN;
-       struct map_rb_node *rb_node;
-
-       maps__for_each_entry(thread__maps(thread), rb_node) {
-               struct dso *dso = map__dso(rb_node->map);
+       struct machine__thread_dso_type_maps_cb_args args = {
+               .machine = machine,
+               .dso_type = DSO__TYPE_UNKNOWN,
+       };
 
-               if (!dso || dso->long_name[0] != '/')
-                       continue;
-               dso_type = dso__type(dso, machine);
-               if (dso_type != DSO__TYPE_UNKNOWN)
-                       break;
-       }
+       maps__for_each_map(thread__maps(thread), machine__thread_dso_type_maps_cb, &args);
 
-       return dso_type;
+       return args.dso_type;
 }
 
 #if BITS_PER_LONG == 64
index 48dd2b018c47a7fcbe4a5e78e3d43bfc03d349b2..57027e0ac7b658a82ecd4ebd3153dfb3f8c1daad 100644 (file)
@@ -7,35 +7,9 @@
 
 int zstd_init(struct zstd_data *data, int level)
 {
-       size_t ret;
-
-       data->dstream = ZSTD_createDStream();
-       if (data->dstream == NULL) {
-               pr_err("Couldn't create decompression stream.\n");
-               return -1;
-       }
-
-       ret = ZSTD_initDStream(data->dstream);
-       if (ZSTD_isError(ret)) {
-               pr_err("Failed to initialize decompression stream: %s\n", ZSTD_getErrorName(ret));
-               return -1;
-       }
-
-       if (!level)
-               return 0;
-
-       data->cstream = ZSTD_createCStream();
-       if (data->cstream == NULL) {
-               pr_err("Couldn't create compression stream.\n");
-               return -1;
-       }
-
-       ret = ZSTD_initCStream(data->cstream, level);
-       if (ZSTD_isError(ret)) {
-               pr_err("Failed to initialize compression stream: %s\n", ZSTD_getErrorName(ret));
-               return -1;
-       }
-
+       data->comp_level = level;
+       data->dstream = NULL;
+       data->cstream = NULL;
        return 0;
 }
 
@@ -54,7 +28,7 @@ int zstd_fini(struct zstd_data *data)
        return 0;
 }
 
-size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
+ssize_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
                                       void *src, size_t src_size, size_t max_record_size,
                                       size_t process_header(void *record, size_t increment))
 {
@@ -63,6 +37,21 @@ size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t
        ZSTD_outBuffer output;
        void *record;
 
+       if (!data->cstream) {
+               data->cstream = ZSTD_createCStream();
+               if (data->cstream == NULL) {
+                       pr_err("Couldn't create compression stream.\n");
+                       return -1;
+               }
+
+               ret = ZSTD_initCStream(data->cstream, data->comp_level);
+               if (ZSTD_isError(ret)) {
+                       pr_err("Failed to initialize compression stream: %s\n",
+                               ZSTD_getErrorName(ret));
+                       return -1;
+               }
+       }
+
        while (input.pos < input.size) {
                record = dst;
                size = process_header(record, 0);
@@ -96,6 +85,20 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size
        ZSTD_inBuffer input = { src, src_size, 0 };
        ZSTD_outBuffer output = { dst, dst_size, 0 };
 
+       if (!data->dstream) {
+               data->dstream = ZSTD_createDStream();
+               if (data->dstream == NULL) {
+                       pr_err("Couldn't create decompression stream.\n");
+                       return 0;
+               }
+
+               ret = ZSTD_initDStream(data->dstream);
+               if (ZSTD_isError(ret)) {
+                       pr_err("Failed to initialize decompression stream: %s\n",
+                               ZSTD_getErrorName(ret));
+                       return 0;
+               }
+       }
        while (input.pos < input.size) {
                ret = ZSTD_decompressStream(data->dstream, &output, &input);
                if (ZSTD_isError(ret)) {
index d9d9923af85c2e60ca9b2161fd93867df3346d1b..a4b902f9e1c486801a7c14072e796a1b1f8e92ad 100644 (file)
@@ -15,7 +15,7 @@ LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
 OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
 endif
 
-CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\"
+override CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\"
 
 $(OUTPUT)%.o : %.c
        $(ECHO) "  CC      " $@
index 95dc58b94178bf00b447b04440644bba1cd2209d..caff3834671f9dfb7d261d5b6633532f71ecd9f5 100644 (file)
@@ -58,10 +58,13 @@ cxl_core-y += $(CXL_CORE_SRC)/mbox.o
 cxl_core-y += $(CXL_CORE_SRC)/pci.o
 cxl_core-y += $(CXL_CORE_SRC)/hdm.o
 cxl_core-y += $(CXL_CORE_SRC)/pmu.o
+cxl_core-y += $(CXL_CORE_SRC)/cdat.o
 cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
 cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
 cxl_core-y += config_check.o
 cxl_core-y += cxl_core_test.o
 cxl_core-y += cxl_core_exports.o
 
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
+
 obj-m += test/
index 61d5f7bcddf9a6ef9d5df5d0c4346bd93f7181f9..6b192789785612d810c6ff577b1ac47aadd9e9b3 100644 (file)
@@ -8,3 +8,5 @@ obj-m += cxl_mock_mem.o
 cxl_test-y := cxl.o
 cxl_mock-y := mock.o
 cxl_mock_mem-y := mem.o
+
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
index f4e517a0c7740ffa2dfb4889231d42fad438a5a9..a3cdbb2be038c45e27326925d81ba43294b56c31 100644 (file)
@@ -68,15 +68,19 @@ static struct acpi_device acpi0017_mock;
 static struct acpi_device host_bridge[NR_BRIDGES] = {
        [0] = {
                .handle = &host_bridge[0],
+               .pnp.unique_id = "0",
        },
        [1] = {
                .handle = &host_bridge[1],
+               .pnp.unique_id = "1",
        },
        [2] = {
                .handle = &host_bridge[2],
+               .pnp.unique_id = "2",
        },
        [3] = {
                .handle = &host_bridge[3],
+               .pnp.unique_id = "3",
        },
 };
 
index ee61fa3a2411f8c2acc7272a20252fad95a8a811..35ee41e435ab3a531187b57fd0ea8f2291a8435d 100644 (file)
@@ -251,7 +251,8 @@ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
        for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
                memcpy(&pl->records[i], event_get_current(log),
                       sizeof(pl->records[i]));
-               pl->records[i].hdr.handle = event_get_cur_event_handle(log);
+               pl->records[i].event.generic.hdr.handle =
+                               event_get_cur_event_handle(log);
                log->cur_idx++;
        }
 
@@ -337,87 +338,109 @@ static void cxl_mock_event_trigger(struct device *dev)
 }
 
 struct cxl_event_record_raw maint_needed = {
-       .hdr = {
-               .id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
-                               0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
-               .length = sizeof(struct cxl_event_record_raw),
-               .flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
-               /* .handle = Set dynamically */
-               .related_handle = cpu_to_le16(0xa5b6),
+       .id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
+                       0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
+       .event.generic = {
+               .hdr = {
+                       .length = sizeof(struct cxl_event_record_raw),
+                       .flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
+                       /* .handle = Set dynamically */
+                       .related_handle = cpu_to_le16(0xa5b6),
+               },
+               .data = { 0xDE, 0xAD, 0xBE, 0xEF },
        },
-       .data = { 0xDE, 0xAD, 0xBE, 0xEF },
 };
 
 struct cxl_event_record_raw hardware_replace = {
-       .hdr = {
-               .id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
-                               0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
-               .length = sizeof(struct cxl_event_record_raw),
-               .flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
-               /* .handle = Set dynamically */
-               .related_handle = cpu_to_le16(0xb6a5),
+       .id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
+                       0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
+       .event.generic = {
+               .hdr = {
+                       .length = sizeof(struct cxl_event_record_raw),
+                       .flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
+                       /* .handle = Set dynamically */
+                       .related_handle = cpu_to_le16(0xb6a5),
+               },
+               .data = { 0xDE, 0xAD, 0xBE, 0xEF },
        },
-       .data = { 0xDE, 0xAD, 0xBE, 0xEF },
 };
 
-struct cxl_event_gen_media gen_media = {
-       .hdr = {
-               .id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
-                               0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
-               .length = sizeof(struct cxl_event_gen_media),
-               .flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
-               /* .handle = Set dynamically */
-               .related_handle = cpu_to_le16(0),
+struct cxl_test_gen_media {
+       uuid_t id;
+       struct cxl_event_gen_media rec;
+} __packed;
+
+struct cxl_test_gen_media gen_media = {
+       .id = CXL_EVENT_GEN_MEDIA_UUID,
+       .rec = {
+               .hdr = {
+                       .length = sizeof(struct cxl_test_gen_media),
+                       .flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
+                       /* .handle = Set dynamically */
+                       .related_handle = cpu_to_le16(0),
+               },
+               .phys_addr = cpu_to_le64(0x2000),
+               .descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
+               .type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
+               .transaction_type = CXL_GMER_TRANS_HOST_WRITE,
+               /* .validity_flags = <set below> */
+               .channel = 1,
+               .rank = 30
        },
-       .phys_addr = cpu_to_le64(0x2000),
-       .descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
-       .type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
-       .transaction_type = CXL_GMER_TRANS_HOST_WRITE,
-       /* .validity_flags = <set below> */
-       .channel = 1,
-       .rank = 30
 };
 
-struct cxl_event_dram dram = {
-       .hdr = {
-               .id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
-                               0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
-               .length = sizeof(struct cxl_event_dram),
-               .flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
-               /* .handle = Set dynamically */
-               .related_handle = cpu_to_le16(0),
+struct cxl_test_dram {
+       uuid_t id;
+       struct cxl_event_dram rec;
+} __packed;
+
+struct cxl_test_dram dram = {
+       .id = CXL_EVENT_DRAM_UUID,
+       .rec = {
+               .hdr = {
+                       .length = sizeof(struct cxl_test_dram),
+                       .flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
+                       /* .handle = Set dynamically */
+                       .related_handle = cpu_to_le16(0),
+               },
+               .phys_addr = cpu_to_le64(0x8000),
+               .descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
+               .type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
+               .transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
+               /* .validity_flags = <set below> */
+               .channel = 1,
+               .bank_group = 5,
+               .bank = 2,
+               .column = {0xDE, 0xAD},
        },
-       .phys_addr = cpu_to_le64(0x8000),
-       .descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
-       .type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
-       .transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
-       /* .validity_flags = <set below> */
-       .channel = 1,
-       .bank_group = 5,
-       .bank = 2,
-       .column = {0xDE, 0xAD},
 };
 
-struct cxl_event_mem_module mem_module = {
-       .hdr = {
-               .id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
-                               0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
-               .length = sizeof(struct cxl_event_mem_module),
-               /* .handle = Set dynamically */
-               .related_handle = cpu_to_le16(0),
+struct cxl_test_mem_module {
+       uuid_t id;
+       struct cxl_event_mem_module rec;
+} __packed;
+
+struct cxl_test_mem_module mem_module = {
+       .id = CXL_EVENT_MEM_MODULE_UUID,
+       .rec = {
+               .hdr = {
+                       .length = sizeof(struct cxl_test_mem_module),
+                       /* .handle = Set dynamically */
+                       .related_handle = cpu_to_le16(0),
+               },
+               .event_type = CXL_MMER_TEMP_CHANGE,
+               .info = {
+                       .health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
+                       .media_status = CXL_DHI_MS_ALL_DATA_LOST,
+                       .add_status = (CXL_DHI_AS_CRITICAL << 2) |
+                                     (CXL_DHI_AS_WARNING << 4) |
+                                     (CXL_DHI_AS_WARNING << 5),
+                       .device_temp = { 0xDE, 0xAD},
+                       .dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
+                       .cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
+                       .cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
+               }
        },
-       .event_type = CXL_MMER_TEMP_CHANGE,
-       .info = {
-               .health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
-               .media_status = CXL_DHI_MS_ALL_DATA_LOST,
-               .add_status = (CXL_DHI_AS_CRITICAL << 2) |
-                             (CXL_DHI_AS_WARNING << 4) |
-                             (CXL_DHI_AS_WARNING << 5),
-               .device_temp = { 0xDE, 0xAD},
-               .dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
-               .cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
-               .cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
-       }
 };
 
 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
@@ -439,11 +462,11 @@ static int mock_set_timestamp(struct cxl_dev_state *cxlds,
 static void cxl_mock_add_event_logs(struct mock_event_store *mes)
 {
        put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
-                          &gen_media.validity_flags);
+                          &gen_media.rec.validity_flags);
 
        put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
                           CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
-                          &dram.validity_flags);
+                          &dram.rec.validity_flags);
 
        mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
        mes_add_event(mes, CXL_EVENT_TYPE_INFO,
index 8153251ea389a7dcff59d13f258cee0b066c7dbf..91a3627f301a79b90036c2cfd82217819b9bb757 100644 (file)
@@ -82,4 +82,6 @@ libnvdimm-$(CONFIG_NVDIMM_KEYS) += $(NVDIMM_SRC)/security.o
 libnvdimm-y += libnvdimm_test.o
 libnvdimm-y += config_check.o
 
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
+
 obj-m += test/
index 197bcb2b7f35165a54d2d621e9d57c8e2edadf6b..003d48f5f24f5a30e8969e01628540e431f3caa7 100644 (file)
@@ -7,6 +7,7 @@ obj-m += nfit_test_iomap.o
 
 ifeq  ($(CONFIG_ACPI_NFIT),m)
        nfit_test-y := nfit.o
+       obj-m += ndtest.o
 else
        nfit_test-y := ndtest.o
 endif
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
new file mode 100644 (file)
index 0000000..0c365f3
--- /dev/null
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Meta
+
+#include <test_progs.h>
+#include "network_helpers.h"
+#include "sock_iter_batch.skel.h"
+
+#define TEST_NS "sock_iter_batch_netns"
+
+static const int nr_soreuse = 4;
+
+static void do_test(int sock_type, bool onebyone)
+{
+       int err, i, nread, to_read, total_read, iter_fd = -1;
+       int first_idx, second_idx, indices[nr_soreuse];
+       struct bpf_link *link = NULL;
+       struct sock_iter_batch *skel;
+       int *fds[2] = {};
+
+       skel = sock_iter_batch__open();
+       if (!ASSERT_OK_PTR(skel, "sock_iter_batch__open"))
+               return;
+
+       /* Prepare 2 buckets of sockets in the kernel hashtable */
+       for (i = 0; i < ARRAY_SIZE(fds); i++) {
+               int local_port;
+
+               fds[i] = start_reuseport_server(AF_INET6, sock_type, "::1", 0, 0,
+                                               nr_soreuse);
+               if (!ASSERT_OK_PTR(fds[i], "start_reuseport_server"))
+                       goto done;
+               local_port = get_socket_local_port(*fds[i]);
+               if (!ASSERT_GE(local_port, 0, "get_socket_local_port"))
+                       goto done;
+               skel->rodata->ports[i] = ntohs(local_port);
+       }
+
+       err = sock_iter_batch__load(skel);
+       if (!ASSERT_OK(err, "sock_iter_batch__load"))
+               goto done;
+
+       link = bpf_program__attach_iter(sock_type == SOCK_STREAM ?
+                                       skel->progs.iter_tcp_soreuse :
+                                       skel->progs.iter_udp_soreuse,
+                                       NULL);
+       if (!ASSERT_OK_PTR(link, "bpf_program__attach_iter"))
+               goto done;
+
+       iter_fd = bpf_iter_create(bpf_link__fd(link));
+       if (!ASSERT_GE(iter_fd, 0, "bpf_iter_create"))
+               goto done;
+
+       /* Test reading a bucket (either from fds[0] or fds[1]).
+        * Only read "nr_soreuse - 1" number of sockets
+        * from a bucket and leave one socket out from
+        * that bucket on purpose.
+        */
+       to_read = (nr_soreuse - 1) * sizeof(*indices);
+       total_read = 0;
+       first_idx = -1;
+       do {
+               nread = read(iter_fd, indices, onebyone ? sizeof(*indices) : to_read);
+               if (nread <= 0 || nread % sizeof(*indices))
+                       break;
+               total_read += nread;
+
+               if (first_idx == -1)
+                       first_idx = indices[0];
+               for (i = 0; i < nread / sizeof(*indices); i++)
+                       ASSERT_EQ(indices[i], first_idx, "first_idx");
+       } while (total_read < to_read);
+       ASSERT_EQ(nread, onebyone ? sizeof(*indices) : to_read, "nread");
+       ASSERT_EQ(total_read, to_read, "total_read");
+
+       free_fds(fds[first_idx], nr_soreuse);
+       fds[first_idx] = NULL;
+
+       /* Read the "whole" second bucket */
+       to_read = nr_soreuse * sizeof(*indices);
+       total_read = 0;
+       second_idx = !first_idx;
+       do {
+               nread = read(iter_fd, indices, onebyone ? sizeof(*indices) : to_read);
+               if (nread <= 0 || nread % sizeof(*indices))
+                       break;
+               total_read += nread;
+
+               for (i = 0; i < nread / sizeof(*indices); i++)
+                       ASSERT_EQ(indices[i], second_idx, "second_idx");
+       } while (total_read <= to_read);
+       ASSERT_EQ(nread, 0, "nread");
+       /* Both so_reuseport ports should be in different buckets, so
+        * total_read must equal to the expected to_read.
+        *
+        * For a very unlikely case, both ports collide at the same bucket,
+        * the bucket offset (i.e. 3) will be skipped and it cannot
+        * expect the to_read number of bytes.
+        */
+       if (skel->bss->bucket[0] != skel->bss->bucket[1])
+               ASSERT_EQ(total_read, to_read, "total_read");
+
+done:
+       for (i = 0; i < ARRAY_SIZE(fds); i++)
+               free_fds(fds[i], nr_soreuse);
+       if (iter_fd < 0)
+               close(iter_fd);
+       bpf_link__destroy(link);
+       sock_iter_batch__destroy(skel);
+}
+
+void test_sock_iter_batch(void)
+{
+       struct nstoken *nstoken = NULL;
+
+       SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
+       SYS(done, "ip netns add %s", TEST_NS);
+       SYS(done, "ip -net %s link set dev lo up", TEST_NS);
+
+       nstoken = open_netns(TEST_NS);
+       if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+               goto done;
+
+       if (test__start_subtest("tcp")) {
+               do_test(SOCK_STREAM, true);
+               do_test(SOCK_STREAM, false);
+       }
+       if (test__start_subtest("udp")) {
+               do_test(SOCK_DGRAM, true);
+               do_test(SOCK_DGRAM, false);
+       }
+       close_netns(nstoken);
+
+done:
+       SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
+}
index 67d4ef9e62b378de8888b326495514f26d7b169d..e905cbaf6b3d109c1ff68f7763a6b1cf1c0f4c17 100644 (file)
@@ -47,6 +47,19 @@ static void subtest_ctx_arg_rewrite(void)
        struct btf *btf = NULL;
        __u32 info_len = sizeof(info);
        int err, fd, i;
+       struct btf *kern_btf = NULL;
+
+       kern_btf = btf__load_vmlinux_btf();
+       if (!ASSERT_OK_PTR(kern_btf, "kern_btf_load"))
+               return;
+
+       /* simple detection of kernel native arg:ctx tag support */
+       if (btf__find_by_name_kind(kern_btf, "bpf_subprog_arg_info", BTF_KIND_STRUCT) > 0) {
+               test__skip();
+               btf__free(kern_btf);
+               return;
+       }
+       btf__free(kern_btf);
 
        skel = test_global_func_ctx_args__open();
        if (!ASSERT_OK_PTR(skel, "skel_open"))
index 1bdc680b0e0e26891ee045d19caef68e4133570f..e8bd4b7b5ef7695c7fa9176cb447504e36a9dfaa 100644 (file)
@@ -72,6 +72,8 @@
 #define inet_rcv_saddr         sk.__sk_common.skc_rcv_saddr
 #define inet_dport             sk.__sk_common.skc_dport
 
+#define udp_portaddr_hash      inet.sk.__sk_common.skc_u16hashes[1]
+
 #define ir_loc_addr            req.__req_common.skc_rcv_saddr
 #define ir_num                 req.__req_common.skc_num
 #define ir_rmt_addr            req.__req_common.skc_daddr
@@ -85,6 +87,7 @@
 #define sk_rmem_alloc          sk_backlog.rmem_alloc
 #define sk_refcnt              __sk_common.skc_refcnt
 #define sk_state               __sk_common.skc_state
+#define sk_net                 __sk_common.skc_net
 #define sk_v6_daddr            __sk_common.skc_v6_daddr
 #define sk_v6_rcv_saddr                __sk_common.skc_v6_rcv_saddr
 #define sk_flags               __sk_common.skc_flags
diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
new file mode 100644 (file)
index 0000000..ffbbfe1
--- /dev/null
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Meta
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
+#include "bpf_kfuncs.h"
+
+#define ATTR __always_inline
+#include "test_jhash.h"
+
+static bool ipv6_addr_loopback(const struct in6_addr *a)
+{
+       return (a->s6_addr32[0] | a->s6_addr32[1] |
+               a->s6_addr32[2] | (a->s6_addr32[3] ^ bpf_htonl(1))) == 0;
+}
+
+volatile const __u16 ports[2];
+unsigned int bucket[2];
+
+SEC("iter/tcp")
+int iter_tcp_soreuse(struct bpf_iter__tcp *ctx)
+{
+       struct sock *sk = (struct sock *)ctx->sk_common;
+       struct inet_hashinfo *hinfo;
+       unsigned int hash;
+       struct net *net;
+       int idx;
+
+       if (!sk)
+               return 0;
+
+       sk = bpf_rdonly_cast(sk, bpf_core_type_id_kernel(struct sock));
+       if (sk->sk_family != AF_INET6 ||
+           sk->sk_state != TCP_LISTEN ||
+           !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr))
+               return 0;
+
+       if (sk->sk_num == ports[0])
+               idx = 0;
+       else if (sk->sk_num == ports[1])
+               idx = 1;
+       else
+               return 0;
+
+       /* bucket selection as in inet_lhash2_bucket_sk() */
+       net = sk->sk_net.net;
+       hash = jhash2(sk->sk_v6_rcv_saddr.s6_addr32, 4, net->hash_mix);
+       hash ^= sk->sk_num;
+       hinfo = net->ipv4.tcp_death_row.hashinfo;
+       bucket[idx] = hash & hinfo->lhash2_mask;
+       bpf_seq_write(ctx->meta->seq, &idx, sizeof(idx));
+
+       return 0;
+}
+
+#define udp_sk(ptr) container_of(ptr, struct udp_sock, inet.sk)
+
+SEC("iter/udp")
+int iter_udp_soreuse(struct bpf_iter__udp *ctx)
+{
+       struct sock *sk = (struct sock *)ctx->udp_sk;
+       struct udp_table *udptable;
+       int idx;
+
+       if (!sk)
+               return 0;
+
+       sk = bpf_rdonly_cast(sk, bpf_core_type_id_kernel(struct sock));
+       if (sk->sk_family != AF_INET6 ||
+           !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr))
+               return 0;
+
+       if (sk->sk_num == ports[0])
+               idx = 0;
+       else if (sk->sk_num == ports[1])
+               idx = 1;
+       else
+               return 0;
+
+       /* bucket selection as in udp_hashslot2() */
+       udptable = sk->sk_net.net->ipv4.udp_table;
+       bucket[idx] = udp_sk(sk)->udp_portaddr_hash & udptable->mask;
+       bpf_seq_write(ctx->meta->seq, &idx, sizeof(idx));
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
index c300734d26f63c8a6305d3cff333ee3fc83d8f50..ef53559bbbdf12dd63e7743e776b04ed800a117e 100644 (file)
@@ -69,3 +69,34 @@ u32 jhash(const void *key, u32 length, u32 initval)
 
        return c;
 }
+
+static __always_inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
+       u32 a, b, c;
+
+       /* Set up the internal state */
+       a = b = c = JHASH_INITVAL + (length<<2) + initval;
+
+       /* Handle most of the key */
+       while (length > 3) {
+               a += k[0];
+               b += k[1];
+               c += k[2];
+               __jhash_mix(a, b, c);
+               length -= 3;
+               k += 3;
+       }
+
+       /* Handle the last 3 u32's */
+       switch (length) {
+       case 3: c += k[2];
+       case 2: b += k[1];
+       case 1: a += k[0];
+               __jhash_final(a, b, c);
+               break;
+       case 0: /* Nothing left to add */
+               break;
+       }
+
+       return c;
+}
index 9eeb2d89cda884171789581de5fa94138ca6b79d..67dddd9418911cb1ce3db26132fab340e93aab4d 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <vmlinux.h>
 #include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
 #include "bpf_misc.h"
 #include "xdp_metadata.h"
 #include "bpf_kfuncs.h"
@@ -138,25 +139,182 @@ __weak int subprog_ctx_tag(void *ctx __arg_ctx)
        return bpf_get_stack(ctx, stack, sizeof(stack), 0);
 }
 
+__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
+{
+       return 0;
+}
+
+__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
+{
+       return 0;
+}
+
 SEC("?raw_tp")
 __success __log_level(2)
 int arg_tag_ctx_raw_tp(void *ctx)
 {
-       return subprog_ctx_tag(ctx);
+       return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
+}
+
+SEC("?raw_tp.w")
+__success __log_level(2)
+int arg_tag_ctx_raw_tp_writable(void *ctx)
+{
+       return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
+}
+
+SEC("?tp_btf/sys_enter")
+__success __log_level(2)
+int arg_tag_ctx_raw_tp_btf(void *ctx)
+{
+       return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
+}
+
+struct whatever { };
+
+__weak int tp_whatever(struct whatever *ctx __arg_ctx)
+{
+       return 0;
 }
 
 SEC("?tp")
 __success __log_level(2)
 int arg_tag_ctx_tp(void *ctx)
 {
-       return subprog_ctx_tag(ctx);
+       return subprog_ctx_tag(ctx) + tp_whatever(ctx);
+}
+
+__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
+{
+       return 0;
+}
+
+__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
+{
+       return 0;
 }
 
 SEC("?kprobe")
 __success __log_level(2)
 int arg_tag_ctx_kprobe(void *ctx)
 {
-       return subprog_ctx_tag(ctx);
+       return subprog_ctx_tag(ctx) +
+              kprobe_subprog_pt_regs(ctx) +
+              kprobe_subprog_typedef(ctx);
+}
+
+__weak int perf_subprog_regs(
+#if defined(bpf_target_riscv)
+       struct user_regs_struct *ctx __arg_ctx
+#elif defined(bpf_target_s390)
+       /* user_pt_regs typedef is anonymous struct, so only `void *` works */
+       void *ctx __arg_ctx
+#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
+       struct user_pt_regs *ctx __arg_ctx
+#else
+       struct pt_regs *ctx __arg_ctx
+#endif
+)
+{
+       return 0;
+}
+
+__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
+{
+       return 0;
+}
+
+__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
+{
+       return 0;
+}
+
+SEC("?perf_event")
+__success __log_level(2)
+int arg_tag_ctx_perf(void *ctx)
+{
+       return subprog_ctx_tag(ctx) +
+              perf_subprog_regs(ctx) +
+              perf_subprog_typedef(ctx) +
+              perf_subprog_canonical(ctx);
+}
+
+__weak int iter_subprog_void(void *ctx __arg_ctx)
+{
+       return 0;
+}
+
+__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
+{
+       return 0;
+}
+
+SEC("?iter/task")
+__success __log_level(2)
+int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
+{
+       return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
+}
+
+__weak int tracing_subprog_void(void *ctx __arg_ctx)
+{
+       return 0;
+}
+
+__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
+{
+       return 0;
+}
+
+int acc;
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+__success __log_level(2)
+int BPF_PROG(arg_tag_ctx_fentry)
+{
+       acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+       return 0;
+}
+
+SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
+__success __log_level(2)
+int BPF_PROG(arg_tag_ctx_fexit)
+{
+       acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+       return 0;
+}
+
+SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
+__success __log_level(2)
+int BPF_PROG(arg_tag_ctx_fmod_ret)
+{
+       return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+}
+
+SEC("?lsm/bpf")
+__success __log_level(2)
+int BPF_PROG(arg_tag_ctx_lsm)
+{
+       return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+}
+
+SEC("?struct_ops/test_1")
+__success __log_level(2)
+int BPF_PROG(arg_tag_ctx_struct_ops)
+{
+       return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
+}
+
+SEC(".struct_ops")
+struct bpf_dummy_ops dummy_1 = {
+       .test_1 = (void *)arg_tag_ctx_struct_ops,
+};
+
+SEC("?syscall")
+__success __log_level(2)
+int arg_tag_ctx_syscall(void *ctx)
+{
+       return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
 }
 
 __weak int subprog_dynptr(struct bpf_dynptr *dptr)
index 71814a7532160638b1f345d42fcfcef4c62f8182..a9ab37d3b9e2df323d702f44705b1117bc443780 100644 (file)
@@ -146,4 +146,23 @@ l0_%=:     exit;                                           \
        : __clobber_all);
 }
 
+SEC("flow_dissector")
+__description("flow_keys illegal alu op with variable offset")
+__failure __msg("R7 pointer arithmetic on flow_keys prohibited")
+__naked void flow_keys_illegal_variable_offset_alu(void)
+{
+       asm volatile("                                  \
+       r6 = r1;                                        \
+       r7 = *(u64*)(r6 + %[flow_keys_off]);            \
+       r8 = 8;                                         \
+       r8 /= 1;                                        \
+       r8 &= 8;                                        \
+       r7 += r8;                                       \
+       r0 = *(u64*)(r7 + 0);                           \
+       exit;                                           \
+"      :
+       : __imm_const(flow_keys_off, offsetof(struct __sk_buff, flow_keys))
+       : __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
index 534576f06df1cc78f63619d873f77ad0390f45e5..c59e4adb905df61494db41d99355fbac5d742bab 100644 (file)
@@ -12,6 +12,7 @@
 #include <syscall.h>
 #include <unistd.h>
 #include <sys/resource.h>
+#include <linux/close_range.h>
 
 #include "../kselftest_harness.h"
 #include "../clone3/clone3_selftests.h"
index c54d1697f439a47908f360e75b2760861a9d7939..d508486cc0bdc2c917f9386aa2aea796f12d2c1d 100755 (executable)
@@ -162,7 +162,7 @@ prio_arp()
        local mode=$1
 
        for primary_reselect in 0 1 2; do
-               prio_test "mode active-backup arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
+               prio_test "mode $mode arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
                log_test "prio" "$mode arp_ip_target primary_reselect $primary_reselect"
        done
 }
@@ -178,7 +178,7 @@ prio_ns()
        fi
 
        for primary_reselect in 0 1 2; do
-               prio_test "mode active-backup arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
+               prio_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
                log_test "prio" "$mode ns_ip6_target primary_reselect $primary_reselect"
        done
 }
@@ -194,9 +194,9 @@ prio()
 
        for mode in $modes; do
                prio_miimon $mode
-               prio_arp $mode
-               prio_ns $mode
        done
+       prio_arp "active-backup"
+       prio_ns "active-backup"
 }
 
 arp_validate_test()
index 70638fa50b2cc872747bd9cdd34a1a111251dc97..899d7fb6ea8e906942cdac62ae61c64def72248d 100644 (file)
@@ -1,2 +1,10 @@
 CONFIG_BONDING=y
+CONFIG_BRIDGE=y
+CONFIG_DUMMY=y
+CONFIG_IPV6=y
 CONFIG_MACVLAN=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_CLS_FLOWER=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NLMON=y
+CONFIG_VETH=y
index 2a268b17b61f515b5c50a1fdbe3d7ae21af00578..dbdd736a41d394c9a6e2897d971eb31e728eae34 100644 (file)
@@ -48,6 +48,17 @@ test_LAG_cleanup()
        ip link add mv0 link "$name" up address "$ucaddr" type macvlan
        # Used to test dev->mc handling
        ip address add "$addr6" dev "$name"
+
+       # Check that addresses were added as expected
+       (grep_bridge_fdb "$ucaddr" bridge fdb show dev dummy1 ||
+               grep_bridge_fdb "$ucaddr" bridge fdb show dev dummy2) >/dev/null
+       check_err $? "macvlan unicast address not found on a slave"
+
+       # mcaddr is added asynchronously by addrconf_dad_work(), use busywait
+       (busywait 10000 grep_bridge_fdb "$mcaddr" bridge fdb show dev dummy1 ||
+               grep_bridge_fdb "$mcaddr" bridge fdb show dev dummy2) >/dev/null
+       check_err $? "IPv6 solicited-node multicast mac address not found on a slave"
+
        ip link set dev "$name" down
        ip link del "$name"
 
index ad4c845a4ac7c2ae8cf028c2d0b2077a59befd09..b76bf50309524a6e1b59340f9b6370ed6d484e6d 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 # Regression Test:
index 2330d37453f956eb720f452fd12cdfd1cdc5aebc..8c2619002147915bfc03cf20fc56a257769895e9 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 # Regression Test:
index 6091b45d226baf192c2d380ba893be15592f323d..79b65bdf05db6586726cc76d3313f12368d21dc5 100644 (file)
@@ -1 +1 @@
-timeout=120
+timeout=1200
index 42ce602d8d492e5fb64b305f0a28149df87b28b0..0f0f4f05807c92076dcf26a57e3229d45bf90a84 100755 (executable)
@@ -40,7 +40,6 @@
 # |   + $swp1          $swp3 +                    + $swp4                     |
 # |   | iPOOL1        iPOOL0 |                    | iPOOL2                    |
 # |   | ePOOL4        ePOOL5 |                    | ePOOL4                    |
-# |   |                1Gbps |                    | 1Gbps                     |
 # |   |        PFC:enabled=1 |                    | PFC:enabled=1             |
 # | +-|----------------------|-+                +-|------------------------+  |
 # | | + $swp1.111  $swp3.111 + |                | + $swp4.111              |  |
@@ -120,6 +119,9 @@ h2_destroy()
 
 switch_create()
 {
+       local lanes_swp4
+       local pg1_size
+
        # pools
        # -----
 
@@ -229,7 +231,20 @@ switch_create()
        dcb pfc set dev $swp4 prio-pfc all:off 1:on
        # PG0 will get autoconfigured to Xoff, give PG1 arbitrarily 100K, which
        # is (-2*MTU) about 80K of delay provision.
-       dcb buffer set dev $swp4 buffer-size all:0 1:$_100KB
+       pg1_size=$_100KB
+
+       setup_wait_dev_with_timeout $swp4
+
+       lanes_swp4=$(ethtool $swp4 | grep 'Lanes:')
+       lanes_swp4=${lanes_swp4#*"Lanes: "}
+
+       # 8-lane ports use two buffers among which the configured buffer
+       # is split, so double the size to get twice (20K + 80K).
+       if [[ $lanes_swp4 -eq 8 ]]; then
+               pg1_size=$((pg1_size * 2))
+       fi
+
+       dcb buffer set dev $swp4 buffer-size all:0 1:$pg1_size
 
        # bridges
        # -------
index fb850e0ec8375f143c8da0459ee48ca778bb3a29..616d3581419ca043fc715f9d067341c89513f6eb 100755 (executable)
@@ -10,7 +10,8 @@ lib_dir=$(dirname $0)/../../../../net/forwarding
 ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
        multiple_masks_test ctcam_edge_cases_test delta_simple_test \
        delta_two_masks_one_key_test delta_simple_rehash_test \
-       bloom_simple_test bloom_complex_test bloom_delta_test"
+       bloom_simple_test bloom_complex_test bloom_delta_test \
+       max_erp_entries_test max_group_size_test"
 NUM_NETIFS=2
 source $lib_dir/lib.sh
 source $lib_dir/tc_common.sh
@@ -983,6 +984,109 @@ bloom_delta_test()
        log_test "bloom delta test ($tcflags)"
 }
 
+max_erp_entries_test()
+{
+       # The number of eRP entries is limited. Once the maximum number of eRPs
+       # has been reached, filters cannot be added. This test verifies that
+       # when this limit is reached, inserstion fails without crashing.
+
+       RET=0
+
+       local num_masks=32
+       local num_regions=15
+       local chain_failed
+       local mask_failed
+       local ret
+
+       if [[ "$tcflags" != "skip_sw" ]]; then
+               return 0;
+       fi
+
+       for ((i=1; i < $num_regions; i++)); do
+               for ((j=$num_masks; j >= 0; j--)); do
+                       tc filter add dev $h2 ingress chain $i protocol ip \
+                               pref $i handle $j flower $tcflags \
+                               dst_ip 192.1.0.0/$j &> /dev/null
+                       ret=$?
+
+                       if [ $ret -ne 0 ]; then
+                               chain_failed=$i
+                               mask_failed=$j
+                               break 2
+                       fi
+               done
+       done
+
+       # We expect to exceed the maximum number of eRP entries, so that
+       # insertion eventually fails. Otherwise, the test should be adjusted to
+       # add more filters.
+       check_fail $ret "expected to exceed number of eRP entries"
+
+       for ((; i >= 1; i--)); do
+               for ((j=0; j <= $num_masks; j++)); do
+                       tc filter del dev $h2 ingress chain $i protocol ip \
+                               pref $i handle $j flower &> /dev/null
+               done
+       done
+
+       log_test "max eRP entries test ($tcflags). " \
+               "max chain $chain_failed, mask $mask_failed"
+}
+
+max_group_size_test()
+{
+       # The number of ACLs in an ACL group is limited. Once the maximum
+       # number of ACLs has been reached, filters cannot be added. This test
+       # verifies that when this limit is reached, insertion fails without
+       # crashing.
+
+       RET=0
+
+       local num_acls=32
+       local max_size
+       local ret
+
+       if [[ "$tcflags" != "skip_sw" ]]; then
+               return 0;
+       fi
+
+       for ((i=1; i < $num_acls; i++)); do
+               if [[ $(( i % 2 )) == 1 ]]; then
+                       tc filter add dev $h2 ingress pref $i proto ipv4 \
+                               flower $tcflags dst_ip 198.51.100.1/32 \
+                               ip_proto tcp tcp_flags 0x01/0x01 \
+                               action drop &> /dev/null
+               else
+                       tc filter add dev $h2 ingress pref $i proto ipv6 \
+                               flower $tcflags dst_ip 2001:db8:1::1/128 \
+                               action drop &> /dev/null
+               fi
+
+               ret=$?
+               [[ $ret -ne 0 ]] && max_size=$((i - 1)) && break
+       done
+
+       # We expect to exceed the maximum number of ACLs in a group, so that
+       # insertion eventually fails. Otherwise, the test should be adjusted to
+       # add more filters.
+       check_fail $ret "expected to exceed number of ACLs in a group"
+
+       for ((; i >= 1; i--)); do
+               if [[ $(( i % 2 )) == 1 ]]; then
+                       tc filter del dev $h2 ingress pref $i proto ipv4 \
+                               flower $tcflags dst_ip 198.51.100.1/32 \
+                               ip_proto tcp tcp_flags 0x01/0x01 \
+                               action drop &> /dev/null
+               else
+                       tc filter del dev $h2 ingress pref $i proto ipv6 \
+                               flower $tcflags dst_ip 2001:db8:1::1/128 \
+                               action drop &> /dev/null
+               fi
+       done
+
+       log_test "max ACL group size test ($tcflags). max size $max_size"
+}
+
 setup_prepare()
 {
        h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/drivers/net/netdevsim/config b/tools/testing/selftests/drivers/net/netdevsim/config
new file mode 100644 (file)
index 0000000..adf45a3
--- /dev/null
@@ -0,0 +1,10 @@
+CONFIG_DUMMY=y
+CONFIG_GENEVE=m
+CONFIG_IPV6=y
+CONFIG_NETDEVSIM=m
+CONFIG_NET_SCH_MQPRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_PSAMPLE=y
+CONFIG_PTP_1588_CLOCK_MOCK=y
+CONFIG_VXLAN=m
index 922744059aaa24527c34a75bda18eb2a304cfbc4..80160579e0cc1ec30accfccb17b7bfa4c3937b51 100644 (file)
@@ -51,6 +51,7 @@ function make_netdev {
     fi
 
     echo $NSIM_ID $@ > /sys/bus/netdevsim/new_device
+    udevadm settle
     # get new device name
     ls /sys/bus/netdevsim/devices/netdevsim${NSIM_ID}/net/
 }
index 0c56746e9ce0e649b8f6a15477ce972dca04326a..7d7829f57550d8345c3546ecd0994f819969440b 100755 (executable)
@@ -8,16 +8,20 @@ NSIM_NETDEV=$(make_netdev)
 
 set -o pipefail
 
+# Since commit 2b3ddcb35357 ("ethtool: fec: Change the prompt ...")
+# in ethtool CLI the Configured lines start with Supported/Configured.
+configured=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2 | head -1 | cut -d' ' -f1)
+
 # netdevsim starts out with None/None
 s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
-check $? "$s" "Configured FEC encodings: None
+check $? "$s" "$configured FEC encodings: None
 Active FEC encoding: None"
 
 # Test Auto
 $ETHTOOL --set-fec $NSIM_NETDEV encoding auto
 check $?
 s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
-check $? "$s" "Configured FEC encodings: Auto
+check $? "$s" "$configured FEC encodings: Auto
 Active FEC encoding: Off"
 
 # Test case in-sensitivity
@@ -25,7 +29,7 @@ for o in off Off OFF; do
     $ETHTOOL --set-fec $NSIM_NETDEV encoding $o
     check $?
     s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
-    check $? "$s" "Configured FEC encodings: Off
+    check $? "$s" "$configured FEC encodings: Off
 Active FEC encoding: Off"
 done
 
@@ -33,7 +37,7 @@ for o in BaseR baser BAser; do
     $ETHTOOL --set-fec $NSIM_NETDEV encoding $o
     check $?
     s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
-    check $? "$s" "Configured FEC encodings: BaseR
+    check $? "$s" "$configured FEC encodings: BaseR
 Active FEC encoding: BaseR"
 done
 
@@ -41,7 +45,7 @@ for o in llrs rs; do
     $ETHTOOL --set-fec $NSIM_NETDEV encoding $o
     check $?
     s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
-    check $? "$s" "Configured FEC encodings: ${o^^}
+    check $? "$s" "$configured FEC encodings: ${o^^}
 Active FEC encoding: ${o^^}"
 done
 
@@ -49,13 +53,13 @@ done
 $ETHTOOL --set-fec $NSIM_NETDEV encoding rs llrs
 check $?
 s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
-check $? "$s" "Configured FEC encodings: RS LLRS
+check $? "$s" "$configured FEC encodings: RS LLRS
 Active FEC encoding: LLRS"
 
 $ETHTOOL --set-fec $NSIM_NETDEV encoding rs off auto
 check $?
 s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
-check $? "$s" "Configured FEC encodings: Auto Off RS
+check $? "$s" "$configured FEC encodings: Auto Off RS
 Active FEC encoding: RS"
 
 # Make sure other link modes are rejected
index 1b08e042cf942a126626bbd3cad88152633f68a9..f98435c502f61aa665cefc39bea873e66a273ade 100755 (executable)
@@ -233,6 +233,7 @@ function print_tables {
 function get_netdev_name {
     local -n old=$1
 
+    udevadm settle
     new=$(ls /sys/class/net)
 
     for netdev in $new; do
@@ -269,6 +270,7 @@ for port in 0 1; do
        echo 1 > $NSIM_DEV_SYS/new_port
     fi
     NSIM_NETDEV=`get_netdev_name old_netdevs`
+    ifconfig $NSIM_NETDEV up
 
     msg="new NIC device created"
     exp0=( 0 0 0 0 )
@@ -430,6 +432,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     overflow_table0 "overflow NIC table"
@@ -487,6 +490,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     overflow_table0 "overflow NIC table"
@@ -543,6 +547,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     overflow_table0 "destroy NIC"
@@ -572,6 +577,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     msg="create VxLANs v6"
@@ -632,6 +638,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
@@ -687,6 +694,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     msg="create VxLANs v6"
@@ -746,6 +754,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     msg="create VxLANs v6"
@@ -876,6 +885,7 @@ msg="re-add a port"
 
 echo 2 > $NSIM_DEV_SYS/del_port
 echo 2 > $NSIM_DEV_SYS/new_port
+NSIM_NETDEV=`get_netdev_name old_netdevs`
 check_tables
 
 msg="replace VxLAN in overflow table"
index 265b6882cc21ed0c285ae9f37f9282bfb2e440d1..b5e3a3aad4bfbb5f1d77b4fd1bd4ae566f6394a1 100644 (file)
@@ -1,3 +1,5 @@
+CONFIG_DUMMY=y
+CONFIG_IPV6=y
+CONFIG_MACVLAN=y
 CONFIG_NET_TEAM=y
 CONFIG_NET_TEAM_MODE_LOADBALANCE=y
-CONFIG_MACVLAN=y
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/ringbuffer_subbuf_size.tc b/tools/testing/selftests/ftrace/test.d/00basic/ringbuffer_subbuf_size.tc
new file mode 100644 (file)
index 0000000..d44d09a
--- /dev/null
@@ -0,0 +1,95 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Change the ringbuffer sub-buffer size
+# requires: buffer_subbuf_size_kb
+# flags: instance
+
+get_buffer_data_size() {
+       sed -ne 's/^.*data.*size:\([0-9][0-9]*\).*/\1/p' events/header_page
+}
+
+get_buffer_data_offset() {
+       sed -ne 's/^.*data.*offset:\([0-9][0-9]*\).*/\1/p' events/header_page
+}
+
+get_event_header_size() {
+       type_len=`sed -ne 's/^.*type_len.*:[^0-9]*\([0-9][0-9]*\).*/\1/p' events/header_event`
+       time_len=`sed -ne 's/^.*time_delta.*:[^0-9]*\([0-9][0-9]*\).*/\1/p' events/header_event`
+       array_len=`sed -ne 's/^.*array.*:[^0-9]*\([0-9][0-9]*\).*/\1/p' events/header_event`
+       total_bits=$((type_len+time_len+array_len))
+       total_bits=$((total_bits+7))
+       echo $((total_bits/8))
+}
+
+get_print_event_buf_offset() {
+       sed -ne 's/^.*buf.*offset:\([0-9][0-9]*\).*/\1/p' events/ftrace/print/format
+}
+
+event_header_size=`get_event_header_size`
+print_header_size=`get_print_event_buf_offset`
+
+data_offset=`get_buffer_data_offset`
+
+marker_meta=$((event_header_size+print_header_size))
+
+make_str() {
+        cnt=$1
+       printf -- 'X%.0s' $(seq $cnt)
+}
+
+write_buffer() {
+       size=$1
+
+       str=`make_str $size`
+
+       # clear the buffer
+       echo > trace
+
+       # write the string into the marker
+       echo $str > trace_marker
+
+       echo $str
+}
+
+test_buffer() {
+       size_kb=$1
+       page_size=$((size_kb*1024))
+
+       size=`get_buffer_data_size`
+
+       # the size must be greater than or equal to page_size - data_offset
+       page_size=$((page_size-data_offset))
+       if [ $size -lt $page_size ]; then
+               exit fail
+       fi
+
+       # Now add a little more the meta data overhead will overflow
+
+       str=`write_buffer $size`
+
+       # Make sure the line was broken
+       new_str=`awk ' /tracing_mark_write:/ { sub(/^.*tracing_mark_write: /,"");printf "%s", $0; exit}' trace`
+
+       if [ "$new_str" = "$str" ]; then
+               exit fail;
+       fi
+
+       # Make sure the entire line can be found
+       new_str=`awk ' /tracing_mark_write:/ { sub(/^.*tracing_mark_write: /,"");printf "%s", $0; }' trace`
+
+       if [ "$new_str" != "$str" ]; then
+               exit fail;
+       fi
+}
+
+ORIG=`cat buffer_subbuf_size_kb`
+
+# Could test bigger sizes than 32K, but then creating the string
+# to write into the ring buffer takes too long
+for a in 4 8 16 32 ; do
+       echo $a > buffer_subbuf_size_kb
+       test_buffer $a
+done
+
+echo $ORIG > buffer_subbuf_size_kb
+
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/trace_marker.tc b/tools/testing/selftests/ftrace/test.d/00basic/trace_marker.tc
new file mode 100644 (file)
index 0000000..9aa0db2
--- /dev/null
@@ -0,0 +1,82 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Basic tests on writing to trace_marker
+# requires: trace_marker
+# flags: instance
+
+get_buffer_data_size() {
+       sed -ne 's/^.*data.*size:\([0-9][0-9]*\).*/\1/p' events/header_page
+}
+
+get_buffer_data_offset() {
+       sed -ne 's/^.*data.*offset:\([0-9][0-9]*\).*/\1/p' events/header_page
+}
+
+get_event_header_size() {
+       type_len=`sed -ne 's/^.*type_len.*:[^0-9]*\([0-9][0-9]*\).*/\1/p' events/header_event`
+       time_len=`sed -ne 's/^.*time_delta.*:[^0-9]*\([0-9][0-9]*\).*/\1/p' events/header_event`
+       array_len=`sed -ne 's/^.*array.*:[^0-9]*\([0-9][0-9]*\).*/\1/p' events/header_event`
+       total_bits=$((type_len+time_len+array_len))
+       total_bits=$((total_bits+7))
+       echo $((total_bits/8))
+}
+
+get_print_event_buf_offset() {
+       sed -ne 's/^.*buf.*offset:\([0-9][0-9]*\).*/\1/p' events/ftrace/print/format
+}
+
+event_header_size=`get_event_header_size`
+print_header_size=`get_print_event_buf_offset`
+
+data_offset=`get_buffer_data_offset`
+
+marker_meta=$((event_header_size+print_header_size))
+
+make_str() {
+        cnt=$1
+       # subtract two for \n\0 as marker adds these
+       cnt=$((cnt-2))
+       printf -- 'X%.0s' $(seq $cnt)
+}
+
+write_buffer() {
+       size=$1
+
+       str=`make_str $size`
+
+       # clear the buffer
+       echo > trace
+
+       # write the string into the marker
+       echo -n $str > trace_marker
+
+       echo $str
+}
+
+test_buffer() {
+
+       size=`get_buffer_data_size`
+       oneline_size=$((size-marker_meta))
+       echo size = $size
+       echo meta size = $marker_meta
+
+       # Now add a little more the meta data overhead will overflow
+
+       str=`write_buffer $size`
+
+       # Make sure the line was broken
+       new_str=`awk ' /tracing_mark_write:/ { sub(/^.*tracing_mark_write: /,"");printf "%s", $0; exit}' trace`
+
+       if [ "$new_str" = "$str" ]; then
+               exit fail;
+       fi
+
+       # Make sure the entire line can be found
+       new_str=`awk ' /tracing_mark_write:/ { sub(/^.*tracing_mark_write: /,"");printf "%s", $0; }' trace`
+
+       if [ "$new_str" != "$str" ]; then
+               exit fail;
+       fi
+}
+
+test_buffer
index 352fc39f3c6c160bfdcd2b3c655bfe319f00892c..b62c7dba6777f975dd9158f6788a6177307bc9e4 100644 (file)
@@ -880,8 +880,8 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
         does not overlap with other contacts. The value of `t` may be
         incremented over time to move the point along a linear path.
         """
-        x = 50 + 10 * contact_id + t
-        y = 100 + 100 * contact_id + t
+        x = 50 + 10 * contact_id + t * 11
+        y = 100 + 100 * contact_id + t * 11
         return test_multitouch.Touch(contact_id, x, y)
 
     def make_contacts(self, n, t=0):
@@ -902,8 +902,8 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
         tracking_id = contact_ids.tracking_id
         slot_num = contact_ids.slot_num
 
-        x = 50 + 10 * contact_id + t
-        y = 100 + 100 * contact_id + t
+        x = 50 + 10 * contact_id + t * 11
+        y = 100 + 100 * contact_id + t * 11
 
         # If the data isn't supposed to be stored in any slots, there is
         # nothing we can check for in the evdev stream.
index 6ed328c863c4f13ccc9db5bcf51092bd4ce183c4..1a881e7a21d1b26ce7ad19de1cc5ea07d3773ff9 100644 (file)
@@ -116,6 +116,7 @@ TEST_F(iommufd, cmd_length)
        TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
        TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
        TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
+       TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
        TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
        TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
                    out_iova_alignment);
@@ -271,7 +272,9 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
        struct iommu_hwpt_selftest data = {
                .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
        };
+       struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
        uint32_t nested_hwpt_id[2] = {};
+       uint32_t num_inv;
        uint32_t parent_hwpt_id = 0;
        uint32_t parent_hwpt_id_not_work = 0;
        uint32_t test_hwpt_id = 0;
@@ -330,6 +333,10 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
                                           &nested_hwpt_id[1],
                                           IOMMU_HWPT_DATA_SELFTEST, &data,
                                           sizeof(data));
+               test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
+                                             IOMMU_TEST_IOTLB_DEFAULT);
+               test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
+                                             IOMMU_TEST_IOTLB_DEFAULT);
 
                /* Negative test: a nested hwpt on top of a nested hwpt */
                test_err_hwpt_alloc_nested(EINVAL, self->device_id,
@@ -340,6 +347,151 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested)
                EXPECT_ERRNO(EBUSY,
                             _test_ioctl_destroy(self->fd, parent_hwpt_id));
 
+               /* hwpt_invalidate only supports a user-managed hwpt (nested) */
+               num_inv = 1;
+               test_err_hwpt_invalidate(ENOENT, parent_hwpt_id, inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(!num_inv);
+
+               /* Check data_type by passing zero-length array */
+               num_inv = 0;
+               test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(!num_inv);
+
+               /* Negative test: Invalid data_type */
+               num_inv = 1;
+               test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(!num_inv);
+
+               /* Negative test: structure size sanity */
+               num_inv = 1;
+               test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs) + 1, &num_inv);
+               assert(!num_inv);
+
+               num_inv = 1;
+               test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        1, &num_inv);
+               assert(!num_inv);
+
+               /* Negative test: invalid flag is passed */
+               num_inv = 1;
+               inv_reqs[0].flags = 0xffffffff;
+               test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(!num_inv);
+
+               /* Negative test: invalid data_uptr when array is not empty */
+               num_inv = 1;
+               inv_reqs[0].flags = 0;
+               test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(!num_inv);
+
+               /* Negative test: invalid entry_len when array is not empty */
+               num_inv = 1;
+               inv_reqs[0].flags = 0;
+               test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        0, &num_inv);
+               assert(!num_inv);
+
+               /* Negative test: invalid iotlb_id */
+               num_inv = 1;
+               inv_reqs[0].flags = 0;
+               inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
+               test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(!num_inv);
+
+               /*
+                * Invalidate the 1st iotlb entry but fail the 2nd request
+                * due to invalid flags configuration in the 2nd request.
+                */
+               num_inv = 2;
+               inv_reqs[0].flags = 0;
+               inv_reqs[0].iotlb_id = 0;
+               inv_reqs[1].flags = 0xffffffff;
+               inv_reqs[1].iotlb_id = 1;
+               test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(num_inv == 1);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
+                                         IOMMU_TEST_IOTLB_DEFAULT);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
+                                         IOMMU_TEST_IOTLB_DEFAULT);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
+                                         IOMMU_TEST_IOTLB_DEFAULT);
+
+               /*
+                * Invalidate the 1st iotlb entry but fail the 2nd request
+                * due to invalid iotlb_id configuration in the 2nd request.
+                */
+               num_inv = 2;
+               inv_reqs[0].flags = 0;
+               inv_reqs[0].iotlb_id = 0;
+               inv_reqs[1].flags = 0;
+               inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
+               test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(num_inv == 1);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
+                                         IOMMU_TEST_IOTLB_DEFAULT);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
+                                         IOMMU_TEST_IOTLB_DEFAULT);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
+                                         IOMMU_TEST_IOTLB_DEFAULT);
+
+               /* Invalidate the 2nd iotlb entry and verify */
+               num_inv = 1;
+               inv_reqs[0].flags = 0;
+               inv_reqs[0].iotlb_id = 1;
+               test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(num_inv == 1);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
+                                         IOMMU_TEST_IOTLB_DEFAULT);
+               test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
+                                         IOMMU_TEST_IOTLB_DEFAULT);
+
+               /* Invalidate the 3rd and 4th iotlb entries and verify */
+               num_inv = 2;
+               inv_reqs[0].flags = 0;
+               inv_reqs[0].iotlb_id = 2;
+               inv_reqs[1].flags = 0;
+               inv_reqs[1].iotlb_id = 3;
+               test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(num_inv == 2);
+               test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);
+
+               /* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
+               num_inv = 1;
+               inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
+               test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
+                                        IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+                                        sizeof(*inv_reqs), &num_inv);
+               assert(num_inv == 1);
+               test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);
+
                /* Attach device to nested_hwpt_id[0] that then will be busy */
                test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
                EXPECT_ERRNO(EBUSY,
index ad9202335656cc82e8475cf74aba72b6adf7e2b0..c646264aa41fdc1871c60bba6dc25841767f399b 100644 (file)
@@ -195,6 +195,61 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
                     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
                                          hwpt_id, data_type, data, data_len))
 
+#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected)                 \
+       ({                                                                     \
+               struct iommu_test_cmd test_cmd = {                             \
+                       .size = sizeof(test_cmd),                              \
+                       .op = IOMMU_TEST_OP_MD_CHECK_IOTLB,                    \
+                       .id = hwpt_id,                                         \
+                       .check_iotlb = {                                       \
+                               .id = iotlb_id,                                \
+                               .iotlb = expected,                             \
+                       },                                                     \
+               };                                                             \
+               ASSERT_EQ(0,                                                   \
+                         ioctl(self->fd,                                      \
+                               _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_IOTLB), \
+                               &test_cmd));                                   \
+       })
+
+#define test_cmd_hwpt_check_iotlb_all(hwpt_id, expected)                       \
+       ({                                                                     \
+               int i;                                                         \
+               for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)             \
+                       test_cmd_hwpt_check_iotlb(hwpt_id, i, expected);       \
+       })
+
+static int _test_cmd_hwpt_invalidate(int fd, __u32 hwpt_id, void *reqs,
+                                    uint32_t data_type, uint32_t lreq,
+                                    uint32_t *nreqs)
+{
+       struct iommu_hwpt_invalidate cmd = {
+               .size = sizeof(cmd),
+               .hwpt_id = hwpt_id,
+               .data_type = data_type,
+               .data_uptr = (uint64_t)reqs,
+               .entry_len = lreq,
+               .entry_num = *nreqs,
+       };
+       int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
+       *nreqs = cmd.entry_num;
+       return rc;
+}
+
+#define test_cmd_hwpt_invalidate(hwpt_id, reqs, data_type, lreq, nreqs)       \
+       ({                                                                    \
+               ASSERT_EQ(0,                                                  \
+                         _test_cmd_hwpt_invalidate(self->fd, hwpt_id, reqs,  \
+                                                   data_type, lreq, nreqs)); \
+       })
+#define test_err_hwpt_invalidate(_errno, hwpt_id, reqs, data_type, lreq, \
+                                nreqs)                                  \
+       ({                                                               \
+               EXPECT_ERRNO(_errno, _test_cmd_hwpt_invalidate(          \
+                                            self->fd, hwpt_id, reqs,    \
+                                            data_type, lreq, nreqs));   \
+       })
+
 static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
                                         unsigned int ioas_id)
 {
index 3e0c36b8ddd56ebbb2817ba76d3af58abcfdc4ee..492e937fab00648d5dbda4e1c98bdb1840468fde 100644 (file)
@@ -77,11 +77,12 @@ TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_tlb_flush
 TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test
 TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
-TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
 TEST_GEN_PROGS_x86_64 += x86_64/monitor_mwait_test
 TEST_GEN_PROGS_x86_64 += x86_64/nested_exceptions_test
 TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
 TEST_GEN_PROGS_x86_64 += x86_64/pmu_event_filter_test
+TEST_GEN_PROGS_x86_64 += x86_64/private_mem_conversions_test
+TEST_GEN_PROGS_x86_64 += x86_64/private_mem_kvm_exits_test
 TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
 TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
 TEST_GEN_PROGS_x86_64 += x86_64/smaller_maxphyaddr_emulation_test
@@ -124,6 +125,7 @@ TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
+TEST_GEN_PROGS_x86_64 += guest_memfd_test
 TEST_GEN_PROGS_x86_64 += guest_print_test
 TEST_GEN_PROGS_x86_64 += hardware_disable_test
 TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
@@ -184,12 +186,13 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
 
 TEST_GEN_PROGS_riscv += demand_paging_test
 TEST_GEN_PROGS_riscv += dirty_log_test
-TEST_GEN_PROGS_riscv += guest_print_test
 TEST_GEN_PROGS_riscv += get-reg-list
+TEST_GEN_PROGS_riscv += guest_print_test
+TEST_GEN_PROGS_riscv += kvm_binary_stats_test
 TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
 TEST_GEN_PROGS_riscv += kvm_page_table_test
 TEST_GEN_PROGS_riscv += set_memory_region_test
-TEST_GEN_PROGS_riscv += kvm_binary_stats_test
+TEST_GEN_PROGS_riscv += steal_time
 
 SPLIT_TESTS += get-reg-list
 
index eb4217b7c7687f9282511c4cf6be5949bded796e..08a5ca5bed56a9f602c01c024b19771a5cc9e219 100644 (file)
@@ -705,7 +705,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
        print_test_banner(mode, p);
 
-       vm = ____vm_create(mode);
+       vm = ____vm_create(VM_SHAPE(mode));
        setup_memslots(vm, p);
        kvm_vm_elf_load(vm, program_invocation_name);
        setup_ucall(vm);
index 936f3a8d1b83e8131c540215969e4e523056f5fa..6cbecf4997676f327095a399a75dcfa514fca13c 100644 (file)
@@ -699,7 +699,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
 
        pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
-       vm = __vm_create(mode, 1, extra_mem_pages);
+       vm = __vm_create(VM_SHAPE(mode), 1, extra_mem_pages);
 
        log_mode_create_vm_done(vm);
        *vcpu = vm_vcpu_add(vm, 0, guest_code);
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
new file mode 100644 (file)
index 0000000..c78a98c
--- /dev/null
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright Intel Corporation, 2023
+ *
+ * Author: Chao Peng <chao.p.peng@linux.intel.com>
+ */
+
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <fcntl.h>
+
+#include <linux/bitmap.h>
+#include <linux/falloc.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "test_util.h"
+#include "kvm_util_base.h"
+
+static void test_file_read_write(int fd)
+{
+       char buf[64];
+
+       TEST_ASSERT(read(fd, buf, sizeof(buf)) < 0,
+                   "read on a guest_mem fd should fail");
+       TEST_ASSERT(write(fd, buf, sizeof(buf)) < 0,
+                   "write on a guest_mem fd should fail");
+       TEST_ASSERT(pread(fd, buf, sizeof(buf), 0) < 0,
+                   "pread on a guest_mem fd should fail");
+       TEST_ASSERT(pwrite(fd, buf, sizeof(buf), 0) < 0,
+                   "pwrite on a guest_mem fd should fail");
+}
+
+static void test_mmap(int fd, size_t page_size)
+{
+       char *mem;
+
+       mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+       TEST_ASSERT_EQ(mem, MAP_FAILED);
+}
+
+static void test_file_size(int fd, size_t page_size, size_t total_size)
+{
+       struct stat sb;
+       int ret;
+
+       ret = fstat(fd, &sb);
+       TEST_ASSERT(!ret, "fstat should succeed");
+       TEST_ASSERT_EQ(sb.st_size, total_size);
+       TEST_ASSERT_EQ(sb.st_blksize, page_size);
+}
+
+static void test_fallocate(int fd, size_t page_size, size_t total_size)
+{
+       int ret;
+
+       ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, total_size);
+       TEST_ASSERT(!ret, "fallocate with aligned offset and size should succeed");
+
+       ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+                       page_size - 1, page_size);
+       TEST_ASSERT(ret, "fallocate with unaligned offset should fail");
+
+       ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, total_size, page_size);
+       TEST_ASSERT(ret, "fallocate beginning at total_size should fail");
+
+       ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, total_size + page_size, page_size);
+       TEST_ASSERT(ret, "fallocate beginning after total_size should fail");
+
+       ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+                       total_size, page_size);
+       TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) at total_size should succeed");
+
+       ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+                       total_size + page_size, page_size);
+       TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) after total_size should succeed");
+
+       ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+                       page_size, page_size - 1);
+       TEST_ASSERT(ret, "fallocate with unaligned size should fail");
+
+       ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+                       page_size, page_size);
+       TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) with aligned offset and size should succeed");
+
+       ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, page_size, page_size);
+       TEST_ASSERT(!ret, "fallocate to restore punched hole should succeed");
+}
+
+static void test_invalid_punch_hole(int fd, size_t page_size, size_t total_size)
+{
+       struct {
+               off_t offset;
+               off_t len;
+       } testcases[] = {
+               {0, 1},
+               {0, page_size - 1},
+               {0, page_size + 1},
+
+               {1, 1},
+               {1, page_size - 1},
+               {1, page_size},
+               {1, page_size + 1},
+
+               {page_size, 1},
+               {page_size, page_size - 1},
+               {page_size, page_size + 1},
+       };
+       int ret, i;
+
+       for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+               ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+                               testcases[i].offset, testcases[i].len);
+               TEST_ASSERT(ret == -1 && errno == EINVAL,
+                           "PUNCH_HOLE with !PAGE_SIZE offset (%lx) and/or length (%lx) should fail",
+                           testcases[i].offset, testcases[i].len);
+       }
+}
+
+static void test_create_guest_memfd_invalid(struct kvm_vm *vm)
+{
+       size_t page_size = getpagesize();
+       uint64_t flag;
+       size_t size;
+       int fd;
+
+       for (size = 1; size < page_size; size++) {
+               fd = __vm_create_guest_memfd(vm, size, 0);
+               TEST_ASSERT(fd == -1 && errno == EINVAL,
+                           "guest_memfd() with non-page-aligned page size '0x%lx' should fail with EINVAL",
+                           size);
+       }
+
+       for (flag = 0; flag; flag <<= 1) {
+               fd = __vm_create_guest_memfd(vm, page_size, flag);
+               TEST_ASSERT(fd == -1 && errno == EINVAL,
+                           "guest_memfd() with flag '0x%lx' should fail with EINVAL",
+                           flag);
+       }
+}
+
+static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
+{
+       int fd1, fd2, ret;
+       struct stat st1, st2;
+
+       fd1 = __vm_create_guest_memfd(vm, 4096, 0);
+       TEST_ASSERT(fd1 != -1, "memfd creation should succeed");
+
+       ret = fstat(fd1, &st1);
+       TEST_ASSERT(ret != -1, "memfd fstat should succeed");
+       TEST_ASSERT(st1.st_size == 4096, "memfd st_size should match requested size");
+
+       fd2 = __vm_create_guest_memfd(vm, 8192, 0);
+       TEST_ASSERT(fd2 != -1, "memfd creation should succeed");
+
+       ret = fstat(fd2, &st2);
+       TEST_ASSERT(ret != -1, "memfd fstat should succeed");
+       TEST_ASSERT(st2.st_size == 8192, "second memfd st_size should match requested size");
+
+       ret = fstat(fd1, &st1);
+       TEST_ASSERT(ret != -1, "memfd fstat should succeed");
+       TEST_ASSERT(st1.st_size == 4096, "first memfd st_size should still match requested size");
+       TEST_ASSERT(st1.st_ino != st2.st_ino, "different memfd should have different inode numbers");
+}
+
+int main(int argc, char *argv[])
+{
+       size_t page_size;
+       size_t total_size;
+       int fd;
+       struct kvm_vm *vm;
+
+       TEST_REQUIRE(kvm_has_cap(KVM_CAP_GUEST_MEMFD));
+
+       page_size = getpagesize();
+       total_size = page_size * 4;
+
+       vm = vm_create_barebones();
+
+       test_create_guest_memfd_invalid(vm);
+       test_create_guest_memfd_multiple(vm);
+
+       fd = vm_create_guest_memfd(vm, total_size, 0);
+
+       test_file_read_write(fd);
+       test_mmap(fd, page_size);
+       test_file_size(fd, page_size, total_size);
+       test_fallocate(fd, page_size, total_size);
+       test_invalid_punch_hole(fd, page_size, total_size);
+
+       close(fd);
+}
index c42d683102c7a477ddca956285d8854bd44fa7c6..cf20e44e86f2f9fb7feeccf88fdc93fecd7fbfd2 100644 (file)
@@ -119,8 +119,8 @@ enum {
 /* Access flag update enable/disable */
 #define TCR_EL1_HA             (1ULL << 39)
 
-void aarch64_get_supported_page_sizes(uint32_t ipa,
-                                     bool *ps4k, bool *ps16k, bool *ps64k);
+void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
+                                       uint32_t *ipa16k, uint32_t *ipa64k);
 
 void vm_init_descriptor_tables(struct kvm_vm *vm);
 void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
index b691df33e64e122a87996ef5cc17e8aceead4685..63f5167397ccb4bf9ed07ce90b99cc89b36bd20c 100644 (file)
@@ -11,8 +11,8 @@ struct guest_mode {
 
 extern struct guest_mode guest_modes[NUM_VM_MODES];
 
-#define guest_mode_append(mode, supported, enabled) ({ \
-       guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
+#define guest_mode_append(mode, enabled) ({ \
+       guest_modes[mode] = (struct guest_mode){ (enabled), (enabled) }; \
 })
 
 void guest_modes_append_default(void);
index a18db6a7b3cf474fe48cacf16bd962123f977e42..9e5afc472c14268bbe629cb9c1baf6049b702457 100644 (file)
@@ -44,7 +44,7 @@ typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
 typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
 
 struct userspace_mem_region {
-       struct kvm_userspace_memory_region region;
+       struct kvm_userspace_memory_region2 region;
        struct sparsebit *unused_phy_pages;
        int fd;
        off_t offset;
@@ -129,6 +129,7 @@ struct vcpu_reg_sublist {
        const char *name;
        long capability;
        int feature;
+       int feature_type;
        bool finalize;
        __u64 *regs;
        __u64 regs_n;
@@ -171,6 +172,7 @@ static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
 
 enum vm_guest_mode {
        VM_MODE_P52V48_4K,
+       VM_MODE_P52V48_16K,
        VM_MODE_P52V48_64K,
        VM_MODE_P48V48_4K,
        VM_MODE_P48V48_16K,
@@ -188,6 +190,23 @@ enum vm_guest_mode {
        NUM_VM_MODES,
 };
 
+struct vm_shape {
+       enum vm_guest_mode mode;
+       unsigned int type;
+};
+
+#define VM_TYPE_DEFAULT                        0
+
+#define VM_SHAPE(__mode)                       \
+({                                             \
+       struct vm_shape shape = {               \
+               .mode = (__mode),               \
+               .type = VM_TYPE_DEFAULT         \
+       };                                      \
+                                               \
+       shape;                                  \
+})
+
 #if defined(__aarch64__)
 
 extern enum vm_guest_mode vm_mode_default;
@@ -220,6 +239,8 @@ extern enum vm_guest_mode vm_mode_default;
 
 #endif
 
+#define VM_SHAPE_DEFAULT       VM_SHAPE(VM_MODE_DEFAULT)
+
 #define MIN_PAGE_SIZE          (1U << MIN_PAGE_SHIFT)
 #define PTES_PER_MIN_PAGE      ptes_per_page(MIN_PAGE_SIZE)
 
@@ -248,6 +269,13 @@ static inline bool kvm_has_cap(long cap)
 #define __KVM_SYSCALL_ERROR(_name, _ret) \
        "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
 
+/*
+ * Use the "inner", double-underscore macro when reporting errors from within
+ * other macros so that the name of ioctl() and not its literal numeric value
+ * is printed on error.  The "outer" macro is strongly preferred when reporting
+ * errors "directly", i.e. without an additional layer of macros, as it reduces
+ * the probability of passing in the wrong string.
+ */
 #define __KVM_IOCTL_ERROR(_name, _ret) __KVM_SYSCALL_ERROR(_name, _ret)
 #define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)
 
@@ -260,17 +288,13 @@ static inline bool kvm_has_cap(long cap)
 #define __kvm_ioctl(kvm_fd, cmd, arg)                          \
        kvm_do_ioctl(kvm_fd, cmd, arg)
 
-
-#define _kvm_ioctl(kvm_fd, cmd, name, arg)                     \
+#define kvm_ioctl(kvm_fd, cmd, arg)                            \
 ({                                                             \
        int ret = __kvm_ioctl(kvm_fd, cmd, arg);                \
                                                                \
-       TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));        \
+       TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));        \
 })
 
-#define kvm_ioctl(kvm_fd, cmd, arg) \
-       _kvm_ioctl(kvm_fd, cmd, #cmd, arg)
-
 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }
 
 #define __vm_ioctl(vm, cmd, arg)                               \
@@ -279,17 +303,42 @@ static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }
        kvm_do_ioctl((vm)->fd, cmd, arg);                       \
 })
 
-#define _vm_ioctl(vm, cmd, name, arg)                          \
+/*
+ * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
+ * the ioctl() failed because KVM killed/bugged the VM.  To detect a dead VM,
+ * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
+ * selftests existed and (b) should never outright fail, i.e. is supposed to
+ * return 0 or 1.  If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
+ * VM and its vCPUs, including KVM_CHECK_EXTENSION.
+ */
+#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)                               \
+do {                                                                                   \
+       int __errno = errno;                                                            \
+                                                                                       \
+       static_assert_is_vm(vm);                                                        \
+                                                                                       \
+       if (cond)                                                                       \
+               break;                                                                  \
+                                                                                       \
+       if (errno == EIO &&                                                             \
+           __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {     \
+               TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");     \
+               TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");  \
+       }                                                                               \
+       errno = __errno;                                                                \
+       TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));                                \
+} while (0)
+
+#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)          \
+       __TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)
+
+#define vm_ioctl(vm, cmd, arg)                                 \
 ({                                                             \
        int ret = __vm_ioctl(vm, cmd, arg);                     \
                                                                \
-       TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));        \
+       __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);               \
 })
 
-#define vm_ioctl(vm, cmd, arg)                                 \
-       _vm_ioctl(vm, cmd, #cmd, arg)
-
-
 static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }
 
 #define __vcpu_ioctl(vcpu, cmd, arg)                           \
@@ -298,16 +347,13 @@ static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }
        kvm_do_ioctl((vcpu)->fd, cmd, arg);                     \
 })
 
-#define _vcpu_ioctl(vcpu, cmd, name, arg)                      \
+#define vcpu_ioctl(vcpu, cmd, arg)                             \
 ({                                                             \
        int ret = __vcpu_ioctl(vcpu, cmd, arg);                 \
                                                                \
-       TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));        \
+       __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);       \
 })
 
-#define vcpu_ioctl(vcpu, cmd, arg)                             \
-       _vcpu_ioctl(vcpu, cmd, #cmd, arg)
-
 /*
  * Looks up and returns the value corresponding to the capability
  * (KVM_CAP_*) given by cap.
@@ -316,7 +362,7 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap)
 {
        int ret =  __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);
 
-       TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
+       TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
        return ret;
 }
 
@@ -333,6 +379,54 @@ static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
        vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
 }
 
+static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
+                                           uint64_t size, uint64_t attributes)
+{
+       struct kvm_memory_attributes attr = {
+               .attributes = attributes,
+               .address = gpa,
+               .size = size,
+               .flags = 0,
+       };
+
+       /*
+        * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes.  These flows
+        * need significant enhancements to support multiple attributes.
+        */
+       TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
+                   "Update me to support multiple attributes!");
+
+       vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
+}
+
+
+static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
+                                     uint64_t size)
+{
+       vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
+}
+
+static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
+                                    uint64_t size)
+{
+       vm_set_memory_attributes(vm, gpa, size, 0);
+}
+
+void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
+                           bool punch_hole);
+
+static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
+                                          uint64_t size)
+{
+       vm_guest_mem_fallocate(vm, gpa, size, true);
+}
+
+static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
+                                        uint64_t size)
+{
+       vm_guest_mem_fallocate(vm, gpa, size, false);
+}
+
 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
 const char *vm_guest_mode_string(uint32_t i);
 
@@ -375,7 +469,7 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm)
 {
        int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);
 
-       TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
+       TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
        return fd;
 }
 
@@ -431,14 +525,44 @@ static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
 
 void vm_create_irqchip(struct kvm_vm *vm);
 
+static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
+                                       uint64_t flags)
+{
+       struct kvm_create_guest_memfd guest_memfd = {
+               .size = size,
+               .flags = flags,
+       };
+
+       return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
+}
+
+static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
+                                       uint64_t flags)
+{
+       int fd = __vm_create_guest_memfd(vm, size, flags);
+
+       TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
+       return fd;
+}
+
 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
                               uint64_t gpa, uint64_t size, void *hva);
 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
                                uint64_t gpa, uint64_t size, void *hva);
+void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
+                               uint64_t gpa, uint64_t size, void *hva,
+                               uint32_t guest_memfd, uint64_t guest_memfd_offset);
+int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
+                                uint64_t gpa, uint64_t size, void *hva,
+                                uint32_t guest_memfd, uint64_t guest_memfd_offset);
+
 void vm_userspace_mem_region_add(struct kvm_vm *vm,
        enum vm_mem_backing_src_type src_type,
        uint64_t guest_paddr, uint32_t slot, uint64_t npages,
        uint32_t flags);
+void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
+               uint64_t guest_paddr, uint32_t slot, uint64_t npages,
+               uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
 
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
@@ -587,7 +711,7 @@ static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
 {
        int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);
 
-       TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
+       TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_CHECK_EXTENSION, fd, vcpu->vm);
        return fd;
 }
 
@@ -713,21 +837,33 @@ vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
  * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
  * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
  */
-struct kvm_vm *____vm_create(enum vm_guest_mode mode);
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+struct kvm_vm *____vm_create(struct vm_shape shape);
+struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
                           uint64_t nr_extra_pages);
 
 static inline struct kvm_vm *vm_create_barebones(void)
 {
-       return ____vm_create(VM_MODE_DEFAULT);
+       return ____vm_create(VM_SHAPE_DEFAULT);
 }
 
+#ifdef __x86_64__
+static inline struct kvm_vm *vm_create_barebones_protected_vm(void)
+{
+       const struct vm_shape shape = {
+               .mode = VM_MODE_DEFAULT,
+               .type = KVM_X86_SW_PROTECTED_VM,
+       };
+
+       return ____vm_create(shape);
+}
+#endif
+
 static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
 {
-       return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
+       return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
 }
 
-struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
                                      uint64_t extra_mem_pages,
                                      void *guest_code, struct kvm_vcpu *vcpus[]);
 
@@ -735,17 +871,27 @@ static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
                                                  void *guest_code,
                                                  struct kvm_vcpu *vcpus[])
 {
-       return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
+       return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
                                      guest_code, vcpus);
 }
 
+
+struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
+                                              struct kvm_vcpu **vcpu,
+                                              uint64_t extra_mem_pages,
+                                              void *guest_code);
+
 /*
  * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
  * additional pages of guest memory.  Returns the VM and vCPU (via out param).
  */
-struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
-                                        uint64_t extra_mem_pages,
-                                        void *guest_code);
+static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+                                                      uint64_t extra_mem_pages,
+                                                      void *guest_code)
+{
+       return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
+                                              extra_mem_pages, guest_code);
+}
 
 static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
                                                     void *guest_code)
@@ -753,6 +899,13 @@ static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
        return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
 }
 
+static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
+                                                          struct kvm_vcpu **vcpu,
+                                                          void *guest_code)
+{
+       return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
+}
+
 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
 
 void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
@@ -776,10 +929,6 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
        return n;
 }
 
-struct kvm_userspace_memory_region *
-kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
-                                uint64_t end);
-
 #define sync_global_to_guest(vm, g) ({                         \
        typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));     \
        memcpy(_p, &(g), sizeof(g));                            \
index 5b62a3d2aa9b5ee193464e05ebc5df169a7ab4b0..a0f9efe5a2a8de6afda4d4531f1ca6bda22f4b9c 100644 (file)
 #include "kvm_util.h"
 #include <linux/stringify.h>
 
-static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
-                                   uint64_t  size)
+static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
+                                   uint64_t idx, uint64_t size)
 {
-       return KVM_REG_RISCV | type | idx | size;
+       return KVM_REG_RISCV | type | subtype | idx | size;
 }
 
 #if __riscv_xlen == 64
@@ -22,24 +22,30 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
 #define KVM_REG_SIZE_ULONG     KVM_REG_SIZE_U32
 #endif
 
-#define RISCV_CONFIG_REG(name) __kvm_reg_id(KVM_REG_RISCV_CONFIG, \
-                                            KVM_REG_RISCV_CONFIG_REG(name), \
-                                            KVM_REG_SIZE_ULONG)
+#define RISCV_CONFIG_REG(name)         __kvm_reg_id(KVM_REG_RISCV_CONFIG, 0,           \
+                                                    KVM_REG_RISCV_CONFIG_REG(name),    \
+                                                    KVM_REG_SIZE_ULONG)
 
-#define RISCV_CORE_REG(name)   __kvm_reg_id(KVM_REG_RISCV_CORE, \
-                                            KVM_REG_RISCV_CORE_REG(name), \
-                                            KVM_REG_SIZE_ULONG)
+#define RISCV_CORE_REG(name)           __kvm_reg_id(KVM_REG_RISCV_CORE, 0,             \
+                                                    KVM_REG_RISCV_CORE_REG(name),      \
+                                                    KVM_REG_SIZE_ULONG)
 
-#define RISCV_CSR_REG(name)    __kvm_reg_id(KVM_REG_RISCV_CSR, \
-                                            KVM_REG_RISCV_CSR_REG(name), \
-                                            KVM_REG_SIZE_ULONG)
+#define RISCV_GENERAL_CSR_REG(name)    __kvm_reg_id(KVM_REG_RISCV_CSR,                 \
+                                                    KVM_REG_RISCV_CSR_GENERAL,         \
+                                                    KVM_REG_RISCV_CSR_REG(name),       \
+                                                    KVM_REG_SIZE_ULONG)
 
-#define RISCV_TIMER_REG(name)  __kvm_reg_id(KVM_REG_RISCV_TIMER, \
-                                            KVM_REG_RISCV_TIMER_REG(name), \
-                                            KVM_REG_SIZE_U64)
+#define RISCV_TIMER_REG(name)          __kvm_reg_id(KVM_REG_RISCV_TIMER, 0,            \
+                                                    KVM_REG_RISCV_TIMER_REG(name),     \
+                                                    KVM_REG_SIZE_U64)
 
-#define RISCV_ISA_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \
-                                            idx, KVM_REG_SIZE_ULONG)
+#define RISCV_ISA_EXT_REG(idx)         __kvm_reg_id(KVM_REG_RISCV_ISA_EXT,             \
+                                                    KVM_REG_RISCV_ISA_SINGLE,          \
+                                                    idx, KVM_REG_SIZE_ULONG)
+
+#define RISCV_SBI_EXT_REG(idx)         __kvm_reg_id(KVM_REG_RISCV_SBI_EXT,             \
+                                                    KVM_REG_RISCV_SBI_SINGLE,          \
+                                                    idx, KVM_REG_SIZE_ULONG)
 
 /* L3 index Bit[47:39] */
 #define PGTBL_L3_INDEX_MASK                    0x0000FF8000000000ULL
@@ -102,6 +108,17 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
 #define SATP_ASID_SHIFT                                44
 #define SATP_ASID_MASK                         _AC(0xFFFF, UL)
 
+/* SBI return error codes */
+#define SBI_SUCCESS                            0
+#define SBI_ERR_FAILURE                                -1
+#define SBI_ERR_NOT_SUPPORTED                  -2
+#define SBI_ERR_INVALID_PARAM                  -3
+#define SBI_ERR_DENIED                         -4
+#define SBI_ERR_INVALID_ADDRESS                        -5
+#define SBI_ERR_ALREADY_AVAILABLE              -6
+#define SBI_ERR_ALREADY_STARTED                        -7
+#define SBI_ERR_ALREADY_STOPPED                        -8
+
 #define SBI_EXT_EXPERIMENTAL_START             0x08000000
 #define SBI_EXT_EXPERIMENTAL_END               0x08FFFFFF
 
@@ -109,6 +126,15 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
 #define KVM_RISCV_SELFTESTS_SBI_UCALL          0
 #define KVM_RISCV_SELFTESTS_SBI_UNEXP          1
 
+enum sbi_ext_id {
+       SBI_EXT_BASE = 0x10,
+       SBI_EXT_STA = 0x535441,
+};
+
+enum sbi_ext_base_fid {
+       SBI_EXT_BASE_PROBE_EXT = 3,
+};
+
 struct sbiret {
        long error;
        long value;
@@ -119,4 +145,6 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
                        unsigned long arg3, unsigned long arg4,
                        unsigned long arg5);
 
+bool guest_sbi_probe_extension(int extid, long *out_val);
+
 #endif /* SELFTEST_KVM_PROCESSOR_H */
index 8e5f413a593d94d0757648814d425e2710a9e9e5..71a41fa924b7d09cb1a3aaf9bcc779d7d3311110 100644 (file)
@@ -142,6 +142,11 @@ static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t)
        return vm_mem_backing_src_alias(t)->flag & MAP_SHARED;
 }
 
+static inline bool backing_src_can_be_huge(enum vm_mem_backing_src_type t)
+{
+       return t != VM_MEM_SRC_ANONYMOUS && t != VM_MEM_SRC_SHMEM;
+}
+
 /* Aligns x up to the next multiple of size. Size must be a power of 2. */
 static inline uint64_t align_up(uint64_t x, uint64_t size)
 {
@@ -186,7 +191,7 @@ static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
 }
 
 int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args);
-int guest_snprintf(char *buf, int n, const char *fmt, ...);
+__printf(3, 4) int guest_snprintf(char *buf, int n, const char *fmt, ...);
 
 char *strdup_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2), nonnull(1)));
 
index ce33d306c2cba7d740697d9625d239be95891ee3..d9d6581b8d4f221d1d84ac6a8c085043ae5f4d48 100644 (file)
@@ -34,9 +34,10 @@ void ucall_arch_do_ucall(vm_vaddr_t uc);
 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
 
 void ucall(uint64_t cmd, int nargs, ...);
-void ucall_fmt(uint64_t cmd, const char *fmt, ...);
-void ucall_assert(uint64_t cmd, const char *exp, const char *file,
-                 unsigned int line, const char *fmt, ...);
+__printf(2, 3) void ucall_fmt(uint64_t cmd, const char *fmt, ...);
+__printf(5, 6) void ucall_assert(uint64_t cmd, const char *exp,
+                                const char *file, unsigned int line,
+                                const char *fmt, ...);
 uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
 void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
 int ucall_nr_pages_required(uint64_t page_size);
@@ -52,6 +53,17 @@ int ucall_nr_pages_required(uint64_t page_size);
 #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
                                ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
 #define GUEST_SYNC(stage)      ucall(UCALL_SYNC, 2, "hello", stage)
+#define GUEST_SYNC1(arg0)      ucall(UCALL_SYNC, 1, arg0)
+#define GUEST_SYNC2(arg0, arg1)        ucall(UCALL_SYNC, 2, arg0, arg1)
+#define GUEST_SYNC3(arg0, arg1, arg2) \
+                               ucall(UCALL_SYNC, 3, arg0, arg1, arg2)
+#define GUEST_SYNC4(arg0, arg1, arg2, arg3) \
+                               ucall(UCALL_SYNC, 4, arg0, arg1, arg2, arg3)
+#define GUEST_SYNC5(arg0, arg1, arg2, arg3, arg4) \
+                               ucall(UCALL_SYNC, 5, arg0, arg1, arg2, arg3, arg4)
+#define GUEST_SYNC6(arg0, arg1, arg2, arg3, arg4, arg5) \
+                               ucall(UCALL_SYNC, 6, arg0, arg1, arg2, arg3, arg4, arg5)
+
 #define GUEST_PRINTF(_fmt, _args...) ucall_fmt(UCALL_PRINTF, _fmt, ##_args)
 #define GUEST_DONE()           ucall(UCALL_DONE, 0)
 
index 25bc61dac5fbe69bcc62195dde70ef53f56868b0..a84863503fcb46cda532840f3be4512cf35061c3 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/msr-index.h>
 #include <asm/prctl.h>
 
+#include <linux/kvm_para.h>
 #include <linux/stringify.h>
 
 #include "../kvm_util.h"
@@ -1194,6 +1195,20 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
 void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
 
+static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa,
+                                                    uint64_t size, uint64_t flags)
+{
+       return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
+}
+
+static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
+                                              uint64_t flags)
+{
+       uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
+
+       GUEST_ASSERT(!ret);
+}
+
 void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
 
 #define vm_xsave_require_permission(xfeature)  \
index 69f26d80c8216ed15c63984794223346f7d307a2..e37dc9c21888f4bc4ed06bf3bacff89e28c68b5d 100644 (file)
@@ -254,7 +254,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 
        /* Create a VM with enough guest pages */
        guest_num_pages = test_mem_size / guest_page_size;
-       vm = __vm_create_with_vcpus(mode, nr_vcpus, guest_num_pages,
+       vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus, guest_num_pages,
                                    guest_code, test_args.vcpus);
 
        /* Align down GPA of the testing memslot */
index 6fe12e985ba568bea7b2a7829730f503b674af76..41c776b642c0cd0be722e4bad1e0e9cc1f0cff80 100644 (file)
@@ -12,6 +12,7 @@
 #include "kvm_util.h"
 #include "processor.h"
 #include <linux/bitfield.h>
+#include <linux/sizes.h>
 
 #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN    0xac0000
 
@@ -58,13 +59,25 @@ static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
        return (gva >> vm->page_shift) & mask;
 }
 
+static inline bool use_lpa2_pte_format(struct kvm_vm *vm)
+{
+       return (vm->page_size == SZ_4K || vm->page_size == SZ_16K) &&
+           (vm->pa_bits > 48 || vm->va_bits > 48);
+}
+
 static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
 {
        uint64_t pte;
 
-       pte = pa & GENMASK(47, vm->page_shift);
-       if (vm->page_shift == 16)
-               pte |= FIELD_GET(GENMASK(51, 48), pa) << 12;
+       if (use_lpa2_pte_format(vm)) {
+               pte = pa & GENMASK(49, vm->page_shift);
+               pte |= FIELD_GET(GENMASK(51, 50), pa) << 8;
+               attrs &= ~GENMASK(9, 8);
+       } else {
+               pte = pa & GENMASK(47, vm->page_shift);
+               if (vm->page_shift == 16)
+                       pte |= FIELD_GET(GENMASK(51, 48), pa) << 12;
+       }
        pte |= attrs;
 
        return pte;
@@ -74,9 +87,14 @@ static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
 {
        uint64_t pa;
 
-       pa = pte & GENMASK(47, vm->page_shift);
-       if (vm->page_shift == 16)
-               pa |= FIELD_GET(GENMASK(15, 12), pte) << 48;
+       if (use_lpa2_pte_format(vm)) {
+               pa = pte & GENMASK(49, vm->page_shift);
+               pa |= FIELD_GET(GENMASK(9, 8), pte) << 50;
+       } else {
+               pa = pte & GENMASK(47, vm->page_shift);
+               if (vm->page_shift == 16)
+                       pa |= FIELD_GET(GENMASK(15, 12), pte) << 48;
+       }
 
        return pa;
 }
@@ -266,9 +284,6 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
 
        /* Configure base granule size */
        switch (vm->mode) {
-       case VM_MODE_P52V48_4K:
-               TEST_FAIL("AArch64 does not support 4K sized pages "
-                         "with 52-bit physical address ranges");
        case VM_MODE_PXXV48_4K:
                TEST_FAIL("AArch64 does not support 4K sized pages "
                          "with ANY-bit physical address ranges");
@@ -278,12 +293,14 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
        case VM_MODE_P36V48_64K:
                tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
                break;
+       case VM_MODE_P52V48_16K:
        case VM_MODE_P48V48_16K:
        case VM_MODE_P40V48_16K:
        case VM_MODE_P36V48_16K:
        case VM_MODE_P36V47_16K:
                tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
                break;
+       case VM_MODE_P52V48_4K:
        case VM_MODE_P48V48_4K:
        case VM_MODE_P40V48_4K:
        case VM_MODE_P36V48_4K:
@@ -297,6 +314,8 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
 
        /* Configure output size */
        switch (vm->mode) {
+       case VM_MODE_P52V48_4K:
+       case VM_MODE_P52V48_16K:
        case VM_MODE_P52V48_64K:
                tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
                ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2;
@@ -325,6 +344,8 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
        /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */;
        tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
        tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
+       if (use_lpa2_pte_format(vm))
+               tcr_el1 |= (1ul << 59) /* DS */;
 
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
@@ -492,12 +513,24 @@ uint32_t guest_get_vcpuid(void)
        return read_sysreg(tpidr_el1);
 }
 
-void aarch64_get_supported_page_sizes(uint32_t ipa,
-                                     bool *ps4k, bool *ps16k, bool *ps64k)
+static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
+                               uint32_t not_sup_val, uint32_t ipa52_min_val)
+{
+       if (gran == not_sup_val)
+               return 0;
+       else if (gran >= ipa52_min_val && vm_ipa >= 52)
+               return 52;
+       else
+               return min(vm_ipa, 48U);
+}
+
+void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
+                                       uint32_t *ipa16k, uint32_t *ipa64k)
 {
        struct kvm_vcpu_init preferred_init;
        int kvm_fd, vm_fd, vcpu_fd, err;
        uint64_t val;
+       uint32_t gran;
        struct kvm_one_reg reg = {
                .id     = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
                .addr   = (uint64_t)&val,
@@ -518,9 +551,17 @@ void aarch64_get_supported_page_sizes(uint32_t ipa,
        err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));
 
-       *ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val) != 0xf;
-       *ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val) == 0;
-       *ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val) != 0;
+       gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val);
+       *ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI,
+                                       ID_AA64MMFR0_EL1_TGRAN4_52_BIT);
+
+       gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val);
+       *ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI,
+                                       ID_AA64MMFR0_EL1_TGRAN64_IMP);
+
+       gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val);
+       *ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI,
+                                       ID_AA64MMFR0_EL1_TGRAN16_52_BIT);
 
        close(vcpu_fd);
        close(vm_fd);
index 1df3ce4b16fd86a3f1750700d1392872a60cbf92..b04901e5513874b5c51606b6b63145806b875b58 100644 (file)
@@ -14,37 +14,33 @@ struct guest_mode guest_modes[NUM_VM_MODES];
 void guest_modes_append_default(void)
 {
 #ifndef __aarch64__
-       guest_mode_append(VM_MODE_DEFAULT, true, true);
+       guest_mode_append(VM_MODE_DEFAULT, true);
 #else
        {
                unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-               bool ps4k, ps16k, ps64k;
+               uint32_t ipa4k, ipa16k, ipa64k;
                int i;
 
-               aarch64_get_supported_page_sizes(limit, &ps4k, &ps16k, &ps64k);
+               aarch64_get_supported_page_sizes(limit, &ipa4k, &ipa16k, &ipa64k);
 
-               vm_mode_default = NUM_VM_MODES;
+               guest_mode_append(VM_MODE_P52V48_4K, ipa4k >= 52);
+               guest_mode_append(VM_MODE_P52V48_16K, ipa16k >= 52);
+               guest_mode_append(VM_MODE_P52V48_64K, ipa64k >= 52);
 
-               if (limit >= 52)
-                       guest_mode_append(VM_MODE_P52V48_64K, ps64k, ps64k);
-               if (limit >= 48) {
-                       guest_mode_append(VM_MODE_P48V48_4K, ps4k, ps4k);
-                       guest_mode_append(VM_MODE_P48V48_16K, ps16k, ps16k);
-                       guest_mode_append(VM_MODE_P48V48_64K, ps64k, ps64k);
-               }
-               if (limit >= 40) {
-                       guest_mode_append(VM_MODE_P40V48_4K, ps4k, ps4k);
-                       guest_mode_append(VM_MODE_P40V48_16K, ps16k, ps16k);
-                       guest_mode_append(VM_MODE_P40V48_64K, ps64k, ps64k);
-                       if (ps4k)
-                               vm_mode_default = VM_MODE_P40V48_4K;
-               }
-               if (limit >= 36) {
-                       guest_mode_append(VM_MODE_P36V48_4K, ps4k, ps4k);
-                       guest_mode_append(VM_MODE_P36V48_16K, ps16k, ps16k);
-                       guest_mode_append(VM_MODE_P36V48_64K, ps64k, ps64k);
-                       guest_mode_append(VM_MODE_P36V47_16K, ps16k, ps16k);
-               }
+               guest_mode_append(VM_MODE_P48V48_4K, ipa4k >= 48);
+               guest_mode_append(VM_MODE_P48V48_16K, ipa16k >= 48);
+               guest_mode_append(VM_MODE_P48V48_64K, ipa64k >= 48);
+
+               guest_mode_append(VM_MODE_P40V48_4K, ipa4k >= 40);
+               guest_mode_append(VM_MODE_P40V48_16K, ipa16k >= 40);
+               guest_mode_append(VM_MODE_P40V48_64K, ipa64k >= 40);
+
+               guest_mode_append(VM_MODE_P36V48_4K, ipa4k >= 36);
+               guest_mode_append(VM_MODE_P36V48_16K, ipa16k >= 36);
+               guest_mode_append(VM_MODE_P36V48_64K, ipa64k >= 36);
+               guest_mode_append(VM_MODE_P36V47_16K, ipa16k >= 36);
+
+               vm_mode_default = ipa4k >= 40 ? VM_MODE_P40V48_4K : NUM_VM_MODES;
 
                /*
                 * Pick the first supported IPA size if the default
@@ -72,7 +68,7 @@ void guest_modes_append_default(void)
                close(kvm_fd);
                /* Starting with z13 we have 47bits of physical address */
                if (info.ibc >= 0x30)
-                       guest_mode_append(VM_MODE_P47V64_4K, true, true);
+                       guest_mode_append(VM_MODE_P47V64_4K, true);
        }
 #endif
 #ifdef __riscv
@@ -80,9 +76,9 @@ void guest_modes_append_default(void)
                unsigned int sz = kvm_check_cap(KVM_CAP_VM_GPA_BITS);
 
                if (sz >= 52)
-                       guest_mode_append(VM_MODE_P52V48_4K, true, true);
+                       guest_mode_append(VM_MODE_P52V48_4K, true);
                if (sz >= 48)
-                       guest_mode_append(VM_MODE_P48V48_4K, true, true);
+                       guest_mode_append(VM_MODE_P48V48_4K, true);
        }
 #endif
 }
index 7a8af1821f5dae2993995c60e0ef08faa6431919..e066d584c65611b4da45b0312734c3fab7b3dcd6 100644 (file)
@@ -148,6 +148,7 @@ const char *vm_guest_mode_string(uint32_t i)
 {
        static const char * const strings[] = {
                [VM_MODE_P52V48_4K]     = "PA-bits:52,  VA-bits:48,  4K pages",
+               [VM_MODE_P52V48_16K]    = "PA-bits:52,  VA-bits:48, 16K pages",
                [VM_MODE_P52V48_64K]    = "PA-bits:52,  VA-bits:48, 64K pages",
                [VM_MODE_P48V48_4K]     = "PA-bits:48,  VA-bits:48,  4K pages",
                [VM_MODE_P48V48_16K]    = "PA-bits:48,  VA-bits:48, 16K pages",
@@ -173,6 +174,7 @@ const char *vm_guest_mode_string(uint32_t i)
 
 const struct vm_guest_mode_params vm_guest_mode_params[] = {
        [VM_MODE_P52V48_4K]     = { 52, 48,  0x1000, 12 },
+       [VM_MODE_P52V48_16K]    = { 52, 48,  0x4000, 14 },
        [VM_MODE_P52V48_64K]    = { 52, 48, 0x10000, 16 },
        [VM_MODE_P48V48_4K]     = { 48, 48,  0x1000, 12 },
        [VM_MODE_P48V48_16K]    = { 48, 48,  0x4000, 14 },
@@ -209,7 +211,7 @@ __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
                (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
 }
 
-struct kvm_vm *____vm_create(enum vm_guest_mode mode)
+struct kvm_vm *____vm_create(struct vm_shape shape)
 {
        struct kvm_vm *vm;
 
@@ -221,13 +223,13 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode)
        vm->regions.hva_tree = RB_ROOT;
        hash_init(vm->regions.slot_hash);
 
-       vm->mode = mode;
-       vm->type = 0;
+       vm->mode = shape.mode;
+       vm->type = shape.type;
 
-       vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
-       vm->va_bits = vm_guest_mode_params[mode].va_bits;
-       vm->page_size = vm_guest_mode_params[mode].page_size;
-       vm->page_shift = vm_guest_mode_params[mode].page_shift;
+       vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits;
+       vm->va_bits = vm_guest_mode_params[vm->mode].va_bits;
+       vm->page_size = vm_guest_mode_params[vm->mode].page_size;
+       vm->page_shift = vm_guest_mode_params[vm->mode].page_shift;
 
        /* Setup mode specific traits. */
        switch (vm->mode) {
@@ -251,6 +253,7 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode)
        case VM_MODE_P36V48_64K:
                vm->pgtable_levels = 3;
                break;
+       case VM_MODE_P52V48_16K:
        case VM_MODE_P48V48_16K:
        case VM_MODE_P40V48_16K:
        case VM_MODE_P36V48_16K:
@@ -265,7 +268,7 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode)
                /*
                 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
                 * it doesn't take effect unless a CR4.LA57 is set, which it
-                * isn't for this VM_MODE.
+                * isn't for this mode (48-bit virtual address space).
                 */
                TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
                            "Linear address width (%d bits) not supported",
@@ -285,10 +288,11 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode)
                vm->pgtable_levels = 5;
                break;
        default:
-               TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
+               TEST_FAIL("Unknown guest mode: 0x%x", vm->mode);
        }
 
 #ifdef __aarch64__
+       TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types");
        if (vm->pa_bits != 40)
                vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
 #endif
@@ -347,19 +351,19 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
        return vm_adjust_num_guest_pages(mode, nr_pages);
 }
 
-struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
                           uint64_t nr_extra_pages)
 {
-       uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
+       uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
                                                 nr_extra_pages);
        struct userspace_mem_region *slot0;
        struct kvm_vm *vm;
        int i;
 
-       pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
-                vm_guest_mode_string(mode), nr_pages);
+       pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__,
+                vm_guest_mode_string(shape.mode), shape.type, nr_pages);
 
-       vm = ____vm_create(mode);
+       vm = ____vm_create(shape);
 
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
        for (i = 0; i < NR_MEM_REGIONS; i++)
@@ -400,7 +404,7 @@ struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
  * extra_mem_pages is only used to calculate the maximum page table size,
  * no real memory allocation for non-slot0 memory in this function.
  */
-struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
                                      uint64_t extra_mem_pages,
                                      void *guest_code, struct kvm_vcpu *vcpus[])
 {
@@ -409,7 +413,7 @@ struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus
 
        TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
 
-       vm = __vm_create(mode, nr_vcpus, extra_mem_pages);
+       vm = __vm_create(shape, nr_vcpus, extra_mem_pages);
 
        for (i = 0; i < nr_vcpus; ++i)
                vcpus[i] = vm_vcpu_add(vm, i, guest_code);
@@ -417,15 +421,15 @@ struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus
        return vm;
 }
 
-struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
-                                        uint64_t extra_mem_pages,
-                                        void *guest_code)
+struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
+                                              struct kvm_vcpu **vcpu,
+                                              uint64_t extra_mem_pages,
+                                              void *guest_code)
 {
        struct kvm_vcpu *vcpus[1];
        struct kvm_vm *vm;
 
-       vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages,
-                                   guest_code, vcpus);
+       vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus);
 
        *vcpu = vcpus[0];
        return vm;
@@ -453,8 +457,9 @@ void kvm_vm_restart(struct kvm_vm *vmp)
                vm_create_irqchip(vmp);
 
        hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
-               int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
-               TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
+               int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION2, &region->region);
+
+               TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
                            "  rc: %i errno: %i\n"
                            "  slot: %u flags: 0x%x\n"
                            "  guest_phys_addr: 0x%llx size: 0x%llx",
@@ -590,35 +595,6 @@ userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
        return NULL;
 }
 
-/*
- * KVM Userspace Memory Region Find
- *
- * Input Args:
- *   vm - Virtual Machine
- *   start - Starting VM physical address
- *   end - Ending VM physical address, inclusive.
- *
- * Output Args: None
- *
- * Return:
- *   Pointer to overlapping region, NULL if no such region.
- *
- * Public interface to userspace_mem_region_find. Allows tests to look up
- * the memslot datastructure for a given range of guest physical memory.
- */
-struct kvm_userspace_memory_region *
-kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
-                                uint64_t end)
-{
-       struct userspace_mem_region *region;
-
-       region = userspace_mem_region_find(vm, start, end);
-       if (!region)
-               return NULL;
-
-       return &region->region;
-}
-
 __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
 {
 
@@ -686,7 +662,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
        }
 
        region->region.memory_size = 0;
-       vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
+       vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
 
        sparsebit_free(&region->unused_phy_pages);
        ret = munmap(region->mmap_start, region->mmap_size);
@@ -697,6 +673,8 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
                TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
                close(region->fd);
        }
+       if (region->region.guest_memfd >= 0)
+               close(region->region.guest_memfd);
 
        free(region);
 }
@@ -898,36 +876,44 @@ void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
                    errno, strerror(errno));
 }
 
-/*
- * VM Userspace Memory Region Add
- *
- * Input Args:
- *   vm - Virtual Machine
- *   src_type - Storage source for this region.
- *              NULL to use anonymous memory.
- *   guest_paddr - Starting guest physical address
- *   slot - KVM region slot
- *   npages - Number of physical pages
- *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
- *
- * Output Args: None
- *
- * Return: None
- *
- * Allocates a memory area of the number of pages specified by npages
- * and maps it to the VM specified by vm, at a starting physical address
- * given by guest_paddr.  The region is created with a KVM region slot
- * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM.  The
- * region is created with the flags given by flags.
- */
-void vm_userspace_mem_region_add(struct kvm_vm *vm,
-       enum vm_mem_backing_src_type src_type,
-       uint64_t guest_paddr, uint32_t slot, uint64_t npages,
-       uint32_t flags)
+int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
+                                uint64_t gpa, uint64_t size, void *hva,
+                                uint32_t guest_memfd, uint64_t guest_memfd_offset)
+{
+       struct kvm_userspace_memory_region2 region = {
+               .slot = slot,
+               .flags = flags,
+               .guest_phys_addr = gpa,
+               .memory_size = size,
+               .userspace_addr = (uintptr_t)hva,
+               .guest_memfd = guest_memfd,
+               .guest_memfd_offset = guest_memfd_offset,
+       };
+
+       return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region);
+}
+
+void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
+                               uint64_t gpa, uint64_t size, void *hva,
+                               uint32_t guest_memfd, uint64_t guest_memfd_offset)
+{
+       int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
+                                              guest_memfd, guest_memfd_offset);
+
+       TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed, errno = %d (%s)",
+                   errno, strerror(errno));
+}
+
+
+/* FIXME: This thing needs to be ripped apart and rewritten. */
+void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
+               uint64_t guest_paddr, uint32_t slot, uint64_t npages,
+               uint32_t flags, int guest_memfd, uint64_t guest_memfd_offset)
 {
        int ret;
        struct userspace_mem_region *region;
        size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
+       size_t mem_size = npages * vm->page_size;
        size_t alignment;
 
        TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
@@ -980,7 +966,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
        /* Allocate and initialize new mem region structure. */
        region = calloc(1, sizeof(*region));
        TEST_ASSERT(region != NULL, "Insufficient Memory");
-       region->mmap_size = npages * vm->page_size;
+       region->mmap_size = mem_size;
 
 #ifdef __s390x__
        /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
@@ -1027,14 +1013,38 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
        /* As needed perform madvise */
        if ((src_type == VM_MEM_SRC_ANONYMOUS ||
             src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
-               ret = madvise(region->host_mem, npages * vm->page_size,
+               ret = madvise(region->host_mem, mem_size,
                              src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
                TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
-                           region->host_mem, npages * vm->page_size,
+                           region->host_mem, mem_size,
                            vm_mem_backing_src_alias(src_type)->name);
        }
 
        region->backing_src_type = src_type;
+
+       if (flags & KVM_MEM_GUEST_MEMFD) {
+               if (guest_memfd < 0) {
+                       uint32_t guest_memfd_flags = 0;
+                       TEST_ASSERT(!guest_memfd_offset,
+                                   "Offset must be zero when creating new guest_memfd");
+                       guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags);
+               } else {
+                       /*
+                        * Install a unique fd for each memslot so that the fd
+                        * can be closed when the region is deleted without
+                        * needing to track if the fd is owned by the framework
+                        * or by the caller.
+                        */
+                       guest_memfd = dup(guest_memfd);
+                       TEST_ASSERT(guest_memfd >= 0, __KVM_SYSCALL_ERROR("dup()", guest_memfd));
+               }
+
+               region->region.guest_memfd = guest_memfd;
+               region->region.guest_memfd_offset = guest_memfd_offset;
+       } else {
+               region->region.guest_memfd = -1;
+       }
+
        region->unused_phy_pages = sparsebit_alloc();
        sparsebit_set_num(region->unused_phy_pages,
                guest_paddr >> vm->page_shift, npages);
@@ -1043,13 +1053,14 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
        region->region.guest_phys_addr = guest_paddr;
        region->region.memory_size = npages * vm->page_size;
        region->region.userspace_addr = (uintptr_t) region->host_mem;
-       ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
-       TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
+       ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
+       TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
                "  rc: %i errno: %i\n"
                "  slot: %u flags: 0x%x\n"
-               "  guest_phys_addr: 0x%lx size: 0x%lx",
+               "  guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d\n",
                ret, errno, slot, flags,
-               guest_paddr, (uint64_t) region->region.memory_size);
+               guest_paddr, (uint64_t) region->region.memory_size,
+               region->region.guest_memfd);
 
        /* Add to quick lookup data structures */
        vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
@@ -1070,6 +1081,14 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
        }
 }
 
+void vm_userspace_mem_region_add(struct kvm_vm *vm,
+                                enum vm_mem_backing_src_type src_type,
+                                uint64_t guest_paddr, uint32_t slot,
+                                uint64_t npages, uint32_t flags)
+{
+       vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0);
+}
+
 /*
  * Memslot to region
  *
@@ -1126,9 +1145,9 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
 
        region->region.flags = flags;
 
-       ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
+       ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
 
-       TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
+       TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
                "  rc: %i errno: %i slot: %u flags: 0x%x",
                ret, errno, slot, flags);
 }
@@ -1156,9 +1175,9 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
 
        region->region.guest_phys_addr = new_gpa;
 
-       ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
+       ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
 
-       TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
+       TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION2 failed\n"
                    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
                    ret, errno, slot, new_gpa);
 }
@@ -1181,6 +1200,34 @@ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
        __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
 }
 
+void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
+                           bool punch_hole)
+{
+       const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0);
+       struct userspace_mem_region *region;
+       uint64_t end = base + size;
+       uint64_t gpa, len;
+       off_t fd_offset;
+       int ret;
+
+       for (gpa = base; gpa < end; gpa += len) {
+               uint64_t offset;
+
+               region = userspace_mem_region_find(vm, gpa, gpa);
+               TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD,
+                           "Private memory region not found for GPA 0x%lx", gpa);
+
+               offset = gpa - region->region.guest_phys_addr;
+               fd_offset = region->region.guest_memfd_offset + offset;
+               len = min_t(uint64_t, end - gpa, region->region.memory_size - offset);
+
+               ret = fallocate(region->region.guest_memfd, mode, fd_offset, len);
+               TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx\n",
+                           punch_hole ? "punch hole" : "allocate", gpa, len,
+                           region->region.guest_memfd, mode, fd_offset);
+       }
+}
+
 /* Returns the size of a vCPU's kvm_run structure. */
 static int vcpu_mmap_sz(void)
 {
@@ -1227,7 +1274,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
        vcpu->vm = vm;
        vcpu->id = vcpu_id;
        vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
-       TEST_ASSERT(vcpu->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu->fd));
+       TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm);
 
        TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
                "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
index df457452d1464f3fa659d38202e8d8e7dc89de0b..d05487e5a371df1d17c96d4fbec1b1f6b2e60c0f 100644 (file)
@@ -168,7 +168,8 @@ struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
         * The memory is also added to memslot 0, but that's a benign side
         * effect as KVM allows aliasing HVAs in meslots.
         */
-       vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
+       vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus,
+                                   slot0_pages + guest_num_pages,
                                    memstress_guest_code, vcpus);
 
        args->vm = vm;
index d146ca71e0c0948b74830ebc7f2c4f05e407a1aa..7ca736fb4194046072bf69b3210f0fefd8ce0834 100644 (file)
@@ -201,7 +201,7 @@ void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
        satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
        satp |= SATP_MODE_48;
 
-       vcpu_set_reg(vcpu, RISCV_CSR_REG(satp), satp);
+       vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp);
 }
 
 void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
@@ -315,7 +315,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
        vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
 
        /* Setup default exception vector of guest */
-       vcpu_set_reg(vcpu, RISCV_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
+       vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
 
        return vcpu;
 }
@@ -367,3 +367,48 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
 {
 }
+
+struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
+                       unsigned long arg1, unsigned long arg2,
+                       unsigned long arg3, unsigned long arg4,
+                       unsigned long arg5)
+{
+       register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
+       register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
+       register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
+       register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
+       register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
+       register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
+       register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
+       register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
+       struct sbiret ret;
+
+       asm volatile (
+               "ecall"
+               : "+r" (a0), "+r" (a1)
+               : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
+               : "memory");
+       ret.error = a0;
+       ret.value = a1;
+
+       return ret;
+}
+
+bool guest_sbi_probe_extension(int extid, long *out_val)
+{
+       struct sbiret ret;
+
+       ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
+                       0, 0, 0, 0, 0);
+
+       __GUEST_ASSERT(!ret.error || ret.error == SBI_ERR_NOT_SUPPORTED,
+                      "ret.error=%ld, ret.value=%ld\n", ret.error, ret.value);
+
+       if (ret.error == SBI_ERR_NOT_SUPPORTED)
+               return false;
+
+       if (out_val)
+               *out_val = ret.value;
+
+       return true;
+}
index fe6d1004f018c08e12e91e2993408494bab04317..14ee17151a590b26721c91940e758192d34633c0 100644 (file)
 #include "kvm_util.h"
 #include "processor.h"
 
-struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
-                       unsigned long arg1, unsigned long arg2,
-                       unsigned long arg3, unsigned long arg4,
-                       unsigned long arg5)
-{
-       register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
-       register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
-       register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
-       register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
-       register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
-       register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
-       register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
-       register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
-       struct sbiret ret;
-
-       asm volatile (
-               "ecall"
-               : "+r" (a0), "+r" (a1)
-               : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
-               : "memory");
-       ret.error = a0;
-       ret.value = a1;
-
-       return ret;
-}
-
 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *run = vcpu->run;
index 25de4b8bc3472ecbe705367605c804abed408f17..4fd0f8951574475fc5a8c8ca879103d2e2434b25 100644 (file)
 
 #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
 
+enum {
+       VCPU_FEATURE_ISA_EXT = 0,
+       VCPU_FEATURE_SBI_EXT,
+};
+
 static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX];
 
 bool filter_reg(__u64 reg)
@@ -28,31 +33,74 @@ bool filter_reg(__u64 reg)
         *
         * Note: The below list is alphabetically sorted.
         */
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SMSTATEEN:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICOND:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
-       case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_A:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_C:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_D:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_F:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_H:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_I:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_M:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_V:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSTC:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVINVAL:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBC:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKB:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKC:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKX:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBS:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFA:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFH:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFHMIN:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICNTR:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICOND:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICSR:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIFENCEI:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTNTL:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHPM:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKND:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNE:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNH:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKR:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSED:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSH:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKT:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBB:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBC:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFH:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFHMIN:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKB:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKG:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNED:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNHA:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNHB:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKSED:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKSH:
+       case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKT:
+       /*
+        * Like ISA_EXT registers, SBI_EXT registers are only visible when the
+        * host supports them and disabling them does not affect the visibility
+        * of the SBI_EXT register itself.
+        */
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01:
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME:
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI:
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE:
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST:
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM:
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU:
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN:
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA:
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL:
+       case KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR:
                return true;
        /* AIA registers are always available when Ssaia can't be disabled */
        case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect):
@@ -75,12 +123,12 @@ bool check_reject_set(int err)
        return err == EINVAL;
 }
 
-static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
+static bool vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext_id)
 {
        int ret;
        unsigned long value;
 
-       ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
+       ret = __vcpu_get_reg(vcpu, ext_id, &value);
        return (ret) ? false : !!value;
 }
 
@@ -88,6 +136,7 @@ void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
 {
        unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
        struct vcpu_reg_sublist *s;
+       uint64_t feature;
        int rc;
 
        for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
@@ -103,15 +152,31 @@ void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
                        isa_ext_cant_disable[i] = true;
        }
 
+       for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
+               rc = __vcpu_set_reg(vcpu, RISCV_SBI_EXT_REG(i), 0);
+               TEST_ASSERT(!rc || (rc == -1 && errno == ENOENT), "Unexpected error");
+       }
+
        for_each_sublist(c, s) {
                if (!s->feature)
                        continue;
 
+               switch (s->feature_type) {
+               case VCPU_FEATURE_ISA_EXT:
+                       feature = RISCV_ISA_EXT_REG(s->feature);
+                       break;
+               case VCPU_FEATURE_SBI_EXT:
+                       feature = RISCV_SBI_EXT_REG(s->feature);
+                       break;
+               default:
+                       TEST_FAIL("Unknown feature type");
+               }
+
                /* Try to enable the desired extension */
-               __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1);
+               __vcpu_set_reg(vcpu, feature, 1);
 
                /* Double check whether the desired extension was enabled */
-               __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
+               __TEST_REQUIRE(vcpu_has_ext(vcpu, feature),
                               "%s not available, skipping tests\n", s->name);
        }
 }
@@ -335,15 +400,10 @@ static const char *fp_d_id_to_str(const char *prefix, __u64 id)
 }
 
 #define KVM_ISA_EXT_ARR(ext)           \
-[KVM_RISCV_ISA_EXT_##ext] = "KVM_RISCV_ISA_EXT_" #ext
+[KVM_RISCV_ISA_EXT_##ext] = "KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_" #ext
 
-static const char *isa_ext_id_to_str(const char *prefix, __u64 id)
+static const char *isa_ext_single_id_to_str(__u64 reg_off)
 {
-       /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
-       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
-
-       assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT);
-
        static const char * const kvm_isa_ext_reg_name[] = {
                KVM_ISA_EXT_ARR(A),
                KVM_ISA_EXT_ARR(C),
@@ -361,23 +421,87 @@ static const char *isa_ext_id_to_str(const char *prefix, __u64 id)
                KVM_ISA_EXT_ARR(SVPBMT),
                KVM_ISA_EXT_ARR(ZBA),
                KVM_ISA_EXT_ARR(ZBB),
+               KVM_ISA_EXT_ARR(ZBC),
+               KVM_ISA_EXT_ARR(ZBKB),
+               KVM_ISA_EXT_ARR(ZBKC),
+               KVM_ISA_EXT_ARR(ZBKX),
                KVM_ISA_EXT_ARR(ZBS),
+               KVM_ISA_EXT_ARR(ZFA),
+               KVM_ISA_EXT_ARR(ZFH),
+               KVM_ISA_EXT_ARR(ZFHMIN),
                KVM_ISA_EXT_ARR(ZICBOM),
                KVM_ISA_EXT_ARR(ZICBOZ),
                KVM_ISA_EXT_ARR(ZICNTR),
                KVM_ISA_EXT_ARR(ZICOND),
                KVM_ISA_EXT_ARR(ZICSR),
                KVM_ISA_EXT_ARR(ZIFENCEI),
+               KVM_ISA_EXT_ARR(ZIHINTNTL),
                KVM_ISA_EXT_ARR(ZIHINTPAUSE),
                KVM_ISA_EXT_ARR(ZIHPM),
+               KVM_ISA_EXT_ARR(ZKND),
+               KVM_ISA_EXT_ARR(ZKNE),
+               KVM_ISA_EXT_ARR(ZKNH),
+               KVM_ISA_EXT_ARR(ZKR),
+               KVM_ISA_EXT_ARR(ZKSED),
+               KVM_ISA_EXT_ARR(ZKSH),
+               KVM_ISA_EXT_ARR(ZKT),
+               KVM_ISA_EXT_ARR(ZVBB),
+               KVM_ISA_EXT_ARR(ZVBC),
+               KVM_ISA_EXT_ARR(ZVFH),
+               KVM_ISA_EXT_ARR(ZVFHMIN),
+               KVM_ISA_EXT_ARR(ZVKB),
+               KVM_ISA_EXT_ARR(ZVKG),
+               KVM_ISA_EXT_ARR(ZVKNED),
+               KVM_ISA_EXT_ARR(ZVKNHA),
+               KVM_ISA_EXT_ARR(ZVKNHB),
+               KVM_ISA_EXT_ARR(ZVKSED),
+               KVM_ISA_EXT_ARR(ZVKSH),
+               KVM_ISA_EXT_ARR(ZVKT),
        };
 
        if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name))
-               return strdup_printf("%lld /* UNKNOWN */", reg_off);
+               return strdup_printf("KVM_REG_RISCV_ISA_SINGLE | %lld /* UNKNOWN */", reg_off);
 
        return kvm_isa_ext_reg_name[reg_off];
 }
 
+static const char *isa_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
+{
+       const char *unknown = "";
+
+       if (reg_off > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
+               unknown = " /* UNKNOWN */";
+
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_ISA_MULTI_EN:
+               return strdup_printf("KVM_REG_RISCV_ISA_MULTI_EN | %lld%s", reg_off, unknown);
+       case KVM_REG_RISCV_ISA_MULTI_DIS:
+               return strdup_printf("KVM_REG_RISCV_ISA_MULTI_DIS | %lld%s", reg_off, unknown);
+       }
+
+       return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
+}
+
+static const char *isa_ext_id_to_str(const char *prefix, __u64 id)
+{
+       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
+       __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
+
+       assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT);
+
+       reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_ISA_SINGLE:
+               return isa_ext_single_id_to_str(reg_off);
+       case KVM_REG_RISCV_ISA_MULTI_EN:
+       case KVM_REG_RISCV_ISA_MULTI_DIS:
+               return isa_ext_multi_id_to_str(reg_subtype, reg_off);
+       }
+
+       return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
+}
+
 #define KVM_SBI_EXT_ARR(ext)           \
 [ext] = "KVM_REG_RISCV_SBI_SINGLE | " #ext
 
@@ -392,6 +516,7 @@ static const char *sbi_ext_single_id_to_str(__u64 reg_off)
                KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_SRST),
                KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_HSM),
                KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_PMU),
+               KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_STA),
                KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_EXPERIMENTAL),
                KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_VENDOR),
                KVM_SBI_EXT_ARR(KVM_RISCV_SBI_EXT_DBCN),
@@ -440,6 +565,32 @@ static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
        return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
 }
 
+static const char *sbi_sta_id_to_str(__u64 reg_off)
+{
+       switch (reg_off) {
+       case 0: return "KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo)";
+       case 1: return "KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi)";
+       }
+       return strdup_printf("KVM_REG_RISCV_SBI_STA | %lld /* UNKNOWN */", reg_off);
+}
+
+static const char *sbi_id_to_str(const char *prefix, __u64 id)
+{
+       __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_STATE);
+       __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
+
+       assert((id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_STATE);
+
+       reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+       switch (reg_subtype) {
+       case KVM_REG_RISCV_SBI_STA:
+               return sbi_sta_id_to_str(reg_off);
+       }
+
+       return strdup_printf("%lld | %lld /* UNKNOWN */", reg_subtype, reg_off);
+}
+
 void print_reg(const char *prefix, __u64 id)
 {
        const char *reg_size = NULL;
@@ -496,6 +647,10 @@ void print_reg(const char *prefix, __u64 id)
                printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
                                reg_size, sbi_ext_id_to_str(prefix, id));
                break;
+       case KVM_REG_RISCV_SBI_STATE:
+               printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_STATE | %s,\n",
+                               reg_size, sbi_id_to_str(prefix, id));
+               break;
        default:
                printf("\tKVM_REG_RISCV | %s | 0x%llx /* UNKNOWN */,\n",
                                reg_size, id & ~REG_MASK);
@@ -561,18 +716,6 @@ static __u64 base_regs[] = {
        KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
        KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
        KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_DBCN,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
 };
 
 /*
@@ -583,66 +726,31 @@ static __u64 base_skips_set[] = {
        KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
 };
 
-static __u64 h_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H,
+static __u64 sbi_base_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
+};
+
+static __u64 sbi_sta_regs[] = {
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_STA,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_lo),
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | KVM_REG_RISCV_SBI_STA_REG(shmem_hi),
 };
 
 static __u64 zicbom_regs[] = {
        KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM,
 };
 
 static __u64 zicboz_regs[] = {
        KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
-};
-
-static __u64 svpbmt_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
-};
-
-static __u64 sstc_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC,
-};
-
-static __u64 svinval_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
-};
-
-static __u64 zihintpause_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE,
-};
-
-static __u64 zba_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA,
-};
-
-static __u64 zbb_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB,
-};
-
-static __u64 zbs_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS,
-};
-
-static __u64 zicntr_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR,
-};
-
-static __u64 zicond_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICOND,
-};
-
-static __u64 zicsr_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR,
-};
-
-static __u64 zifencei_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI,
-};
-
-static __u64 zihpm_regs[] = {
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ,
 };
 
 static __u64 aia_regs[] = {
@@ -653,12 +761,12 @@ static __u64 aia_regs[] = {
        KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
        KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
        KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SSAIA,
 };
 
 static __u64 smstateen_regs[] = {
        KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN | KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0),
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SMSTATEEN,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SMSTATEEN,
 };
 
 static __u64 fp_f_regs[] = {
@@ -695,7 +803,7 @@ static __u64 fp_f_regs[] = {
        KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
        KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
        KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_F,
 };
 
 static __u64 fp_d_regs[] = {
@@ -732,224 +840,196 @@ static __u64 fp_d_regs[] = {
        KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
        KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
        KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
-       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_D,
 };
 
-#define BASE_SUBLIST \
+#define SUBLIST_BASE \
        {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
         .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),}
-#define H_REGS_SUBLIST \
-       {"h", .feature = KVM_RISCV_ISA_EXT_H, .regs = h_regs, .regs_n = ARRAY_SIZE(h_regs),}
-#define ZICBOM_REGS_SUBLIST \
+#define SUBLIST_SBI_BASE \
+       {"sbi-base", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_V01, \
+        .regs = sbi_base_regs, .regs_n = ARRAY_SIZE(sbi_base_regs),}
+#define SUBLIST_SBI_STA \
+       {"sbi-sta", .feature_type = VCPU_FEATURE_SBI_EXT, .feature = KVM_RISCV_SBI_EXT_STA, \
+        .regs = sbi_sta_regs, .regs_n = ARRAY_SIZE(sbi_sta_regs),}
+#define SUBLIST_ZICBOM \
        {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),}
-#define ZICBOZ_REGS_SUBLIST \
+#define SUBLIST_ZICBOZ \
        {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),}
-#define SVPBMT_REGS_SUBLIST \
-       {"svpbmt", .feature = KVM_RISCV_ISA_EXT_SVPBMT, .regs = svpbmt_regs, .regs_n = ARRAY_SIZE(svpbmt_regs),}
-#define SSTC_REGS_SUBLIST \
-       {"sstc", .feature = KVM_RISCV_ISA_EXT_SSTC, .regs = sstc_regs, .regs_n = ARRAY_SIZE(sstc_regs),}
-#define SVINVAL_REGS_SUBLIST \
-       {"svinval", .feature = KVM_RISCV_ISA_EXT_SVINVAL, .regs = svinval_regs, .regs_n = ARRAY_SIZE(svinval_regs),}
-#define ZIHINTPAUSE_REGS_SUBLIST \
-       {"zihintpause", .feature = KVM_RISCV_ISA_EXT_ZIHINTPAUSE, .regs = zihintpause_regs, .regs_n = ARRAY_SIZE(zihintpause_regs),}
-#define ZBA_REGS_SUBLIST \
-       {"zba", .feature = KVM_RISCV_ISA_EXT_ZBA, .regs = zba_regs, .regs_n = ARRAY_SIZE(zba_regs),}
-#define ZBB_REGS_SUBLIST \
-       {"zbb", .feature = KVM_RISCV_ISA_EXT_ZBB, .regs = zbb_regs, .regs_n = ARRAY_SIZE(zbb_regs),}
-#define ZBS_REGS_SUBLIST \
-       {"zbs", .feature = KVM_RISCV_ISA_EXT_ZBS, .regs = zbs_regs, .regs_n = ARRAY_SIZE(zbs_regs),}
-#define ZICNTR_REGS_SUBLIST \
-       {"zicntr", .feature = KVM_RISCV_ISA_EXT_ZICNTR, .regs = zicntr_regs, .regs_n = ARRAY_SIZE(zicntr_regs),}
-#define ZICOND_REGS_SUBLIST \
-       {"zicond", .feature = KVM_RISCV_ISA_EXT_ZICOND, .regs = zicond_regs, .regs_n = ARRAY_SIZE(zicond_regs),}
-#define ZICSR_REGS_SUBLIST \
-       {"zicsr", .feature = KVM_RISCV_ISA_EXT_ZICSR, .regs = zicsr_regs, .regs_n = ARRAY_SIZE(zicsr_regs),}
-#define ZIFENCEI_REGS_SUBLIST \
-       {"zifencei", .feature = KVM_RISCV_ISA_EXT_ZIFENCEI, .regs = zifencei_regs, .regs_n = ARRAY_SIZE(zifencei_regs),}
-#define ZIHPM_REGS_SUBLIST \
-       {"zihpm", .feature = KVM_RISCV_ISA_EXT_ZIHPM, .regs = zihpm_regs, .regs_n = ARRAY_SIZE(zihpm_regs),}
-#define AIA_REGS_SUBLIST \
+#define SUBLIST_AIA \
        {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),}
-#define SMSTATEEN_REGS_SUBLIST \
+#define SUBLIST_SMSTATEEN \
        {"smstateen", .feature = KVM_RISCV_ISA_EXT_SMSTATEEN, .regs = smstateen_regs, .regs_n = ARRAY_SIZE(smstateen_regs),}
-#define FP_F_REGS_SUBLIST \
+#define SUBLIST_FP_F \
        {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
                .regs_n = ARRAY_SIZE(fp_f_regs),}
-#define FP_D_REGS_SUBLIST \
+#define SUBLIST_FP_D \
        {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
                .regs_n = ARRAY_SIZE(fp_d_regs),}
 
-static struct vcpu_reg_list h_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       H_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zicbom_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZICBOM_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zicboz_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZICBOZ_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list svpbmt_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       SVPBMT_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list sstc_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       SSTC_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list svinval_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       SVINVAL_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zihintpause_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZIHINTPAUSE_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zba_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZBA_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zbb_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZBB_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zbs_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZBS_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zicntr_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZICNTR_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zicond_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZICOND_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zicsr_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZICSR_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zifencei_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZIFENCEI_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list zihpm_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       ZIHPM_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list aia_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       AIA_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list smstateen_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       SMSTATEEN_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list fp_f_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       FP_F_REGS_SUBLIST,
-       {0},
-       },
-};
-
-static struct vcpu_reg_list fp_d_config = {
-       .sublists = {
-       BASE_SUBLIST,
-       FP_D_REGS_SUBLIST,
-       {0},
-       },
-};
+#define KVM_ISA_EXT_SIMPLE_CONFIG(ext, extu)                   \
+static __u64 regs_##ext[] = {                                  \
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG |                    \
+       KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |      \
+       KVM_RISCV_ISA_EXT_##extu,                               \
+};                                                             \
+static struct vcpu_reg_list config_##ext = {                   \
+       .sublists = {                                           \
+               SUBLIST_BASE,                                   \
+               {                                               \
+                       .name = #ext,                           \
+                       .feature = KVM_RISCV_ISA_EXT_##extu,    \
+                       .regs = regs_##ext,                     \
+                       .regs_n = ARRAY_SIZE(regs_##ext),       \
+               },                                              \
+               {0},                                            \
+       },                                                      \
+}                                                              \
+
+#define KVM_SBI_EXT_SIMPLE_CONFIG(ext, extu)                   \
+static __u64 regs_sbi_##ext[] = {                              \
+       KVM_REG_RISCV | KVM_REG_SIZE_ULONG |                    \
+       KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |      \
+       KVM_RISCV_SBI_EXT_##extu,                               \
+};                                                             \
+static struct vcpu_reg_list config_sbi_##ext = {               \
+       .sublists = {                                           \
+               SUBLIST_BASE,                                   \
+               {                                               \
+                       .name = "sbi-"#ext,                     \
+                       .feature_type = VCPU_FEATURE_SBI_EXT,   \
+                       .feature = KVM_RISCV_SBI_EXT_##extu,    \
+                       .regs = regs_sbi_##ext,                 \
+                       .regs_n = ARRAY_SIZE(regs_sbi_##ext),   \
+               },                                              \
+               {0},                                            \
+       },                                                      \
+}                                                              \
+
+#define KVM_ISA_EXT_SUBLIST_CONFIG(ext, extu)                  \
+static struct vcpu_reg_list config_##ext = {                   \
+       .sublists = {                                           \
+               SUBLIST_BASE,                                   \
+               SUBLIST_##extu,                                 \
+               {0},                                            \
+       },                                                      \
+}                                                              \
+
+#define KVM_SBI_EXT_SUBLIST_CONFIG(ext, extu)                  \
+static struct vcpu_reg_list config_sbi_##ext = {               \
+       .sublists = {                                           \
+               SUBLIST_BASE,                                   \
+               SUBLIST_SBI_##extu,                             \
+               {0},                                            \
+       },                                                      \
+}                                                              \
+
+/* Note: The below list is alphabetically sorted. */
+
+KVM_SBI_EXT_SUBLIST_CONFIG(base, BASE);
+KVM_SBI_EXT_SUBLIST_CONFIG(sta, STA);
+KVM_SBI_EXT_SIMPLE_CONFIG(pmu, PMU);
+KVM_SBI_EXT_SIMPLE_CONFIG(dbcn, DBCN);
+
+KVM_ISA_EXT_SUBLIST_CONFIG(aia, AIA);
+KVM_ISA_EXT_SUBLIST_CONFIG(fp_f, FP_F);
+KVM_ISA_EXT_SUBLIST_CONFIG(fp_d, FP_D);
+KVM_ISA_EXT_SIMPLE_CONFIG(h, H);
+KVM_ISA_EXT_SUBLIST_CONFIG(smstateen, SMSTATEEN);
+KVM_ISA_EXT_SIMPLE_CONFIG(sstc, SSTC);
+KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL);
+KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT);
+KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT);
+KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA);
+KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zbc, ZBC);
+KVM_ISA_EXT_SIMPLE_CONFIG(zbkb, ZBKB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zbkc, ZBKC);
+KVM_ISA_EXT_SIMPLE_CONFIG(zbkx, ZBKX);
+KVM_ISA_EXT_SIMPLE_CONFIG(zbs, ZBS);
+KVM_ISA_EXT_SIMPLE_CONFIG(zfa, ZFA);
+KVM_ISA_EXT_SIMPLE_CONFIG(zfh, ZFH);
+KVM_ISA_EXT_SIMPLE_CONFIG(zfhmin, ZFHMIN);
+KVM_ISA_EXT_SUBLIST_CONFIG(zicbom, ZICBOM);
+KVM_ISA_EXT_SUBLIST_CONFIG(zicboz, ZICBOZ);
+KVM_ISA_EXT_SIMPLE_CONFIG(zicntr, ZICNTR);
+KVM_ISA_EXT_SIMPLE_CONFIG(zicond, ZICOND);
+KVM_ISA_EXT_SIMPLE_CONFIG(zicsr, ZICSR);
+KVM_ISA_EXT_SIMPLE_CONFIG(zifencei, ZIFENCEI);
+KVM_ISA_EXT_SIMPLE_CONFIG(zihintntl, ZIHINTNTL);
+KVM_ISA_EXT_SIMPLE_CONFIG(zihintpause, ZIHINTPAUSE);
+KVM_ISA_EXT_SIMPLE_CONFIG(zihpm, ZIHPM);
+KVM_ISA_EXT_SIMPLE_CONFIG(zknd, ZKND);
+KVM_ISA_EXT_SIMPLE_CONFIG(zkne, ZKNE);
+KVM_ISA_EXT_SIMPLE_CONFIG(zknh, ZKNH);
+KVM_ISA_EXT_SIMPLE_CONFIG(zkr, ZKR);
+KVM_ISA_EXT_SIMPLE_CONFIG(zksed, ZKSED);
+KVM_ISA_EXT_SIMPLE_CONFIG(zksh, ZKSH);
+KVM_ISA_EXT_SIMPLE_CONFIG(zkt, ZKT);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvbb, ZVBB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvbc, ZVBC);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvfh, ZVFH);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvfhmin, ZVFHMIN);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvkb, ZVKB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvkg, ZVKG);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvkned, ZVKNED);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvknha, ZVKNHA);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvknhb, ZVKNHB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvksed, ZVKSED);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvksh, ZVKSH);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvkt, ZVKT);
 
 struct vcpu_reg_list *vcpu_configs[] = {
-       &h_config,
-       &zicbom_config,
-       &zicboz_config,
-       &svpbmt_config,
-       &sstc_config,
-       &svinval_config,
-       &zihintpause_config,
-       &zba_config,
-       &zbb_config,
-       &zbs_config,
-       &zicntr_config,
-       &zicond_config,
-       &zicsr_config,
-       &zifencei_config,
-       &zihpm_config,
-       &aia_config,
-       &smstateen_config,
-       &fp_f_config,
-       &fp_d_config,
+       &config_sbi_base,
+       &config_sbi_sta,
+       &config_sbi_pmu,
+       &config_sbi_dbcn,
+       &config_aia,
+       &config_fp_f,
+       &config_fp_d,
+       &config_h,
+       &config_smstateen,
+       &config_sstc,
+       &config_svinval,
+       &config_svnapot,
+       &config_svpbmt,
+       &config_zba,
+       &config_zbb,
+       &config_zbc,
+       &config_zbkb,
+       &config_zbkc,
+       &config_zbkx,
+       &config_zbs,
+       &config_zfa,
+       &config_zfh,
+       &config_zfhmin,
+       &config_zicbom,
+       &config_zicboz,
+       &config_zicntr,
+       &config_zicond,
+       &config_zicsr,
+       &config_zifencei,
+       &config_zihintntl,
+       &config_zihintpause,
+       &config_zihpm,
+       &config_zknd,
+       &config_zkne,
+       &config_zknh,
+       &config_zkr,
+       &config_zksed,
+       &config_zksh,
+       &config_zkt,
+       &config_zvbb,
+       &config_zvbc,
+       &config_zvfh,
+       &config_zvfhmin,
+       &config_zvkb,
+       &config_zvkg,
+       &config_zvkned,
+       &config_zvknha,
+       &config_zvknhb,
+       &config_zvksed,
+       &config_zvksh,
+       &config_zvkt,
 };
 int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
index c8e0a6495a63ae2ca55ac97f56b928c72afdb469..626a2b8a203724db1f3fa395ceb8b4506b9080db 100644 (file)
@@ -94,11 +94,6 @@ static void guest_dirty_test_data(void)
        );
 }
 
-static struct kvm_vm *create_vm(void)
-{
-       return ____vm_create(VM_MODE_DEFAULT);
-}
-
 static void create_main_memslot(struct kvm_vm *vm)
 {
        int i;
@@ -157,7 +152,7 @@ static struct kvm_vm *create_vm_two_memslots(void)
 {
        struct kvm_vm *vm;
 
-       vm = create_vm();
+       vm = vm_create_barebones();
 
        create_memslots(vm);
 
@@ -276,7 +271,7 @@ static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu)
 
 static void test_migration_mode(void)
 {
-       struct kvm_vm *vm = create_vm();
+       struct kvm_vm *vm = vm_create_barebones();
        struct kvm_vcpu *vcpu;
        u64 orig_psw;
        int rc;
@@ -670,7 +665,7 @@ struct testdef {
  */
 static int machine_has_cmma(void)
 {
-       struct kvm_vm *vm = create_vm();
+       struct kvm_vm *vm = vm_create_barebones();
        int r;
 
        r = !__kvm_has_device_attr(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA);
index b32960189f5f18eb688ae5a9066ed6764efdabd1..075b80dbe2370d2ff472685f4b02b4e1243d7123 100644 (file)
@@ -157,17 +157,17 @@ static void guest_code_move_memory_region(void)
         */
        val = guest_spin_on_val(0);
        __GUEST_ASSERT(val == 1 || val == MMIO_VAL,
-                      "Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val);
+                      "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val);
 
        /* Spin until the misaligning memory region move completes. */
        val = guest_spin_on_val(MMIO_VAL);
        __GUEST_ASSERT(val == 1 || val == 0,
-                      "Expected '0' or '1' (no MMIO), got '%llx'", val);
+                      "Expected '0' or '1' (no MMIO), got '%lx'", val);
 
        /* Spin until the memory region starts to get re-aligned. */
        val = guest_spin_on_val(0);
        __GUEST_ASSERT(val == 1 || val == MMIO_VAL,
-                      "Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val);
+                      "Expected '1' or MMIO ('%lx'), got '%lx'", MMIO_VAL, val);
 
        /* Spin until the re-aligning memory region move completes. */
        val = guest_spin_on_val(MMIO_VAL);
@@ -326,6 +326,55 @@ static void test_zero_memory_regions(void)
 }
 #endif /* __x86_64__ */
 
+static void test_invalid_memory_region_flags(void)
+{
+       uint32_t supported_flags = KVM_MEM_LOG_DIRTY_PAGES;
+       const uint32_t v2_only_flags = KVM_MEM_GUEST_MEMFD;
+       struct kvm_vm *vm;
+       int r, i;
+
+#if defined __aarch64__ || defined __x86_64__
+       supported_flags |= KVM_MEM_READONLY;
+#endif
+
+#ifdef __x86_64__
+       if (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))
+               vm = vm_create_barebones_protected_vm();
+       else
+#endif
+               vm = vm_create_barebones();
+
+       if (kvm_check_cap(KVM_CAP_MEMORY_ATTRIBUTES) & KVM_MEMORY_ATTRIBUTE_PRIVATE)
+               supported_flags |= KVM_MEM_GUEST_MEMFD;
+
+       for (i = 0; i < 32; i++) {
+               if ((supported_flags & BIT(i)) && !(v2_only_flags & BIT(i)))
+                       continue;
+
+               r = __vm_set_user_memory_region(vm, 0, BIT(i),
+                                               0, MEM_REGION_SIZE, NULL);
+
+               TEST_ASSERT(r && errno == EINVAL,
+                           "KVM_SET_USER_MEMORY_REGION should have failed on v2 only flag 0x%lx", BIT(i));
+
+               if (supported_flags & BIT(i))
+                       continue;
+
+               r = __vm_set_user_memory_region2(vm, 0, BIT(i),
+                                                0, MEM_REGION_SIZE, NULL, 0, 0);
+               TEST_ASSERT(r && errno == EINVAL,
+                           "KVM_SET_USER_MEMORY_REGION2 should have failed on unsupported flag 0x%lx", BIT(i));
+       }
+
+       if (supported_flags & KVM_MEM_GUEST_MEMFD) {
+               r = __vm_set_user_memory_region2(vm, 0,
+                                                KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_GUEST_MEMFD,
+                                                0, MEM_REGION_SIZE, NULL, 0, 0);
+               TEST_ASSERT(r && errno == EINVAL,
+                           "KVM_SET_USER_MEMORY_REGION2 should have failed, dirty logging private memory is unsupported");
+       }
+}
+
 /*
  * Test it can be added memory slots up to KVM_CAP_NR_MEMSLOTS, then any
  * tentative to add further slots should fail.
@@ -385,13 +434,105 @@ static void test_add_max_memory_regions(void)
        kvm_vm_free(vm);
 }
 
+
+#ifdef __x86_64__
+static void test_invalid_guest_memfd(struct kvm_vm *vm, int memfd,
+                                    size_t offset, const char *msg)
+{
+       int r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
+                                            MEM_REGION_GPA, MEM_REGION_SIZE,
+                                            0, memfd, offset);
+       TEST_ASSERT(r == -1 && errno == EINVAL, "%s", msg);
+}
+
+static void test_add_private_memory_region(void)
+{
+       struct kvm_vm *vm, *vm2;
+       int memfd, i;
+
+       pr_info("Testing ADD of KVM_MEM_GUEST_MEMFD memory regions\n");
+
+       vm = vm_create_barebones_protected_vm();
+
+       test_invalid_guest_memfd(vm, vm->kvm_fd, 0, "KVM fd should fail");
+       test_invalid_guest_memfd(vm, vm->fd, 0, "VM's fd should fail");
+
+       memfd = kvm_memfd_alloc(MEM_REGION_SIZE, false);
+       test_invalid_guest_memfd(vm, memfd, 0, "Regular memfd() should fail");
+       close(memfd);
+
+       vm2 = vm_create_barebones_protected_vm();
+       memfd = vm_create_guest_memfd(vm2, MEM_REGION_SIZE, 0);
+       test_invalid_guest_memfd(vm, memfd, 0, "Other VM's guest_memfd() should fail");
+
+       vm_set_user_memory_region2(vm2, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
+                                  MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0);
+       close(memfd);
+       kvm_vm_free(vm2);
+
+       memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE, 0);
+       for (i = 1; i < PAGE_SIZE; i++)
+               test_invalid_guest_memfd(vm, memfd, i, "Unaligned offset should fail");
+
+       vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
+                                  MEM_REGION_GPA, MEM_REGION_SIZE, 0, memfd, 0);
+       close(memfd);
+
+       kvm_vm_free(vm);
+}
+
+static void test_add_overlapping_private_memory_regions(void)
+{
+       struct kvm_vm *vm;
+       int memfd;
+       int r;
+
+       pr_info("Testing ADD of overlapping KVM_MEM_GUEST_MEMFD memory regions\n");
+
+       vm = vm_create_barebones_protected_vm();
+
+       memfd = vm_create_guest_memfd(vm, MEM_REGION_SIZE * 4, 0);
+
+       vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
+                                  MEM_REGION_GPA, MEM_REGION_SIZE * 2, 0, memfd, 0);
+
+       vm_set_user_memory_region2(vm, MEM_REGION_SLOT + 1, KVM_MEM_GUEST_MEMFD,
+                                  MEM_REGION_GPA * 2, MEM_REGION_SIZE * 2,
+                                  0, memfd, MEM_REGION_SIZE * 2);
+
+       /*
+        * Delete the first memslot, and then attempt to recreate it except
+        * with a "bad" offset that results in overlap in the guest_memfd().
+        */
+       vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
+                                  MEM_REGION_GPA, 0, NULL, -1, 0);
+
+       /* Overlap the front half of the other slot. */
+       r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
+                                        MEM_REGION_GPA * 2 - MEM_REGION_SIZE,
+                                        MEM_REGION_SIZE * 2,
+                                        0, memfd, 0);
+       TEST_ASSERT(r == -1 && errno == EEXIST, "%s",
+                   "Overlapping guest_memfd() bindings should fail with EEXIST");
+
+       /* And now the back half of the other slot. */
+       r = __vm_set_user_memory_region2(vm, MEM_REGION_SLOT, KVM_MEM_GUEST_MEMFD,
+                                        MEM_REGION_GPA * 2 + MEM_REGION_SIZE,
+                                        MEM_REGION_SIZE * 2,
+                                        0, memfd, 0);
+       TEST_ASSERT(r == -1 && errno == EEXIST, "%s",
+                   "Overlapping guest_memfd() bindings should fail with EEXIST");
+
+       close(memfd);
+       kvm_vm_free(vm);
+}
+#endif
+
 int main(int argc, char *argv[])
 {
 #ifdef __x86_64__
        int i, loops;
-#endif
 
-#ifdef __x86_64__
        /*
         * FIXME: the zero-memslot test fails on aarch64 and s390x because
         * KVM_RUN fails with ENOEXEC or EFAULT.
@@ -399,9 +540,19 @@ int main(int argc, char *argv[])
        test_zero_memory_regions();
 #endif
 
+       test_invalid_memory_region_flags();
+
        test_add_max_memory_regions();
 
 #ifdef __x86_64__
+       if (kvm_has_cap(KVM_CAP_GUEST_MEMFD) &&
+           (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))) {
+               test_add_private_memory_region();
+               test_add_overlapping_private_memory_regions();
+       } else {
+               pr_info("Skipping tests for KVM_MEM_GUEST_MEMFD memory regions\n");
+       }
+
        if (argc > 1)
                loops = atoi_positive("Number of iterations", argv[1]);
        else
index 171adfb2a6cbc45ffee0fcf4d149b49ed29ab2d7..bae0c5026f82f6f16022813437797c6b28c882e8 100644 (file)
@@ -11,7 +11,9 @@
 #include <pthread.h>
 #include <linux/kernel.h>
 #include <asm/kvm.h>
+#ifndef __riscv
 #include <asm/kvm_para.h>
+#endif
 
 #include "test_util.h"
 #include "kvm_util.h"
@@ -203,6 +205,103 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
        pr_info("    st_time: %ld\n", st->st_time);
 }
 
+#elif defined(__riscv)
+
+/* SBI STA shmem must have 64-byte alignment */
+#define STEAL_TIME_SIZE                ((sizeof(struct sta_struct) + 63) & ~63)
+
+static vm_paddr_t st_gpa[NR_VCPUS];
+
+struct sta_struct {
+       uint32_t sequence;
+       uint32_t flags;
+       uint64_t steal;
+       uint8_t preempted;
+       uint8_t pad[47];
+} __packed;
+
+static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags)
+{
+       unsigned long lo = (unsigned long)gpa;
+#if __riscv_xlen == 32
+       unsigned long hi = (unsigned long)(gpa >> 32);
+#else
+       unsigned long hi = gpa == -1 ? -1 : 0;
+#endif
+       struct sbiret ret = sbi_ecall(SBI_EXT_STA, 0, lo, hi, flags, 0, 0, 0);
+
+       GUEST_ASSERT(ret.value == 0 && ret.error == 0);
+}
+
+static void check_status(struct sta_struct *st)
+{
+       GUEST_ASSERT(!(READ_ONCE(st->sequence) & 1));
+       GUEST_ASSERT(READ_ONCE(st->flags) == 0);
+       GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
+}
+
+static void guest_code(int cpu)
+{
+       struct sta_struct *st = st_gva[cpu];
+       uint32_t sequence;
+       long out_val = 0;
+       bool probe;
+
+       probe = guest_sbi_probe_extension(SBI_EXT_STA, &out_val);
+       GUEST_ASSERT(probe && out_val == 1);
+
+       sta_set_shmem(st_gpa[cpu], 0);
+       GUEST_SYNC(0);
+
+       check_status(st);
+       WRITE_ONCE(guest_stolen_time[cpu], st->steal);
+       sequence = READ_ONCE(st->sequence);
+       check_status(st);
+       GUEST_SYNC(1);
+
+       check_status(st);
+       GUEST_ASSERT(sequence < READ_ONCE(st->sequence));
+       WRITE_ONCE(guest_stolen_time[cpu], st->steal);
+       check_status(st);
+       GUEST_DONE();
+}
+
+static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
+{
+       uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA);
+       unsigned long enabled;
+
+       vcpu_get_reg(vcpu, id, &enabled);
+       TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result");
+
+       return enabled;
+}
+
+static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
+{
+       /* ST_GPA_BASE is identity mapped */
+       st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
+       st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]);
+       sync_global_to_guest(vcpu->vm, st_gva[i]);
+       sync_global_to_guest(vcpu->vm, st_gpa[i]);
+}
+
+static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
+{
+       struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
+       int i;
+
+       pr_info("VCPU%d:\n", vcpu_idx);
+       pr_info("    sequence:  %d\n", st->sequence);
+       pr_info("    flags:     %d\n", st->flags);
+       pr_info("    steal:     %"PRIu64"\n", st->steal);
+       pr_info("    preempted: %d\n", st->preempted);
+       pr_info("    pad:      ");
+       for (i = 0; i < 47; ++i)
+               pr_info("%d", st->pad[i]);
+       pr_info("\n");
+}
+
 #endif
 
 static void *do_steal_time(void *arg)
index f25749eaa6a84bb16f0405fdf8847ace33ce04ce..f5e1e98f04f9ef0a00f3d80ba8a0bd94f90feffd 100644 (file)
@@ -211,6 +211,8 @@ int main(void)
        vm_vaddr_t tsc_page_gva;
        int stage;
 
+       TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TIME));
+
        vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 
        vcpu_set_hv_cpuid(vcpu);
index 7bde0c4dfdbd18b769a2580c68c2e789a7a692a6..4c7257ecd2a68ce6fec6c9b93f8ae83afc6e7867 100644 (file)
@@ -240,11 +240,12 @@ int main(int argc, char *argv[])
        struct ucall uc;
        int stage;
 
-       vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-
        TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
        TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
        TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
+       TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_DIRECT_TLBFLUSH));
+
+       vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
        hcall_page = vm_vaddr_alloc_pages(vm, 1);
        memset(addr_gva2hva(vm, hcall_page), 0x0,  getpagesize());
index e036db1f32b9b33d99e0cbb55b349b87dbcdb6ef..949e08e98f31585a41f5d5ccd5bfb0bab3de0de9 100644 (file)
@@ -43,6 +43,8 @@ int main(void)
        uint64_t *outval;
        struct ucall uc;
 
+       TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID));
+
        /* Verify if extended hypercalls are supported */
        if (!kvm_cpuid_has(kvm_get_supported_hv_cpuid(),
                           HV_ENABLE_EXTENDED_HYPERCALLS)) {
index 9f28aa276c4e23608f22cb97c2fd5f4a84084988..4f4193fc74ffa29454c193ed741603c5fef1004b 100644 (file)
@@ -55,18 +55,18 @@ static void guest_msr(struct msr_data *msr)
        if (msr->fault_expected)
                __GUEST_ASSERT(vector == GP_VECTOR,
                               "Expected #GP on %sMSR(0x%x), got vector '0x%x'",
-                              msr->idx, msr->write ? "WR" : "RD", vector);
+                              msr->write ? "WR" : "RD", msr->idx, vector);
        else
                __GUEST_ASSERT(!vector,
                               "Expected success on %sMSR(0x%x), got vector '0x%x'",
-                              msr->idx, msr->write ? "WR" : "RD", vector);
+                              msr->write ? "WR" : "RD", msr->idx, vector);
 
        if (vector || is_write_only_msr(msr->idx))
                goto done;
 
        if (msr->write)
                __GUEST_ASSERT(!vector,
-                              "WRMSR(0x%x) to '0x%llx', RDMSR read '0x%llx'",
+                              "WRMSR(0x%x) to '0x%lx', RDMSR read '0x%lx'",
                               msr->idx, msr->write_val, msr_val);
 
        /* Invariant TSC bit appears when TSC invariant control MSR is written to */
@@ -102,11 +102,11 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
        vector = __hyperv_hypercall(hcall->control, input, output, &res);
        if (hcall->ud_expected) {
                __GUEST_ASSERT(vector == UD_VECTOR,
-                              "Expected #UD for control '%u', got vector '0x%x'",
+                              "Expected #UD for control '%lu', got vector '0x%x'",
                               hcall->control, vector);
        } else {
                __GUEST_ASSERT(!vector,
-                              "Expected no exception for control '%u', got vector '0x%x'",
+                              "Expected no exception for control '%lu', got vector '0x%x'",
                               hcall->control, vector);
                GUEST_ASSERT_EQ(res, hcall->expect);
        }
@@ -690,6 +690,8 @@ static void guest_test_hcalls_access(void)
 
 int main(void)
 {
+       TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENFORCE_CPUID));
+
        pr_info("Testing access to Hyper-V specific MSRs\n");
        guest_test_msrs_access();
 
index 6feb5ddb031dac66aeabe18b7733f8639f29af63..65e5f4c05068a8fff78caf386f76ef107ac407f2 100644 (file)
@@ -248,6 +248,8 @@ int main(int argc, char *argv[])
        int stage = 1, r;
        struct ucall uc;
 
+       TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_SEND_IPI));
+
        vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);
 
        /* Hypercall input/output */
index 6c127856209065f5345ff2590383e532891467fe..c9b18707edc03113db996d127342b4b6bfade843 100644 (file)
@@ -158,6 +158,7 @@ int main(int argc, char *argv[])
        int stage;
 
        TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+       TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_DIRECT_TLBFLUSH));
 
        /* Create VM */
        vm = vm_create_with_one_vcpu(&vcpu, guest_code);
index 4758b6ef5618e387d3a838908e78d2e51af79fa7..c4443f71f8dd01f6aafde337c618d414c61c1ce3 100644 (file)
@@ -590,6 +590,8 @@ int main(int argc, char *argv[])
        struct ucall uc;
        int stage = 1, r, i;
 
+       TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TLBFLUSH));
+
        vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);
 
        /* Test data page */
diff --git a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
deleted file mode 100644 (file)
index ce1ccc4..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * mmio_warning_test
- *
- * Copyright (C) 2019, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
- * Test that we don't get a kernel warning when we call KVM_RUN after a
- * triple fault occurs.  To get the triple fault to occur we call KVM_RUN
- * on a VCPU that hasn't been properly setup.
- *
- */
-
-#define _GNU_SOURCE
-#include <fcntl.h>
-#include <kvm_util.h>
-#include <linux/kvm.h>
-#include <processor.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <test_util.h>
-#include <unistd.h>
-
-#define NTHREAD 4
-#define NPROCESS 5
-
-struct thread_context {
-       int kvmcpu;
-       struct kvm_run *run;
-};
-
-void *thr(void *arg)
-{
-       struct thread_context *tc = (struct thread_context *)arg;
-       int res;
-       int kvmcpu = tc->kvmcpu;
-       struct kvm_run *run = tc->run;
-
-       res = ioctl(kvmcpu, KVM_RUN, 0);
-       pr_info("ret1=%d exit_reason=%d suberror=%d\n",
-               res, run->exit_reason, run->internal.suberror);
-
-       return 0;
-}
-
-void test(void)
-{
-       int i, kvm, kvmvm, kvmcpu;
-       pthread_t th[NTHREAD];
-       struct kvm_run *run;
-       struct thread_context tc;
-
-       kvm = open("/dev/kvm", O_RDWR);
-       TEST_ASSERT(kvm != -1, "failed to open /dev/kvm");
-       kvmvm = __kvm_ioctl(kvm, KVM_CREATE_VM, NULL);
-       TEST_ASSERT(kvmvm > 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, kvmvm));
-       kvmcpu = ioctl(kvmvm, KVM_CREATE_VCPU, 0);
-       TEST_ASSERT(kvmcpu != -1, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, kvmcpu));
-       run = (struct kvm_run *)mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_SHARED,
-                                   kvmcpu, 0);
-       tc.kvmcpu = kvmcpu;
-       tc.run = run;
-       srand(getpid());
-       for (i = 0; i < NTHREAD; i++) {
-               pthread_create(&th[i], NULL, thr, (void *)(uintptr_t)&tc);
-               usleep(rand() % 10000);
-       }
-       for (i = 0; i < NTHREAD; i++)
-               pthread_join(th[i], NULL);
-}
-
-int get_warnings_count(void)
-{
-       int warnings;
-       FILE *f;
-
-       f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
-       if (fscanf(f, "%d", &warnings) < 1)
-               warnings = 0;
-       pclose(f);
-
-       return warnings;
-}
-
-int main(void)
-{
-       int warnings_before, warnings_after;
-
-       TEST_REQUIRE(host_cpu_is_intel);
-
-       TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
-
-       warnings_before = get_warnings_count();
-
-       for (int i = 0; i < NPROCESS; ++i) {
-               int status;
-               int pid = fork();
-
-               if (pid < 0)
-                       exit(1);
-               if (pid == 0) {
-                       test();
-                       exit(0);
-               }
-               while (waitpid(pid, &status, __WALL) != pid)
-                       ;
-       }
-
-       warnings_after = get_warnings_count();
-       TEST_ASSERT(warnings_before == warnings_after,
-                  "Warnings found in kernel.  Run 'dmesg' to inspect them.");
-
-       return 0;
-}
index 80aa3d8b18f803c20092cc938d6822866146beda..853802641e1eafe553bc7c0676801227a5601be9 100644 (file)
@@ -27,10 +27,12 @@ do {                                                                        \
                                                                        \
        if (fault_wanted)                                               \
                __GUEST_ASSERT((vector) == UD_VECTOR,                   \
-                              "Expected #UD on " insn " for testcase '0x%x', got '0x%x'", vector); \
+                              "Expected #UD on " insn " for testcase '0x%x', got '0x%x'", \
+                              testcase, vector);                       \
        else                                                            \
                __GUEST_ASSERT(!(vector),                               \
-                              "Expected success on " insn " for testcase '0x%x', got '0x%x'", vector); \
+                              "Expected success on " insn " for testcase '0x%x', got '0x%x'", \
+                              testcase, vector);                       \
 } while (0)
 
 static void guest_monitor_wait(int testcase)
diff --git a/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c b/tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c
new file mode 100644 (file)
index 0000000..65ad38b
--- /dev/null
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022, Google LLC.
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/kvm_para.h>
+#include <linux/memfd.h>
+#include <linux/sizes.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#define BASE_DATA_SLOT         10
+#define BASE_DATA_GPA          ((uint64_t)(1ull << 32))
+#define PER_CPU_DATA_SIZE      ((uint64_t)(SZ_2M + PAGE_SIZE))
+
+/* Horrific macro so that the line info is captured accurately :-( */
+#define memcmp_g(gpa, pattern,  size)                                                          \
+do {                                                                                           \
+       uint8_t *mem = (uint8_t *)gpa;                                                          \
+       size_t i;                                                                               \
+                                                                                               \
+       for (i = 0; i < size; i++)                                                              \
+               __GUEST_ASSERT(mem[i] == pattern,                                               \
+                              "Guest expected 0x%x at offset %lu (gpa 0x%lx), got 0x%x",       \
+                              pattern, i, gpa + i, mem[i]);                                    \
+} while (0)
+
+static void memcmp_h(uint8_t *mem, uint64_t gpa, uint8_t pattern, size_t size)
+{
+       size_t i;
+
+       for (i = 0; i < size; i++)
+               TEST_ASSERT(mem[i] == pattern,
+                           "Host expected 0x%x at gpa 0x%lx, got 0x%x",
+                           pattern, gpa + i, mem[i]);
+}
+
+/*
+ * Run memory conversion tests with explicit conversion:
+ * Execute KVM hypercall to map/unmap gpa range which will cause userspace exit
+ * to back/unback private memory. Subsequent accesses by guest to the gpa range
+ * will not cause exit to userspace.
+ *
+ * Test memory conversion scenarios with following steps:
+ * 1) Access private memory using private access and verify that memory contents
+ *   are not visible to userspace.
+ * 2) Convert memory to shared using explicit conversions and ensure that
+ *   userspace is able to access the shared regions.
+ * 3) Convert memory back to private using explicit conversions and ensure that
+ *   userspace is again not able to access converted private regions.
+ */
+
+#define GUEST_STAGE(o, s) { .offset = o, .size = s }
+
+enum ucall_syncs {
+       SYNC_SHARED,
+       SYNC_PRIVATE,
+};
+
+static void guest_sync_shared(uint64_t gpa, uint64_t size,
+                             uint8_t current_pattern, uint8_t new_pattern)
+{
+       GUEST_SYNC5(SYNC_SHARED, gpa, size, current_pattern, new_pattern);
+}
+
+static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern)
+{
+       GUEST_SYNC4(SYNC_PRIVATE, gpa, size, pattern);
+}
+
+/* Arbitrary values, KVM doesn't care about the attribute flags. */
+#define MAP_GPA_SET_ATTRIBUTES BIT(0)
+#define MAP_GPA_SHARED         BIT(1)
+#define MAP_GPA_DO_FALLOCATE   BIT(2)
+
+static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared,
+                         bool do_fallocate)
+{
+       uint64_t flags = MAP_GPA_SET_ATTRIBUTES;
+
+       if (map_shared)
+               flags |= MAP_GPA_SHARED;
+       if (do_fallocate)
+               flags |= MAP_GPA_DO_FALLOCATE;
+       kvm_hypercall_map_gpa_range(gpa, size, flags);
+}
+
+static void guest_map_shared(uint64_t gpa, uint64_t size, bool do_fallocate)
+{
+       guest_map_mem(gpa, size, true, do_fallocate);
+}
+
+static void guest_map_private(uint64_t gpa, uint64_t size, bool do_fallocate)
+{
+       guest_map_mem(gpa, size, false, do_fallocate);
+}
+
+struct {
+       uint64_t offset;
+       uint64_t size;
+} static const test_ranges[] = {
+       GUEST_STAGE(0, PAGE_SIZE),
+       GUEST_STAGE(0, SZ_2M),
+       GUEST_STAGE(PAGE_SIZE, PAGE_SIZE),
+       GUEST_STAGE(PAGE_SIZE, SZ_2M),
+       GUEST_STAGE(SZ_2M, PAGE_SIZE),
+};
+
+static void guest_test_explicit_conversion(uint64_t base_gpa, bool do_fallocate)
+{
+       const uint8_t def_p = 0xaa;
+       const uint8_t init_p = 0xcc;
+       uint64_t j;
+       int i;
+
+       /* Memory should be shared by default. */
+       memset((void *)base_gpa, def_p, PER_CPU_DATA_SIZE);
+       memcmp_g(base_gpa, def_p, PER_CPU_DATA_SIZE);
+       guest_sync_shared(base_gpa, PER_CPU_DATA_SIZE, def_p, init_p);
+
+       memcmp_g(base_gpa, init_p, PER_CPU_DATA_SIZE);
+
+       for (i = 0; i < ARRAY_SIZE(test_ranges); i++) {
+               uint64_t gpa = base_gpa + test_ranges[i].offset;
+               uint64_t size = test_ranges[i].size;
+               uint8_t p1 = 0x11;
+               uint8_t p2 = 0x22;
+               uint8_t p3 = 0x33;
+               uint8_t p4 = 0x44;
+
+               /*
+                * Set the test region to pattern one to differentiate it from
+                * the data range as a whole (contains the initial pattern).
+                */
+               memset((void *)gpa, p1, size);
+
+               /*
+                * Convert to private, set and verify the private data, and
+                * then verify that the rest of the data (map shared) still
+                * holds the initial pattern, and that the host always sees the
+                * shared memory (initial pattern).  Unlike shared memory,
+                * punching a hole in private memory is destructive, i.e.
+                * previous values aren't guaranteed to be preserved.
+                */
+               guest_map_private(gpa, size, do_fallocate);
+
+               if (size > PAGE_SIZE) {
+                       memset((void *)gpa, p2, PAGE_SIZE);
+                       goto skip;
+               }
+
+               memset((void *)gpa, p2, size);
+               guest_sync_private(gpa, size, p1);
+
+               /*
+                * Verify that the private memory was set to pattern two, and
+                * that shared memory still holds the initial pattern.
+                */
+               memcmp_g(gpa, p2, size);
+               if (gpa > base_gpa)
+                       memcmp_g(base_gpa, init_p, gpa - base_gpa);
+               if (gpa + size < base_gpa + PER_CPU_DATA_SIZE)
+                       memcmp_g(gpa + size, init_p,
+                                (base_gpa + PER_CPU_DATA_SIZE) - (gpa + size));
+
+               /*
+                * Convert odd-number page frames back to shared to verify KVM
+                * also correctly handles holes in private ranges.
+                */
+               for (j = 0; j < size; j += PAGE_SIZE) {
+                       if ((j >> PAGE_SHIFT) & 1) {
+                               guest_map_shared(gpa + j, PAGE_SIZE, do_fallocate);
+                               guest_sync_shared(gpa + j, PAGE_SIZE, p1, p3);
+
+                               memcmp_g(gpa + j, p3, PAGE_SIZE);
+                       } else {
+                               guest_sync_private(gpa + j, PAGE_SIZE, p1);
+                       }
+               }
+
+skip:
+               /*
+                * Convert the entire region back to shared, explicitly write
+                * pattern three to fill in the even-number frames before
+                * asking the host to verify (and write pattern four).
+                */
+               guest_map_shared(gpa, size, do_fallocate);
+               memset((void *)gpa, p3, size);
+               guest_sync_shared(gpa, size, p3, p4);
+               memcmp_g(gpa, p4, size);
+
+               /* Reset the shared memory back to the initial pattern. */
+               memset((void *)gpa, init_p, size);
+
+               /*
+                * Free (via PUNCH_HOLE) *all* private memory so that the next
+                * iteration starts from a clean slate, e.g. with respect to
+                * whether or not there are pages/folios in guest_mem.
+                */
+               guest_map_shared(base_gpa, PER_CPU_DATA_SIZE, true);
+       }
+}
+
+static void guest_punch_hole(uint64_t gpa, uint64_t size)
+{
+       /* "Mapping" memory shared via fallocate() is done via PUNCH_HOLE. */
+       uint64_t flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE;
+
+       kvm_hypercall_map_gpa_range(gpa, size, flags);
+}
+
+/*
+ * Test that PUNCH_HOLE actually frees memory by punching holes without doing a
+ * proper conversion.  Freeing (PUNCH_HOLE) should zap SPTEs, and reallocating
+ * (subsequent fault) should zero memory.
+ */
+static void guest_test_punch_hole(uint64_t base_gpa, bool precise)
+{
+       const uint8_t init_p = 0xcc;
+       int i;
+
+       /*
+        * Convert the entire range to private, this testcase is all about
+        * punching holes in guest_memfd, i.e. shared mappings aren't needed.
+        */
+       guest_map_private(base_gpa, PER_CPU_DATA_SIZE, false);
+
+       for (i = 0; i < ARRAY_SIZE(test_ranges); i++) {
+               uint64_t gpa = base_gpa + test_ranges[i].offset;
+               uint64_t size = test_ranges[i].size;
+
+               /*
+                * Free all memory before each iteration, even for the !precise
+                * case where the memory will be faulted back in.  Freeing and
+                * reallocating should obviously work, and freeing all memory
+                * minimizes the probability of cross-testcase influence.
+                */
+               guest_punch_hole(base_gpa, PER_CPU_DATA_SIZE);
+
+               /* Fault-in and initialize memory, and verify the pattern. */
+               if (precise) {
+                       memset((void *)gpa, init_p, size);
+                       memcmp_g(gpa, init_p, size);
+               } else {
+                       memset((void *)base_gpa, init_p, PER_CPU_DATA_SIZE);
+                       memcmp_g(base_gpa, init_p, PER_CPU_DATA_SIZE);
+               }
+
+               /*
+                * Punch a hole at the target range and verify that reads from
+                * the guest succeed and return zeroes.
+                */
+               guest_punch_hole(gpa, size);
+               memcmp_g(gpa, 0, size);
+       }
+}
+
+static void guest_code(uint64_t base_gpa)
+{
+       /*
+        * Run the conversion test twice, with and without doing fallocate() on
+        * the guest_memfd backing when converting between shared and private.
+        */
+       guest_test_explicit_conversion(base_gpa, false);
+       guest_test_explicit_conversion(base_gpa, true);
+
+       /*
+        * Run the PUNCH_HOLE test twice too, once with the entire guest_memfd
+        * faulted in, once with only the target range faulted in.
+        */
+       guest_test_punch_hole(base_gpa, false);
+       guest_test_punch_hole(base_gpa, true);
+       GUEST_DONE();
+}
+
+static void handle_exit_hypercall(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint64_t gpa = run->hypercall.args[0];
+       uint64_t size = run->hypercall.args[1] * PAGE_SIZE;
+       bool set_attributes = run->hypercall.args[2] & MAP_GPA_SET_ATTRIBUTES;
+       bool map_shared = run->hypercall.args[2] & MAP_GPA_SHARED;
+       bool do_fallocate = run->hypercall.args[2] & MAP_GPA_DO_FALLOCATE;
+       struct kvm_vm *vm = vcpu->vm;
+
+       TEST_ASSERT(run->hypercall.nr == KVM_HC_MAP_GPA_RANGE,
+                   "Wanted MAP_GPA_RANGE (%u), got '%llu'",
+                   KVM_HC_MAP_GPA_RANGE, run->hypercall.nr);
+
+       if (do_fallocate)
+               vm_guest_mem_fallocate(vm, gpa, size, map_shared);
+
+       if (set_attributes)
+               vm_set_memory_attributes(vm, gpa, size,
+                                        map_shared ? 0 : KVM_MEMORY_ATTRIBUTE_PRIVATE);
+       run->hypercall.ret = 0;
+}
+
+static bool run_vcpus;
+
+static void *__test_mem_conversions(void *__vcpu)
+{
+       struct kvm_vcpu *vcpu = __vcpu;
+       struct kvm_run *run = vcpu->run;
+       struct kvm_vm *vm = vcpu->vm;
+       struct ucall uc;
+
+       while (!READ_ONCE(run_vcpus))
+               ;
+
+       for ( ;; ) {
+               vcpu_run(vcpu);
+
+               if (run->exit_reason == KVM_EXIT_HYPERCALL) {
+                       handle_exit_hypercall(vcpu);
+                       continue;
+               }
+
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Wanted KVM_EXIT_IO, got exit reason: %u (%s)",
+                           run->exit_reason, exit_reason_str(run->exit_reason));
+
+               switch (get_ucall(vcpu, &uc)) {
+               case UCALL_ABORT:
+                       REPORT_GUEST_ASSERT(uc);
+               case UCALL_SYNC: {
+                       uint64_t gpa  = uc.args[1];
+                       size_t size = uc.args[2];
+                       size_t i;
+
+                       TEST_ASSERT(uc.args[0] == SYNC_SHARED ||
+                                   uc.args[0] == SYNC_PRIVATE,
+                                   "Unknown sync command '%ld'", uc.args[0]);
+
+                       for (i = 0; i < size; i += vm->page_size) {
+                               size_t nr_bytes = min_t(size_t, vm->page_size, size - i);
+                               uint8_t *hva = addr_gpa2hva(vm, gpa + i);
+
+                               /* In all cases, the host should observe the shared data. */
+                               memcmp_h(hva, gpa + i, uc.args[3], nr_bytes);
+
+                               /* For shared, write the new pattern to guest memory. */
+                               if (uc.args[0] == SYNC_SHARED)
+                                       memset(hva, uc.args[4], nr_bytes);
+                       }
+                       break;
+               }
+               case UCALL_DONE:
+                       return NULL;
+               default:
+                       TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+               }
+       }
+}
+
+static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t nr_vcpus,
+                                uint32_t nr_memslots)
+{
+       /*
+        * Allocate enough memory so that each vCPU's chunk of memory can be
+        * naturally aligned with respect to the size of the backing store.
+        */
+       const size_t alignment = max_t(size_t, SZ_2M, get_backing_src_pagesz(src_type));
+       const size_t per_cpu_size = align_up(PER_CPU_DATA_SIZE, alignment);
+       const size_t memfd_size = per_cpu_size * nr_vcpus;
+       const size_t slot_size = memfd_size / nr_memslots;
+       struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+       pthread_t threads[KVM_MAX_VCPUS];
+       struct kvm_vm *vm;
+       int memfd, i, r;
+
+       const struct vm_shape shape = {
+               .mode = VM_MODE_DEFAULT,
+               .type = KVM_X86_SW_PROTECTED_VM,
+       };
+
+       TEST_ASSERT(slot_size * nr_memslots == memfd_size,
+                   "The memfd size (0x%lx) needs to be cleanly divisible by the number of memslots (%u)",
+                   memfd_size, nr_memslots);
+       vm = __vm_create_with_vcpus(shape, nr_vcpus, 0, guest_code, vcpus);
+
+       vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, (1 << KVM_HC_MAP_GPA_RANGE));
+
+       memfd = vm_create_guest_memfd(vm, memfd_size, 0);
+
+       for (i = 0; i < nr_memslots; i++)
+               vm_mem_add(vm, src_type, BASE_DATA_GPA + slot_size * i,
+                          BASE_DATA_SLOT + i, slot_size / vm->page_size,
+                          KVM_MEM_GUEST_MEMFD, memfd, slot_size * i);
+
+       for (i = 0; i < nr_vcpus; i++) {
+               uint64_t gpa =  BASE_DATA_GPA + i * per_cpu_size;
+
+               vcpu_args_set(vcpus[i], 1, gpa);
+
+               /*
+                * Map only what is needed so that an out-of-bounds access
+                * results #PF => SHUTDOWN instead of data corruption.
+                */
+               virt_map(vm, gpa, gpa, PER_CPU_DATA_SIZE / vm->page_size);
+
+               pthread_create(&threads[i], NULL, __test_mem_conversions, vcpus[i]);
+       }
+
+       WRITE_ONCE(run_vcpus, true);
+
+       for (i = 0; i < nr_vcpus; i++)
+               pthread_join(threads[i], NULL);
+
+       kvm_vm_free(vm);
+
+       /*
+        * Allocate and free memory from the guest_memfd after closing the VM
+        * fd.  The guest_memfd is gifted a reference to its owning VM, i.e.
+        * should prevent the VM from being fully destroyed until the last
+        * reference to the guest_memfd is also put.
+        */
+       r = fallocate(memfd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, memfd_size);
+       TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
+
+       r = fallocate(memfd, FALLOC_FL_KEEP_SIZE, 0, memfd_size);
+       TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
+}
+
+static void usage(const char *cmd)
+{
+       puts("");
+       printf("usage: %s [-h] [-m nr_memslots] [-s mem_type] [-n nr_vcpus]\n", cmd);
+       puts("");
+       backing_src_help("-s");
+       puts("");
+       puts(" -n: specify the number of vcpus (default: 1)");
+       puts("");
+       puts(" -m: specify the number of memslots (default: 1)");
+       puts("");
+}
+
+int main(int argc, char *argv[])
+{
+       enum vm_mem_backing_src_type src_type = DEFAULT_VM_MEM_SRC;
+       uint32_t nr_memslots = 1;
+       uint32_t nr_vcpus = 1;
+       int opt;
+
+       TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM));
+
+       while ((opt = getopt(argc, argv, "hm:s:n:")) != -1) {
+               switch (opt) {
+               case 's':
+                       src_type = parse_backing_src_type(optarg);
+                       break;
+               case 'n':
+                       nr_vcpus = atoi_positive("nr_vcpus", optarg);
+                       break;
+               case 'm':
+                       nr_memslots = atoi_positive("nr_memslots", optarg);
+                       break;
+               case 'h':
+               default:
+                       usage(argv[0]);
+                       exit(0);
+               }
+       }
+
+       test_mem_conversions(src_type, nr_vcpus, nr_memslots);
+
+       return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/private_mem_kvm_exits_test.c b/tools/testing/selftests/kvm/x86_64/private_mem_kvm_exits_test.c
new file mode 100644 (file)
index 0000000..13e72fc
--- /dev/null
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, Google LLC.
+ */
+#include <linux/kvm.h>
+#include <pthread.h>
+#include <stdint.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+/* Arbitrarily selected to avoid overlaps with anything else */
+#define EXITS_TEST_GVA 0xc0000000
+#define EXITS_TEST_GPA EXITS_TEST_GVA
+#define EXITS_TEST_NPAGES 1
+#define EXITS_TEST_SIZE (EXITS_TEST_NPAGES * PAGE_SIZE)
+#define EXITS_TEST_SLOT 10
+
+static uint64_t guest_repeatedly_read(void)
+{
+       volatile uint64_t value;
+
+       while (true)
+               value = *((uint64_t *) EXITS_TEST_GVA);
+
+       return value;
+}
+
+static uint32_t run_vcpu_get_exit_reason(struct kvm_vcpu *vcpu)
+{
+       int r;
+
+       r = _vcpu_run(vcpu);
+       if (r) {
+               TEST_ASSERT(errno == EFAULT, KVM_IOCTL_ERROR(KVM_RUN, r));
+               TEST_ASSERT_EQ(vcpu->run->exit_reason, KVM_EXIT_MEMORY_FAULT);
+       }
+       return vcpu->run->exit_reason;
+}
+
+const struct vm_shape protected_vm_shape = {
+       .mode = VM_MODE_DEFAULT,
+       .type = KVM_X86_SW_PROTECTED_VM,
+};
+
+static void test_private_access_memslot_deleted(void)
+{
+       struct kvm_vm *vm;
+       struct kvm_vcpu *vcpu;
+       pthread_t vm_thread;
+       void *thread_return;
+       uint32_t exit_reason;
+
+       vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu,
+                                          guest_repeatedly_read);
+
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+                                   EXITS_TEST_GPA, EXITS_TEST_SLOT,
+                                   EXITS_TEST_NPAGES,
+                                   KVM_MEM_GUEST_MEMFD);
+
+       virt_map(vm, EXITS_TEST_GVA, EXITS_TEST_GPA, EXITS_TEST_NPAGES);
+
+       /* Request to access page privately */
+       vm_mem_set_private(vm, EXITS_TEST_GPA, EXITS_TEST_SIZE);
+
+       pthread_create(&vm_thread, NULL,
+                      (void *(*)(void *))run_vcpu_get_exit_reason,
+                      (void *)vcpu);
+
+       vm_mem_region_delete(vm, EXITS_TEST_SLOT);
+
+       pthread_join(vm_thread, &thread_return);
+       exit_reason = (uint32_t)(uint64_t)thread_return;
+
+       TEST_ASSERT_EQ(exit_reason, KVM_EXIT_MEMORY_FAULT);
+       TEST_ASSERT_EQ(vcpu->run->memory_fault.flags, KVM_MEMORY_EXIT_FLAG_PRIVATE);
+       TEST_ASSERT_EQ(vcpu->run->memory_fault.gpa, EXITS_TEST_GPA);
+       TEST_ASSERT_EQ(vcpu->run->memory_fault.size, EXITS_TEST_SIZE);
+
+       kvm_vm_free(vm);
+}
+
+static void test_private_access_memslot_not_private(void)
+{
+       struct kvm_vm *vm;
+       struct kvm_vcpu *vcpu;
+       uint32_t exit_reason;
+
+       vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu,
+                                          guest_repeatedly_read);
+
+       /* Add a non-private memslot (flags = 0) */
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+                                   EXITS_TEST_GPA, EXITS_TEST_SLOT,
+                                   EXITS_TEST_NPAGES, 0);
+
+       virt_map(vm, EXITS_TEST_GVA, EXITS_TEST_GPA, EXITS_TEST_NPAGES);
+
+       /* Request to access page privately */
+       vm_mem_set_private(vm, EXITS_TEST_GPA, EXITS_TEST_SIZE);
+
+       exit_reason = run_vcpu_get_exit_reason(vcpu);
+
+       TEST_ASSERT_EQ(exit_reason, KVM_EXIT_MEMORY_FAULT);
+       TEST_ASSERT_EQ(vcpu->run->memory_fault.flags, KVM_MEMORY_EXIT_FLAG_PRIVATE);
+       TEST_ASSERT_EQ(vcpu->run->memory_fault.gpa, EXITS_TEST_GPA);
+       TEST_ASSERT_EQ(vcpu->run->memory_fault.size, EXITS_TEST_SIZE);
+
+       kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+       TEST_REQUIRE(kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM));
+
+       test_private_access_memslot_deleted();
+       test_private_access_memslot_not_private();
+}
index 7ee44496cf97f62a9dcd2637fad26d210eac43be..0c7ce3d4e83a6f38fee498acdb62fb8076694ee7 100644 (file)
@@ -103,7 +103,7 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i
 
        run_guest(vmcb, svm->vmcb_gpa);
        __GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
-                      "Expected VMMCAL #VMEXIT, got '0x%x', info1 = '0x%llx, info2 = '0x%llx'",
+                      "Expected VMMCAL #VMEXIT, got '0x%x', info1 = '0x%lx, info2 = '0x%lx'",
                       vmcb->control.exit_code,
                       vmcb->control.exit_info_1, vmcb->control.exit_info_2);
 
@@ -133,7 +133,7 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i
 
        run_guest(vmcb, svm->vmcb_gpa);
        __GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_HLT,
-                      "Expected HLT #VMEXIT, got '0x%x', info1 = '0x%llx, info2 = '0x%llx'",
+                      "Expected HLT #VMEXIT, got '0x%x', info1 = '0x%lx, info2 = '0x%lx'",
                       vmcb->control.exit_code,
                       vmcb->control.exit_info_1, vmcb->control.exit_info_2);
 
index 85f34ca7e49e531c11e09f700738d0eec2a0e0fa..0ed32ec903d03548ce11fa5bcc42eba329808506 100644 (file)
@@ -271,7 +271,7 @@ int main(int argc, char *argv[])
 
        kvm_check_cap(KVM_CAP_MCE);
 
-       vm = __vm_create(VM_MODE_DEFAULT, 3, 0);
+       vm = __vm_create(VM_SHAPE_DEFAULT, 3, 0);
 
        kvm_ioctl(vm->kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED,
                  &supported_mcg_caps);
index ebbcb0a3f7438720932decc70b6539a607319f0a..2a8d4ac2f0204780498dd42b6343deb5fbb04c9b 100644 (file)
@@ -56,7 +56,7 @@ static void guest_test_perf_capabilities_gp(uint64_t val)
        uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val);
 
        __GUEST_ASSERT(vector == GP_VECTOR,
-                      "Expected #GP for value '0x%llx', got vector '0x%x'",
+                      "Expected #GP for value '0x%lx', got vector '0x%x'",
                       val, vector);
 }
 
index 41ea7028a1f8d97aee9bda26890cae26cbded247..67a62a5a88951a11c969478f81da136672744e07 100644 (file)
@@ -125,21 +125,25 @@ void test_vmx_nested_state(struct kvm_vcpu *vcpu)
 
        /*
         * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
-        * setting the nested state but flags other than eVMCS must be clear.
-        * The eVMCS flag can be set if the enlightened VMCS capability has
-        * been enabled.
+        * setting the nested state. When the eVMCS flag is not set, the
+        * expected return value is '0'.
         */
        set_default_vmx_state(state, state_sz);
+       state->flags = 0;
        state->hdr.vmx.vmxon_pa = -1ull;
        state->hdr.vmx.vmcs12_pa = -1ull;
-       test_nested_state_expect_einval(vcpu, state);
+       test_nested_state(vcpu, state);
 
-       state->flags &= KVM_STATE_NESTED_EVMCS;
+       /*
+        * When eVMCS is supported, the eVMCS flag can only be set if the
+        * enlightened VMCS capability has been enabled.
+        */
        if (have_evmcs) {
+               state->flags = KVM_STATE_NESTED_EVMCS;
                test_nested_state_expect_einval(vcpu, state);
                vcpu_enable_evmcs(vcpu);
+               test_nested_state(vcpu, state);
        }
-       test_nested_state(vcpu, state);
 
        /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
        state->hdr.vmx.smm.flags = 1;
index 77d04a7bdaddd59d04f723d2b6858ca950eb9534..dc6217440db3ae193fd9bfbcfbaadea223e57c03 100644 (file)
@@ -25,7 +25,7 @@ do {                                                                                  \
                                                                                        \
        __GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) ||                    \
                       __supported == ((xfeatures) | (dependencies)),                   \
-                      "supported = 0x%llx, xfeatures = 0x%llx, dependencies = 0x%llx", \
+                      "supported = 0x%lx, xfeatures = 0x%llx, dependencies = 0x%llx",  \
                       __supported, (xfeatures), (dependencies));                       \
 } while (0)
 
@@ -42,7 +42,7 @@ do {                                                                  \
        uint64_t __supported = (supported_xcr0) & (xfeatures);          \
                                                                        \
        __GUEST_ASSERT(!__supported || __supported == (xfeatures),      \
-                      "supported = 0x%llx, xfeatures = 0x%llx",        \
+                      "supported = 0x%lx, xfeatures = 0x%llx",         \
                       __supported, (xfeatures));                       \
 } while (0)
 
@@ -81,7 +81,7 @@ static void guest_code(void)
 
        vector = xsetbv_safe(0, supported_xcr0);
        __GUEST_ASSERT(!vector,
-                      "Expected success on XSETBV(0x%llx), got vector '0x%x'",
+                      "Expected success on XSETBV(0x%lx), got vector '0x%x'",
                       supported_xcr0, vector);
 
        for (i = 0; i < 64; i++) {
@@ -90,7 +90,7 @@ static void guest_code(void)
 
                vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
                __GUEST_ASSERT(vector == GP_VECTOR,
-                              "Expected #GP on XSETBV(0x%llx), supported XCR0 = %llx, got vector '0x%x'",
+                              "Expected #GP on XSETBV(0x%llx), supported XCR0 = %lx, got vector '0x%x'",
                               BIT_ULL(i), supported_xcr0, vector);
        }
 
index 5b79758cae627593c68b9fd465451efcf7b75f9f..e64bbdf0e86eac8bf1751ee287508aca1a7ed27c 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <errno.h>
 #include <linux/landlock.h>
+#include <linux/securebits.h>
 #include <sys/capability.h>
 #include <sys/socket.h>
 #include <sys/syscall.h>
@@ -115,11 +116,16 @@ static void _init_caps(struct __test_metadata *const _metadata, bool drop_all)
                /* clang-format off */
                CAP_DAC_OVERRIDE,
                CAP_MKNOD,
+               CAP_NET_ADMIN,
+               CAP_NET_BIND_SERVICE,
                CAP_SYS_ADMIN,
                CAP_SYS_CHROOT,
-               CAP_NET_BIND_SERVICE,
                /* clang-format on */
        };
+       const unsigned int noroot = SECBIT_NOROOT | SECBIT_NOROOT_LOCKED;
+
+       if ((cap_get_secbits() & noroot) != noroot)
+               EXPECT_EQ(0, cap_set_secbits(noroot));
 
        cap_p = cap_get_proc();
        EXPECT_NE(NULL, cap_p)
@@ -137,6 +143,8 @@ static void _init_caps(struct __test_metadata *const _metadata, bool drop_all)
                        TH_LOG("Failed to cap_set_flag: %s", strerror(errno));
                }
        }
+
+       /* Automatically resets ambient capabilities. */
        EXPECT_NE(-1, cap_set_proc(cap_p))
        {
                TH_LOG("Failed to cap_set_proc: %s", strerror(errno));
@@ -145,6 +153,9 @@ static void _init_caps(struct __test_metadata *const _metadata, bool drop_all)
        {
                TH_LOG("Failed to cap_free: %s", strerror(errno));
        }
+
+       /* Quickly checks that ambient capabilities are cleared. */
+       EXPECT_NE(-1, cap_get_ambient(caps[0]));
 }
 
 /* We cannot put such helpers in a library because of kselftest_harness.h . */
@@ -158,8 +169,9 @@ static void __maybe_unused drop_caps(struct __test_metadata *const _metadata)
        _init_caps(_metadata, true);
 }
 
-static void _effective_cap(struct __test_metadata *const _metadata,
-                          const cap_value_t caps, const cap_flag_value_t value)
+static void _change_cap(struct __test_metadata *const _metadata,
+                       const cap_flag_t flag, const cap_value_t cap,
+                       const cap_flag_value_t value)
 {
        cap_t cap_p;
 
@@ -168,7 +180,7 @@ static void _effective_cap(struct __test_metadata *const _metadata,
        {
                TH_LOG("Failed to cap_get_proc: %s", strerror(errno));
        }
-       EXPECT_NE(-1, cap_set_flag(cap_p, CAP_EFFECTIVE, 1, &caps, value))
+       EXPECT_NE(-1, cap_set_flag(cap_p, flag, 1, &cap, value))
        {
                TH_LOG("Failed to cap_set_flag: %s", strerror(errno));
        }
@@ -183,15 +195,35 @@ static void _effective_cap(struct __test_metadata *const _metadata,
 }
 
 static void __maybe_unused set_cap(struct __test_metadata *const _metadata,
-                                  const cap_value_t caps)
+                                  const cap_value_t cap)
 {
-       _effective_cap(_metadata, caps, CAP_SET);
+       _change_cap(_metadata, CAP_EFFECTIVE, cap, CAP_SET);
 }
 
 static void __maybe_unused clear_cap(struct __test_metadata *const _metadata,
-                                    const cap_value_t caps)
+                                    const cap_value_t cap)
+{
+       _change_cap(_metadata, CAP_EFFECTIVE, cap, CAP_CLEAR);
+}
+
+static void __maybe_unused
+set_ambient_cap(struct __test_metadata *const _metadata, const cap_value_t cap)
+{
+       _change_cap(_metadata, CAP_INHERITABLE, cap, CAP_SET);
+
+       EXPECT_NE(-1, cap_set_ambient(cap, CAP_SET))
+       {
+               TH_LOG("Failed to set ambient capability %d: %s", cap,
+                      strerror(errno));
+       }
+}
+
+static void __maybe_unused clear_ambient_cap(
+       struct __test_metadata *const _metadata, const cap_value_t cap)
 {
-       _effective_cap(_metadata, caps, CAP_CLEAR);
+       EXPECT_EQ(1, cap_get_ambient(cap));
+       _change_cap(_metadata, CAP_INHERITABLE, cap, CAP_CLEAR);
+       EXPECT_EQ(0, cap_get_ambient(cap));
 }
 
 /* Receives an FD from a UNIX socket. Returns the received FD, or -errno. */
index 50818904397c577e6953b7fd66bbfe894827b120..2d6d9b43d958cfb7c247e2cfa1fdbdf7a48c4c08 100644 (file)
@@ -241,9 +241,11 @@ struct mnt_opt {
        const char *const data;
 };
 
-const struct mnt_opt mnt_tmp = {
+#define MNT_TMP_DATA "size=4m,mode=700"
+
+static const struct mnt_opt mnt_tmp = {
        .type = "tmpfs",
-       .data = "size=4m,mode=700",
+       .data = MNT_TMP_DATA,
 };
 
 static int mount_opt(const struct mnt_opt *const mnt, const char *const target)
@@ -4632,7 +4634,10 @@ FIXTURE_VARIANT(layout3_fs)
 /* clang-format off */
 FIXTURE_VARIANT_ADD(layout3_fs, tmpfs) {
        /* clang-format on */
-       .mnt = mnt_tmp,
+       .mnt = {
+               .type = "tmpfs",
+               .data = MNT_TMP_DATA,
+       },
        .file_path = file1_s1d1,
 };
 
index ea5f727dd25778df7def21365eae073981ee08fe..936cfc879f1d2c419195338a8af04c095fe770f8 100644 (file)
@@ -17,6 +17,7 @@
 #include <string.h>
 #include <sys/prctl.h>
 #include <sys/socket.h>
+#include <sys/syscall.h>
 #include <sys/un.h>
 
 #include "common.h"
@@ -54,6 +55,11 @@ struct service_fixture {
        };
 };
 
+static pid_t sys_gettid(void)
+{
+       return syscall(__NR_gettid);
+}
+
 static int set_service(struct service_fixture *const srv,
                       const struct protocol_variant prot,
                       const unsigned short index)
@@ -88,7 +94,7 @@ static int set_service(struct service_fixture *const srv,
        case AF_UNIX:
                srv->unix_addr.sun_family = prot.domain;
                sprintf(srv->unix_addr.sun_path,
-                       "_selftests-landlock-net-tid%d-index%d", gettid(),
+                       "_selftests-landlock-net-tid%d-index%d", sys_gettid(),
                        index);
                srv->unix_addr_len = SUN_LEN(&srv->unix_addr);
                srv->unix_addr.sun_path[0] = '\0';
@@ -101,8 +107,11 @@ static void setup_loopback(struct __test_metadata *const _metadata)
 {
        set_cap(_metadata, CAP_SYS_ADMIN);
        ASSERT_EQ(0, unshare(CLONE_NEWNET));
-       ASSERT_EQ(0, system("ip link set dev lo up"));
        clear_cap(_metadata, CAP_SYS_ADMIN);
+
+       set_ambient_cap(_metadata, CAP_NET_ADMIN);
+       ASSERT_EQ(0, system("ip link set dev lo up"));
+       clear_ambient_cap(_metadata, CAP_NET_ADMIN);
 }
 
 static bool is_restricted(const struct protocol_variant *const prot,
index c8416c54b4637b1380810f0c9f71bc99e1710b8e..b1fd7362c2feec339228036dace5d28fe1b1719b 100644 (file)
@@ -42,17 +42,6 @@ function die() {
        exit 1
 }
 
-# save existing dmesg so we can detect new content
-function save_dmesg() {
-       SAVED_DMESG=$(mktemp --tmpdir -t klp-dmesg-XXXXXX)
-       dmesg > "$SAVED_DMESG"
-}
-
-# cleanup temporary dmesg file from save_dmesg()
-function cleanup_dmesg_file() {
-       rm -f "$SAVED_DMESG"
-}
-
 function push_config() {
        DYNAMIC_DEBUG=$(grep '^kernel/livepatch' /sys/kernel/debug/dynamic_debug/control | \
                        awk -F'[: ]' '{print "file " $1 " line " $2 " " $4}')
@@ -99,7 +88,6 @@ function set_ftrace_enabled() {
 
 function cleanup() {
        pop_config
-       cleanup_dmesg_file
 }
 
 # setup_config - save the current config and set a script exit trap that
@@ -280,7 +268,15 @@ function set_pre_patch_ret {
 function start_test {
        local test="$1"
 
-       save_dmesg
+       # Dump something unique into the dmesg log, then stash the entry
+       # in LAST_DMESG.  The check_result() function will use it to
+       # find new kernel messages since the test started.
+       local last_dmesg_msg="livepatch kselftest timestamp: $(date --rfc-3339=ns)"
+       log "$last_dmesg_msg"
+       loop_until 'dmesg | grep -q "$last_dmesg_msg"' ||
+               die "buffer busy? can't find canary dmesg message: $last_dmesg_msg"
+       LAST_DMESG=$(dmesg | grep "$last_dmesg_msg")
+
        echo -n "TEST: $test ... "
        log "===== TEST: $test ====="
 }
@@ -291,23 +287,24 @@ function check_result {
        local expect="$*"
        local result
 
-       # Note: when comparing dmesg output, the kernel log timestamps
-       # help differentiate repeated testing runs.  Remove them with a
-       # post-comparison sed filter.
-
-       result=$(dmesg | comm --nocheck-order -13 "$SAVED_DMESG" - | \
+       # Test results include any new dmesg entry since LAST_DMESG, then:
+       # - include lines matching keywords
+       # - exclude lines matching keywords
+       # - filter out dmesg timestamp prefixes
+       result=$(dmesg | awk -v last_dmesg="$LAST_DMESG" 'p; $0 == last_dmesg { p=1 }' | \
                 grep -e 'livepatch:' -e 'test_klp' | \
                 grep -v '\(tainting\|taints\) kernel' | \
                 sed 's/^\[[ 0-9.]*\] //')
 
        if [[ "$expect" == "$result" ]] ; then
                echo "ok"
+       elif [[ "$result" == "" ]] ; then
+               echo -e "not ok\n\nbuffer overrun? can't find canary dmesg entry: $LAST_DMESG\n"
+               die "livepatch kselftest(s) failed"
        else
                echo -e "not ok\n\n$(diff -upr --label expected --label result <(echo "$expect") <(echo "$result"))\n"
                die "livepatch kselftest(s) failed"
        fi
-
-       cleanup_dmesg_file
 }
 
 # check_sysfs_rights(modname, rel_path, expected_rights) - check sysfs
index 0899019a7fcb4b04bcedca44227f2c2dd5a83597..e14bdd4455f2d2798077b8a701790bcee0732e90 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 # Kselftest framework requirement - SKIP code is 4.
index 5b354c209e936f825cb4156c60ac0836e0be8288..894d28c3dd4785fcd4d14eae8caefc50a23aaa13 100644 (file)
 #include <unistd.h>
 #include <sys/mman.h>
 #include <fcntl.h>
-
-#define MAP_LENGTH             (2UL * 1024 * 1024)
-
-#define PAGE_SIZE              4096
+#include "vm_util.h"
 
 #define PAGE_COMPOUND_HEAD     (1UL << 15)
 #define PAGE_COMPOUND_TAIL     (1UL << 16)
@@ -39,6 +36,9 @@
 #define MAP_FLAGS              (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB)
 #endif
 
+static size_t pagesize;
+static size_t maplength;
+
 static void write_bytes(char *addr, size_t length)
 {
        unsigned long i;
@@ -56,7 +56,7 @@ static unsigned long virt_to_pfn(void *addr)
        if (fd < 0)
                return -1UL;
 
-       lseek(fd, (unsigned long)addr / PAGE_SIZE * sizeof(pagemap), SEEK_SET);
+       lseek(fd, (unsigned long)addr / pagesize * sizeof(pagemap), SEEK_SET);
        read(fd, &pagemap, sizeof(pagemap));
        close(fd);
 
@@ -86,7 +86,7 @@ static int check_page_flags(unsigned long pfn)
         * this also verifies kernel has correctly set the fake page_head to tail
         * while hugetlb_free_vmemmap is enabled.
         */
-       for (i = 1; i < MAP_LENGTH / PAGE_SIZE; i++) {
+       for (i = 1; i < maplength / pagesize; i++) {
                read(fd, &pageflags, sizeof(pageflags));
                if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
                    (pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
@@ -106,18 +106,25 @@ int main(int argc, char **argv)
        void *addr;
        unsigned long pfn;
 
-       addr = mmap(MAP_ADDR, MAP_LENGTH, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
+       pagesize  = psize();
+       maplength = default_huge_page_size();
+       if (!maplength) {
+               printf("Unable to determine huge page size\n");
+               exit(1);
+       }
+
+       addr = mmap(MAP_ADDR, maplength, PROT_READ | PROT_WRITE, MAP_FLAGS, -1, 0);
        if (addr == MAP_FAILED) {
                perror("mmap");
                exit(1);
        }
 
        /* Trigger allocation of HugeTLB page. */
-       write_bytes(addr, MAP_LENGTH);
+       write_bytes(addr, maplength);
 
        pfn = virt_to_pfn(addr);
        if (pfn == -1UL) {
-               munmap(addr, MAP_LENGTH);
+               munmap(addr, maplength);
                perror("virt_to_pfn");
                exit(1);
        }
@@ -125,13 +132,13 @@ int main(int argc, char **argv)
        printf("Returned address is %p whose pfn is %lx\n", addr, pfn);
 
        if (check_page_flags(pfn) < 0) {
-               munmap(addr, MAP_LENGTH);
+               munmap(addr, maplength);
                perror("check_page_flags");
                exit(1);
        }
 
        /* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
-       if (munmap(addr, MAP_LENGTH)) {
+       if (munmap(addr, maplength)) {
                perror("munmap");
                exit(1);
        }
index 380b691d3eb9fbe9c1070937d9561b732343aec0..b748c48908d9d4af9ba31fe7d2443329c13c3dc2 100644 (file)
@@ -566,7 +566,7 @@ static int ksm_merge_hugepages_time(int merge_type, int mapping, int prot,
        if (map_ptr_orig == MAP_FAILED)
                err(2, "initial mmap");
 
-       if (madvise(map_ptr, len + HPAGE_SIZE, MADV_HUGEPAGE))
+       if (madvise(map_ptr, len, MADV_HUGEPAGE))
                err(2, "MADV_HUGEPAGE");
 
        pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
index 193281560b61be23d3b55857030ea07b4ad3f95d..86e8f2048a409028b28ece3f755d06f535726c47 100644 (file)
@@ -15,6 +15,7 @@
 #include <unistd.h>
 #include <sys/mman.h>
 #include <fcntl.h>
+#include "vm_util.h"
 
 #define LENGTH (256UL*1024*1024)
 #define PROTECTION (PROT_READ | PROT_WRITE)
@@ -58,10 +59,16 @@ int main(int argc, char **argv)
 {
        void *addr;
        int ret;
+       size_t hugepage_size;
        size_t length = LENGTH;
        int flags = FLAGS;
        int shift = 0;
 
+       hugepage_size = default_huge_page_size();
+       /* munmap will fail if the length is not page aligned */
+       if (hugepage_size > length)
+               length = hugepage_size;
+
        if (argc > 1)
                length = atol(argv[1]) << 20;
        if (argc > 2) {
index 1d4c1589c3055d3bb22eebe2c02fa7b015e4a665..2f8b991f78cb4cade90dc05f502a647a955fb582 100644 (file)
@@ -360,7 +360,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
                              char pattern_seed)
 {
        void *addr, *src_addr, *dest_addr, *dest_preamble_addr;
-       unsigned long long i;
+       int d;
+       unsigned long long t;
        struct timespec t_start = {0, 0}, t_end = {0, 0};
        long long  start_ns, end_ns, align_mask, ret, offset;
        unsigned long long threshold;
@@ -378,8 +379,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
 
        /* Set byte pattern for source block. */
        srand(pattern_seed);
-       for (i = 0; i < threshold; i++)
-               memset((char *) src_addr + i, (char) rand(), 1);
+       for (t = 0; t < threshold; t++)
+               memset((char *) src_addr + t, (char) rand(), 1);
 
        /* Mask to zero out lower bits of address for alignment */
        align_mask = ~(c.dest_alignment - 1);
@@ -420,8 +421,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
 
                /* Set byte pattern for the dest preamble block. */
                srand(pattern_seed);
-               for (i = 0; i < c.dest_preamble_size; i++)
-                       memset((char *) dest_preamble_addr + i, (char) rand(), 1);
+               for (d = 0; d < c.dest_preamble_size; d++)
+                       memset((char *) dest_preamble_addr + d, (char) rand(), 1);
        }
 
        clock_gettime(CLOCK_MONOTONIC, &t_start);
@@ -437,14 +438,14 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
 
        /* Verify byte pattern after remapping */
        srand(pattern_seed);
-       for (i = 0; i < threshold; i++) {
+       for (t = 0; t < threshold; t++) {
                char c = (char) rand();
 
-               if (((char *) dest_addr)[i] != c) {
+               if (((char *) dest_addr)[t] != c) {
                        ksft_print_msg("Data after remap doesn't match at offset %llu\n",
-                                      i);
+                                      t);
                        ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
-                                       ((char *) dest_addr)[i] & 0xff);
+                                       ((char *) dest_addr)[t] & 0xff);
                        ret = -1;
                        goto clean_up_dest;
                }
@@ -453,14 +454,14 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
        /* Verify the dest preamble byte pattern after remapping */
        if (c.dest_preamble_size) {
                srand(pattern_seed);
-               for (i = 0; i < c.dest_preamble_size; i++) {
+               for (d = 0; d < c.dest_preamble_size; d++) {
                        char c = (char) rand();
 
-                       if (((char *) dest_preamble_addr)[i] != c) {
+                       if (((char *) dest_preamble_addr)[d] != c) {
                                ksft_print_msg("Preamble data after remap doesn't match at offset %d\n",
-                                              i);
+                                              d);
                                ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
-                                              ((char *) dest_preamble_addr)[i] & 0xff);
+                                              ((char *) dest_preamble_addr)[d] & 0xff);
                                ret = -1;
                                goto clean_up_dest;
                        }
index 45cae7cab27e12705c59cc56f6fdf5e675805f92..a0a75f3029043727b96bdb59728ed80d4d5cd9c0 100755 (executable)
@@ -29,9 +29,15 @@ check_supported_x86_64()
        # See man 1 gzip under '-f'.
        local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2)
 
+       local cpu_supports_pl5=$(awk '/^flags/ {if (/la57/) {print 0;}
+               else {print 1}; exit}' /proc/cpuinfo 2>/dev/null)
+
        if [[ "${pg_table_levels}" -lt 5 ]]; then
                echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test"
                exit $ksft_skip
+       elif [[ "${cpu_supports_pl5}" -ne 0 ]]; then
+               echo "$0: CPU does not have the necessary la57 flag to support page table level 5"
+               exit $ksft_skip
        fi
 }
 
index 70a02301f4c276ba6313c3baa1ab3b5058a68b0c..3d2d2eb9d6fff077cca24fd82a2a4990c34706d1 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 set -e
index 50818075e566e1abf1f2f9e587951e5abed238fc..211753756bdee87daf7ebb1af06dbb4c1f6ee383 100644 (file)
@@ -53,8 +53,7 @@ TEST_PROGS += bind_bhash.sh
 TEST_PROGS += ip_local_port_range.sh
 TEST_PROGS += rps_default_mask.sh
 TEST_PROGS += big_tcp.sh
-TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
-TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh lib.sh
+TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh
 TEST_GEN_FILES =  socket nettest
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
 TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
@@ -84,6 +83,7 @@ TEST_PROGS += sctp_vrf.sh
 TEST_GEN_FILES += sctp_hello
 TEST_GEN_FILES += csum
 TEST_GEN_FILES += nat6to4.o
+TEST_GEN_FILES += xdp_dummy.o
 TEST_GEN_FILES += ip_local_port_range
 TEST_GEN_FILES += bind_wildcard
 TEST_PROGS += test_vxlan_mdb.sh
@@ -95,6 +95,7 @@ TEST_PROGS += fq_band_pktlimit.sh
 TEST_PROGS += vlan_hw_filter.sh
 
 TEST_FILES := settings
+TEST_FILES += in_netns.sh lib.sh net_helper.sh setup_loopback.sh setup_veth.sh
 
 include ../lib.mk
 
@@ -104,7 +105,7 @@ $(OUTPUT)/tcp_inq: LDLIBS += -lpthread
 $(OUTPUT)/bind_bhash: LDLIBS += -lpthread
 $(OUTPUT)/io_uring_zerocopy_tx: CFLAGS += -I../../../include/
 
-# Rules to generate bpf obj nat6to4.o
+# Rules to generate bpf objs
 CLANG ?= clang
 SCRATCH_DIR := $(OUTPUT)/tools
 BUILD_DIR := $(SCRATCH_DIR)/build
@@ -139,7 +140,7 @@ endif
 
 CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
 
-$(OUTPUT)/nat6to4.o: nat6to4.c $(BPFOBJ) | $(MAKE_DIRS)
+$(OUTPUT)/nat6to4.o $(OUTPUT)/xdp_dummy.o: $(OUTPUT)/%.o : %.c $(BPFOBJ) | $(MAKE_DIRS)
        $(CLANG) -O2 --target=bpf -c $< $(CCINCLUDE) $(CLANG_SYS_INCLUDES) -o $@
 
 $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile)                    \
index cde9a91c479716e178c569cac18bedc9698357b6..2db9d15cd45feafc007669ae358372249c74151a 100755 (executable)
@@ -122,7 +122,9 @@ do_netperf() {
        local netns=$1
 
        [ "$NF" = "6" ] && serip=$SERVER_IP6
-       ip net exec $netns netperf -$NF -t TCP_STREAM -H $serip 2>&1 >/dev/null
+
+       # use large write to be sure to generate big tcp packets
+       ip net exec $netns netperf -$NF -t TCP_STREAM -l 1 -H $serip -- -m 262144 2>&1 >/dev/null
 }
 
 do_test() {
index f30bd57d5e38744de09a86cab47ed4999ba46e25..8bc23fb4c82b71c88c6a0f292e8735b831a5d03f 100755 (executable)
@@ -89,7 +89,7 @@ for ovr in setsock cmsg both diff; do
        check_result $? 0 "TCLASS $prot $ovr - pass"
 
        while [ -d /proc/$BG ]; do
-           $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
+           $NSEXE ./cmsg_sender -6 -p $p $m $((TOS2)) $TGT6 1234
        done
 
        tcpdump -r $TMPF -v 2>&1 | grep "class $TOS2" >> /dev/null
@@ -126,7 +126,7 @@ for ovr in setsock cmsg both diff; do
        check_result $? 0 "HOPLIMIT $prot $ovr - pass"
 
        while [ -d /proc/$BG ]; do
-           $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
+           $NSEXE ./cmsg_sender -6 -p $p $m $LIM $TGT6 1234
        done
 
        tcpdump -r $TMPF -v 2>&1 | grep "hlim $LIM[^0-9]" >> /dev/null
index 8da562a9ae87e445a7e3003c4e07f589e3f85f0d..3b749addd364040491010ab9d4b50a19c4e3b643 100644 (file)
@@ -1,5 +1,6 @@
 CONFIG_USER_NS=y
 CONFIG_NET_NS=y
+CONFIG_BONDING=m
 CONFIG_BPF_SYSCALL=y
 CONFIG_TEST_BPF=m
 CONFIG_NUMA=y
@@ -14,9 +15,16 @@ CONFIG_VETH=y
 CONFIG_NET_IPVTI=y
 CONFIG_IPV6_VTI=y
 CONFIG_DUMMY=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
 CONFIG_BRIDGE=y
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_VLAN_8021Q=y
+CONFIG_GENEVE=m
 CONFIG_IFB=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_ESP=y
+CONFIG_INET_ESP_OFFLOAD=y
+CONFIG_IP_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NETFILTER_ADVANCED=y
 CONFIG_NF_CONNTRACK=m
@@ -24,16 +32,49 @@ CONFIG_NF_NAT=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_RAW=m
 CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_L2TP_ETH=m
+CONFIG_L2TP_IP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_V3=y
+CONFIG_MACSEC=m
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MPLS=y
+CONFIG_MPTCP=y
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_IPV6=y
 CONFIG_NF_TABLES_IPV4=y
 CONFIG_NFT_NAT=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_CT=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_HTB=m
 CONFIG_NET_SCH_FQ=m
 CONFIG_NET_SCH_ETF=m
 CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_PSAMPLE=m
+CONFIG_TCP_MD5SIG=y
 CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_KALLSYMS=y
+CONFIG_TLS=m
 CONFIG_TRACEPOINTS=y
 CONFIG_NET_DROP_MONITOR=m
 CONFIG_NETDEVSIM=m
@@ -48,7 +89,10 @@ CONFIG_BAREUDP=m
 CONFIG_IPV6_IOAM6_LWTUNNEL=y
 CONFIG_CRYPTO_SM4_GENERIC=y
 CONFIG_AMT=m
+CONFIG_TUN=y
 CONFIG_VXLAN=m
 CONFIG_IP_SCTP=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_CRYPTO_ARIA=y
+CONFIG_XFRM_INTERFACE=m
+CONFIG_XFRM_USER=m
index 452693514be4b06842dbe32088c5495c2c933f0b..4de92632f48360c0002900260af9b35d34186f4b 100644 (file)
@@ -112,7 +112,7 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \
        vxlan_symmetric_ipv6.sh \
        vxlan_symmetric.sh
 
-TEST_PROGS_EXTENDED := devlink_lib.sh \
+TEST_FILES := devlink_lib.sh \
        ethtool_lib.sh \
        fib_offload_lib.sh \
        forwarding.config.sample \
old mode 100755 (executable)
new mode 100644 (file)
index dca549443801135cf8db35f9545885a6ab772504..f9fe182dfbd44e9de0f0caa27b409281c0584081 100644 (file)
@@ -4,6 +4,9 @@
 ##############################################################################
 # Defines
 
+WAIT_TIMEOUT=${WAIT_TIMEOUT:=20}
+BUSYWAIT_TIMEOUT=$((WAIT_TIMEOUT * 1000)) # ms
+
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 # namespace list created by setup_ns
@@ -48,7 +51,7 @@ cleanup_ns()
 
        for ns in "$@"; do
                ip netns delete "${ns}" &> /dev/null
-               if ! busywait 2 ip netns list \| grep -vq "^$ns$" &> /dev/null; then
+               if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
                        echo "Warn: Failed to remove namespace $ns"
                        ret=1
                fi
index e317c2e44dae840149fad7fe14a3a41d699b063e..4f80014cae4940a3f56ebb313349baa8540c0a0a 100644 (file)
@@ -22,8 +22,11 @@ CONFIG_NFT_TPROXY=m
 CONFIG_NFT_SOCKET=m
 CONFIG_IP_ADVANCED_ROUTER=y
 CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IP6_NF_FILTER=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_CLS_ACT=y
index 3a5b630261910b1cfdd001e05c97c07335b61827..c07386e21e0a4aa10b004cb820488f15ff18dd7a 100755 (executable)
@@ -643,13 +643,6 @@ kill_events_pids()
        mptcp_lib_kill_wait $evts_ns2_pid
 }
 
-kill_tests_wait()
-{
-       #shellcheck disable=SC2046
-       kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1)
-       wait
-}
-
 pm_nl_set_limits()
 {
        local ns=$1
@@ -3453,7 +3446,7 @@ userspace_tests()
                chk_mptcp_info subflows 0 subflows 0
                chk_subflows_total 1 1
                kill_events_pids
-               wait $tests_pid
+               mptcp_lib_kill_wait $tests_pid
        fi
 
        # userspace pm create destroy subflow
@@ -3475,7 +3468,7 @@ userspace_tests()
                chk_mptcp_info subflows 0 subflows 0
                chk_subflows_total 1 1
                kill_events_pids
-               wait $tests_pid
+               mptcp_lib_kill_wait $tests_pid
        fi
 
        # userspace pm create id 0 subflow
@@ -3494,7 +3487,7 @@ userspace_tests()
                chk_mptcp_info subflows 1 subflows 1
                chk_subflows_total 2 2
                kill_events_pids
-               wait $tests_pid
+               mptcp_lib_kill_wait $tests_pid
        fi
 
        # userspace pm remove initial subflow
@@ -3518,7 +3511,7 @@ userspace_tests()
                chk_mptcp_info subflows 1 subflows 1
                chk_subflows_total 1 1
                kill_events_pids
-               wait $tests_pid
+               mptcp_lib_kill_wait $tests_pid
        fi
 
        # userspace pm send RM_ADDR for ID 0
@@ -3544,7 +3537,7 @@ userspace_tests()
                chk_mptcp_info subflows 1 subflows 1
                chk_subflows_total 1 1
                kill_events_pids
-               wait $tests_pid
+               mptcp_lib_kill_wait $tests_pid
        fi
 }
 
@@ -3558,7 +3551,8 @@ endpoint_tests()
                pm_nl_set_limits $ns2 2 2
                pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
                speed=slow \
-                       run_tests $ns1 $ns2 10.0.1.1 2>/dev/null &
+                       run_tests $ns1 $ns2 10.0.1.1 &
+               local tests_pid=$!
 
                wait_mpj $ns1
                pm_nl_check_endpoint "creation" \
@@ -3573,7 +3567,7 @@ endpoint_tests()
                pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
                pm_nl_check_endpoint "modif is allowed" \
                        $ns2 10.0.2.2 id 1 flags signal
-               kill_tests_wait
+               mptcp_lib_kill_wait $tests_pid
        fi
 
        if reset "delete and re-add" &&
@@ -3582,7 +3576,8 @@ endpoint_tests()
                pm_nl_set_limits $ns2 1 1
                pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
                test_linkfail=4 speed=20 \
-                       run_tests $ns1 $ns2 10.0.1.1 2>/dev/null &
+                       run_tests $ns1 $ns2 10.0.1.1 &
+               local tests_pid=$!
 
                wait_mpj $ns2
                chk_subflow_nr "before delete" 2
@@ -3597,7 +3592,7 @@ endpoint_tests()
                wait_mpj $ns2
                chk_subflow_nr "after re-add" 2
                chk_mptcp_info subflows 1 subflows 1
-               kill_tests_wait
+               mptcp_lib_kill_wait $tests_pid
        fi
 }
 
index 022262a2cfe0ee59976d398f665c8057dfaea0d7..3a2abae5993e2b4b32ae810d080dc3d336e41e11 100644 (file)
@@ -6,7 +6,7 @@ readonly KSFT_FAIL=1
 readonly KSFT_SKIP=4
 
 # shellcheck disable=SC2155 # declare and assign separately
-readonly KSFT_TEST=$(basename "${0}" | sed 's/\.sh$//g')
+readonly KSFT_TEST="${MPTCP_LIB_KSFT_TEST:-$(basename "${0}" .sh)}"
 
 MPTCP_LIB_SUBTESTS=()
 
index 79b65bdf05db6586726cc76d3313f12368d21dc5..abc5648b59abde537dca90791404691050c759e2 100644 (file)
@@ -1 +1 @@
-timeout=1200
+timeout=1800
index ae8ad5d6fb9dac680573b4207a67781e18773c09..0cc964e6f2c1768dad6a474cffd1c521580b7741 100755 (executable)
@@ -284,12 +284,12 @@ done
 
 setup
 run_test 10 10 0 0 "balanced bwidth"
-run_test 10 10 1 50 "balanced bwidth with unbalanced delay"
+run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
 
 # we still need some additional infrastructure to pass the following test-cases
-run_test 30 10 0 0 "unbalanced bwidth"
-run_test 30 10 1 50 "unbalanced bwidth with unbalanced delay"
-run_test 30 10 50 1 "unbalanced bwidth with opposed, unbalanced delay"
+run_test 10 3 0 0 "unbalanced bwidth"
+run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
+run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
 
 mptcp_lib_result_print_all_tap
 exit $ret
old mode 100755 (executable)
new mode 100644 (file)
index f10879788f61ba4f4c01d6cb60a929048ec56540..d65fdd407d73f99bc0cbcdf17cfcca9ad6efea87 100755 (executable)
 #      Same as above but with IPv6
 
 source lib.sh
+source net_helper.sh
 
 PAUSE_ON_FAIL=no
 VERBOSE=0
@@ -707,23 +708,23 @@ setup_xfrm6() {
 }
 
 setup_xfrm4udp() {
-       setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0"
-       setup_nettest_xfrm 4 4500
+       setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0" && \
+               setup_nettest_xfrm 4 4500
 }
 
 setup_xfrm6udp() {
-       setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0"
-       setup_nettest_xfrm 6 4500
+       setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0" && \
+               setup_nettest_xfrm 6 4500
 }
 
 setup_xfrm4udprouted() {
-       setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0"
-       setup_nettest_xfrm 4 4500
+       setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0" && \
+               setup_nettest_xfrm 4 4500
 }
 
 setup_xfrm6udprouted() {
-       setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0"
-       setup_nettest_xfrm 6 4500
+       setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0" && \
+               setup_nettest_xfrm 6 4500
 }
 
 setup_routing_old() {
@@ -1336,13 +1337,15 @@ test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception() {
                        TCPDST="TCP:[${dst}]:50000"
                fi
                ${ns_b} socat -T 3 -u -6 TCP-LISTEN:50000 STDOUT > $tmpoutfile &
+               local socat_pid=$!
 
-               sleep 1
+               wait_local_port_listen ${NS_B} 50000 tcp
 
-               dd if=/dev/zero of=/dev/stdout status=none bs=1M count=1 | ${target} socat -T 3 -u STDIN $TCPDST,connect-timeout=3
+               dd if=/dev/zero status=none bs=1M count=1 | ${target} socat -T 3 -u STDIN $TCPDST,connect-timeout=3
 
                size=$(du -sb $tmpoutfile)
                size=${size%%/tmp/*}
+               wait ${socat_pid}
 
                [ $size -ne 1048576 ] && err "File size $size mismatches exepcted value in locally bridged vxlan test" && return 1
        done
@@ -1954,6 +1957,13 @@ check_command() {
        return 0
 }
 
+check_running() {
+       pid=${1}
+       cmd=${2}
+
+       [ "$(cat /proc/${pid}/cmdline 2>/dev/null | tr -d '\0')" = "{cmd}" ]
+}
+
 test_cleanup_vxlanX_exception() {
        outer="${1}"
        encap="vxlan"
@@ -1984,11 +1994,12 @@ test_cleanup_vxlanX_exception() {
 
        ${ns_a} ip link del dev veth_A-R1 &
        iplink_pid=$!
-       sleep 1
-       if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then
-               err "  can't delete veth device in a timely manner, PMTU dst likely leaked"
-               return 1
-       fi
+       for i in $(seq 1 20); do
+               check_running ${iplink_pid} "iplinkdeldevveth_A-R1" || return 0
+               sleep 0.1
+       done
+       err "  can't delete veth device in a timely manner, PMTU dst likely leaked"
+       return 1
 }
 
 test_cleanup_ipv6_exception() {
index a26c5624429fb1a029d1d472921154e73a7ea86b..4287a85298907969dbd7df7da0e1969494f7857e 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 readonly ksft_skip=4
@@ -33,6 +33,10 @@ chk_rps() {
 
        rps_mask=$($cmd /sys/class/net/$dev_name/queues/rx-0/rps_cpus)
        printf "%-60s" "$msg"
+
+       # In case there is more than 32 CPUs we need to remove commas from masks
+       rps_mask=${rps_mask//,}
+       expected_rps_mask=${expected_rps_mask//,}
        if [ $rps_mask -eq $expected_rps_mask ]; then
                echo "[ ok ]"
        else
index a10a32952f2167d4042dfbfa54a859770a22ff48..874a2952aa8ee16b1841bdd7e2930e54a1c99303 100755 (executable)
@@ -28,6 +28,7 @@ ALL_TESTS="
        kci_test_neigh_get
        kci_test_bridge_parent_id
        kci_test_address_proto
+       kci_test_enslave_bonding
 "
 
 devdummy="test-dummy0"
@@ -439,7 +440,6 @@ kci_test_encap_vxlan()
        local ret=0
        vxlan="test-vxlan0"
        vlan="test-vlan0"
-       testns="$1"
        run_cmd ip -netns "$testns" link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
                dev "$devdummy" dstport 4789
        if [ $? -ne 0 ]; then
@@ -484,7 +484,6 @@ kci_test_encap_fou()
 {
        local ret=0
        name="test-fou"
-       testns="$1"
        run_cmd_grep 'Usage: ip fou' ip fou help
        if [ $? -ne 0 ];then
                end_test "SKIP: fou: iproute2 too old"
@@ -525,8 +524,8 @@ kci_test_encap()
        run_cmd ip -netns "$testns" link set lo up
        run_cmd ip -netns "$testns" link add name "$devdummy" type dummy
        run_cmd ip -netns "$testns" link set "$devdummy" up
-       run_cmd kci_test_encap_vxlan "$testns"
-       run_cmd kci_test_encap_fou "$testns"
+       run_cmd kci_test_encap_vxlan
+       run_cmd kci_test_encap_fou
 
        ip netns del "$testns"
        return $ret
@@ -1241,6 +1240,31 @@ kci_test_address_proto()
        return $ret
 }
 
+kci_test_enslave_bonding()
+{
+       local bond="bond123"
+       local ret=0
+
+       setup_ns testns
+       if [ $? -ne 0 ]; then
+               end_test "SKIP bonding tests: cannot add net namespace $testns"
+               return $ksft_skip
+       fi
+
+       run_cmd ip -netns $testns link add dev $bond type bond mode balance-rr
+       run_cmd ip -netns $testns link add dev $devdummy type dummy
+       run_cmd ip -netns $testns link set dev $devdummy up
+       run_cmd ip -netns $testns link set dev $devdummy master $bond down
+       if [ $ret -ne 0 ]; then
+               end_test "FAIL: initially up interface added to a bond and set down"
+               ip netns del "$testns"
+               return 1
+       fi
+
+       end_test "PASS: enslave interface in a bond"
+       ip netns del "$testns"
+}
+
 kci_test_rtnl()
 {
        local current_test
old mode 100755 (executable)
new mode 100644 (file)
index a9a1759e035ca875ac22036fa52984b32ada76f8..1f78a87f6f37eaab8dc41850e1a906f9aef6315f 100644 (file)
@@ -11,7 +11,7 @@ setup_veth_ns() {
        local -r ns_mac="$4"
 
        [[ -e /var/run/netns/"${ns_name}" ]] || ip netns add "${ns_name}"
-       echo 100000 > "/sys/class/net/${ns_dev}/gro_flush_timeout"
+       echo 1000000 > "/sys/class/net/${ns_dev}/gro_flush_timeout"
        ip link set dev "${ns_dev}" netns "${ns_name}" mtu 65535
        ip -netns "${ns_name}" link set dev "${ns_dev}" up
 
index a148181641026e18e7d9c138ab7b6dde89cc90fd..e9fa14e1073226829e883d7c6621ae9e9a2ce173 100644 (file)
@@ -3,19 +3,16 @@
 #define _GNU_SOURCE
 #include <sched.h>
 
+#include <fcntl.h>
+
 #include <netinet/in.h>
 #include <sys/socket.h>
 #include <sys/sysinfo.h>
 
 #include "../kselftest_harness.h"
 
-#define CLIENT_PER_SERVER      32 /* More sockets, more reliable */
-#define NR_SERVER              self->nproc
-#define NR_CLIENT              (CLIENT_PER_SERVER * NR_SERVER)
-
 FIXTURE(so_incoming_cpu)
 {
-       int nproc;
        int *servers;
        union {
                struct sockaddr addr;
@@ -56,12 +53,47 @@ FIXTURE_VARIANT_ADD(so_incoming_cpu, after_all_listen)
        .when_to_set = AFTER_ALL_LISTEN,
 };
 
+static void write_sysctl(struct __test_metadata *_metadata,
+                        char *filename, char *string)
+{
+       int fd, len, ret;
+
+       fd = open(filename, O_WRONLY);
+       ASSERT_NE(fd, -1);
+
+       len = strlen(string);
+       ret = write(fd, string, len);
+       ASSERT_EQ(ret, len);
+}
+
+static void setup_netns(struct __test_metadata *_metadata)
+{
+       ASSERT_EQ(unshare(CLONE_NEWNET), 0);
+       ASSERT_EQ(system("ip link set lo up"), 0);
+
+       write_sysctl(_metadata, "/proc/sys/net/ipv4/ip_local_port_range", "10000 60001");
+       write_sysctl(_metadata, "/proc/sys/net/ipv4/tcp_tw_reuse", "0");
+}
+
+#define NR_PORT                                (60001 - 10000 - 1)
+#define NR_CLIENT_PER_SERVER_DEFAULT   32
+static int nr_client_per_server, nr_server, nr_client;
+
 FIXTURE_SETUP(so_incoming_cpu)
 {
-       self->nproc = get_nprocs();
-       ASSERT_LE(2, self->nproc);
+       setup_netns(_metadata);
+
+       nr_server = get_nprocs();
+       ASSERT_LE(2, nr_server);
+
+       if (NR_CLIENT_PER_SERVER_DEFAULT * nr_server < NR_PORT)
+               nr_client_per_server = NR_CLIENT_PER_SERVER_DEFAULT;
+       else
+               nr_client_per_server = NR_PORT / nr_server;
+
+       nr_client = nr_client_per_server * nr_server;
 
-       self->servers = malloc(sizeof(int) * NR_SERVER);
+       self->servers = malloc(sizeof(int) * nr_server);
        ASSERT_NE(self->servers, NULL);
 
        self->in_addr.sin_family = AF_INET;
@@ -74,7 +106,7 @@ FIXTURE_TEARDOWN(so_incoming_cpu)
 {
        int i;
 
-       for (i = 0; i < NR_SERVER; i++)
+       for (i = 0; i < nr_server; i++)
                close(self->servers[i]);
 
        free(self->servers);
@@ -110,10 +142,10 @@ int create_server(struct __test_metadata *_metadata,
        if (variant->when_to_set == BEFORE_LISTEN)
                set_so_incoming_cpu(_metadata, fd, cpu);
 
-       /* We don't use CLIENT_PER_SERVER here not to block
+       /* We don't use nr_client_per_server here not to block
         * this test at connect() if SO_INCOMING_CPU is broken.
         */
-       ret = listen(fd, NR_CLIENT);
+       ret = listen(fd, nr_client);
        ASSERT_EQ(ret, 0);
 
        if (variant->when_to_set == AFTER_LISTEN)
@@ -128,7 +160,7 @@ void create_servers(struct __test_metadata *_metadata,
 {
        int i, ret;
 
-       for (i = 0; i < NR_SERVER; i++) {
+       for (i = 0; i < nr_server; i++) {
                self->servers[i] = create_server(_metadata, self, variant, i);
 
                if (i == 0) {
@@ -138,7 +170,7 @@ void create_servers(struct __test_metadata *_metadata,
        }
 
        if (variant->when_to_set == AFTER_ALL_LISTEN) {
-               for (i = 0; i < NR_SERVER; i++)
+               for (i = 0; i < nr_server; i++)
                        set_so_incoming_cpu(_metadata, self->servers[i], i);
        }
 }
@@ -149,7 +181,7 @@ void create_clients(struct __test_metadata *_metadata,
        cpu_set_t cpu_set;
        int i, j, fd, ret;
 
-       for (i = 0; i < NR_SERVER; i++) {
+       for (i = 0; i < nr_server; i++) {
                CPU_ZERO(&cpu_set);
 
                CPU_SET(i, &cpu_set);
@@ -162,7 +194,7 @@ void create_clients(struct __test_metadata *_metadata,
                ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
                ASSERT_EQ(ret, 0);
 
-               for (j = 0; j < CLIENT_PER_SERVER; j++) {
+               for (j = 0; j < nr_client_per_server; j++) {
                        fd  = socket(AF_INET, SOCK_STREAM, 0);
                        ASSERT_NE(fd, -1);
 
@@ -180,8 +212,8 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
        int i, j, fd, cpu, ret, total = 0;
        socklen_t len = sizeof(int);
 
-       for (i = 0; i < NR_SERVER; i++) {
-               for (j = 0; j < CLIENT_PER_SERVER; j++) {
+       for (i = 0; i < nr_server; i++) {
+               for (j = 0; j < nr_client_per_server; j++) {
                        /* If we see -EAGAIN here, SO_INCOMING_CPU is broken */
                        fd = accept(self->servers[i], &self->addr, &self->addrlen);
                        ASSERT_NE(fd, -1);
@@ -195,7 +227,7 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
                }
        }
 
-       ASSERT_EQ(total, NR_CLIENT);
+       ASSERT_EQ(total, nr_client);
        TH_LOG("SO_INCOMING_CPU is very likely to be "
               "working correctly with %d sockets.", total);
 }
index 8e60bae67aa9f22f59d2251a32e32e62cad4c84c..522d991e310ebf0277dd8f576c7f56e89d229700 100644 (file)
@@ -52,5 +52,5 @@ $(OUTPUT)/%_ipv6: %.c
 
 $(OUTPUT)/icmps-accept_ipv4: CFLAGS+= -DTEST_ICMPS_ACCEPT
 $(OUTPUT)/icmps-accept_ipv6: CFLAGS+= -DTEST_ICMPS_ACCEPT
-$(OUTPUT)/bench-lookups_ipv4: LDFLAGS+= -lm
-$(OUTPUT)/bench-lookups_ipv6: LDFLAGS+= -lm
+$(OUTPUT)/bench-lookups_ipv4: LDLIBS+= -lm
+$(OUTPUT)/bench-lookups_ipv6: LDLIBS+= -lm
diff --git a/tools/testing/selftests/net/tcp_ao/config b/tools/testing/selftests/net/tcp_ao/config
new file mode 100644 (file)
index 0000000..d3277a9
--- /dev/null
@@ -0,0 +1,10 @@
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_RMD160=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_VRF=y
+CONFIG_TCP_AO=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_VETH=m
index c48b4970ca17e07220813192fadbc553cdc89250..24e62120b7924d3a1555a7e42097f9e55338b4db 100644 (file)
@@ -417,9 +417,9 @@ struct test_key {
                matches_vrf             : 1,
                is_current              : 1,
                is_rnext                : 1,
-               used_on_handshake       : 1,
-               used_after_accept       : 1,
-               used_on_client          : 1;
+               used_on_server_tx       : 1,
+               used_on_client_tx       : 1,
+               skip_counters_checks    : 1;
 };
 
 struct key_collection {
@@ -609,16 +609,14 @@ static int key_collection_socket(bool server, unsigned int port)
                                addr = &this_ip_dest;
                        sndid = key->client_keyid;
                        rcvid = key->server_keyid;
-                       set_current = key->is_current;
-                       set_rnext = key->is_rnext;
+                       key->used_on_client_tx = set_current = key->is_current;
+                       key->used_on_server_tx = set_rnext = key->is_rnext;
                }
 
                if (test_add_key_cr(sk, key->password, key->len,
                                    *addr, vrf, sndid, rcvid, key->maclen,
                                    key->alg, set_current, set_rnext))
                        test_key_error("setsockopt(TCP_AO_ADD_KEY)", key);
-               if (set_current || set_rnext)
-                       key->used_on_handshake = 1;
 #ifdef DEBUG
                test_print("%s [%u/%u] key: { %s, %u:%u, %u, %u:%u:%u:%u (%u)}",
                           server ? "server" : "client", i, collection.nr_keys,
@@ -640,22 +638,22 @@ static void verify_counters(const char *tst_name, bool is_listen_sk, bool server
        for (i = 0; i < collection.nr_keys; i++) {
                struct test_key *key = &collection.keys[i];
                uint8_t sndid, rcvid;
-               bool was_used;
+               bool rx_cnt_expected;
 
+               if (key->skip_counters_checks)
+                       continue;
                if (server) {
                        sndid = key->server_keyid;
                        rcvid = key->client_keyid;
-                       if (is_listen_sk)
-                               was_used = key->used_on_handshake;
-                       else
-                               was_used = key->used_after_accept;
+                       rx_cnt_expected = key->used_on_client_tx;
                } else {
                        sndid = key->client_keyid;
                        rcvid = key->server_keyid;
-                       was_used = key->used_on_client;
+                       rx_cnt_expected = key->used_on_server_tx;
                }
 
-               test_tcp_ao_key_counters_cmp(tst_name, a, b, was_used,
+               test_tcp_ao_key_counters_cmp(tst_name, a, b,
+                                            rx_cnt_expected ? TEST_CNT_KEY_GOOD : 0,
                                             sndid, rcvid);
        }
        test_tcp_ao_counters_free(a);
@@ -843,7 +841,7 @@ static void end_server(const char *tst_name, int sk,
        synchronize_threads(); /* 4: verified => closed */
        close(sk);
 
-       verify_counters(tst_name, true, false, begin, &end);
+       verify_counters(tst_name, false, true, begin, &end);
        synchronize_threads(); /* 5: counters */
 }
 
@@ -916,9 +914,8 @@ static int run_client(const char *tst_name, unsigned int port,
                current_index = nr_keys - 1;
        if (rnext_index < 0)
                rnext_index = nr_keys - 1;
-       collection.keys[current_index].used_on_handshake = 1;
-       collection.keys[rnext_index].used_after_accept = 1;
-       collection.keys[rnext_index].used_on_client = 1;
+       collection.keys[current_index].used_on_client_tx = 1;
+       collection.keys[rnext_index].used_on_server_tx = 1;
 
        synchronize_threads(); /* 3: accepted => send data */
        if (test_client_verify(sk, msg_sz, msg_nr, TEST_TIMEOUT_SEC)) {
@@ -1059,7 +1056,16 @@ static void check_current_back(const char *tst_name, unsigned int port,
                test_error("Can't change the current key");
        if (test_client_verify(sk, msg_len, nr_packets, TEST_TIMEOUT_SEC))
                test_fail("verify failed");
-       collection.keys[rotate_to_index].used_after_accept = 1;
+       /* There is a race here: between setting the current_key with
+        * setsockopt(TCP_AO_INFO) and starting to send some data - there
+        * might have been a segment received with the desired
+        * RNext_key set. In turn that would mean that the first outgoing
+        * segment will have the desired current_key (flipped back).
+        * Which is what the user/test wants. As it's racy, skip checking
+        * the counters, yet check what are the resulting current/rnext
+        * keys on both sides.
+        */
+       collection.keys[rotate_to_index].skip_counters_checks = 1;
 
        end_client(tst_name, sk, nr_keys, current_index, rnext_index, &tmp);
 }
@@ -1089,7 +1095,7 @@ static void roll_over_keys(const char *tst_name, unsigned int port,
                }
                verify_current_rnext(tst_name, sk, -1,
                                     collection.keys[i].server_keyid);
-               collection.keys[i].used_on_client = 1;
+               collection.keys[i].used_on_server_tx = 1;
                synchronize_threads(); /* verify current/rnext */
        }
        end_client(tst_name, sk, nr_keys, current_index, rnext_index, &tmp);
index c75d82885a2e1aa40f463bbdc65999c05c6a063d..15aeb0963058fdf645451206b3015dd707aa0c13 100644 (file)
@@ -62,7 +62,9 @@ int test_wait_fd(int sk, time_t sec, bool write)
                return -ETIMEDOUT;
        }
 
-       if (getsockopt(sk, SOL_SOCKET, SO_ERROR, &ret, &slen) || ret)
+       if (getsockopt(sk, SOL_SOCKET, SO_ERROR, &ret, &slen))
+               return -errno;
+       if (ret)
                return -ret;
        return 0;
 }
@@ -584,9 +586,11 @@ int test_client_verify(int sk, const size_t msg_len, const size_t nr,
 {
        size_t buf_sz = msg_len * nr;
        char *buf = alloca(buf_sz);
+       ssize_t ret;
 
        randomize_buffer(buf, buf_sz);
-       if (test_client_loop(sk, buf, buf_sz, msg_len, timeout_sec) != buf_sz)
-               return -1;
-       return 0;
+       ret = test_client_loop(sk, buf, buf_sz, msg_len, timeout_sec);
+       if (ret < 0)
+               return (int)ret;
+       return ret != buf_sz ? -1 : 0;
 }
index ac06009a7f5f65ddf0095aa6d7044e98abf032cf..7df8b8700e39e96292f8eafdf105ee0314a65497 100644 (file)
@@ -1,10 +1,33 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Author: Dmitry Safonov <dima@arista.com> */
+/*
+ * The test checks that both active and passive reset have correct TCP-AO
+ * signature. An "active" reset (abort) here is procured from closing
+ * listen() socket with non-accepted connections in the queue:
+ * inet_csk_listen_stop() => inet_child_forget() =>
+ *                        => tcp_disconnect() => tcp_send_active_reset()
+ *
+ * The passive reset is quite hard to get on established TCP connections.
+ * It could be procured from non-established states, but the synchronization
+ * part from userspace in order to reliably get RST seems uneasy.
+ * So, instead it's procured by corrupting SEQ number on TIMED-WAIT state.
+ *
+ * It's important to test both passive and active RST as they go through
+ * different code-paths:
+ * - tcp_send_active_reset() makes no-data skb, sends it with tcp_transmit_skb()
+ * - tcp_v*_send_reset() create their reply skbs and send them with
+ *   ip_send_unicast_reply()
+ *
+ * In both cases TCP-AO signatures have to be correct, which is verified by
+ * (1) checking that the TCP-AO connection was reset and (2) TCP-AO counters.
+ *
+ * Author: Dmitry Safonov <dima@arista.com>
+ */
 #include <inttypes.h>
 #include "../../../../include/linux/kernel.h"
 #include "aolib.h"
 
 const size_t quota = 1000;
+const size_t packet_sz = 100;
 /*
  * Backlog == 0 means 1 connection in queue, see:
  * commit 64a146513f8f ("[NET]: Revert incorrect accept queue...")
@@ -59,26 +82,6 @@ static void close_forced(int sk)
        close(sk);
 }
 
-static int test_wait_for_exception(int sk, time_t sec)
-{
-       struct timeval tv = { .tv_sec = sec };
-       struct timeval *ptv = NULL;
-       fd_set efds;
-       int ret;
-
-       FD_ZERO(&efds);
-       FD_SET(sk, &efds);
-
-       if (sec)
-               ptv = &tv;
-
-       errno = 0;
-       ret = select(sk + 1, NULL, NULL, &efds, ptv);
-       if (ret < 0)
-               return -errno;
-       return ret ? sk : 0;
-}
-
 static void test_server_active_rst(unsigned int port)
 {
        struct tcp_ao_counters cnt1, cnt2;
@@ -155,17 +158,16 @@ static void test_server_passive_rst(unsigned int port)
                        test_fail("server returned %zd", bytes);
        }
 
-       synchronize_threads(); /* 3: chekpoint/restore the connection */
+       synchronize_threads(); /* 3: checkpoint the client */
+       synchronize_threads(); /* 4: close the server, creating twsk */
        if (test_get_tcp_ao_counters(sk, &ao2))
                test_error("test_get_tcp_ao_counters()");
-
-       synchronize_threads(); /* 4: terminate server + send more on client */
-       bytes = test_server_run(sk, quota, TEST_RETRANSMIT_SEC);
        close(sk);
+
+       synchronize_threads(); /* 5: restore the socket, send more data */
        test_tcp_ao_counters_cmp("passive RST server", &ao1, &ao2, TEST_CNT_GOOD);
 
-       synchronize_threads(); /* 5: verified => closed */
-       close(sk);
+       synchronize_threads(); /* 6: server exits */
 }
 
 static void *server_fn(void *arg)
@@ -284,7 +286,7 @@ static void test_client_active_rst(unsigned int port)
                test_error("test_wait_fds(): %d", err);
 
        synchronize_threads(); /* 3: close listen socket */
-       if (test_client_verify(sk[0], 100, quota / 100, TEST_TIMEOUT_SEC))
+       if (test_client_verify(sk[0], packet_sz, quota / packet_sz, TEST_TIMEOUT_SEC))
                test_fail("Failed to send data on connected socket");
        else
                test_ok("Verified established tcp connection");
@@ -323,7 +325,6 @@ static void test_client_passive_rst(unsigned int port)
        struct tcp_sock_state img;
        sockaddr_af saddr;
        int sk, err;
-       socklen_t slen = sizeof(err);
 
        sk = socket(test_family, SOCK_STREAM, IPPROTO_TCP);
        if (sk < 0)
@@ -337,18 +338,51 @@ static void test_client_passive_rst(unsigned int port)
                test_error("failed to connect()");
 
        synchronize_threads(); /* 2: accepted => send data */
-       if (test_client_verify(sk, 100, quota / 100, TEST_TIMEOUT_SEC))
+       if (test_client_verify(sk, packet_sz, quota / packet_sz, TEST_TIMEOUT_SEC))
                test_fail("Failed to send data on connected socket");
        else
                test_ok("Verified established tcp connection");
 
-       synchronize_threads(); /* 3: chekpoint/restore the connection */
+       synchronize_threads(); /* 3: checkpoint the client */
        test_enable_repair(sk);
        test_sock_checkpoint(sk, &img, &saddr);
        test_ao_checkpoint(sk, &ao_img);
-       test_kill_sk(sk);
+       test_disable_repair(sk);
 
-       img.out.seq += quota;
+       synchronize_threads(); /* 4: close the server, creating twsk */
+
+       /*
+        * The "corruption" in SEQ has to be small enough to fit into TCP
+        * window, see tcp_timewait_state_process() for out-of-window
+        * segments.
+        */
+       img.out.seq += 5; /* 5 is more noticeable in tcpdump than 1 */
+
+       /*
+        * FIXME: This is kind-of ugly and dirty, but it works.
+        *
+        * At this moment, the server has close'ed(sk).
+        * The passive RST that is being targeted here is new data after
+        * half-duplex close, see tcp_timewait_state_process() => TCP_TW_RST
+        *
+        * What is needed here is:
+        * (1) wait for FIN from the server
+        * (2) make sure that the ACK from the client went out
+        * (3) make sure that the ACK was received and processed by the server
+        *
+        * Otherwise, the data that will be sent from "repaired" socket
+        * post SEQ corruption may get to the server before it's in
+        * TCP_FIN_WAIT2.
+        *
+        * (1) is easy with select()/poll()
+        * (2) is possible by polling tcpi_state from TCP_INFO
+        * (3) is quite complex: as server's socket was already closed,
+        *     probably the way to do it would be tcp-diag.
+        */
+       sleep(TEST_RETRANSMIT_SEC);
+
+       synchronize_threads(); /* 5: restore the socket, send more data */
+       test_kill_sk(sk);
 
        sk = socket(test_family, SOCK_STREAM, IPPROTO_TCP);
        if (sk < 0)
@@ -366,25 +400,33 @@ static void test_client_passive_rst(unsigned int port)
        test_disable_repair(sk);
        test_sock_state_free(&img);
 
-       synchronize_threads(); /* 4: terminate server + send more on client */
-       if (test_client_verify(sk, 100, quota / 100, 2 * TEST_TIMEOUT_SEC))
-               test_ok("client connection broken post-seq-adjust");
-       else
-               test_fail("client connection still works post-seq-adjust");
-
-       test_wait_for_exception(sk, TEST_TIMEOUT_SEC);
-
-       if (getsockopt(sk, SOL_SOCKET, SO_ERROR, &err, &slen))
-               test_error("getsockopt()");
-       if (err != ECONNRESET && err != EPIPE)
-               test_fail("client connection was not reset: %d", err);
+       /*
+        * This is how "passive reset" is acquired in this test from TCP_TW_RST:
+        *
+        * IP 10.0.254.1.7011 > 10.0.1.1.59772: Flags [P.], seq 901:1001, ack 1001, win 249,
+        *    options [tcp-ao keyid 100 rnextkeyid 100 mac 0x10217d6c36a22379086ef3b1], length 100
+        * IP 10.0.254.1.7011 > 10.0.1.1.59772: Flags [F.], seq 1001, ack 1001, win 249,
+        *    options [tcp-ao keyid 100 rnextkeyid 100 mac 0x104ffc99b98c10a5298cc268], length 0
+        * IP 10.0.1.1.59772 > 10.0.254.1.7011: Flags [.], ack 1002, win 251,
+        *    options [tcp-ao keyid 100 rnextkeyid 100 mac 0xe496dd4f7f5a8a66873c6f93,nop,nop,sack 1 {1001:1002}], length 0
+        * IP 10.0.1.1.59772 > 10.0.254.1.7011: Flags [P.], seq 1006:1106, ack 1001, win 251,
+        *    options [tcp-ao keyid 100 rnextkeyid 100 mac 0x1b5f3330fb23fbcd0c77d0ca], length 100
+        * IP 10.0.254.1.7011 > 10.0.1.1.59772: Flags [R], seq 3215596252, win 0,
+        *    options [tcp-ao keyid 100 rnextkeyid 100 mac 0x0bcfbbf497bce844312304b2], length 0
+        */
+       err = test_client_verify(sk, packet_sz, quota / packet_sz, 2 * TEST_TIMEOUT_SEC);
+       /* Make sure that the connection was reset, not timeouted */
+       if (err && err == -ECONNRESET)
+               test_ok("client sock was passively reset post-seq-adjust");
+       else if (err)
+               test_fail("client sock was not reset post-seq-adjust: %d", err);
        else
-               test_ok("client connection was reset");
+               test_fail("client sock is yet connected post-seq-adjust");
 
        if (test_get_tcp_ao_counters(sk, &ao2))
                test_error("test_get_tcp_ao_counters()");
 
-       synchronize_threads(); /* 5: verified => closed */
+       synchronize_threads(); /* 6: server exits */
        close(sk);
        test_tcp_ao_counters_cmp("client passive RST", &ao1, &ao2, TEST_CNT_GOOD);
 }
@@ -410,6 +452,6 @@ static void *client_fn(void *arg)
 
 int main(int argc, char *argv[])
 {
-       test_init(15, server_fn, client_fn);
+       test_init(14, server_fn, client_fn);
        return 0;
 }
diff --git a/tools/testing/selftests/net/tcp_ao/settings b/tools/testing/selftests/net/tcp_ao/settings
new file mode 100644 (file)
index 0000000..6091b45
--- /dev/null
@@ -0,0 +1 @@
+timeout=120
index c5b568cd7d901ce19d26cc0228dc7089581bb7f1..6b59a652159f7754417471c066a06bd4eb511a41 100644 (file)
@@ -110,9 +110,9 @@ static void try_accept(const char *tst_name, unsigned int port,
                test_tcp_ao_counters_cmp(tst_name, &ao_cnt1, &ao_cnt2, cnt_expected);
 
 out:
-       synchronize_threads(); /* close() */
+       synchronize_threads(); /* test_kill_sk() */
        if (sk > 0)
-               close(sk);
+               test_kill_sk(sk);
 }
 
 static void server_add_routes(void)
@@ -302,10 +302,10 @@ static void try_connect(const char *tst_name, unsigned int port,
                test_ok("%s: connected", tst_name);
 
 out:
-       synchronize_threads(); /* close() */
+       synchronize_threads(); /* test_kill_sk() */
        /* _test_connect_socket() cleans up on failure */
        if (ret > 0)
-               close(sk);
+               test_kill_sk(sk);
 }
 
 #define PREINSTALL_MD5_FIRST   BIT(0)
@@ -486,10 +486,10 @@ static void try_to_add(const char *tst_name, unsigned int port,
        }
 
 out:
-       synchronize_threads(); /* close() */
+       synchronize_threads(); /* test_kill_sk() */
        /* _test_connect_socket() cleans up on failure */
        if (ret > 0)
-               close(sk);
+               test_kill_sk(sk);
 }
 
 static void client_add_ip(union tcp_addr *client, const char *ip)
index 464853a7f98290ec3a7c6bf69786cf8c12e1f789..7799e042a9719cda33ea7d004d2ae4a2ec608a4f 100644 (file)
@@ -707,6 +707,20 @@ TEST_F(tls, splice_from_pipe)
        EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
 }
 
+TEST_F(tls, splice_more)
+{
+       unsigned int f = SPLICE_F_NONBLOCK | SPLICE_F_MORE | SPLICE_F_GIFT;
+       int send_len = TLS_PAYLOAD_MAX_LEN;
+       char mem_send[TLS_PAYLOAD_MAX_LEN];
+       int i, send_pipe = 1;
+       int p[2];
+
+       ASSERT_GE(pipe(p), 0);
+       EXPECT_GE(write(p[1], mem_send, send_len), 0);
+       for (i = 0; i < 32; i++)
+               EXPECT_EQ(splice(p[0], NULL, self->fd, NULL, send_pipe, f), 1);
+}
+
 TEST_F(tls, splice_from_pipe2)
 {
        int send_len = 16000;
index af5dc57c8ce935907fd93279077c0d326205415e..8802604148dda1c2565fdb0d5b0aaabb0cad1427 100755 (executable)
@@ -7,7 +7,7 @@ source net_helper.sh
 
 readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
 
-BPF_FILE="../bpf/xdp_dummy.bpf.o"
+BPF_FILE="xdp_dummy.o"
 
 # set global exit status, but never reset nonzero one.
 check_err()
@@ -197,7 +197,7 @@ run_all() {
 }
 
 if [ ! -f ${BPF_FILE} ]; then
-       echo "Missing ${BPF_FILE}. Build bpf selftest first"
+       echo "Missing ${BPF_FILE}. Run 'make' first"
        exit -1
 fi
 
index cb664679b4342992a16694a182c7d0b3a7e9d80b..7080eae5312b2f9fa13c41868337fd4433fb0de6 100755 (executable)
@@ -7,7 +7,7 @@ source net_helper.sh
 
 readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
 
-BPF_FILE="../bpf/xdp_dummy.bpf.o"
+BPF_FILE="xdp_dummy.o"
 
 cleanup() {
        local -r jobs="$(jobs -p)"
@@ -84,7 +84,7 @@ run_all() {
 }
 
 if [ ! -f ${BPF_FILE} ]; then
-       echo "Missing ${BPF_FILE}. Build bpf selftest first"
+       echo "Missing ${BPF_FILE}. Run 'make' first"
        exit -1
 fi
 
index dd47fa96f6b3e5ea1cf1f750a4fd55d7a0c4592b..e1ff645bd3d1c7b0b8ba177ee73ce595a91f3808 100755 (executable)
@@ -7,7 +7,7 @@ source net_helper.sh
 
 readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
 
-BPF_FILE="../bpf/xdp_dummy.bpf.o"
+BPF_FILE="xdp_dummy.o"
 
 cleanup() {
        local -r jobs="$(jobs -p)"
@@ -85,12 +85,12 @@ run_all() {
 }
 
 if [ ! -f ${BPF_FILE} ]; then
-       echo "Missing ${BPF_FILE}. Build bpf selftest first"
+       echo "Missing ${BPF_FILE}. Run 'make' first"
        exit -1
 fi
 
 if [ ! -f nat6to4.o ]; then
-       echo "Missing nat6to4 helper. Build bpf nat6to4.o selftest first"
+       echo "Missing nat6to4 helper. Run 'make' first"
        exit -1
 fi
 
index c079565add39224eb99e011f941b6f0a11c1648c..9cd5e885e91f74b01007cf14bbdb9808aa04c632 100755 (executable)
@@ -1,7 +1,9 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
-BPF_FILE="../bpf/xdp_dummy.bpf.o"
+source net_helper.sh
+
+BPF_FILE="xdp_dummy.o"
 readonly BASE="ns-$(mktemp -u XXXXXX)"
 readonly SRC=2
 readonly DST=1
@@ -37,6 +39,10 @@ create_ns() {
        for ns in $NS_SRC $NS_DST; do
                ip netns add $ns
                ip -n $ns link set dev lo up
+
+               # disable route solicitations to decrease 'noise' traffic
+               ip netns exec $ns sysctl -qw net.ipv6.conf.default.router_solicitations=0
+               ip netns exec $ns sysctl -qw net.ipv6.conf.all.router_solicitations=0
        done
 
        ip link add name veth$SRC type veth peer name veth$DST
@@ -78,6 +84,12 @@ create_vxlan_pair() {
                create_vxlan_endpoint $BASE$ns veth$ns $BM_NET_V6$((3 - $ns)) vxlan6$ns 6
                ip -n $BASE$ns addr add dev vxlan6$ns $OL_NET_V6$ns/24 nodad
        done
+
+       # preload neighbour cache, to avoid some noisy traffic
+       local addr_dst=$(ip -j -n $BASE$DST link show dev vxlan6$DST  |jq -r '.[]["address"]')
+       local addr_src=$(ip -j -n $BASE$SRC link show dev vxlan6$SRC  |jq -r '.[]["address"]')
+       ip -n $BASE$DST neigh add dev vxlan6$DST lladdr $addr_src $OL_NET_V6$SRC
+       ip -n $BASE$SRC neigh add dev vxlan6$SRC lladdr $addr_dst $OL_NET_V6$DST
 }
 
 is_ipv6() {
@@ -117,9 +129,9 @@ run_test() {
        # not enable GRO
        ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 4789
        ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 8000
-       ip netns exec $NS_DST ./udpgso_bench_rx -C 1000 -R 10 -n 10 -l 1300 $rx_args &
+       ip netns exec $NS_DST ./udpgso_bench_rx -C 2000 -R 100 -n 10 -l 1300 $rx_args &
        local spid=$!
-       sleep 0.1
+       wait_local_port_listen "$NS_DST" 8000 udp
        ip netns exec $NS_SRC ./udpgso_bench_tx $family -M 1 -s 13000 -S 1300 -D $dst
        local retc=$?
        wait $spid
@@ -166,9 +178,9 @@ run_bench() {
        # bind the sender and the receiver to different CPUs to try
        # get reproducible results
        ip netns exec $NS_DST bash -c "echo 2 > /sys/class/net/veth$DST/queues/rx-0/rps_cpus"
-       ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 1000 -R 10  &
+       ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 2000 -R 100  &
        local spid=$!
-       sleep 0.1
+       wait_local_port_listen "$NS_DST" 8000 udp
        ip netns exec $NS_SRC taskset 0x1 ./udpgso_bench_tx $family -l 3 -S 1300 -D $dst
        local retc=$?
        wait $spid
index f35a924d4a3030780447f2cc137f6ff373ed693c..1cbadd267c963c0c067308d3fb16493625e8f1b7 100644 (file)
@@ -375,7 +375,7 @@ static void do_recv(void)
                        do_flush_udp(fd);
 
                tnow = gettimeofday_ms();
-               if (tnow > treport) {
+               if (!cfg_expected_pkt_nr && tnow > treport) {
                        if (packets)
                                fprintf(stderr,
                                        "%s rx: %6lu MB/s %8lu calls/s\n",
index 2d073595c620210254bc372bc428b05121e9b26b..27574bbf2d6386f770673b82684edf07b586c79a 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-BPF_FILE="../bpf/xdp_dummy.bpf.o"
+BPF_FILE="xdp_dummy.o"
 readonly STATS="$(mktemp -p /tmp ns-XXXXXX)"
 readonly BASE=`basename $STATS`
 readonly SRC=2
@@ -218,7 +218,7 @@ while getopts "hs:" option; do
 done
 
 if [ ! -f ${BPF_FILE} ]; then
-       echo "Missing ${BPF_FILE}. Build bpf selftest first"
+       echo "Missing ${BPF_FILE}. Run 'make' first"
        exit 1
 fi
 
diff --git a/tools/testing/selftests/net/xdp_dummy.c b/tools/testing/selftests/net/xdp_dummy.c
new file mode 100644 (file)
index 0000000..d988b2e
--- /dev/null
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define KBUILD_MODNAME "xdp_dummy"
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("xdp")
+int xdp_dummy_prog(struct xdp_md *ctx)
+{
+       return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
index f18c6db13bbff402202f6bd4796b4581803e2d73..b11ea8ee67194604de4a7dcbda7539dbffe7b7a1 100644 (file)
@@ -13,7 +13,7 @@
 #include "../kselftest_harness.h"
 
 #define TEST_ZONE_ID 123
-#define CTA_FILTER_F_CTA_TUPLE_ZONE (1 << 2)
+#define NF_CT_DEFAULT_ZONE_ID 0
 
 static int reply_counter;
 
@@ -336,6 +336,9 @@ FIXTURE_SETUP(conntrack_dump_flush)
        ret = conntrack_data_generate_v4(self->sock, 0xf4f4f4f4, 0xf5f5f5f5,
                                         TEST_ZONE_ID + 2);
        EXPECT_EQ(ret, 0);
+       ret = conntrack_data_generate_v4(self->sock, 0xf6f6f6f6, 0xf7f7f7f7,
+                                        NF_CT_DEFAULT_ZONE_ID);
+       EXPECT_EQ(ret, 0);
 
        src = (struct in6_addr) {{
                .__u6_addr32 = {
@@ -395,6 +398,26 @@ FIXTURE_SETUP(conntrack_dump_flush)
                                         TEST_ZONE_ID + 2);
        EXPECT_EQ(ret, 0);
 
+       src = (struct in6_addr) {{
+               .__u6_addr32 = {
+                       0xb80d0120,
+                       0x00000000,
+                       0x00000000,
+                       0x07000000
+               }
+       }};
+       dst = (struct in6_addr) {{
+               .__u6_addr32 = {
+                       0xb80d0120,
+                       0x00000000,
+                       0x00000000,
+                       0x08000000
+               }
+       }};
+       ret = conntrack_data_generate_v6(self->sock, src, dst,
+                                        NF_CT_DEFAULT_ZONE_ID);
+       EXPECT_EQ(ret, 0);
+
        ret = conntracK_count_zone(self->sock, TEST_ZONE_ID);
        EXPECT_GE(ret, 2);
        if (ret > 2)
@@ -425,6 +448,24 @@ TEST_F(conntrack_dump_flush, test_flush_by_zone)
        EXPECT_EQ(ret, 2);
        ret = conntracK_count_zone(self->sock, TEST_ZONE_ID + 2);
        EXPECT_EQ(ret, 2);
+       ret = conntracK_count_zone(self->sock, NF_CT_DEFAULT_ZONE_ID);
+       EXPECT_EQ(ret, 2);
+}
+
+TEST_F(conntrack_dump_flush, test_flush_by_zone_default)
+{
+       int ret;
+
+       ret = conntrack_flush_zone(self->sock, NF_CT_DEFAULT_ZONE_ID);
+       EXPECT_EQ(ret, 0);
+       ret = conntracK_count_zone(self->sock, TEST_ZONE_ID);
+       EXPECT_EQ(ret, 2);
+       ret = conntracK_count_zone(self->sock, TEST_ZONE_ID + 1);
+       EXPECT_EQ(ret, 2);
+       ret = conntracK_count_zone(self->sock, TEST_ZONE_ID + 2);
+       EXPECT_EQ(ret, 2);
+       ret = conntracK_count_zone(self->sock, NF_CT_DEFAULT_ZONE_ID);
+       EXPECT_EQ(ret, 0);
 }
 
 TEST_HARNESS_MAIN
index 212c52ca90b5526649ec7c556d2acd667ef687ad..f3f86712956024d23af1ba81bfbb1803e0cb0c66 100755 (executable)
@@ -67,7 +67,10 @@ ___EOF___
 # build using nolibc on supported archs (smaller executable) and fall
 # back to regular glibc on other ones.
 if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \
-          "||__ARM_EABI__||__aarch64__||__s390x__||__loongarch__\nyes\n#endif" \
+          "||__ARM_EABI__||__aarch64__||(__mips__ && _ABIO32)" \
+          "||__powerpc__||(__riscv && __riscv_xlen == 64)" \
+          "||__s390x__||__loongarch__" \
+          "\nyes\n#endif" \
    | ${CROSS_COMPILE}gcc -E -nostdlib -xc - \
    | grep -q '^yes'; then
        # architecture supported by nolibc
index d446099375038cc231ef885cbe3b9464737ce3db..979edbf4c8205a5bb7a64e3837dac13266df17fa 100644 (file)
@@ -1 +1,4 @@
 nohz_full=2-9
+rcutorture.stall_cpu=14
+rcutorture.stall_cpu_holdoff=90
+rcutorture.fwd_progress=0
index f224b84591fbfca4bc05f997fd5137928680da0c..cec81610a5f27bd31086024565ade68e2b9a570a 100644 (file)
@@ -4,7 +4,7 @@
 
 CFLAGS += -I$(top_srcdir)/tools/include
 
-TEST_GEN_PROGS := hwprobe cbo
+TEST_GEN_PROGS := hwprobe cbo which-cpus
 
 include ../../lib.mk
 
@@ -13,3 +13,6 @@ $(OUTPUT)/hwprobe: hwprobe.c sys_hwprobe.S
 
 $(OUTPUT)/cbo: cbo.c sys_hwprobe.S
        $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
+
+$(OUTPUT)/which-cpus: which-cpus.c sys_hwprobe.S
+       $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
index 50a2cc8aef387cacc2444d5fa7264c8953e5d0b8..c537d52fafc586d5644ca6f7ec13a4358b4051c0 100644 (file)
@@ -36,16 +36,14 @@ static void sigill_handler(int sig, siginfo_t *info, void *context)
        regs[0] += 4;
 }
 
-static void cbo_insn(char *base, int fn)
-{
-       uint32_t insn = MK_CBO(fn);
-
-       asm volatile(
-       "mv     a0, %0\n"
-       "li     a1, %1\n"
-       ".4byte %2\n"
-       : : "r" (base), "i" (fn), "i" (insn) : "a0", "a1", "memory");
-}
+#define cbo_insn(base, fn)                                                     \
+({                                                                             \
+       asm volatile(                                                           \
+       "mv     a0, %0\n"                                                       \
+       "li     a1, %1\n"                                                       \
+       ".4byte %2\n"                                                           \
+       : : "r" (base), "i" (fn), "i" (MK_CBO(fn)) : "a0", "a1", "memory");     \
+})
 
 static void cbo_inval(char *base) { cbo_insn(base, 0); }
 static void cbo_clean(char *base) { cbo_insn(base, 1); }
@@ -97,7 +95,7 @@ static void test_zicboz(void *arg)
        block_size = pair.value;
        ksft_test_result(rc == 0 && pair.key == RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE &&
                         is_power_of_2(block_size), "Zicboz block size\n");
-       ksft_print_msg("Zicboz block size: %ld\n", block_size);
+       ksft_print_msg("Zicboz block size: %llu\n", block_size);
 
        illegal_insn = false;
        cbo_zero(&mem[block_size]);
@@ -121,7 +119,7 @@ static void test_zicboz(void *arg)
                for (j = 0; j < block_size; ++j) {
                        if (mem[i * block_size + j] != expected) {
                                ksft_test_result_fail("cbo.zero check\n");
-                               ksft_print_msg("cbo.zero check: mem[%d] != 0x%x\n",
+                               ksft_print_msg("cbo.zero check: mem[%llu] != 0x%x\n",
                                               i * block_size + j, expected);
                                return;
                        }
@@ -201,7 +199,7 @@ int main(int argc, char **argv)
        pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
        rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&cpus, 0);
        if (rc < 0)
-               ksft_exit_fail_msg("hwprobe() failed with %d\n", rc);
+               ksft_exit_fail_msg("hwprobe() failed with %ld\n", rc);
        assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0);
 
        if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ) {
index c474891df307140fdd21a2a89b4d495790b47181..fd73c87804f348ff9a80a2b7f13d67bbd16903da 100644 (file)
@@ -29,7 +29,7 @@ int main(int argc, char **argv)
                /* Fail if the kernel claims not to recognize a base key. */
                if ((i < 4) && (pairs[i].key != i))
                        ksft_exit_fail_msg("Failed to recognize base key: key != i, "
-                                          "key=%ld, i=%ld\n", pairs[i].key, i);
+                                          "key=%lld, i=%ld\n", pairs[i].key, i);
 
                if (pairs[i].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR)
                        continue;
@@ -37,7 +37,7 @@ int main(int argc, char **argv)
                if (pairs[i].value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA)
                        continue;
 
-               ksft_exit_fail_msg("Unexpected pair: (%ld, %ld)\n", pairs[i].key, pairs[i].value);
+               ksft_exit_fail_msg("Unexpected pair: (%lld, %llu)\n", pairs[i].key, pairs[i].value);
        }
 
        out = riscv_hwprobe(pairs, 8, 0, 0, 0);
@@ -47,7 +47,7 @@ int main(int argc, char **argv)
        ksft_test_result(out != 0, "Bad CPU set\n");
 
        out = riscv_hwprobe(pairs, 8, 1, 0, 0);
-       ksft_test_result(out != 0, "NULL CPU set with non-zero count\n");
+       ksft_test_result(out != 0, "NULL CPU set with non-zero size\n");
 
        pairs[0].key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR;
        out = riscv_hwprobe(pairs, 1, 1, &cpus, 0);
index 721b0ce73a56907d1759a666ca4a43cc0a2aa957..e3fccb390c4dc94d0c224223192f767606b4da17 100644 (file)
@@ -10,6 +10,6 @@
  * contain the call.
  */
 long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
-                  size_t cpu_count, unsigned long *cpus, unsigned int flags);
+                  size_t cpusetsize, unsigned long *cpus, unsigned int flags);
 
 #endif
diff --git a/tools/testing/selftests/riscv/hwprobe/which-cpus.c b/tools/testing/selftests/riscv/hwprobe/which-cpus.c
new file mode 100644 (file)
index 0000000..82c1214
--- /dev/null
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Ventana Micro Systems Inc.
+ *
+ * Test the RISCV_HWPROBE_WHICH_CPUS flag of hwprobe. Also provides a command
+ * line interface to get the cpu list for arbitrary hwprobe pairs.
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sched.h>
+#include <unistd.h>
+#include <assert.h>
+
+#include "hwprobe.h"
+#include "../../kselftest.h"
+
+static void help(void)
+{
+       printf("\n"
+              "which-cpus: [-h] [<key=value> [<key=value> ...]]\n\n"
+              "   Without parameters, tests the RISCV_HWPROBE_WHICH_CPUS flag of hwprobe.\n"
+              "   With parameters, where each parameter is a hwprobe pair written as\n"
+              "   <key=value>, outputs the cpulist for cpus which all match the given set\n"
+              "   of pairs.  'key' and 'value' should be in numeric form, e.g. 4=0x3b\n");
+}
+
+static void print_cpulist(cpu_set_t *cpus)
+{
+       int start = 0, end = 0;
+
+       if (!CPU_COUNT(cpus)) {
+               printf("cpus: None\n");
+               return;
+       }
+
+       printf("cpus:");
+       for (int i = 0, c = 0; i < CPU_COUNT(cpus); i++, c++) {
+               if (start != end && !CPU_ISSET(c, cpus))
+                       printf("-%d", end);
+
+               while (!CPU_ISSET(c, cpus))
+                       ++c;
+
+               if (i != 0 && c == end + 1) {
+                       end = c;
+                       continue;
+               }
+
+               printf("%c%d", i == 0 ? ' ' : ',', c);
+               start = end = c;
+       }
+       if (start != end)
+               printf("-%d", end);
+       printf("\n");
+}
+
+static void do_which_cpus(int argc, char **argv, cpu_set_t *cpus)
+{
+       struct riscv_hwprobe *pairs;
+       int nr_pairs = argc - 1;
+       char *start, *end;
+       int rc;
+
+       pairs = malloc(nr_pairs * sizeof(struct riscv_hwprobe));
+       assert(pairs);
+
+       for (int i = 0; i < nr_pairs; i++) {
+               start = argv[i + 1];
+               pairs[i].key = strtol(start, &end, 0);
+               assert(end != start && *end == '=');
+               start = end + 1;
+               pairs[i].value = strtoul(start, &end, 0);
+               assert(end != start && *end == '\0');
+       }
+
+       rc = riscv_hwprobe(pairs, nr_pairs, sizeof(cpu_set_t), (unsigned long *)cpus, RISCV_HWPROBE_WHICH_CPUS);
+       assert(rc == 0);
+       print_cpulist(cpus);
+       free(pairs);
+}
+
+int main(int argc, char **argv)
+{
+       struct riscv_hwprobe pairs[2];
+       cpu_set_t cpus_aff, cpus;
+       __u64 ext0_all;
+       long rc;
+
+       rc = sched_getaffinity(0, sizeof(cpu_set_t), &cpus_aff);
+       assert(rc == 0);
+
+       if (argc > 1) {
+               if (!strcmp(argv[1], "-h"))
+                       help();
+               else
+                       do_which_cpus(argc, argv, &cpus_aff);
+               return 0;
+       }
+
+       ksft_print_header();
+       ksft_set_plan(7);
+
+       pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, };
+       rc = riscv_hwprobe(pairs, 1, 0, NULL, 0);
+       assert(rc == 0 && pairs[0].key == RISCV_HWPROBE_KEY_BASE_BEHAVIOR &&
+              pairs[0].value == RISCV_HWPROBE_BASE_BEHAVIOR_IMA);
+
+       pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, };
+       rc = riscv_hwprobe(pairs, 1, 0, NULL, 0);
+       assert(rc == 0 && pairs[0].key == RISCV_HWPROBE_KEY_IMA_EXT_0);
+       ext0_all = pairs[0].value;
+
+       pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
+       CPU_ZERO(&cpus);
+       rc = riscv_hwprobe(pairs, 1, 0, (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
+       ksft_test_result(rc == -EINVAL, "no cpusetsize\n");
+
+       pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
+       rc = riscv_hwprobe(pairs, 1, sizeof(cpu_set_t), NULL, RISCV_HWPROBE_WHICH_CPUS);
+       ksft_test_result(rc == -EINVAL, "NULL cpus\n");
+
+       pairs[0] = (struct riscv_hwprobe){ .key = 0xbadc0de, };
+       CPU_ZERO(&cpus);
+       rc = riscv_hwprobe(pairs, 1, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
+       ksft_test_result(rc == 0 && CPU_COUNT(&cpus) == 0, "unknown key\n");
+
+       pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
+       pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
+       CPU_ZERO(&cpus);
+       rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
+       ksft_test_result(rc == 0, "duplicate keys\n");
+
+       pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
+       pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, .value = ext0_all, };
+       CPU_ZERO(&cpus);
+       rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
+       ksft_test_result(rc == 0 && CPU_COUNT(&cpus) == sysconf(_SC_NPROCESSORS_ONLN), "set all cpus\n");
+
+       pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
+       pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, .value = ext0_all, };
+       memcpy(&cpus, &cpus_aff, sizeof(cpu_set_t));
+       rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
+       ksft_test_result(rc == 0 && CPU_EQUAL(&cpus, &cpus_aff), "set all affinity cpus\n");
+
+       pairs[0] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR, .value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA, };
+       pairs[1] = (struct riscv_hwprobe){ .key = RISCV_HWPROBE_KEY_IMA_EXT_0, .value = ~ext0_all, };
+       memcpy(&cpus, &cpus_aff, sizeof(cpu_set_t));
+       rc = riscv_hwprobe(pairs, 2, sizeof(cpu_set_t), (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
+       ksft_test_result(rc == 0 && CPU_COUNT(&cpus) == 0, "clear all cpus\n");
+
+       ksft_finished();
+}
index 9b8434f62f570d472871641f8ec0c351fc48b3fb..2e0db9c5be6c334f9ed7d0187fae6ed6de950745 100644 (file)
@@ -18,6 +18,8 @@ struct addresses {
        int *on_56_addr;
 };
 
+// Only works on 64 bit
+#if __riscv_xlen == 64
 static inline void do_mmaps(struct addresses *mmap_addresses)
 {
        /*
@@ -50,6 +52,7 @@ static inline void do_mmaps(struct addresses *mmap_addresses)
        mmap_addresses->on_56_addr =
                mmap(on_56_bits, 5 * sizeof(int), prot, flags, 0, 0);
 }
+#endif /* __riscv_xlen == 64 */
 
 static inline int memory_layout(void)
 {
index 66764edb0d5268e8e2aabcb2c47aa3bfdc84033e..1dd94197da30cc5d17c3aa731e6a50b48d3569f4 100644 (file)
@@ -27,7 +27,7 @@ int main(void)
 
        datap = malloc(MAX_VSIZE);
        if (!datap) {
-               ksft_test_result_fail("fail to allocate memory for size = %lu\n", MAX_VSIZE);
+               ksft_test_result_fail("fail to allocate memory for size = %d\n", MAX_VSIZE);
                exit(-1);
        }
 
index 2c0d2b1126c1e31db76fbd722bdf311bfbaafa9d..1f9969bed2355befb50355e23625d9af8a5a5256 100644 (file)
@@ -1,4 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/wait.h>
+
 #define THIS_PROGRAM "./vstate_exec_nolibc"
 
 int main(int argc, char **argv)
index b348b475be570cdd14d9c7de13c35ece1dc3901c..27668fb3b6d08209b8c6a98dec01d6935941b47e 100644 (file)
@@ -1,20 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 #include <sys/prctl.h>
 #include <unistd.h>
-#include <asm/hwprobe.h>
 #include <errno.h>
 #include <sys/wait.h>
 
+#include "../hwprobe/hwprobe.h"
 #include "../../kselftest.h"
 
-/*
- * Rather than relying on having a new enough libc to define this, just do it
- * ourselves.  This way we don't need to be coupled to a new-enough libc to
- * contain the call.
- */
-long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
-                  size_t cpu_count, unsigned long *cpus, unsigned int flags);
-
 #define NEXT_PROGRAM "./vstate_exec_nolibc"
 static int launch_test(int test_inherit)
 {
@@ -68,7 +60,7 @@ int test_and_compare_child(long provided, long expected, int inherit)
        }
        rc = launch_test(inherit);
        if (rc != expected) {
-               ksft_test_result_fail("Test failed, check %d != %d\n", rc,
+               ksft_test_result_fail("Test failed, check %d != %ld\n", rc,
                                      expected);
                return -2;
        }
@@ -87,7 +79,7 @@ int main(void)
        pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
        rc = riscv_hwprobe(&pair, 1, 0, NULL, 0);
        if (rc < 0) {
-               ksft_test_result_fail("hwprobe() failed with %d\n", rc);
+               ksft_test_result_fail("hwprobe() failed with %ld\n", rc);
                return -1;
        }
 
index 88754296196870a5d0ef3afb52373c8d40cbc598..2348d2c20d0a1aaf3a05a1c7005983f442708b3c 100644 (file)
@@ -24,6 +24,11 @@ bool rseq_validate_cpu_id(void)
 {
        return rseq_mm_cid_available();
 }
+static
+bool rseq_use_cpu_index(void)
+{
+       return false;   /* Use mm_cid */
+}
 #else
 # define RSEQ_PERCPU   RSEQ_PERCPU_CPU_ID
 static
@@ -36,6 +41,11 @@ bool rseq_validate_cpu_id(void)
 {
        return rseq_current_cpu_raw() >= 0;
 }
+static
+bool rseq_use_cpu_index(void)
+{
+       return true;    /* Use cpu_id as index. */
+}
 #endif
 
 struct percpu_lock_entry {
@@ -274,7 +284,7 @@ void test_percpu_list(void)
        /* Generate list entries for every usable cpu. */
        sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
        for (i = 0; i < CPU_SETSIZE; i++) {
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
                for (j = 1; j <= 100; j++) {
                        struct percpu_list_node *node;
@@ -299,7 +309,7 @@ void test_percpu_list(void)
        for (i = 0; i < CPU_SETSIZE; i++) {
                struct percpu_list_node *node;
 
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
 
                while ((node = __percpu_list_pop(&list, i))) {
index 20403d58345cd523186b9423750ea7ad669cdd96..2f37961240caa7cc43f142fac32fd7f9c9c211d4 100644 (file)
@@ -288,6 +288,11 @@ bool rseq_validate_cpu_id(void)
 {
        return rseq_mm_cid_available();
 }
+static
+bool rseq_use_cpu_index(void)
+{
+       return false;   /* Use mm_cid */
+}
 # ifdef TEST_MEMBARRIER
 /*
  * Membarrier does not currently support targeting a mm_cid, so
@@ -312,6 +317,11 @@ bool rseq_validate_cpu_id(void)
 {
        return rseq_current_cpu_raw() >= 0;
 }
+static
+bool rseq_use_cpu_index(void)
+{
+       return true;    /* Use cpu_id as index. */
+}
 # ifdef TEST_MEMBARRIER
 static
 int rseq_membarrier_expedited(int cpu)
@@ -715,7 +725,7 @@ void test_percpu_list(void)
        /* Generate list entries for every usable cpu. */
        sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
        for (i = 0; i < CPU_SETSIZE; i++) {
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
                for (j = 1; j <= 100; j++) {
                        struct percpu_list_node *node;
@@ -752,7 +762,7 @@ void test_percpu_list(void)
        for (i = 0; i < CPU_SETSIZE; i++) {
                struct percpu_list_node *node;
 
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
 
                while ((node = __percpu_list_pop(&list, i))) {
@@ -902,7 +912,7 @@ void test_percpu_buffer(void)
        /* Generate list entries for every usable cpu. */
        sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
        for (i = 0; i < CPU_SETSIZE; i++) {
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
                /* Worse-case is every item in same CPU. */
                buffer.c[i].array =
@@ -952,7 +962,7 @@ void test_percpu_buffer(void)
        for (i = 0; i < CPU_SETSIZE; i++) {
                struct percpu_buffer_node *node;
 
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
 
                while ((node = __percpu_buffer_pop(&buffer, i))) {
@@ -1113,7 +1123,7 @@ void test_percpu_memcpy_buffer(void)
        /* Generate list entries for every usable cpu. */
        sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
        for (i = 0; i < CPU_SETSIZE; i++) {
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
                /* Worse-case is every item in same CPU. */
                buffer.c[i].array =
@@ -1160,7 +1170,7 @@ void test_percpu_memcpy_buffer(void)
        for (i = 0; i < CPU_SETSIZE; i++) {
                struct percpu_memcpy_buffer_node item;
 
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
 
                while (__percpu_memcpy_buffer_pop(&buffer, &item, i)) {
index 5b5c9d558dee07bc1f7afd7df280e1189858451e..97b86980b768f4fa09da58f16d71ba42f42d2c8d 100644 (file)
@@ -38,10 +38,10 @@ unsigned long long timing(clockid_t clk_id, unsigned long long samples)
        i *= 1000000000ULL;
        i += finish.tv_nsec - start.tv_nsec;
 
-       printf("%lu.%09lu - %lu.%09lu = %llu (%.1fs)\n",
-               finish.tv_sec, finish.tv_nsec,
-               start.tv_sec, start.tv_nsec,
-               i, (double)i / 1000000000.0);
+       ksft_print_msg("%lu.%09lu - %lu.%09lu = %llu (%.1fs)\n",
+                      finish.tv_sec, finish.tv_nsec,
+                      start.tv_sec, start.tv_nsec,
+                      i, (double)i / 1000000000.0);
 
        return i;
 }
@@ -53,7 +53,7 @@ unsigned long long calibrate(void)
        pid_t pid, ret;
        int seconds = 15;
 
-       printf("Calibrating sample size for %d seconds worth of syscalls ...\n", seconds);
+       ksft_print_msg("Calibrating sample size for %d seconds worth of syscalls ...\n", seconds);
 
        samples = 0;
        pid = getpid();
@@ -98,24 +98,36 @@ bool le(int i_one, int i_two)
 }
 
 long compare(const char *name_one, const char *name_eval, const char *name_two,
-            unsigned long long one, bool (*eval)(int, int), unsigned long long two)
+            unsigned long long one, bool (*eval)(int, int), unsigned long long two,
+            bool skip)
 {
        bool good;
 
-       printf("\t%s %s %s (%lld %s %lld): ", name_one, name_eval, name_two,
-              (long long)one, name_eval, (long long)two);
+       if (skip) {
+               ksft_test_result_skip("%s %s %s\n", name_one, name_eval,
+                                     name_two);
+               return 0;
+       }
+
+       ksft_print_msg("\t%s %s %s (%lld %s %lld): ", name_one, name_eval, name_two,
+                      (long long)one, name_eval, (long long)two);
        if (one > INT_MAX) {
-               printf("Miscalculation! Measurement went negative: %lld\n", (long long)one);
-               return 1;
+               ksft_print_msg("Miscalculation! Measurement went negative: %lld\n", (long long)one);
+               good = false;
+               goto out;
        }
        if (two > INT_MAX) {
-               printf("Miscalculation! Measurement went negative: %lld\n", (long long)two);
-               return 1;
+               ksft_print_msg("Miscalculation! Measurement went negative: %lld\n", (long long)two);
+               good = false;
+               goto out;
        }
 
        good = eval(one, two);
        printf("%s\n", good ? "✔️" : "❌");
 
+out:
+       ksft_test_result(good, "%s %s %s\n", name_one, name_eval, name_two);
+
        return good ? 0 : 1;
 }
 
@@ -142,15 +154,22 @@ int main(int argc, char *argv[])
        unsigned long long samples, calc;
        unsigned long long native, filter1, filter2, bitmap1, bitmap2;
        unsigned long long entry, per_filter1, per_filter2;
+       bool skip = false;
 
        setbuf(stdout, NULL);
 
-       printf("Running on:\n");
+       ksft_print_header();
+       ksft_set_plan(7);
+
+       ksft_print_msg("Running on:\n");
+       ksft_print_msg("");
        system("uname -a");
 
-       printf("Current BPF sysctl settings:\n");
+       ksft_print_msg("Current BPF sysctl settings:\n");
        /* Avoid using "sysctl" which may not be installed. */
+       ksft_print_msg("");
        system("grep -H . /proc/sys/net/core/bpf_jit_enable");
+       ksft_print_msg("");
        system("grep -H . /proc/sys/net/core/bpf_jit_harden");
 
        if (argc > 1)
@@ -158,11 +177,11 @@ int main(int argc, char *argv[])
        else
                samples = calibrate();
 
-       printf("Benchmarking %llu syscalls...\n", samples);
+       ksft_print_msg("Benchmarking %llu syscalls...\n", samples);
 
        /* Native call */
        native = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-       printf("getpid native: %llu ns\n", native);
+       ksft_print_msg("getpid native: %llu ns\n", native);
 
        ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
        assert(ret == 0);
@@ -172,35 +191,37 @@ int main(int argc, char *argv[])
        assert(ret == 0);
 
        bitmap1 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-       printf("getpid RET_ALLOW 1 filter (bitmap): %llu ns\n", bitmap1);
+       ksft_print_msg("getpid RET_ALLOW 1 filter (bitmap): %llu ns\n", bitmap1);
 
        /* Second filter resulting in a bitmap */
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
        assert(ret == 0);
 
        bitmap2 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-       printf("getpid RET_ALLOW 2 filters (bitmap): %llu ns\n", bitmap2);
+       ksft_print_msg("getpid RET_ALLOW 2 filters (bitmap): %llu ns\n", bitmap2);
 
        /* Third filter, can no longer be converted to bitmap */
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
        assert(ret == 0);
 
        filter1 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-       printf("getpid RET_ALLOW 3 filters (full): %llu ns\n", filter1);
+       ksft_print_msg("getpid RET_ALLOW 3 filters (full): %llu ns\n", filter1);
 
        /* Fourth filter, can not be converted to bitmap because of filter 3 */
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
        assert(ret == 0);
 
        filter2 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-       printf("getpid RET_ALLOW 4 filters (full): %llu ns\n", filter2);
+       ksft_print_msg("getpid RET_ALLOW 4 filters (full): %llu ns\n", filter2);
 
        /* Estimations */
 #define ESTIMATE(fmt, var, what)       do {                    \
                var = (what);                                   \
-               printf("Estimated " fmt ": %llu ns\n", var);    \
-               if (var > INT_MAX)                              \
-                       goto more_samples;                      \
+               ksft_print_msg("Estimated " fmt ": %llu ns\n", var);    \
+               if (var > INT_MAX) {                            \
+                       skip = true;                            \
+                       ret |= 1;                               \
+               }                                               \
        } while (0)
 
        ESTIMATE("total seccomp overhead for 1 bitmapped filter", calc,
@@ -218,31 +239,34 @@ int main(int argc, char *argv[])
        ESTIMATE("seccomp per-filter overhead (filters / 4)", per_filter2,
                 (filter2 - native - entry) / 4);
 
-       printf("Expectations:\n");
-       ret |= compare("native", "≤", "1 bitmap", native, le, bitmap1);
-       bits = compare("native", "≤", "1 filter", native, le, filter1);
+       ksft_print_msg("Expectations:\n");
+       ret |= compare("native", "≤", "1 bitmap", native, le, bitmap1,
+                      skip);
+       bits = compare("native", "≤", "1 filter", native, le, filter1,
+                      skip);
        if (bits)
-               goto more_samples;
+               skip = true;
 
        ret |= compare("per-filter (last 2 diff)", "≈", "per-filter (filters / 4)",
-                       per_filter1, approx, per_filter2);
+                      per_filter1, approx, per_filter2, skip);
 
        bits = compare("1 bitmapped", "≈", "2 bitmapped",
-                       bitmap1 - native, approx, bitmap2 - native);
+                      bitmap1 - native, approx, bitmap2 - native, skip);
        if (bits) {
-               printf("Skipping constant action bitmap expectations: they appear unsupported.\n");
-               goto out;
+               ksft_print_msg("Skipping constant action bitmap expectations: they appear unsupported.\n");
+               skip = true;
        }
 
-       ret |= compare("entry", "≈", "1 bitmapped", entry, approx, bitmap1 - native);
-       ret |= compare("entry", "≈", "2 bitmapped", entry, approx, bitmap2 - native);
+       ret |= compare("entry", "≈", "1 bitmapped", entry, approx,
+                      bitmap1 - native, skip);
+       ret |= compare("entry", "≈", "2 bitmapped", entry, approx,
+                      bitmap2 - native, skip);
        ret |= compare("native + entry + (per filter * 4)", "≈", "4 filters total",
-                       entry + (per_filter1 * 4) + native, approx, filter2);
-       if (ret == 0)
-               goto out;
+                      entry + (per_filter1 * 4) + native, approx, filter2,
+                      skip);
 
-more_samples:
-       printf("Saw unexpected benchmark result. Try running again with more samples?\n");
-out:
-       return 0;
+       if (ret)
+               ksft_print_msg("Saw unexpected benchmark result. Try running again with more samples?\n");
+
+       ksft_finished();
 }
index 50aab6b57da34d0f9572f0bbc72f76f12acd6b02..867f88ce2570aef81e3283a9b6484460cfae9b68 100644 (file)
@@ -12,14 +12,16 @@ OBJCOPY := $(CROSS_COMPILE)objcopy
 endif
 
 INCLUDES := -I$(top_srcdir)/tools/include
-HOST_CFLAGS := -Wall -Werror -g $(INCLUDES) -fPIC -z noexecstack
-ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \
+HOST_CFLAGS := -Wall -Werror -g $(INCLUDES) -fPIC
+HOST_LDFLAGS := -z noexecstack -lcrypto
+ENCL_CFLAGS += -Wall -Werror -static-pie -nostdlib -ffreestanding -fPIE \
               -fno-stack-protector -mrdrnd $(INCLUDES)
+ENCL_LDFLAGS := -Wl,-T,test_encl.lds,--build-id=none
 
+ifeq ($(CAN_BUILD_X86_64), 1)
 TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx
 TEST_FILES := $(OUTPUT)/test_encl.elf
 
-ifeq ($(CAN_BUILD_X86_64), 1)
 all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf
 endif
 
@@ -28,7 +30,7 @@ $(OUTPUT)/test_sgx: $(OUTPUT)/main.o \
                    $(OUTPUT)/sigstruct.o \
                    $(OUTPUT)/call.o \
                    $(OUTPUT)/sign_key.o
-       $(CC) $(HOST_CFLAGS) -o $@ $^ -lcrypto
+       $(CC) $(HOST_CFLAGS) -o $@ $^ $(HOST_LDFLAGS)
 
 $(OUTPUT)/main.o: main.c
        $(CC) $(HOST_CFLAGS) -c $< -o $@
@@ -45,8 +47,8 @@ $(OUTPUT)/call.o: call.S
 $(OUTPUT)/sign_key.o: sign_key.S
        $(CC) $(HOST_CFLAGS) -c $< -o $@
 
-$(OUTPUT)/test_encl.elf: test_encl.lds test_encl.c test_encl_bootstrap.S
-       $(CC) $(ENCL_CFLAGS) -T $^ -o $@ -Wl,--build-id=none
+$(OUTPUT)/test_encl.elf: test_encl.c test_encl_bootstrap.S
+       $(CC) $(ENCL_CFLAGS) $^ -o $@ $(ENCL_LDFLAGS)
 
 EXTRA_CLEAN := \
        $(OUTPUT)/test_encl.elf \
index d8587c971941a8ac40f9a284fc34d2993fa0d8af..402f8787a71cc0221b1af0690efbee1c8e0bfb42 100644 (file)
@@ -13,6 +13,8 @@
 
 #define __aligned(x) __attribute__((__aligned__(x)))
 #define __packed __attribute__((packed))
+#define __used __attribute__((used))
+#define __section(x)__attribute__((__section__(x)))
 
 #include "../../../../arch/x86/include/asm/sgx.h"
 #include "../../../../arch/x86/include/asm/enclu.h"
index 94bdeac1cf041a28e6bdea9a006f1d0d1098c330..c9f658e44de6c1b2266012d0ea908d7a9b3bb605 100644 (file)
@@ -136,11 +136,11 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
  */
 uint64_t encl_get_entry(struct encl *encl, const char *symbol)
 {
+       Elf64_Sym *symtab = NULL;
+       char *sym_names = NULL;
        Elf64_Shdr *sections;
-       Elf64_Sym *symtab;
        Elf64_Ehdr *ehdr;
-       char *sym_names;
-       int num_sym;
+       int num_sym = 0;
        int i;
 
        ehdr = encl->bin;
@@ -161,6 +161,9 @@ uint64_t encl_get_entry(struct encl *encl, const char *symbol)
                }
        }
 
+       if (!symtab || !sym_names)
+               return 0;
+
        for (i = 0; i < num_sym; i++) {
                Elf64_Sym *sym = &symtab[i];
 
index a07896a463643d3bd8e6e29c1dfc7f6b44e5f49e..d73b29becf5b01b7ea5366a085194fbb7965ed87 100644 (file)
@@ -318,9 +318,9 @@ bool encl_measure(struct encl *encl)
        struct sgx_sigstruct *sigstruct = &encl->sigstruct;
        struct sgx_sigstruct_payload payload;
        uint8_t digest[SHA256_DIGEST_LENGTH];
+       EVP_MD_CTX *ctx = NULL;
        unsigned int siglen;
        RSA *key = NULL;
-       EVP_MD_CTX *ctx;
        int i;
 
        memset(sigstruct, 0, sizeof(*sigstruct));
@@ -384,7 +384,8 @@ bool encl_measure(struct encl *encl)
        return true;
 
 err:
-       EVP_MD_CTX_destroy(ctx);
+       if (ctx)
+               EVP_MD_CTX_destroy(ctx);
        RSA_free(key);
        return false;
 }
index c0d6397295e311499484e1332e4f7f7d05775bbb..2c4d709cce2d9151f37af2ccd243bf5c847d614e 100644 (file)
@@ -5,11 +5,12 @@
 #include "defines.h"
 
 /*
- * Data buffer spanning two pages that will be placed first in .data
- * segment. Even if not used internally the second page is needed by
- * external test manipulating page permissions.
+ * Data buffer spanning two pages that will be placed first in the .data
+ * segment via the linker script. Even if not used internally the second page
+ * is needed by an external test manipulating page permissions, so mark
+ * encl_buffer as "used" to make sure it is entirely preserved by the compiler.
  */
-static uint8_t encl_buffer[8192] = { 1 };
+static uint8_t __used __section(".data.encl_buffer") encl_buffer[8192] = { 1 };
 
 enum sgx_enclu_function {
        EACCEPT = 0x5,
@@ -24,10 +25,11 @@ static void do_encl_emodpe(void *_op)
        secinfo.flags = op->flags;
 
        asm volatile(".byte 0x0f, 0x01, 0xd7"
-                               :
+                               : /* no outputs */
                                : "a" (EMODPE),
                                  "b" (&secinfo),
-                                 "c" (op->epc_addr));
+                                 "c" (op->epc_addr)
+                               : "memory" /* read from secinfo pointer */);
 }
 
 static void do_encl_eaccept(void *_op)
@@ -42,7 +44,8 @@ static void do_encl_eaccept(void *_op)
                                : "=a" (rax)
                                : "a" (EACCEPT),
                                  "b" (&secinfo),
-                                 "c" (op->epc_addr));
+                                 "c" (op->epc_addr)
+                               : "memory" /* read from secinfo pointer */);
 
        op->ret = rax;
 }
@@ -119,21 +122,41 @@ static void do_encl_op_nop(void *_op)
 
 }
 
+/*
+ * Symbol placed at the start of the enclave image by the linker script.
+ * Declare this extern symbol with visibility "hidden" to ensure the compiler
+ * does not access it through the GOT and generates position-independent
+ * addressing as __encl_base(%rip), so we can get the actual enclave base
+ * during runtime.
+ */
+extern const uint8_t __attribute__((visibility("hidden"))) __encl_base;
+
+typedef void (*encl_op_t)(void *);
+static const encl_op_t encl_op_array[ENCL_OP_MAX] = {
+       do_encl_op_put_to_buf,
+       do_encl_op_get_from_buf,
+       do_encl_op_put_to_addr,
+       do_encl_op_get_from_addr,
+       do_encl_op_nop,
+       do_encl_eaccept,
+       do_encl_emodpe,
+       do_encl_init_tcs_page,
+};
+
 void encl_body(void *rdi,  void *rsi)
 {
-       const void (*encl_op_array[ENCL_OP_MAX])(void *) = {
-               do_encl_op_put_to_buf,
-               do_encl_op_get_from_buf,
-               do_encl_op_put_to_addr,
-               do_encl_op_get_from_addr,
-               do_encl_op_nop,
-               do_encl_eaccept,
-               do_encl_emodpe,
-               do_encl_init_tcs_page,
-       };
-
-       struct encl_op_header *op = (struct encl_op_header *)rdi;
-
-       if (op->type < ENCL_OP_MAX)
-               (*encl_op_array[op->type])(op);
+       struct encl_op_header *header = (struct encl_op_header *)rdi;
+       encl_op_t op;
+
+       if (header->type >= ENCL_OP_MAX)
+               return;
+
+       /*
+        * The enclave base address needs to be added, as this call site
+        * *cannot be* made rip-relative by the compiler, or fixed up by
+        * any other possible means.
+        */
+       op = ((uint64_t)&__encl_base) + encl_op_array[header->type];
+
+       (*op)(header);
 }
index a1ec64f7d91fc52bc8a8971fd64f261790a73cf1..ffe851a1cac4063135edb5e0ae46a3c80dcb5edb 100644 (file)
@@ -10,6 +10,7 @@ PHDRS
 SECTIONS
 {
        . = 0;
+        __encl_base = .;
        .tcs : {
                *(.tcs*)
        } : tcs
@@ -23,6 +24,7 @@ SECTIONS
        } : text
 
        .data : {
+               *(.data.encl_buffer)
                *(.data*)
        } : data
 
@@ -31,11 +33,9 @@ SECTIONS
                *(.note*)
                *(.debug*)
                *(.eh_frame*)
+               *(.dyn*)
+               *(.gnu.hash)
        }
 }
 
-ASSERT(!DEFINED(.altinstructions), "ALTERNATIVES are not supported in enclaves")
-ASSERT(!DEFINED(.altinstr_replacement), "ALTERNATIVES are not supported in enclaves")
-ASSERT(!DEFINED(.discard.retpoline_safe), "RETPOLINE ALTERNATIVES are not supported in enclaves")
-ASSERT(!DEFINED(.discard.nospec), "RETPOLINE ALTERNATIVES are not supported in enclaves")
-ASSERT(!DEFINED(.got.plt), "Libcalls are not supported in enclaves")
+ASSERT(!DEFINED(_GLOBAL_OFFSET_TABLE_), "Libcalls through GOT are not supported in enclaves")
index 03ae0f57e29d0ef1f6ad7f963d78f4a09e0fbeac..d8c4ac94e032c9bef82d827ac37f00323ac0ab6c 100644 (file)
 encl_entry:
        # RBX contains the base address for TCS, which is the first address
        # inside the enclave for TCS #1 and one page into the enclave for
-       # TCS #2. By adding the value of encl_stack to it, we get
-       # the absolute address for the stack.
-       lea     (encl_stack)(%rbx), %rax
+       # TCS #2. First make it relative by subtracting __encl_base and
+       # then add the address of encl_stack to get the address for the stack.
+       lea __encl_base(%rip), %rax
+       sub %rax, %rbx
+       lea encl_stack(%rip), %rax
+       add %rbx, %rax
        jmp encl_entry_core
 encl_dyn_entry:
        # Entry point for dynamically created TCS page expected to follow
@@ -55,25 +58,12 @@ encl_entry_core:
        push    %rax
 
        push    %rcx # push the address after EENTER
-       push    %rbx # push the enclave base address
 
+       # NOTE: as the selftest enclave is *not* intended for production,
+       # simplify the code by not initializing ABI registers on entry or
+       # cleansing caller-save registers on exit.
        call    encl_body
 
-       pop     %rbx # pop the enclave base address
-
-       /* Clear volatile GPRs, except RAX (EEXIT function). */
-       xor     %rcx, %rcx
-       xor     %rdx, %rdx
-       xor     %rdi, %rdi
-       xor     %rsi, %rsi
-       xor     %r8, %r8
-       xor     %r9, %r9
-       xor     %r10, %r10
-       xor     %r11, %r11
-
-       # Reset status flags.
-       add     %rdx, %rdx # OF = SF = AF = CF = 0; ZF = PF = 1
-
        # Prepare EEXIT target by popping the address of the instruction after
        # EENTER to RBX.
        pop     %rbx
index 2456a399eb9ae1ce2a3c90c10ce1403dd23f62d1..afd18c678ff5a584a92e13ae8ee30bcba90f21ca 100644 (file)
@@ -28,10 +28,15 @@ FOPTS       :=      -flto=auto -ffat-lto-objects -fexceptions -fstack-protector-strong \
                -fasynchronous-unwind-tables -fstack-clash-protection
 WOPTS  :=      -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized
 
+ifeq ($(CC),clang)
+  FOPTS := $(filter-out -ffat-lto-objects, $(FOPTS))
+  WOPTS := $(filter-out -Wno-maybe-uninitialized, $(WOPTS))
+endif
+
 TRACEFS_HEADERS        := $$($(PKG_CONFIG) --cflags libtracefs)
 
 CFLAGS :=      -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS)
-LDFLAGS        :=      -ggdb $(EXTRA_LDFLAGS)
+LDFLAGS        :=      -flto=auto -ggdb $(EXTRA_LDFLAGS)
 LIBS   :=      $$($(PKG_CONFIG) --libs libtracefs)
 
 SRC    :=      $(wildcard src/*.c)
index 8f81fa007364890dd4303b047c484786a56390f2..01870d50942a19a242f6444c7b11f019b460ec4e 100644 (file)
@@ -135,8 +135,7 @@ static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu,
        if (params->output_divisor)
                duration = duration / params->output_divisor;
 
-       if (data->bucket_size)
-               bucket = duration / data->bucket_size;
+       bucket = duration / data->bucket_size;
 
        total_duration = duration * count;
 
@@ -480,7 +479,11 @@ static void osnoise_hist_usage(char *usage)
 
        for (i = 0; msg[i]; i++)
                fprintf(stderr, "%s\n", msg[i]);
-       exit(1);
+
+       if (usage)
+               exit(EXIT_FAILURE);
+
+       exit(EXIT_SUCCESS);
 }
 
 /*
index f7c959be8677799788eda3e577246cd3d2ec444a..457360db07673191fbc034c9fdb3193b91c4499c 100644 (file)
@@ -331,7 +331,11 @@ static void osnoise_top_usage(struct osnoise_top_params *params, char *usage)
 
        for (i = 0; msg[i]; i++)
                fprintf(stderr, "%s\n", msg[i]);
-       exit(1);
+
+       if (usage)
+               exit(EXIT_FAILURE);
+
+       exit(EXIT_SUCCESS);
 }
 
 /*
index 47d3d8b53cb2177fe7db4c39a21e630aca22fa23..dbf154082f958c146bed6537dc527f83e57993d4 100644 (file)
@@ -178,8 +178,7 @@ timerlat_hist_update(struct osnoise_tool *tool, int cpu,
        if (params->output_divisor)
                latency = latency / params->output_divisor;
 
-       if (data->bucket_size)
-               bucket = latency / data->bucket_size;
+       bucket = latency / data->bucket_size;
 
        if (!context) {
                hist = data->hist[cpu].irq;
@@ -546,7 +545,11 @@ static void timerlat_hist_usage(char *usage)
 
        for (i = 0; msg[i]; i++)
                fprintf(stderr, "%s\n", msg[i]);
-       exit(1);
+
+       if (usage)
+               exit(EXIT_FAILURE);
+
+       exit(EXIT_SUCCESS);
 }
 
 /*
index 1640f121baca50d99b94621309522d3fb824bc33..3e9af2c3868880197dc3075b74d94a15bea07d38 100644 (file)
@@ -375,7 +375,11 @@ static void timerlat_top_usage(char *usage)
 
        for (i = 0; msg[i]; i++)
                fprintf(stderr, "%s\n", msg[i]);
-       exit(1);
+
+       if (usage)
+               exit(EXIT_FAILURE);
+
+       exit(EXIT_SUCCESS);
 }
 
 /*
index c769d7b3842c0967e85f7dc1d8c6c705edd1f2dd..9ac71a66840c1bec2e944f3a9db0f427f3c7edfb 100644 (file)
@@ -238,12 +238,6 @@ static inline int sched_setattr(pid_t pid, const struct sched_attr *attr,
        return syscall(__NR_sched_setattr, pid, attr, flags);
 }
 
-static inline int sched_getattr(pid_t pid, struct sched_attr *attr,
-                               unsigned int size, unsigned int flags)
-{
-       return syscall(__NR_sched_getattr, pid, attr, size, flags);
-}
-
 int __set_sched_attr(int pid, struct sched_attr *attr)
 {
        int flags = 0;
@@ -479,13 +473,13 @@ int parse_prio(char *arg, struct sched_attr *sched_param)
                if (prio == INVALID_VAL)
                        return -1;
 
-               if (prio < sched_get_priority_min(SCHED_OTHER))
+               if (prio < MIN_NICE)
                        return -1;
-               if (prio > sched_get_priority_max(SCHED_OTHER))
+               if (prio > MAX_NICE)
                        return -1;
 
                sched_param->sched_policy   = SCHED_OTHER;
-               sched_param->sched_priority = prio;
+               sched_param->sched_nice = prio;
                break;
        default:
                return -1;
@@ -536,7 +530,7 @@ int set_cpu_dma_latency(int32_t latency)
  */
 static const int find_mount(const char *fs, char *mp, int sizeof_mp)
 {
-       char mount_point[MAX_PATH];
+       char mount_point[MAX_PATH+1];
        char type[100];
        int found = 0;
        FILE *fp;
index 04ed1e650495a357daabfe40653c1eab5e89b005..d44513e6c66a01a5fc75f472dcfb9ebbfbd57bfb 100644 (file)
@@ -9,6 +9,8 @@
  */
 #define BUFF_U64_STR_SIZE      24
 #define MAX_PATH               1024
+#define MAX_NICE               20
+#define MIN_NICE               -19
 
 #define container_of(ptr, type, member)({                      \
        const typeof(((type *)0)->member) *__mptr = (ptr);      \
index 3d0f3888a58c66816fca24b5a9d7ab2106f0e992..485f8aeddbe033f227faf32139630d2a616cbd66 100644 (file)
@@ -28,10 +28,15 @@ FOPTS       :=      -flto=auto -ffat-lto-objects -fexceptions -fstack-protector-strong \
                -fasynchronous-unwind-tables -fstack-clash-protection
 WOPTS  :=      -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized
 
+ifeq ($(CC),clang)
+  FOPTS := $(filter-out -ffat-lto-objects, $(FOPTS))
+  WOPTS := $(filter-out -Wno-maybe-uninitialized, $(WOPTS))
+endif
+
 TRACEFS_HEADERS        := $$($(PKG_CONFIG) --cflags libtracefs)
 
 CFLAGS :=      -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS) -I include
-LDFLAGS        :=      -ggdb $(EXTRA_LDFLAGS)
+LDFLAGS        :=      -flto=auto -ggdb $(EXTRA_LDFLAGS)
 LIBS   :=      $$($(PKG_CONFIG) --libs libtracefs)
 
 SRC    :=      $(wildcard src/*.c)
index ad28582bcf2b1ca6b6c9ba9e5d09b0bb5fbe63c0..f04479ecc96c0b75af1afb2e7855cf1cf2491970 100644 (file)
@@ -210,9 +210,9 @@ static char *ikm_read_reactor(char *monitor_name)
 static char *ikm_get_current_reactor(char *monitor_name)
 {
        char *reactors = ikm_read_reactor(monitor_name);
+       char *curr_reactor = NULL;
        char *start;
        char *end;
-       char *curr_reactor;
 
        if (!reactors)
                return NULL;
index 61230532fef10f7261db75e5757a8b2c4366d2d9..edcdb8abfa31ca82e2cb506e2c7749ad8116e7e0 100644 (file)
@@ -27,6 +27,7 @@
 static unsigned int offset;
 static unsigned int ino = 721;
 static time_t default_mtime;
+static bool do_file_mtime;
 static bool do_csum = false;
 
 struct file_handler {
@@ -329,6 +330,7 @@ static int cpio_mkfile(const char *name, const char *location,
        int file;
        int retval;
        int rc = -1;
+       time_t mtime;
        int namesize;
        unsigned int i;
        uint32_t csum = 0;
@@ -347,16 +349,21 @@ static int cpio_mkfile(const char *name, const char *location,
                goto error;
        }
 
-       if (buf.st_mtime > 0xffffffff) {
-               fprintf(stderr, "%s: Timestamp exceeds maximum cpio timestamp, clipping.\n",
-                       location);
-               buf.st_mtime = 0xffffffff;
-       }
+       if (do_file_mtime) {
+               mtime = default_mtime;
+       } else {
+               mtime = buf.st_mtime;
+               if (mtime > 0xffffffff) {
+                       fprintf(stderr, "%s: Timestamp exceeds maximum cpio timestamp, clipping.\n",
+                                       location);
+                       mtime = 0xffffffff;
+               }
 
-       if (buf.st_mtime < 0) {
-               fprintf(stderr, "%s: Timestamp negative, clipping.\n",
-                       location);
-               buf.st_mtime = 0;
+               if (mtime < 0) {
+                       fprintf(stderr, "%s: Timestamp negative, clipping.\n",
+                                       location);
+                       mtime = 0;
+               }
        }
 
        if (buf.st_size > 0xffffffff) {
@@ -387,7 +394,7 @@ static int cpio_mkfile(const char *name, const char *location,
                        (long) uid,             /* uid */
                        (long) gid,             /* gid */
                        nlinks,                 /* nlink */
-                       (long) buf.st_mtime,    /* mtime */
+                       (long) mtime,           /* mtime */
                        size,                   /* filesize */
                        3,                      /* major */
                        1,                      /* minor */
@@ -536,8 +543,9 @@ static void usage(const char *prog)
                "file /sbin/kinit /usr/src/klibc/kinit/kinit 0755 0 0\n"
                "\n"
                "<timestamp> is time in seconds since Epoch that will be used\n"
-               "as mtime for symlinks, special files and directories. The default\n"
-               "is to use the current time for these entries.\n"
+               "as mtime for symlinks, directories, regular and special files.\n"
+               "The default is to use the current time for all files, but\n"
+               "preserve modification time for regular files.\n"
                "-c: calculate and store 32-bit checksums for file data.\n",
                prog);
 }
@@ -594,6 +602,7 @@ int main (int argc, char *argv[])
                                usage(argv[0]);
                                exit(1);
                        }
+                       do_file_mtime = true;
                        break;
                case 'c':
                        do_csum = true;
index 484d0873061ca5041c546f1c44520a1848ec1d16..184dab4ee871c6d2f54e1b095fab43c5f9f97e9a 100644 (file)
@@ -4,13 +4,16 @@
 config HAVE_KVM
        bool
 
-config HAVE_KVM_PFNCACHE
+config KVM_COMMON
        bool
+       select EVENTFD
+       select INTERVAL_TREE
+       select PREEMPT_NOTIFIERS
 
-config HAVE_KVM_IRQCHIP
+config HAVE_KVM_PFNCACHE
        bool
 
-config HAVE_KVM_IRQFD
+config HAVE_KVM_IRQCHIP
        bool
 
 config HAVE_KVM_IRQ_ROUTING
@@ -39,10 +42,6 @@ config NEED_KVM_DIRTY_RING_WITH_BITMAP
        bool
        depends on HAVE_KVM_DIRTY_RING
 
-config HAVE_KVM_EVENTFD
-       bool
-       select EVENTFD
-
 config KVM_MMIO
        bool
 
@@ -92,3 +91,20 @@ config HAVE_KVM_PM_NOTIFIER
 
 config KVM_GENERIC_HARDWARE_ENABLING
        bool
+
+config KVM_GENERIC_MMU_NOTIFIER
+       select MMU_NOTIFIER
+       bool
+
+config KVM_GENERIC_MEMORY_ATTRIBUTES
+       depends on KVM_GENERIC_MMU_NOTIFIER
+       bool
+
+config KVM_PRIVATE_MEM
+       select XARRAY_MULTI
+       bool
+
+config KVM_GENERIC_PRIVATE_MEM
+       select KVM_GENERIC_MEMORY_ATTRIBUTES
+       select KVM_PRIVATE_MEM
+       bool
index 2c27d5d0c367c5873902c512604e1ce418f6a11f..724c89af78af963c1d3b160486cf3dc09e5a4e2b 100644 (file)
@@ -12,3 +12,4 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
 kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
 kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o
 kvm-$(CONFIG_HAVE_KVM_PFNCACHE) += $(KVM)/pfncache.o
+kvm-$(CONFIG_KVM_PRIVATE_MEM) += $(KVM)/guest_memfd.o
index c1cd7dfe4a9088fc046c73e3e24dca4293ff99f5..86d267db87bb13b6bb7050b0325415abf21eea2e 100644 (file)
@@ -58,7 +58,7 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
        as_id = slot >> 16;
        id = (u16)slot;
 
-       if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+       if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
                return;
 
        memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
index c0e230f4c3e9318a8d949054b739518995f58ce3..229570059a1bb5fc44be96499b17090d13150d4e 100644 (file)
@@ -28,7 +28,7 @@
 
 #include <kvm/iodev.h>
 
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
 
 static struct workqueue_struct *irqfd_cleanup_wq;
 
@@ -526,21 +526,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
        synchronize_srcu(&kvm->irq_srcu);
        kvm_arch_post_irq_ack_notifier_list_update(kvm);
 }
-#endif
-
-void
-kvm_eventfd_init(struct kvm *kvm)
-{
-#ifdef CONFIG_HAVE_KVM_IRQFD
-       spin_lock_init(&kvm->irqfds.lock);
-       INIT_LIST_HEAD(&kvm->irqfds.items);
-       INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
-       mutex_init(&kvm->irqfds.resampler_lock);
-#endif
-       INIT_LIST_HEAD(&kvm->ioeventfds);
-}
 
-#ifdef CONFIG_HAVE_KVM_IRQFD
 /*
  * shutdown any irqfd's that match fd+gsi
  */
@@ -1012,3 +998,15 @@ kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
        return kvm_assign_ioeventfd(kvm, args);
 }
+
+void
+kvm_eventfd_init(struct kvm *kvm)
+{
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+       spin_lock_init(&kvm->irqfds.lock);
+       INIT_LIST_HEAD(&kvm->irqfds.items);
+       INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
+       mutex_init(&kvm->irqfds.resampler_lock);
+#endif
+       INIT_LIST_HEAD(&kvm->ioeventfds);
+}
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
new file mode 100644 (file)
index 0000000..0f4e0cf
--- /dev/null
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/backing-dev.h>
+#include <linux/falloc.h>
+#include <linux/kvm_host.h>
+#include <linux/pagemap.h>
+#include <linux/anon_inodes.h>
+
+#include "kvm_mm.h"
+
+struct kvm_gmem {
+       struct kvm *kvm;
+       struct xarray bindings;
+       struct list_head entry;
+};
+
+static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
+{
+       struct folio *folio;
+
+       /* TODO: Support huge pages. */
+       folio = filemap_grab_folio(inode->i_mapping, index);
+       if (IS_ERR_OR_NULL(folio))
+               return NULL;
+
+       /*
+        * Use the up-to-date flag to track whether or not the memory has been
+        * zeroed before being handed off to the guest.  There is no backing
+        * storage for the memory, so the folio will remain up-to-date until
+        * it's removed.
+        *
+        * TODO: Skip clearing pages when trusted firmware will do it when
+        * assigning memory to the guest.
+        */
+       if (!folio_test_uptodate(folio)) {
+               unsigned long nr_pages = folio_nr_pages(folio);
+               unsigned long i;
+
+               for (i = 0; i < nr_pages; i++)
+                       clear_highpage(folio_page(folio, i));
+
+               folio_mark_uptodate(folio);
+       }
+
+       /*
+        * Ignore accessed, referenced, and dirty flags.  The memory is
+        * unevictable and there is no storage to write back to.
+        */
+       return folio;
+}
+
+static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
+                                     pgoff_t end)
+{
+       bool flush = false, found_memslot = false;
+       struct kvm_memory_slot *slot;
+       struct kvm *kvm = gmem->kvm;
+       unsigned long index;
+
+       xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
+               pgoff_t pgoff = slot->gmem.pgoff;
+
+               struct kvm_gfn_range gfn_range = {
+                       .start = slot->base_gfn + max(pgoff, start) - pgoff,
+                       .end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
+                       .slot = slot,
+                       .may_block = true,
+               };
+
+               if (!found_memslot) {
+                       found_memslot = true;
+
+                       KVM_MMU_LOCK(kvm);
+                       kvm_mmu_invalidate_begin(kvm);
+               }
+
+               flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
+       }
+
+       if (flush)
+               kvm_flush_remote_tlbs(kvm);
+
+       if (found_memslot)
+               KVM_MMU_UNLOCK(kvm);
+}
+
+static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
+                                   pgoff_t end)
+{
+       struct kvm *kvm = gmem->kvm;
+
+       if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
+               KVM_MMU_LOCK(kvm);
+               kvm_mmu_invalidate_end(kvm);
+               KVM_MMU_UNLOCK(kvm);
+       }
+}
+
+static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+{
+       struct list_head *gmem_list = &inode->i_mapping->i_private_list;
+       pgoff_t start = offset >> PAGE_SHIFT;
+       pgoff_t end = (offset + len) >> PAGE_SHIFT;
+       struct kvm_gmem *gmem;
+
+       /*
+        * Bindings must be stable across invalidation to ensure the start+end
+        * are balanced.
+        */
+       filemap_invalidate_lock(inode->i_mapping);
+
+       list_for_each_entry(gmem, gmem_list, entry)
+               kvm_gmem_invalidate_begin(gmem, start, end);
+
+       truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
+
+       list_for_each_entry(gmem, gmem_list, entry)
+               kvm_gmem_invalidate_end(gmem, start, end);
+
+       filemap_invalidate_unlock(inode->i_mapping);
+
+       return 0;
+}
+
+static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
+{
+       struct address_space *mapping = inode->i_mapping;
+       pgoff_t start, index, end;
+       int r;
+
+       /* Dedicated guest is immutable by default. */
+       if (offset + len > i_size_read(inode))
+               return -EINVAL;
+
+       filemap_invalidate_lock_shared(mapping);
+
+       start = offset >> PAGE_SHIFT;
+       end = (offset + len) >> PAGE_SHIFT;
+
+       r = 0;
+       for (index = start; index < end; ) {
+               struct folio *folio;
+
+               if (signal_pending(current)) {
+                       r = -EINTR;
+                       break;
+               }
+
+               folio = kvm_gmem_get_folio(inode, index);
+               if (!folio) {
+                       r = -ENOMEM;
+                       break;
+               }
+
+               index = folio_next_index(folio);
+
+               folio_unlock(folio);
+               folio_put(folio);
+
+               /* 64-bit only, wrapping the index should be impossible. */
+               if (WARN_ON_ONCE(!index))
+                       break;
+
+               cond_resched();
+       }
+
+       filemap_invalidate_unlock_shared(mapping);
+
+       return r;
+}
+
+static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
+                              loff_t len)
+{
+       int ret;
+
+       if (!(mode & FALLOC_FL_KEEP_SIZE))
+               return -EOPNOTSUPP;
+
+       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+               return -EOPNOTSUPP;
+
+       if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))
+               return -EINVAL;
+
+       if (mode & FALLOC_FL_PUNCH_HOLE)
+               ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
+       else
+               ret = kvm_gmem_allocate(file_inode(file), offset, len);
+
+       if (!ret)
+               file_modified(file);
+       return ret;
+}
+
+/*
+ * Final release of a guest_memfd file: sever all memslot bindings, zap the
+ * SPTEs pointing at this file, and drop the file's reference on the VM.
+ * The backing memory itself lives with the inode, not the file, and is
+ * freed separately.
+ */
+static int kvm_gmem_release(struct inode *inode, struct file *file)
+{
+       struct kvm_gmem *gmem = file->private_data;
+       struct kvm_memory_slot *slot;
+       struct kvm *kvm = gmem->kvm;
+       unsigned long index;
+
+       /*
+        * Prevent concurrent attempts to *unbind* a memslot.  This is the last
+        * reference to the file and thus no new bindings can be created, but
+        * dereferencing the slot for existing bindings needs to be protected
+        * against memslot updates, specifically so that unbind doesn't race
+        * and free the memslot (kvm_gmem_get_file() will return NULL).
+        */
+       mutex_lock(&kvm->slots_lock);
+
+       filemap_invalidate_lock(inode->i_mapping);
+
+       /* Clear every binding's file pointer so readers see the file is gone. */
+       xa_for_each(&gmem->bindings, index, slot)
+               rcu_assign_pointer(slot->gmem.file, NULL);
+
+       /* Wait out any RCU readers that grabbed the file pointer above. */
+       synchronize_rcu();
+
+       /*
+        * All in-flight operations are gone and new bindings can be created.
+        * Zap all SPTEs pointed at by this file.  Do not free the backing
+        * memory, as its lifetime is associated with the inode, not the file.
+        */
+       kvm_gmem_invalidate_begin(gmem, 0, -1ul);
+       kvm_gmem_invalidate_end(gmem, 0, -1ul);
+
+       list_del(&gmem->entry);
+
+       filemap_invalidate_unlock(inode->i_mapping);
+
+       mutex_unlock(&kvm->slots_lock);
+
+       xa_destroy(&gmem->bindings);
+       kfree(gmem);
+
+       /* Drops the reference taken in __kvm_gmem_create(). */
+       kvm_put_kvm(kvm);
+
+       return 0;
+}
+
+/* Grab a reference to @slot's gmem file, or NULL if it is no longer live. */
+static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
+{
+       /*
+        * Do not return slot->gmem.file if it has already been closed;
+        * there might be some time between the last fput() and when
+        * kvm_gmem_release() clears slot->gmem.file, and you do not
+        * want to spin in the meanwhile.
+        */
+       return get_file_active(&slot->gmem.file);
+}
+
+/* File operations; .owner is filled in by kvm_gmem_init() at module init. */
+static struct file_operations kvm_gmem_fops = {
+       .open           = generic_file_open,
+       .release        = kvm_gmem_release,
+       .fallocate      = kvm_gmem_fallocate,
+};
+
+/* One-time setup: pin the fops owner to the KVM module. */
+void kvm_gmem_init(struct module *module)
+{
+       kvm_gmem_fops.owner = module;
+}
+
+/*
+ * guest_memfd mappings are marked unmovable (see mapping_set_unmovable() in
+ * __kvm_gmem_create()), so migration should never be attempted; WARN and
+ * reject if it is.
+ */
+static int kvm_gmem_migrate_folio(struct address_space *mapping,
+                                 struct folio *dst, struct folio *src,
+                                 enum migrate_mode mode)
+{
+       WARN_ON_ONCE(1);
+       return -EINVAL;
+}
+
+/*
+ * Memory-failure (hwpoison) callback: run the invalidation sequence for the
+ * affected range on every VM bound to this mapping, but leave the poisoned
+ * page in place (MF_DELAYED) so the eventual reaction is deferred to
+ * userspace/KVM.
+ */
+static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
+{
+       struct list_head *gmem_list = &mapping->i_private_list;
+       struct kvm_gmem *gmem;
+       pgoff_t start, end;
+
+       filemap_invalidate_lock_shared(mapping);
+
+       start = folio->index;
+       end = start + folio_nr_pages(folio);
+
+       list_for_each_entry(gmem, gmem_list, entry)
+               kvm_gmem_invalidate_begin(gmem, start, end);
+
+       /*
+        * Do not truncate the range, what action is taken in response to the
+        * error is userspace's decision (assuming the architecture supports
+        * gracefully handling memory errors).  If/when the guest attempts to
+        * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON,
+        * at which point KVM can either terminate the VM or propagate the
+        * error to userspace.
+        */
+
+       list_for_each_entry(gmem, gmem_list, entry)
+               kvm_gmem_invalidate_end(gmem, start, end);
+
+       filemap_invalidate_unlock_shared(mapping);
+
+       return MF_DELAYED;
+}
+
+/* Address-space ops: dirty tracking is a no-op, migration is rejected. */
+static const struct address_space_operations kvm_gmem_aops = {
+       .dirty_folio = noop_dirty_folio,
+       .migrate_folio  = kvm_gmem_migrate_folio,
+       .error_remove_folio = kvm_gmem_error_folio,
+};
+
+/* stat() support: just fill in the generic inode attributes. */
+static int kvm_gmem_getattr(struct mnt_idmap *idmap, const struct path *path,
+                           struct kstat *stat, u32 request_mask,
+                           unsigned int query_flags)
+{
+       struct inode *inode = path->dentry->d_inode;
+
+       generic_fillattr(idmap, request_mask, inode, stat);
+       return 0;
+}
+
+/* All attribute changes (chmod/chown/truncate via setattr) are rejected. */
+static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+                           struct iattr *attr)
+{
+       return -EINVAL;
+}
+static const struct inode_operations kvm_gmem_iops = {
+       .getattr        = kvm_gmem_getattr,
+       .setattr        = kvm_gmem_setattr,
+};
+
+/*
+ * Create a guest_memfd file of @size bytes and return a new fd referencing
+ * it.  The file takes a reference on @kvm (dropped in kvm_gmem_release()).
+ * Returns the fd on success, a negative errno on failure.
+ */
+static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
+{
+       const char *anon_name = "[kvm-gmem]";
+       struct kvm_gmem *gmem;
+       struct inode *inode;
+       struct file *file;
+       int fd, err;
+
+       fd = get_unused_fd_flags(0);
+       if (fd < 0)
+               return fd;
+
+       gmem = kzalloc(sizeof(*gmem), GFP_KERNEL);
+       if (!gmem) {
+               err = -ENOMEM;
+               goto err_fd;
+       }
+
+       file = anon_inode_create_getfile(anon_name, &kvm_gmem_fops, gmem,
+                                        O_RDWR, NULL);
+       if (IS_ERR(file)) {
+               err = PTR_ERR(file);
+               goto err_gmem;
+       }
+
+       /* Allow offsets/sizes beyond 2 GiB. */
+       file->f_flags |= O_LARGEFILE;
+
+       inode = file->f_inode;
+       WARN_ON(file->f_mapping != inode->i_mapping);
+
+       /* Stash the creation flags in i_private for later retrieval. */
+       inode->i_private = (void *)(unsigned long)flags;
+       inode->i_op = &kvm_gmem_iops;
+       inode->i_mapping->a_ops = &kvm_gmem_aops;
+       inode->i_mode |= S_IFREG;
+       inode->i_size = size;
+       mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
+       mapping_set_unmovable(inode->i_mapping);
+       /* Unmovable mappings are supposed to be marked unevictable as well. */
+       WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));
+
+       kvm_get_kvm(kvm);
+       gmem->kvm = kvm;
+       xa_init(&gmem->bindings);
+       list_add(&gmem->entry, &inode->i_mapping->i_private_list);
+
+       fd_install(fd, file);
+       return fd;
+
+err_gmem:
+       kfree(gmem);
+err_fd:
+       put_unused_fd(fd);
+       return err;
+}
+
+/*
+ * KVM_CREATE_GUEST_MEMFD ioctl handler: validate userspace's arguments and
+ * create the fd.  Size must be positive and page-aligned.
+ */
+int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
+{
+       loff_t size = args->size;
+       u64 flags = args->flags;
+       u64 valid_flags = 0;
+
+       /* No creation flags are supported yet. */
+       if (flags & ~valid_flags)
+               return -EINVAL;
+
+       if (size <= 0 || !PAGE_ALIGNED(size))
+               return -EINVAL;
+
+       return __kvm_gmem_create(kvm, size, flags);
+}
+
+/*
+ * Bind @slot to the range of the guest_memfd file @fd starting at @offset.
+ * Returns 0 on success; -EBADF for a bad fd; -EINVAL for a non-gmem file, a
+ * file belonging to another VM, a misaligned or out-of-bounds offset, or an
+ * overlap with an existing binding.
+ */
+int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
+                 unsigned int fd, loff_t offset)
+{
+       loff_t size = slot->npages << PAGE_SHIFT;
+       unsigned long start, end;
+       struct kvm_gmem *gmem;
+       struct inode *inode;
+       struct file *file;
+       int r = -EINVAL;
+
+       BUILD_BUG_ON(sizeof(gfn_t) != sizeof(slot->gmem.pgoff));
+
+       file = fget(fd);
+       if (!file)
+               return -EBADF;
+
+       if (file->f_op != &kvm_gmem_fops)
+               goto err;
+
+       gmem = file->private_data;
+       if (gmem->kvm != kvm)
+               goto err;
+
+       inode = file_inode(file);
+
+       if (offset < 0 || !PAGE_ALIGNED(offset) ||
+           offset + size > i_size_read(inode))
+               goto err;
+
+       filemap_invalidate_lock(inode->i_mapping);
+
+       start = offset >> PAGE_SHIFT;
+       end = start + slot->npages;
+
+       /* Reject any overlap with a range already bound to another slot. */
+       if (!xa_empty(&gmem->bindings) &&
+           xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
+               filemap_invalidate_unlock(inode->i_mapping);
+               goto err;
+       }
+
+       /*
+        * No synchronize_rcu() needed, any in-flight readers are guaranteed
+        * to see either a NULL file or this new file; no need for them to go
+        * away.
+        */
+       rcu_assign_pointer(slot->gmem.file, file);
+       slot->gmem.pgoff = start;
+
+       xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
+       filemap_invalidate_unlock(inode->i_mapping);
+
+       /*
+        * Drop the reference to the file, even on success.  The file pins KVM,
+        * not the other way 'round.  Active bindings are invalidated if the
+        * file is closed before memslots are destroyed.
+        */
+       r = 0;
+err:
+       fput(file);
+       return r;
+}
+
+/* Remove @slot's binding from its guest_memfd file, if the file is live. */
+void kvm_gmem_unbind(struct kvm_memory_slot *slot)
+{
+       unsigned long start = slot->gmem.pgoff;
+       unsigned long end = start + slot->npages;
+       struct kvm_gmem *gmem;
+       struct file *file;
+
+       /*
+        * Nothing to do if the underlying file was already closed (or is being
+        * closed right now), kvm_gmem_release() invalidates all bindings.
+        */
+       file = kvm_gmem_get_file(slot);
+       if (!file)
+               return;
+
+       gmem = file->private_data;
+
+       filemap_invalidate_lock(file->f_mapping);
+       /* Erase the binding range, then wait out readers of gmem.file. */
+       xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
+       rcu_assign_pointer(slot->gmem.file, NULL);
+       synchronize_rcu();
+       filemap_invalidate_unlock(file->f_mapping);
+
+       fput(file);
+}
+
+/*
+ * Resolve @gfn in @slot to a guest_memfd-backed pfn.  On success returns 0
+ * and sets *pfn (and *max_order, currently always 0, if non-NULL).  Returns
+ * -EFAULT if the file is gone, -EIO on a stale binding, -ENOMEM if the
+ * folio can't be obtained, or -EHWPOISON for a poisoned page.
+ */
+int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+                    gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
+{
+       pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
+       struct kvm_gmem *gmem;
+       struct folio *folio;
+       struct page *page;
+       struct file *file;
+       int r;
+
+       file = kvm_gmem_get_file(slot);
+       if (!file)
+               return -EFAULT;
+
+       gmem = file->private_data;
+
+       /* The binding at @index must point back at this very slot. */
+       if (WARN_ON_ONCE(xa_load(&gmem->bindings, index) != slot)) {
+               r = -EIO;
+               goto out_fput;
+       }
+
+       folio = kvm_gmem_get_folio(file_inode(file), index);
+       if (!folio) {
+               r = -ENOMEM;
+               goto out_fput;
+       }
+
+       if (folio_test_hwpoison(folio)) {
+               r = -EHWPOISON;
+               goto out_unlock;
+       }
+
+       page = folio_file_page(folio, index);
+
+       *pfn = page_to_pfn(page);
+       if (max_order)
+               *max_order = 0;
+
+       r = 0;
+
+out_unlock:
+       folio_unlock(folio);
+out_fput:
+       fput(file);
+
+       return r;
+}
+EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);
index 7db96875ac46279886e464d37801254367530891..10bfc88a69f72b6a0e310cca043fb04882e24eb1 100644 (file)
@@ -533,30 +533,43 @@ void kvm_destroy_vcpus(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
 
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 {
        return container_of(mn, struct kvm, mmu_notifier);
 }
 
-typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
+typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 
-typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
-                            unsigned long end);
+typedef void (*on_lock_fn_t)(struct kvm *kvm);
 
-typedef void (*on_unlock_fn_t)(struct kvm *kvm);
-
-struct kvm_hva_range {
-       unsigned long start;
-       unsigned long end;
+struct kvm_mmu_notifier_range {
+       /*
+        * 64-bit addresses, as KVM notifiers can operate on host virtual
+        * addresses (unsigned long) and guest physical addresses (64-bit).
+        */
+       u64 start;
+       u64 end;
        union kvm_mmu_notifier_arg arg;
-       hva_handler_t handler;
+       gfn_handler_t handler;
        on_lock_fn_t on_lock;
-       on_unlock_fn_t on_unlock;
        bool flush_on_ret;
        bool may_block;
 };
 
+/*
+ * The inner-most helper returns a tuple containing the return value from the
+ * arch- and action-specific handler, plus a flag indicating whether or not at
+ * least one memslot was found, i.e. if the handler found guest memory.
+ *
+ * Note, most notifiers are averse to booleans, so even though KVM tracks the
+ * return from arch code as a bool, outer helpers will cast it to an int. :-(
+ */
+typedef struct kvm_mmu_notifier_return {
+       bool ret;
+       bool found_memslot;
+} kvm_mn_ret_t;
+
 /*
  * Use a dedicated stub instead of NULL to indicate that there is no callback
  * function/handler.  The compiler technically can't guarantee that a real
@@ -578,26 +591,29 @@ static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
             node;                                                           \
             node = interval_tree_iter_next(node, start, last))      \
 
-static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
-                                                 const struct kvm_hva_range *range)
+static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
+                                                          const struct kvm_mmu_notifier_range *range)
 {
-       bool ret = false, locked = false;
+       struct kvm_mmu_notifier_return r = {
+               .ret = false,
+               .found_memslot = false,
+       };
        struct kvm_gfn_range gfn_range;
        struct kvm_memory_slot *slot;
        struct kvm_memslots *slots;
        int i, idx;
 
        if (WARN_ON_ONCE(range->end <= range->start))
-               return 0;
+               return r;
 
        /* A null handler is allowed if and only if on_lock() is provided. */
        if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
                         IS_KVM_NULL_FN(range->handler)))
-               return 0;
+               return r;
 
        idx = srcu_read_lock(&kvm->srcu);
 
-       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+       for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
                struct interval_tree_node *node;
 
                slots = __kvm_memslots(kvm, i);
@@ -606,9 +622,9 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
                        unsigned long hva_start, hva_end;
 
                        slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
-                       hva_start = max(range->start, slot->userspace_addr);
-                       hva_end = min(range->end, slot->userspace_addr +
-                                                 (slot->npages << PAGE_SHIFT));
+                       hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
+                       hva_end = min_t(unsigned long, range->end,
+                                       slot->userspace_addr + (slot->npages << PAGE_SHIFT));
 
                        /*
                         * To optimize for the likely case where the address
@@ -627,71 +643,66 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
                        gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
                        gfn_range.slot = slot;
 
-                       if (!locked) {
-                               locked = true;
+                       if (!r.found_memslot) {
+                               r.found_memslot = true;
                                KVM_MMU_LOCK(kvm);
                                if (!IS_KVM_NULL_FN(range->on_lock))
-                                       range->on_lock(kvm, range->start, range->end);
+                                       range->on_lock(kvm);
+
                                if (IS_KVM_NULL_FN(range->handler))
                                        break;
                        }
-                       ret |= range->handler(kvm, &gfn_range);
+                       r.ret |= range->handler(kvm, &gfn_range);
                }
        }
 
-       if (range->flush_on_ret && ret)
+       if (range->flush_on_ret && r.ret)
                kvm_flush_remote_tlbs(kvm);
 
-       if (locked) {
+       if (r.found_memslot)
                KVM_MMU_UNLOCK(kvm);
-               if (!IS_KVM_NULL_FN(range->on_unlock))
-                       range->on_unlock(kvm);
-       }
 
        srcu_read_unlock(&kvm->srcu, idx);
 
-       /* The notifiers are averse to booleans. :-( */
-       return (int)ret;
+       return r;
 }
 
 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
                                                unsigned long start,
                                                unsigned long end,
                                                union kvm_mmu_notifier_arg arg,
-                                               hva_handler_t handler)
+                                               gfn_handler_t handler)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-       const struct kvm_hva_range range = {
+       const struct kvm_mmu_notifier_range range = {
                .start          = start,
                .end            = end,
                .arg            = arg,
                .handler        = handler,
                .on_lock        = (void *)kvm_null_fn,
-               .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = true,
                .may_block      = false,
        };
 
-       return __kvm_handle_hva_range(kvm, &range);
+       return __kvm_handle_hva_range(kvm, &range).ret;
 }
 
 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
                                                         unsigned long start,
                                                         unsigned long end,
-                                                        hva_handler_t handler)
+                                                        gfn_handler_t handler)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-       const struct kvm_hva_range range = {
+       const struct kvm_mmu_notifier_range range = {
                .start          = start,
                .end            = end,
                .handler        = handler,
                .on_lock        = (void *)kvm_null_fn,
-               .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = false,
                .may_block      = false,
        };
 
-       return __kvm_handle_hva_range(kvm, &range);
+       return __kvm_handle_hva_range(kvm, &range).ret;
 }
 
 static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -736,16 +747,29 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
        kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
 }
 
-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
-                             unsigned long end)
+void kvm_mmu_invalidate_begin(struct kvm *kvm)
 {
+       lockdep_assert_held_write(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_invalidate_in_progress++;
+
        if (likely(kvm->mmu_invalidate_in_progress == 1)) {
+               kvm->mmu_invalidate_range_start = INVALID_GPA;
+               kvm->mmu_invalidate_range_end = INVALID_GPA;
+       }
+}
+
+void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
+{
+       lockdep_assert_held_write(&kvm->mmu_lock);
+
+       WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
+
+       if (likely(kvm->mmu_invalidate_range_start == INVALID_GPA)) {
                kvm->mmu_invalidate_range_start = start;
                kvm->mmu_invalidate_range_end = end;
        } else {
@@ -765,16 +789,21 @@ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
        }
 }
 
+bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+       kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
+       return kvm_unmap_gfn_range(kvm, range);
+}
+
 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                        const struct mmu_notifier_range *range)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-       const struct kvm_hva_range hva_range = {
+       const struct kvm_mmu_notifier_range hva_range = {
                .start          = range->start,
                .end            = range->end,
-               .handler        = kvm_unmap_gfn_range,
+               .handler        = kvm_mmu_unmap_gfn_range,
                .on_lock        = kvm_mmu_invalidate_begin,
-               .on_unlock      = kvm_arch_guest_memory_reclaimed,
                .flush_on_ret   = true,
                .may_block      = mmu_notifier_range_blockable(range),
        };
@@ -806,14 +835,21 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
        gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
                                          hva_range.may_block);
 
-       __kvm_handle_hva_range(kvm, &hva_range);
+       /*
+        * If one or more memslots were found and thus zapped, notify arch code
+        * that guest memory has been reclaimed.  This needs to be done *after*
+        * dropping mmu_lock, as x86's reclaim path is slooooow.
+        */
+       if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
+               kvm_arch_guest_memory_reclaimed(kvm);
 
        return 0;
 }
 
-void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
-                           unsigned long end)
+void kvm_mmu_invalidate_end(struct kvm *kvm)
 {
+       lockdep_assert_held_write(&kvm->mmu_lock);
+
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
@@ -827,18 +863,24 @@ void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
         * in conjunction with the smp_rmb in mmu_invalidate_retry().
         */
        kvm->mmu_invalidate_in_progress--;
+       KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm);
+
+       /*
+        * Assert that at least one range was added between start() and end().
+        * Not adding a range isn't fatal, but it is a KVM bug.
+        */
+       WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA);
 }
 
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                        const struct mmu_notifier_range *range)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
-       const struct kvm_hva_range hva_range = {
+       const struct kvm_mmu_notifier_range hva_range = {
                .start          = range->start,
                .end            = range->end,
                .handler        = (void *)kvm_null_fn,
                .on_lock        = kvm_mmu_invalidate_end,
-               .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = false,
                .may_block      = mmu_notifier_range_blockable(range),
        };
@@ -857,8 +899,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
         */
        if (wake)
                rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
-
-       BUG_ON(kvm->mmu_invalidate_in_progress < 0);
 }
 
 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
@@ -932,14 +972,14 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
        return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
 }
 
-#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
+#else  /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */
 
 static int kvm_init_mmu_notifier(struct kvm *kvm)
 {
        return 0;
 }
 
-#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
+#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */
 
 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
 static int kvm_pm_notifier_call(struct notifier_block *bl,
@@ -985,6 +1025,9 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 /* This does not remove the slot from struct kvm_memslots data structures */
 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
+       if (slot->flags & KVM_MEM_GUEST_MEMFD)
+               kvm_gmem_unbind(slot);
+
        kvm_destroy_dirty_bitmap(slot);
 
        kvm_arch_free_memslot(kvm, slot);
@@ -1166,6 +1209,9 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
        spin_lock_init(&kvm->mn_invalidate_lock);
        rcuwait_init(&kvm->mn_memslots_update_rcuwait);
        xa_init(&kvm->vcpu_array);
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+       xa_init(&kvm->mem_attr_array);
+#endif
 
        INIT_LIST_HEAD(&kvm->gpc_list);
        spin_lock_init(&kvm->gpc_lock);
@@ -1190,7 +1236,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
                goto out_err_no_irq_srcu;
 
        refcount_set(&kvm->users_count, 1);
-       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+       for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
                for (j = 0; j < 2; j++) {
                        slots = &kvm->__memslots[i][j];
 
@@ -1222,7 +1268,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
        if (r)
                goto out_err_no_disable;
 
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
        INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
 
@@ -1256,7 +1302,7 @@ out_err:
 out_err_no_debugfs:
        kvm_coalesced_mmio_free(kvm);
 out_no_coalesced_mmio:
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
        if (kvm->mmu_notifier.ops)
                mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
 #endif
@@ -1315,7 +1361,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
                kvm->buses[i] = NULL;
        }
        kvm_coalesced_mmio_free(kvm);
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
        /*
         * At this point, pending calls to invalidate_range_start()
@@ -1324,20 +1370,30 @@ static void kvm_destroy_vm(struct kvm *kvm)
         * No threads can be waiting in kvm_swap_active_memslots() as the
         * last reference on KVM has been dropped, but freeing
         * memslots would deadlock without this manual intervention.
+        *
+        * If the count isn't unbalanced, i.e. KVM did NOT unregister its MMU
+        * notifier between a start() and end(), then there shouldn't be any
+        * in-progress invalidations.
         */
        WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
-       kvm->mn_active_invalidate_count = 0;
+       if (kvm->mn_active_invalidate_count)
+               kvm->mn_active_invalidate_count = 0;
+       else
+               WARN_ON(kvm->mmu_invalidate_in_progress);
 #else
        kvm_flush_shadow_all(kvm);
 #endif
        kvm_arch_destroy_vm(kvm);
        kvm_destroy_devices(kvm);
-       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+       for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
                kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
                kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
        }
        cleanup_srcu_struct(&kvm->irq_srcu);
        cleanup_srcu_struct(&kvm->srcu);
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+       xa_destroy(&kvm->mem_attr_array);
+#endif
        kvm_arch_free_vm(kvm);
        preempt_notifier_dec();
        hardware_disable_all();
@@ -1538,10 +1594,26 @@ static void kvm_replace_memslot(struct kvm *kvm,
        }
 }
 
-static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
+/*
+ * Flags that do not access any of the extra space of struct
+ * kvm_userspace_memory_region2.  KVM_SET_USER_MEMORY_REGION_V1_FLAGS
+ * only allows these.
+ */
+#define KVM_SET_USER_MEMORY_REGION_V1_FLAGS \
+       (KVM_MEM_LOG_DIRTY_PAGES | KVM_MEM_READONLY)
+
+static int check_memory_region_flags(struct kvm *kvm,
+                                    const struct kvm_userspace_memory_region2 *mem)
 {
        u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
 
+       if (kvm_arch_has_private_mem(kvm))
+               valid_flags |= KVM_MEM_GUEST_MEMFD;
+
+       /* Dirty logging private memory is not currently supported. */
+       if (mem->flags & KVM_MEM_GUEST_MEMFD)
+               valid_flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
+
 #ifdef __KVM_HAVE_READONLY_MEM
        valid_flags |= KVM_MEM_READONLY;
 #endif
@@ -1603,7 +1675,7 @@ static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
         * space 0 will use generations 0, 2, 4, ... while address space 1 will
         * use generations 1, 3, 5, ...
         */
-       gen += KVM_ADDRESS_SPACE_NUM;
+       gen += kvm_arch_nr_memslot_as_ids(kvm);
 
        kvm_arch_memslots_updated(kvm, gen);
 
@@ -1940,7 +2012,7 @@ static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
  * Must be called holding kvm->slots_lock for write.
  */
 int __kvm_set_memory_region(struct kvm *kvm,
-                           const struct kvm_userspace_memory_region *mem)
+                           const struct kvm_userspace_memory_region2 *mem)
 {
        struct kvm_memory_slot *old, *new;
        struct kvm_memslots *slots;
@@ -1950,7 +2022,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
        int as_id, id;
        int r;
 
-       r = check_memory_region_flags(mem);
+       r = check_memory_region_flags(kvm, mem);
        if (r)
                return r;
 
@@ -1969,7 +2041,11 @@ int __kvm_set_memory_region(struct kvm *kvm,
             !access_ok((void __user *)(unsigned long)mem->userspace_addr,
                        mem->memory_size))
                return -EINVAL;
-       if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
+       if (mem->flags & KVM_MEM_GUEST_MEMFD &&
+           (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
+            mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
+               return -EINVAL;
+       if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
                return -EINVAL;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                return -EINVAL;
@@ -2007,6 +2083,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
                if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
                        return -EINVAL;
        } else { /* Modify an existing slot. */
+               /* Private memslots are immutable, they can only be deleted. */
+               if (mem->flags & KVM_MEM_GUEST_MEMFD)
+                       return -EINVAL;
                if ((mem->userspace_addr != old->userspace_addr) ||
                    (npages != old->npages) ||
                    ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
@@ -2035,16 +2114,29 @@ int __kvm_set_memory_region(struct kvm *kvm,
        new->npages = npages;
        new->flags = mem->flags;
        new->userspace_addr = mem->userspace_addr;
+       if (mem->flags & KVM_MEM_GUEST_MEMFD) {
+               r = kvm_gmem_bind(kvm, new, mem->guest_memfd, mem->guest_memfd_offset);
+               if (r)
+                       goto out;
+       }
 
        r = kvm_set_memslot(kvm, old, new, change);
        if (r)
-               kfree(new);
+               goto out_unbind;
+
+       return 0;
+
+out_unbind:
+       if (mem->flags & KVM_MEM_GUEST_MEMFD)
+               kvm_gmem_unbind(new);
+out:
+       kfree(new);
        return r;
 }
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 
 int kvm_set_memory_region(struct kvm *kvm,
-                         const struct kvm_userspace_memory_region *mem)
+                         const struct kvm_userspace_memory_region2 *mem)
 {
        int r;
 
@@ -2056,7 +2148,7 @@ int kvm_set_memory_region(struct kvm *kvm,
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-                                         struct kvm_userspace_memory_region *mem)
+                                         struct kvm_userspace_memory_region2 *mem)
 {
        if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
                return -EINVAL;
@@ -2089,7 +2181,7 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
 
        as_id = log->slot >> 16;
        id = (u16)log->slot;
-       if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+       if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
                return -EINVAL;
 
        slots = __kvm_memslots(kvm, as_id);
@@ -2151,7 +2243,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
 
        as_id = log->slot >> 16;
        id = (u16)log->slot;
-       if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+       if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
                return -EINVAL;
 
        slots = __kvm_memslots(kvm, as_id);
@@ -2263,7 +2355,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
 
        as_id = log->slot >> 16;
        id = (u16)log->slot;
-       if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+       if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
                return -EINVAL;
 
        if (log->first_page & 63)
@@ -2335,6 +2427,200 @@ static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
 }
 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
 
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+/*
+ * Returns true if _all_ gfns in the range [@start, @end) have attributes
+ * matching @attrs.
+ */
+bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+                                    unsigned long attrs)
+{
+       XA_STATE(xas, &kvm->mem_attr_array, start);
+       unsigned long index;
+       bool has_attrs;
+       void *entry;
+
+       rcu_read_lock();
+
+       if (!attrs) {
+               has_attrs = !xas_find(&xas, end - 1);
+               goto out;
+       }
+
+       has_attrs = true;
+       for (index = start; index < end; index++) {
+               do {
+                       entry = xas_next(&xas);
+               } while (xas_retry(&xas, entry));
+
+               if (xas.xa_index != index || xa_to_value(entry) != attrs) {
+                       has_attrs = false;
+                       break;
+               }
+       }
+
+out:
+       rcu_read_unlock();
+       return has_attrs;
+}
+
+static u64 kvm_supported_mem_attributes(struct kvm *kvm)
+{
+       if (!kvm || kvm_arch_has_private_mem(kvm))
+               return KVM_MEMORY_ATTRIBUTE_PRIVATE;
+
+       return 0;
+}
+
+static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
+                                                struct kvm_mmu_notifier_range *range)
+{
+       struct kvm_gfn_range gfn_range;
+       struct kvm_memory_slot *slot;
+       struct kvm_memslots *slots;
+       struct kvm_memslot_iter iter;
+       bool found_memslot = false;
+       bool ret = false;
+       int i;
+
+       gfn_range.arg = range->arg;
+       gfn_range.may_block = range->may_block;
+
+       for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
+               slots = __kvm_memslots(kvm, i);
+
+               kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
+                       slot = iter.slot;
+                       gfn_range.slot = slot;
+
+                       gfn_range.start = max(range->start, slot->base_gfn);
+                       gfn_range.end = min(range->end, slot->base_gfn + slot->npages);
+                       if (gfn_range.start >= gfn_range.end)
+                               continue;
+
+                       if (!found_memslot) {
+                               found_memslot = true;
+                               KVM_MMU_LOCK(kvm);
+                               if (!IS_KVM_NULL_FN(range->on_lock))
+                                       range->on_lock(kvm);
+                       }
+
+                       ret |= range->handler(kvm, &gfn_range);
+               }
+       }
+
+       if (range->flush_on_ret && ret)
+               kvm_flush_remote_tlbs(kvm);
+
+       if (found_memslot)
+               KVM_MMU_UNLOCK(kvm);
+}
+
+static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
+                                         struct kvm_gfn_range *range)
+{
+       /*
+        * Unconditionally add the range to the invalidation set, regardless of
+        * whether or not the arch callback actually needs to zap SPTEs.  E.g.
+        * if KVM supports RWX attributes in the future and the attributes are
+        * going from R=>RW, zapping isn't strictly necessary.  Unconditionally
+        * adding the range allows KVM to require that MMU invalidations add at
+        * least one range between begin() and end(), e.g. allows KVM to detect
+        * bugs where the add() is missed.  Relaxing the rule *might* be safe,
+        * but it's not obvious that allowing new mappings while the attributes
+        * are in flux is desirable or worth the complexity.
+        */
+       kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
+
+       return kvm_arch_pre_set_memory_attributes(kvm, range);
+}
+
+/* Set @attributes for the gfn range [@start, @end). */
+static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+                                    unsigned long attributes)
+{
+       struct kvm_mmu_notifier_range pre_set_range = {
+               .start = start,
+               .end = end,
+               .handler = kvm_pre_set_memory_attributes,
+               .on_lock = kvm_mmu_invalidate_begin,
+               .flush_on_ret = true,
+               .may_block = true,
+       };
+       struct kvm_mmu_notifier_range post_set_range = {
+               .start = start,
+               .end = end,
+               .arg.attributes = attributes,
+               .handler = kvm_arch_post_set_memory_attributes,
+               .on_lock = kvm_mmu_invalidate_end,
+               .may_block = true,
+       };
+       unsigned long i;
+       void *entry;
+       int r = 0;
+
+       entry = attributes ? xa_mk_value(attributes) : NULL;
+
+       mutex_lock(&kvm->slots_lock);
+
+       /* Nothing to do if the entire range already has the desired attributes. */
+       if (kvm_range_has_memory_attributes(kvm, start, end, attributes))
+               goto out_unlock;
+
+       /*
+        * Reserve memory ahead of time to avoid having to deal with failures
+        * partway through setting the new attributes.
+        */
+       for (i = start; i < end; i++) {
+               r = xa_reserve(&kvm->mem_attr_array, i, GFP_KERNEL_ACCOUNT);
+               if (r)
+                       goto out_unlock;
+       }
+
+       kvm_handle_gfn_range(kvm, &pre_set_range);
+
+       for (i = start; i < end; i++) {
+               r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
+                                   GFP_KERNEL_ACCOUNT));
+               KVM_BUG_ON(r, kvm);
+       }
+
+       kvm_handle_gfn_range(kvm, &post_set_range);
+
+out_unlock:
+       mutex_unlock(&kvm->slots_lock);
+
+       return r;
+}
+static int kvm_vm_ioctl_set_mem_attributes(struct kvm *kvm,
+                                          struct kvm_memory_attributes *attrs)
+{
+       gfn_t start, end;
+
+       /* flags is currently not used. */
+       if (attrs->flags)
+               return -EINVAL;
+       if (attrs->attributes & ~kvm_supported_mem_attributes(kvm))
+               return -EINVAL;
+       if (attrs->size == 0 || attrs->address + attrs->size < attrs->address)
+               return -EINVAL;
+       if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size))
+               return -EINVAL;
+
+       start = attrs->address >> PAGE_SHIFT;
+       end = (attrs->address + attrs->size) >> PAGE_SHIFT;
+
+       /*
+        * xarray tracks data using "unsigned long", and as a result so does
+        * KVM.  For simplicity, generic attributes are supported only on 64-bit
+        * architectures.
+        */
+       BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long));
+
+       return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes);
+}
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
        return __gfn_to_memslot(kvm_memslots(kvm), gfn);
@@ -4527,13 +4813,14 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 {
        switch (arg) {
        case KVM_CAP_USER_MEMORY:
+       case KVM_CAP_USER_MEMORY2:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
        case KVM_CAP_INTERNAL_ERROR_DATA:
 #ifdef CONFIG_HAVE_KVM_MSI
        case KVM_CAP_SIGNAL_MSI:
 #endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
        case KVM_CAP_IRQFD:
 #endif
        case KVM_CAP_IOEVENTFD_ANY_LENGTH:
@@ -4555,9 +4842,11 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
        case KVM_CAP_IRQ_ROUTING:
                return KVM_MAX_IRQ_ROUTES;
 #endif
-#if KVM_ADDRESS_SPACE_NUM > 1
+#if KVM_MAX_NR_ADDRESS_SPACES > 1
        case KVM_CAP_MULTI_ADDRESS_SPACE:
-               return KVM_ADDRESS_SPACE_NUM;
+               if (kvm)
+                       return kvm_arch_nr_memslot_as_ids(kvm);
+               return KVM_MAX_NR_ADDRESS_SPACES;
 #endif
        case KVM_CAP_NR_MEMSLOTS:
                return KVM_USER_MEM_SLOTS;
@@ -4578,7 +4867,16 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 #endif
        case KVM_CAP_BINARY_STATS_FD:
        case KVM_CAP_SYSTEM_EVENT_DATA:
+       case KVM_CAP_DEVICE_CTRL:
                return 1;
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+       case KVM_CAP_MEMORY_ATTRIBUTES:
+               return kvm_supported_mem_attributes(kvm);
+#endif
+#ifdef CONFIG_KVM_PRIVATE_MEM
+       case KVM_CAP_GUEST_MEMFD:
+               return !kvm || kvm_arch_has_private_mem(kvm);
+#endif
        default:
                break;
        }
@@ -4657,7 +4955,7 @@ bool kvm_are_all_memslots_empty(struct kvm *kvm)
 
        lockdep_assert_held(&kvm->slots_lock);
 
-       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+       for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
                if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
                        return false;
        }
@@ -4783,6 +5081,14 @@ static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
        return fd;
 }
 
+#define SANITY_CHECK_MEM_REGION_FIELD(field)                                   \
+do {                                                                           \
+       BUILD_BUG_ON(offsetof(struct kvm_userspace_memory_region, field) !=             \
+                    offsetof(struct kvm_userspace_memory_region2, field));     \
+       BUILD_BUG_ON(sizeof_field(struct kvm_userspace_memory_region, field) !=         \
+                    sizeof_field(struct kvm_userspace_memory_region2, field)); \
+} while (0)
+
 static long kvm_vm_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
@@ -4805,15 +5111,39 @@ static long kvm_vm_ioctl(struct file *filp,
                r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
                break;
        }
+       case KVM_SET_USER_MEMORY_REGION2:
        case KVM_SET_USER_MEMORY_REGION: {
-               struct kvm_userspace_memory_region kvm_userspace_mem;
+               struct kvm_userspace_memory_region2 mem;
+               unsigned long size;
+
+               if (ioctl == KVM_SET_USER_MEMORY_REGION) {
+                       /*
+                        * Fields beyond struct kvm_userspace_memory_region shouldn't be
+                        * accessed, but avoid leaking kernel memory in case of a bug.
+                        */
+                       memset(&mem, 0, sizeof(mem));
+                       size = sizeof(struct kvm_userspace_memory_region);
+               } else {
+                       size = sizeof(struct kvm_userspace_memory_region2);
+               }
+
+               /* Ensure the common parts of the two structs are identical. */
+               SANITY_CHECK_MEM_REGION_FIELD(slot);
+               SANITY_CHECK_MEM_REGION_FIELD(flags);
+               SANITY_CHECK_MEM_REGION_FIELD(guest_phys_addr);
+               SANITY_CHECK_MEM_REGION_FIELD(memory_size);
+               SANITY_CHECK_MEM_REGION_FIELD(userspace_addr);
 
                r = -EFAULT;
-               if (copy_from_user(&kvm_userspace_mem, argp,
-                                               sizeof(kvm_userspace_mem)))
+               if (copy_from_user(&mem, argp, size))
                        goto out;
 
-               r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
+               r = -EINVAL;
+               if (ioctl == KVM_SET_USER_MEMORY_REGION &&
+                   (mem.flags & ~KVM_SET_USER_MEMORY_REGION_V1_FLAGS))
+                       goto out;
+
+               r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
                break;
        }
        case KVM_GET_DIRTY_LOG: {
@@ -4927,9 +5257,8 @@ static long kvm_vm_ioctl(struct file *filp,
                        goto out;
                if (routing.nr) {
                        urouting = argp;
-                       entries = vmemdup_user(urouting->entries,
-                                              array_size(sizeof(*entries),
-                                                         routing.nr));
+                       entries = vmemdup_array_user(urouting->entries,
+                                                    routing.nr, sizeof(*entries));
                        if (IS_ERR(entries)) {
                                r = PTR_ERR(entries);
                                goto out;
@@ -4941,6 +5270,18 @@ static long kvm_vm_ioctl(struct file *filp,
                break;
        }
 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+       case KVM_SET_MEMORY_ATTRIBUTES: {
+               struct kvm_memory_attributes attrs;
+
+               r = -EFAULT;
+               if (copy_from_user(&attrs, argp, sizeof(attrs)))
+                       goto out;
+
+               r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs);
+               break;
+       }
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
        case KVM_CREATE_DEVICE: {
                struct kvm_create_device cd;
 
@@ -4968,6 +5309,18 @@ static long kvm_vm_ioctl(struct file *filp,
        case KVM_GET_STATS_FD:
                r = kvm_vm_ioctl_get_stats_fd(kvm);
                break;
+#ifdef CONFIG_KVM_PRIVATE_MEM
+       case KVM_CREATE_GUEST_MEMFD: {
+               struct kvm_create_guest_memfd guest_memfd;
+
+               r = -EFAULT;
+               if (copy_from_user(&guest_memfd, argp, sizeof(guest_memfd)))
+                       goto out;
+
+               r = kvm_gmem_create(kvm, &guest_memfd);
+               break;
+       }
+#endif
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
@@ -5139,11 +5492,6 @@ static long kvm_dev_ioctl(struct file *filp,
                r += PAGE_SIZE;    /* coalesced mmio ring page */
 #endif
                break;
-       case KVM_TRACE_ENABLE:
-       case KVM_TRACE_PAUSE:
-       case KVM_TRACE_DISABLE:
-               r = -EOPNOTSUPP;
-               break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
@@ -6104,6 +6452,8 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
        if (WARN_ON_ONCE(r))
                goto err_vfio;
 
+       kvm_gmem_init(module);
+
        /*
         * Registration _must_ be the very last thing done, as this exposes
         * /dev/kvm to userspace, i.e. all infrastructure must be setup!
index 180f1a09e6ba7bc8b25275e6f938a1484c6e5f30..ecefc7ec51af8516c14c13bf0ad68ad1bc369e77 100644 (file)
@@ -37,4 +37,30 @@ static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
 }
 #endif /* HAVE_KVM_PFNCACHE */
 
+#ifdef CONFIG_KVM_PRIVATE_MEM
+void kvm_gmem_init(struct module *module);
+int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
+int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
+                 unsigned int fd, loff_t offset);
+void kvm_gmem_unbind(struct kvm_memory_slot *slot);
+#else
+static inline void kvm_gmem_init(struct module *module)
+{
+
+}
+
+static inline int kvm_gmem_bind(struct kvm *kvm,
+                                        struct kvm_memory_slot *slot,
+                                        unsigned int fd, loff_t offset)
+{
+       WARN_ON_ONCE(1);
+       return -EIO;
+}
+
+static inline void kvm_gmem_unbind(struct kvm_memory_slot *slot)
+{
+       WARN_ON_ONCE(1);
+}
+#endif /* CONFIG_KVM_PRIVATE_MEM */
+
 #endif /* __KVM_MM_H__ */